1 /*
2  *    Disk Array driver for Compaq SMART2 Controllers
3  *    Copyright 1998 Compaq Computer Corporation
4  *
5  *    This program is free software; you can redistribute it and/or modify
6  *    it under the terms of the GNU General Public License as published by
7  *    the Free Software Foundation; either version 2 of the License, or
8  *    (at your option) any later version.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    You should have received a copy of the GNU General Public License
16  *    along with this program; if not, write to the Free Software
17  *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18  *
19  *    Questions/Comments/Bugfixes to Cpqarray-discuss@lists.sourceforge.net
20  *
21  */
22 #include <linux/config.h>       /* CONFIG_PROC_FS */
23 #include <linux/module.h>
24 #include <linux/version.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/blkpg.h>
33 #include <linux/timer.h>
34 #include <linux/proc_fs.h>
35 #include <linux/init.h>
36 #include <linux/hdreg.h>
37 #include <linux/spinlock.h>
38 #include <asm/uaccess.h>
39 #include <asm/io.h>
40
41
42 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
43
44 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.4.28)"
45 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,4,28)
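/* Worked example: DRIVER_VERSION above packs to (2 << 16) | (4 << 8) | 28,
 * i.e. 0x0002041C; userspace can unpack the value returned by the
 * IDADRIVERVERSION ioctl by shifting and masking in the same way. */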
46
47 /* Embedded module documentation macros - see modules.h */
48 /* Original author Chris Frantz - Compaq Computer Corporation */
49 MODULE_AUTHOR("Compaq Computer Corporation");
50 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.4.28");
51 MODULE_LICENSE("GPL");
52
53 #define MAJOR_NR COMPAQ_SMART2_MAJOR
54 #include <linux/blk.h>
55 #include <linux/blkdev.h>
56 #include <linux/genhd.h>
57
58 #include "cpqarray.h"
59 #include "ida_cmd.h"
60 #include "smart1,2.h"
61 #include "ida_ioctl.h"
62
63 #define READ_AHEAD      128
64 #define NR_CMDS         128 /* This could probably go as high as ~400 */
65
66 #define MAX_CTLR        8
67
68 #define CPQARRAY_DMA_MASK       0xFFFFFFFF      /* 32 bit DMA */
69
70 static ctlr_info_t *hba[MAX_CTLR] = 
71         { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
72
73 static int eisa[8];
74
75 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
76
77 /*  board_id = Subsystem Device ID & Vendor ID
78  *  product = Marketing Name for the board
79  *  access = Address of the struct of function pointers 
80  */
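/* For example, 0x40300E11 (the SMART-2/P entry below) carries the PCI
 * subsystem vendor ID 0x0E11 (Compaq) in its low word and the subsystem
 * device ID 0x4030 in its high word, matching the dword read from PCI
 * config offset 0x2c in cpqarray_pci_init(). */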
81 static struct board_type products[] = {
82         { 0x0040110E, "IDA",                    &smart1_access },
83         { 0x0140110E, "IDA-2",                  &smart1_access },
84         { 0x1040110E, "IAES",                   &smart1_access },
85         { 0x2040110E, "SMART",                  &smart1_access },
86         { 0x3040110E, "SMART-2/E",              &smart2e_access },
87         { 0x40300E11, "SMART-2/P",              &smart2_access },
88         { 0x40310E11, "SMART-2SL",              &smart2_access },
89         { 0x40320E11, "Smart Array 3200",       &smart2_access },
90         { 0x40330E11, "Smart Array 3100ES",     &smart2_access },
91         { 0x40340E11, "Smart Array 221",        &smart2_access },
92         { 0x40400E11, "Integrated Array",       &smart4_access },
93         { 0x40480E11, "Compaq Raid LC2",        &smart4_access },
94         { 0x40500E11, "Smart Array 4200",       &smart4_access },
95         { 0x40510E11, "Smart Array 4250ES",     &smart4_access },
96         { 0x40580E11, "Smart Array 431",        &smart4_access },
97 };
98
99 /* define the PCI info for the PCI cards this driver can control */
100 const struct pci_device_id cpqarray_pci_device_id[] = 
101 {
102         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, 
103                 0x0E11, 0x4058, 0, 0, 0},       /* SA431 */
104         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, 
105                 0x0E11, 0x4051, 0, 0, 0},       /* SA4250ES */
106         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, 
107                 0x0E11, 0x4050, 0, 0, 0},       /* SA4200 */
108         { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109                 0x0E11, 0x4048, 0, 0, 0},       /* LC2 */
110         { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111                 0x0E11, 0x4040, 0, 0, 0},       /* Integrated Array */
112         { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, 
113                 0x0E11, 0x4034, 0, 0, 0},       /* SA 221 */ 
114         { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, 
115                 0x0E11, 0x4033, 0, 0, 0},       /* SA 3100ES*/
116         { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, 
117                 0x0E11, 0x4032, 0, 0, 0},       /* SA 3200*/
118         { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, 
119                 0x0E11, 0x4031, 0, 0, 0},       /* SA 2SL*/
120         { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, 
121                 0x0E11, 0x4030, 0, 0, 0},       /* SA 2P */
122         { 0 }
123 };
124
125 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
126
127 static struct proc_dir_entry *proc_array;
128
129 /* Debug... */
130 #define DBG(s)  do { s } while(0)
131 /* Debug (general info)... */
132 #define DBGINFO(s) do { } while(0)
133 /* Debug Paranoid... */
134 #define DBGP(s)  do { } while(0)
135 /* Debug Extra Paranoid... */
136 #define DBGPX(s) do { } while(0)
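/* Only DBG() actually executes its argument; the other three levels compile
 * to empty statements.  To enable one of them, change its body to
 * "do { s } while(0)" just like DBG(). */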
137
138 int cpqarray_init(void);
139 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
140 static void *remap_pci_mem(ulong base, ulong size);
141 static int cpqarray_eisa_detect(void);
142 static int pollcomplete(int ctlr);
143 static void getgeometry(int ctlr);
144 static void start_fwbk(int ctlr);
145
146 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
147 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
148
149 static void free_hba(int i);
150 static int alloc_cpqarray_hba(void);
151
152 static int sendcmd(
153         __u8    cmd,
154         int     ctlr,
155         void    *buff,
156         size_t  size,
157         unsigned int blk,
158         unsigned int blkcnt,
159         unsigned int log_unit );
160
161 static int ida_open(struct inode *inode, struct file *filep);
162 static int ida_release(struct inode *inode, struct file *filep);
163 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
164 static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io);
165 static int ida_ctlr_big_ioctl( int ctlr, int dsk, ida_big_ioctl_t *io);
166
167 static void do_ida_request(request_queue_t *q);
168 static void start_io(ctlr_info_t *h);
169
170 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
171 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
172 static inline void complete_buffers(struct buffer_head *bh, int ok);
173 static inline void complete_command(cmdlist_t *cmd, int timeout);
174
175 static void do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
176 static void ida_timer(unsigned long tdata);
177 static int frevalidate_logvol(kdev_t dev);
178 static int revalidate_logvol(kdev_t dev, int maxusage);
179 static int revalidate_allvol(kdev_t dev);
180
181 static int deregister_disk(int ctlr, int logvol);
182 static int register_new_disk(int cltr,int logvol);
183 static int cpqarray_register_ctlr(int ctlr, int type);
184
185 #ifdef CONFIG_PROC_FS
186 static void ida_procinit(int i);
187 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
188 #else
189 static void ida_procinit(int i) {}
190 static int ida_proc_get_info(char *buffer, char **start, off_t offset,
191                              int length, int *eof, void *data) { return 0;}
192 #endif
193
194 static void ida_geninit(int ctlr)
195 {
196         int i,j;
197         drv_info_t *drv;
198
199         for(i=0; i<NWD; i++) {
200                 drv = &hba[ctlr]->drv[i];
201                 if (!drv->nr_blks)
202                         continue;
203                 hba[ctlr]->hd[i<<NWD_SHIFT].nr_sects =
204                         hba[ctlr]->sizes[i<<NWD_SHIFT] = drv->nr_blks;
205
206                 for(j=0; j<IDA_MAX_PART; j++) {
207                         hba[ctlr]->blocksizes[(i<<NWD_SHIFT)+j] = 1024;
208                         hba[ctlr]->hardsizes[(i<<NWD_SHIFT)+j] = drv->blk_size;
209                 }
210         }
211         hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun +1;
212
213 }
214
215 static struct block_device_operations ida_fops  = {
216         owner:          THIS_MODULE,
217         open:           ida_open,
218         release:        ida_release,
219         ioctl:          ida_ioctl,
220         revalidate:     frevalidate_logvol,
221 };
222
223
224 #ifdef CONFIG_PROC_FS
225
226 /*
227  * Get us a file in /proc/driver/cpqarray that says something about each
228  * controller.  Create that directory if it doesn't exist yet.
229  */
230 static void __init ida_procinit(int i)
231 {
232         if (proc_array == NULL) {
233                 proc_array = proc_mkdir("cpqarray", proc_root_driver);
234                 if (!proc_array) return;
235         }
236
237         create_proc_read_entry(hba[i]->devname, 0, proc_array,
238                                ida_proc_get_info, hba[i]);
239 }
240
241 /*
242  * Report information about this controller.
243  */
244 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
245 {
246         off_t pos = 0;
247         off_t len = 0;
248         int size, i, ctlr;
249         ctlr_info_t *h = (ctlr_info_t*)data;
250         drv_info_t *drv;
251 #ifdef CPQ_PROC_PRINT_QUEUES
252         cmdlist_t *c;
253 #endif
254
255         ctlr = h->ctlr;
256         size = sprintf(buffer, "%s:  Compaq %s Controller\n"
257                 "       Board ID: 0x%08lx\n"
258                 "       Firmware Revision: %c%c%c%c\n"
259                 "       Controller Sig: 0x%08lx\n"
260                 "       Memory Address: 0x%08lx\n"
261                 "       I/O Port: 0x%04x\n"
262                 "       IRQ: %d\n"
263                 "       Logical drives: %d\n"
264                 "       Highest Logical ID: %d\n"
265                 "       Physical drives: %d\n\n"
266                 "       Current Q depth: %d\n"
267                 "       Max Q depth since init: %d\n\n",
268                 h->devname, 
269                 h->product_name,
270                 (unsigned long)h->board_id,
271                 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
272                 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
273                 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
274                 h->log_drives, h->highest_lun, h->phys_drives,
275                 h->Qdepth, h->maxQsinceinit);
276
277         pos += size; len += size;
278         
279         size = sprintf(buffer+len, "Logical Drive Info:\n");
280         pos += size; len += size;
281
282         for(i=0; i<=h->highest_lun; i++) {
283                 drv = &h->drv[i];
284                 if(drv->nr_blks != 0) {
285                         size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
286                                 ctlr, i, drv->blk_size, drv->nr_blks);
287                         pos += size; len += size;
288                 }
289         }
290
291 #ifdef CPQ_PROC_PRINT_QUEUES
292         size = sprintf(buffer+len, "\nCurrent Queues:\n");
293         pos += size; len += size;
294
295         c = h->reqQ;
296         size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
297         if (c) 
298                 c=c->next;
299         while(c && c != h->reqQ) {
300                 size = sprintf(buffer+len, "->%p", c);
301                 pos += size; len += size;
302                 c=c->next;
303         }
304
305         c = h->cmpQ;
306         size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
307         if (c) 
308                 c=c->next;
309         while(c && c != h->cmpQ) {
310                 size = sprintf(buffer+len, "->%p", c);
311                 pos += size; len += size;
312                 c=c->next;
313         }
314
315         size = sprintf(buffer+len, "\n"); pos += size; len += size;
316 #endif
317         size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
318                         h->nr_allocs, h->nr_frees);
319         pos += size; len += size;
320
321         *eof = 1;
322         *start = buffer+offset;
323         len -= offset;
324         if (len>length)
325                 len = length;
326         return len;
327 }
328 #endif /* CONFIG_PROC_FS */
329
330
331 MODULE_PARM(eisa, "1-8i");
332 EXPORT_NO_SYMBOLS;
333
334 /* This is a bit of a hack... */
335 int __init init_cpqarray_module(void)
336 {
337         if (cpqarray_init() == 0) /* all the block dev numbers already used */
338                 return -ENODEV;   /* or no controllers were found */
339         return 0;
340 }
341
342 static void release_io_mem(ctlr_info_t *c)
343 {
344         /* if the I/O memory region was never reserved, do nothing */
345         if( c->io_mem_addr == 0)
346                 return;
347         release_region(c->io_mem_addr, c->io_mem_length);
348         c->io_mem_addr = 0;
349         c->io_mem_length = 0;
350 }
351
352 static void __devexit cpqarray_remove_one (struct pci_dev *pdev)
353 {
354         int i;
355         ctlr_info_t *tmp_ptr;
356         char buff[4]; 
357
358         tmp_ptr = pci_get_drvdata(pdev);
359         if (tmp_ptr == NULL)
360         {
361                 printk(KERN_ERR "cpqarray: Unable to remove device\n");
362                 return;
363         }
364         i = tmp_ptr->ctlr;
365         if (hba[i] == NULL) 
366         {
367                 printk(KERN_ERR "cpqarray: device appears to "
368                         "have already been removed\n");
369                 return;
370         }
371
372         /* sendcmd will turn off interrupts and send the flush command
373          * to write all data in the battery-backed cache out to the disks. */
374         memset(buff, 0 , 4);
375         if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0)) {
376                 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
377                          i);    
378         }
379         free_irq(hba[i]->intr, hba[i]);
380         pci_set_drvdata(pdev, NULL);
381         /* remove it from the disk list */
382         del_gendisk(&(hba[i]->gendisk));
383
384         iounmap(hba[i]->vaddr);
385         unregister_blkdev(MAJOR_NR+i, hba[i]->devname);
386         del_timer(&hba[i]->timer);
387         blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i));
388         remove_proc_entry(hba[i]->devname, proc_array);
389         pci_free_consistent(hba[i]->pci_dev, 
390                         NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool), 
391                         hba[i]->cmd_pool_dhandle);
392         kfree(hba[i]->cmd_pool_bits);
393         release_io_mem(hba[i]);
394         free_hba(i);
395 }
396
397 /* Remove an instance that was not removed automatically;
398  * it must be an EISA card.
399  */
400 static void __devexit cpqarray_remove_one_eisa (int i)
401 {
402         char buff[4]; 
403
404         if (hba[i] == NULL) {
405                 printk(KERN_ERR "cpqarray: device appears to "
406                         "have already been removed\n");
407                 return;
408         }
409
410         /* sendcmd will turn off interrupts and send the flush command
411          * to write all data in the battery-backed cache out to the disks.
412          * No data is returned, but we don't want to pass NULL to sendcmd. */
413         if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0)) {
414                 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
415                          i);    
416         }
417         free_irq(hba[i]->intr, hba[i]);
418         /* remove it from the disk list */
419         del_gendisk(&(hba[i]->gendisk));
420
421         iounmap(hba[i]->vaddr);
422         unregister_blkdev(MAJOR_NR+i, hba[i]->devname);
423         del_timer(&hba[i]->timer);
424         blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i));
425         remove_proc_entry(hba[i]->devname, proc_array);
426         pci_free_consistent(hba[i]->pci_dev, 
427                         NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool), 
428                         hba[i]->cmd_pool_dhandle);
429         kfree(hba[i]->cmd_pool_bits);
430         release_io_mem(hba[i]);
431         free_hba(i);
432 }
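/*
 * Block-layer merge callbacks.  They let a request grow only while it still
 * fits in SG_MAX scatter-gather segments, and count a new segment only when
 * the incoming buffer is not contiguous with the request it merges into.
 */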
433 static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
434                                   int max_segments)
435 {
436         if (rq->nr_segments < SG_MAX) {
437                 rq->nr_segments++;
438                 return 1;
439         }
440         return 0;
441 }
442
443 static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
444                              struct buffer_head *bh, int max_segments)
445 {
446         if (blk_seg_merge_ok(rq->bhtail, bh))
447                 return 1;
448         return cpq_new_segment(q, rq, max_segments);
449 }
450
451 static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
452                              struct buffer_head *bh, int max_segments)
453 {
454         if (blk_seg_merge_ok(bh, rq->bh))
455                 return 1;
456         return cpq_new_segment(q, rq, max_segments);
457 }
458
459 static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
460                                  struct request *nxt, int max_segments)
461 {
462         int total_segments = rq->nr_segments + nxt->nr_segments;
463
464         if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
465                 total_segments--;
466
467         if (total_segments > SG_MAX)
468                 return 0;
469
470         rq->nr_segments = total_segments;
471         return 1;
472 }
473
474 static int cpqarray_register_ctlr(int ctlr, int type)
475 {
476         request_queue_t *q;
477         int j;
478         
479         /*
480          * register block devices
481          * Find disks and fill in structs
482          * Get an interrupt, set the Q depth and get into /proc
483          */
484
485         /* If this is successful, it should ensure that we are the only */
486         /* instance of the driver for this card */
487         if (register_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname, &ida_fops)) {
488                 printk(KERN_ERR "cpqarray: Unable to get major number %d\n", MAJOR_NR+ctlr);
489                 goto err_out;
490         }
491
492         hba[ctlr]->access.set_intr_mask(hba[ctlr], 0);
493         if (request_irq(hba[ctlr]->intr, do_ida_intr,
494                 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
495                 hba[ctlr]->devname, hba[ctlr])) {
496                 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
497                         hba[ctlr]->intr, hba[ctlr]->devname);
498                 unregister_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname);
499                 goto err_out;
500         }
501         hba[ctlr]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
502                 hba[ctlr]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
503                 &(hba[ctlr]->cmd_pool_dhandle));
504         hba[ctlr]->cmd_pool_bits = (__u32*)kmalloc(
505                 ((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
506
507         if (hba[ctlr]->cmd_pool_bits == NULL || hba[ctlr]->cmd_pool == NULL) {
508                 if (hba[ctlr]->cmd_pool_bits)
509                         kfree(hba[ctlr]->cmd_pool_bits);
510                 if (hba[ctlr]->cmd_pool)
511                         pci_free_consistent(hba[ctlr]->pci_dev,
512                                 NR_CMDS * sizeof(cmdlist_t),
513                                 hba[ctlr]->cmd_pool,
514                                 hba[ctlr]->cmd_pool_dhandle);
515
516                 free_irq(hba[ctlr]->intr, hba[ctlr]);
517                 unregister_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname);
518                 printk(KERN_ERR "cpqarray: out of memory\n");
519                 goto err_out;
520         }
521         memset(hba[ctlr]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
522         memset(hba[ctlr]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
523         printk(KERN_INFO "cpqarray: Finding drives on %s", hba[ctlr]->devname);
524         getgeometry(ctlr);
525         start_fwbk(ctlr);
526
527         hba[ctlr]->access.set_intr_mask(hba[ctlr], FIFO_NOT_EMPTY);
528
529         ida_procinit(ctlr);
530
531         q = BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr);
532         q->queuedata = hba[ctlr];
533         blk_init_queue(q, do_ida_request);
534         if (type)
535                 blk_queue_bounce_limit(q, hba[ctlr]->pci_dev->dma_mask);
536         blk_queue_headactive(q, 0);
537         blksize_size[MAJOR_NR+ctlr] = hba[ctlr]->blocksizes;
538         hardsect_size[MAJOR_NR+ctlr] = hba[ctlr]->hardsizes;
539         read_ahead[MAJOR_NR+ctlr] = READ_AHEAD;
540
541         q->back_merge_fn = cpq_back_merge_fn;
542         q->front_merge_fn = cpq_front_merge_fn;
543         q->merge_requests_fn = cpq_merge_requests_fn;
544
545         hba[ctlr]->gendisk.major = MAJOR_NR + ctlr;
546         hba[ctlr]->gendisk.major_name = "ida";
547         hba[ctlr]->gendisk.minor_shift = NWD_SHIFT;
548         hba[ctlr]->gendisk.max_p = IDA_MAX_PART;
549         hba[ctlr]->gendisk.part = hba[ctlr]->hd;
550         hba[ctlr]->gendisk.sizes = hba[ctlr]->sizes;
551         hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
552         hba[ctlr]->gendisk.fops = &ida_fops;
553
554         /* Get on the disk list */
555         add_gendisk(&(hba[ctlr]->gendisk));
556
557         init_timer(&hba[ctlr]->timer);
558         hba[ctlr]->timer.expires = jiffies + IDA_TIMER;
559         hba[ctlr]->timer.data = (unsigned long)hba[ctlr];
560         hba[ctlr]->timer.function = ida_timer;
561         add_timer(&hba[ctlr]->timer);
562
563         ida_geninit(ctlr);
564         for(j=0; j<NWD; j++)
565                 register_disk(&(hba[ctlr]->gendisk), MKDEV(MAJOR_NR+ctlr,j<<4),
566                         IDA_MAX_PART, &ida_fops, hba[ctlr]->drv[j].nr_blks);
567         return(ctlr);
568
569 err_out:
570         release_io_mem(hba[ctlr]);
571         free_hba(ctlr);
572         return (-1);
573 }
574
575
576 static int __init cpqarray_init_one( struct pci_dev *pdev,
577         const struct pci_device_id *ent)
578 {
579         int i,j;
580
581
582         printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
583                 " bus %d dev %d func %d\n",
584                 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
585                         PCI_FUNC(pdev->devfn));
586         i = alloc_cpqarray_hba();
587         if( i < 0 ) 
588                 return (-1);
589         memset(hba[i], 0, sizeof(ctlr_info_t));
590         /* fill in default block size */
591         for(j=0;j<256;j++)
592                 hba[i]->hardsizes[j] = hba[i]->drv[j].blk_size;
593
594         sprintf(hba[i]->devname, "ida%d", i);
595         hba[i]->ctlr = i;
596         /* Initialize the pdev driver private data */
597         pci_set_drvdata(pdev, hba[i]);
598
599         if (cpqarray_pci_init(hba[i], pdev) != 0) {
600                 release_io_mem(hba[i]);
601                 free_hba(i);
602                 return (-1);
603         }
604                         
605         return (cpqarray_register_ctlr(i, 1));
606 }
607 static struct pci_driver cpqarray_pci_driver = {
608         name:   "cpqarray",
609         probe:  cpqarray_init_one,
610         remove:  __devexit_p(cpqarray_remove_one),
611         id_table:  cpqarray_pci_device_id,
612 };
613
614 /*
615  *  This is it.  Find all the controllers and register them.  I really hate
616  *  stealing all these major device numbers.
617  *  returns the number of block devices registered.
618  */
619 int __init cpqarray_init(void)
620 {
621         int num_cntlrs_reg = 0;
622         int i;
623
624         /* detect controllers */
625         printk(DRIVER_NAME "\n");
626         pci_module_init(&cpqarray_pci_driver);
627         cpqarray_eisa_detect();
628
629         for(i=0; i< MAX_CTLR; i++) {
630                 if (hba[i] != NULL)
631                         num_cntlrs_reg++;
632         }
633         return(num_cntlrs_reg);
634 }
635 /* Find the first free slot in our hba[] array and allocate a controller */
636 /* structure for it.  Returns the index, or -1 if no free entries are left. */
637 static int alloc_cpqarray_hba(void)
638 {
639         int i;
640         for(i=0; i< MAX_CTLR; i++) {
641                 if (hba[i] == NULL) {
642                         hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
643                         if(hba[i]==NULL) {
644                                 printk(KERN_ERR "cpqarray: out of memory.\n");
645                                 return (-1);
646                         }
647                         return (i);
648                 }
649         }
650         printk(KERN_WARNING "cpqarray: This driver supports a maximum"
651                 " of 8 controllers.\n");
652         return(-1);
653 }
654
655 static void free_hba(int i)
656 {
657         kfree(hba[i]);
658         hba[i]=NULL;
659 }
660
661 /*
662  * Find the I/O address of the controller, its IRQ, and so forth.  Fill
663  * in some basic information in the ctlr_info_t structure.
664  */
665 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
666 {
667         ushort vendor_id, device_id, command;
668         unchar cache_line_size, latency_timer;
669         unchar irq, revision;
670         unsigned long addr[6];
671         __u32 board_id;
672
673         int i;
674
675         pci_read_config_word(pdev, PCI_COMMAND, &command);
676         /* check to see if controller has been disabled */
677         if(!(command & 0x02)) {
678                 printk(KERN_WARNING "cpqarray: controller appears to be disabled\n");
679                 return(-1);
680         }
681         
682         c->pci_dev = pdev;
683         vendor_id = pdev->vendor;
684         device_id = pdev->device;
685         irq = pdev->irq;
686
687         for(i=0; i<6; i++)
688                 addr[i] = pci_resource_start(pdev, i);
689
690         if (pci_enable_device(pdev)) {
691                 printk(KERN_ERR "cpqarray: Unable to enable PCI device\n");
692                 return -1;
693         }
694         if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0) {
695                 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
696                 return -1;
697         }
698
699         pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
700         pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
701         pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
702
703         pci_read_config_dword(pdev, 0x2c, &board_id);
704
705 DBGINFO(
706         printk("vendor_id = %x\n", vendor_id);
707         printk("device_id = %x\n", device_id);
708         printk("command = %x\n", command);
709         for(i=0; i<6; i++)
710                 printk("addr[%d] = %lx\n", i, addr[i]);
711         printk("revision = %x\n", revision);
712         printk("irq = %x\n", irq);
713         printk("cache_line_size = %x\n", cache_line_size);
714         printk("latency_timer = %x\n", latency_timer);
715         printk("board_id = %x\n", board_id);
716 );
717
718         c->intr = irq;
719         for(i=0; i<6; i++) {
720                 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) { 
721                         /* IO space */  
722                         c->io_mem_addr = addr[i];
723                         c->io_mem_length = pci_resource_end(pdev, i) 
724                                 - pci_resource_start(pdev, i) +1; 
725                         // printk("IO Value found addr[%d] %lx %lx\n",
726                         //              i, c->io_mem_addr, c->io_mem_length);
727                         if(!request_region( c->io_mem_addr, c->io_mem_length,
728                                 "cpqarray")) {
729                                 printk(KERN_WARNING "cpqarray: I/O memory range already in use, addr = %lx, length = %ld\n", c->io_mem_addr, c->io_mem_length);
730                                 c->io_mem_addr = 0;
731                                 c->io_mem_length = 0;
732                         }
733                         break;
734                 }
735         }       
736         c->paddr = 0;
737         for(i=0; i<6; i++)
738                 if (!(pci_resource_flags(pdev, i) & 
739                                         PCI_BASE_ADDRESS_SPACE_IO)) {
740                         c->paddr = pci_resource_start (pdev, i);
741                         break;
742                 }
743         if (!c->paddr)
744                 return -1;
745         c->vaddr = remap_pci_mem(c->paddr, 128);
746         if (!c->vaddr)
747                 return -1;
748         c->board_id = board_id;
749
750         for(i=0; i<NR_PRODUCTS; i++) {
751                 if (board_id == products[i].board_id) {
752                         c->product_name = products[i].product_name;
753                         c->access = *(products[i].access);
754                         break;
755                 }
756         }
757         if (i == NR_PRODUCTS) {
758                 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
759                         " to access the SMART Array controller %08lx\n", 
760                                 (unsigned long)board_id);
761                 return -1;
762         }
763
764         return 0;
765 }
766
767 /*
768  * Map (physical) PCI mem into (virtual) kernel space
769  */
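/*
 * ioremap() is handed a page-aligned physical address: we map from the start
 * of the page containing 'base' and return a pointer adjusted by the offset
 * into that page.
 */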
770 static void *remap_pci_mem(ulong base, ulong size)
771 {
772         ulong page_base        = ((ulong) base) & PAGE_MASK;
773         ulong page_offs        = ((ulong) base) - page_base;
774         void *page_remapped    = ioremap(page_base, page_offs+size);
775
776         return (page_remapped ? (page_remapped + page_offs) : NULL);
777 }
778
779 #ifndef MODULE
780 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,13)
781 /*
782  * Config string is a comma-separated set of I/O addresses of EISA cards.
783  */
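/*
 * Example (hypothetical addresses): booting with "smart2=0x4000,0x5000"
 * makes get_options() return a count of 2 in ints[0], so eisa[0] = 0x4000
 * and eisa[1] = 0x5000, and cpqarray_eisa_detect() will probe both ports.
 */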
784 static int cpqarray_setup(char *str)
785 {
786         int i, ints[9];
787
788         (void)get_options(str, ARRAY_SIZE(ints), ints);
789
790         for(i=0; i<ints[0] && i<8; i++)
791                 eisa[i] = ints[i+1];
792         return 1;
793 }
794
795 __setup("smart2=", cpqarray_setup);
796
797 #else
798
799 /*
800  * Copy the contents of the ints[] array passed to us by init.
801  */
802 void cpqarray_setup(char *str, int *ints)
803 {
804         int i;
805         for(i=0; i<ints[0] && i<8; i++)
806                 eisa[i] = ints[i+1];
807 }
808 #endif
809 #endif
810
811 /*
812  * Find an EISA controller's signature.  Set up an hba if we find it.
813  */
814 static int cpqarray_eisa_detect(void)
815 {
816         int i=0, j;
817         __u32 board_id;
818         int intr;
819         int ctlr;
820         int num_ctlr = 0;
821         while(i<8 && eisa[i]) {
822                 ctlr = alloc_cpqarray_hba();
823                 if (ctlr == -1 ) {
824                         break;
825                 }
826                 board_id = inl(eisa[i]+0xC80);
827                 for(j=0; j < NR_PRODUCTS; j++)
828                         if (board_id == products[j].board_id) 
829                                 break;
830
831                 if (j == NR_PRODUCTS) {
832                         printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
833                                 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
834                         free_hba(ctlr);
835                         continue;
836                 }
837                 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
838                 hba[ctlr]->io_mem_addr = eisa[i];
839                 hba[ctlr]->io_mem_length = 0x7FF;               
840                 if(!request_region( hba[ctlr]->io_mem_addr, 
841                                         hba[ctlr]->io_mem_length, 
842                                 "cpqarray")) {
843                         printk(KERN_WARNING "cpqarray: I/O range already in use, addr = %lx, length = %ld\n",
844                          hba[ctlr]->io_mem_addr, hba[ctlr]->io_mem_length);
845                         free_hba(ctlr);
846                         continue;       
847                 }
848                 /*
849                  * Read the config register to find our interrupt
850                  */
851                 intr = inb(eisa[i]+0xCC0) >> 4;
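                /* The high nibble selects the IRQ: bit 0 -> IRQ 11,
                 * bit 1 -> IRQ 10, bit 2 -> IRQ 14, bit 3 -> IRQ 15. */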
852                 if (intr & 1) 
853                         intr = 11;
854                 else if (intr & 2) 
855                         intr = 10;
856                 else if (intr & 4) 
857                         intr = 14;
858                 else if (intr & 8) 
859                         intr = 15;
860                 
861                 hba[ctlr]->intr = intr;
862                 sprintf(hba[ctlr]->devname, "ida%d", ctlr);
863                 hba[ctlr]->product_name = products[j].product_name;
864                 hba[ctlr]->access = *(products[j].access);
865                 hba[ctlr]->ctlr = ctlr;
866                 hba[ctlr]->board_id = board_id;
867                 hba[ctlr]->pci_dev = NULL; /* not PCI */
868
869 DBGINFO(
870         printk("i = %d, j = %d\n", i, j);
871         printk("irq = %x\n", intr);
872         printk("product name = %s\n", products[j].product_name);
873         printk("board_id = %x\n", board_id);
874 );
875
876                 num_ctlr++;
877                 i++;
878
879                 if (cpqarray_register_ctlr(ctlr, 0) == -1)
880                         printk(KERN_WARNING 
881                                 "cpqarray%d: Can't register EISA controller\n",
882                                 ctlr);
883         }
884
885         return num_ctlr;
886 }
887
888
889 /*
890  * Open.  Make sure the device is really there.
891  */
892 static int ida_open(struct inode *inode, struct file *filep)
893 {
894         int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
895         int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
896
897         DBGINFO(printk("ida_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk) );
898         if (ctlr >= MAX_CTLR || hba[ctlr] == NULL)
899                 return -ENXIO;
900
901         /*
902          * Root is allowed to open raw volume zero even if it's not configured
903          * so array config can still work.  I don't think I really like this,
904          * but I'm already using way too many device nodes to claim another one
905          * for "raw controller".
906          */
907         if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) {
908                 if (MINOR(inode->i_rdev) != 0)
909                         return -ENXIO;
910                 if (!capable(CAP_SYS_ADMIN))
911                         return -EPERM;
912         }
913
914         hba[ctlr]->drv[dsk].usage_count++;
915         hba[ctlr]->usage_count++;
916         return 0;
917 }
918
919 /*
920  * Close.  Sync first.
921  */
922 static int ida_release(struct inode *inode, struct file *filep)
923 {
924         int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
925         int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
926
927         DBGINFO(printk("ida_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk) );
928
929         hba[ctlr]->drv[dsk].usage_count--;
930         hba[ctlr]->usage_count--;
931         return 0;
932 }
933
934 /*
935  * Enqueuing and dequeuing functions for cmdlists.
936  */
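/*
 * Both reqQ and cmpQ are circular, doubly-linked lists of cmdlist_t.  The
 * queue pointer addresses the head, and addQ() links a new command in just
 * before it, i.e. at the tail.
 */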
937 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
938 {
939         if (*Qptr == NULL) {
940                 *Qptr = c;
941                 c->next = c->prev = c;
942         } else {
943                 c->prev = (*Qptr)->prev;
944                 c->next = (*Qptr);
945                 (*Qptr)->prev->next = c;
946                 (*Qptr)->prev = c;
947         }
948 }
949
950 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
951 {
952         if (c && c->next != c) {
953                 if (*Qptr == c) 
954                         *Qptr = c->next;
955                 c->prev->next = c->next;
956                 c->next->prev = c->prev;
957         } else {
958                 *Qptr = NULL;
959         }
960         return c;
961 }
962
963 static inline void complete_buffers(struct buffer_head *bh, int ok)
964 {
965         struct buffer_head *xbh;
966         while(bh) {
967                 xbh = bh->b_reqnext;
968                 bh->b_reqnext = NULL;
969                 
970                 blk_finished_io(bh->b_size >> 9);
971                 bh->b_end_io(bh, ok);
972
973                 bh = xbh;
974         }
975 }
976
977 /*
978  * Get a request and submit it to the controller.
979  * This routine needs to grab all the requests it possibly can from the
980  * req Q and submit them.  Interrupts are off (and need to be off) when you
981  * are in here (either via the dummy do_ida_request functions or by being
982  * called from the interrupt handler).
983  */
984 static void do_ida_request(request_queue_t *q)
985 {
986         ctlr_info_t *h = q->queuedata;
987         cmdlist_t *c;
988         unsigned long lastdataend;
989         struct list_head * queue_head = &q->queue_head;
990         struct buffer_head *bh;
991         struct request *creq;
992         struct scatterlist tmp_sg[SG_MAX];
993         int i, seg;
994
995         if (q->plugged)
996                 goto startio;
997
998 next:
999         if (list_empty(queue_head))
1000                 goto startio;
1001
1002         creq = blkdev_entry_next_request(queue_head);
1003         if (creq->nr_segments > SG_MAX)
1004                 BUG();
1005
1006         if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ) {
1007                 printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
1008                                 h->ctlr, creq->rq_dev, creq);
1009                 blkdev_dequeue_request(creq);
1010                 complete_buffers(creq->bh, 0);
1011                 end_that_request_last(creq);
1012                 goto startio;
1013         }
1014
1015         if ((c = cmd_alloc(h,1)) == NULL)
1016                 goto startio;
1017
1018         blkdev_dequeue_request(creq);
1019
1020         spin_unlock_irq(&io_request_lock);
1021
1022         bh = creq->bh;
1023
1024         c->ctlr = h->ctlr;
1025         c->hdr.unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
1026         c->hdr.size = sizeof(rblk_t) >> 2;
1027         c->size += sizeof(rblk_t);
1028
1029         c->req.hdr.blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect 
1030                                 + creq->sector;
1031         c->rq = creq;
1032 DBGPX(
1033         if (bh == NULL)
1034                 panic("bh == NULL?");
1035         
1036         printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
1037 );
1038         seg = 0;
1039         lastdataend = ~0UL;
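        /*
         * Walk the buffer_head chain, coalescing buffers that are physically
         * contiguous into a single scatter-gather entry, up to the hardware
         * limit of SG_MAX entries.
         */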
1040         while(bh) {
1041                 if (bh_phys(bh) == lastdataend) {
1042                         tmp_sg[seg-1].length += bh->b_size;
1043                         lastdataend += bh->b_size;
1044                 } else {
1045                         if (seg == SG_MAX)
1046                                 BUG();
1047                         tmp_sg[seg].page = bh->b_page;
1048                         tmp_sg[seg].length = bh->b_size;
1049                         tmp_sg[seg].offset = bh_offset(bh);
1050                         lastdataend = bh_phys(bh) + bh->b_size;
1051                         seg++;
1052                 }
1053                 bh = bh->b_reqnext;
1054         }
1055         /* Now do all the DMA Mappings */
1056         for( i=0; i < seg; i++) {
1057                 c->req.sg[i].size = tmp_sg[i].length;
1058                 c->req.sg[i].addr = (__u32) pci_map_page(
1059                                 h->pci_dev, tmp_sg[i].page, tmp_sg[i].offset,
1060                                 tmp_sg[i].length,
1061                                 (creq->cmd == READ) ? 
1062                                         PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
1063         }
1064 DBGPX(  printk("Submitting %ld sectors in %d segments\n", creq->nr_sectors, seg); );
1065         c->req.hdr.sg_cnt = seg;
1066         c->req.hdr.blk_cnt = creq->nr_sectors;
1067         c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
1068         c->type = CMD_RWREQ;
1069
1070         spin_lock_irq(&io_request_lock);
1071
1072         /* Put the request on the tail of the request queue */
1073         addQ(&h->reqQ, c);
1074         h->Qdepth++;
1075         if (h->Qdepth > h->maxQsinceinit) 
1076                 h->maxQsinceinit = h->Qdepth;
1077
1078         goto next;
1079
1080 startio:
1081         start_io(h);
1082 }
1083
1084 /* 
1085  * start_io submits everything on a controller's request queue
1086  * and moves it to the completion queue.
1087  *
1088  * Interrupts had better be off if you're in here
1089  */
1090 static void start_io(ctlr_info_t *h)
1091 {
1092         cmdlist_t *c;
1093
1094         while((c = h->reqQ) != NULL) {
1095                 /* Can't do anything if we're busy */
1096                 if (h->access.fifo_full(h) == 0)
1097                         return;
1098
1099                 /* Get the first entry from the request Q */
1100                 removeQ(&h->reqQ, c);
1101                 h->Qdepth--;
1102         
1103                 /* Tell the controller to do our bidding */
1104                 h->access.submit_command(h, c);
1105
1106                 /* Get onto the completion Q */
1107                 addQ(&h->cmpQ, c);
1108         }
1109 }
1110
1111 /*
1112  * Complete all the buffers that this command was responsible for.
1113  */
1114 static inline void complete_command(cmdlist_t *cmd, int timeout)
1115 {
1116         int ok=1;
1117         int i;
1118
1119         if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1120            (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1121                 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1122                                 cmd->ctlr, cmd->hdr.unit);
1123                 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1124         }
1125         if (cmd->req.hdr.rcode & RCODE_FATAL) {
1126                 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1127                                 cmd->ctlr, cmd->hdr.unit);
1128                 ok = 0;
1129         }
1130         if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1131                 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1132                                 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1133                                 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1134                                 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1135                 ok = 0; 
1136         }
1137         if (timeout) 
1138                 ok = 0;
1139         /* unmap the DMA mapping for all the scatter gather elements */
1140         for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1141         {
1142                 pci_unmap_page(hba[cmd->ctlr]->pci_dev,
1143                         cmd->req.sg[i].addr, cmd->req.sg[i].size,
1144                         (cmd->req.hdr.cmd == IDA_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
1145         }
1146
1147         complete_buffers(cmd->rq->bh, ok);
1148         DBGPX(printk("Done with %p\n", cmd->rq););
1149         end_that_request_last(cmd->rq);
1150 }
1151
1152 /*
1153  *  The controller will interrupt us upon completion of commands.
1154  *  Find the command on the completion queue, remove it, tell the OS and
1155  *  try to queue up more IO
1156  */
1157 static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1158 {
1159         ctlr_info_t *h = dev_id;
1160         cmdlist_t *c;
1161         unsigned long istat;
1162         unsigned long flags;
1163         __u32 a,a1;
1164
1165         istat = h->access.intr_pending(h);
1166         /* Is this interrupt for us? */
1167         if (istat == 0)
1168                 return;
1169
1170         /*
1171          * If there are completed commands in the completion queue,
1172          * we had better do something about it.
1173          */
1174         spin_lock_irqsave(&io_request_lock, flags);
1175         if (istat & FIFO_NOT_EMPTY) {
1176                 while((a = h->access.command_completed(h))) {
1177                         a1 = a; a &= ~3;
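                        /*
                         * The low two bits of the completed tag are status
                         * flags; mask them off to recover the command's bus
                         * address, but keep the raw value in a1 so a command
                         * error with rcode == 0 can be flagged as
                         * RCODE_INVREQ below.
                         */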
1178                         if ((c = h->cmpQ) == NULL)
1179                         {  
1180                                 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1181                                 continue;       
1182                         } 
1183                         while(c->busaddr != a) {
1184                                 c = c->next;
1185                                 if (c == h->cmpQ) 
1186                                         break;
1187                         }
1188                         /*
1189                          * If we've found the command, take it off the
1190                          * completion Q and free it
1191                          */
1192                         if (c->busaddr == a) {
1193                                 removeQ(&h->cmpQ, c);
1194                                 /*  Check for invalid command.
1195                                  *  The controller returns a command error,
1196                                  *  but rcode = 0.
1197                                  */
1198
1199                                 if((a1 & 0x03) && (c->req.hdr.rcode == 0)) {
1200                                         c->req.hdr.rcode = RCODE_INVREQ;
1201                                 }
1202                                 if (c->type == CMD_RWREQ) {
1203                                         complete_command(c, 0);
1204                                         cmd_free(h, c, 1);
1205                                 } else if (c->type == CMD_IOCTL_PEND) {
1206                                         complete(c->waiting);
1207                                 }
1208                                 continue;
1209                         }
1210                 }
1211         }
1212
1213         /*
1214          * See if we can queue up some more IO
1215          */
1216         do_ida_request(BLK_DEFAULT_QUEUE(MAJOR_NR + h->ctlr));
1217         spin_unlock_irqrestore(&io_request_lock, flags);
1218 }
1219
1220 /*
1221  * This timer was for timing out requests that haven't happened after
1222  * IDA_TIMEOUT.  That wasn't such a good idea.  Now the timer is only used
1223  * to reset the misc_tflags field so we don't flood the user with
1224  * "Non-Fatal error" messages.
1225  */
1226 static void ida_timer(unsigned long tdata)
1227 {
1228         ctlr_info_t *h = (ctlr_info_t*)tdata;
1229
1230         h->timer.expires = jiffies + IDA_TIMER;
1231         add_timer(&h->timer);
1232         h->misc_tflags = 0;
1233 }
1234
1235 /*
1236  *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1237  *  setting readahead and submitting commands from userspace to the controller.
1238  */
1239 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1240 {
1241         int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
1242         int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
1243         int error;
1244
1245         switch(cmd) {
1246         case HDIO_GETGEO:
1247         {
1248                 struct hd_geometry driver_geo;
1249
1250                 if (hba[ctlr]->drv[dsk].cylinders) {
1251                         driver_geo.heads = hba[ctlr]->drv[dsk].heads;
1252                         driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
1253                         driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
1254                 } else {
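                        /* No cylinder count is known for this volume, so fake
                         * a geometry of 255 heads and 63 sectors and derive
                         * the cylinder count from the total block count. */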
1255                         driver_geo.heads = 0xff;
1256                         driver_geo.sectors = 0x3f;
1257                         driver_geo.cylinders = hba[ctlr]->drv[dsk].nr_blks 
1258                                                         / (0xff*0x3f);
1259                 }
1260                 driver_geo.start=
1261                 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
1262                 if (copy_to_user((void *) arg, &driver_geo,
1263                                 sizeof( struct hd_geometry)))
1264                         return  -EFAULT;
1265                 return(0);
1266         }
1267         case HDIO_GETGEO_BIG:
1268         {
1269                 struct hd_big_geometry driver_geo;
1270
1271                 if (hba[ctlr]->drv[dsk].cylinders) {
1272                         driver_geo.heads = hba[ctlr]->drv[dsk].heads;
1273                         driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
1274                         driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
1275                 } else {
1276                         driver_geo.heads = 0xff;
1277                         driver_geo.sectors = 0x3f;
1278                         driver_geo.cylinders = hba[ctlr]->drv[dsk].nr_blks 
1279                                 / (0xff*0x3f);
1280                 }
1281                 driver_geo.start=
1282                         hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
1283                 if (copy_to_user((void *) arg, &driver_geo,
1284                                 sizeof( struct hd_big_geometry)))
1285                         return  -EFAULT;
1286                 return(0);
1287         }
1288
1289         case IDAGETDRVINFO:
1290         {
1291
1292                 ida_ioctl_t *io = (ida_ioctl_t*)arg;
1293                 return copy_to_user(&io->c.drv, &hba[ctlr]->drv[dsk], sizeof(drv_info_t)) ? -EFAULT : 0;
1294         }
1295         case BLKRRPART:
1296                 return revalidate_logvol(inode->i_rdev, 1);
1297         case IDAPASSTHRU:
1298         {
1299
1300                 ida_ioctl_t *io = (ida_ioctl_t*)arg;
1301                 ida_ioctl_t my_io;
1302
1303                 if (!capable(CAP_SYS_RAWIO)) 
1304                         return -EPERM;
1305                 if (copy_from_user(&my_io, io, sizeof(my_io)))
1306                         return -EFAULT;
1307                 error = ida_ctlr_ioctl(ctlr, dsk, &my_io);
1308                 if (error)
1309                         return error;
1310                 if (copy_to_user(io, &my_io, sizeof(my_io)))
1311                         return -EFAULT;
1312                 return 0;
1313         }
1314         case IDABIGPASSTHRU:
1315         {
1316
1317                 ida_big_ioctl_t *io = (ida_big_ioctl_t*)arg;
1318                 ida_big_ioctl_t my_io;
1319                 
1320                 if (!capable(CAP_SYS_RAWIO))
1321                         return -EPERM;
1322                 if (copy_from_user(&my_io, io, sizeof(my_io)))
1323                         return -EFAULT;
1324                 error = ida_ctlr_big_ioctl(ctlr, dsk, &my_io);
1325                 if (error)
1326                         return error;
1327                 if (copy_to_user(io, &my_io, sizeof(my_io)))
1328                         return -EFAULT;
1329                 return 0;
1330         }
1331         case IDAGETCTLRSIG:
1332                 if (!arg) 
1333                         return -EINVAL;
1334                 put_user(hba[ctlr]->ctlr_sig, (int*)arg);
1335                 return 0;
1336         case IDAREVALIDATEVOLS:
1337                 return revalidate_allvol(inode->i_rdev);
1338         case IDADRIVERVERSION:
1339                 if (!arg) return -EINVAL;
1340                 put_user(DRIVER_VERSION, (unsigned long*)arg);
1341                 return 0;
1342         case IDAGETPCIINFO:
1343         {
1344                 
1345                 ida_pci_info_struct pciinfo;
1346
1347                 if (!arg) 
1348                         return -EINVAL;
1349                 pciinfo.bus = hba[ctlr]->pci_dev->bus->number;
1350                 pciinfo.dev_fn = hba[ctlr]->pci_dev->devfn;
1351                 pciinfo.board_id = hba[ctlr]->board_id;
1352                 if(copy_to_user((void *) arg, &pciinfo,  
1353                         sizeof( ida_pci_info_struct)))
1354                                 return -EFAULT;
1355                 return(0);
1356         }       
1357         case IDADEREGDISK:
1358                         return( deregister_disk(ctlr,dsk));
1359         case IDAREGNEWDISK:
1360         {
1361                 int logvol = arg; 
1362                 return(register_new_disk(ctlr, logvol));
1363         }       
1364
1365         case IDAGETLOGINFO:
1366         {
1367                 idaLogvolInfo_struct luninfo;
1368                 int num_parts = 0;
1369                 int i, start;
1370
1371                 luninfo.LogVolID = dsk; 
1372                 luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;
1373
1374                 /* count partitions 1 to 15 with sizes > 0 */
1375                 start = (dsk << NWD_SHIFT);     
1376                 for(i=1; i <IDA_MAX_PART; i++) {
1377                         int minor = start+i;
1378                         if(hba[ctlr]->sizes[minor] != 0)
1379                                 num_parts++;
1380                 }
1381                 luninfo.num_parts = num_parts;
1382                 if (copy_to_user((void *) arg, &luninfo, 
1383                         sizeof( idaLogvolInfo_struct) ))
1384                                 return -EFAULT;
1385                 return(0);
1386         }
1387
1388         case BLKGETSIZE:
1389         case BLKGETSIZE64:
1390         case BLKFLSBUF:
1391         case BLKBSZSET:
1392         case BLKBSZGET:
1393         case BLKSSZGET:
1394         case BLKROSET:
1395         case BLKROGET:
1396         case BLKRASET:
1397         case BLKRAGET:
1398         case BLKELVGET:
1399         case BLKELVSET:
1400         case BLKPG:
1401                 return blk_ioctl(inode->i_rdev, cmd, arg);
1402
1403         default:
1404                 return -EINVAL;
1405         }
1406                 
1407 }
1408 /*
1409  * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1410  * The command block (io) has already been copied to kernel space for us,
1411  * however, any elements in the sglist need to be copied to kernel space
1412  * or copied back to userspace.
1413  *
1414  * Only root may perform a controller passthru command; however, I'm not doing
1415  * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
1416  * putting a 64M buffer in the sglist is probably a *bad* idea.
1417  */
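/*
 * Rough usage sketch (hypothetical userspace code, not part of this driver):
 *
 *      ida_ioctl_t io;                      // from ida_ioctl.h
 *      memset(&io, 0, sizeof(io));
 *      io.cmd = ...;                        // controller command to pass through
 *      io.sg[0].size = bufsize;
 *      io.sg[0].addr = (unsigned long)buf;  // user buffer, copied in/out below
 *      if (ioctl(fd, IDAPASSTHRU, &io) < 0) // fd: an open ida device node
 *              perror("IDAPASSTHRU");
 *
 * Note that the sglist handling below only ever uses sg[0].
 */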
1418 static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io)
1419 {
1420         ctlr_info_t *h = hba[ctlr];
1421         cmdlist_t *c;
1422         void *p = NULL;
1423         unsigned long flags;
1424         int error;
1425         DECLARE_COMPLETION(wait);
1426
1427         if ((c = cmd_alloc(h, 0)) == NULL)
1428                 return -ENOMEM;
1429         c->ctlr = ctlr;
1430         c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1431         c->hdr.size = sizeof(rblk_t) >> 2;
1432         c->size += sizeof(rblk_t);
1433
1434         c->req.hdr.cmd = io->cmd;
1435         c->req.hdr.blk = io->blk;
1436         c->req.hdr.blk_cnt = io->blk_cnt;
1437         c->type = CMD_IOCTL_PEND;
1438
1439         /* Pre submit processing */
1440         switch(io->cmd) {
1441         case PASSTHRU_A:
1442                 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1443                 if (!p) { 
1444                         error = -ENOMEM; 
1445                         cmd_free(h, c, 0); 
1446                         return(error);
1447                 }
1448                 if (copy_from_user(p, (void*)io->sg[0].addr, io->sg[0].size)) {
1449                         kfree(p);
1450                         cmd_free(h, c, 0); 
1451                         return -EFAULT;
1452                 }
1453                 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
1454                                 sizeof(ida_ioctl_t), 
1455                                 PCI_DMA_BIDIRECTIONAL);
1456                 c->req.sg[0].size = io->sg[0].size;
1457                 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1458                         c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1459                 c->req.hdr.sg_cnt = 1;
1460                 break;
1461         case IDA_READ:
1462         case SENSE_SURF_STATUS:
1463         case READ_FLASH_ROM:
1464         case SENSE_CONTROLLER_PERFORMANCE:
1465                 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1466                 if (!p) { 
1467                         error = -ENOMEM; 
1468                         cmd_free(h, c, 0);
1469                         return(error);
1470                 }
1471
1472                 c->req.sg[0].size = io->sg[0].size;
1473                 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1474                         c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1475                 c->req.hdr.sg_cnt = 1;
1476                 break;
1477         case IDA_WRITE:
1478         case IDA_WRITE_MEDIA:
1479         case DIAG_PASS_THRU:
1480         case COLLECT_BUFFER:
1481         case WRITE_FLASH_ROM:
1482                 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1483                 if (!p) { 
1484                         error = -ENOMEM; 
1485                         cmd_free(h, c, 0);
1486                         return(error);
1487                 }
1488                 if (copy_from_user(p, (void*)io->sg[0].addr, io->sg[0].size)) {
1489                         kfree(p);
1490                         cmd_free(h, c, 0);
1491                         return -EFAULT;
1492                 }
1493                 c->req.sg[0].size = io->sg[0].size;
1494                 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1495                         c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1496                 c->req.hdr.sg_cnt = 1;
1497                 break;
1498         default:
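                     /* any other command just maps the ioctl block itself as the
                      * single scatter/gather entry */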
1499                 c->req.sg[0].size = sizeof(io->c);
1500                 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c, 
1501                         c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1502                 c->req.hdr.sg_cnt = 1;
1503         }
1504
1505         c->waiting = &wait;
1506
1507         /* Put the request on the tail of the request queue */
1508         spin_lock_irqsave(&io_request_lock, flags);
1509         addQ(&h->reqQ, c);
1510         h->Qdepth++;
1511         start_io(h);
1512         spin_unlock_irqrestore(&io_request_lock, flags);
1513
1514         /* Wait for completion */
1515         wait_for_completion(&wait);
1516
1517         /* Unmap the DMA  */
1518         pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, 
1519                 PCI_DMA_BIDIRECTIONAL);
1520         /* Post submit processing */
1521         switch(io->cmd) {
1522         case PASSTHRU_A:
1523                 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1524                                 sizeof(ida_ioctl_t),
1525                                 PCI_DMA_BIDIRECTIONAL);
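                     /* fall through: PASSTHRU_A also returns data in p */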
1526         case IDA_READ:
1527         case SENSE_SURF_STATUS:
1528         case DIAG_PASS_THRU:
1529         case SENSE_CONTROLLER_PERFORMANCE:
1530         case READ_FLASH_ROM:
1531                 if (copy_to_user((void*)io->sg[0].addr, p, io->sg[0].size)) {
1532                         kfree(p);
                             cmd_free(h, c, 0);
1533                         return -EFAULT;
1534                 }
1535                 /* fall through and free p */
1536         case IDA_WRITE:
1537         case IDA_WRITE_MEDIA:
1538         case COLLECT_BUFFER:
1539         case WRITE_FLASH_ROM:
1540                 kfree(p);
1541                 break;
1542         default:;
1543                 /* Nothing to do */
1544         }
1545
1546         io->rcode = c->req.hdr.rcode;
1547         cmd_free(h, c, 0);
1548         return(0);
1549 }
1550
1551 /*
1552  * ida_ctlr_big_ioctl is for passing commands to the controller from userspace.
1553  * The command block (io) has already been copied to kernel space for us.
1554  *
1555  * Only root may perform a controller passthru command; however, I'm not doing
1556  * any serious sanity checking on the arguments.  
1557  */
1558 static int ida_ctlr_big_ioctl(int ctlr, int dsk, ida_big_ioctl_t *io)
1559 {
1560         ctlr_info_t *h = hba[ctlr];
1561         cmdlist_t *c;
1562         __u8   *scsi_param = NULL;
1563         __u8    *buff[SG_MAX] = {NULL,};
1564         size_t  buff_size[SG_MAX];
1565         __u8    sg_used = 0;
1566         unsigned long flags;
1567         int error = 0;
1568         int i;
1569         DECLARE_COMPLETION(wait);
1570
1571         /* Check kmalloc limits  using all SGs */
1572         if( io->buff_malloc_size > IDA_MAX_KMALLOC_SIZE)
1573                 return -EINVAL;
1574         if( io->buff_size > io->buff_malloc_size * SG_MAX)
1575                 return -EINVAL;
1576         if ((c = cmd_alloc(h, 0)) == NULL)
1577                 return -ENOMEM;
1578
1579         c->ctlr = ctlr;
1580         c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1581         c->hdr.size = sizeof(rblk_t) >> 2;
1582         c->size += sizeof(rblk_t);
1583
1584         c->req.hdr.cmd = io->cmd;
1585         c->req.hdr.blk = io->blk;
1586         c->req.hdr.blk_cnt = io->blk_cnt;
1587         c->type = CMD_IOCTL_PEND;
1588
1589         /* Pre submit processing */
1590         /* for passthru_a the scsi command is in another record */
1591         if (io->cmd == PASSTHRU_A) {
1592
1593                 if (io->scsi_param == NULL)
1594                 {
1595                         error = -EINVAL;
1596                         cmd_free(h, c, 0);
1597                         return(error);
1598                 }
1599                 scsi_param = kmalloc(sizeof(scsi_param_t),  GFP_KERNEL);
1600                 if (scsi_param == NULL) {
1601                         error = -ENOMEM;
1602                         cmd_free(h, c, 0);
1603                         return(error);
1604                 }
1605
1606                 /* copy the scsi command to get passed thru */  
1607                 if (copy_from_user(scsi_param, io->scsi_param, 
1608                                         sizeof(scsi_param_t))) {        
1609                         kfree(scsi_param);
1610                         cmd_free(h, c, 0);
1611                         return -EFAULT;
1612                 }
1613
1614                 /* with this command the scsi command is separate */
1615                 c->req.hdr.blk = pci_map_single(h->pci_dev, scsi_param,
1616                                 sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
1617         }
1618
1619         /* fill in the SG entries */
1620         /* create buffers if we need to */ 
1621         if(io->buff_size > 0) {
1622                 size_t size_left_alloc = io->buff_size;
1623                 __u8 *data_ptr = io->buff;
1624
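                     /* carve the transfer into chunks of at most buff_malloc_size
                      * bytes, one kernel buffer and one sg entry per chunk */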
1625                 while(size_left_alloc > 0) {
1626                         buff_size[sg_used] = (size_left_alloc 
1627                                         > io->buff_malloc_size)
1628                                 ? io->buff_malloc_size : size_left_alloc;
1629                         buff[sg_used] = kmalloc( buff_size[sg_used], 
1630                                         GFP_KERNEL);
1631                         if (buff[sg_used] == NULL) {
1632                                 error = -ENOMEM;
1633                                 goto ida_alloc_cleanup;
1634                         }
1635                         if(io->xfer_type & IDA_XFER_WRITE) {
1636                                 /* Copy the data into the buffer created */
1637                                 if (copy_from_user(buff[sg_used], data_ptr,
1638                                                 buff_size[sg_used])) {
1639                                         error = -EFAULT;
1640                                         goto ida_alloc_cleanup;
1641                                 }
1642                         }
1643                         /* put the data into the scatter gather list */
1644                         c->req.sg[sg_used].size = buff_size[sg_used];
1645                         c->req.sg[sg_used].addr = pci_map_single(h->pci_dev, 
1646                                         buff[sg_used], buff_size[sg_used],
1647                                          PCI_DMA_BIDIRECTIONAL);
1648                         
1649                         size_left_alloc -= buff_size[sg_used];
1650                         data_ptr += buff_size[sg_used];
1651                         sg_used++;
1652                 }
1653         }
1654         c->req.hdr.sg_cnt = sg_used;
1655
1656         c->waiting = &wait;
1657
1658         /* Put the request on the tail of the request queue */
1659         spin_lock_irqsave(&io_request_lock, flags);
1660         addQ(&h->reqQ, c);
1661         h->Qdepth++;
1662         start_io(h);
1663         spin_unlock_irqrestore(&io_request_lock, flags);
1664
1665         /* Wait for completion */
1666         wait_for_completion(&wait);
1667         /* Unmap the DMA  */
1668         for(i=0; i<c->req.hdr.sg_cnt; i++) {
1669                 pci_unmap_single(h->pci_dev, c->req.sg[i].addr, 
1670                                 c->req.sg[i].size, PCI_DMA_BIDIRECTIONAL);
1671         }
1672
1673         /* if we are reading data from the hardware copy it back to user */
1674         if (io->xfer_type & IDA_XFER_READ) {
1675                 __u8    *data_ptr = io->buff;
1676                 int i;
1677
1678                 for(i=0; i<c->req.hdr.sg_cnt; i++) {
1679                         if (copy_to_user(data_ptr, buff[i], buff_size[i])) { 
1680                                 error = -EFAULT;
1681                                 goto ida_alloc_cleanup;
1682                         }
1683                         data_ptr += buff_size[i];
1684                         
1685                 }
1686
1687         }
1688
1689         io->rcode = c->req.hdr.rcode;
1690
1691         if(scsi_param) {
1692                 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1693                         sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
1694                 /* copy the scsi_params back to the user */ 
1695                 if( copy_to_user(io->scsi_param, scsi_param, 
1696                                         sizeof(scsi_param_t))) {
1697                         error = -EFAULT;        
1698                 }
1699                 kfree(scsi_param);
1700         }
1701         cmd_free(h, c, 0);
1702         return(error);
1703         
1704 ida_alloc_cleanup:
1705         if(scsi_param) {
1706                 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1707                         sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
1708                 kfree(scsi_param);
1709         }
1710         for (i=0; i<sg_used; i++) {
1711                 if(buff[i] != NULL) {
1712                         pci_unmap_single(h->pci_dev, c->req.sg[i].addr, 
1713                                 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1714                         kfree(buff[i]);
1715                 }
1716         }       
1717         cmd_free(h, c, 0);
1718         return(error);
1719 }
1720 /*
1721  * Commands are pre-allocated in a large block.  Here we use a simple bitmap
1722  * scheme to suballocate them to the driver.  Operations that are not time
1723  * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
1724  * as the first argument to get a new command.
1725  */
1726 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1727 {
1728         cmdlist_t * c;
1729         int i;
1730         dma_addr_t cmd_dhandle;
1731
1732         if (!get_from_pool) {
1733                 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev, 
1734                         sizeof(cmdlist_t), &cmd_dhandle);
1735                 if(c==NULL)
1736                         return NULL;
1737         } else {
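                     /* claim a free slot in the preallocated pool; retry if another
                      * caller sets the bit between the find and the test_and_set */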
1738                 do {
1739                         i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1740                         if (i == NR_CMDS)
1741                                 return NULL;
1742                 } while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
1743                 c = h->cmd_pool + i;
1744                 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1745                 h->nr_allocs++;
1746         }
1747
1748         memset(c, 0, sizeof(cmdlist_t));
1749         c->busaddr = cmd_dhandle; 
1750         return c;
1751 }
1752
1753 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1754 {
1755         int i;
1756
1757         if (!got_from_pool) {
1758                 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1759                         c->busaddr);
1760         } else {
1761                 i = c - h->cmd_pool;
1762                 clear_bit(i%32, h->cmd_pool_bits+(i/32));
1763                 h->nr_frees++;
1764         }
1765 }
1766
1767 /***********************************************************************
1768     name:        sendcmd
1769     Send a command to an IDA using the memory mapped FIFO interface
1770     and wait for it to complete.  
1771     This routine should only be called at init time.
1772 ***********************************************************************/
1773 static int sendcmd(
1774         __u8    cmd,
1775         int     ctlr,
1776         void    *buff,
1777         size_t  size,
1778         unsigned int blk,
1779         unsigned int blkcnt,
1780         unsigned int log_unit )
1781 {
1782         cmdlist_t *c;
1783         int complete;
1784         unsigned long temp;
1785         unsigned long i;
1786         ctlr_info_t *info_p = hba[ctlr];
1787
1788         c = cmd_alloc(info_p, 1);
1789         if(!c)
1790                 return IO_ERROR;
1791         c->ctlr = ctlr;
1792         c->hdr.unit = log_unit;
1793         c->hdr.prio = 0;
1794         c->hdr.size = sizeof(rblk_t) >> 2;
1795         c->size += sizeof(rblk_t);
1796
1797         /* The request information. */
1798         c->req.hdr.next = 0;
1799         c->req.hdr.rcode = 0;
1800         c->req.bp = 0;
1801         c->req.hdr.sg_cnt = 1;
1802         c->req.hdr.reserved = 0;
1803         
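             /* commands that carry no data still need a buffer mapped, so
              * default to a single 512-byte sector */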
1804         if (size == 0)
1805                 c->req.sg[0].size = 512;
1806         else
1807                 c->req.sg[0].size = size;
1808
1809         c->req.hdr.blk = blk;
1810         c->req.hdr.blk_cnt = blkcnt;
1811         c->req.hdr.cmd = (unsigned char) cmd;
1812         c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, 
1813                 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1814         /*
1815          * Disable interrupt
1816          */
1817         info_p->access.set_intr_mask(info_p, 0);
1818         /* Make sure there is room in the command FIFO */
1819         /* Actually it should be completely empty at this time. */
1820         for (i = 200000; i > 0; i--) {
1821                 temp = info_p->access.fifo_full(info_p);
1822                 if (temp != 0) {
1823                         break;
1824                 }
1825                 udelay(10);
1826 DBG(
1827                 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1828                         " waiting!\n", ctlr);
1829 );
1830         } 
1831         /*
1832          * Send the cmd
1833          */
1834         info_p->access.submit_command(info_p, c);
1835         complete = pollcomplete(ctlr);
1836         
1837         pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
1838                 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1839         if (complete != 1) {
1840                 if (complete != c->busaddr) {
1841                         printk( KERN_WARNING
1842                         "cpqarray ida%d: idaSendPciCmd "
1843                       "Invalid command list address returned! (%08lx)\n",
1844                                 ctlr, (unsigned long)complete);
1845                         cmd_free(info_p, c, 1);
1846                         return (IO_ERROR);
1847                 }
1848         } else {
1849                 printk( KERN_WARNING
1850                         "cpqarray ida%d: idaSendPciCmd Timed out, "
1851                         "No command list address returned!\n",
1852                         ctlr);
1853                 cmd_free(info_p, c, 1);
1854                 return (IO_ERROR);
1855         }
1856
1857         if (c->req.hdr.rcode & 0x00FE) {
1858                 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1859                         printk( KERN_WARNING
1860                         "cpqarray ida%d: idaSendPciCmd, error: "
1861                                 "Controller failed at init time "
1862                                 "cmd: 0x%x, return code = 0x%x\n",
1863                                 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1864
1865                         cmd_free(info_p, c, 1);
1866                         return (IO_ERROR);
1867                 }
1868         }
1869         cmd_free(info_p, c, 1);
1870         return (IO_OK);
1871 }
1872
1873 static int frevalidate_logvol(kdev_t dev)
1874 {
1875         return revalidate_logvol(dev, 0);
1876 }
1877
1878 /*
1879  * revalidate_allvol is for online array config utilities.  After a
1880  * utility reconfigures the drives in the array, it can use this function
1881  * (through an ioctl) to make the driver zap any previous disk structs for
1882  * that controller and get new ones.
1883  *
1884  * Right now I'm using the getgeometry() function to do this, but this
1885  * function should probably be finer grained and allow you to revalidate one
1886  * particular logical volume (instead of all of them on a particular
1887  * controller).
1888  */
1889 static int revalidate_allvol(kdev_t dev)
1890 {
1891         int ctlr, i;
1892         unsigned long flags;
1893
1894         ctlr = MAJOR(dev) - MAJOR_NR;
1895         if (MINOR(dev) != 0)
1896                 return -ENXIO;
1897
1898         spin_lock_irqsave(&io_request_lock, flags);
1899         if (hba[ctlr]->usage_count > 1) {
1900                 spin_unlock_irqrestore(&io_request_lock, flags);
1901                 printk(KERN_WARNING "cpqarray: Device busy for volume"
1902                         " revalidation (usage=%d)\n", hba[ctlr]->usage_count);
1903                 return -EBUSY;
1904         }
1905         spin_unlock_irqrestore(&io_request_lock, flags);
1906         hba[ctlr]->usage_count++;
1907
1908         /*
1909          * Set the partition, size, blocksize and hardsize structures
1910          * for all volumes on this controller to zero.
1911          * We will reread all of this data when the new geometry is
1912          * fetched below.
1913          */
1914         memset(hba[ctlr]->hd,         0, sizeof(struct hd_struct)*NWD*16);
1915         memset(hba[ctlr]->sizes,      0, sizeof(int)*NWD*16);
1916         memset(hba[ctlr]->blocksizes, 0, sizeof(int)*NWD*16);
1917         memset(hba[ctlr]->drv,        0, sizeof(drv_info_t)*NWD);
1918         hba[ctlr]->gendisk.nr_real = 0;
1919
1920         for(i=0;i<256;i++)
1921                 hba[ctlr]->hardsizes[i] = 0;
1922         /*
1923          * Tell the array controller not to give us any interrupts while
1924          * we check the new geometry.  Then turn interrupts back on when
1925          * we're done.
1926          */
1927         hba[ctlr]->access.set_intr_mask(hba[ctlr], 0);
1928         getgeometry(ctlr);
1929         hba[ctlr]->access.set_intr_mask(hba[ctlr], FIFO_NOT_EMPTY);
1930
1931         ida_geninit(ctlr);
1932         for(i=0; i<NWD; i++)
1933                 if (hba[ctlr]->sizes[i<<NWD_SHIFT])
1934                         revalidate_logvol(dev+(i<<NWD_SHIFT), 2);
1935
1936         hba[ctlr]->usage_count--;
1937         return 0;
1938 }
1939
1940 static int deregister_disk(int ctlr, int logvol)
1941 {
1942         unsigned long flags;
1943         struct gendisk *gdev = &(hba[ctlr]->gendisk); 
1944         ctlr_info_t  *h = hba[ctlr];
1945         int start, max_p, i;    
1946
1947
1948         if (!capable(CAP_SYS_RAWIO))
1949                 return -EPERM;
1950
1951         spin_lock_irqsave(&io_request_lock, flags);
1952         /* make sure logical volume is NOT in use */
1953         if( h->drv[logvol].usage_count > 1) {
1954                 spin_unlock_irqrestore(&io_request_lock, flags);
1955                 return -EBUSY;
1956         }
1957         h->drv[logvol].usage_count++;
1958         spin_unlock_irqrestore(&io_request_lock, flags);
1959
1960         /* invalidate the devices and deregister the disk */ 
1961         max_p = gdev->max_p;
1962         start = logvol << gdev->minor_shift;
1963         for (i=max_p-1; i>=0; i--) {
1964                 int minor = start+i;
1965                 // printk("invalidating( %d %d)\n", ctlr, minor);
1966                 invalidate_device(MKDEV(MAJOR_NR+ctlr, minor), 1);
1967                 /* so open will now fail */
1968                 hba[ctlr]->sizes[minor] = 0;
1969                 /* so it will no longer appear in /proc/partitions */
1970                 gdev->part[minor].start_sect = 0;
1971                 gdev->part[minor].nr_sects = 0;
1972         }
1973         /* check to see if it was the last disk */
1974         if (logvol == h->highest_lun) {
1975                 /* if so, find the new highest lun */
1976                 int i, newhighest = -1;
1977                 for(i=0; i<h->highest_lun; i++) {
1978                         /* if the disk has size > 0, it is available */
1979                         if (hba[ctlr]->sizes[i << gdev->minor_shift] != 0)
1980                                 newhighest = i;
1981                 }
1982                 h->highest_lun = newhighest;
1983                                 
1984         }
1985         --h->log_drives;
1986         gdev->nr_real = h->highest_lun+1; 
1987         /* zero out the disk size info */ 
1988         h->drv[logvol].nr_blks = 0;
1989         h->drv[logvol].cylinders = 0;
1990         h->drv[logvol].blk_size = 0;
1991         return(0);
1992
1993 }
1994
1995 static int sendcmd_withirq(
1996         __u8    cmd,
1997         int     ctlr,
1998         void    *buff,
1999         size_t  size,
2000         unsigned int blk,
2001         unsigned int blkcnt,
2002         unsigned int log_unit )
2003 {
2004         cmdlist_t *c;
2005         unsigned long flags;
2006         ctlr_info_t *info_p = hba[ctlr];
2007         DECLARE_COMPLETION(wait);
2008
2009         c = cmd_alloc(info_p, 0);
2010         if(!c)
2011                 return IO_ERROR;
2012         c->type = CMD_IOCTL_PEND;
2013         c->ctlr = ctlr;
2014         c->hdr.unit = log_unit;
2015         c->hdr.prio = 0;
2016         c->hdr.size = sizeof(rblk_t) >> 2;
2017         c->size += sizeof(rblk_t);
2018
2019         /* The request information. */
2020         c->req.hdr.next = 0;
2021         c->req.hdr.rcode = 0;
2022         c->req.bp = 0;
2023         c->req.hdr.sg_cnt = 1;
2024         c->req.hdr.reserved = 0;
2025
2026         if (size == 0)
2027                 c->req.sg[0].size = 512;
2028         else
2029                 c->req.sg[0].size = size;
2030
2031         c->req.hdr.blk = blk;
2032         c->req.hdr.blk_cnt = blkcnt;
2033         c->req.hdr.cmd = (unsigned char) cmd;
2034         c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, 
2035                 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
2036
2037         c->waiting = &wait;
2038         /* Put the request on the tail of the request queue */
2039         spin_lock_irqsave(&io_request_lock, flags);
2040         addQ(&info_p->reqQ, c);
2041         info_p->Qdepth++;
2042         start_io(info_p);
2043         spin_unlock_irqrestore(&io_request_lock, flags);
2044
2045         /* Wait for completion */
2046         wait_for_completion(&wait);
2047
2048         if (c->req.hdr.rcode & RCODE_FATAL) {
2049                 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
2050                                 c->ctlr, c->hdr.unit);
2051                 cmd_free(info_p, c, 0);
2052                 return(IO_ERROR);
2053         }
2054         if (c->req.hdr.rcode & RCODE_INVREQ) {
2055                 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
2056                                 c->ctlr, c->hdr.unit, c->req.hdr.cmd,
2057                                 c->req.hdr.blk, c->req.hdr.blk_cnt,
2058                                 c->req.hdr.sg_cnt, c->req.hdr.rcode);
2059                 cmd_free(info_p, c, 0);
2060                 return(IO_ERROR);       
2061         }
2062         cmd_free(info_p, c, 0);
2063         return(IO_OK);
2064 }
2065
2066 static int register_new_disk(int ctlr, int logvol)
2067 {
2068         struct gendisk *gdev = &(hba[ctlr]->gendisk);
2069         ctlr_info_t *info_p = hba[ctlr];
2070         int ret_code, size;
2071         sense_log_drv_stat_t *id_lstatus_buf;
2072         id_log_drv_t *id_ldrive;
2073         drv_info_t *drv;
2074         int max_p;
2075         int start;
2076         int i;  
2077
2078         if (!capable(CAP_SYS_RAWIO))
2079                 return -EPERM;
2080         if( (logvol < 0) || (logvol >= IDA_MAX_PART))
2081                 return -EINVAL;
2082         /* disk is already registered */
2083         if(hba[ctlr]->sizes[logvol <<  gdev->minor_shift] != 0 )
2084                 return -EINVAL;
2085
2086         id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
2087         if(id_ldrive == NULL) {
2088                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2089                 return -1;
2090         }
2091         id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
2092         if(id_lstatus_buf == NULL) {
2093                 kfree(id_ldrive);
2094                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2095                 return -1;
2096         }
2097
2098         size = sizeof(sense_log_drv_stat_t);
2099
2100         /*
2101                 Send "Identify logical drive status" cmd
2102          */
2103         ret_code = sendcmd_withirq(SENSE_LOG_DRV_STAT,
2104                 ctlr, id_lstatus_buf, size, 0, 0, logvol);
2105         if (ret_code == IO_ERROR) {
2106                 /*
2107                  * If we can't get the logical drive status, the
2108                  * drive can't be registered; bail out with an
2109                  * error.
2110                  */
2112                         /* Free all the buffers and return */
2113
2114                 kfree(id_lstatus_buf);
2115                 kfree(id_ldrive);
2116                 return -1;
2117         }
2118         /*
2119                    Make sure the logical drive is configured
2120          */
2121         if (id_lstatus_buf->status == LOG_NOT_CONF) {
2122                 printk(KERN_WARNING "cpqarray: c%dd%d array not configured\n",
2123                         ctlr, logvol); 
2124                 kfree(id_lstatus_buf);
2125                 kfree(id_ldrive);
2126                 return -1;
2127         }
2128         ret_code = sendcmd_withirq(ID_LOG_DRV, ctlr, id_ldrive,
2129                                sizeof(id_log_drv_t), 0, 0, logvol);
2130                         /*
2131                            If error, the bit for this
2132                            logical drive won't be set and
2133                            idastubopen will return error. 
2134                          */
2135         if (ret_code == IO_ERROR) {
2136                 printk(KERN_WARNING "cpqarray: c%dd%d unable to ID logical volume\n",
2137                         ctlr,logvol);
2138                 kfree(id_lstatus_buf);
2139                 kfree(id_ldrive);
2140                 return -1;
2141         }
2142         drv = &info_p->drv[logvol];
2143         drv->blk_size = id_ldrive->blk_size;
2144         drv->nr_blks = id_ldrive->nr_blks;
2145         drv->cylinders = id_ldrive->drv.cyl;
2146         drv->heads = id_ldrive->drv.heads;
2147         drv->sectors = id_ldrive->drv.sect_per_track;
2148         info_p->log_drv_map |=  (1 << logvol);
2149         if (info_p->highest_lun < logvol)
2150                 info_p->highest_lun = logvol;
2151
2152         printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
2153                 ctlr, logvol, drv->blk_size, drv->nr_blks);
2154
2155         hba[ctlr]->drv[logvol].usage_count = 0; 
2156         
2157         max_p = gdev->max_p;
2158         start = logvol<< gdev->minor_shift;
2159         
2160         for(i=max_p-1; i>=0; i--) {
2161                 int minor = start+i;
2162                 invalidate_device(MKDEV(MAJOR_NR + ctlr, minor), 1);
2163                 gdev->part[minor].start_sect = 0;
2164                 gdev->part[minor].nr_sects = 0;
2165         
2166                 /* reset the blocksize so we can read the partition table */
2167                 blksize_size[MAJOR_NR+ctlr][minor] = 1024;
2168                 hba[ctlr]->hardsizes[minor] = drv->blk_size;
2169         }
2170         ++hba[ctlr]->log_drives;
2171         gdev->nr_real = info_p->highest_lun + 1;
2172         /* setup partitions per disk */
2173         grok_partitions(gdev, logvol, IDA_MAX_PART, drv->nr_blks); 
2174         
2175         kfree(id_lstatus_buf);
2176         kfree(id_ldrive);
2177         return (0);
2178 }
2179
2180 /* Borrowed and adapted from sd.c */
2181 static int revalidate_logvol(kdev_t dev, int maxusage)
2182 {
2183         int ctlr, target;
2184         struct gendisk *gdev;
2185         unsigned long flags;
2186         int max_p;
2187         int start;
2188         int i;
2189
2190         target = DEVICE_NR(dev);
2191         ctlr = MAJOR(dev) - MAJOR_NR;
2192         gdev = &(hba[ctlr]->gendisk);
2193         
2194         spin_lock_irqsave(&io_request_lock, flags);
2195         if (hba[ctlr]->drv[target].usage_count > maxusage) {
2196                 spin_unlock_irqrestore(&io_request_lock, flags);
2197                 printk(KERN_WARNING "cpqarray: Device busy for "
2198                         "revalidation (usage=%d)\n",
2199                         hba[ctlr]->drv[target].usage_count);
2200                 return -EBUSY;
2201         }
2202
2203         hba[ctlr]->drv[target].usage_count++;
2204         spin_unlock_irqrestore(&io_request_lock, flags);
2205
2206         max_p = gdev->max_p;
2207         start = target << gdev->minor_shift;
2208
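             /* wipe the old partition bookkeeping so the partition table is
              * reread from scratch by grok_partitions() below */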
2209         for(i=max_p-1; i>=0; i--) {
2210                 int minor = start+i;
2211                 invalidate_device(MKDEV(MAJOR_NR + ctlr, minor), 1);
2212                 gdev->part[minor].start_sect = 0;       
2213                 gdev->part[minor].nr_sects = 0; 
2214
2215                 /* reset the blocksize so we can read the partition table */
2216                 blksize_size[MAJOR_NR+ctlr][minor] = 1024;
2217         }
2218
2219         /* 16 minors per disk... */
2220         grok_partitions(gdev, target, IDA_MAX_PART, 
2221                 hba[ctlr]->drv[target].nr_blks);
2222         hba[ctlr]->drv[target].usage_count--;
2223         return 0;
2224 }
2225
2226
2227 /********************************************************************
2228     name: pollcomplete
2229     Wait polling for a command to complete.
2230     The memory mapped FIFO is polled for the completion.
2231     Used only at init time, interrupts disabled.
2232  ********************************************************************/
2233 static int pollcomplete(int ctlr)
2234 {
2235         int done;
2236         int i;
2237
2238         /* Wait (up to 2 seconds) for a command to complete */
2239
2240         for (i = 200000; i > 0; i--) {
2241                 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2242                 if (done == 0) {
2243                         udelay(10);     /* a short fixed delay */
2244                 } else
2245                         return (done);
2246         }
2247         /* Invalid address to tell caller we ran out of time */
2248         return 1;
2249 }
2250 /*****************************************************************
2251     start_fwbk
2252     Starts the controller firmware's background processing.
2253     Currently only the Integrated Raid controller needs this done.
2254     If the PCI mem address registers are written to after this,
2255     data corruption may occur.
2256 *****************************************************************/
2257 static void start_fwbk(int ctlr)
2258 {
2259         id_ctlr_t *id_ctlr_buf;
2260         int ret_code;
2261
2262         if(     (hba[ctlr]->board_id != 0x40400E11)
2263                 && (hba[ctlr]->board_id != 0x40480E11) )
2264 
2265                 /* Not an Integrated Raid, so there is nothing for us to do */
2266                 return;
2267         printk(KERN_DEBUG "cpqarray: Starting firmware's background"
2268                 " processing\n");
2269         /* Command does not return anything, but idasend command needs a 
2270                 buffer */
2271         id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
2272         if(id_ctlr_buf==NULL) {
2273                 printk(KERN_WARNING "cpqarray: Out of memory. "
2274                         "Unable to start background processing.\n");
2275                 return;
2276         }               
2277         ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr, 
2278                 id_ctlr_buf, 0, 0, 0, 0);
2279         if(ret_code != IO_OK)
2280                 printk(KERN_WARNING "cpqarray: Unable to start"
2281                         " background processing\n");
2282
2283         kfree(id_ctlr_buf);
2284 }
2285 /*****************************************************************
2286     getgeometry
2287     Get ida logical volume geometry from the controller 
2288     This is a large bit of code which once existed in two flavors.
2289     It is used only at init time.
2290 *****************************************************************/
2291 static void getgeometry(int ctlr)
2292 {                               
2293         id_log_drv_t *id_ldrive;
2294         id_ctlr_t *id_ctlr_buf;
2295         sense_log_drv_stat_t *id_lstatus_buf;
2296         config_t *sense_config_buf;
2297         unsigned int log_unit, log_index;
2298         int ret_code, size;
2299         drv_info_t *drv;
2300         ctlr_info_t *info_p = hba[ctlr];
2301         int i;
2302
2303         info_p->log_drv_map = 0;        
2304         
2305         id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
2306         if(id_ldrive == NULL) {
2307                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2308                 return;
2309         }
2310
2311         id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
2312         if(id_ctlr_buf == NULL) {
2313                 kfree(id_ldrive);
2314                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2315                 return;
2316         }
2317
2318         id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
2319         if(id_lstatus_buf == NULL) {
2320                 kfree(id_ctlr_buf);
2321                 kfree(id_ldrive);
2322                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2323                 return;
2324         }
2325
2326         sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
2327         if(sense_config_buf == NULL) {
2328                 kfree(id_lstatus_buf);
2329                 kfree(id_ctlr_buf);
2330                 kfree(id_ldrive);
2331                 printk( KERN_ERR "cpqarray:  out of memory.\n");
2332                 return;
2333         }
2334
2335         memset(id_ldrive, 0, sizeof(id_log_drv_t));
2336         memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
2337         memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
2338         memset(sense_config_buf, 0, sizeof(config_t));
2339
2340         info_p->phys_drives = 0;
2341         info_p->log_drv_map = 0;
2342         info_p->drv_assign_map = 0;
2343         info_p->drv_spare_map = 0;
2344         info_p->mp_failed_drv_map = 0;  /* only initialized here */
2345         /* Get the controller's own info (drive count, firmware revision) */
2346         ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
2347         if (ret_code == IO_ERROR) {
2348                 /*
2349                  * If can't get controller info, set the logical drive map to 0,
2350                  * so the idastubopen will fail on all logical drives
2351                  * on the controller.
2352                  */
2353                  /* Free all the buffers and return */ 
2354                 printk(KERN_ERR "cpqarray: error sending ID controller\n");
2355                 kfree(sense_config_buf);
2356                 kfree(id_lstatus_buf);
2357                 kfree(id_ctlr_buf);
2358                 kfree(id_ldrive);
2359                 return;
2360         }
2361
2362         info_p->log_drives = id_ctlr_buf->nr_drvs;
2363         for(i=0;i<4;i++)
2364                 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
2365         info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
2366
2367         printk(" (%s)\n", info_p->product_name);
2368         /*
2369          * Initialize logical drive map to zero
2370          */
2371         log_index = 0;
2372         /*
2373          * Get drive geometry for all logical drives
2374          */
2375         if (id_ctlr_buf->nr_drvs > IDA_MAX_PART)
2376                 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
2377                         "16 logical drives per controller.  "
2378                         "Additional drives will not be "
2379                         "detected.\n", ctlr);
2380
2381         for (log_unit = 0;
2382              (log_index < id_ctlr_buf->nr_drvs)
2383              && (log_unit < NWD);
2384              log_unit++) {
2385
2386                 size = sizeof(sense_log_drv_stat_t);
2387
2388                 /*
2389                    Send "Identify logical drive status" cmd
2390                  */
2391                 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
2392                              ctlr, id_lstatus_buf, size, 0, 0, log_unit);
2393                 if (ret_code == IO_ERROR) {
2394                         /*
2395                            If can't get logical drive status, set
2396                            the logical drive map to 0, so the
2397                            idastubopen will fail for all logical drives
2398                            on the controller. 
2399                          */
2400                         info_p->log_drv_map = 0;        
2401                         printk( KERN_WARNING
2402                              "cpqarray ida%d: idaGetGeometry - Controller"
2403                                 " failed to report status of logical drive %d\n"
2404                          "Access to this controller has been disabled\n",
2405                                 ctlr, log_unit);
2406                         /* Free all the buffers and return */
2407                         kfree(sense_config_buf);
2408                         kfree(id_lstatus_buf);
2409                         kfree(id_ctlr_buf);
2410                         kfree(id_ldrive);
2411                         return;
2412                 }
2413                 /*
2414                    Make sure the logical drive is configured
2415                  */
2416                 if (id_lstatus_buf->status != LOG_NOT_CONF) {
2417                         ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
2418                                sizeof(id_log_drv_t), 0, 0, log_unit);
2419                         /*
2420                            If error, the bit for this
2421                            logical drive won't be set and
2422                            idastubopen will return error. 
2423                          */
2424                         if (ret_code != IO_ERROR) {
2425                                 drv = &info_p->drv[log_unit];
2426                                 drv->blk_size = id_ldrive->blk_size;
2427                                 drv->nr_blks = id_ldrive->nr_blks;
2428                                 drv->cylinders = id_ldrive->drv.cyl;
2429                                 drv->heads = id_ldrive->drv.heads;
2430                                 drv->sectors = id_ldrive->drv.sect_per_track;
2431                                 info_p->log_drv_map |=  (1 << log_unit);
2432
2433         printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
2434                 ctlr, log_unit, drv->blk_size, drv->nr_blks);
2435                                 ret_code = sendcmd(SENSE_CONFIG,
2436                                                   ctlr, sense_config_buf,
2437                                  sizeof(config_t), 0, 0, log_unit);
2438                                 if (ret_code == IO_ERROR) {
2439                                         info_p->log_drv_map = 0;
2440                                         /* Free all the buffers and return */
2441                                         printk(KERN_ERR "cpqarray: error sending sense config\n");
2442                                         kfree(sense_config_buf);
2443                                         kfree(id_lstatus_buf);
2444                                         kfree(id_ctlr_buf);
2445                                         kfree(id_ldrive);
2446                                         return;
2447
2448                                 }
2449                                 if(log_unit > info_p->highest_lun)
2450                                         info_p->highest_lun = log_unit;
2451                                 info_p->phys_drives =
2452                                     sense_config_buf->ctlr_phys_drv;
2453                                 info_p->drv_assign_map
2454                                     |= sense_config_buf->drv_asgn_map;
2455                                 info_p->drv_assign_map
2456                                     |= sense_config_buf->spare_asgn_map;
2457                                 info_p->drv_spare_map
2458                                     |= sense_config_buf->spare_asgn_map;
2459                         }       /* end of if no error on id_ldrive */
2460                         log_index = log_index + 1;
2461                 }               /* end of if logical drive configured */
2462         }                       /* end of for log_unit */
2463         kfree(sense_config_buf);
2464         kfree(id_ldrive);
2465         kfree(id_lstatus_buf);
2466         kfree(id_ctlr_buf);
2467         return;
2468
2469 }
2470
2471 static void __exit cleanup_cpqarray_module(void)
2472 {
2473         int i;
2474
2475         pci_unregister_driver(&cpqarray_pci_driver);
2476         /* double check that all controller entries have been removed */
2477         for (i=0; i< MAX_CTLR; i++) {
2478                 if (hba[i] != NULL) {
2479                         printk(KERN_WARNING "cpqarray: had to remove"
2480                                         " controller %d\n", i);
2481                         cpqarray_remove_one_eisa(i);
2482                 }
2483         }
2484         remove_proc_entry("cpqarray", proc_root_driver);
2485 }
2486
2487
2488 module_init(init_cpqarray_module);
2489 module_exit(cleanup_cpqarray_module);
2490