2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to Cpqarray-discuss@lists.sourceforge.net
22 #include <linux/config.h> /* CONFIG_PROC_FS */
23 #include <linux/module.h>
24 #include <linux/version.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/blkpg.h>
33 #include <linux/timer.h>
34 #include <linux/proc_fs.h>
35 #include <linux/init.h>
36 #include <linux/hdreg.h>
37 #include <linux/spinlock.h>
38 #include <asm/uaccess.h>
42 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
44 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.4.28)"
45 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,4,28)
47 /* Embedded module documentation macros - see modules.h */
48 /* Original author Chris Frantz - Compaq Computer Corporation */
49 MODULE_AUTHOR("Compaq Computer Corporation");
50 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.4.28");
51 MODULE_LICENSE("GPL");
53 #define MAJOR_NR COMPAQ_SMART2_MAJOR
54 #include <linux/blk.h>
55 #include <linux/blkdev.h>
56 #include <linux/genhd.h>
61 #include "ida_ioctl.h"
63 #define READ_AHEAD 128
64 #define NR_CMDS 128 /* This could probably go as high as ~400 */
68 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
70 static ctlr_info_t *hba[MAX_CTLR] =
71 { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
75 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
77 /* board_id = Subsystem Device ID & Vendor ID
78 * product = Marketing Name for the board
79 * access = Address of the struct of function pointers
81 static struct board_type products[] = {
82 { 0x0040110E, "IDA", &smart1_access },
83 { 0x0140110E, "IDA-2", &smart1_access },
84 { 0x1040110E, "IAES", &smart1_access },
85 { 0x2040110E, "SMART", &smart1_access },
86 { 0x3040110E, "SMART-2/E", &smart2e_access },
87 { 0x40300E11, "SMART-2/P", &smart2_access },
88 { 0x40310E11, "SMART-2SL", &smart2_access },
89 { 0x40320E11, "Smart Array 3200", &smart2_access },
90 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
91 { 0x40340E11, "Smart Array 221", &smart2_access },
92 { 0x40400E11, "Integrated Array", &smart4_access },
93 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
94 { 0x40500E11, "Smart Array 4200", &smart4_access },
95 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
96 { 0x40580E11, "Smart Array 431", &smart4_access },
99 /* define the PCI info for the PCI cards this driver can control */
100 const struct pci_device_id cpqarray_pci_device_id[] =
102 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
103 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
106 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
108 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
110 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
112 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
113 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
125 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
127 static struct proc_dir_entry *proc_array;
130 #define DBG(s) do { s } while(0)
131 /* Debug (general info)... */
132 #define DBGINFO(s) do { } while(0)
133 /* Debug Paranoid... */
134 #define DBGP(s) do { } while(0)
135 /* Debug Extra Paranoid... */
136 #define DBGPX(s) do { } while(0)
138 int cpqarray_init(void);
139 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
140 static void *remap_pci_mem(ulong base, ulong size);
141 static int cpqarray_eisa_detect(void);
142 static int pollcomplete(int ctlr);
143 static void getgeometry(int ctlr);
144 static void start_fwbk(int ctlr);
146 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
147 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
149 static void free_hba(int i);
150 static int alloc_cpqarray_hba(void);
159 unsigned int log_unit );
161 static int ida_open(struct inode *inode, struct file *filep);
162 static int ida_release(struct inode *inode, struct file *filep);
163 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
164 static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io);
165 static int ida_ctlr_big_ioctl( int ctlr, int dsk, ida_big_ioctl_t *io);
167 static void do_ida_request(request_queue_t *q);
168 static void start_io(ctlr_info_t *h);
170 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
171 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
172 static inline void complete_buffers(struct buffer_head *bh, int ok);
173 static inline void complete_command(cmdlist_t *cmd, int timeout);
175 static void do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
176 static void ida_timer(unsigned long tdata);
177 static int frevalidate_logvol(kdev_t dev);
178 static int revalidate_logvol(kdev_t dev, int maxusage);
179 static int revalidate_allvol(kdev_t dev);
181 static int deregister_disk(int ctlr, int logvol);
182 static int register_new_disk(int cltr,int logvol);
183 static int cpqarray_register_ctlr(int ctlr, int type);
185 #ifdef CONFIG_PROC_FS
186 static void ida_procinit(int i);
187 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
189 static void ida_procinit(int i) {}
190 static int ida_proc_get_info(char *buffer, char **start, off_t offset,
191 int length, int *eof, void *data) { return 0;}
/*
 * ida_geninit: publish per-logical-drive capacity and block-size info into
 * the controller's block-layer tables (sizes[], blocksizes[], hardsizes[]).
 * NOTE(review): this chunk is a lossy extraction -- gaps in the embedded
 * line numbering show that several original lines (declarations, braces)
 * are missing from this body.
 */
194 static void ida_geninit(int ctlr)
199 for(i=0; i<NWD; i++) {
200 drv = &hba[ctlr]->drv[i];
/* (drive << NWD_SHIFT) is the whole-disk minor for logical drive i */
203 hba[ctlr]->hd[i<<NWD_SHIFT].nr_sects =
204 hba[ctlr]->sizes[i<<NWD_SHIFT] = drv->nr_blks;
206 for(j=0; j<IDA_MAX_PART; j++) {
/* soft block size fixed at 1024; hardware sector size from the drive */
207 hba[ctlr]->blocksizes[(i<<NWD_SHIFT)+j] = 1024;
208 hba[ctlr]->hardsizes[(i<<NWD_SHIFT)+j] = drv->blk_size;
/* expose one gendisk unit per configured logical drive */
211 hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun +1;
/*
 * Block device file operations for /dev/ida* nodes.
 * NOTE(review): the open/ioctl members are on lines missing from this
 * extraction (numbering gap between 215 and 218).
 */
215 static struct block_device_operations ida_fops = {
218 release: ida_release,
220 revalidate: frevalidate_logvol,
224 #ifdef CONFIG_PROC_FS
227 * Get us a file in /proc/array that says something about each controller.
228 * Create /proc/array if it doesn't exist yet.
/*
 * ida_procinit: create the per-controller read-only /proc entry; lazily
 * creates the "cpqarray" proc directory on first use.
 */
230 static void __init ida_procinit(int i)
232 if (proc_array == NULL) {
233 proc_array = proc_mkdir("cpqarray", proc_root_driver);
/* silently give up if proc_mkdir failed -- proc support is optional */
234 if (!proc_array) return;
237 create_proc_read_entry(hba[i]->devname, 0, proc_array,
238 ida_proc_get_info, hba[i]);
242 * Report information about this controller.
/*
 * ida_proc_get_info: classic 2.4 read_proc callback.  Formats controller
 * identity, logical-drive info and (optionally, under
 * CPQ_PROC_PRINT_QUEUES) the request/completion command queues into
 * `buffer`.  NOTE(review): lines handling `pos`/`len` initialization and
 * the final return are missing from this extraction.
 */
244 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
249 ctlr_info_t *h = (ctlr_info_t*)data;
251 #ifdef CPQ_PROC_PRINT_QUEUES
256 size = sprintf(buffer, "%s: Compaq %s Controller\n"
257 " Board ID: 0x%08lx\n"
258 " Firmware Revision: %c%c%c%c\n"
259 " Controller Sig: 0x%08lx\n"
260 " Memory Address: 0x%08lx\n"
261 " I/O Port: 0x%04x\n"
263 " Logical drives: %d\n"
264 " Highest Logical ID: %d\n"
265 " Physical drives: %d\n\n"
266 " Current Q depth: %d\n"
267 " Max Q depth since init: %d\n\n",
270 (unsigned long)h->board_id,
271 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
272 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
273 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
274 h->log_drives, h->highest_lun, h->phys_drives,
275 h->Qdepth, h->maxQsinceinit);
277 pos += size; len += size;
279 size = sprintf(buffer+len, "Logical Drive Info:\n");
280 pos += size; len += size;
/* only report logical drives that are actually configured (nonzero size) */
282 for(i=0; i<=h->highest_lun; i++) {
284 if(drv->nr_blks != 0) {
285 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
286 ctlr, i, drv->blk_size, drv->nr_blks);
287 pos += size; len += size;
291 #ifdef CPQ_PROC_PRINT_QUEUES
292 size = sprintf(buffer+len, "\nCurrent Queues:\n");
293 pos += size; len += size;
/* walk the circular reqQ list; stop when we wrap back to the head */
296 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
299 while(c && c != h->reqQ) {
300 size = sprintf(buffer+len, "->%p", c);
301 pos += size; len += size;
/* same walk for the completion queue */
306 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
309 while(c && c != h->cmpQ) {
310 size = sprintf(buffer+len, "->%p", c);
311 pos += size; len += size;
315 size = sprintf(buffer+len, "\n"); pos += size; len += size;
317 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
318 h->nr_allocs, h->nr_frees);
319 pos += size; len += size;
/* standard read_proc windowing: hand back the slice the caller asked for */
322 *start = buffer+offset;
328 #endif /* CONFIG_PROC_FS */
331 MODULE_PARM(eisa, "1-8i");
334 /* This is a bit of a hack... */
/*
 * Module entry point.  cpqarray_init() returns the number of controllers
 * registered; zero means nothing usable was found, so fail the load.
 */
335 int __init init_cpqarray_module(void)
337 if (cpqarray_init() == 0) /* all the block dev numbers already used */
338 return -ENODEV; /* or no controllers were found */
/*
 * release_io_mem: give back the controller's I/O port region and clear the
 * bookkeeping fields so a double release is harmless.
 */
342 static void release_io_mem(ctlr_info_t *c)
344 /* if IO mem was not protected do nothing */
345 if( c->io_mem_addr == 0)
347 release_region(c->io_mem_addr, c->io_mem_length);
349 c->io_mem_length = 0;
/*
 * PCI hot-remove callback: flush the controller's write cache, then tear
 * down the irq, gendisk, mappings, block queue, /proc entry, DMA command
 * pool and I/O region for this instance.
 * NOTE(review): the lines locating index `i` from tmp_ptr and freeing
 * hba[i] are missing from this extraction.
 */
352 static void __devexit cpqarray_remove_one (struct pci_dev *pdev)
355 ctlr_info_t *tmp_ptr;
358 tmp_ptr = pci_get_drvdata(pdev);
361 printk( KERN_ERR "cpqarray: Unable to remove device \n");
367 printk(KERN_ERR "cpqarray: device appears to "
368 "already be removed \n");
372 /* sendcmd will turn off interrupt, and send the flush...
373 * To write all data in the battery backed cache to disks */
375 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0)) {
376 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
379 free_irq(hba[i]->intr, hba[i]);
380 pci_set_drvdata(pdev, NULL);
381 /* remove it from the disk list */
382 del_gendisk(&(hba[i]->gendisk));
384 iounmap(hba[i]->vaddr);
385 unregister_blkdev(MAJOR_NR+i, hba[i]->devname);
386 del_timer(&hba[i]->timer);
387 blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i));
388 remove_proc_entry(hba[i]->devname, proc_array);
389 pci_free_consistent(hba[i]->pci_dev,
390 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
391 hba[i]->cmd_pool_dhandle);
392 kfree(hba[i]->cmd_pool_bits);
393 release_io_mem(hba[i]);
397 /* removing an instance that was not removed automatically..
398 * must be an eisa card.
/*
 * EISA-variant teardown: same sequence as cpqarray_remove_one() but keyed
 * by controller index (no struct pci_dev to consult).  Kept in sync with
 * the PCI path by duplication rather than a shared helper.
 */
400 static void __devexit cpqarray_remove_one_eisa (int i)
404 if (hba[i] == NULL) {
405 printk(KERN_ERR "cpqarray: device appears to "
406 "already be removed \n");
410 /* sendcmd will turn off interrupt, and send the flush...
411 * To write all data in the battery backed cache to disks
412 * no data returned, but don't want to send NULL to sendcmd */
413 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0)) {
414 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
417 free_irq(hba[i]->intr, hba[i]);
418 /* remove it from the disk list */
419 del_gendisk(&(hba[i]->gendisk));
421 iounmap(hba[i]->vaddr);
422 unregister_blkdev(MAJOR_NR+i, hba[i]->devname);
423 del_timer(&hba[i]->timer);
424 blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR + i));
425 remove_proc_entry(hba[i]->devname, proc_array);
426 pci_free_consistent(hba[i]->pci_dev,
427 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
428 hba[i]->cmd_pool_dhandle);
429 kfree(hba[i]->cmd_pool_bits);
430 release_io_mem(hba[i]);
/*
 * cpq_new_segment: accept a new scatter-gather segment for `rq` only while
 * the request is still under the controller's SG_MAX limit.
 */
433 static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
436 if (rq->nr_segments < SG_MAX) {
/*
 * Back-merge hook: if `bh` is physically contiguous with the request's
 * tail buffer the merge costs no new segment; otherwise ask
 * cpq_new_segment() whether another segment fits.
 */
443 static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
444 struct buffer_head *bh, int max_segments)
446 if (blk_seg_merge_ok(rq->bhtail, bh))
448 return cpq_new_segment(q, rq, max_segments);
/*
 * Front-merge hook: mirror image of cpq_back_merge_fn(), checking `bh`
 * against the request's first buffer instead of its tail.
 */
451 static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
452 struct buffer_head *bh, int max_segments)
454 if (blk_seg_merge_ok(bh, rq->bh))
456 return cpq_new_segment(q, rq, max_segments);
/*
 * Request-merge hook: allow two requests to coalesce when their combined
 * segment count stays within SG_MAX (contiguous boundary buffers save one
 * segment -- handled on a line missing from this extraction).
 */
459 static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
460 struct request *nxt, int max_segments)
462 int total_segments = rq->nr_segments + nxt->nr_segments;
464 if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
467 if (total_segments > SG_MAX)
470 rq->nr_segments = total_segments;
/*
 * cpqarray_register_ctlr: common bring-up path for both PCI and EISA
 * controllers.  Registers the block major, wires the interrupt, allocates
 * the DMA command pool, probes drive geometry, configures the request
 * queue and merge hooks, registers the gendisk and starts the non-fatal
 * warning timer.  Unwinds partially-acquired resources on each failure
 * path.  `type` distinguishes PCI (1) from EISA (0) callers.
 */
474 static int cpqarray_register_ctlr(int ctlr, int type)
480 * register block devices
481 * Find disks and fill in structs
482 * Get an interrupt, set the Q depth and get into /proc
485 /* If this successful it should insure that we are the only */
486 /* instance of the driver for this card */
487 if (register_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname, &ida_fops)) {
488 printk(KERN_ERR "cpqarray: Unable to get major number %d\n", MAJOR_NR+ctlr);
/* mask controller interrupts until the handler is installed */
492 hba[ctlr]->access.set_intr_mask(hba[ctlr], 0);
493 if (request_irq(hba[ctlr]->intr, do_ida_intr,
494 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
495 hba[ctlr]->devname, hba[ctlr])) {
496 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
497 hba[ctlr]->intr, hba[ctlr]->devname);
498 unregister_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname);
/* one DMA-coherent pool of NR_CMDS command blocks plus a usage bitmap */
501 hba[ctlr]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
502 hba[ctlr]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
503 &(hba[ctlr]->cmd_pool_dhandle));
504 hba[ctlr]->cmd_pool_bits = (__u32*)kmalloc(
505 ((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
507 if (hba[ctlr]->cmd_pool_bits == NULL || hba[ctlr]->cmd_pool == NULL) {
508 if (hba[ctlr]->cmd_pool_bits)
509 kfree(hba[ctlr]->cmd_pool_bits);
510 if (hba[ctlr]->cmd_pool)
511 pci_free_consistent(hba[ctlr]->pci_dev,
512 NR_CMDS * sizeof(cmdlist_t),
514 hba[ctlr]->cmd_pool_dhandle);
516 free_irq(hba[ctlr]->intr, hba[ctlr]);
517 unregister_blkdev(MAJOR_NR+ctlr, hba[ctlr]->devname);
518 printk( KERN_ERR "cpqarray: out of memory");
521 memset(hba[ctlr]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
522 memset(hba[ctlr]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
523 printk(KERN_INFO "cpqarray: Finding drives on %s", hba[ctlr]->devname);
/* re-enable completion interrupts now that the handler is in place */
527 hba[ctlr]->access.set_intr_mask(hba[ctlr], FIFO_NOT_EMPTY);
/* NOTE(review): queuedata is assigned before blk_init_queue(); the usual
 * 2.4 pattern initializes the queue first -- confirm blk_init_queue does
 * not clobber queuedata on this kernel. */
531 q = BLK_DEFAULT_QUEUE(MAJOR_NR + ctlr);
532 q->queuedata = hba[ctlr];
533 blk_init_queue(q, do_ida_request);
535 blk_queue_bounce_limit(q, hba[ctlr]->pci_dev->dma_mask);
536 blk_queue_headactive(q, 0);
537 blksize_size[MAJOR_NR+ctlr] = hba[ctlr]->blocksizes;
538 hardsect_size[MAJOR_NR+ctlr] = hba[ctlr]->hardsizes;
539 read_ahead[MAJOR_NR+ctlr] = READ_AHEAD;
541 q->back_merge_fn = cpq_back_merge_fn;
542 q->front_merge_fn = cpq_front_merge_fn;
543 q->merge_requests_fn = cpq_merge_requests_fn;
545 hba[ctlr]->gendisk.major = MAJOR_NR + ctlr;
546 hba[ctlr]->gendisk.major_name = "ida";
547 hba[ctlr]->gendisk.minor_shift = NWD_SHIFT;
548 hba[ctlr]->gendisk.max_p = IDA_MAX_PART;
549 hba[ctlr]->gendisk.part = hba[ctlr]->hd;
550 hba[ctlr]->gendisk.sizes = hba[ctlr]->sizes;
551 hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
552 hba[ctlr]->gendisk.fops = &ida_fops;
554 /* Get on the disk list */
555 add_gendisk(&(hba[ctlr]->gendisk));
/* periodic timer that clears the "non-fatal warning" throttle flag */
557 init_timer(&hba[ctlr]->timer);
558 hba[ctlr]->timer.expires = jiffies + IDA_TIMER;
559 hba[ctlr]->timer.data = (unsigned long)hba[ctlr];
560 hba[ctlr]->timer.function = ida_timer;
561 add_timer(&hba[ctlr]->timer);
565 register_disk(&(hba[ctlr]->gendisk), MKDEV(MAJOR_NR+ctlr,j<<4),
566 IDA_MAX_PART, &ida_fops, hba[ctlr]->drv[j].nr_blks);
570 release_io_mem(hba[ctlr]);
/*
 * PCI probe callback: claim an hba[] slot, zero the new ctlr_info_t, fill
 * defaults, stash it as the pdev's driver data, do the PCI-level setup and
 * then run the common registration path.
 */
576 static int __init cpqarray_init_one( struct pci_dev *pdev,
577 const struct pci_device_id *ent)
582 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
583 " bus %d dev %d func %d\n",
584 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
585 PCI_FUNC(pdev->devfn));
586 i = alloc_cpqarray_hba();
589 memset(hba[i], 0, sizeof(ctlr_info_t));
590 /* fill in default block size */
592 hba[i]->hardsizes[j] = hba[i]->drv[j].blk_size;
594 sprintf(hba[i]->devname, "ida%d", i);
596 /* Initialize the pdev driver private data */
597 pci_set_drvdata(pdev, hba[i]);
599 if (cpqarray_pci_init(hba[i], pdev) != 0) {
600 release_io_mem(hba[i]);
605 return (cpqarray_register_ctlr(i, 1));
/* pci_driver glue: probe/remove callbacks plus the supported ID table
 * (the name: member is on a line missing from this extraction). */
607 static struct pci_driver cpqarray_pci_driver = {
609 probe: cpqarray_init_one,
610 remove: __devexit_p(cpqarray_remove_one),
611 id_table: cpqarray_pci_device_id,
615 * This is it. Find all the controllers and register them. I really hate
616 * stealing all these major device numbers.
617 * returns the number of block devices registered.
/*
 * Top-level detection: PCI controllers first (via pci_module_init), then
 * legacy EISA boards, then count how many hba[] slots ended up populated.
 */
619 int __init cpqarray_init(void)
621 int num_cntlrs_reg = 0;
624 /* detect controllers */
625 printk(DRIVER_NAME "\n");
626 pci_module_init(&cpqarray_pci_driver);
627 cpqarray_eisa_detect();
629 for(i=0; i< MAX_CTLR; i++) {
633 return(num_cntlrs_reg);
635 /* Function to find the first free pointer into our hba[] array */
636 /* Returns -1 if no free entries are left. */
/*
 * alloc_cpqarray_hba: linear scan of hba[] for a NULL slot; kmallocs the
 * ctlr_info_t for the slot it claims.  The kmalloc-failure and success
 * return paths are on lines missing from this extraction.
 */
637 static int alloc_cpqarray_hba(void)
640 for(i=0; i< MAX_CTLR; i++) {
641 if (hba[i] == NULL) {
642 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
644 printk(KERN_ERR "cpqarray: out of memory.\n");
650 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
651 " of 8 controllers.\n");
/* free_hba: release hba[i] and clear the slot (body missing from this
 * extraction). */
655 static void free_hba(int i)
662 * Find the IO address of the controller, its IRQ and so forth. Fill
663 * in some basic stuff into the ctlr_info_t structure.
/*
 * cpqarray_pci_init: PCI-side discovery for one controller.  Verifies the
 * device is enabled, reads BARs/config, enables the device, sets the
 * 32-bit DMA mask, claims the I/O port region, maps the memory BAR and
 * matches board_id against the products[] table to pick an access-method
 * vtable.  Returns nonzero on any failure.
 */
665 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
667 ushort vendor_id, device_id, command;
668 unchar cache_line_size, latency_timer;
669 unchar irq, revision;
670 unsigned long addr[6];
675 pci_read_config_word(pdev, PCI_COMMAND, &command);
676 /* check to see if controller has been disabled */
677 if(!(command & 0x02)) {
678 printk(KERN_WARNING "cpqarray: controller appears to be disabled\n");
683 vendor_id = pdev->vendor;
684 device_id = pdev->device;
688 addr[i] = pci_resource_start(pdev, i);
690 if (pci_enable_device(pdev)) {
691 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
694 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0) {
695 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
699 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
700 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
701 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
/* 0x2c = PCI subsystem vendor/device dword; this is the board_id key
 * used to index the products[] table */
703 pci_read_config_dword(pdev, 0x2c, &board_id);
706 printk("vendor_id = %x\n", vendor_id);
707 printk("device_id = %x\n", device_id);
708 printk("command = %x\n", command);
710 printk("addr[%d] = %lx\n", i, addr[i]);
711 printk("revision = %x\n", revision);
712 printk("irq = %x\n", irq);
713 printk("cache_line_size = %x\n", cache_line_size);
714 printk("latency_timer = %x\n", latency_timer);
715 printk("board_id = %x\n", board_id);
/* first I/O-space BAR is the controller's port range; reserve it */
720 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
722 c->io_mem_addr = addr[i];
723 c->io_mem_length = pci_resource_end(pdev, i)
724 - pci_resource_start(pdev, i) +1;
725 // printk("IO Value found addr[%d] %lx %lx\n",
726 // i, c->io_mem_addr, c->io_mem_length);
727 if(!request_region( c->io_mem_addr, c->io_mem_length,
729 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
731 c->io_mem_length = 0;
/* first memory-space BAR holds the command FIFO registers */
738 if (!(pci_resource_flags(pdev, i) &
739 PCI_BASE_ADDRESS_SPACE_IO)) {
740 c->paddr = pci_resource_start (pdev, i);
745 c->vaddr = remap_pci_mem(c->paddr, 128);
748 c->board_id = board_id;
750 for(i=0; i<NR_PRODUCTS; i++) {
751 if (board_id == products[i].board_id) {
752 c->product_name = products[i].product_name;
753 c->access = *(products[i].access);
757 if (i == NR_PRODUCTS) {
758 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
759 " to access the SMART Array controller %08lx\n",
760 (unsigned long)board_id);
768 * Map (physical) PCI mem into (virtual) kernel space
/*
 * remap_pci_mem: ioremap a page-aligned window covering [base, base+size)
 * and return a pointer adjusted back to `base`'s offset within the page;
 * NULL if the mapping fails.
 */
770 static void *remap_pci_mem(ulong base, ulong size)
772 ulong page_base = ((ulong) base) & PAGE_MASK;
773 ulong page_offs = ((ulong) base) - page_base;
774 void *page_remapped = ioremap(page_base, page_offs+size);
776 return (page_remapped ? (page_remapped + page_offs) : NULL);
780 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,13)
782 * Config string is a comma seperated set of i/o addresses of EISA cards.
/*
 * New-style (__setup) boot parameter parser: "smart2=addr,addr,..."
 * fills the eisa[] address table (at most 8 entries).
 */
784 static int cpqarray_setup(char *str)
788 (void)get_options(str, ARRAY_SIZE(ints), ints);
790 for(i=0; i<ints[0] && i<8; i++)
795 __setup("smart2=", cpqarray_setup);
800 * Copy the contents of the ints[] array passed to us by init.
/* Pre-2.3.13 variant of the boot parameter parser (same eisa[] fill). */
802 void cpqarray_setup(char *str, int *ints)
805 for(i=0; i<ints[0] && i<8; i++)
812 * Find an EISA controller's signature. Set up an hba if we find it.
/*
 * cpqarray_eisa_detect: walk the user-supplied eisa[] address list, read
 * each board's ID register, match it against products[], reserve the port
 * range, read the IRQ from config space and hand the populated hba slot
 * to cpqarray_register_ctlr().
 */
814 static int cpqarray_eisa_detect(void)
821 while(i<8 && eisa[i]) {
822 ctlr = alloc_cpqarray_hba();
/* board signature register lives at base+0xC80 */
826 board_id = inl(eisa[i]+0xC80);
827 for(j=0; j < NR_PRODUCTS; j++)
828 if (board_id == products[j].board_id)
831 if (j == NR_PRODUCTS) {
832 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
833 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
837 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
838 hba[ctlr]->io_mem_addr = eisa[i];
839 hba[ctlr]->io_mem_length = 0x7FF;
840 if(!request_region( hba[ctlr]->io_mem_addr,
841 hba[ctlr]->io_mem_length,
843 printk( KERN_WARNING "cpqarray: I/0 range already in use addr = %lx length=%ld\n",
844 hba[ctlr]->io_mem_addr, hba[ctlr]->io_mem_length);
849 * Read the config register to find our interrupt
/* interrupt select is encoded in the upper nibble at base+0xCC0 */
851 intr = inb(eisa[i]+0xCC0) >> 4;
861 hba[ctlr]->intr = intr;
862 sprintf(hba[ctlr]->devname, "ida%d", ctlr);
863 hba[ctlr]->product_name = products[j].product_name;
864 hba[ctlr]->access = *(products[j].access);
865 hba[ctlr]->ctlr = ctlr;
866 hba[ctlr]->board_id = board_id;
867 hba[ctlr]->pci_dev = NULL; /* not PCI */
870 printk("i = %d, j = %d\n", i, j);
871 printk("irq = %x\n", intr);
872 printk("product name = %s\n", products[j].product_name);
873 printk("board_id = %x\n", board_id);
879 if (cpqarray_register_ctlr(ctlr, 0) == -1)
881 "cpqarray%d: Can't register EISA controller\n",
890 * Open. Make sure the device is really there.
/*
 * ida_open: validate the controller index, special-case root opening the
 * unconfigured raw volume 0 (so the array-config utility can talk to the
 * controller), and bump the per-drive and per-controller usage counts.
 * NOTE(review): `ctlr > MAX_CTLR` looks off-by-one -- an index equal to
 * MAX_CTLR would pass the check; verify against the full original source.
 */
892 static int ida_open(struct inode *inode, struct file *filep)
894 int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
895 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
897 DBGINFO(printk("ida_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk) );
898 if (ctlr > MAX_CTLR || hba[ctlr] == NULL)
902 * Root is allowed to open raw volume zero even if its not configured
903 * so array config can still work. I don't think I really like this,
904 * but I'm already using way to many device nodes to claim another one
905 * for "raw controller".
907 if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) {
908 if (MINOR(inode->i_rdev) != 0)
910 if (!capable(CAP_SYS_ADMIN))
914 hba[ctlr]->drv[dsk].usage_count++;
915 hba[ctlr]->usage_count++;
/*
 * ida_release: undo ida_open's usage-count increments when the device
 * node is closed.
 */
922 static int ida_release(struct inode *inode, struct file *filep)
924 int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
925 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
927 DBGINFO(printk("ida_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk) );
929 hba[ctlr]->drv[dsk].usage_count--;
930 hba[ctlr]->usage_count--;
935 * Enqueuing and dequeuing functions for cmdlists.
/*
 * addQ: append `c` to the circular doubly-linked command list at *Qptr
 * (a lone element points at itself).  Caller must hold the request lock.
 */
937 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
941 c->next = c->prev = c;
943 c->prev = (*Qptr)->prev;
945 (*Qptr)->prev->next = c;
/*
 * removeQ: unlink `c` from the circular list at *Qptr; the single-element
 * and head-removal cases are on lines missing from this extraction.
 */
950 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
952 if (c && c->next != c) {
955 c->prev->next = c->next;
956 c->next->prev = c->prev;
/*
 * complete_buffers: walk a request's buffer_head chain, detaching each bh
 * and signalling the block layer (accounting + b_end_io) with success
 * flag `ok`.
 */
963 static inline void complete_buffers(struct buffer_head *bh, int ok)
965 struct buffer_head *xbh;
968 bh->b_reqnext = NULL;
970 blk_finished_io(bh->b_size >> 9);
971 bh->b_end_io(bh, ok);
978 * Get a request and submit it to the controller.
979 * This routine needs to grab all the requests it possibly can from the
980 * req Q and submit them. Interrupts are off (and need to be off) when you
981 * are in here (either via the dummy do_ida_request functions or by being
982 * called from the interrupt handler
/*
 * do_ida_request: block-layer strategy routine.  Dequeues requests,
 * builds a cmdlist_t with a coalesced scatter-gather list (adjacent
 * physically-contiguous buffer_heads are merged), DMA-maps each segment
 * and queues the command; start_io() pushes queued commands to hardware.
 * io_request_lock is dropped around the SG build and retaken before
 * touching the controller queues.
 */
984 static void do_ida_request(request_queue_t *q)
986 ctlr_info_t *h = q->queuedata;
988 unsigned long lastdataend;
989 struct list_head * queue_head = &q->queue_head;
990 struct buffer_head *bh;
991 struct request *creq;
992 struct scatterlist tmp_sg[SG_MAX];
999 if (list_empty(queue_head))
1002 creq = blkdev_entry_next_request(queue_head);
1003 if (creq->nr_segments > SG_MAX)
/* sanity: a request whose device doesn't belong to this controller is
 * failed outright rather than sent to the wrong hardware */
1006 if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ) {
1007 printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
1008 h->ctlr, creq->rq_dev, creq);
1009 blkdev_dequeue_request(creq);
1010 complete_buffers(creq->bh, 0);
1011 end_that_request_last(creq);
1015 if ((c = cmd_alloc(h,1)) == NULL)
1018 blkdev_dequeue_request(creq);
1020 spin_unlock_irq(&io_request_lock);
1025 c->hdr.unit = MINOR(creq->rq_dev) >> NWD_SHIFT;
1026 c->hdr.size = sizeof(rblk_t) >> 2;
1027 c->size += sizeof(rblk_t);
/* absolute start block = partition offset + request sector */
1029 c->req.hdr.blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect
1034 panic("bh == NULL?");
1036 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
/* extend the previous SG entry when this bh is physically contiguous */
1041 if (bh_phys(bh) == lastdataend) {
1042 tmp_sg[seg-1].length += bh->b_size;
1043 lastdataend += bh->b_size;
1047 tmp_sg[seg].page = bh->b_page;
1048 tmp_sg[seg].length = bh->b_size;
1049 tmp_sg[seg].offset = bh_offset(bh);
1050 lastdataend = bh_phys(bh) + bh->b_size;
1055 /* Now do all the DMA Mappings */
1056 for( i=0; i < seg; i++) {
1057 c->req.sg[i].size = tmp_sg[i].length;
1058 c->req.sg[i].addr = (__u32) pci_map_page(
1059 h->pci_dev, tmp_sg[i].page, tmp_sg[i].offset,
1061 (creq->cmd == READ) ?
1062 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
1064 DBGPX( printk("Submitting %d sectors in %d segments\n", sect, seg); );
1065 c->req.hdr.sg_cnt = seg;
1066 c->req.hdr.blk_cnt = creq->nr_sectors;
1067 c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE;
1068 c->type = CMD_RWREQ;
1070 spin_lock_irq(&io_request_lock);
1072 /* Put the request on the tail of the request queue */
1075 if (h->Qdepth > h->maxQsinceinit)
1076 h->maxQsinceinit = h->Qdepth;
1085 * start_io submits everything on a controller's request queue
1086 * and moves it to the completion queue.
1088 * Interrupts had better be off if you're in here
/*
 * start_io: drain h->reqQ into the controller FIFO, stopping as soon as
 * the FIFO reports full; each submitted command migrates to the
 * completion queue (on a line missing from this extraction).
 */
1090 static void start_io(ctlr_info_t *h)
1094 while((c = h->reqQ) != NULL) {
1095 /* Can't do anything if we're busy */
1096 if (h->access.fifo_full(h) == 0)
1099 /* Get the first entry from the request Q */
1100 removeQ(&h->reqQ, c);
1103 /* Tell the controller to do our bidding */
1104 h->access.submit_command(h, c);
1106 /* Get onto the completion Q */
1106 /* Get onto the completion Q */
1112 * Mark all buffers that cmd was responsible for
1114 static inline void complete_command(cmdlist_t *cmd, int timeout)
1119 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1120 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1121 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1122 cmd->ctlr, cmd->hdr.unit);
1123 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1125 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1126 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1127 cmd->ctlr, cmd->hdr.unit);
1130 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1131 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1132 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1133 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1134 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1139 /* unmap the DMA mapping for all the scatter gather elements */
1140 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1142 pci_unmap_page(hba[cmd->ctlr]->pci_dev,
1143 cmd->req.sg[i].addr, cmd->req.sg[i].size,
1144 (cmd->req.hdr.cmd == IDA_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
1147 complete_buffers(cmd->rq->bh, ok);
1148 DBGPX(printk("Done with %p\n", cmd->rq););
1149 end_that_request_last(cmd->rq);
1153 * The controller will interrupt us upon completion of commands.
1154 * Find the command on the completion queue, remove it, tell the OS and
1155 * try to queue up more IO
/*
 * do_ida_intr: shared interrupt handler.  For every completed-command tag
 * popped from the hardware, search the completion queue by bus address,
 * unlink the match, translate tag error bits into RCODE_INVREQ, and
 * complete it (block request or pending ioctl).  Finishes by kicking
 * do_ida_request() to submit more work.  Whole body runs under
 * io_request_lock with interrupts saved.
 */
1157 static void do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1159 ctlr_info_t *h = dev_id;
1161 unsigned long istat;
1162 unsigned long flags;
1165 istat = h->access.intr_pending(h);
1166 /* Is this interrupt for us? */
1171 * If there are completed commands in the completion queue,
1172 * we had better do something about it.
1174 spin_lock_irqsave(&io_request_lock, flags);
1175 if (istat & FIFO_NOT_EMPTY) {
1176 while((a = h->access.command_completed(h))) {
1178 if ((c = h->cmpQ) == NULL)
1180 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
/* search the circular cmpQ for the command with this bus address */
1183 while(c->busaddr != a) {
1189 * If we've found the command, take it off the
1190 * completion Q and free it
1192 if (c->busaddr == a) {
1193 removeQ(&h->cmpQ, c);
1194 /* Check for invalid command.
1195 * Controller returns command error,
/* low two tag bits set with a clean rcode => controller flagged the
 * command itself as bad */
1199 if((a1 & 0x03) && (c->req.hdr.rcode == 0)) {
1200 c->req.hdr.rcode = RCODE_INVREQ;
1202 if (c->type == CMD_RWREQ) {
1203 complete_command(c, 0);
1205 } else if (c->type == CMD_IOCTL_PEND) {
/* wake the sleeping ioctl submitter */
1206 complete(c->waiting);
1214 * See if we can queue up some more IO
1216 do_ida_request(BLK_DEFAULT_QUEUE(MAJOR_NR + h->ctlr));
1217 spin_unlock_irqrestore(&io_request_lock, flags);
1221 * This timer was for timing out requests that haven't happened after
1222 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1223 * reset a flags structure so we don't flood the user with
1224 * "Non-Fatal error" messages.
/*
 * ida_timer: self-rearming periodic timer; the misc_tflags reset it
 * performs is on a line missing from this extraction.
 */
1226 static void ida_timer(unsigned long tdata)
1228 ctlr_info_t *h = (ctlr_info_t*)tdata;
1230 h->timer.expires = jiffies + IDA_TIMER;
1231 add_timer(&h->timer);
1236 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1237 * setting readahead and submitting commands from userspace to the controller.
/*
 * ida_ioctl: device ioctl dispatcher.  Handles geometry queries (real or
 * faked 255-head/63-sector geometry for drives without BIOS geometry),
 * drive info, revalidation, CAP_SYS_RAWIO-gated passthru commands,
 * PCI/logical-volume info, and falls through to blk_ioctl() for generic
 * block-layer commands.
 */
1239 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1241 int ctlr = MAJOR(inode->i_rdev) - MAJOR_NR;
1242 int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;
1248 struct hd_geometry driver_geo;
1250 if (hba[ctlr]->drv[dsk].cylinders) {
1251 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
1252 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
1253 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
/* no firmware geometry: synthesize 255 heads x 63 sectors */
1255 driver_geo.heads = 0xff;
1256 driver_geo.sectors = 0x3f;
1257 driver_geo.cylinders = hba[ctlr]->drv[dsk].nr_blks
1261 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
1262 if (copy_to_user((void *) arg, &driver_geo,
1263 sizeof( struct hd_geometry)))
1267 case HDIO_GETGEO_BIG:
1269 struct hd_big_geometry driver_geo;
1271 if (hba[ctlr]->drv[dsk].cylinders) {
1272 driver_geo.heads = hba[ctlr]->drv[dsk].heads;
1273 driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
1274 driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
1276 driver_geo.heads = 0xff;
1277 driver_geo.sectors = 0x3f;
1278 driver_geo.cylinders = hba[ctlr]->drv[dsk].nr_blks
1282 hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
1283 if (copy_to_user((void *) arg, &driver_geo,
1284 sizeof( struct hd_big_geometry)))
1292 ida_ioctl_t *io = (ida_ioctl_t*)arg;
1293 return copy_to_user(&io->c.drv,&hba[ctlr]->drv[dsk],sizeof(drv_info_t));
1296 return revalidate_logvol(inode->i_rdev, 1);
/* passthru: copy the user's command block in, run it, copy results back */
1300 ida_ioctl_t *io = (ida_ioctl_t*)arg;
1303 if (!capable(CAP_SYS_RAWIO))
1305 if (copy_from_user(&my_io, io, sizeof(my_io)))
1307 error = ida_ctlr_ioctl(ctlr, dsk, &my_io);
1310 if (copy_to_user(io, &my_io, sizeof(my_io)))
1314 case IDABIGPASSTHRU:
1317 ida_big_ioctl_t *io = (ida_big_ioctl_t*)arg;
1318 ida_big_ioctl_t my_io;
1320 if (!capable(CAP_SYS_RAWIO))
1322 if (copy_from_user(&my_io, io, sizeof(my_io)))
1324 error = ida_ctlr_big_ioctl(ctlr, dsk, &my_io);
1327 if (copy_to_user(io, &my_io, sizeof(my_io)))
/* NOTE(review): these put_user return values appear unchecked in the
 * visible lines -- confirm against the full original source */
1334 put_user(hba[ctlr]->ctlr_sig, (int*)arg);
1336 case IDAREVALIDATEVOLS:
1337 return revalidate_allvol(inode->i_rdev);
1338 case IDADRIVERVERSION:
1339 if (!arg) return -EINVAL;
1340 put_user(DRIVER_VERSION, (unsigned long*)arg);
1345 ida_pci_info_struct pciinfo;
1349 pciinfo.bus = hba[ctlr]->pci_dev->bus->number;
1350 pciinfo.dev_fn = hba[ctlr]->pci_dev->devfn;
1351 pciinfo.board_id = hba[ctlr]->board_id;
1352 if(copy_to_user((void *) arg, &pciinfo,
1353 sizeof( ida_pci_info_struct)))
1358 return( deregister_disk(ctlr,dsk));
1362 return(register_new_disk(ctlr, logvol));
1367 idaLogvolInfo_struct luninfo;
1371 luninfo.LogVolID = dsk;
1372 luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;
1374 /* count partitions 1 to 15 with sizes > 0 */
1375 start = (dsk << NWD_SHIFT);
1376 for(i=1; i <IDA_MAX_PART; i++) {
1377 int minor = start+i;
1378 if(hba[ctlr]->sizes[minor] != 0)
1381 luninfo.num_parts = num_parts;
1382 if (copy_to_user((void *) arg, &luninfo,
1383 sizeof( idaLogvolInfo_struct) ))
/* anything unrecognized goes to the generic block-layer ioctl handler */
1401 return blk_ioctl(inode->i_rdev, cmd, arg);
1409 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1410 * The command block (io) has already been copied to kernel space for us,
1411 * however, any elements in the sglist need to be copied to kernel space
1412 * or copied back to userspace.
1414 * Only root may perform a controller passthru command, however I'm not doing
1415 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1416 * putting a 64M buffer in the sglist is probably a *bad* idea.
/*
 * Execute a user-supplied controller command (IDA_PASSTHRU).  The ioctl
 * block (io) is already a kernel-space copy.  Depending on the opcode,
 * exactly one scatter-gather buffer (sg[0]) is kmalloc'd and copied from
 * and/or back to the user-space address in io->sg[0].addr.  The command
 * is queued and completion is awaited via DECLARE_COMPLETION.
 * NOTE(review): case labels/returns are partially elided in this view.
 */
1418 static int ida_ctlr_ioctl(int ctlr, int dsk, ida_ioctl_t *io)
1420 ctlr_info_t *h = hba[ctlr];
1423 unsigned long flags;
1425 DECLARE_COMPLETION(wait);
1427 if ((c = cmd_alloc(h, 0)) == NULL)
/* UNITVALID lets the caller override the target unit; else use dsk */
1430 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1431 c->hdr.size = sizeof(rblk_t) >> 2;
1432 c->size += sizeof(rblk_t);
1434 c->req.hdr.cmd = io->cmd;
1435 c->req.hdr.blk = io->blk;
1436 c->req.hdr.blk_cnt = io->blk_cnt;
1437 c->type = CMD_IOCTL_PEND;
1439 /* Pre submit processing */
1442 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1448 if (copy_from_user(p, (void*)io->sg[0].addr, io->sg[0].size)) {
/* NOTE(review): for this (elided) opcode the kernel copy of the command
 * block itself is DMA-mapped via req.hdr.blk */
1453 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1454 sizeof(ida_ioctl_t),
1455 PCI_DMA_BIDIRECTIONAL);
1456 c->req.sg[0].size = io->sg[0].size;
1457 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1458 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1459 c->req.hdr.sg_cnt = 1;
/* read-only commands: allocate a buffer, no copy-in needed */
1462 case SENSE_SURF_STATUS:
1463 case READ_FLASH_ROM:
1464 case SENSE_CONTROLLER_PERFORMANCE:
1465 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1472 c->req.sg[0].size = io->sg[0].size;
1473 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1474 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1475 c->req.hdr.sg_cnt = 1;
/* write commands: copy user data into the buffer before submit */
1478 case IDA_WRITE_MEDIA:
1479 case DIAG_PASS_THRU:
1480 case COLLECT_BUFFER:
1481 case WRITE_FLASH_ROM:
1482 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1488 if (copy_from_user(p, (void*)io->sg[0].addr, io->sg[0].size)) {
1493 c->req.sg[0].size = io->sg[0].size;
1494 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1495 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1496 c->req.hdr.sg_cnt = 1;
/* default: hand the embedded command record to the controller directly */
1499 c->req.sg[0].size = sizeof(io->c);
1500 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1501 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1502 c->req.hdr.sg_cnt = 1;
1507 /* Put the request on the tail of the request queue */
1508 spin_lock_irqsave(&io_request_lock, flags);
1512 spin_unlock_irqrestore(&io_request_lock, flags);
1514 /* Wait for completion */
1515 wait_for_completion(&wait);
1518 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1519 PCI_DMA_BIDIRECTIONAL);
1520 /* Post submit processing */
1523 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1524 sizeof(ida_ioctl_t),
1525 PCI_DMA_BIDIRECTIONAL);
/* read-type commands: copy results back out to user space */
1527 case SENSE_SURF_STATUS:
1528 case DIAG_PASS_THRU:
1529 case SENSE_CONTROLLER_PERFORMANCE:
1530 case READ_FLASH_ROM:
1531 if (copy_to_user((void*)io->sg[0].addr, p, io->sg[0].size)) {
1535 /* fall through and free p */
1537 case IDA_WRITE_MEDIA:
1538 case COLLECT_BUFFER:
1539 case WRITE_FLASH_ROM:
/* report controller return code back to the caller */
1546 io->rcode = c->req.hdr.rcode;
1552 * ida_ctlr_big_ioctl is for passing commands to the controller from userspace.
1553 * The command block (io) has already been copied to kernel space for us,
1555 * Only root may perform a controller passthru command, however I'm not doing
1556 * any serious sanity checking on the arguments.
/*
 * Execute a large user passthru command (IDABIGPASSTHRU).  The ioctl block
 * (io) is already a kernel-space copy.  The user buffer (io->buff of
 * io->buff_size bytes) is split across up to SG_MAX kmalloc'd chunks of at
 * most io->buff_malloc_size each; data is copied in for IDA_XFER_WRITE and
 * back out for IDA_XFER_READ.  For PASSTHRU_A the scsi_param_t record is
 * copied in separately and DMA-mapped via req.hdr.blk.
 *
 * FIX(review): the ida_alloc_cleanup error path looped over i but tested,
 * unmapped and freed buff[sg_used]/buff_size[sg_used] — freeing one slot
 * (possibly never allocated) repeatedly and leaking every other chunk.
 * It now indexes with the loop variable i.
 */
1558 static int ida_ctlr_big_ioctl(int ctlr, int dsk, ida_big_ioctl_t *io)
1560 ctlr_info_t *h = hba[ctlr];
1562 __u8 *scsi_param = NULL;
1563 __u8 *buff[SG_MAX] = {NULL,};
1564 size_t buff_size[SG_MAX];
1566 unsigned long flags;
1569 DECLARE_COMPLETION(wait);
1571 /* Check kmalloc limits using all SGs */
1572 if( io->buff_malloc_size > IDA_MAX_KMALLOC_SIZE)
1574 if( io->buff_size > io->buff_malloc_size * SG_MAX)
1576 if ((c = cmd_alloc(h, 0)) == NULL)
1580 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1581 c->hdr.size = sizeof(rblk_t) >> 2;
1582 c->size += sizeof(rblk_t);
1584 c->req.hdr.cmd = io->cmd;
1585 c->req.hdr.blk = io->blk;
1586 c->req.hdr.blk_cnt = io->blk_cnt;
1587 c->type = CMD_IOCTL_PEND;
1589 /* Pre submit processing */
1590 /* for passthru_a the scsi command is in another record */
1591 if (io->cmd == PASSTHRU_A) {
1593 if (io->scsi_param == NULL)
1599 scsi_param = kmalloc(sizeof(scsi_param_t), GFP_KERNEL);
1600 if (scsi_param == NULL) {
1606 /* copy the scsi command to get passed thru */
1607 if (copy_from_user(scsi_param, io->scsi_param,
1608 sizeof(scsi_param_t))) {
1614 /* with this command the scsi command is separate */
1615 c->req.hdr.blk = pci_map_single(h->pci_dev, scsi_param,
1616 sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
1619 /* fill in the SG entries */
1620 /* create buffers if we need to */
1621 if(io->buff_size > 0) {
1622 size_t size_left_alloc = io->buff_size;
1623 __u8 *data_ptr = io->buff;
1625 while(size_left_alloc > 0) {
1626 buff_size[sg_used] = (size_left_alloc
1627 > io->buff_malloc_size)
1628 ? io->buff_malloc_size : size_left_alloc;
1629 buff[sg_used] = kmalloc( buff_size[sg_used],
1631 if (buff[sg_used] == NULL) {
1633 goto ida_alloc_cleanup;
1635 if(io->xfer_type & IDA_XFER_WRITE) {
1636 /* Copy the data into the buffer created */
1637 if (copy_from_user(buff[sg_used], data_ptr,
1638 buff_size[sg_used])) {
1640 goto ida_alloc_cleanup;
1643 /* put the data into the scatter gather list */
1644 c->req.sg[sg_used].size = buff_size[sg_used];
1645 c->req.sg[sg_used].addr = pci_map_single(h->pci_dev,
1646 buff[sg_used], buff_size[sg_used],
1647 PCI_DMA_BIDIRECTIONAL);
1649 size_left_alloc -= buff_size[sg_used];
1650 data_ptr += buff_size[sg_used];
1654 c->req.hdr.sg_cnt = sg_used;
1658 /* Put the request on the tail of the request queue */
1659 spin_lock_irqsave(&io_request_lock, flags);
1663 spin_unlock_irqrestore(&io_request_lock, flags);
1665 /* Wait for completion */
1666 wait_for_completion(&wait);
1668 for(i=0; i<c->req.hdr.sg_cnt; i++) {
1669 pci_unmap_single(h->pci_dev, c->req.sg[i].addr,
1670 c->req.sg[i].size, PCI_DMA_BIDIRECTIONAL);
1673 /* if we are reading data from the hardware copy it back to user */
1674 if (io->xfer_type & IDA_XFER_READ) {
1675 __u8 *data_ptr = io->buff;
1678 for(i=0; i<c->req.hdr.sg_cnt; i++) {
1679 if (copy_to_user(data_ptr, buff[i], buff_size[i])) {
1681 goto ida_alloc_cleanup;
1683 data_ptr += buff_size[i];
1689 io->rcode = c->req.hdr.rcode;
1692 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1693 sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
1694 /* copy the scsi_params back to the user */
1695 if( copy_to_user(io->scsi_param, scsi_param,
1696 sizeof(scsi_param_t))) {
1706 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1707 sizeof(scsi_param_t), PCI_DMA_BIDIRECTIONAL);
/* unmap and free every chunk actually allocated (indexed by i, not sg_used) */
1710 for (i=0; i<sg_used; i++) {
1711 if(buff[i] != NULL) {
1712 pci_unmap_single(h->pci_dev, c->req.sg[i].addr,
1713 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1714 kfree(buff[i]);
1721 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1722 scheme to suballocate them to the driver. Operations that are not time
1723 * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
1724 * as the first argument to get a new command.
1726 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1730 dma_addr_t cmd_dhandle;
/* non-pool path: allocate a fresh DMA-coherent command block */
1732 if (!get_from_pool) {
1733 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1734 sizeof(cmdlist_t), &cmd_dhandle);
/* pool path: claim a free slot in the bitmap, retrying on races */
1739 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1742 } while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
1743 c = h->cmd_pool + i;
/* DMA handle of slot i within the pre-allocated pool */
1744 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1748 memset(c, 0, sizeof(cmdlist_t));
/* remember the bus address so the hardware can be given this command */
1749 c->busaddr = cmd_dhandle;
/* Release a command obtained from cmd_alloc(); got_from_pool must match
 * the get_from_pool flag used at allocation time. */
1753 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1757 if (!got_from_pool) {
1758 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
/* pool path: compute the slot index and clear its bitmap bit */
1761 i = c - h->cmd_pool;
1762 clear_bit(i%32, h->cmd_pool_bits+(i/32));
1767 /***********************************************************************
1769 Send a command to an IDA using the memory mapped FIFO interface
1770 and wait for it to complete.
1771 This routine should only be called at init time.
1772 ***********************************************************************/
/* NOTE(review): the opening lines of this function (name and leading
 * parameters) are elided in this view.  Callers invoke it as
 * sendcmd(cmd, ctlr, buff, size, blk, blkcnt, log_unit) — see the
 * ID_CTLR / RESUME_BACKGROUND_ACTIVITY call sites below.  It polls for
 * completion with interrupts masked, so it is init-time only. */
unsigned int blkcnt,
unsigned int log_unit )
1786 ctlr_info_t *info_p = hba[ctlr];
1788 c = cmd_alloc(info_p, 1);
1792 c->hdr.unit = log_unit;
1794 c->hdr.size = sizeof(rblk_t) >> 2;
1795 c->size += sizeof(rblk_t);
1797 /* The request information. */
1798 c->req.hdr.next = 0;
1799 c->req.hdr.rcode = 0;
1801 c->req.hdr.sg_cnt = 1;
1802 c->req.hdr.reserved = 0;
/* default to one 512-byte sector when no explicit size was given */
1805 c->req.sg[0].size = 512;
1807 c->req.sg[0].size = size;
1809 c->req.hdr.blk = blk;
1810 c->req.hdr.blk_cnt = blkcnt;
1811 c->req.hdr.cmd = (unsigned char) cmd;
1812 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1813 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
/* mask controller interrupts: completion is detected by polling */
1817 info_p->access.set_intr_mask(info_p, 0);
1818 /* Make sure there is room in the command FIFO */
1819 /* Actually it should be completely empty at this time. */
1820 for (i = 200000; i > 0; i--) {
1821 temp = info_p->access.fifo_full(info_p);
1827 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1828 " waiting!\n", ctlr);
1834 info_p->access.submit_command(info_p, c);
1835 complete = pollcomplete(ctlr);
1837 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1838 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
/* pollcomplete() returns 1 on timeout; otherwise the completed
 * command-list bus address, which must match the one we submitted */
1839 if (complete != 1) {
1840 if (complete != c->busaddr) {
1841 printk( KERN_WARNING
1842 "cpqarray ida%d: idaSendPciCmd "
1843 "Invalid command list address returned! (%08lx)\n",
1844 ctlr, (unsigned long)complete);
1845 cmd_free(info_p, c, 1);
1849 printk( KERN_WARNING
1850 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1851 "No command list address returned!\n",
1853 cmd_free(info_p, c, 1);
/* any error bits other than BIG_PROBLEM are fatal at init time */
1857 if (c->req.hdr.rcode & 0x00FE) {
1858 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1859 printk( KERN_WARNING
1860 "cpqarray ida%d: idaSendPciCmd, error: "
1861 "Controller failed at init time "
1862 "cmd: 0x%x, return code = 0x%x\n",
1863 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1865 cmd_free(info_p, c, 1);
1869 cmd_free(info_p, c, 1);
/* Block-layer revalidate hook: non-forcing variant (maxusage 0). */
1873 static int frevalidate_logvol(kdev_t dev)
1875 return revalidate_logvol(dev, 0);
1879 * revalidate_allvol is for online array config utilities. After a
1880 * utility reconfigures the drives in the array, it can use this function
1881 * (through an ioctl) to make the driver zap any previous disk structs for
1882 * that controller and get new ones.
1884 * Right now I'm using the getgeometry() function to do this, but this
1885 * function should probably be finer grained and allow you to revalidate one
1886 particular logical volume (instead of all of them on a particular
1889 static int revalidate_allvol(kdev_t dev)
1892 unsigned long flags;
1894 ctlr = MAJOR(dev) - MAJOR_NR;
/* only meaningful on minor 0 (the whole-controller node) */
1895 if (MINOR(dev) != 0)
1898 spin_lock_irqsave(&io_request_lock, flags);
/* refuse while anyone else holds the controller open */
1899 if (hba[ctlr]->usage_count > 1) {
1900 spin_unlock_irqrestore(&io_request_lock, flags);
1901 printk(KERN_WARNING "cpqarray: Device busy for volume"
1902 " revalidation (usage=%d)\n", hba[ctlr]->usage_count);
1905 spin_unlock_irqrestore(&io_request_lock, flags);
1906 hba[ctlr]->usage_count++;
1909 * Set the partition and block size structures for all volumes
1910 * on this controller to zero. And set the hardsizes to non zero to
1911 * avoid a possible divide by zero error.
1912 * We will reread all of this data
1914 memset(hba[ctlr]->hd, 0, sizeof(struct hd_struct)*NWD*16);
1915 memset(hba[ctlr]->sizes, 0, sizeof(int)*NWD*16);
1916 memset(hba[ctlr]->blocksizes, 0, sizeof(int)*NWD*16);
1917 memset(hba[ctlr]->drv, 0, sizeof(drv_info_t)*NWD);
1918 hba[ctlr]->gendisk.nr_real = 0;
1921 hba[ctlr]->hardsizes[i] = 0;
1923 * Tell the array controller not to give us any interrupts while
1924 * we check the new geometry. Then turn interrupts back on when
1927 hba[ctlr]->access.set_intr_mask(hba[ctlr], 0);
1929 hba[ctlr]->access.set_intr_mask(hba[ctlr], FIFO_NOT_EMPTY);
/* re-read partition tables for each logical drive that now has a size */
1932 for(i=0; i<NWD; i++)
1933 if (hba[ctlr]->sizes[i<<NWD_SHIFT])
1934 revalidate_logvol(dev+(i<<NWD_SHIFT), 2);
1936 hba[ctlr]->usage_count--;
/*
 * Remove a logical volume from the driver's view: invalidate its minors,
 * zero its partition/size info, and recompute the controller's highest
 * active lun.  Requires CAP_SYS_RAWIO.
 */
1940 static int deregister_disk(int ctlr, int logvol)
1942 unsigned long flags;
1943 struct gendisk *gdev = &(hba[ctlr]->gendisk);
1944 ctlr_info_t *h = hba[ctlr];
1945 int start, max_p, i;
1948 if (!capable(CAP_SYS_RAWIO))
1951 spin_lock_irqsave(&io_request_lock, flags);
1952 /* make sure logical volume is NOT in use */
1953 if( h->drv[logvol].usage_count > 1) {
1954 spin_unlock_irqrestore(&io_request_lock, flags);
1957 h->drv[logvol].usage_count++;
1958 spin_unlock_irqrestore(&io_request_lock, flags);
1960 /* invalidate the devices and deregister the disk */
1961 max_p = gdev->max_p;
1962 start = logvol << gdev->minor_shift;
1963 for (i=max_p-1; i>=0; i--) {
1964 int minor = start+i;
1965 // printk("invalidating( %d %d)\n", ctlr, minor);
1966 invalidate_device(MKDEV(MAJOR_NR+ctlr, minor), 1);
1967 /* so open will now fail */
1968 hba[ctlr]->sizes[minor] = 0;
1969 /* so it will no longer appear in /proc/partitions */
1970 gdev->part[minor].start_sect = 0;
1971 gdev->part[minor].nr_sects = 0;
1973 /* check to see if it was the last disk */
1974 if (logvol == h->highest_lun) {
1975 /* if so, find the new highest lun */
1976 int i, newhighest =-1;
1977 for(i=0; i<h->highest_lun; i++) {
1978 /* if the disk has size > 0, it is available */
1979 if (hba[ctlr]->sizes[i << gdev->minor_shift] != 0)
1982 h->highest_lun = newhighest;
1986 gdev->nr_real = h->highest_lun+1;
1987 /* zero out the disk size info */
1988 h->drv[logvol].nr_blks = 0;
1989 h->drv[logvol].cylinders = 0;
1990 h->drv[logvol].blk_size = 0;
/*
 * Interrupt-driven analogue of sendcmd(): builds a single-SG command,
 * queues it on the controller's request queue, and sleeps on a completion
 * until the ISR finishes it.  Used after interrupts are enabled (e.g. by
 * register_new_disk).  NOTE(review): the leading parameter lines are
 * partially elided in this view; call sites pass
 * (cmd, ctlr, buff, size, blk, blkcnt, log_unit).
 */
1995 static int sendcmd_withirq(
2001 unsigned int blkcnt,
2002 unsigned int log_unit )
2005 unsigned long flags;
2006 ctlr_info_t *info_p = hba[ctlr];
2007 DECLARE_COMPLETION(wait);
2009 c = cmd_alloc(info_p, 0);
2012 c->type = CMD_IOCTL_PEND;
2014 c->hdr.unit = log_unit;
2016 c->hdr.size = sizeof(rblk_t) >> 2;
2017 c->size += sizeof(rblk_t);
2019 /* The request information. */
2020 c->req.hdr.next = 0;
2021 c->req.hdr.rcode = 0;
2023 c->req.hdr.sg_cnt = 1;
2024 c->req.hdr.reserved = 0;
/* default to one 512-byte sector when no explicit size was given */
2027 c->req.sg[0].size = 512;
2029 c->req.sg[0].size = size;
2031 c->req.hdr.blk = blk;
2032 c->req.hdr.blk_cnt = blkcnt;
2033 c->req.hdr.cmd = (unsigned char) cmd;
2034 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
2035 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
2038 /* Put the request on the tail of the request queue */
2039 spin_lock_irqsave(&io_request_lock, flags);
2040 addQ(&info_p->reqQ, c);
2043 spin_unlock_irqrestore(&io_request_lock, flags);
2045 /* Wait for completion */
2046 wait_for_completion(&wait);
/* classify the controller's return code */
2048 if (c->req.hdr.rcode & RCODE_FATAL) {
2049 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
2050 c->ctlr, c->hdr.unit);
2051 cmd_free(info_p, c, 0);
2054 if (c->req.hdr.rcode & RCODE_INVREQ) {
2055 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
2056 c->ctlr, c->hdr.unit, c->req.hdr.cmd,
2057 c->req.hdr.blk, c->req.hdr.blk_cnt,
2058 c->req.hdr.sg_cnt, c->req.hdr.rcode);
2059 cmd_free(info_p, c, 0);
2062 cmd_free(info_p, c, 0);
/*
 * Bring a newly configured logical volume online: query its status and
 * identity from the controller (with IRQs, via sendcmd_withirq), record
 * its geometry in drv[], reset the per-minor block sizes, and let the
 * block layer re-read partitions via grok_partitions().  Requires
 * CAP_SYS_RAWIO.  NOTE(review): some error-return lines are elided here.
 */
2066 static int register_new_disk(int ctlr, int logvol)
2068 struct gendisk *gdev = &(hba[ctlr]->gendisk);
2069 ctlr_info_t *info_p = hba[ctlr];
2071 sense_log_drv_stat_t *id_lstatus_buf;
2072 id_log_drv_t *id_ldrive;
2078 if (!capable(CAP_SYS_RAWIO))
2080 if( (logvol < 0) || (logvol >= IDA_MAX_PART))
2082 /* disk is already registered */
2083 if(hba[ctlr]->sizes[logvol << gdev->minor_shift] != 0 )
2086 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
2087 if(id_ldrive == NULL) {
2088 printk( KERN_ERR "cpqarray: out of memory.\n");
2091 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
2092 if(id_lstatus_buf == NULL) {
2094 printk( KERN_ERR "cpqarray: out of memory.\n");
2098 size = sizeof(sense_log_drv_stat_t);
2101 Send "Identify logical drive status" cmd
2103 ret_code = sendcmd_withirq(SENSE_LOG_DRV_STAT,
2104 ctlr, id_lstatus_buf, size, 0, 0, logvol);
2105 if (ret_code == IO_ERROR) {
2107 If can't get logical drive status, set
2108 the logical drive map to 0, so the
2109 idastubopen will fail for all logical drives
2112 /* Free all the buffers and return */
2114 kfree(id_lstatus_buf);
2119 Make sure the logical drive is configured
2121 if (id_lstatus_buf->status == LOG_NOT_CONF) {
2122 printk(KERN_WARNING "cpqarray: c%dd%d array not configured\n",
2124 kfree(id_lstatus_buf);
2128 ret_code = sendcmd_withirq(ID_LOG_DRV, ctlr, id_ldrive,
2129 sizeof(id_log_drv_t), 0, 0, logvol);
2131 If error, the bit for this
2132 logical drive won't be set and
2133 idastubopen will return error.
2135 if (ret_code == IO_ERROR) {
2136 printk(KERN_WARNING "cpqarray: c%dd%d unable to ID logical volume\n",
2138 kfree(id_lstatus_buf);
/* record the reported geometry for this logical drive */
2142 drv = &info_p->drv[logvol];
2143 drv->blk_size = id_ldrive->blk_size;
2144 drv->nr_blks = id_ldrive->nr_blks;
2145 drv->cylinders = id_ldrive->drv.cyl;
2146 drv->heads = id_ldrive->drv.heads;
2147 drv->sectors = id_ldrive->drv.sect_per_track;
2148 info_p->log_drv_map |= (1 << logvol);
2149 if (info_p->highest_lun < logvol)
2150 info_p->highest_lun = logvol;
2152 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
2153 ctlr, logvol, drv->blk_size, drv->nr_blks);
2155 hba[ctlr]->drv[logvol].usage_count = 0;
2157 max_p = gdev->max_p;
2158 start = logvol<< gdev->minor_shift;
2160 for(i=max_p-1; i>=0; i--) {
2161 int minor = start+i;
2162 invalidate_device(MKDEV(MAJOR_NR + ctlr, minor), 1);
2163 gdev->part[minor].start_sect = 0;
2164 gdev->part[minor].nr_sects = 0;
2166 /* reset the blocksize so we can read the partition table */
2167 blksize_size[MAJOR_NR+ctlr][minor] = 1024;
2168 hba[ctlr]->hardsizes[minor] = drv->blk_size;
2170 ++hba[ctlr]->log_drives;
2171 gdev->nr_real = info_p->highest_lun + 1;
2172 /* setup partitions per disk */
2173 grok_partitions(gdev, logvol, IDA_MAX_PART, drv->nr_blks);
2175 kfree(id_lstatus_buf);
2180 /* Borrowed and adapted from sd.c */
2181 static int revalidate_logvol(kdev_t dev, int maxusage)
2184 struct gendisk *gdev;
2185 unsigned long flags;
2190 target = DEVICE_NR(dev);
2191 ctlr = MAJOR(dev) - MAJOR_NR;
2192 gdev = &(hba[ctlr]->gendisk);
2194 spin_lock_irqsave(&io_request_lock, flags);
/* refuse if the volume is opened by more users than the caller allows */
2195 if (hba[ctlr]->drv[target].usage_count > maxusage) {
2196 spin_unlock_irqrestore(&io_request_lock, flags);
2197 printk(KERN_WARNING "cpqarray: Device busy for "
2198 "revalidation (usage=%d)\n",
2199 hba[ctlr]->drv[target].usage_count);
/* pin the volume while we rebuild its partition info */
2203 hba[ctlr]->drv[target].usage_count++;
2204 spin_unlock_irqrestore(&io_request_lock, flags);
2206 max_p = gdev->max_p;
2207 start = target << gdev->minor_shift;
2209 for(i=max_p-1; i>=0; i--) {
2210 int minor = start+i;
2211 invalidate_device(MKDEV(MAJOR_NR + ctlr, minor), 1);
2212 gdev->part[minor].start_sect = 0;
2213 gdev->part[minor].nr_sects = 0;
2215 /* reset the blocksize so we can read the partition table */
2216 blksize_size[MAJOR_NR+ctlr][minor] = 1024;
2219 /* 16 minors per disk... */
2220 grok_partitions(gdev, target, IDA_MAX_PART,
2221 hba[ctlr]->drv[target].nr_blks);
2222 hba[ctlr]->drv[target].usage_count--;
2227 /********************************************************************
2229 Wait polling for a command to complete.
2230 The memory mapped FIFO is polled for the completion.
2231 Used only at init time, interrupts disabled.
2232 ********************************************************************/
/* Poll the controller (200000 x 10us ~= 2s) for a completed command;
 * returns the completed command's bus address, or 1 on timeout. */
2233 static int pollcomplete(int ctlr)
2238 /* Wait (up to 2 seconds) for a command to complete */
2240 for (i = 200000; i > 0; i--) {
2241 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2243 udelay(10); /* a short fixed delay */
2247 /* Invalid address to tell caller we ran out of time */
2250 /*****************************************************************
2252 Starts the controller firmware's background processing.
2253 Currently only the Integrated Raid controller needs this done.
2254 If the PCI mem address registers are written to after this,
2255 data corruption may occur
2256 *****************************************************************/
2257 static void start_fwbk(int ctlr)
2259 id_ctlr_t *id_ctlr_buf;
/* only the two Integrated Raid board IDs need this */
2262 if( (hba[ctlr]->board_id != 0x40400E11)
2263 && (hba[ctlr]->board_id != 0x40480E11) )
2265 /* Not an Integrated Raid, so there is nothing for us to do */
2267 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
2269 /* Command does not return anything, but idasend command needs a
2271 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
2272 if(id_ctlr_buf==NULL) {
2273 printk(KERN_WARNING "cpqarray: Out of memory. "
2274 "Unable to start background processing.\n");
2277 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
2278 id_ctlr_buf, 0, 0, 0, 0);
2279 if(ret_code != IO_OK)
2280 printk(KERN_WARNING "cpqarray: Unable to start"
2281 " background processing\n");
2285 /*****************************************************************
2287 Get ida logical volume geometry from the controller
2288 This is a large bit of code which once existed in two flavors,
2289 It is used only at init time.
2290 *****************************************************************/
/*
 * Init-time discovery: queries the controller (via polled sendcmd) for its
 * identity, then walks every logical drive, reading status, identity and
 * sense-config to populate drv[], log_drv_map, highest_lun and the
 * physical/assign/spare drive maps.  On any IO_ERROR the log_drv_map is
 * zeroed so idastubopen fails for the whole controller.
 */
2291 static void getgeometry(int ctlr)
2293 id_log_drv_t *id_ldrive;
2294 id_ctlr_t *id_ctlr_buf;
2295 sense_log_drv_stat_t *id_lstatus_buf;
2296 config_t *sense_config_buf;
2297 unsigned int log_unit, log_index;
2300 ctlr_info_t *info_p = hba[ctlr];
2303 info_p->log_drv_map = 0;
2305 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
2306 if(id_ldrive == NULL) {
2307 printk( KERN_ERR "cpqarray: out of memory.\n");
2311 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
2312 if(id_ctlr_buf == NULL) {
2314 printk( KERN_ERR "cpqarray: out of memory.\n");
2318 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
2319 if(id_lstatus_buf == NULL) {
2322 printk( KERN_ERR "cpqarray: out of memory.\n");
2326 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
2327 if(sense_config_buf == NULL) {
2328 kfree(id_lstatus_buf);
2331 printk( KERN_ERR "cpqarray: out of memory.\n");
2335 memset(id_ldrive, 0, sizeof(id_log_drv_t));
2336 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
2337 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
2338 memset(sense_config_buf, 0, sizeof(config_t));
2340 info_p->phys_drives = 0;
2341 info_p->log_drv_map = 0;
2342 info_p->drv_assign_map = 0;
2343 info_p->drv_spare_map = 0;
2344 info_p->mp_failed_drv_map = 0; /* only initialized here */
2345 /* Get controllers info for this logical drive */
2346 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
2347 if (ret_code == IO_ERROR) {
2349 * If can't get controller info, set the logical drive map to 0,
2350 * so the idastubopen will fail on all logical drives
2351 * on the controller.
2353 /* Free all the buffers and return */
2354 printk(KERN_ERR "cpqarray: error sending ID controller\n");
2355 kfree(sense_config_buf);
2356 kfree(id_lstatus_buf);
2362 info_p->log_drives = id_ctlr_buf->nr_drvs;; /* note: stray ';;' — harmless */
2364 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
2365 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
2367 printk(" (%s)\n", info_p->product_name);
2369 * Initialize logical drive map to zero
2373 * Get drive geometry for all logical drives
2375 if (id_ctlr_buf->nr_drvs > IDA_MAX_PART)
2376 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
2377 "16 logical drives per controller.\n. "
2378 " Additional drives will not be "
2379 "detected\n", ctlr);
2382 (log_index < id_ctlr_buf->nr_drvs)
2383 && (log_unit < NWD);
2386 size = sizeof(sense_log_drv_stat_t);
2389 Send "Identify logical drive status" cmd
2391 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
2392 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
2393 if (ret_code == IO_ERROR) {
2395 If can't get logical drive status, set
2396 the logical drive map to 0, so the
2397 idastubopen will fail for all logical drives
2400 info_p->log_drv_map = 0;
2401 printk( KERN_WARNING
2402 "cpqarray ida%d: idaGetGeometry - Controller"
2403 " failed to report status of logical drive %d\n"
2404 "Access to this controller has been disabled\n",
2406 /* Free all the buffers and return */
2407 kfree(sense_config_buf);
2408 kfree(id_lstatus_buf);
2414 Make sure the logical drive is configured
2416 if (id_lstatus_buf->status != LOG_NOT_CONF) {
2417 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
2418 sizeof(id_log_drv_t), 0, 0, log_unit);
2420 If error, the bit for this
2421 logical drive won't be set and
2422 idastubopen will return error.
2424 if (ret_code != IO_ERROR) {
/* record the reported geometry for this logical drive */
2425 drv = &info_p->drv[log_unit];
2426 drv->blk_size = id_ldrive->blk_size;
2427 drv->nr_blks = id_ldrive->nr_blks;
2428 drv->cylinders = id_ldrive->drv.cyl;
2429 drv->heads = id_ldrive->drv.heads;
2430 drv->sectors = id_ldrive->drv.sect_per_track;
2431 info_p->log_drv_map |= (1 << log_unit);
2433 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
2434 ctlr, log_unit, drv->blk_size, drv->nr_blks);
2435 ret_code = sendcmd(SENSE_CONFIG,
2436 ctlr, sense_config_buf,
2437 sizeof(config_t), 0, 0, log_unit);
2438 if (ret_code == IO_ERROR) {
2439 info_p->log_drv_map = 0;
2440 /* Free all the buffers and return */
2441 printk(KERN_ERR "cpqarray: error sending sense config\n");
2442 kfree(sense_config_buf);
2443 kfree(id_lstatus_buf);
2449 if(log_unit > info_p->highest_lun)
2450 info_p->highest_lun = log_unit;
2451 info_p->phys_drives =
2452 sense_config_buf->ctlr_phys_drv;
2453 info_p->drv_assign_map
2454 |= sense_config_buf->drv_asgn_map;
2455 info_p->drv_assign_map
2456 |= sense_config_buf->spare_asgn_map;
2457 info_p->drv_spare_map
2458 |= sense_config_buf->spare_asgn_map;
2459 } /* end of if no error on id_ldrive */
2460 log_index = log_index + 1;
2461 } /* end of if logical drive configured */
2462 } /* end of for log_unit */
2463 kfree(sense_config_buf);
2465 kfree(id_lstatus_buf);
/* Module unload: unregister the PCI driver, tear down any controllers
 * it left behind (EISA boards), and remove the /proc entry. */
2471 static void __exit cleanup_cpqarray_module(void)
2475 pci_unregister_driver(&cpqarray_pci_driver);
2476 /* double check that all controller entries have been removed */
2477 for (i=0; i< MAX_CTLR; i++) {
2478 if (hba[i] != NULL) {
2479 printk(KERN_WARNING "cpqarray: had to remove"
2480 " controller %d\n", i);
2481 cpqarray_remove_one_eisa(i);
2484 remove_proc_entry("cpqarray", proc_root_driver);
/* Register the module's load/unload entry points with the kernel. */
2488 module_init(init_cpqarray_module);
2489 module_exit(cleanup_cpqarray_module);