1 /*
2 *******************************************************************************
3 **        O.S   : Linux
4 **   FILE NAME  : arcmsr_hba.c
5 **        BY    : Erich Chen
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA RAID Host adapter
8 *******************************************************************************
9 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10 **
11 **     Web site: www.areca.com.tw
12 **       E-mail: support@areca.com.tw
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License version 2 as
16 ** published by the Free Software Foundation.
17 ** This program is distributed in the hope that it will be useful,
18 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
19 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 ** GNU General Public License for more details.
21 *******************************************************************************
22 ** Redistribution and use in source and binary forms, with or without
23 ** modification, are permitted provided that the following conditions
24 ** are met:
25 ** 1. Redistributions of source code must retain the above copyright
26 **    notice, this list of conditions and the following disclaimer.
27 ** 2. Redistributions in binary form must reproduce the above copyright
28 **    notice, this list of conditions and the following disclaimer in the
29 **    documentation and/or other materials provided with the distribution.
30 ** 3. The name of the author may not be used to endorse or promote products
31 **    derived from this software without specific prior written permission.
32 **
33 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 *******************************************************************************
44 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45 **     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46 *******************************************************************************
47 */
48 #include <linux/module.h>
49 #include <linux/reboot.h>
50 #include <linux/spinlock.h>
51 #include <linux/pci_ids.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/errno.h>
55 #include <linux/types.h>
56 #include <linux/delay.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/timer.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
61 #include <asm/dma.h>
62 #include <asm/io.h>
63 #include <asm/system.h>
64 #include <asm/uaccess.h>
65 #include <scsi/scsi_host.h>
66 #include <scsi/scsi.h>
67 #include <scsi/scsi_cmnd.h>
68 #include <scsi/scsi_tcq.h>
69 #include <scsi/scsi_device.h>
70 #include <scsi/scsi_transport.h>
71 #include <scsi/scsicam.h>
72 #include "arcmsr.h"
73
74 MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
75 MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
76 MODULE_LICENSE("Dual BSD/GPL");
77 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
78
79 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
80                                         struct scsi_cmnd *cmd);
81 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
82 static int arcmsr_abort(struct scsi_cmnd *);
83 static int arcmsr_bus_reset(struct scsi_cmnd *);
84 static int arcmsr_bios_param(struct scsi_device *sdev,
85                 struct block_device *bdev, sector_t capacity, int *info);
86 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
87                                         void (*done) (struct scsi_cmnd *));
88 static int arcmsr_probe(struct pci_dev *pdev,
89                                 const struct pci_device_id *id);
90 static void arcmsr_remove(struct pci_dev *pdev);
91 static void arcmsr_shutdown(struct pci_dev *pdev);
92 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
93 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
94 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
95 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
96 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
97 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
98 static const char *arcmsr_info(struct Scsi_Host *);
99 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
100 static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
101                                                 pci_channel_state_t state);
102 static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
103 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
104                                                                 int queue_depth)
105 {
106         if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
107                 queue_depth = ARCMSR_MAX_CMD_PERLUN;
108         scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
109         return queue_depth;
110 }
111
112 static struct scsi_host_template arcmsr_scsi_host_template = {
113         .module                 = THIS_MODULE,
114         .name                   = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
115                                                         ARCMSR_DRIVER_VERSION,
116         .info                   = arcmsr_info,
117         .queuecommand           = arcmsr_queue_command,
118         .eh_abort_handler       = arcmsr_abort,
119         .eh_bus_reset_handler   = arcmsr_bus_reset,
120         .bios_param             = arcmsr_bios_param,
121         .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
122         .can_queue              = ARCMSR_MAX_OUTSTANDING_CMD,
123         .this_id                = ARCMSR_SCSI_INITIATOR_ID,
124         .sg_tablesize           = ARCMSR_MAX_SG_ENTRIES,
125         .max_sectors            = ARCMSR_MAX_XFER_SECTORS,
126         .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
127         .use_clustering         = ENABLE_CLUSTERING,
128         .shost_attrs            = arcmsr_host_attrs,
129 };
130 #ifdef CONFIG_SCSI_ARCMSR_AER
131 static struct pci_error_handlers arcmsr_pci_error_handlers = {
132         .error_detected         = arcmsr_pci_error_detected,
133         .slot_reset             = arcmsr_pci_slot_reset,
134 };
135 #endif
136 static struct pci_device_id arcmsr_device_id_table[] = {
137         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
138         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
139         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
140         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
141         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
142         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
143         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
144         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
145         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
146         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
147         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
148         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
149         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
150         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
151         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
152         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
153         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
154         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
155         {0, 0}, /* Terminating entry */
156 };
157 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
158 static struct pci_driver arcmsr_pci_driver = {
159         .name                   = "arcmsr",
160         .id_table               = arcmsr_device_id_table,
161         .probe                  = arcmsr_probe,
162         .remove                 = arcmsr_remove,
163         .shutdown               = arcmsr_shutdown,
164         #ifdef CONFIG_SCSI_ARCMSR_AER
165         .err_handler            = &arcmsr_pci_error_handlers,
166         #endif
167 };
168
169 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
170 {
171         irqreturn_t handle_state;
172         struct AdapterControlBlock *acb = dev_id;
173
174         spin_lock(acb->host->host_lock);
175         handle_state = arcmsr_interrupt(acb);
176         spin_unlock(acb->host->host_lock);
177
178         return handle_state;
179 }
180
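/*
** Legacy CHS fallback used when no valid partition table can be read:
** assume 64 heads x 32 sectors and, once that would yield more than 1024
** cylinders, switch to 255 heads x 63 sectors.  For example, a
** 4194304-sector (2GB) disk gives 4194304 / (64 * 32) = 2048 cylinders,
** so the 255 x 63 geometry is used instead: 4194304 / 16065 = 261
** cylinders.
*/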
181 static int arcmsr_bios_param(struct scsi_device *sdev,
182                 struct block_device *bdev, sector_t capacity, int *geom)
183 {
184         int ret, heads, sectors, cylinders, total_capacity;
185         unsigned char *buffer;/* return copy of block device's partition table */
186
187         buffer = scsi_bios_ptable(bdev);
188         if (buffer) {
189                 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
190                 kfree(buffer);
191                 if (ret != -1)
192                         return ret;
193         }
194         total_capacity = capacity;
195         heads = 64;
196         sectors = 32;
197         cylinders = total_capacity / (heads * sectors);
198         if (cylinders > 1024) {
199                 heads = 255;
200                 sectors = 63;
201                 cylinders = total_capacity / (heads * sectors);
202         }
203         geom[0] = heads;
204         geom[1] = sectors;
205         geom[2] = cylinders;
206         return 0;
207 }
208
209 static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
210 {
211         struct pci_dev *pdev = acb->pdev;
212         u16 dev_id;
213         pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
214         switch (dev_id) {
215         case 0x1201 : {
216                 acb->adapter_type = ACB_ADAPTER_TYPE_B;
217                 }
218                 break;
219
220         default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
221         }
222 }
223
224 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
225 {
226
227         switch (acb->adapter_type) {
228
229         case ACB_ADAPTER_TYPE_A: {
230                 struct pci_dev *pdev = acb->pdev;
231                 void *dma_coherent;
232                 dma_addr_t dma_coherent_handle, dma_addr;
233                 struct CommandControlBlock *ccb_tmp;
234                 uint32_t intmask_org;
235                 int i, j;
236
237                 acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
238                 if (!acb->pmu) {
239                         printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
240                                                         acb->host->host_no);
241                         return -ENOMEM;
242                 }
243                 dma_coherent = dma_alloc_coherent(&pdev->dev,
244                         ARCMSR_MAX_FREECCB_NUM *
245                         sizeof (struct CommandControlBlock) + 0x20,
246                         &dma_coherent_handle, GFP_KERNEL);
247                 if (!dma_coherent)
248                         return -ENOMEM;
249
250                 acb->dma_coherent = dma_coherent;
251                 acb->dma_coherent_handle = dma_coherent_handle;
252
253                 if (((unsigned long)dma_coherent & 0x1F)) {
254                         dma_coherent = dma_coherent +
255                                 (0x20 - ((unsigned long)dma_coherent & 0x1F));
256                         dma_coherent_handle = dma_coherent_handle +
257                                 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
258                 }
259
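                /*
                ** Keep every CommandControlBlock on a 32 byte boundary: the
                ** adapter is only ever given cdb_shifted_phyaddr, i.e. the
                ** CCB bus address shifted right by 5 bits, so the low five
                ** address bits are presumably assumed to be zero by the
                ** firmware.  The extra 0x20 bytes requested above leave room
                ** for this realignment.
                */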
260                 dma_addr = dma_coherent_handle;
261                 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
262                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
263                         ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
264                         ccb_tmp->acb = acb;
265                         acb->pccb_pool[i] = ccb_tmp;
266                         list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
267                         dma_addr = dma_addr + sizeof(struct CommandControlBlock);
268                         ccb_tmp++;
269                 }
270
271                 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
272                 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
273                         for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
274                                 acb->devstate[i][j] = ARECA_RAID_GONE;
275
276                 /*
277                 ** here we need to tell the iop 331 the high 32 bits of our
278                 ** ccb_tmp address (ccb_tmp.HighPart) if it is not zero
279                 */
280                 intmask_org = arcmsr_disable_outbound_ints(acb);
281                 }
282                 break;
283
284         case ACB_ADAPTER_TYPE_B: {
285
286                 struct pci_dev *pdev = acb->pdev;
287                 struct MessageUnit_B *reg;
288                 void *mem_base0, *mem_base1;
289                 void *dma_coherent;
290                 dma_addr_t dma_coherent_handle, dma_addr;
291                 uint32_t intmask_org;
292                 struct CommandControlBlock *ccb_tmp;
293                 int i, j;
294
295                 dma_coherent = dma_alloc_coherent(&pdev->dev,
296                         ((ARCMSR_MAX_FREECCB_NUM *
297                         sizeof(struct CommandControlBlock) + 0x20) +
298                         sizeof(struct MessageUnit_B)),
299                         &dma_coherent_handle, GFP_KERNEL);
300                 if (!dma_coherent)
301                         return -ENOMEM;
302
303                 acb->dma_coherent = dma_coherent;
304                 acb->dma_coherent_handle = dma_coherent_handle;
305
306                 if (((unsigned long)dma_coherent & 0x1F)) {
307                         dma_coherent = dma_coherent +
308                                 (0x20 - ((unsigned long)dma_coherent & 0x1F));
309                         dma_coherent_handle = dma_coherent_handle +
310                                 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
311                 }
312
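                /*
                ** For type B adapters the MessageUnit_B bookkeeping structure
                ** is carved out of the same coherent allocation, immediately
                ** after the CCB array.  Its register pointers are filled in
                ** below with ioremapped addresses: doorbells from BAR 0,
                ** message/ioctl buffers from BAR 2.
                */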
313                 reg = (struct MessageUnit_B *)(dma_coherent +
314                 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
315
316                 dma_addr = dma_coherent_handle;
317                 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
318                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
319                         ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
320                         ccb_tmp->acb = acb;
321                         acb->pccb_pool[i] = ccb_tmp;
322                         list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
323                         dma_addr = dma_addr + sizeof(struct CommandControlBlock);
324                         ccb_tmp++;
325                 }
326
329                 acb->pmu = (struct MessageUnit_B *)reg;
330                 mem_base0 = ioremap(pci_resource_start(pdev, 0),
331                                         pci_resource_len(pdev, 0));
332                 mem_base1 = ioremap(pci_resource_start(pdev, 2),
333                                         pci_resource_len(pdev, 2));
334                 reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 +
335                                                 ARCMSR_DRV2IOP_DOORBELL);
336                 reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
337                                                 ARCMSR_DRV2IOP_DOORBELL_MASK);
338                 reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 +
339                                                         ARCMSR_IOP2DRV_DOORBELL);
340                 reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
341                                                 ARCMSR_IOP2DRV_DOORBELL_MASK);
342                 reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 +
343                                                         ARCMSR_IOCTL_WBUFFER);
344                 reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 +
345                                                         ARCMSR_IOCTL_RBUFFER);
346                 reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 +
347                                                         ARCMSR_MSGCODE_RWBUFFER);
348
349                 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
350                 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
351                         for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
352                                 acb->devstate[i][j] = ARECA_RAID_GOOD;
353
354                 /*
355                 ** here we need to tell the iop 331 the high 32 bits of our
356                 ** ccb_tmp address (ccb_tmp.HighPart) if it is not zero
357                 */
358                 intmask_org = arcmsr_disable_outbound_ints(acb);
359                 }
360                 break;
361         }
362         return 0;
363 }
364
365 static int arcmsr_probe(struct pci_dev *pdev,
366         const struct pci_device_id *id)
367 {
368         struct Scsi_Host *host;
369         struct AdapterControlBlock *acb;
370         uint8_t bus, dev_fun;
371         int error;
372
373         error = pci_enable_device(pdev);
374         if (error)
375                 goto out;
376         pci_set_master(pdev);
377
378         host = scsi_host_alloc(&arcmsr_scsi_host_template,
379                         sizeof(struct AdapterControlBlock));
380         if (!host) {
381                 error = -ENOMEM;
382                 goto out_disable_device;
383         }
384         acb = (struct AdapterControlBlock *)host->hostdata;
385         memset(acb, 0, sizeof (struct AdapterControlBlock));
386
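        /*
        ** Prefer a 64 bit DMA mask and quietly fall back to a 32 bit mask if
        ** the platform cannot provide one; only if both fail is the probe
        ** aborted.
        */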
387         error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
388         if (error) {
389                 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
390                 if (error) {
391                         printk(KERN_WARNING
392                                "scsi%d: No suitable DMA mask available\n",
393                                host->host_no);
394                         goto out_host_put;
395                 }
396         }
397         bus = pdev->bus->number;
398         dev_fun = pdev->devfn;
399         acb->host = host;
400         acb->pdev = pdev;
401         host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
402         host->max_lun = ARCMSR_MAX_TARGETLUN;
403         host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
404         host->max_cmd_len = 16;    /* 16 byte CDBs are needed for 64 bit LBA, i.e. capacities over 2TB */
405         host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
406         host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
407         host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
408         host->this_id = ARCMSR_SCSI_INITIATOR_ID;
409         host->unique_id = (bus << 8) | dev_fun;
410         host->irq = pdev->irq;
411         error = pci_request_regions(pdev, "arcmsr");
412         if (error) {
413                 goto out_host_put;
414         }
415         arcmsr_define_adapter_type(acb);
416
417         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
418                            ACB_F_MESSAGE_RQBUFFER_CLEARED |
419                            ACB_F_MESSAGE_WQBUFFER_READED);
420         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
421         INIT_LIST_HEAD(&acb->ccb_free_list);
422
423         error = arcmsr_alloc_ccb_pool(acb);
424         if (error)
425                 goto out_release_regions;
426
427         error = request_irq(pdev->irq, arcmsr_do_interrupt,
428                                 IRQF_SHARED, "arcmsr", acb);
429         if (error)
430                 goto out_free_ccb_pool;
431
432         arcmsr_iop_init(acb);
433         pci_set_drvdata(pdev, host);
434         if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
435                 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
436
437         error = scsi_add_host(host, &pdev->dev);
438         if (error)
439                 goto out_free_irq;
440
441         error = arcmsr_alloc_sysfs_attr(acb);
442         if (error)
443                 goto out_free_sysfs;
444
445         scsi_scan_host(host);
446         #ifdef CONFIG_SCSI_ARCMSR_AER
447         pci_enable_pcie_error_reporting(pdev);
448         #endif
449         return 0;
450  out_free_sysfs:
451  out_free_irq:
452         free_irq(pdev->irq, acb);
453  out_free_ccb_pool:
454         arcmsr_free_ccb_pool(acb);
455         iounmap(acb->pmu);
456  out_release_regions:
457         pci_release_regions(pdev);
458  out_host_put:
459         scsi_host_put(host);
460  out_disable_device:
461         pci_disable_device(pdev);
462  out:
463         return error;
464 }
465
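/*
** Poll the type A outbound interrupt status for MESSAGE0_INT: up to
** 100 x 10ms per attempt (about one second) and at most 20 attempts
** (roughly 20 seconds in total).  Returns 0x00 once the firmware has
** acknowledged the message register command, 0xff on timeout.
*/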
466 static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
467 {
468         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
469         uint32_t Index;
470         uint8_t Retries = 0x00;
471
472         do {
473                 for (Index = 0; Index < 100; Index++) {
474                         if (readl(&reg->outbound_intstatus) &
475                                         ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
476                                 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
477                                         &reg->outbound_intstatus);
478                                 return 0x00;
479                         }
480                         msleep(10);
481                 }/*max 1 seconds*/
482
483         } while (Retries++ < 20);/*max 20 sec*/
484         return 0xff;
485 }
486
487 static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
488 {
489         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
490         uint32_t Index;
491         uint8_t Retries = 0x00;
492
493         do {
494                 for (Index = 0; Index < 100; Index++) {
495                         if (readl(reg->iop2drv_doorbell_reg)
496                                 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
497                                 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
498                                         , reg->iop2drv_doorbell_reg);
499                                 return 0x00;
500                         }
501                         msleep(10);
502                 }/*max 1 seconds*/
503
504         } while (Retries++ < 20);/*max 20 sec*/
505         return 0xff;
506 }
507
508 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
509 {
510         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
511
512         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
513         if (arcmsr_hba_wait_msgint_ready(acb))
514                 printk(KERN_NOTICE
515                         "arcmsr%d: wait 'abort all outstanding command' timeout \n"
516                         , acb->host->host_no);
517 }
518
519 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
520 {
521         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
522
523         writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
524         if (arcmsr_hbb_wait_msgint_ready(acb))
525                 printk(KERN_NOTICE
526                         "arcmsr%d: wait 'abort all outstanding command' timeout \n"
527                         , acb->host->host_no);
528 }
529
530 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
531 {
532         switch (acb->adapter_type) {
533         case ACB_ADAPTER_TYPE_A: {
534                 arcmsr_abort_hba_allcmd(acb);
535                 }
536                 break;
537
538         case ACB_ADAPTER_TYPE_B: {
539                 arcmsr_abort_hbb_allcmd(acb);
540                 }
541         }
542 }
543
544 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
545 {
546         struct scsi_cmnd *pcmd = ccb->pcmd;
547
548         scsi_dma_unmap(pcmd);
549 }
550
551 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
552 {
553         struct AdapterControlBlock *acb = ccb->acb;
554         struct scsi_cmnd *pcmd = ccb->pcmd;
555
556         arcmsr_pci_unmap_dma(ccb);
557         if (stand_flag == 1)
558                 atomic_dec(&acb->ccboutstandingcount);
559         ccb->startdone = ARCMSR_CCB_DONE;
560         ccb->ccb_flags = 0;
561         list_add_tail(&ccb->list, &acb->ccb_free_list);
562         pcmd->scsi_done(pcmd);
563 }
564
565 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
566 {
567         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
568         int retry_count = 30;
569
570         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
571         do {
572                 if (!arcmsr_hba_wait_msgint_ready(acb))
573                         break;
574                 else {
575                         retry_count--;
576                         printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
577                                 "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
578                 }
579         } while (retry_count != 0);
580 }
581
582 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
583 {
584         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
585         int retry_count = 30;
586
587         writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
588         do {
589                 if (!arcmsr_hbb_wait_msgint_ready(acb))
590                         break;
591                 else {
592                         retry_count--;
593                         printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
594                                 "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
595                 }
596         } while (retry_count != 0);
597 }
598
599 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
600 {
601         switch (acb->adapter_type) {
602
603         case ACB_ADAPTER_TYPE_A: {
604                 arcmsr_flush_hba_cache(acb);
605                 }
606                 break;
607
608         case ACB_ADAPTER_TYPE_B: {
609                 arcmsr_flush_hbb_cache(acb);
610                 }
611         }
612 }
613
614 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
615 {
616
617         struct scsi_cmnd *pcmd = ccb->pcmd;
618         struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
619
620         pcmd->result = DID_OK << 16;
621         if (sensebuffer) {
622                 int sense_data_length =
623                         sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
624                         ? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
625                 memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
626                 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
627                 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
628                 sensebuffer->Valid = 1;
629         }
630 }
631
632 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
633 {
634         u32 orig_mask = 0;
635         switch (acb->adapter_type) {
636
637         case ACB_ADAPTER_TYPE_A : {
638                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
639                 orig_mask = readl(&reg->outbound_intmask)|\
640                                 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
641                 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
642                                                 &reg->outbound_intmask);
643                 }
644                 break;
645
646         case ACB_ADAPTER_TYPE_B : {
647                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
648                 orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
649                                         (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
650                 writel(0, reg->iop2drv_doorbell_mask_reg);
651                 }
652                 break;
653         }
654         return orig_mask;
655 }
656
657 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
658                         struct CommandControlBlock *ccb, uint32_t flag_ccb)
659 {
660
661         uint8_t id, lun;
662         id = ccb->pcmd->device->id;
663         lun = ccb->pcmd->device->lun;
664         if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
665                 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
666                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
667                 ccb->pcmd->result = DID_OK << 16;
668                 arcmsr_ccb_complete(ccb, 1);
669         } else {
670                 switch (ccb->arcmsr_cdb.DeviceStatus) {
671                 case ARCMSR_DEV_SELECT_TIMEOUT: {
672                         acb->devstate[id][lun] = ARECA_RAID_GONE;
673                         ccb->pcmd->result = DID_NO_CONNECT << 16;
674                         arcmsr_ccb_complete(ccb, 1);
675                         }
676                         break;
677
678                 case ARCMSR_DEV_ABORTED:
679
680                 case ARCMSR_DEV_INIT_FAIL: {
681                         acb->devstate[id][lun] = ARECA_RAID_GONE;
682                         ccb->pcmd->result = DID_BAD_TARGET << 16;
683                         arcmsr_ccb_complete(ccb, 1);
684                         }
685                         break;
686
687                 case ARCMSR_DEV_CHECK_CONDITION: {
688                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
689                         arcmsr_report_sense_info(ccb);
690                         arcmsr_ccb_complete(ccb, 1);
691                         }
692                         break;
693
694                 default:
695                                 printk(KERN_NOTICE
696                                         "arcmsr%d: scsi id = %d lun = %d"
697                                         " isr get command error done, "
698                                         "but got unknown DeviceStatus = 0x%x \n"
699                                         , acb->host->host_no
700                                         , id
701                                         , lun
702                                         , ccb->arcmsr_cdb.DeviceStatus);
703                                         acb->devstate[id][lun] = ARECA_RAID_GONE;
704                                         ccb->pcmd->result = DID_NO_CONNECT << 16;
705                                         arcmsr_ccb_complete(ccb, 1);
706                         break;
707                 }
708         }
709 }
710
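/*
** The IOP reports a completion as a flag_ccb value: the CCB bus address
** pre-shifted right by five bits, apparently with status carried in the
** high bits (see ARCMSR_CCBREPLY_FLAG_ERROR).  Shifting back by five and
** adding vir2phy_offset recovers the driver's virtual CCB pointer; CCBs
** that do not belong to this adapter or are not in the ARCMSR_CCB_START
** state are flagged in the log.
*/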
711 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
712
713 {
714         struct CommandControlBlock *ccb;
715
716         ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
717         if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
718                 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
719                         struct scsi_cmnd *abortcmd = ccb->pcmd;
720                         if (abortcmd) {
721                                 abortcmd->result |= DID_ABORT << 16;
722                                 arcmsr_ccb_complete(ccb, 1);
723                                 printk(KERN_NOTICE "arcmsr%d: ccb = '0x%p' "
724                                         "isr got aborted command\n", acb->host->host_no, ccb);
725                         }
726                 }
727                 printk(KERN_NOTICE "arcmsr%d: isr got an illegal ccb command "
728                                 "done acb = '0x%p'"
729                                 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
730                                 " ccboutstandingcount = %d \n"
731                                 , acb->host->host_no
732                                 , acb
733                                 , ccb
734                                 , ccb->acb
735                                 , ccb->startdone
736                                 , atomic_read(&acb->ccboutstandingcount));
737         }
738         arcmsr_report_ccb_state(acb, ccb, flag_ccb);
739 }
740
741 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
742 {
743         int i = 0;
744         uint32_t flag_ccb;
745
746         switch (acb->adapter_type) {
747
748         case ACB_ADAPTER_TYPE_A: {
749                 struct MessageUnit_A __iomem *reg = \
750                         (struct MessageUnit_A *)acb->pmu;
751                 uint32_t outbound_intstatus;
752                 outbound_intstatus = readl(&reg->outbound_intstatus) & \
753                                         acb->outbound_int_enable;
754                 /*clear and abort all outbound posted Q*/
755                 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
756                 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) \
757                                 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
758                         arcmsr_drain_donequeue(acb, flag_ccb);
759                 }
760                 }
761                 break;
762
763         case ACB_ADAPTER_TYPE_B: {
764                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
765                 /*clear all outbound posted Q*/
766                 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
767                         if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
768                                 writel(0, &reg->done_qbuffer[i]);
769                                 arcmsr_drain_donequeue(acb, flag_ccb);
770                         }
771                         writel(0, &reg->post_qbuffer[i]);
772                 }
773                 reg->doneq_index = 0;
774                 reg->postq_index = 0;
775                 }
776                 break;
777         }
778 }
779 static void arcmsr_remove(struct pci_dev *pdev)
780 {
781         struct Scsi_Host *host = pci_get_drvdata(pdev);
782         struct AdapterControlBlock *acb =
783                 (struct AdapterControlBlock *) host->hostdata;
784         int poll_count = 0;
785
786         arcmsr_free_sysfs_attr(acb);
787         scsi_remove_host(host);
788         arcmsr_stop_adapter_bgrb(acb);
789         arcmsr_flush_adapter_cache(acb);
790         arcmsr_disable_outbound_ints(acb);
791         acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
792         acb->acb_flags &= ~ACB_F_IOP_INITED;
793
794         for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
795                 if (!atomic_read(&acb->ccboutstandingcount))
796                         break;
797                 arcmsr_interrupt(acb);/* FIXME: need spinlock */
798                 msleep(25);
799         }
800
801         if (atomic_read(&acb->ccboutstandingcount)) {
802                 int i;
803
804                 arcmsr_abort_allcmd(acb);
805                 arcmsr_done4abort_postqueue(acb);
806                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
807                         struct CommandControlBlock *ccb = acb->pccb_pool[i];
808                         if (ccb->startdone == ARCMSR_CCB_START) {
809                                 ccb->startdone = ARCMSR_CCB_ABORTED;
810                                 ccb->pcmd->result = DID_ABORT << 16;
811                                 arcmsr_ccb_complete(ccb, 1);
812                         }
813                 }
814         }
815
816         free_irq(pdev->irq, acb);
817         iounmap(acb->pmu);
818         arcmsr_free_ccb_pool(acb);
819         pci_release_regions(pdev);
820
821         scsi_host_put(host);
822
823         pci_disable_device(pdev);
824         pci_set_drvdata(pdev, NULL);
825 }
826
827 static void arcmsr_shutdown(struct pci_dev *pdev)
828 {
829         struct Scsi_Host *host = pci_get_drvdata(pdev);
830         struct AdapterControlBlock *acb =
831                 (struct AdapterControlBlock *)host->hostdata;
832
833         arcmsr_stop_adapter_bgrb(acb);
834         arcmsr_flush_adapter_cache(acb);
835 }
836
837 static int arcmsr_module_init(void)
838 {
839         int error = 0;
840
841         error = pci_register_driver(&arcmsr_pci_driver);
842         return error;
843 }
844
845 static void arcmsr_module_exit(void)
846 {
847         pci_unregister_driver(&arcmsr_pci_driver);
848 }
849 module_init(arcmsr_module_init);
850 module_exit(arcmsr_module_exit);
851
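/*
** arcmsr_disable_outbound_ints() returns the mask that was in effect so
** callers such as arcmsr_iop_parking() can hand it back here and restore
** interrupt delivery once their message-register handshake with the
** firmware is finished.
*/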
852 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
853                                                 u32 intmask_org)
854 {
855         u32 mask;
856
857         switch (acb->adapter_type) {
858
859         case ACB_ADAPTER_TYPE_A : {
860                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
861                 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
862                              ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
863                 writel(mask, &reg->outbound_intmask);
864                 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
865                 }
866                 break;
867
868         case ACB_ADAPTER_TYPE_B : {
869                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
870                 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
871                         ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
872                 writel(mask, reg->iop2drv_doorbell_mask_reg);
873                 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
874                 }
875         }
876 }
877
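/*
** Translate a scsi_cmnd into the adapter's ARCMSR_CDB: copy the SCSI CDB,
** then walk the DMA-mapped scatterlist emitting 32 bit SG entries while
** the upper address bits are zero and 64 bit entries (tagged with
** IS_SG64_ADDR) otherwise.  If the resulting CDB grows beyond 256 bytes,
** ARCMSR_CDB_FLAG_SGL_BSIZE is set, presumably so the IOP fetches the
** larger control block.
*/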
878 static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
879         struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
880 {
881         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
882         int8_t *psge = (int8_t *)&arcmsr_cdb->u;
883         uint32_t address_lo, address_hi;
884         int arccdbsize = 0x30;
885         int nseg;
886
887         ccb->pcmd = pcmd;
888         memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
889         arcmsr_cdb->Bus = 0;
890         arcmsr_cdb->TargetID = pcmd->device->id;
891         arcmsr_cdb->LUN = pcmd->device->lun;
892         arcmsr_cdb->Function = 1;
893         arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
894         arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
895         memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
896
897         nseg = scsi_dma_map(pcmd);
898         BUG_ON(nseg < 0);
899
900         if (nseg) {
901                 int length, i, cdb_sgcount = 0;
902                 struct scatterlist *sg;
903
904                 /* map the scsi_cmnd scatterlist to our iop SG list. */
905                 scsi_for_each_sg(pcmd, sg, nseg, i) {
906                         /* Get the physical address of the current data pointer */
907                         length = cpu_to_le32(sg_dma_len(sg));
908                         address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
909                         address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
910                         if (address_hi == 0) {
911                                 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
912
913                                 pdma_sg->address = address_lo;
914                                 pdma_sg->length = length;
915                                 psge += sizeof (struct SG32ENTRY);
916                                 arccdbsize += sizeof (struct SG32ENTRY);
917                         } else {
918                                 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
919
920                                 pdma_sg->addresshigh = address_hi;
921                                 pdma_sg->address = address_lo;
922                                 pdma_sg->length = length|IS_SG64_ADDR;
923                                 psge += sizeof (struct SG64ENTRY);
924                                 arccdbsize += sizeof (struct SG64ENTRY);
925                         }
926                         cdb_sgcount++;
927                 }
928                 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
929                 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
930                 if ( arccdbsize > 256)
931                         arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
932         }
933         if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
934                 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
935                 ccb->ccb_flags |= CCB_FLAG_WRITE;
936         }
937 }
938
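/*
** Hand a built CCB to the adapter.  Type A controllers take the shifted
** CCB address directly through inbound_queueport; type B controllers use
** the post_qbuffer ring in host memory, advancing postq_index modulo
** ARCMSR_MAX_HBB_POSTQUEUE and then ringing the drv2iop doorbell.
*/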
939 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
940 {
941         uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
942         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
943         atomic_inc(&acb->ccboutstandingcount);
944         ccb->startdone = ARCMSR_CCB_START;
945
946         switch (acb->adapter_type) {
947         case ACB_ADAPTER_TYPE_A: {
948                 struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
949
950                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
951                         writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
952                         &reg->inbound_queueport);
953                 else {
954                                 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
955                 }
956                 }
957                 break;
958
959         case ACB_ADAPTER_TYPE_B: {
960                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
961                 uint32_t ending_index, index = reg->postq_index;
962
963                 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
964                 writel(0, &reg->post_qbuffer[ending_index]);
965                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
966                         writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
967                                                  &reg->post_qbuffer[index]);
968                 }
969                 else {
970                         writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
971                 }
972                 index++;
973                 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
974                 reg->postq_index = index;
975                 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
976                 }
977                 break;
978         }
979 }
980
981 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
982 {
983         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
984         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
985         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
986
987         if (arcmsr_hba_wait_msgint_ready(acb)) {
988                 printk(KERN_NOTICE
989                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
990                         , acb->host->host_no);
991         }
992 }
993
994 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
995 {
996         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
997         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
998         writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
999
1000         if (arcmsr_hbb_wait_msgint_ready(acb)) {
1001                 printk(KERN_NOTICE
1002                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1003                         , acb->host->host_no);
1004         }
1005 }
1006
1007 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1008 {
1009         switch (acb->adapter_type) {
1010         case ACB_ADAPTER_TYPE_A: {
1011                 arcmsr_stop_hba_bgrb(acb);
1012                 }
1013                 break;
1014
1015         case ACB_ADAPTER_TYPE_B: {
1016                 arcmsr_stop_hbb_bgrb(acb);
1017                 }
1018                 break;
1019         }
1020 }
1021
1022 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1023 {
1024         dma_free_coherent(&acb->pdev->dev,
1025                 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1026                 acb->dma_coherent,
1027                 acb->dma_coherent_handle);
1028 }
1029
1030 void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1031 {
1032         switch (acb->adapter_type) {
1033         case ACB_ADAPTER_TYPE_A: {
1034                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1035                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1036                 }
1037                 break;
1038
1039         case ACB_ADAPTER_TYPE_B: {
1040                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1041                 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
1042                 }
1043                 break;
1044         }
1045 }
1046
1047 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1048 {
1049         switch (acb->adapter_type) {
1050         case ACB_ADAPTER_TYPE_A: {
1051                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1052                 /*
1053                 ** push inbound doorbell tell iop, driver data write ok
1054                 ** and wait reply on next hwinterrupt for next Qbuffer post
1055                 */
1056                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
1057                 }
1058                 break;
1059
1060         case ACB_ADAPTER_TYPE_B: {
1061                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1062                 /*
1063                 ** push inbound doorbell tell iop, driver data write ok
1064                 ** and wait reply on next hwinterrupt for next Qbuffer post
1065                 */
1066                 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
1067                 }
1068                 break;
1069         }
1070 }
1071
1072 struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1073 {
1074         struct QBUFFER *qbuffer = NULL;
1075
1076         switch (acb->adapter_type) {
1077
1078         case ACB_ADAPTER_TYPE_A: {
1079                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1080                 qbuffer = (struct QBUFFER __iomem *) &reg->message_rbuffer;
1081                 }
1082                 break;
1083
1084         case ACB_ADAPTER_TYPE_B: {
1085                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1086                 qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg;
1087                 }
1088                 break;
1089         }
1090         return qbuffer;
1091 }
1092
1093 static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1094 {
1095         struct QBUFFER *pqbuffer = NULL;
1096
1097         switch (acb->adapter_type) {
1098
1099         case ACB_ADAPTER_TYPE_A: {
1100                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1101                 pqbuffer = (struct QBUFFER *) &reg->message_wbuffer;
1102                 }
1103                 break;
1104
1105         case ACB_ADAPTER_TYPE_B: {
1106                 struct MessageUnit_B  *reg = (struct MessageUnit_B *)acb->pmu;
1107                 pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
1108                 }
1109                 break;
1110         }
1111         return pqbuffer;
1112 }
1113
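/*
** The IOP has placed message data in its QBUFFER; copy it byte by byte
** into the acb->rqbuffer ring.  Free space is computed as
** (firstindex - lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1), which relies on
** ARCMSR_MAX_QBUFFER being a power of two.  If the data does not fit, the
** ACB_F_IOPDATA_OVERFLOW flag is set and the read acknowledgement is
** deferred.
*/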
1114 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1115 {
1116         struct QBUFFER *prbuffer;
1117         struct QBUFFER *pQbuffer;
1118         uint8_t *iop_data;
1119         int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1120
1121         rqbuf_lastindex = acb->rqbuf_lastindex;
1122         rqbuf_firstindex = acb->rqbuf_firstindex;
1123         prbuffer = arcmsr_get_iop_rqbuffer(acb);
1124         iop_data = (uint8_t *)prbuffer->data;
1125         iop_len = prbuffer->data_len;
1126         my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
1127
1128         if (my_empty_len >= iop_len)
1129         {
1130                 while (iop_len > 0) {
1131                         pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
1132                         memcpy(pQbuffer, iop_data,1);
1133                         rqbuf_lastindex++;
1134                         rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1135                         iop_data++;
1136                         iop_len--;
1137                 }
1138                 acb->rqbuf_lastindex = rqbuf_lastindex;
1139                 arcmsr_iop_message_read(acb);
1140         }
1141
1142         else {
1143                 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1144         }
1145 }
1146
1147 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1148 {
1149         acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
1150         if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1151                 uint8_t *pQbuffer;
1152                 struct QBUFFER *pwbuffer;
1153                 uint8_t *iop_data;
1154                 int32_t allxfer_len = 0;
1155
1156                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1157                 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1158                 iop_data = (uint8_t __iomem *)pwbuffer->data;
1159
1160                 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
1161                                                         (allxfer_len < 124)) {
1162                         pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1163                         memcpy(iop_data, pQbuffer, 1);
1164                         acb->wqbuf_firstindex++;
1165                         acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1166                         iop_data++;
1167                         allxfer_len++;
1168                 }
1169                 pwbuffer->data_len = allxfer_len;
1170
1171                 arcmsr_iop_message_wrote(acb);
1172         }
1173
1174         if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1175                 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1176         }
1177 }
1178
1179 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1180 {
1181         uint32_t outbound_doorbell;
1182         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1183
1184         outbound_doorbell = readl(&reg->outbound_doorbell);
1185         writel(outbound_doorbell, &reg->outbound_doorbell);
1186         if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1187                 arcmsr_iop2drv_data_wrote_handle(acb);
1188         }
1189
1190         if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)    {
1191                 arcmsr_iop2drv_data_read_handle(acb);
1192         }
1193 }
1194
1195 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1196 {
1197         uint32_t flag_ccb;
1198         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1199
1200         while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
1201                 arcmsr_drain_donequeue(acb, flag_ccb);
1202         }
1203 }
1204
1205 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1206 {
1207         uint32_t index;
1208         uint32_t flag_ccb;
1209         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1210
1211         index = reg->doneq_index;
1212
1213         while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
1214                 writel(0, &reg->done_qbuffer[index]);
1215                 arcmsr_drain_donequeue(acb, flag_ccb);
1216                 index++;
1217                 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1218                 reg->doneq_index = index;
1219         }
1220 }
1221
1222 static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1223 {
1224         uint32_t outbound_intstatus;
1225         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1226
1227         outbound_intstatus = readl(&reg->outbound_intstatus) & \
1228                                                         acb->outbound_int_enable;
1229         if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))      {
1230                 return 1;
1231         }
1232         writel(outbound_intstatus, &reg->outbound_intstatus);
1233         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)       {
1234                 arcmsr_hba_doorbell_isr(acb);
1235         }
1236         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1237                 arcmsr_hba_postqueue_isr(acb);
1238         }
1239         return 0;
1240 }
1241
1242 static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1243 {
1244         uint32_t outbound_doorbell;
1245         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1246
1247         outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
1248                                                         acb->outbound_int_enable;
1249         if (!outbound_doorbell)
1250                 return 1;
1251
1252         writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1253
1254         if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)   {
1255                 arcmsr_iop2drv_data_wrote_handle(acb);
1256         }
1257         if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1258                 arcmsr_iop2drv_data_read_handle(acb);
1259         }
1260         if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1261                 arcmsr_hbb_postqueue_isr(acb);
1262         }
1263
1264         return 0;
1265 }
1266
1267 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1268 {
1269         switch (acb->adapter_type) {
1270         case ACB_ADAPTER_TYPE_A: {
1271                 if (arcmsr_handle_hba_isr(acb)) {
1272                         return IRQ_NONE;
1273                 }
1274                 }
1275                 break;
1276
1277         case ACB_ADAPTER_TYPE_B: {
1278                 if (arcmsr_handle_hbb_isr(acb)) {
1279                         return IRQ_NONE;
1280                 }
1281                 }
1282                 break;
1283         }
1284         return IRQ_HANDLED;
1285 }
1286
1287 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1288 {
1289         if (acb) {
1290                 /* stop adapter background rebuild */
1291                 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
1292                         uint32_t intmask_org;
1293                         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1294                         intmask_org = arcmsr_disable_outbound_ints(acb);
1295                         arcmsr_stop_adapter_bgrb(acb);
1296                         arcmsr_flush_adapter_cache(acb);
1297                         arcmsr_enable_outbound_ints(acb, intmask_org);
1298                 }
1299         }
1300 }
1301
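/*
** Push pending ioctl write data to the IOP: at most 124 bytes are copied
** from the wqbuffer ring into the IOP's QBUFFER per exchange before
** arcmsr_iop_message_wrote() rings the doorbell to tell the firmware the
** data is ready.
*/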
1302 void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1303 {
1304         int32_t wqbuf_firstindex, wqbuf_lastindex;
1305         uint8_t *pQbuffer;
1306         struct QBUFFER *pwbuffer;
1307         uint8_t *iop_data;
1308         int32_t allxfer_len = 0;
1309
1310         pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1311         iop_data = (uint8_t __iomem *)pwbuffer->data;
1312         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1313                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1314                 wqbuf_firstindex = acb->wqbuf_firstindex;
1315                 wqbuf_lastindex = acb->wqbuf_lastindex;
1316                 while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
1317                         pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
1318                         memcpy(iop_data, pQbuffer, 1);
1319                         wqbuf_firstindex++;
1320                         wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1321                         iop_data++;
1322                         allxfer_len++;
1323                 }
1324                 acb->wqbuf_firstindex = wqbuf_firstindex;
1325                 pwbuffer->data_len = allxfer_len;
1326                 arcmsr_iop_message_wrote(acb);
1327         }
1328 }
1329
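/*
** Service an Areca ioctl carried by a READ_BUFFER/WRITE_BUFFER command:
** the 32 bit control code comes from CDB bytes 5..8 and the payload is
** mapped from the command's single scatterlist entry.
*/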
1330 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1331                                         struct scsi_cmnd *cmd)
1332 {
1333         struct CMD_MESSAGE_FIELD *pcmdmessagefld;
1334         int retvalue = 0, transfer_len = 0;
1335         char *buffer;
1336         struct scatterlist *sg;
1337         uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
1338                                                 (uint32_t ) cmd->cmnd[6] << 16 |
1339                                                 (uint32_t ) cmd->cmnd[7] << 8  |
1340                                                 (uint32_t ) cmd->cmnd[8];
1341                                                 /* 4 bytes: Areca io control code */
1342
1343         sg = scsi_sglist(cmd);
1344         buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1345         if (scsi_sg_count(cmd) > 1) {
1346                 retvalue = ARCMSR_MESSAGE_FAIL;
1347                 goto message_out;
1348         }
1349         transfer_len += sg->length;
1350
1351         if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1352                 retvalue = ARCMSR_MESSAGE_FAIL;
1353                 goto message_out;
1354         }
1355         pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1356         switch(controlcode) {
1357
1358         case ARCMSR_MESSAGE_READ_RQBUFFER: {
1359                 unsigned long *ver_addr;
1360                 dma_addr_t buf_handle;
1361                 uint8_t *pQbuffer, *ptmpQbuffer;
1362                 int32_t allxfer_len = 0;
1363
1364                 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
1365                 if (!ver_addr) {
1366                         retvalue = ARCMSR_MESSAGE_FAIL;
1367                         goto message_out;
1368                 }
1369                 ptmpQbuffer = (uint8_t *) ver_addr;
1370                 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1371                         && (allxfer_len < 1031)) {
1372                         pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1373                         memcpy(ptmpQbuffer, pQbuffer, 1);
1374                         acb->rqbuf_firstindex++;
1375                         acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1376                         ptmpQbuffer++;
1377                         allxfer_len++;
1378                 }
1379                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1380
1381                         struct QBUFFER *prbuffer;
1382                         uint8_t *iop_data;
1383                         int32_t iop_len;
1384
1385                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1386                         prbuffer = arcmsr_get_iop_rqbuffer(acb);
1387                         iop_data = (uint8_t *)prbuffer->data;
1388                         iop_len = readl(&prbuffer->data_len);
1389                         while (iop_len > 0) {
1390                                 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
1391                                 acb->rqbuf_lastindex++;
1392                                 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1393                                 iop_data++;
1394                                 iop_len--;
1395                         }
1396                         arcmsr_iop_message_read(acb);
1397                 }
1398                 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
1399                 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1400                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1401                 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
1402                 }
1403                 break;
1404
1405         case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1406                 unsigned long *ver_addr;
1407                 dma_addr_t buf_handle;
1408                 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1409                 uint8_t *pQbuffer, *ptmpuserbuffer;
1410
1411                 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
1412                 if (!ver_addr) {
1413                         retvalue = ARCMSR_MESSAGE_FAIL;
1414                         goto message_out;
1415                 }
1416                 ptmpuserbuffer = (uint8_t *)ver_addr;
1417                 user_len = pcmdmessagefld->cmdmessage.Length;
1418                 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1419                 wqbuf_lastindex = acb->wqbuf_lastindex;
1420                 wqbuf_firstindex = acb->wqbuf_firstindex;
1421                 if (wqbuf_lastindex != wqbuf_firstindex) {
1422                         struct SENSE_DATA *sensebuffer =
1423                                 (struct SENSE_DATA *)cmd->sense_buffer;
1424                         arcmsr_post_ioctldata2iop(acb);
1425                         /* write queue not yet drained: report error sense data */
1426                         sensebuffer->ErrorCode = 0x70;
1427                         sensebuffer->SenseKey = ILLEGAL_REQUEST;
1428                         sensebuffer->AdditionalSenseLength = 0x0A;
1429                         sensebuffer->AdditionalSenseCode = 0x20;
1430                         sensebuffer->Valid = 1;
1431                         retvalue = ARCMSR_MESSAGE_FAIL;
1432                 } else {
1433                         my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
1434                                 & (ARCMSR_MAX_QBUFFER - 1);
1435                         if (my_empty_len >= user_len) {
1436                                 while (user_len > 0) {
1437                                         pQbuffer =
1438                                         &acb->wqbuffer[acb->wqbuf_lastindex];
1439                                         memcpy(pQbuffer, ptmpuserbuffer, 1);
1440                                         acb->wqbuf_lastindex++;
1441                                         acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1442                                         ptmpuserbuffer++;
1443                                         user_len--;
1444                                 }
1445                                 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1446                                         acb->acb_flags &=
1447                                                 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1448                                         arcmsr_post_ioctldata2iop(acb);
1449                                 }
1450                         } else {
1451                                 /* not enough free space: report error sense data */
1452                                 struct SENSE_DATA *sensebuffer =
1453                                         (struct SENSE_DATA *)cmd->sense_buffer;
1454                                 sensebuffer->ErrorCode = 0x70;
1455                                 sensebuffer->SenseKey = ILLEGAL_REQUEST;
1456                                 sensebuffer->AdditionalSenseLength = 0x0A;
1457                                 sensebuffer->AdditionalSenseCode = 0x20;
1458                                 sensebuffer->Valid = 1;
1459                                 retvalue = ARCMSR_MESSAGE_FAIL;
1460                         }
1461                 }
1462                 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
1463                 }
1464                 break;
1465
1466         case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1467                 uint8_t *pQbuffer = acb->rqbuffer;
1468
1469                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1470                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1471                         arcmsr_iop_message_read(acb);
1472                 }
1473                 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1474                 acb->rqbuf_firstindex = 0;
1475                 acb->rqbuf_lastindex = 0;
1476                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1477                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1478                 }
1479                 break;
1480
1481         case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1482                 uint8_t *pQbuffer = acb->wqbuffer;
1483
1484                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1485                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1486                         arcmsr_iop_message_read(acb);
1487                 }
1488                 acb->acb_flags |=
1489                         (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1490                                 ACB_F_MESSAGE_WQBUFFER_READED);
1491                 acb->wqbuf_firstindex = 0;
1492                 acb->wqbuf_lastindex = 0;
1493                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1494                 pcmdmessagefld->cmdmessage.ReturnCode =
1495                         ARCMSR_MESSAGE_RETURNCODE_OK;
1496                 }
1497                 break;
1498
1499         case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1500                 uint8_t *pQbuffer;
1501
1502                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1503                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1504                         arcmsr_iop_message_read(acb);
1505                 }
1506                 acb->acb_flags |=
1507                         (ACB_F_MESSAGE_WQBUFFER_CLEARED
1508                         | ACB_F_MESSAGE_RQBUFFER_CLEARED
1509                         | ACB_F_MESSAGE_WQBUFFER_READED);
1510                 acb->rqbuf_firstindex = 0;
1511                 acb->rqbuf_lastindex = 0;
1512                 acb->wqbuf_firstindex = 0;
1513                 acb->wqbuf_lastindex = 0;
1514                 pQbuffer = acb->rqbuffer;
1515                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1516                 pQbuffer = acb->wqbuffer;
1517                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1518                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1519                 }
1520                 break;
1521
1522         case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1523                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1524                 }
1525                 break;
1526
1527         case ARCMSR_MESSAGE_SAY_HELLO: {
1528                 char *hello_string = "Hello! I am ARCMSR";
1529
1530                 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1531                         , (int16_t)strlen(hello_string));
1532                 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1533                 }
1534                 break;
1535
1536         case ARCMSR_MESSAGE_SAY_GOODBYE:
1537                 arcmsr_iop_parking(acb);
1538                 break;
1539
1540         case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1541                 arcmsr_flush_adapter_cache(acb);
1542                 break;
1543
1544         default:
1545                 retvalue = ARCMSR_MESSAGE_FAIL;
1546         }
1547         message_out:
1548         sg = scsi_sglist(cmd);
1549         kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1550         return retvalue;
1551 }
1552
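/* take a command control block from the free list, or NULL if none is left */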
1553 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1554 {
1555         struct list_head *head = &acb->ccb_free_list;
1556         struct CommandControlBlock *ccb = NULL;
1557
1558         if (!list_empty(head)) {
1559                 ccb = list_entry(head->next, struct CommandControlBlock, list);
1560                 list_del(head->next);
1561         }
1562         return ccb;
1563 }
1564
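/*
** Handle commands addressed to the virtual message device: emulate a
** minimal INQUIRY response (TYPE_PROCESSOR, LUN 0 only) and route
** READ_BUFFER/WRITE_BUFFER through the IOP message interface; any other
** command is completed immediately.
*/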
1565 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1566                 struct scsi_cmnd *cmd)
1567 {
1568         switch (cmd->cmnd[0]) {
1569         case INQUIRY: {
1570                 unsigned char inqdata[36];
1571                 char *buffer;
1572                 struct scatterlist *sg;
1573
1574                 if (cmd->device->lun) {
1575                         cmd->result = (DID_TIME_OUT << 16);
1576                         cmd->scsi_done(cmd);
1577                         return;
1578                 }
1579                 inqdata[0] = TYPE_PROCESSOR;
1580                 /* Periph Qualifier & Periph Dev Type */
1581                 inqdata[1] = 0;
1582                 /* rem media bit & Dev Type Modifier */
1583                 inqdata[2] = 0;
1584                 /* ISO, ECMA, & ANSI versions */
1585                 inqdata[4] = 31;
1586                 /* length of additional data */
1587                 strncpy(&inqdata[8], "Areca   ", 8);
1588                 /* Vendor Identification */
1589                 strncpy(&inqdata[16], "RAID controller ", 16);
1590                 /* Product Identification */
1591                 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1592
1593                 sg = scsi_sglist(cmd);
1594                 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1595
1596                 memcpy(buffer, inqdata, sizeof(inqdata));
1597                 sg = scsi_sglist(cmd);
1598                 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1599
1600                 cmd->scsi_done(cmd);
1601         }
1602         break;
1603         case WRITE_BUFFER:
1604         case READ_BUFFER: {
1605                 if (arcmsr_iop_message_xfer(acb, cmd))
1606                         cmd->result = (DID_ERROR << 16);
1607                 cmd->scsi_done(cmd);
1608         }
1609         break;
1610         default:
1611                 cmd->scsi_done(cmd);
1612         }
1613 }
1614
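/*
** queuecommand entry point: SCSI id 16 is the virtual device used for
** IOP message transfer; real devices get a free CCB built and posted to
** the adapter.  Returns SCSI_MLQUEUE_HOST_BUSY while a bus reset is in
** progress, when no CCB is free, or when the outstanding command limit
** has been reached.
*/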
1615 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1616         void (* done)(struct scsi_cmnd *))
1617 {
1618         struct Scsi_Host *host = cmd->device->host;
1619         struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1620         struct CommandControlBlock *ccb;
1621         int target = cmd->device->id;
1622         int lun = cmd->device->lun;
1623
1624         cmd->scsi_done = done;
1625         cmd->host_scribble = NULL;
1626         cmd->result = 0;
1627         if (acb->acb_flags & ACB_F_BUS_RESET) {
1628                 printk(KERN_NOTICE "arcmsr%d: bus reset"
1629                         " and return busy \n"
1630                         , acb->host->host_no);
1631                 return SCSI_MLQUEUE_HOST_BUSY;
1632         }
1633         if (target == 16) {
1634                 /* virtual device for iop message transfer */
1635                 arcmsr_handle_virtual_command(acb, cmd);
1636                 return 0;
1637         }
1638         if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1639                 uint8_t block_cmd;
1640
1641                 block_cmd = cmd->cmnd[0] & 0x0f;
1642                 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1643                         printk(KERN_NOTICE
1644                                 "arcmsr%d: block 'read/write'"
1645                                 " command with gone raid volume"
1646                                 " Cmd = %2x, TargetId = %d, Lun = %d\n"
1647                                 , acb->host->host_no
1648                                 , cmd->cmnd[0]
1649                                 , target, lun);
1650                         cmd->result = (DID_NO_CONNECT << 16);
1651                         cmd->scsi_done(cmd);
1652                         return 0;
1653                 }
1654         }
1655         if (atomic_read(&acb->ccboutstandingcount) >=
1656                         ARCMSR_MAX_OUTSTANDING_CMD)
1657                 return SCSI_MLQUEUE_HOST_BUSY;
1658
1659         ccb = arcmsr_get_freeccb(acb);
1660         if (!ccb)
1661                 return SCSI_MLQUEUE_HOST_BUSY;
1662
1663         arcmsr_build_ccb(acb, ccb, cmd);
1664         arcmsr_post_ccb(acb, ccb);
1665         return 0;
1666 }
1667
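/*
** Ask a type A adapter for its firmware configuration: the GET_CONFIG
** message fills the message rwbuffer with the firmware model (8 bytes)
** and version (16 bytes) strings plus the request length, queue depth,
** SDRAM size and number of drive channels.
*/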
1668 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1669 {
1670         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1671         char *acb_firm_model = acb->firm_model;
1672         char *acb_firm_version = acb->firm_version;
1673         char *iop_firm_model = (char *) (&reg->message_rwbuffer[15]);
1674         char *iop_firm_version = (char *) (&reg->message_rwbuffer[17]);
1675         int count;
1676
1677         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1678         if (arcmsr_hba_wait_msgint_ready(acb)) {
1679                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1680                         "miscellaneous data' timeout\n", acb->host->host_no);
1681         }
1682
1683         count = 8;
1684         while (count) {
1685                 *acb_firm_model = readb(iop_firm_model);
1686                 acb_firm_model++;
1687                 iop_firm_model++;
1688                 count--;
1689         }
1690
1691         count = 16;
1692         while (count) {
1693                 *acb_firm_version = readb(iop_firm_version);
1694                 acb_firm_version++;
1695                 iop_firm_version++;
1696                 count--;
1697         }
1698
1699         printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s\n"
1700                 , acb->host->host_no
1701                 , acb->firm_version);
1702
1703         acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1704         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1705         acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1706         acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1707 }
1708
1709 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1710 {
1711         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1712         uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg;
1713         char *acb_firm_model = acb->firm_model;
1714         char *acb_firm_version = acb->firm_version;
1715         char *iop_firm_model = (char *) (&lrwbuffer[15]);
1716         /*firm_model,15,60-67*/
1717         char *iop_firm_version = (char *) (&lrwbuffer[17]);
1718         /*firm_version,17,68-83*/
1719         int count;
1720
1721         writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1722         if (arcmsr_hbb_wait_msgint_ready(acb)) {
1723                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
1724                         "miscellaneous data' timeout\n", acb->host->host_no);
1725         }
1726
1727         count = 8;
1728         while (count) {
1730                 *acb_firm_model = readb(iop_firm_model);
1731                 acb_firm_model++;
1732                 iop_firm_model++;
1733                 count--;
1734         }
1735
1736         count = 16;
1737         while (count) {
1739                 *acb_firm_version = readb(iop_firm_version);
1740                 acb_firm_version++;
1741                 iop_firm_version++;
1742                 count--;
1743         }
1744
1745         printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
1746                         acb->host->host_no,
1747                         acb->firm_version);
1748
1749         lrwbuffer++;
1750         acb->firm_request_len = readl(lrwbuffer++);
1751         /*firm_request_len,1,04-07*/
1752         acb->firm_numbers_queue = readl(lrwbuffer++);
1753         /*firm_numbers_queue,2,08-11*/
1754         acb->firm_sdram_size = readl(lrwbuffer++);
1755         /*firm_sdram_size,3,12-15*/
1756         acb->firm_hd_channels = readl(lrwbuffer);
1757         /*firm_ide_channels,4,16-19*/
1758 }
1759
1760 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1761 {
1762         switch (acb->adapter_type) {
1763         case ACB_ADAPTER_TYPE_A: {
1764                 arcmsr_get_hba_config(acb);
1765                 }
1766                 break;
1767
1768         case ACB_ADAPTER_TYPE_B: {
1769                 arcmsr_get_hbb_config(acb);
1770                 }
1771                 break;
1772         }
1773 }
1774
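/*
** Poll the type A outbound completion queue directly (clearing the
** outbound interrupt status itself) until poll_ccb is reaped, an aborted
** CCB is completed, or about 100 retries of 25 ms each pass without any
** completion.
*/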
1775 static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
1776         struct CommandControlBlock *poll_ccb)
1777 {
1778         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1779         struct CommandControlBlock *ccb;
1780         uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1781
1782         polling_hba_ccb_retry:
1783         poll_count++;
1784         outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
1785         writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1786         while (1) {
1787                 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1788                         if (poll_ccb_done)
1789                                 break;
1790                         else {
1791                                 msleep(25);
1792                                 if (poll_count > 100)
1793                                         break;
1794                                 goto polling_hba_ccb_retry;
1795                         }
1796                 }
1797                 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
1798                 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
1799                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1800                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1801                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
1802                                         " poll command aborted successfully\n"
1803                                         , acb->host->host_no
1804                                         , ccb->pcmd->device->id
1805                                         , ccb->pcmd->device->lun
1806                                         , ccb);
1807                                 ccb->pcmd->result = DID_ABORT << 16;
1808                                 arcmsr_ccb_complete(ccb, 1);
1809                                 poll_ccb_done = 1;
1810                                 continue;
1811                         }
1812                         printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
1813                                 " command done ccb = '0x%p'"
1814                                 " ccboutstandingcount = %d\n"
1815                                 , acb->host->host_no
1816                                 , ccb
1817                                 , atomic_read(&acb->ccboutstandingcount));
1818                         continue;
1819                 }
1820                 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1821         }
1822 }
1823
1824 static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
1825                                         struct CommandControlBlock *poll_ccb)
1826 {
1827         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1828         struct CommandControlBlock *ccb;
1829         uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
1830         int index;
1831
1832         polling_hbb_ccb_retry:
1833         poll_count++;
1834         /* clear doorbell interrupt */
1835         writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1836         while (1) {
1837                 index = reg->doneq_index;
1838                 if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
1839                         if (poll_ccb_done)
1840                                 break;
1841                         else {
1842                                 msleep(25);
1843                                 if (poll_count > 100)
1844                                         break;
1845                                 goto polling_hbb_ccb_retry;
1846                         }
1847                 }
1848                 writel(0, &reg->done_qbuffer[index]);
1849                 index++;
1850                 /* if at the last index, wrap back to 0 */
1851                 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1852                 reg->doneq_index = index;
1853                 /* check if command done with no error */
1854                 ccb = (struct CommandControlBlock *)\
1855                         (acb->vir2phy_offset + (flag_ccb << 5));/* frame must be 32 bytes aligned */
1856                 poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
1857                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1858                         if (ccb->startdone == ARCMSR_CCB_ABORTED) {
1859                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d"
1860                                         " ccb = '0x%p' poll command aborted successfully\n"
1861                                         , acb->host->host_no
1862                                         , ccb->pcmd->device->id
1863                                         , ccb->pcmd->device->lun
1864                                         , ccb);
1865                                 ccb->pcmd->result = DID_ABORT << 16;
1866                                 arcmsr_ccb_complete(ccb, 1);
1867                                 continue;
1868                         }
1869                         printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
1870                                 " command done ccb = '0x%p'"
1871                                 " ccboutstandingcount = %d\n"
1872                                 , acb->host->host_no
1873                                 , ccb
1874                                 , atomic_read(&acb->ccboutstandingcount));
1875                         continue;
1876                 }
1877                 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
1878         }       /* drain reply FIFO */
1879 }
1880
1881 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
1882                                         struct CommandControlBlock *poll_ccb)
1883 {
1884         switch (acb->adapter_type) {
1885
1886         case ACB_ADAPTER_TYPE_A: {
1887                 arcmsr_polling_hba_ccbdone(acb, poll_ccb);
1888                 }
1889                 break;
1890
1891         case ACB_ADAPTER_TYPE_B: {
1892                 arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
1893                 }
1894         }
1895 }
1896
1897 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
1898 {
1899         uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
1900         dma_addr_t dma_coherent_handle;
1901         /*
1902         ********************************************************************
1903         ** here we need to tell iop 331 our freeccb.HighPart
1904         ** if freeccb.HighPart is not zero
1905         ********************************************************************
1906         */
1907         dma_coherent_handle = acb->dma_coherent_handle;
1908         cdb_phyaddr = (uint32_t)(dma_coherent_handle);
1909         ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
1910         /*
1911         ***********************************************************************
1912         **    if adapter type B, set window of "post command Q"
1913         ***********************************************************************
1914         */
1915         switch (acb->adapter_type) {
1916
1917         case ACB_ADAPTER_TYPE_A: {
1918                 if (ccb_phyaddr_hi32 != 0) {
1919                         struct MessageUnit_A __iomem *reg = \
1920                                         (struct MessageUnit_A *)acb->pmu;
1921                         uint32_t intmask_org;
1922                         intmask_org = arcmsr_disable_outbound_ints(acb);
1923                         writel(ARCMSR_SIGNATURE_SET_CONFIG, \
1924                                                 &reg->message_rwbuffer[0]);
1925                         writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
1926                         writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
1927                                                         &reg->inbound_msgaddr0);
1928                         if (arcmsr_hba_wait_msgint_ready(acb)) {
1929                                 printk(KERN_NOTICE "arcmsr%d: set ccb high "
1930                                         "part physical address timeout\n",
1931                                         acb->host->host_no);
1932                                 return 1;
1933                         }
1934                         arcmsr_enable_outbound_ints(acb, intmask_org);
1935                 }
1936                 }
1937                 break;
1938
1939         case ACB_ADAPTER_TYPE_B: {
1940                 unsigned long post_queue_phyaddr;
1941                 uint32_t *rwbuffer;
1942
1943                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
1944                 uint32_t intmask_org;
1945                 intmask_org = arcmsr_disable_outbound_ints(acb);
1946                 reg->postq_index = 0;
1947                 reg->doneq_index = 0;
1948                 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
1949                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1950                         printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
1951                                 acb->host->host_no);
1952                         return 1;
1953                 }
1954                 post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \
1955                         sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer);
1956                 rwbuffer = reg->msgcode_rwbuffer_reg;
1957                 /* driver "set config" signature */
1958                 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
1959                 /* normal should be zero */
1960                 writel(ccb_phyaddr_hi32, rwbuffer++);
1961                 /* postQ base address; queue occupies (256 + 8)*4 bytes */
1962                 writel(post_queue_phyaddr, rwbuffer++);
1963                 /* doneQ base address = postQ base + (256 + 8)*4 */
1964                 writel(post_queue_phyaddr + 1056, rwbuffer++);
1965                 /* ccb maxQ size must be --> [(256 + 8)*4] = 1056 */
1966                 writel(1056, rwbuffer);
1967
1968                 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
1969                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1970                         printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
1971                                 "timeout\n", acb->host->host_no);
1972                         return 1;
1973                 }
1974
1975                 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
1976                 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1977                         printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n"
1978                                 , acb->host->host_no);
1979                         return 1;
1980                 }
1981                 arcmsr_enable_outbound_ints(acb, intmask_org);
1982                 }
1983                 break;
1984         }
1985         return 0;
1986 }
1987
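/* spin until the adapter firmware reports it is ready; note: no timeout */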
1988 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
1989 {
1990         uint32_t firmware_state = 0;
1991
1992         switch (acb->adapter_type) {
1993
1994         case ACB_ADAPTER_TYPE_A: {
1995                 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
1996                 do {
1997                         firmware_state = readl(&reg->outbound_msgaddr1);
1998                 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
1999                 }
2000                 break;
2001
2002         case ACB_ADAPTER_TYPE_B: {
2003                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
2004                 do {
2005                         firmware_state = readl(reg->iop2drv_doorbell_reg);
2006                 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2007                 }
2008                 break;
2009         }
2010 }
2011
2012 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2013 {
2014         struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
2015         acb->acb_flags |= ACB_F_MSG_START_BGRB;
2016         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2017         if (arcmsr_hba_wait_msgint_ready(acb)) {
2018                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
2019                                 "rebuild' timeout\n", acb->host->host_no);
2020         }
2021 }
2022
2023 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2024 {
2025         struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
2026         acb->acb_flags |= ACB_F_MSG_START_BGRB;
2027         writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
2028         if (arcmsr_hbb_wait_msgint_ready(acb)) {
2029                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
2030                                 "rebuild' timeout\n", acb->host->host_no);
2031         }
2032 }
2033
2034 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2035 {
2036         switch (acb->adapter_type) {
2037         case ACB_ADAPTER_TYPE_A:
2038                 arcmsr_start_hba_bgrb(acb);
2039                 break;
2040         case ACB_ADAPTER_TYPE_B:
2041                 arcmsr_start_hbb_bgrb(acb);
2042                 break;
2043         }
2044 }
2045
2046 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2047 {
2048         switch (acb->adapter_type) {
2049         case ACB_ADAPTER_TYPE_A: {
2050                 struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
2051                 uint32_t outbound_doorbell;
2052                 /* empty doorbell Qbuffer if door bell ringed */
2053                 outbound_doorbell = readl(&reg->outbound_doorbell);
2054                 /*clear doorbell interrupt */
2055                 writel(outbound_doorbell, &reg->outbound_doorbell);
2056                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2057                 }
2058                 break;
2059
2060         case ACB_ADAPTER_TYPE_B: {
2061                 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
2062                 /*clear interrupt and message state*/
2063                 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
2064                 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
2065                 /* let IOP know data has been read */
2066                 }
2067                 break;
2068         }
2069 }
2070
2071 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2072 {
2073         uint32_t intmask_org;
2074
2075         arcmsr_wait_firmware_ready(acb);
2076         arcmsr_iop_confirm(acb);
2077         /* disable all outbound interrupt */
2078         intmask_org = arcmsr_disable_outbound_ints(acb);
2079         arcmsr_get_firmware_spec(acb);
2080         /*start background rebuild*/
2081         arcmsr_start_adapter_bgrb(acb);
2082         /* empty doorbell Qbuffer if door bell ringed */
2083         arcmsr_clear_doorbell_queue_buffer(acb);
2084         /* enable outbound Post Queue,outbound doorbell Interrupt */
2085         arcmsr_enable_outbound_ints(acb, intmask_org);
2086         acb->acb_flags |= ACB_F_IOP_INITED;
2087 }
2088
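/*
** Abort everything outstanding on the adapter: ask the IOP to abort all
** commands, wait 3 seconds, then drain the outbound post queue and
** complete every CCB still marked as started, with outbound interrupts
** masked while the queues are drained.
*/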
2089 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
2090 {
2091         struct CommandControlBlock *ccb;
2092         uint32_t intmask_org;
2093         int i = 0;
2094
2095         if (atomic_read(&acb->ccboutstandingcount) != 0) {
2096                 /* tell the iop 331 to abort all outstanding commands */
2097                 arcmsr_abort_allcmd(acb);
2098                 /* wait 3 seconds for all commands to be aborted */
2099                 ssleep(3);
2100                 /* disable all outbound interrupt */
2101                 intmask_org = arcmsr_disable_outbound_ints(acb);
2102                 /* clear all outbound posted Q */
2103                 arcmsr_done4abort_postqueue(acb);
2104                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2105                         ccb = acb->pccb_pool[i];
2106                         if (ccb->startdone == ARCMSR_CCB_START) {
2107                                 ccb->startdone = ARCMSR_CCB_ABORTED;
2108                                 arcmsr_ccb_complete(ccb, 1);
2109                         }
2110                 }
2111                 /* enable all outbound interrupt */
2112                 arcmsr_enable_outbound_ints(acb, intmask_org);
2113         }
2114 }
2115
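/*
** eh_bus_reset handler: wait up to 400 x 25 ms for outstanding commands
** to drain (servicing the interrupt handler by hand), then reset the IOP
** and clear the bus reset flag.
*/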
2116 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2117 {
2118         struct AdapterControlBlock *acb =
2119                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2120         int i;
2121
2122         acb->num_resets++;
2123         acb->acb_flags |= ACB_F_BUS_RESET;
2124         for (i = 0; i < 400; i++) {
2125                 if (!atomic_read(&acb->ccboutstandingcount))
2126                         break;
2127                 arcmsr_interrupt(acb);/* FIXME: need spinlock */
2128                 msleep(25);
2129         }
2130         arcmsr_iop_reset(acb);
2131         acb->acb_flags &= ~ACB_F_BUS_RESET;
2132         return SUCCESS;
2133 }
2134
2135 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
2136                 struct CommandControlBlock *ccb)
2137 {
2138         u32 intmask;
2139
2140         ccb->startdone = ARCMSR_CCB_ABORTED;
2141
2142         /*
2143         ** Wait 3 seconds for all outstanding commands to complete.
2144         */
2145         ssleep(3);
2146
2147         intmask = arcmsr_disable_outbound_ints(acb);
2148         arcmsr_polling_ccbdone(acb, ccb);
2149         arcmsr_enable_outbound_ints(acb, intmask);
2150 }
2151
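/*
** eh_abort handler: find the CCB that carries this scsi_cmnd and poll it
** to completion with outbound interrupts masked; always reports SUCCESS.
*/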
2152 static int arcmsr_abort(struct scsi_cmnd *cmd)
2153 {
2154         struct AdapterControlBlock *acb =
2155                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2156         int i = 0;
2157
2158         printk(KERN_NOTICE
2159                 "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
2160                 acb->host->host_no, cmd->device->id, cmd->device->lun);
2161         acb->num_aborts++;
2162         /*
2163         ************************************************
2164         ** the interrupt service routine is locked out while the error
2165         ** handler runs, so finish the abort as quickly as possible and exit
2166         ************************************************
2167         */
2168         if (!atomic_read(&acb->ccboutstandingcount))
2169                 return SUCCESS;
2170
2171         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2172                 struct CommandControlBlock *ccb = acb->pccb_pool[i];
2173                 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
2174                         arcmsr_abort_one_cmd(acb, ccb);
2175                         break;
2176                 }
2177         }
2178
2179         return SUCCESS;
2180 }
2181
2182 static const char *arcmsr_info(struct Scsi_Host *host)
2183 {
2184         struct AdapterControlBlock *acb =
2185                 (struct AdapterControlBlock *) host->hostdata;
2186         static char buf[256];
2187         char *type;
2188         int raid6 = 1;
2189
2190         switch (acb->pdev->device) {
2191         case PCI_DEVICE_ID_ARECA_1110:
2192         case PCI_DEVICE_ID_ARECA_1200:
2193         case PCI_DEVICE_ID_ARECA_1202:
2194         case PCI_DEVICE_ID_ARECA_1210:
2195                 raid6 = 0;
2196                 /*FALLTHRU*/
2197         case PCI_DEVICE_ID_ARECA_1120:
2198         case PCI_DEVICE_ID_ARECA_1130:
2199         case PCI_DEVICE_ID_ARECA_1160:
2200         case PCI_DEVICE_ID_ARECA_1170:
2201         case PCI_DEVICE_ID_ARECA_1201:
2202         case PCI_DEVICE_ID_ARECA_1220:
2203         case PCI_DEVICE_ID_ARECA_1230:
2204         case PCI_DEVICE_ID_ARECA_1260:
2205         case PCI_DEVICE_ID_ARECA_1270:
2206         case PCI_DEVICE_ID_ARECA_1280:
2207                 type = "SATA";
2208                 break;
2209         case PCI_DEVICE_ID_ARECA_1380:
2210         case PCI_DEVICE_ID_ARECA_1381:
2211         case PCI_DEVICE_ID_ARECA_1680:
2212         case PCI_DEVICE_ID_ARECA_1681:
2213                 type = "SAS";
2214                 break;
2215         default:
2216                 type = "X-TYPE";
2217                 break;
2218         }
2219         sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
2220                         type, raid6 ? "( RAID6 capable)" : "",
2221                         ARCMSR_DRIVER_VERSION);
2222         return buf;
2223 }
2224 #ifdef CONFIG_SCSI_ARCMSR_AER
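/*
** PCI AER slot reset callback: re-enable the device, mark every
** target/lun as ARECA_RAID_GONE and repeat the IOP bring-up sequence
** used by arcmsr_iop_init().
*/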
2225 static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2226 {
2227         struct Scsi_Host *host = pci_get_drvdata(pdev);
2228         struct AdapterControlBlock *acb =
2229                 (struct AdapterControlBlock *) host->hostdata;
2230         uint32_t intmask_org;
2231         int i, j;
2232
2233         if (pci_enable_device(pdev)) {
2234                 return PCI_ERS_RESULT_DISCONNECT;
2235         }
2236         pci_set_master(pdev);
2237         intmask_org = arcmsr_disable_outbound_ints(acb);
2238         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2239                            ACB_F_MESSAGE_RQBUFFER_CLEARED |
2240                            ACB_F_MESSAGE_WQBUFFER_READED);
2241         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2242         for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
2243                 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
2244                         acb->devstate[i][j] = ARECA_RAID_GONE;
2245
2246         arcmsr_wait_firmware_ready(acb);
2247         arcmsr_iop_confirm(acb);
2248         /* outbound interrupts were already disabled above */
2249         arcmsr_get_firmware_spec(acb);
2250         /*start background rebuild*/
2251         arcmsr_start_adapter_bgrb(acb);
2252         /* empty doorbell Qbuffer if door bell ringed */
2253         arcmsr_clear_doorbell_queue_buffer(acb);
2254         /* enable outbound Post Queue,outbound doorbell Interrupt */
2255         arcmsr_enable_outbound_ints(acb, intmask_org);
2256         acb->acb_flags |= ACB_F_IOP_INITED;
2257
2258         pci_enable_pcie_error_reporting(pdev);
2259         return PCI_ERS_RESULT_RECOVERED;
2260 }
2261
2262 static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
2263 {
2264         struct Scsi_Host *host = pci_get_drvdata(pdev);
2265         struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
2266         struct CommandControlBlock *ccb;
2267         uint32_t intmask_org;
2268         int i = 0;
2269
2270         if (atomic_read(&acb->ccboutstandingcount) != 0) {
2271                 /* tell the iop 331 to abort all outstanding commands */
2272                 arcmsr_abort_allcmd(acb);
2273                 /* wait 3 seconds for all commands to be aborted */
2274                 ssleep(3);
2275                 /* disable all outbound interrupt */
2276                 intmask_org = arcmsr_disable_outbound_ints(acb);
2277                 /* clear all outbound posted Q */
2278                 arcmsr_done4abort_postqueue(acb);
2279                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2280                         ccb = acb->pccb_pool[i];
2281                         if (ccb->startdone == ARCMSR_CCB_START) {
2282                                 ccb->startdone = ARCMSR_CCB_ABORTED;
2283                                 arcmsr_ccb_complete(ccb, 1);
2284                         }
2285                 }
2286                 /* enable all outbound interrupt */
2287                 arcmsr_enable_outbound_ints(acb, intmask_org);
2288         }
2289         pci_disable_device(pdev);
2290 }
2291
2292 static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
2293 {
2294         struct Scsi_Host *host = pci_get_drvdata(pdev);
2295         struct AdapterControlBlock *acb = \
2296                 (struct AdapterControlBlock *)host->hostdata;
2297
2298         arcmsr_stop_adapter_bgrb(acb);
2299         arcmsr_flush_adapter_cache(acb);
2300 }
2301
2302 static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
2303                                                 pci_channel_state_t state)
2304 {
2305         switch (state) {
2306         case pci_channel_io_frozen:
2307                 arcmsr_pci_ers_need_reset_forepart(pdev);
2308                 return PCI_ERS_RESULT_NEED_RESET;
2309         case pci_channel_io_perm_failure:
2310                 arcmsr_pci_ers_disconnect_forepart(pdev);
2311                 return PCI_ERS_RESULT_DISCONNECT;
2313         default:
2314                 return PCI_ERS_RESULT_NEED_RESET;
2315         }
2316 }
2317 #endif