X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=drivers%2Fscsi%2Flibata-core.c;h=eeeeda0481a2aec0fd5b0842c44e164e9bc14b68;hb=a1af37344f669d0fefa8c8a9e37eb6a7c086a2c2;hp=5d00bb721e23bb3b5d61cb5eee71a4b7a7f74346;hpb=f01c18456993bab43067b678f56c87ca954aa43b;p=powerpc.git diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 5d00bb721e..eeeeda0481 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -64,9 +64,10 @@ static unsigned int ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); static void ata_set_mode(struct ata_port *ap); -static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); -static unsigned int ata_dev_xfermask(struct ata_port *ap, - struct ata_device *dev); +static unsigned int ata_dev_set_xfermode(struct ata_port *ap, + struct ata_device *dev); +static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev); +static void ata_pio_error(struct ata_port *ap); static unsigned int ata_unique_id = 1; static struct workqueue_struct *ata_wq; @@ -190,7 +191,7 @@ static const u8 ata_rw_cmds[] = { * ata_rwcmd_protocol - set taskfile r/w commands and protocol * @qc: command to examine and configure * - * Examine the device configuration and tf->flags to calculate + * Examine the device configuration and tf->flags to calculate * the proper read/write commands and protocol to use. * * LOCKING: @@ -203,7 +204,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) u8 cmd; int index, fua, lba48, write; - + fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; @@ -252,6 +253,29 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask, ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); } +/** + * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks + * @xfer_mask: xfer_mask to unpack + * @pio_mask: resulting pio_mask + * @mwdma_mask: resulting mwdma_mask + * @udma_mask: resulting udma_mask + * + * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. + * Any NULL distination masks will be ignored. + */ +static void ata_unpack_xfermask(unsigned int xfer_mask, + unsigned int *pio_mask, + unsigned int *mwdma_mask, + unsigned int *udma_mask) +{ + if (pio_mask) + *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; + if (mwdma_mask) + *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; + if (udma_mask) + *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; +} + static const struct ata_xfer_ent { unsigned int shift, bits; u8 base; @@ -372,6 +396,15 @@ static const char *ata_mode_string(unsigned int xfer_mask) return ""; } +static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) +{ + if (ata_dev_present(dev)) { + printk(KERN_WARNING "ata%u: dev %u disabled\n", + ap->id, dev->devno); + dev->class++; + } +} + /** * ata_pio_devchk - PATA device presence detection * @ap: ATA channel to examine @@ -987,6 +1020,22 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, ata_qc_free(qc); + /* XXX - Some LLDDs (sata_mv) disable port on command failure. + * Until those drivers are fixed, we detect the condition + * here, fail the command with AC_ERR_SYSTEM and reenable the + * port. + * + * Note that this doesn't change any behavior as internal + * command failure results in disabling the device in the + * higher layer for LLDDs without new reset/EH callbacks. + * + * Kill the following code as soon as those drivers are fixed. 
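
For readers new to libata's packed transfer-mode masks, the following self-contained sketch shows the round trip that ata_pack_xfermask() and the new ata_unpack_xfermask() above perform. The shift and width values are illustrative stand-ins; the authoritative ATA_SHIFT_PIO/MWDMA/UDMA and ATA_MASK_ constants live in <linux/ata.h>.

#include <assert.h>
#include <stdio.h>

/* Stand-ins for ATA_SHIFT_PIO/MWDMA/UDMA and the matching ATA_MASK_
 * constants from <linux/ata.h>; the values here are illustrative. */
enum {
	SHIFT_PIO   = 0,
	SHIFT_MWDMA = 5,
	SHIFT_UDMA  = 8,
	MASK_PIO    = 0x1f << SHIFT_PIO,
	MASK_MWDMA  = 0x07 << SHIFT_MWDMA,
	MASK_UDMA   = 0x7f << SHIFT_UDMA,
};

static unsigned int pack(unsigned int pio, unsigned int mwdma,
			 unsigned int udma)
{
	return ((pio   << SHIFT_PIO)   & MASK_PIO)   |
	       ((mwdma << SHIFT_MWDMA) & MASK_MWDMA) |
	       ((udma  << SHIFT_UDMA)  & MASK_UDMA);
}

static void unpack(unsigned int xfer, unsigned int *pio,
		   unsigned int *mwdma, unsigned int *udma)
{
	if (pio)			/* NULL destinations are skipped, */
		*pio = (xfer & MASK_PIO) >> SHIFT_PIO;
	if (mwdma)			/* exactly as in the helper above */
		*mwdma = (xfer & MASK_MWDMA) >> SHIFT_MWDMA;
	if (udma)
		*udma = (xfer & MASK_UDMA) >> SHIFT_UDMA;
}

int main(void)
{
	unsigned int pio, mwdma, udma;

	/* PIO0-4, MWDMA0-2, UDMA0-5 survive the round trip intact */
	unpack(pack(0x1f, 0x07, 0x3f), &pio, &mwdma, &udma);
	assert(pio == 0x1f && mwdma == 0x07 && udma == 0x3f);
	printf("round trip ok\n");
	return 0;
}
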
+ */ + if (ap->flags & ATA_FLAG_PORT_DISABLED) { + err_mask |= AC_ERR_SYSTEM; + ata_port_probe(ap); + } + return err_mask; } @@ -1007,7 +1056,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) return 0; if (speed > 2) return 1; - + /* If we have no drive specific rule, then PIO 2 is non IORDY */ if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ @@ -1033,9 +1082,8 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) * * Read ID data from the specified device. ATA_CMD_ID_ATA is * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI - * devices. This function also takes care of EDD signature - * misreporting (to be removed once EDD support is gone) and - * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives. + * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS + * for pre-ATA4 drives. * * LOCKING: * Kernel thread context (may sleep) @@ -1047,7 +1095,6 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, unsigned int *p_class, int post_reset, u16 **p_id) { unsigned int class = *p_class; - unsigned int using_edd; struct ata_taskfile tf; unsigned int err_mask = 0; u16 *id; @@ -1056,12 +1103,6 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno); - if (ap->ops->probe_reset || - ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) - using_edd = 0; - else - using_edd = 1; - ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL); @@ -1091,32 +1132,9 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, id, sizeof(id[0]) * ATA_ID_WORDS); - if (err_mask) { rc = -EIO; reason = "I/O error"; - - if (err_mask & ~AC_ERR_DEV) - goto err_out; - - /* - * arg! EDD works for all test cases, but seems to return - * the ATA signature for some ATAPI devices. Until the - * reason for this is found and fixed, we fix up the mess - * here. If IDENTIFY DEVICE returns command aborted - * (as ATAPI devices do), then we issue an - * IDENTIFY PACKET DEVICE. - * - * ATA software reset (SRST, the default) does not appear - * to have this problem. 
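
ata_dev_read_id() above leaves the raw IDENTIFY words in *p_id; a later step derives the supported transfer modes from them. The sketch below mirrors the spirit of ata_id_xfermask() in simplified form; the word numbers (53, 63, 64, 88) are per ATA/ATAPI-6, and pack() is the illustrative helper from the earlier sketch.

/* Simplified sketch of deriving a packed xfer mask from IDENTIFY data;
 * the kernel's ata_id_xfermask() applies more policy than this. */
static unsigned int id_to_xfermask(const unsigned short *id)
{
	unsigned int pio   = 0x07;		/* PIO0-2 as a baseline */
	unsigned int mwdma = id[63] & 0x07;	/* word 63: MWDMA0-2 */
	unsigned int udma  = 0;

	if (id[53] & (1 << 1))			/* words 64-70 valid */
		pio |= (id[64] & 0x03) << 3;	/* advanced PIO3/PIO4 */
	if (id[53] & (1 << 2))			/* word 88 valid */
		udma = id[88] & 0x7f;		/* UDMA0-6 */

	return pack(pio, mwdma, udma);
}
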
- */ - if ((using_edd) && (class == ATA_DEV_ATA)) { - u8 err = tf.feature; - if (err & ATA_ABORTED) { - class = ATA_DEV_ATAPI; - goto retry; - } - } goto err_out; } @@ -1221,13 +1239,6 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, * common ATA, ATAPI feature tests */ - /* we require DMA support (bits 8 of word 49) */ - if (!ata_id_has_dma(id)) { - printk(KERN_DEBUG "ata%u: no dma\n", ap->id); - rc = -EINVAL; - goto err_out_nosup; - } - /* find max transfer mode; for printk only */ xfer_mask = ata_id_xfermask(id); @@ -1282,6 +1293,12 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, dev->cylinders, dev->heads, dev->sectors); } + if (dev->id[59] & 0x100) { + dev->multi_count = dev->id[59] & 0xff; + DPRINTK("ata%u: dev %u multi count %u\n", + ap->id, device, dev->multi_count); + } + dev->cdb_len = 16; } @@ -1295,6 +1312,9 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, } dev->cdb_len = (unsigned int) rc; + if (ata_id_cdb_intr(dev->id)) + dev->flags |= ATA_DFLAG_CDB_INTR; + /* print device info to dmesg */ if (print_info) printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", @@ -1312,7 +1332,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, if (print_info) printk(KERN_INFO "ata%u(%u): applying bridge limits\n", ap->id, dev->devno); - ap->udma_mask &= ATA_UDMA5; + dev->udma_mask &= ATA_UDMA5; dev->max_sectors = ATA_MAX_SECTORS; } @@ -1323,8 +1343,6 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, return 0; err_out_nosup: - printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", - ap->id, dev->devno); DPRINTK("EXIT, err\n"); return rc; } @@ -1391,7 +1409,7 @@ static int ata_bus_probe(struct ata_port *ap) } if (ata_dev_configure(ap, dev, 1)) { - dev->class++; /* disable device */ + ata_dev_disable(ap, dev); continue; } @@ -1536,6 +1554,23 @@ void sata_phy_reset(struct ata_port *ap) ata_bus_reset(ap); } +/** + * ata_dev_pair - return other device on cable + * @ap: port + * @adev: device + * + * Obtain the other device on the same cable, or if none is + * present NULL is returned + */ + +struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) +{ + struct ata_device *pair = &ap->device[1 - adev->devno]; + if (!ata_dev_present(pair)) + return NULL; + return pair; +} + /** * ata_port_disable - Disable port. * @ap: Port to be disabled. @@ -1564,7 +1599,7 @@ void ata_port_disable(struct ata_port *ap) * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). * These were taken from ATA/ATAPI-6 standard, rev 0a, except * for PIO 5, which is a nonstandard extension and UDMA6, which - * is currently supported only by Maxtor drives. + * is currently supported only by Maxtor drives. 
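
The new ata_dev_pair() gives an LLDD a clean way to reach the other device on the cable, which matters exactly for timing setups like the shared-register cases around here. A hypothetical ->set_piomode hook for a controller with one timing register per channel (foo_program_timings() is invented for the example):

static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(ap, adev);
	unsigned int pio = adev->pio_mode - XFER_PIO_0;

	/* one timing register serves both devices: clamp to the slower */
	if (pair && pair->pio_mode - XFER_PIO_0 < pio)
		pio = pair->pio_mode - XFER_PIO_0;

	foo_program_timings(ap, pio);		/* hypothetical */
}
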
*/ static const struct ata_timing ata_timing[] = { @@ -1579,11 +1614,11 @@ static const struct ata_timing ata_timing[] = { { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ - + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, - + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, @@ -1636,7 +1671,7 @@ static const struct ata_timing* ata_timing_find_mode(unsigned short speed) for (t = ata_timing; t->mode != speed; t++) if (t->mode == 0xFF) return NULL; - return t; + return t; } int ata_timing_compute(struct ata_device *adev, unsigned short speed, @@ -1646,7 +1681,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, struct ata_timing p; /* - * Find the mode. + * Find the mode. */ if (!(s = ata_timing_find_mode(speed))) @@ -1704,20 +1739,28 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, return 0; } -static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) +static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) { - if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) - return; + unsigned int err_mask; + int rc; if (dev->xfer_shift == ATA_SHIFT_PIO) dev->flags |= ATA_DFLAG_PIO; - ata_dev_set_xfermode(ap, dev); + err_mask = ata_dev_set_xfermode(ap, dev); + if (err_mask) { + printk(KERN_ERR + "ata%u: failed to set xfermode (err_mask=0x%x)\n", + ap->id, err_mask); + return -EIO; + } - if (ata_dev_revalidate(ap, dev, 0)) { - printk(KERN_ERR "ata%u: failed to revalidate after set " - "xfermode, disabled\n", ap->id); - ata_port_disable(ap); + rc = ata_dev_revalidate(ap, dev, 0); + if (rc) { + printk(KERN_ERR + "ata%u: failed to revalidate after set xfermode\n", + ap->id); + return rc; } DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", @@ -1726,6 +1769,7 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) printk(KERN_INFO "ata%u: dev %u configured for %s\n", ap->id, dev->devno, ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); + return 0; } static int ata_host_set_pio(struct ata_port *ap) @@ -1785,16 +1829,19 @@ static void ata_set_mode(struct ata_port *ap) /* step 1: calculate xfer_mask */ for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *dev = &ap->device[i]; - unsigned int xfer_mask; + unsigned int pio_mask, dma_mask; if (!ata_dev_present(dev)) continue; - xfer_mask = ata_dev_xfermask(ap, dev); + ata_dev_xfermask(ap, dev); - dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO); - dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA | - ATA_MASK_UDMA)); + /* TODO: let LLDD filter dev->*_mask here */ + + pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); + dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); + dev->pio_mode = ata_xfer_mask2mode(pio_mask); + dev->dma_mode = ata_xfer_mask2mode(dma_mask); } /* step 2: always set host PIO timings */ @@ -1806,11 +1853,15 @@ static void ata_set_mode(struct ata_port *ap) ata_host_set_dma(ap); /* step 4: update devices' xfer mode */ - for (i = 0; i < ATA_MAX_DEVICES; i++) - ata_dev_set_mode(ap, &ap->device[i]); + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; - if (ap->flags & ATA_FLAG_PORT_DISABLED) - return; + if (!ata_dev_present(dev)) + continue; + + if (ata_dev_set_mode(ap, dev)) + goto err_out; + } if 
(ap->ops->post_set_mode) ap->ops->post_set_mode(ap); @@ -1933,45 +1984,6 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) ap->ops->dev_select(ap, 0); } -/** - * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command. - * @ap: Port to reset and probe - * - * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and - * probe the bus. Not often used these days. - * - * LOCKING: - * PCI/etc. bus probe sem. - * Obtains host_set lock. - * - */ - -static unsigned int ata_bus_edd(struct ata_port *ap) -{ - struct ata_taskfile tf; - unsigned long flags; - - /* set up execute-device-diag (bus reset) taskfile */ - /* also, take interrupts to a known state (disabled) */ - DPRINTK("execute-device-diag\n"); - ata_tf_init(ap, &tf, 0); - tf.ctl |= ATA_NIEN; - tf.command = ATA_CMD_EDD; - tf.protocol = ATA_PROT_NODATA; - - /* do bus reset */ - spin_lock_irqsave(&ap->host_set->lock, flags); - ata_tf_to_host(ap, &tf); - spin_unlock_irqrestore(&ap->host_set->lock, flags); - - /* spec says at least 2ms. but who knows with those - * crazy ATAPI devices... - */ - msleep(150); - - return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); -} - static unsigned int ata_bus_softreset(struct ata_port *ap, unsigned int devmask) { @@ -2001,9 +2013,19 @@ static unsigned int ata_bus_softreset(struct ata_port *ap, * status is checked. Because waiting for "a while" before * checking status is fine, post SRST, we perform this magic * delay here as well. + * + * Old drivers/ide uses the 2mS rule and then waits for ready */ msleep(150); + + /* Before we perform post reset processing we want to see if + the bus shows 0xFF because the odd clown forgets the D7 pulldown + resistor */ + + if (ata_check_status(ap) == 0xFF) + return 1; /* Positive is failure for some reason */ + ata_bus_post_reset(ap, devmask); return 0; @@ -2034,7 +2056,7 @@ void ata_bus_reset(struct ata_port *ap) struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; u8 err; - unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; + unsigned int dev0, dev1 = 0, devmask = 0; DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); @@ -2057,18 +2079,8 @@ void ata_bus_reset(struct ata_port *ap) /* issue bus reset */ if (ap->flags & ATA_FLAG_SRST) - rc = ata_bus_softreset(ap, devmask); - else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) { - /* set up device control */ - if (ap->flags & ATA_FLAG_MMIO) - writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); - else - outb(ap->ctl, ioaddr->ctl_addr); - rc = ata_bus_edd(ap); - } - - if (rc) - goto err_out; + if (ata_bus_softreset(ap, devmask)) + goto err_out; /* * determine by signature whether we have ATA or ATAPI devices @@ -2553,48 +2565,72 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, } static const char * const ata_dma_blacklist [] = { - "WDC AC11000H", - "WDC AC22100H", - "WDC AC32500H", - "WDC AC33100H", - "WDC AC31600H", - "WDC AC32100H", - "WDC AC23200L", - "Compaq CRD-8241B", - "CRD-8400B", - "CRD-8480B", - "CRD-8482B", - "CRD-84", - "SanDisk SDP3B", - "SanDisk SDP3B-64", - "SANYO CD-ROM CRD", - "HITACHI CDR-8", - "HITACHI CDR-8335", - "HITACHI CDR-8435", - "Toshiba CD-ROM XM-6202B", - "TOSHIBA CD-ROM XM-1702BC", - "CD-532E-A", - "E-IDE CD-ROM CR-840", - "CD-ROM Drive/F5A", - "WPI CDD-820", - "SAMSUNG CD-ROM SC-148C", - "SAMSUNG CD-ROM SC", - "SanDisk SDP3B-64", - "ATAPI CD-ROM DRIVE 40X MAXIMUM", - "_NEC DV5800A", + "WDC AC11000H", NULL, + "WDC AC22100H", NULL, + "WDC AC32500H", NULL, + "WDC AC33100H", NULL, + "WDC 
AC31600H", NULL, + "WDC AC32100H", "24.09P07", + "WDC AC23200L", "21.10N21", + "Compaq CRD-8241B", NULL, + "CRD-8400B", NULL, + "CRD-8480B", NULL, + "CRD-8482B", NULL, + "CRD-84", NULL, + "SanDisk SDP3B", NULL, + "SanDisk SDP3B-64", NULL, + "SANYO CD-ROM CRD", NULL, + "HITACHI CDR-8", NULL, + "HITACHI CDR-8335", NULL, + "HITACHI CDR-8435", NULL, + "Toshiba CD-ROM XM-6202B", NULL, + "TOSHIBA CD-ROM XM-1702BC", NULL, + "CD-532E-A", NULL, + "E-IDE CD-ROM CR-840", NULL, + "CD-ROM Drive/F5A", NULL, + "WPI CDD-820", NULL, + "SAMSUNG CD-ROM SC-148C", NULL, + "SAMSUNG CD-ROM SC", NULL, + "SanDisk SDP3B-64", NULL, + "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, + "_NEC DV5800A", NULL, + "SAMSUNG CD-ROM SN-124", "N001" }; +static int ata_strim(char *s, size_t len) +{ + len = strnlen(s, len); + + /* ATAPI specifies that empty space is blank-filled; remove blanks */ + while ((len > 0) && (s[len - 1] == ' ')) { + len--; + s[len] = 0; + } + return len; +} + static int ata_dma_blacklisted(const struct ata_device *dev) { - unsigned char model_num[41]; + unsigned char model_num[40]; + unsigned char model_rev[16]; + unsigned int nlen, rlen; int i; - ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); - - for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) - if (!strcmp(ata_dma_blacklist[i], model_num)) - return 1; + ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, + sizeof(model_num)); + ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS, + sizeof(model_rev)); + nlen = ata_strim(model_num, sizeof(model_num)); + rlen = ata_strim(model_rev, sizeof(model_rev)); + for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) { + if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) { + if (ata_dma_blacklist[i+1] == NULL) + return 1; + if (!strncmp(ata_dma_blacklist[i], model_rev, rlen)) + return 1; + } + } return 0; } @@ -2603,18 +2639,15 @@ static int ata_dma_blacklisted(const struct ata_device *dev) * @ap: Port on which the device to compute xfermask for resides * @dev: Device to compute xfermask for * - * Compute supported xfermask of @dev. This function is - * responsible for applying all known limits including host - * controller limits, device blacklist, etc... + * Compute supported xfermask of @dev and store it in + * dev->*_mask. This function is responsible for applying all + * known limits including host controller limits, device + * blacklist, etc... * * LOCKING: * None. - * - * RETURNS: - * Computed xfermask. */ -static unsigned int ata_dev_xfermask(struct ata_port *ap, - struct ata_device *dev) +static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) { unsigned long xfer_mask; int i; @@ -2627,6 +2660,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap, struct ata_device *d = &ap->device[i]; if (!ata_dev_present(d)) continue; + xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, + d->udma_mask); xfer_mask &= ata_id_xfermask(d->id); if (ata_dma_blacklisted(d)) xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); @@ -2636,7 +2671,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap, printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " "disabling DMA\n", ap->id, dev->devno); - return xfer_mask; + ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, + &dev->udma_mask); } /** @@ -2649,11 +2685,16 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap, * * LOCKING: * PCI/etc. bus probe sem. + * + * RETURNS: + * 0 on success, AC_ERR_* mask otherwise. 
*/ -static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) +static unsigned int ata_dev_set_xfermode(struct ata_port *ap, + struct ata_device *dev) { struct ata_taskfile tf; + unsigned int err_mask; /* set up set-features taskfile */ DPRINTK("set features - xfer mode\n"); @@ -2665,13 +2706,10 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) tf.protocol = ATA_PROT_NODATA; tf.nsect = dev->xfer_mode; - if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { - printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n", - ap->id); - ata_port_disable(ap); - } + err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); - DPRINTK("EXIT\n"); + DPRINTK("EXIT, err_mask=%x\n", err_mask); + return err_mask; } /** @@ -2748,7 +2786,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) if (qc->flags & ATA_QCFLAG_SG) { if (qc->n_elem) - dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); + dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); /* restore last sg */ sg[qc->orig_n_elem - 1].length += qc->pad_len; if (pad_buf) { @@ -2759,7 +2797,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) } } else { if (qc->n_elem) - dma_unmap_single(ap->host_set->dev, + dma_unmap_single(ap->dev, sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), dir); /* restore sg */ @@ -2970,7 +3008,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) goto skip_map; } - dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, + dma_address = dma_map_single(ap->dev, qc->buf_virt, sg->length, dir); if (dma_mapping_error(dma_address)) { /* restore sg */ @@ -3058,7 +3096,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) } dir = qc->dma_dir; - n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir); + n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir); if (n_elem < 1) { /* restore last sg */ lsg->length += qc->pad_len; @@ -3088,7 +3126,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) unsigned long flags; spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags &= ~ATA_FLAG_NOINTR; ata_irq_on(ap); ata_qc_complete(qc); spin_unlock_irqrestore(&ap->host_set->lock, flags); @@ -3154,7 +3191,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap) * None. (executing in kernel thread context) * * RETURNS: - * Non-zero if qc completed, zero otherwise. + * Zero if qc completed. + * Non-zero if has next. */ static int ata_pio_complete (struct ata_port *ap) @@ -3167,7 +3205,7 @@ static int ata_pio_complete (struct ata_port *ap) * we enter, BSY will be cleared in a chk-status or two. If not, * the drive is probably seeking or something. Snooze for a couple * msecs, then chk-status again. If still busy, fall back to - * HSM_ST_POLL state. + * HSM_ST_LAST_POLL state. 
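
Several hunks above move the streaming-DMA calls from ap->host_set->dev to the new per-port ap->dev; whichever device is used, the map and unmap sides must agree on it. A minimal pairing sketch (buf, len and dir stand in for the qc's values):

static int example_map_one(struct ata_port *ap, void *buf, size_t len,
			   enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_single(ap->dev, buf, len, dir);

	if (dma_mapping_error(addr))	/* single-argument form of this era */
		return -ENOMEM;

	/* ... program the controller and run the transfer ... */

	dma_unmap_single(ap->dev, addr, len, dir);
	return 0;
}
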
*/ drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); if (drv_stat & ATA_BUSY) { @@ -3176,7 +3214,7 @@ static int ata_pio_complete (struct ata_port *ap) if (drv_stat & ATA_BUSY) { ap->hsm_task_state = HSM_ST_LAST_POLL; ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; - return 0; + return 1; } } @@ -3187,7 +3225,7 @@ static int ata_pio_complete (struct ata_port *ap) if (!ata_ok(drv_stat)) { qc->err_mask |= __ac_err_mask(drv_stat); ap->hsm_task_state = HSM_ST_ERR; - return 0; + return 1; } ap->hsm_task_state = HSM_ST_IDLE; @@ -3197,7 +3235,7 @@ static int ata_pio_complete (struct ata_port *ap) /* another command may start at this point */ - return 1; + return 0; } @@ -3369,7 +3407,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; - buf = kmap(page) + offset; + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + local_irq_save(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ + ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); + + kunmap_atomic(buf, KM_IRQ0); + local_irq_restore(flags); + } else { + buf = page_address(page); + ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); + } qc->cursect++; qc->cursg_ofs++; @@ -3378,14 +3432,153 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) qc->cursg++; qc->cursg_ofs = 0; } +} - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); +/** + * ata_pio_sectors - Transfer one or many 512-byte sectors. + * @qc: Command on going + * + * Transfer one or many ATA_SECT_SIZE of data from/to the + * ATA device for the DRQ request. + * + * LOCKING: + * Inherited from caller. + */ + +static void ata_pio_sectors(struct ata_queued_cmd *qc) +{ + if (is_multi_taskfile(&qc->tf)) { + /* READ/WRITE MULTIPLE */ + unsigned int nsect; + + WARN_ON(qc->dev->multi_count == 0); + + nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); + while (nsect--) + ata_pio_sector(qc); + } else + ata_pio_sector(qc); +} + +/** + * atapi_send_cdb - Write CDB bytes to hardware + * @ap: Port to which ATAPI device is attached. + * @qc: Taskfile currently active + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * + * LOCKING: + * caller. + */ + +static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + /* send SCSI cdb */ + DPRINTK("send cdb\n"); + WARN_ON(qc->dev->cdb_len < 12); + + ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); + ata_altstatus(ap); /* flush */ + + switch (qc->tf.protocol) { + case ATA_PROT_ATAPI: + ap->hsm_task_state = HSM_ST; + break; + case ATA_PROT_ATAPI_NODATA: + ap->hsm_task_state = HSM_ST_LAST; + break; + case ATA_PROT_ATAPI_DMA: + ap->hsm_task_state = HSM_ST_LAST; + /* initiate bmdma */ + ap->ops->bmdma_start(qc); + break; + } +} + +/** + * ata_pio_first_block - Write first data block to hardware + * @ap: Port to which ATA/ATAPI device is attached. + * + * When device has indicated its readiness to accept + * the data, this function sends out the CDB or + * the first data block by PIO. + * After this, + * - If polling, ata_pio_task() handles the rest. + * - Otherwise, interrupt handler takes over. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * Zero if irq handler takes over + * Non-zero if has next (polling). 
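
ata_pio_first_block() below and the later ata_hsm_move() share one taskfile handshake: BSY must clear first, and then DRQ alone must be set before the CDB or the first data block may be written. Distilled into a hypothetical helper using the same calls and timeouts as the code below:

static int wait_cdb_or_data_ready(struct ata_port *ap)
{
	u8 status;

	/* sleep-wait for BSY to clear */
	if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT))
		return -EBUSY;			/* timed out */

	/* !BSY && DRQ is the only acceptable state at this point */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
		return -EIO;			/* device status error */

	return 0;
}
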
+ */ + +static int ata_pio_first_block(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + unsigned long flags; + int has_next; + + qc = ata_qc_from_tag(ap, ap->active_tag); + WARN_ON(qc == NULL); + WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* if polling, we will stay in the work queue after sending the data. + * otherwise, interrupt handler takes over after sending the data. + */ + has_next = (qc->tf.flags & ATA_TFLAG_POLLING); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) { + qc->err_mask |= AC_ERR_TIMEOUT; + ap->hsm_task_state = HSM_ST_TMOUT; + goto err_out; + } + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { + /* device status error */ + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto err_out; + } + + /* Send the CDB (atapi) or the first data block (ata pio out). + * During the state transition, interrupt handler shouldn't + * be invoked before the data transfer is complete and + * hsm_task_state is changed. Hence, the following locking. + */ + spin_lock_irqsave(&ap->host_set->lock, flags); + + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. + */ + + /* ata_pio_sectors() might change the state to HSM_ST_LAST. + * so, the state is changed here before ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST; + ata_pio_sectors(qc); + ata_altstatus(ap); /* flush */ + } else + /* send CDB */ + atapi_send_cdb(ap, qc); + + spin_unlock_irqrestore(&ap->host_set->lock, flags); - /* do the actual data transfer */ - do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); + /* if polling, ata_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. + */ + return has_next; - kunmap(page); +err_out: + return 1; /* has next */ } /** @@ -3451,7 +3644,23 @@ next_sg: /* don't cross page boundaries */ count = min(count, (unsigned int)PAGE_SIZE - offset); - buf = kmap(page) + offset; + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + local_irq_save(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ + ata_data_xfer(ap, buf + offset, count, do_write); + + kunmap_atomic(buf, KM_IRQ0); + local_irq_restore(flags); + } else { + buf = page_address(page); + ata_data_xfer(ap, buf + offset, count, do_write); + } bytes -= count; qc->curbytes += count; @@ -3462,13 +3671,6 @@ next_sg: qc->cursg_ofs = 0; } - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? 
"write" : "read"); - - /* do the actual data transfer */ - ata_data_xfer(ap, buf, count, do_write); - - kunmap(page); - if (bytes) goto next_sg; } @@ -3505,6 +3707,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) if (do_write != i_write) goto err_out; + VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); + __atapi_pio_bytes(qc, bytes); return; @@ -3575,8 +3779,10 @@ static void ata_pio_block(struct ata_port *ap) return; } - ata_pio_sector(qc); + ata_pio_sectors(qc); } + + ata_altstatus(ap); /* flush */ } static void ata_pio_error(struct ata_port *ap) @@ -3589,7 +3795,7 @@ static void ata_pio_error(struct ata_port *ap) if (qc->tf.command != ATA_CMD_PACKET) printk(KERN_WARNING "ata%u: PIO error\n", ap->id); - /* make sure qc->err_mask is available to + /* make sure qc->err_mask is available to * know what's wrong and recover */ WARN_ON(qc->err_mask == 0); @@ -3599,115 +3805,222 @@ static void ata_pio_error(struct ata_port *ap) ata_poll_qc_complete(qc); } -static void ata_pio_task(void *_data) +/** + * ata_hsm_move - move the HSM to the next state. + * @ap: the target ata_port + * @qc: qc on going + * @status: current device status + * @in_wq: 1 if called from workqueue, 0 otherwise + * + * RETURNS: + * 1 when poll next status needed, 0 otherwise. + */ + +static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq) { - struct ata_port *ap = _data; - unsigned long timeout; - int qc_completed; + unsigned long flags = 0; + int poll_next; -fsm_start: - timeout = 0; - qc_completed = 0; + WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); + + /* Make sure ata_qc_issue_prot() does not throw things + * like DMA polling into the workqueue. Notice that + * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). + */ + WARN_ON(in_wq != ((qc->tf.flags & ATA_TFLAG_POLLING) || + (ap->hsm_task_state == HSM_ST_FIRST && + ((qc->tf.protocol == ATA_PROT_PIO && + (qc->tf.flags & ATA_TFLAG_WRITE)) || + (is_atapi_taskfile(&qc->tf) && + !(qc->dev->flags & ATA_DFLAG_CDB_INTR)))))); + + /* check error */ + if (unlikely(status & (ATA_ERR | ATA_DF))) { + qc->err_mask |= AC_ERR_DEV; + ap->hsm_task_state = HSM_ST_ERR; + } +fsm_start: switch (ap->hsm_task_state) { - case HSM_ST_IDLE: - return; + case HSM_ST_FIRST: + /* Send first data block or PACKET CDB */ + + /* If polling, we will stay in the work queue after + * sending the data. Otherwise, interrupt handler + * takes over after sending the data. + */ + poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); + + /* check device status */ + if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) { + /* Wrong status. Let EH handle this */ + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* Send the CDB (atapi) or the first data block (ata pio out). + * During the state transition, interrupt handler shouldn't + * be invoked before the data transfer is complete and + * hsm_task_state is changed. Hence, the following locking. + */ + if (in_wq) + spin_lock_irqsave(&ap->host_set->lock, flags); + + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. + */ + + /* ata_pio_sectors() might change the state + * to HSM_ST_LAST. so, the state is changed here + * before ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST; + ata_pio_sectors(qc); + ata_altstatus(ap); /* flush */ + } else + /* send CDB */ + atapi_send_cdb(ap, qc); + + if (in_wq) + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + /* if polling, ata_pio_task() handles the rest. 
+ * otherwise, interrupt handler takes over from here. + */ + break; case HSM_ST: - ata_pio_block(ap); + /* complete command or read/write the data register */ + if (qc->tf.protocol == ATA_PROT_ATAPI) { + /* ATAPI PIO protocol */ + if ((status & ATA_DRQ) == 0) { + /* no more data to transfer */ + ap->hsm_task_state = HSM_ST_LAST; + goto fsm_start; + } + + atapi_pio_bytes(qc); + + if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) + /* bad ireason reported by device */ + goto fsm_start; + + } else { + /* ATA PIO protocol */ + if (unlikely((status & ATA_DRQ) == 0)) { + /* handle BSY=0, DRQ=0 as error */ + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + ata_pio_sectors(qc); + + if (ap->hsm_task_state == HSM_ST_LAST && + (!(qc->tf.flags & ATA_TFLAG_WRITE))) { + /* all data read */ + ata_altstatus(ap); + status = ata_chk_status(ap); + goto fsm_start; + } + } + + ata_altstatus(ap); /* flush */ + poll_next = 1; break; case HSM_ST_LAST: - qc_completed = ata_pio_complete(ap); - break; + if (unlikely(!ata_ok(status))) { + qc->err_mask |= __ac_err_mask(status); + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } - case HSM_ST_POLL: - case HSM_ST_LAST_POLL: - timeout = ata_pio_poll(ap); + /* no more data to transfer */ + DPRINTK("ata%u: command complete, drv_stat 0x%x\n", + ap->id, status); + + WARN_ON(qc->err_mask); + + ap->hsm_task_state = HSM_ST_IDLE; + + /* complete taskfile transaction */ + if (in_wq) + ata_poll_qc_complete(qc); + else + ata_qc_complete(qc); + + poll_next = 0; break; - case HSM_ST_TMOUT: case HSM_ST_ERR: - ata_pio_error(ap); - return; + if (qc->tf.command != ATA_CMD_PACKET) + printk(KERN_ERR "ata%u: command error, drv_stat 0x%x\n", + ap->id, status); + + /* make sure qc->err_mask is available to + * know what's wrong and recover + */ + WARN_ON(qc->err_mask == 0); + + ap->hsm_task_state = HSM_ST_IDLE; + + if (in_wq) + ata_poll_qc_complete(qc); + else + ata_qc_complete(qc); + + poll_next = 0; + break; + default: + poll_next = 0; + BUG(); } - if (timeout) - ata_port_queue_task(ap, ata_pio_task, ap, timeout); - else if (!qc_completed) - goto fsm_start; + return poll_next; } -/** - * atapi_packet_task - Write CDB bytes to hardware - * @_data: Port to which ATAPI device is attached. - * - * When device has indicated its readiness to accept - * a CDB, this function is called. Send the CDB. - * If DMA is to be performed, exit immediately. - * Otherwise, we are in polling mode, so poll - * status under operation succeeds or fails. - * - * LOCKING: - * Kernel thread context (may sleep) - */ - -static void atapi_packet_task(void *_data) +static void ata_pio_task(void *_data) { struct ata_port *ap = _data; struct ata_queued_cmd *qc; u8 status; + int poll_next; + +fsm_start: + WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); qc = ata_qc_from_tag(ap, ap->active_tag); WARN_ON(qc == NULL); - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); - - /* sleep-wait for BSY to clear */ - DPRINTK("busy wait\n"); - if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { - qc->err_mask |= AC_ERR_TIMEOUT; - goto err_out; - } - - /* make sure DRQ is set */ - status = ata_chk_status(ap); - if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { - qc->err_mask |= AC_ERR_HSM; - goto err_out; - } - - /* send SCSI cdb */ - DPRINTK("send cdb\n"); - WARN_ON(qc->dev->cdb_len < 12); - - if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || - qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { - unsigned long flags; - - /* Once we're done issuing command and kicking bmdma, - * irq handler takes over. 
To not lose irq, we need - * to clear NOINTR flag before sending cdb, but - * interrupt handler shouldn't be invoked before we're - * finished. Hence, the following locking. - */ - spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags &= ~ATA_FLAG_NOINTR; - ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); - if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) - ap->ops->bmdma_start(qc); /* initiate bmdma */ - spin_unlock_irqrestore(&ap->host_set->lock, flags); - } else { - ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); - /* PIO commands are handled by polling */ - ap->hsm_task_state = HSM_ST; - ata_port_queue_task(ap, ata_pio_task, ap, 0); + /* + * This is purely heuristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, queue delayed work. + */ + status = ata_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + msleep(2); + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_port_queue_task(ap, ata_pio_task, ap, ATA_SHORT_PAUSE); + return; + } } - return; + /* move the HSM */ + poll_next = ata_hsm_move(ap, qc, status, 1); -err_out: - ata_poll_qc_complete(qc); + /* another command or interrupt handler + * may be running at this point. + */ + if (poll_next) + goto fsm_start; } /** @@ -3763,8 +4076,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", ap->id, qc->tf.command, drv_stat, host_stat); + ap->hsm_task_state = HSM_ST_IDLE; + /* complete taskfile transaction */ - qc->err_mask |= ac_err_mask(drv_stat); + qc->err_mask |= AC_ERR_TIMEOUT; break; } @@ -3990,43 +4305,104 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + /* Use polling pio if the LLD doesn't handle + * interrupt driven pio and atapi CDB interrupt. 
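
ATA_FLAG_PIO_POLLING, consulted just below, is how an LLDD opts out of interrupt-driven PIO and ATAPI CDB phases. A hypothetical port_info for such a controller (foo_sht and foo_ops are invented; the field layout follows struct ata_port_info of this kernel):

static const struct ata_port_info foo_port_info = {
	.sht		= &foo_sht,			/* hypothetical */
	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
			  ATA_FLAG_PIO_POLLING,		/* no PIO/CDB irq */
	.pio_mask	= 0x1f,				/* PIO0-4 */
	.udma_mask	= 0x7f,				/* UDMA0-6 */
	.port_ops	= &foo_ops,			/* hypothetical */
};
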
+ */ + if (ap->flags & ATA_FLAG_PIO_POLLING) { + switch (qc->tf.protocol) { + case ATA_PROT_PIO: + case ATA_PROT_ATAPI: + case ATA_PROT_ATAPI_NODATA: + qc->tf.flags |= ATA_TFLAG_POLLING; + break; + case ATA_PROT_ATAPI_DMA: + if (qc->dev->flags & ATA_DFLAG_CDB_INTR) + BUG(); + break; + default: + break; + } + } + + /* select the device */ ata_dev_select(ap, qc->dev->devno, 1, 0); + /* start the command */ switch (qc->tf.protocol) { case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + ata_tf_to_host(ap, &qc->tf); + ap->hsm_task_state = HSM_ST_LAST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_port_queue_task(ap, ata_pio_task, ap, 0); + break; case ATA_PROT_DMA: + WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->bmdma_setup(qc); /* set up bmdma */ ap->ops->bmdma_start(qc); /* initiate bmdma */ + ap->hsm_task_state = HSM_ST_LAST; break; - case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ - ata_qc_set_polling(qc); - ata_tf_to_host(ap, &qc->tf); - ap->hsm_task_state = HSM_ST; - ata_port_queue_task(ap, ata_pio_task, ap, 0); - break; + case ATA_PROT_PIO: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); - case ATA_PROT_ATAPI: - ata_qc_set_polling(qc); ata_tf_to_host(ap, &qc->tf); - ata_port_queue_task(ap, atapi_packet_task, ap, 0); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + /* PIO data out protocol */ + ap->hsm_task_state = HSM_ST_FIRST; + ata_port_queue_task(ap, ata_pio_task, ap, 0); + + /* always send first data block using + * the ata_pio_task() codepath. + */ + } else { + /* PIO data in protocol */ + ap->hsm_task_state = HSM_ST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_port_queue_task(ap, ata_pio_task, ap, 0); + + /* if polling, ata_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. + */ + } + break; + case ATA_PROT_ATAPI: case ATA_PROT_ATAPI_NODATA: - ap->flags |= ATA_FLAG_NOINTR; + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + ata_tf_to_host(ap, &qc->tf); - ata_port_queue_task(ap, atapi_packet_task, ap, 0); + + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || + (qc->tf.flags & ATA_TFLAG_POLLING)) + ata_port_queue_task(ap, ata_pio_task, ap, 0); break; case ATA_PROT_ATAPI_DMA: - ap->flags |= ATA_FLAG_NOINTR; + WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->bmdma_setup(qc); /* set up bmdma */ - ata_port_queue_task(ap, atapi_packet_task, ap, 0); + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + ata_port_queue_task(ap, ata_pio_task, ap, 0); break; default: @@ -4037,240 +4413,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) return 0; } -/** - * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - - /* load PRD table addr. 
*/ - mb(); /* make sure PRD table writes are visible to controller */ - writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); - - /* specify data direction, triple-check start bit is clear */ - dmactl = readb(mmio + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - writeb(dmactl, mmio + ATA_DMA_CMD); - - /* issue r/w command */ - ap->ops->exec_command(ap, &qc->tf); -} - -/** - * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - u8 dmactl; - - /* start host DMA transaction */ - dmactl = readb(mmio + ATA_DMA_CMD); - writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); - - /* Strictly, one may wish to issue a readb() here, to - * flush the mmio write. However, control also passes - * to the hardware at this point, and it will interrupt - * us when we are to resume control. So, in effect, - * we don't care when the mmio write flushes. - * Further, a read of the DMA status register _immediately_ - * following the write may not be what certain flaky hardware - * is expected, so I think it is best to not add a readb() - * without first all the MMIO ATA cards/mobos. - * Or maybe I'm just being paranoid. - */ -} - -/** - * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO) - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - - /* load PRD table addr. */ - outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); - - /* specify data direction, triple-check start bit is clear */ - dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - /* issue r/w command */ - ap->ops->exec_command(ap, &qc->tf); -} - -/** - * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO) - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - u8 dmactl; - - /* start host DMA transaction */ - dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - outb(dmactl | ATA_DMA_START, - ap->ioaddr.bmdma_addr + ATA_DMA_CMD); -} - - -/** - * ata_bmdma_start - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * Writes the ATA_DMA_START flag to the DMA command register. - * - * May be used as the bmdma_start() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ -void ata_bmdma_start(struct ata_queued_cmd *qc) -{ - if (qc->ap->flags & ATA_FLAG_MMIO) - ata_bmdma_start_mmio(qc); - else - ata_bmdma_start_pio(qc); -} - - -/** - * ata_bmdma_setup - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * Writes address of PRD table to device's PRD Table Address - * register, sets the DMA control register, and calls - * ops->exec_command() to start the transfer. - * - * May be used as the bmdma_setup() entry in ata_port_operations. 
- * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ -void ata_bmdma_setup(struct ata_queued_cmd *qc) -{ - if (qc->ap->flags & ATA_FLAG_MMIO) - ata_bmdma_setup_mmio(qc); - else - ata_bmdma_setup_pio(qc); -} - - -/** - * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Clear interrupt and error flags in DMA status register. - * - * May be used as the irq_clear() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -void ata_bmdma_irq_clear(struct ata_port *ap) -{ - if (!ap->ioaddr.bmdma_addr) - return; - - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = - ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; - writeb(readb(mmio), mmio); - } else { - unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; - outb(inb(addr), addr); - } -} - - -/** - * ata_bmdma_status - Read PCI IDE BMDMA status - * @ap: Port associated with this ATA transaction. - * - * Read and return BMDMA status register. - * - * May be used as the bmdma_status() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -u8 ata_bmdma_status(struct ata_port *ap) -{ - u8 host_stat; - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - host_stat = readb(mmio + ATA_DMA_STATUS); - } else - host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); - return host_stat; -} - - -/** - * ata_bmdma_stop - Stop PCI IDE BMDMA transfer - * @qc: Command we are ending DMA for - * - * Clears the ATA_DMA_START flag in the dma control register - * - * May be used as the bmdma_stop() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host_set lock) - */ - -void ata_bmdma_stop(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - if (ap->flags & ATA_FLAG_MMIO) { - void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; - - /* clear start/stop bit */ - writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, - mmio + ATA_DMA_CMD); - } else { - /* clear start/stop bit */ - outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, - ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - } - - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_altstatus(ap); /* dummy read */ -} - /** * ata_host_intr - Handle host interrupt for given (port, task) * @ap: Port on which interrupt arrived (possibly...) @@ -4290,52 +4432,69 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc) inline unsigned int ata_host_intr (struct ata_port *ap, struct ata_queued_cmd *qc) { - u8 status, host_stat; - - switch (qc->tf.protocol) { - - case ATA_PROT_DMA: - case ATA_PROT_ATAPI_DMA: - case ATA_PROT_ATAPI: - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); - - /* if it's not our irq... */ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); + u8 status, host_stat = 0; - /* fall through */ + VPRINTK("ata%u: protocol %d task_state %d\n", + ap->id, qc->tf.protocol, ap->hsm_task_state); - case ATA_PROT_ATAPI_NODATA: - case ATA_PROT_NODATA: - /* check altstatus */ - status = ata_altstatus(ap); - if (status & ATA_BUSY) - goto idle_irq; + /* Check whether we are expecting interrupt in this state */ + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Some pre-ATAPI-4 devices assert INTRQ + * at this state when ready to receive CDB. 
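
ATA_DFLAG_CDB_INTR, tested just below, is set at configure time from IDENTIFY PACKET word 0, whose bits 6:5 describe DRQ handling; 01b means the device asserts INTRQ when it is ready to accept the CDB. Roughly the test that ata_id_cdb_intr() performs:

static inline int example_id_cdb_intr(const unsigned short *id)
{
	return (id[0] & 0x60) == 0x20;	/* sketch of ata_id_cdb_intr() */
}
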
+ */ - /* check main status, clearing INTRQ */ - status = ata_chk_status(ap); - if (unlikely(status & ATA_BUSY)) + /* Check the ATA_DFLAG_CDB_INTR flag is enough here. + * The flag was turned on only for atapi devices. + * No need to check is_atapi_taskfile(&qc->tf) again. + */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) goto idle_irq; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->id, qc->tf.protocol, status); - - /* ack bmdma irq events */ - ap->ops->irq_clear(ap); - - /* complete taskfile transaction */ - qc->err_mask |= ac_err_mask(status); - ata_qc_complete(qc); break; - + case HSM_ST_LAST: + if (qc->tf.protocol == ATA_PROT_DMA || + qc->tf.protocol == ATA_PROT_ATAPI_DMA) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + goto idle_irq; + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transfering data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + break; + case HSM_ST: + break; default: goto idle_irq; } + /* check altstatus */ + status = ata_altstatus(ap); + if (status & ATA_BUSY) + goto idle_irq; + + /* check main status, clearing INTRQ */ + status = ata_chk_status(ap); + if (unlikely(status & ATA_BUSY)) + goto idle_irq; + + DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", + ap->id, qc->tf.protocol, ap->hsm_task_state, status); + + /* ack bmdma irq events */ + ap->ops->irq_clear(ap); + + ata_hsm_move(ap, qc, status, 0); return 1; /* irq handled */ idle_irq: @@ -4382,11 +4541,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) ap = host_set->ports[i]; if (ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN)) && + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && (qc->flags & ATA_QCFLAG_ACTIVE)) handled |= ata_host_intr(ap, qc); } @@ -4479,14 +4638,15 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev) * Flush the cache on the drive, if appropriate, then issue a * standbynow command. 
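
The suspend path now carries the pm_message_t through so that swsusp's freeze phase can keep the disk spinning while the image is written. A hypothetical wrapper that forwards the message for every device on a port:

static int foo_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	int i, rc = 0;

	/* PM_EVENT_FREEZE skips the STANDBYNOW inside ata_device_suspend() */
	for (i = 0; i < ATA_MAX_DEVICES && rc == 0; i++)
		rc = ata_device_suspend(ap, &ap->device[i], mesg);

	return rc;
}
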
*/ -int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) +int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) { if (!ata_dev_present(dev)) return 0; if (dev->class == ATA_DEV_ATA) ata_flush_cache(ap, dev); - ata_standby_drive(ap, dev); + if (state.event != PM_EVENT_FREEZE) + ata_standby_drive(ap, dev); ap->flags |= ATA_FLAG_SUSPENDED; return 0; } @@ -4506,7 +4666,7 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) int ata_port_start (struct ata_port *ap) { - struct device *dev = ap->host_set->dev; + struct device *dev = ap->dev; int rc; ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); @@ -4539,7 +4699,7 @@ int ata_port_start (struct ata_port *ap) void ata_port_stop (struct ata_port *ap) { - struct device *dev = ap->host_set->dev; + struct device *dev = ap->dev; dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); ata_pad_free(ap, dev); @@ -4605,6 +4765,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, ap->host = host; ap->ctl = ATA_DEVCTL_OBS; ap->host_set = host_set; + ap->dev = ent->dev; ap->port_no = port_no; ap->hard_port_no = ent->legacy_mode ? ent->hard_port_no : port_no; @@ -4620,8 +4781,13 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, INIT_WORK(&ap->port_task, NULL, NULL); INIT_LIST_HEAD(&ap->eh_done_q); - for (i = 0; i < ATA_MAX_DEVICES; i++) - ap->device[i].devno = i; + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; + dev->devno = i; + dev->pio_mask = UINT_MAX; + dev->mwdma_mask = UINT_MAX; + dev->udma_mask = UINT_MAX; + } #ifdef ATA_IRQ_TRAP ap->stats.unhandled_irq = 1; @@ -4655,6 +4821,14 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, int rc; DPRINTK("ENTER\n"); + + if (!ent->port_ops->probe_reset && + !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { + printk(KERN_ERR "ata%u: no reset mechanism available\n", + port_no); + return NULL; + } + host = scsi_host_alloc(ent->sht, sizeof(struct ata_port)); if (!host) return NULL; @@ -4815,7 +4989,7 @@ err_free_ret: * ata_host_set_remove - PCI layer callback for device removal * @host_set: ATA host set that was removed * - * Unregister all objects associated with this host set. Free those + * Unregister all objects associated with this host set. Free those * objects. * * LOCKING: @@ -5087,6 +5261,8 @@ EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_std_probe_reset); EXPORT_SYMBOL_GPL(ata_drive_probe_reset); EXPORT_SYMBOL_GPL(ata_dev_revalidate); +EXPORT_SYMBOL_GPL(ata_dev_classify); +EXPORT_SYMBOL_GPL(ata_dev_pair); EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_busy_sleep); @@ -5097,7 +5273,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_error); EXPORT_SYMBOL_GPL(ata_scsi_slave_config); EXPORT_SYMBOL_GPL(ata_scsi_release); EXPORT_SYMBOL_GPL(ata_host_intr); -EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); EXPORT_SYMBOL_GPL(ata_scsi_simulate); @@ -5116,6 +5291,8 @@ EXPORT_SYMBOL_GPL(ata_pci_init_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); EXPORT_SYMBOL_GPL(ata_pci_device_suspend); EXPORT_SYMBOL_GPL(ata_pci_device_resume); +EXPORT_SYMBOL_GPL(ata_pci_default_filter); +EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); #endif /* CONFIG_PCI */ EXPORT_SYMBOL_GPL(ata_device_suspend);
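
Finally, a worked illustration of the per-device mask lifecycle this patch introduces: ata_host_init() now opens each of dev->pio_mask/mwdma_mask/udma_mask to UINT_MAX, ata_dev_xfermask() ANDs in the IDENTIFY, host-controller and blacklist limits, and mode selection then takes the highest surviving bit (compare ata_xfer_mask2mode()). The numeric limits below are made up for the example.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int udma_mask = UINT_MAX;	/* ata_host_init() default */
	int mode = -1;

	udma_mask &= 0x7f;		/* controller supports UDMA0-6 */
	udma_mask &= 0x3f;		/* IDENTIFY word 88 says UDMA0-5 */
	udma_mask &= 0x1f;		/* e.g. a cable limit: UDMA0-4 */

	while (udma_mask) {		/* index of the highest set bit */
		udma_mask >>= 1;
		mode++;
	}

	printf("best UDMA mode: %d\n", mode);	/* prints 4 */
	return 0;
}
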