X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=drivers%2Fscsi%2Flibata-core.c;h=1c1a7caf785e9e1fbf6659daa72c18d2004842e5;hb=8c65b4a60450590e79a28e9717ceffa9e4debb3f;hp=d568914c4344c061c4c52670cd705a17c87a70fa;hpb=3d3467f0fdf61a421361c00cf84fcf0f1a6dc1e8;p=powerpc.git diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index d568914c43..1c1a7caf78 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include "scsi.h" #include "scsi_priv.h" @@ -63,12 +64,13 @@ static unsigned int ata_busy_sleep (struct ata_port *ap, unsigned long tmout_pat, unsigned long tmout); +static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev); static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); static void ata_set_mode(struct ata_port *ap); static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); -static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); +static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); static int fgb(u32 bitmap); -static int ata_choose_xfer_mode(struct ata_port *ap, +static int ata_choose_xfer_mode(const struct ata_port *ap, u8 *xfer_mode_out, unsigned int *xfer_shift_out); static void __ata_qc_complete(struct ata_queued_cmd *qc); @@ -86,7 +88,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /** - * ata_tf_load - send taskfile registers to host controller + * ata_tf_load_pio - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set * @@ -96,7 +98,7 @@ MODULE_VERSION(DRV_VERSION); * Inherited from caller. */ -static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -154,7 +156,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) * Inherited from caller. */ -static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -223,7 +225,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) * LOCKING: * Inherited from caller. 
*/ -void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { if (ap->flags & ATA_FLAG_MMIO) ata_tf_load_mmio(ap, tf); @@ -243,7 +245,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host_set lock) */ -static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); @@ -264,7 +266,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host_set lock) */ -static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); @@ -284,7 +286,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) * LOCKING: * spin_lock_irqsave(host_set lock) */ -void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) +void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { if (ap->flags & ATA_FLAG_MMIO) ata_exec_command_mmio(ap, tf); @@ -292,28 +294,6 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) ata_exec_command_pio(ap, tf); } -/** - * ata_exec - issue ATA command to host controller - * @ap: port to which command is being issued - * @tf: ATA taskfile register set - * - * Issues PIO/MMIO write to ATA command register, with proper - * synchronization with interrupt handler / other threads. - * - * LOCKING: - * Obtains host_set lock. - */ - -static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) -{ - unsigned long flags; - - DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); - spin_lock_irqsave(&ap->host_set->lock, flags); - ap->ops->exec_command(ap, tf); - spin_unlock_irqrestore(&ap->host_set->lock, flags); -} - /** * ata_tf_to_host - issue ATA taskfile to host controller * @ap: port to which command is being issued @@ -324,30 +304,11 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) * other threads. * * LOCKING: - * Obtains host_set lock. - */ - -static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) -{ - ap->ops->tf_load(ap, tf); - - ata_exec(ap, tf); -} - -/** - * ata_tf_to_host_nolock - issue ATA taskfile to host controller - * @ap: port to which command is being issued - * @tf: ATA taskfile register set - * - * Issues ATA taskfile register set to ATA host controller, - * with proper synchronization with interrupt handler and - * other threads. 
- * - * LOCKING: * spin_lock_irqsave(host_set lock) */ -void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +static inline void ata_tf_to_host(struct ata_port *ap, + const struct ata_taskfile *tf) { ap->ops->tf_load(ap, tf); ap->ops->exec_command(ap, tf); @@ -369,6 +330,8 @@ static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; + tf->command = ata_check_status(ap); + tf->feature = inb(ioaddr->error_addr); tf->nsect = inb(ioaddr->nsect_addr); tf->lbal = inb(ioaddr->lbal_addr); tf->lbam = inb(ioaddr->lbam_addr); @@ -401,6 +364,8 @@ static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; + tf->command = ata_check_status(ap); + tf->feature = readb((void __iomem *)ioaddr->error_addr); tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); @@ -520,30 +485,6 @@ u8 ata_altstatus(struct ata_port *ap) } -/** - * ata_chk_err - Read device error reg - * @ap: port where the device is - * - * Reads ATA taskfile error register for - * currently-selected device and return its value. - * - * Note: may NOT be used as the check_err() entry in - * ata_port_operations. - * - * LOCKING: - * Inherited from caller. - */ -u8 ata_chk_err(struct ata_port *ap) -{ - if (ap->ops->check_err) - return ap->ops->check_err(ap); - - if (ap->flags & ATA_FLAG_MMIO) { - return readb((void __iomem *) ap->ioaddr.error_addr); - } - return inb(ap->ioaddr.error_addr); -} - /** * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure * @tf: Taskfile to convert @@ -557,7 +498,7 @@ u8 ata_chk_err(struct ata_port *ap) * Inherited from caller. */ -void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) +void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) { fis[0] = 0x27; /* Register - Host to Device FIS */ fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, @@ -598,7 +539,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) * Inherited from caller. */ -void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) +void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) { tf->command = fis[2]; /* status */ tf->feature = fis[3]; /* error */ @@ -616,79 +557,53 @@ void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) tf->hob_nsect = fis[13]; } -/** - * ata_prot_to_cmd - determine which read/write opcodes to use - * @protocol: ATA_PROT_xxx taskfile protocol - * @lba48: true is lba48 is present - * - * Given necessary input, determine which read/write commands - * to use to transfer data. - * - * LOCKING: - * None. 
- */ -static int ata_prot_to_cmd(int protocol, int lba48) -{ - int rcmd = 0, wcmd = 0; - - switch (protocol) { - case ATA_PROT_PIO: - if (lba48) { - rcmd = ATA_CMD_PIO_READ_EXT; - wcmd = ATA_CMD_PIO_WRITE_EXT; - } else { - rcmd = ATA_CMD_PIO_READ; - wcmd = ATA_CMD_PIO_WRITE; - } - break; - - case ATA_PROT_DMA: - if (lba48) { - rcmd = ATA_CMD_READ_EXT; - wcmd = ATA_CMD_WRITE_EXT; - } else { - rcmd = ATA_CMD_READ; - wcmd = ATA_CMD_WRITE; - } - break; - - default: - return -1; - } - - return rcmd | (wcmd << 8); -} +static const u8 ata_rw_cmds[] = { + /* pio multi */ + ATA_CMD_READ_MULTI, + ATA_CMD_WRITE_MULTI, + ATA_CMD_READ_MULTI_EXT, + ATA_CMD_WRITE_MULTI_EXT, + /* pio */ + ATA_CMD_PIO_READ, + ATA_CMD_PIO_WRITE, + ATA_CMD_PIO_READ_EXT, + ATA_CMD_PIO_WRITE_EXT, + /* dma */ + ATA_CMD_READ, + ATA_CMD_WRITE, + ATA_CMD_READ_EXT, + ATA_CMD_WRITE_EXT +}; /** - * ata_dev_set_protocol - set taskfile protocol and r/w commands - * @dev: device to examine and configure + * ata_rwcmd_protocol - set taskfile r/w commands and protocol + * @qc: command to examine and configure * - * Examine the device configuration, after we have - * read the identify-device page and configured the - * data transfer mode. Set internal state related to - * the ATA taskfile protocol (pio, pio mult, dma, etc.) - * and calculate the proper read/write commands to use. + * Examine the device configuration and tf->flags to calculate + * the proper read/write commands and protocol to use. * * LOCKING: * caller. */ -static void ata_dev_set_protocol(struct ata_device *dev) +void ata_rwcmd_protocol(struct ata_queued_cmd *qc) { - int pio = (dev->flags & ATA_DFLAG_PIO); - int lba48 = (dev->flags & ATA_DFLAG_LBA48); - int proto, cmd; + struct ata_taskfile *tf = &qc->tf; + struct ata_device *dev = qc->dev; - if (pio) - proto = dev->xfer_protocol = ATA_PROT_PIO; - else - proto = dev->xfer_protocol = ATA_PROT_DMA; + int index, lba48, write; + + lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; + write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; - cmd = ata_prot_to_cmd(proto, lba48); - if (cmd < 0) - BUG(); + if (dev->flags & ATA_DFLAG_PIO) { + tf->protocol = ATA_PROT_PIO; + index = dev->multi_count ? 0 : 4; + } else { + tf->protocol = ATA_PROT_DMA; + index = 8; + } - dev->read_cmd = cmd & 0xff; - dev->write_cmd = (cmd >> 8) & 0xff; + tf->command = ata_rw_cmds[index + lba48 + write]; } static const char * xfer_mode_str[] = { @@ -870,7 +785,7 @@ static unsigned int ata_devchk(struct ata_port *ap, * the event of failure. */ -unsigned int ata_dev_classify(struct ata_taskfile *tf) +unsigned int ata_dev_classify(const struct ata_taskfile *tf) { /* Apple's open source Darwin code hints that some devices only * put a proper signature into the LBA mid/high registers, @@ -922,8 +837,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) memset(&tf, 0, sizeof(tf)); - err = ata_chk_err(ap); ap->ops->tf_read(ap, &tf); + err = tf.feature; dev->class = ATA_DEV_NONE; @@ -962,7 +877,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) * caller. */ -void ata_dev_id_string(u16 *id, unsigned char *s, +void ata_dev_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; @@ -1079,7 +994,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, * caller. 
*/ -static inline void ata_dump_id(struct ata_device *dev) +static inline void ata_dump_id(const struct ata_device *dev) { DPRINTK("49==0x%04x " "53==0x%04x " @@ -1107,6 +1022,31 @@ static inline void ata_dump_id(struct ata_device *dev) dev->id[93]); } +/* + * Compute the PIO modes available for this device. This is not as + * trivial as it seems if we must consider early devices correctly. + * + * FIXME: pre IDE drive timing (do we care ?). + */ + +static unsigned int ata_pio_modes(const struct ata_device *adev) +{ + u16 modes; + + /* Usual case. Word 53 indicates word 88 is valid */ + if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) { + modes = adev->id[ATA_ID_PIO_MODES] & 0x03; + modes <<= 3; + modes |= 0x7; + return modes; + } + + /* If word 88 isn't valid then Word 51 holds the PIO timing number + for the maximum. Turn it into a mask and return it */ + modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; + return modes; +} + /** * ata_dev_identify - obtain IDENTIFY x DEVICE page * @ap: port on which device we wish to probe resides @@ -1135,7 +1075,6 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) unsigned int major_version; u16 tmp; unsigned long xfer_modes; - u8 status; unsigned int using_edd; DECLARE_COMPLETION(wait); struct ata_queued_cmd *qc; @@ -1189,8 +1128,11 @@ retry: else wait_for_completion(&wait); - status = ata_chk_status(ap); - if (status & ATA_ERR) { + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->ops->tf_read(ap, &qc->tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (qc->tf.command & ATA_ERR) { /* * arg! EDD works for all test cases, but seems to return * the ATA signature for some ATAPI devices. Until the @@ -1203,7 +1145,7 @@ retry: * to have this problem. */ if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) { - u8 err = ata_chk_err(ap); + u8 err = qc->tf.feature; if (err & ATA_ABORTED) { dev->class = ATA_DEV_ATAPI; qc->cursg = 0; @@ -1240,10 +1182,8 @@ retry: xfer_modes = dev->id[ATA_ID_UDMA_MODES]; if (!xfer_modes) xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; - if (!xfer_modes) { - xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3); - xfer_modes |= (0x7 << ATA_SHIFT_PIO); - } + if (!xfer_modes) + xfer_modes = ata_pio_modes(dev); ata_dump_id(dev); @@ -1266,9 +1206,15 @@ retry: * anything else.. * Some drives were very specific about that exact sequence. */ - if (major_version < 4 || (!ata_id_has_lba(dev->id))) + if (major_version < 4 || (!ata_id_has_lba(dev->id))) { ata_dev_init_params(ap, dev); + /* current CHS translation info (id[53-58]) might be + * changed. reread the identify device info. + */ + ata_dev_reread_id(ap, dev); + } + if (ata_id_has_lba(dev->id)) { dev->flags |= ATA_DFLAG_LBA; @@ -1348,7 +1294,7 @@ err_out: } -static inline u8 ata_dev_knobble(struct ata_port *ap) +static inline u8 ata_dev_knobble(const struct ata_port *ap) { return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); } @@ -1534,7 +1480,153 @@ void ata_port_disable(struct ata_port *ap) ap->flags |= ATA_FLAG_PORT_DISABLED; } -static struct { +/* + * This mode timing computation functionality is ported over from + * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik + */ +/* + * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). + * These were taken from ATA/ATAPI-6 standard, rev 0a, except + * for PIO 5, which is a nonstandard extension and UDMA6, which + * is currently supported only by Maxtor drives. 
+ */ + +static const struct ata_timing ata_timing[] = { + + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, + { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, + + { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, + { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, + { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, + +/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ + + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, + { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, + { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, + + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, + { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, + { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, + +/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */ + { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, + { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, + + { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, + { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, + { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, + +/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ + + { 0xFF } +}; + +#define ENOUGH(v,unit) (((v)-1)/(unit)+1) +#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) + +static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) +{ + q->setup = EZ(t->setup * 1000, T); + q->act8b = EZ(t->act8b * 1000, T); + q->rec8b = EZ(t->rec8b * 1000, T); + q->cyc8b = EZ(t->cyc8b * 1000, T); + q->active = EZ(t->active * 1000, T); + q->recover = EZ(t->recover * 1000, T); + q->cycle = EZ(t->cycle * 1000, T); + q->udma = EZ(t->udma * 1000, UT); +} + +void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, + struct ata_timing *m, unsigned int what) +{ + if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); + if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); + if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); + if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); + if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); + if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); + if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); + if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); +} + +static const struct ata_timing* ata_timing_find_mode(unsigned short speed) +{ + const struct ata_timing *t; + + for (t = ata_timing; t->mode != speed; t++) + if (t->mode == 0xFF) + return NULL; + return t; +} + +int ata_timing_compute(struct ata_device *adev, unsigned short speed, + struct ata_timing *t, int T, int UT) +{ + const struct ata_timing *s; + struct ata_timing p; + + /* + * Find the mode. + */ + + if (!(s = ata_timing_find_mode(speed))) + return -EINVAL; + + /* + * If the drive is an EIDE drive, it can tell us it needs extended + * PIO/MW_DMA cycle timing. + */ + + if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ + memset(&p, 0, sizeof(p)); + if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { + if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; + else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; + } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { + p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; + } + ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); + } + + /* + * Convert the timing to bus clock counts. 
+ */ + + ata_timing_quantize(s, t, T, UT); + + /* + * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T + * and some other commands. We have to ensure that the DMA cycle timing is + * slower/equal than the fastest PIO timing. + */ + + if (speed > XFER_PIO_4) { + ata_timing_compute(adev, adev->pio_mode, &p, T, UT); + ata_timing_merge(&p, t, t, ATA_TIMING_ALL); + } + + /* + * Lenghten active & recovery time so that cycle time is correct. + */ + + if (t->act8b + t->rec8b < t->cyc8b) { + t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; + t->rec8b = t->cyc8b - t->act8b; + } + + if (t->active + t->recover < t->cycle) { + t->active += (t->cycle - (t->active + t->recover)) / 2; + t->recover = t->cycle - t->active; + } + + return 0; +} + +static const struct { unsigned int shift; u8 base; } xfer_mode_classes[] = { @@ -1641,7 +1733,7 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, */ static void ata_set_mode(struct ata_port *ap) { - unsigned int i, xfer_shift; + unsigned int xfer_shift; u8 xfer_mode; int rc; @@ -1670,11 +1762,6 @@ static void ata_set_mode(struct ata_port *ap) if (ap->ops->post_set_mode) ap->ops->post_set_mode(ap); - for (i = 0; i < 2; i++) { - struct ata_device *dev = &ap->device[i]; - ata_dev_set_protocol(dev); - } - return; err_out: @@ -1784,12 +1871,14 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) * * LOCKING: * PCI/etc. bus probe sem. + * Obtains host_set lock. * */ static unsigned int ata_bus_edd(struct ata_port *ap) { struct ata_taskfile tf; + unsigned long flags; /* set up execute-device-diag (bus reset) taskfile */ /* also, take interrupts to a known state (disabled) */ @@ -1800,7 +1889,9 @@ static unsigned int ata_bus_edd(struct ata_port *ap) tf.protocol = ATA_PROT_NODATA; /* do bus reset */ + spin_lock_irqsave(&ap->host_set->lock, flags); ata_tf_to_host(ap, &tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); /* spec says at least 2ms. but who knows with those * crazy ATAPI devices... 
@@ -1948,7 +2039,8 @@ err_out: DPRINTK("EXIT\n"); } -static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev) +static void ata_pr_blacklisted(const struct ata_port *ap, + const struct ata_device *dev) { printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", ap->id, dev->devno); @@ -1986,7 +2078,7 @@ static const char * ata_dma_blacklist [] = { "_NEC DV5800A", }; -static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) +static int ata_dma_blacklisted(const struct ata_device *dev) { unsigned char model_num[40]; char *s; @@ -2011,9 +2103,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) return 0; } -static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) +static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift) { - struct ata_device *master, *slave; + const struct ata_device *master, *slave; unsigned int mask; master = &ap->device[0]; @@ -2025,14 +2117,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) mask = ap->udma_mask; if (ata_dev_present(master)) { mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(ap, master)) { + if (ata_dma_blacklisted(master)) { mask = 0; ata_pr_blacklisted(ap, master); } } if (ata_dev_present(slave)) { mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(ap, slave)) { + if (ata_dma_blacklisted(slave)) { mask = 0; ata_pr_blacklisted(ap, slave); } @@ -2042,14 +2134,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) mask = ap->mwdma_mask; if (ata_dev_present(master)) { mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(ap, master)) { + if (ata_dma_blacklisted(master)) { mask = 0; ata_pr_blacklisted(ap, master); } } if (ata_dev_present(slave)) { mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(ap, slave)) { + if (ata_dma_blacklisted(slave)) { mask = 0; ata_pr_blacklisted(ap, slave); } @@ -2113,7 +2205,7 @@ static int fgb(u32 bitmap) * Zero on success, negative on error. 
*/ -static int ata_choose_xfer_mode(struct ata_port *ap, +static int ata_choose_xfer_mode(const struct ata_port *ap, u8 *xfer_mode_out, unsigned int *xfer_shift_out) { @@ -2181,6 +2273,62 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) DPRINTK("EXIT\n"); } +/** + * ata_dev_reread_id - Reread the device identify device info + * @ap: port where the device is + * @dev: device to reread the identify device info + * + * LOCKING: + */ + +static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev) +{ + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + unsigned long flags; + int rc; + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + ata_sg_init_one(qc, dev->id, sizeof(dev->id)); + qc->dma_dir = DMA_FROM_DEVICE; + + if (dev->class == ATA_DEV_ATA) { + qc->tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + qc->tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + qc->tf.flags |= ATA_TFLAG_DEVICE; + qc->tf.protocol = ATA_PROT_PIO; + qc->nsect = 1; + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + goto err_out; + + wait_for_completion(&wait); + + swap_buf_le16(dev->id, ATA_ID_WORDS); + + ata_dump_id(dev); + + DPRINTK("EXIT\n"); + + return; +err_out: + ata_port_disable(ap); +} + /** * ata_dev_init_params - Issue INIT DEV PARAMS command * @ap: Port associated with device @dev @@ -2242,8 +2390,9 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) static void ata_sg_clean(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->sg; + struct scatterlist *sg = qc->__sg; int dir = qc->dma_dir; + void *pad_buf = NULL; assert(qc->flags & ATA_QCFLAG_DMAMAP); assert(sg != NULL); @@ -2253,14 +2402,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) DPRINTK("unmapping %u sg elements\n", qc->n_elem); - if (qc->flags & ATA_QCFLAG_SG) + /* if we padded the buffer out to 32-bit bound, and data + * xfer direction is from-device, we must copy from the + * pad buffer back into the supplied buffer + */ + if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) + pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); + + if (qc->flags & ATA_QCFLAG_SG) { dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); - else + /* restore last sg */ + sg[qc->orig_n_elem - 1].length += qc->pad_len; + if (pad_buf) { + struct scatterlist *psg = &qc->pad_sgent; + void *addr = kmap_atomic(psg->page, KM_IRQ0); + memcpy(addr + psg->offset, pad_buf, qc->pad_len); + kunmap_atomic(psg->page, KM_IRQ0); + } + } else { dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), dir); + /* restore sg */ + sg->length += qc->pad_len; + if (pad_buf) + memcpy(qc->buf_virt + sg->length - qc->pad_len, + pad_buf, qc->pad_len); + } qc->flags &= ~ATA_QCFLAG_DMAMAP; - qc->sg = NULL; + qc->__sg = NULL; } /** @@ -2276,15 +2446,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) */ static void ata_fill_sg(struct ata_queued_cmd *qc) { - struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; - unsigned int idx, nelem; + struct scatterlist *sg; + unsigned int idx; - assert(sg != NULL); + assert(qc->__sg != NULL); assert(qc->n_elem > 0); idx = 0; - for (nelem = qc->n_elem; nelem; nelem--,sg++) { + ata_for_each_sg(sg, qc) { u32 addr, offset; u32 sg_len, len; @@ -2375,14 +2545,13 @@ void ata_sg_init_one(struct 
ata_queued_cmd *qc, void *buf, unsigned int buflen) qc->flags |= ATA_QCFLAG_SINGLE; memset(&qc->sgent, 0, sizeof(qc->sgent)); - qc->sg = &qc->sgent; + qc->__sg = &qc->sgent; qc->n_elem = 1; + qc->orig_n_elem = 1; qc->buf_virt = buf; - sg = qc->sg; - sg->page = virt_to_page(buf); - sg->offset = (unsigned long) buf & ~PAGE_MASK; - sg->length = buflen; + sg = qc->__sg; + sg_init_one(sg, buf, buflen); } /** @@ -2403,8 +2572,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem) { qc->flags |= ATA_QCFLAG_SG; - qc->sg = sg; + qc->__sg = sg; qc->n_elem = n_elem; + qc->orig_n_elem = n_elem; } /** @@ -2424,13 +2594,39 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; int dir = qc->dma_dir; - struct scatterlist *sg = qc->sg; + struct scatterlist *sg = qc->__sg; dma_addr_t dma_address; + /* we must lengthen transfers to end on a 32-bit boundary */ + qc->pad_len = sg->length & 3; + if (qc->pad_len) { + void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); + struct scatterlist *psg = &qc->pad_sgent; + + assert(qc->dev->class == ATA_DEV_ATAPI); + + memset(pad_buf, 0, ATA_DMA_PAD_SZ); + + if (qc->tf.flags & ATA_TFLAG_WRITE) + memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, + qc->pad_len); + + sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); + sg_dma_len(psg) = ATA_DMA_PAD_SZ; + /* trim sg */ + sg->length -= qc->pad_len; + + DPRINTK("padding done, sg->length=%u pad_len=%u\n", + sg->length, qc->pad_len); + } + dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, sg->length, dir); - if (dma_mapping_error(dma_address)) + if (dma_mapping_error(dma_address)) { + /* restore sg */ + sg->length += qc->pad_len; return -1; + } sg_dma_address(sg) = dma_address; sg_dma_len(sg) = sg->length; @@ -2458,16 +2654,54 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) static int ata_sg_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg = qc->sg; + struct scatterlist *sg = qc->__sg; + struct scatterlist *lsg = &sg[qc->n_elem - 1]; int n_elem, dir; VPRINTK("ENTER, ata%u\n", ap->id); assert(qc->flags & ATA_QCFLAG_SG); + /* we must lengthen transfers to end on a 32-bit boundary */ + qc->pad_len = lsg->length & 3; + if (qc->pad_len) { + void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); + struct scatterlist *psg = &qc->pad_sgent; + unsigned int offset; + + assert(qc->dev->class == ATA_DEV_ATAPI); + + memset(pad_buf, 0, ATA_DMA_PAD_SZ); + + /* + * psg->page/offset are used to copy to-be-written + * data in this function or read data in ata_sg_clean. + */ + offset = lsg->offset + lsg->length - qc->pad_len; + psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); + psg->offset = offset_in_page(offset); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + void *addr = kmap_atomic(psg->page, KM_IRQ0); + memcpy(pad_buf, addr + psg->offset, qc->pad_len); + kunmap_atomic(psg->page, KM_IRQ0); + } + + sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); + sg_dma_len(psg) = ATA_DMA_PAD_SZ; + /* trim last sg */ + lsg->length -= qc->pad_len; + + DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", + qc->n_elem - 1, lsg->length, qc->pad_len); + } + dir = qc->dma_dir; n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); - if (n_elem < 1) + if (n_elem < 1) { + /* restore last sg */ + lsg->length += qc->pad_len; return -1; + } DPRINTK("%d sg elements mapped\n", n_elem); @@ -2485,7 +2719,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) * None. 
(grabs host lock) */ -void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) +void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) { struct ata_port *ap = qc->ap; unsigned long flags; @@ -2493,19 +2727,19 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) spin_lock_irqsave(&ap->host_set->lock, flags); ap->flags &= ~ATA_FLAG_NOINTR; ata_irq_on(ap); - ata_qc_complete(qc, drv_stat); + ata_qc_complete(qc, err_mask); spin_unlock_irqrestore(&ap->host_set->lock, flags); } /** * ata_pio_poll - - * @ap: + * @ap: the target ata_port * * LOCKING: * None. (executing in kernel thread context) * * RETURNS: - * + * timeout value to use */ static unsigned long ata_pio_poll(struct ata_port *ap) @@ -2546,8 +2780,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap) } /** - * ata_pio_complete - - * @ap: + * ata_pio_complete - check if drive is busy or idle + * @ap: the target ata_port * * LOCKING: * None. (executing in kernel thread context) @@ -2590,7 +2824,7 @@ static int ata_pio_complete (struct ata_port *ap) ap->hsm_task_state = HSM_ST_IDLE; - ata_poll_qc_complete(qc, drv_stat); + ata_poll_qc_complete(qc, 0); /* another command may start at this point */ @@ -2599,7 +2833,7 @@ static int ata_pio_complete (struct ata_port *ap) /** - * swap_buf_le16 - + * swap_buf_le16 - swap halves of 16-words in place * @buf: Buffer to swap * @buf_words: Number of 16-bit words in buffer. * @@ -2608,6 +2842,7 @@ static int ata_pio_complete (struct ata_port *ap) * vice-versa. * * LOCKING: + * Inherited from caller. */ void swap_buf_le16(u16 *buf, unsigned int buf_words) { @@ -2630,7 +2865,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) * * LOCKING: * Inherited from caller. - * */ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2676,7 +2910,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, * * LOCKING: * Inherited from caller. - * */ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2716,7 +2949,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, * * LOCKING: * Inherited from caller. - * */ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2741,7 +2973,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, static void ata_pio_sector(struct ata_queued_cmd *qc) { int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->sg; + struct scatterlist *sg = qc->__sg; struct ata_port *ap = qc->ap; struct page *page; unsigned int offset; @@ -2791,7 +3023,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) { int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - struct scatterlist *sg = qc->sg; + struct scatterlist *sg = qc->__sg; struct ata_port *ap = qc->ap; struct page *page; unsigned char *buf; @@ -2824,7 +3056,7 @@ next_sg: return; } - sg = &qc->sg[qc->cursg]; + sg = &qc->__sg[qc->cursg]; page = sg->page; offset = sg->offset + qc->cursg_ofs; @@ -2869,7 +3101,6 @@ next_sg: * * LOCKING: * Inherited from caller. - * */ static void atapi_pio_bytes(struct ata_queued_cmd *qc) @@ -2905,8 +3136,8 @@ err_out: } /** - * ata_pio_sector - - * @ap: + * ata_pio_block - start PIO on a block + * @ap: the target ata_port * * LOCKING: * None. (executing in kernel thread context) @@ -2918,7 +3149,7 @@ static void ata_pio_block(struct ata_port *ap) u8 status; /* - * This is purely hueristic. This is a fast path. + * This is purely heuristic. 
This is a fast path. * Sometimes when we enter, BSY will be cleared in * a chk-status or two. If not, the drive is probably seeking * or something. Snooze for a couple msecs, then @@ -2961,18 +3192,15 @@ static void ata_pio_block(struct ata_port *ap) static void ata_pio_error(struct ata_port *ap) { struct ata_queued_cmd *qc; - u8 drv_stat; + + printk(KERN_WARNING "ata%u: PIO error\n", ap->id); qc = ata_qc_from_tag(ap, ap->active_tag); assert(qc != NULL); - drv_stat = ata_chk_status(ap); - printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", - ap->id, drv_stat); - ap->hsm_task_state = HSM_ST_IDLE; - ata_poll_qc_complete(qc, drv_stat | ATA_ERR); + ata_poll_qc_complete(qc, AC_ERR_ATA_BUS); } static void ata_pio_task(void *_data) @@ -3095,7 +3323,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) ap->id, qc->tf.command, drv_stat, host_stat); /* complete taskfile transaction */ - ata_qc_complete(qc, drv_stat); + ata_qc_complete(qc, ac_err_mask(drv_stat)); break; } @@ -3185,7 +3413,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, qc = ata_qc_new(ap); if (qc) { - qc->sg = NULL; + qc->__sg = NULL; qc->flags = 0; qc->scsicmd = NULL; qc->ap = ap; @@ -3195,19 +3423,12 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, qc->nbytes = qc->curbytes = 0; ata_tf_init(ap, &qc->tf, dev->devno); - - if (dev->flags & ATA_DFLAG_LBA) { - qc->tf.flags |= ATA_TFLAG_LBA; - - if (dev->flags & ATA_DFLAG_LBA48) - qc->tf.flags |= ATA_TFLAG_LBA48; - } } return qc; } -int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) +int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask) { return 0; } @@ -3245,7 +3466,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc) * * LOCKING: * spin_lock_irqsave(host_set lock) - * */ void ata_qc_free(struct ata_queued_cmd *qc) { @@ -3265,10 +3485,9 @@ void ata_qc_free(struct ata_queued_cmd *qc) * * LOCKING: * spin_lock_irqsave(host_set lock) - * */ -void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) +void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) { int rc; @@ -3285,7 +3504,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) qc->flags &= ~ATA_QCFLAG_ACTIVE; /* call completion callback */ - rc = qc->complete_fn(qc, drv_stat); + rc = qc->complete_fn(qc, err_mask); /* if callback indicates not to complete command (non-zero), * return immediately @@ -3392,7 +3611,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_NODATA: - ata_tf_to_host_nolock(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf); break; case ATA_PROT_DMA: @@ -3403,20 +3622,20 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc) case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ ata_qc_set_polling(qc); - ata_tf_to_host_nolock(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf); ap->hsm_task_state = HSM_ST; queue_work(ata_wq, &ap->pio_task); break; case ATA_PROT_ATAPI: ata_qc_set_polling(qc); - ata_tf_to_host_nolock(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf); queue_work(ata_wq, &ap->packet_task); break; case ATA_PROT_ATAPI_NODATA: ap->flags |= ATA_FLAG_NOINTR; - ata_tf_to_host_nolock(ap, &qc->tf); + ata_tf_to_host(ap, &qc->tf); queue_work(ata_wq, &ap->packet_task); break; @@ -3723,7 +3942,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap, ap->ops->irq_clear(ap); /* complete taskfile transaction */ - ata_qc_complete(qc, status); + ata_qc_complete(qc, ac_err_mask(status)); break; default: @@ -3759,7 +3978,6 @@ idle_irq: * * RETURNS: * IRQ_NONE or IRQ_HANDLED. 
- * */ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) @@ -3819,7 +4037,7 @@ static void atapi_packet_task(void *_data) /* sleep-wait for BSY to clear */ DPRINTK("busy wait\n"); if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) - goto err_out; + goto err_out_status; /* make sure DRQ is set */ status = ata_chk_status(ap); @@ -3856,8 +4074,10 @@ static void atapi_packet_task(void *_data) return; +err_out_status: + status = ata_chk_status(ap); err_out: - ata_poll_qc_complete(qc, ATA_ERR); + ata_poll_qc_complete(qc, __ac_err_mask(status)); } @@ -3871,16 +4091,24 @@ err_out: * May be used as the port_start() entry in ata_port_operations. * * LOCKING: + * Inherited from caller. */ int ata_port_start (struct ata_port *ap) { struct device *dev = ap->host_set->dev; + int rc; ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); if (!ap->prd) return -ENOMEM; + rc = ata_pad_alloc(ap, dev); + if (rc) { + dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); + return rc; + } + DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); return 0; @@ -3896,6 +4124,7 @@ int ata_port_start (struct ata_port *ap) * May be used as the port_stop() entry in ata_port_operations. * * LOCKING: + * Inherited from caller. */ void ata_port_stop (struct ata_port *ap) @@ -3903,6 +4132,7 @@ void ata_port_stop (struct ata_port *ap) struct device *dev = ap->host_set->dev; dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); + ata_pad_free(ap, dev); } void ata_host_stop (struct ata_host_set *host_set) @@ -3918,6 +4148,7 @@ void ata_host_stop (struct ata_host_set *host_set) * @do_unregister: 1 if we fully unregister, 0 to just stop the port * * LOCKING: + * Inherited from caller. */ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) @@ -3945,12 +4176,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) * * LOCKING: * Inherited from caller. - * */ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, struct ata_host_set *host_set, - struct ata_probe_ent *ent, unsigned int port_no) + const struct ata_probe_ent *ent, unsigned int port_no) { unsigned int i; @@ -3960,8 +4190,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, host->unique_id = ata_unique_id++; host->max_cmd_len = 12; - scsi_assign_lock(host, &host_set->lock); - ap->flags = ATA_FLAG_PORT_DISABLED; ap->id = host->unique_id; ap->host = host; @@ -4006,10 +4234,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, * * RETURNS: * New ata_port on success, for NULL on error. - * */ -static struct ata_port * ata_host_add(struct ata_probe_ent *ent, +static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, struct ata_host_set *host_set, unsigned int port_no) { @@ -4054,10 +4281,9 @@ err_out: * * RETURNS: * Number of ports registered. Zero on error (no ports registered). 
- * */ -int ata_device_add(struct ata_probe_ent *ent) +int ata_device_add(const struct ata_probe_ent *ent) { unsigned int count = 0, i; struct device *dev = ent->dev; @@ -4065,11 +4291,10 @@ int ata_device_add(struct ata_probe_ent *ent) DPRINTK("ENTER\n"); /* alloc a container for our list of ATA ports (buses) */ - host_set = kmalloc(sizeof(struct ata_host_set) + + host_set = kzalloc(sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)), GFP_KERNEL); if (!host_set) return 0; - memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *))); spin_lock_init(&host_set->lock); host_set->dev = dev; @@ -4109,10 +4334,8 @@ int ata_device_add(struct ata_probe_ent *ent) count++; } - if (!count) { - kfree(host_set); - return 0; - } + if (!count) + goto err_free_ret; /* obtain irq, that is shared between channels */ if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, @@ -4170,6 +4393,7 @@ err_out: ata_host_remove(host_set->ports[i], 1); scsi_host_put(host_set->ports[i]->host); } +err_free_ret: kfree(host_set); VPRINTK("EXIT, returning 0\n"); return 0; @@ -4186,7 +4410,6 @@ err_out: * Inherited from calling layer (may sleep). */ - void ata_host_set_remove(struct ata_host_set *host_set) { struct ata_port *ap; @@ -4276,19 +4499,17 @@ void ata_std_ports(struct ata_ioports *ioaddr) } static struct ata_probe_ent * -ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) +ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) { struct ata_probe_ent *probe_ent; - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); if (!probe_ent) { printk(KERN_ERR DRV_NAME "(%s): out of memory\n", kobject_name(&(dev->kobj))); return NULL; } - memset(probe_ent, 0, sizeof(*probe_ent)); - INIT_LIST_HEAD(&probe_ent->node); probe_ent->dev = dev; @@ -4368,15 +4589,14 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int return probe_ent; } -static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num) +static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num) { struct ata_probe_ent *probe_ent; - probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); + probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port); if (!probe_ent) return NULL; - probe_ent->legacy_mode = 1; probe_ent->n_ports = 1; probe_ent->hard_port_no = port_num; @@ -4420,7 +4640,6 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, stru * * RETURNS: * Zero on success, negative on errno-based value on error. - * */ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, @@ -4521,9 +4740,9 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, if (legacy_mode) { if (legacy_mode & (1 << 0)) - probe_ent = ata_pci_init_legacy_port(pdev, port, 0); + probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0); if (legacy_mode & (1 << 1)) - probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1); + probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1); } else { if (n_ports == 2) probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); @@ -4568,7 +4787,7 @@ err_out: * @pdev: PCI device that was removed * * PCI layer indicates to libata via this hook that - * hot-unplug or module unload event has occured. + * hot-unplug or module unload event has occurred. 
* Handle this by unregistering all objects associated * with this PCI device. Free those objects. Then finally * release PCI resources and disable device. @@ -4589,7 +4808,7 @@ void ata_pci_remove_one (struct pci_dev *pdev) } /* move to PCI subsystem */ -int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) +int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) { unsigned long tmp = 0; @@ -4687,7 +4906,6 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis); EXPORT_SYMBOL_GPL(ata_tf_from_fis); EXPORT_SYMBOL_GPL(ata_check_status); EXPORT_SYMBOL_GPL(ata_altstatus); -EXPORT_SYMBOL_GPL(ata_chk_err); EXPORT_SYMBOL_GPL(ata_exec_command); EXPORT_SYMBOL_GPL(ata_port_start); EXPORT_SYMBOL_GPL(ata_port_stop); @@ -4716,6 +4934,9 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string); EXPORT_SYMBOL_GPL(ata_dev_config); EXPORT_SYMBOL_GPL(ata_scsi_simulate); +EXPORT_SYMBOL_GPL(ata_timing_compute); +EXPORT_SYMBOL_GPL(ata_timing_merge); + #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); EXPORT_SYMBOL_GPL(ata_pci_host_stop);
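
The read/write command selection introduced above replaces the old ata_prot_to_cmd() switch with a flat ata_rw_cmds[] lookup: PIO-multi opcodes occupy slots 0-3, plain PIO slots 4-7, and DMA slots 8-11, with +2 added for LBA48 and +1 for a write. The standalone sketch below (not part of the patch) only illustrates that index arithmetic; the string names stand in for the ATA_CMD_* opcodes, and a boolean stands in for dev->multi_count.

#include <stdio.h>

/* Stand-ins for the ATA_CMD_* opcodes, in the same order as the
 * new ata_rw_cmds[] table added by this patch. */
static const char *rw_cmds[] = {
	/* pio multi */
	"READ_MULTI", "WRITE_MULTI", "READ_MULTI_EXT", "WRITE_MULTI_EXT",
	/* pio */
	"PIO_READ", "PIO_WRITE", "PIO_READ_EXT", "PIO_WRITE_EXT",
	/* dma */
	"READ", "WRITE", "READ_EXT", "WRITE_EXT",
};

int main(void)
{
	int pio, multi, lba48_flag, write;

	for (pio = 0; pio <= 1; pio++)
	for (multi = 0; multi <= 1; multi++)
	for (lba48_flag = 0; lba48_flag <= 1; lba48_flag++)
	for (write = 0; write <= 1; write++) {
		/* same arithmetic as ata_rwcmd_protocol() in the patch:
		 * LBA48 contributes 2, a write contributes 1, and the
		 * base index picks the pio-multi / pio / dma group. */
		int lba48 = lba48_flag ? 2 : 0;
		int index = pio ? (multi ? 0 : 4) : 8;

		printf("pio=%d multi=%d lba48=%d write=%d -> ATA_CMD_%s\n",
		       pio, multi, lba48_flag, write,
		       rw_cmds[index + lba48 + write]);
	}
	return 0;
}

The table layout keeps the selection to a single lookup instead of the nested switch/if logic that ata_prot_to_cmd() and ata_dev_set_protocol() used; note that in the real code the multi-count distinction only applies to PIO, so the DMA rows ignore it, as the duplicate output of the loop above shows.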