tf->flags |= tf_flags;
if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
+ ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
+ likely(tag != ATA_TAG_INTERNAL)) {
/* yay, NCQ */
if (!lba_48_ok(block, n_block))
return -ERANGE;
* the PIO timing number for the maximum. Turn it into
* a mask.
*/
- u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
+ u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
if (mode < 5) /* Valid PIO range */
pio_mask = (2 << mode) - 1;
else
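For reference, a worked example of the mask arithmetic above (the word value is illustrative): if word 51 reads 0x0200, the high byte gives maximum PIO mode 2, and the shift-and-subtract turns that into a bitmask with PIO modes 0-2 set.

	u16 word51 = 0x0200;                      /* illustrative value        */
	u8 mode = (word51 >> 8) & 0xFF;           /* = 2, the maximum PIO mode */
	unsigned int pio_mask = (2 << mode) - 1;  /* = 0x07: PIO 0, 1 and 2    */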
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
- * @data: data value to pass to workqueue function
+ * @data: data for @fn to use
* @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* LOCKING:
* Inherited from caller.
*/
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
unsigned long delay)
{
int rc;
if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
- PREPARE_WORK(&ap->port_task, fn, data);
+ PREPARE_DELAYED_WORK(&ap->port_task, fn);
+ ap->port_task_data = data;
- if (!delay)
- rc = queue_work(ata_wq, &ap->port_task);
- else
- rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+ rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
/* rc == 0 means that another user is using port task */
WARN_ON(rc == 0);
ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
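A minimal caller sketch under the converted signature: @fn now takes a struct work_struct * and recovers its argument from ap->port_task_data, so callers keep passing the data pointer to ata_port_queue_task() unchanged (the zero delay below is illustrative).

	/* run ata_pio_task() for @qc as soon as ata_wq gets to it; the
	 * handler picks @qc back up through ap->port_task_data */
	ata_port_queue_task(ap, ata_pio_task, qc, 0);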
-void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
ata_sg_init(qc, sg, n_elem);
qc->nsect = buflen / ATA_SECT_SIZE;
+ qc->nbytes = buflen;
}
qc->private_data = &wait;
}
/**
- * ata_exec_internal_sg - execute libata internal command
+ * ata_exec_internal - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen)
{
- struct scatterlist sg;
+ struct scatterlist *psg = NULL, sg;
+ unsigned int n_elem = 0;
- sg_init_one(&sg, buf, buflen);
+ if (dma_dir != DMA_NONE) {
+ WARN_ON(!buf);
+ sg_init_one(&sg, buf, buflen);
+ psg = &sg;
+ n_elem++;
+ }
- return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
+ return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}
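With the DMA_NONE branch above, a no-data internal command can legitimately pass a NULL buffer; a minimal sketch, with the cache-flush command chosen only for illustration:

	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_FLUSH_EXT;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	/* dma_dir == DMA_NONE: no sg entry is built and n_elem stays 0 */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);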
/**
}
tf.protocol = ATA_PROT_PIO;
-
- /* presence detection using polling IDENTIFY? */
- if (flags & ATA_READID_DETECT)
- tf.flags |= ATA_TFLAG_POLLING;
+ tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS);
if (err_mask) {
- if ((flags & ATA_READID_DETECT) &&
- (err_mask & AC_ERR_NODEV_HINT)) {
+ if (err_mask & AC_ERR_NODEV_HINT) {
DPRINTK("ata%u.%d: NODEV after polling detection\n",
ap->id, dev->devno);
return -ENOENT;
 * DMA cycle timing is slower than or equal to the fastest PIO timing.
*/
- if (speed > XFER_PIO_4) {
+ if (speed > XFER_PIO_6) {
ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
}
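ata_timing_merge() keeps the slower (larger) value of each selected field, which is what guarantees the merged DMA timing never undercuts the PIO timing; a tiny illustration with made-up cycle times:

	struct ata_timing pio = { .cycle = 240 };	/* ns, illustrative */
	struct ata_timing dma = { .cycle = 120 };
	struct ata_timing res;

	ata_timing_merge(&pio, &dma, &res, ATA_TIMING_CYCLE);
	/* res.cycle == 240: the slower of the two cycle times wins */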
int i, rc = 0, used_dma = 0, found = 0;
/* has private set_mode? */
- if (ap->ops->set_mode) {
- /* FIXME: make ->set_mode handle no device case and
- * return error code and failing device on failure.
- */
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
- if (ata_dev_ready(&ap->device[i])) {
- ap->ops->set_mode(ap);
- break;
- }
- }
- return 0;
- }
+ if (ap->ops->set_mode)
+ return ap->ops->set_mode(ap, r_failed_dev);
/* step 1: calculate xfer_mask */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
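Under the new contract the LLD's hook walks the devices itself, returns an error code on failure and reports the failing device through @r_failed_dev. A sketch of what a driver-side implementation might look like (example_set_mode and example_program_timings are hypothetical names, not from an in-tree driver):

	static int example_set_mode(struct ata_port *ap,
				    struct ata_device **r_failed_dev)
	{
		int i;

		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			struct ata_device *dev = &ap->device[i];

			if (!ata_dev_ready(dev))
				continue;

			/* hypothetical hardware programming helper */
			if (example_program_timings(dev)) {
				*r_failed_dev = dev;
				return -EIO;
			}
			dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
			dev->xfer_shift = ATA_SHIFT_PIO;
		}
		return 0;
	}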
const u16 *new_id)
{
const u16 *old_id = dev->id;
- unsigned char model[2][41], serial[2][21];
+ unsigned char model[2][ATA_ID_PROD_LEN + 1];
+ unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
u64 new_n_sectors;
if (dev->class != new_class) {
return 0;
}
- ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
- ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
- ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
- ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
+ ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
+ ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
+ ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
+ ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
new_n_sectors = ata_id_n_sectors(new_id);
if (strcmp(model[0], model[1])) {
unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
- unsigned char model_num[40];
- unsigned char model_rev[16];
+ unsigned char model_num[ATA_ID_PROD_LEN];
+ unsigned char model_rev[ATA_ID_FW_REV_LEN];
unsigned int nlen, rlen;
const struct ata_blacklist_entry *ad = ata_device_blacklist;
- ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
- sizeof(model_num));
- ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
- sizeof(model_rev));
+ ata_id_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ ata_id_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
nlen = ata_strim(model_num, sizeof(model_num));
rlen = ata_strim(model_rev, sizeof(model_rev));
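For reference, ATA_ID_PROD_LEN, ATA_ID_SERNO_LEN and ATA_ID_FW_REV_LEN are the raw IDENTIFY field widths (40, 20 and 8 bytes). ata_id_c_string() NUL-terminates its output, hence the LEN + 1 buffers in ata_dev_same_device(), while the blacklist buffers are filled by ata_id_string() without termination and stay at LEN. A short sketch:

	unsigned char model[ATA_ID_PROD_LEN + 1];	/* 40 chars + NUL */

	/* copy, byte-swap, trim and NUL-terminate the product field */
	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));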
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-
-static void ata_sg_clean(struct ata_queued_cmd *qc)
+void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->__sg;
return poll_next;
}
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
{
- struct ata_queued_cmd *qc = _data;
- struct ata_port *ap = qc->ap;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, port_task.work);
+ struct ata_queued_cmd *qc = ap->port_task_data;
u8 status;
int poll_next;
if (ap->flags & ATA_FLAG_PIO_POLLING) {
switch (qc->tf.protocol) {
case ATA_PROT_PIO:
+ case ATA_PROT_NODATA:
case ATA_PROT_ATAPI:
case ATA_PROT_ATAPI_NODATA:
qc->tf.flags |= ATA_TFLAG_POLLING;
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
- INIT_WORK(&ap->port_task, NULL, NULL);
- INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
- INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+ INIT_DELAYED_WORK(&ap->port_task, NULL);
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
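The INIT_DELAYED_WORK() conversion pairs with the container_of() pattern already used in ata_pio_task() above; a standalone sketch of the same idiom with hypothetical names:

	#include <linux/workqueue.h>

	static struct workqueue_struct *foo_wq;	/* hypothetical queue */

	struct foo_port {
		struct delayed_work task;
		void *task_data;
	};

	static void foo_task(struct work_struct *work)
	{
		/* @work is the work_struct embedded in the delayed_work,
		 * so recover the owning object via the .work member */
		struct foo_port *fp =
			container_of(work, struct foo_port, task.work);
		void *data = fp->task_data;

		(void)data;	/* a real handler body would use @data here */
	}

	static void foo_queue_task(struct foo_port *fp, void *data,
				   unsigned long delay)
	{
		fp->task_data = data;
		INIT_DELAYED_WORK(&fp->task, foo_task);
		queue_delayed_work(foo_wq, &fp->task, delay);
	}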
int rc;
DPRINTK("ENTER\n");
-
+
if (ent->irq == 0) {
dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
return 0;