static kmem_cache_t *scsi_io_context_cache;
-static void scsi_end_async(struct request *req)
+static void scsi_end_async(struct request *req, int uptodate)
{
struct scsi_io_context *sioc = req->end_io_data;
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags);
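end_that_request_last() now takes an uptodate argument: 1 means success, 0 stands for -EIO, and a negative value names the specific error. The locked hunk above is the tail of scsi_end_request(), which already has the correct value in hand and just passes it through. Any driver completing requests by hand follows the same shape; a minimal sketch, with mydrv_end_request() and its error parameter invented for illustration:

	static void mydrv_end_request(request_queue_t *q, struct request *req,
				      int error)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (blk_rq_tagged(req))
			blk_queue_end_tag(q, req);
		/* error == 0 means success, which maps to uptodate == 1 */
		end_that_request_last(req, error ? error : 1);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

The next hunk is in scsi_io_completion(), where the barrier short-circuit also disappears: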
int sense_valid = 0;
int sense_deferred = 0;
- if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
- return;
-
/*
* Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the
return BLKPREP_KILL;
}
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct scsi_driver *drv;
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
- if (drv->prepare_flush)
- return drv->prepare_flush(q, rq);
- }
-
- return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct request *flush_rq = rq->end_io_data;
- struct scsi_driver *drv;
-
- if (flush_rq->errors) {
- printk("scsi: barrier error, disabling flush support\n");
- blk_queue_ordered(q, QUEUE_ORDERED_NONE);
- }
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
- drv->end_flush(q, rq);
- }
-}
-
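The driver-private flush hooks go away because barrier handling moves into the generic blk_ordered machinery; the blk_complete_barrier_rq() check dropped from scsi_io_completion() above is part of the same cleanup. Instead of a prepare/end pair wired up by the midlayer, the upper-level driver now registers a single prepare_flush callback when it declares its ordered mode. A sketch of what that callback looks like, modelled on the sd changes in the same series (SD_TIMEOUT and the exact request flags are sd's choices, not something this hunk dictates):

	static void sd_prepare_flush(request_queue_t *q, struct request *rq)
	{
		memset(rq->cmd, 0, sizeof(rq->cmd));
		rq->flags |= REQ_BLOCK_PC;
		rq->timeout = SD_TIMEOUT;
		rq->cmd[0] = SYNCHRONIZE_CACHE;
	}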
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
sector_t *error_sector)
{
scsi_io_completion(cmd, cmd->bufflen, 0);
}
+void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+
+ BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
+ memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+ cmd->cmd_len = req->cmd_len;
+ if (!req->data_len)
+ cmd->sc_data_direction = DMA_NONE;
+ else if (rq_data_dir(req) == WRITE)
+ cmd->sc_data_direction = DMA_TO_DEVICE;
+ else
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ cmd->transfersize = req->data_len;
+ cmd->allowed = req->retries;
+ cmd->timeout_per_command = req->timeout;
+}
+EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
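Two things are worth noting about the new helper. First, it tests data_len before the data direction, so a zero-length request ends up DMA_NONE even when rq_data_dir() reports WRITE; the inline code it replaces below ordered those tests the other way around. Second, since it is exported, upper-level drivers can call it from their init_command paths instead of duplicating the CDB copy. A hypothetical caller (mydrv_init_command() is invented for illustration):

	static int mydrv_init_command(struct scsi_cmnd *cmd)
	{
		struct request *rq = cmd->request;

		if (rq->flags & REQ_BLOCK_PC) {
			/* CDB, direction, transfer size, retries and
			 * timeout all come straight from the request */
			scsi_setup_blk_pc_cmnd(cmd);
			return 1;
		}

		/* filesystem requests build their own CDB here */
		return 0;
	}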
+
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
goto kill;
}
} else {
- memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
- cmd->cmd_len = req->cmd_len;
- if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else if (req->data_len)
- cmd->sc_data_direction = DMA_FROM_DEVICE;
- else
- cmd->sc_data_direction = DMA_NONE;
-
- cmd->transfersize = req->data_len;
- cmd->allowed = req->retries;
- cmd->timeout_per_command = req->timeout;
+ scsi_setup_blk_pc_cmnd(cmd);
cmd->done = scsi_generic_done;
}
}
blk_queue_segment_boundary(q, shost->dma_boundary);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
- /*
- * ordered tags are superior to flush ordering
- */
- if (shost->ordered_tag)
- blk_queue_ordered(q, QUEUE_ORDERED_TAG);
- else if (shost->ordered_flush) {
- blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
- q->prepare_flush_fn = scsi_prepare_flush_fn;
- q->end_flush_fn = scsi_end_flush_fn;
- }
-
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
return q;
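The ordered-mode setup leaves scsi_alloc_queue() entirely: the host adapter has no idea whether a given disk has a write-back cache, but the upper-level driver does once it has read the caching mode page. A sketch of the registration that takes its place, assuming sd performs it during revalidation with the sd_prepare_flush() sketched earlier (sdkp->WCE is the write-cache-enable bit sd already tracks):

	unsigned ordered;

	if (sdkp->WCE)
		ordered = QUEUE_ORDERED_DRAIN_FLUSH;	/* cache needs flushing */
	else
		ordered = QUEUE_ORDERED_DRAIN;		/* draining is enough */

	blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);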