#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/hardirq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
static kmem_cache_t *scsi_io_context_cache;
-static void scsi_end_async(struct request *req)
+static void scsi_end_async(struct request *req, int uptodate)
{
struct scsi_io_context *sioc = req->end_io_data;
* scsi_execute_async - insert request
* @sdev: scsi device
* @cmd: scsi command
+ * @cmd_len: length of scsi cdb
* @data_direction: data direction
* @buffer: data buffer (this can be a kernel buffer or scatterlist)
* @bufflen: len of buffer
- * @flags: or into request flags
+ * @use_sg: if buffer is a scatterlist, the number of elements in it
+ * @timeout: request timeout in jiffies
+ * @retries: number of times to retry the request
+ * @privdata: data passed to the done() callback
+ * @done: callback invoked when the request completes
+ * @gfp: memory allocation flags
**/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
+ int cmd_len, int data_direction, void *buffer, unsigned bufflen,
int use_sg, int timeout, int retries, void *privdata,
void (*done)(void *, char *, int, int), gfp_t gfp)
{
if (err)
goto free_req;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
+ req->cmd_len = cmd_len;
memcpy(req->cmd, cmd, req->cmd_len);
req->sense = sioc->sense;
req->sense_len = 0;
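
Illustration, not part of the patch: a minimal sketch of a caller updated
for the new signature. The names my_sdev, my_buf, my_buflen, my_privdata
and my_done() are hypothetical; the point is that the CDB length is now
passed explicitly instead of being derived via COMMAND_SIZE().

	/* Hypothetical completion callback: result carries req->errors,
	 * sense points at the sense buffer filled in on completion. */
	static void my_done(void *privdata, char *sense, int result, int resid)
	{
		/* inspect result and sense here */
	}

	unsigned char cdb[10] = { READ_10, };
	int err;

	err = scsi_execute_async(my_sdev, cdb, sizeof(cdb), DMA_FROM_DEVICE,
				 my_buf, my_buflen, /* use_sg */ 0,
				 /* timeout */ 30 * HZ, /* retries */ 3,
				 my_privdata, my_done, GFP_KERNEL);
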
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags);
/*
int sense_valid = 0;
int sense_deferred = 0;
- if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
- return;
-
/*
* Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the
return BLKPREP_KILL;
}
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct scsi_driver *drv;
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
- if (drv->prepare_flush)
- return drv->prepare_flush(q, rq);
- }
-
- return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct request *flush_rq = rq->end_io_data;
- struct scsi_driver *drv;
-
- if (flush_rq->errors) {
- printk("scsi: barrier error, disabling flush support\n");
- blk_queue_ordered(q, QUEUE_ORDERED_NONE);
- }
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
- drv->end_flush(q, rq);
- }
-}
-
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
sector_t *error_sector)
{
return -EOPNOTSUPP;
}
-static void scsi_generic_done(struct scsi_cmnd *cmd)
+static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
BUG_ON(!blk_pc_request(cmd->request));
/*
scsi_io_completion(cmd, cmd->bufflen, 0);
}
+static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+
+ BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
+ memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+ cmd->cmd_len = req->cmd_len;
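+ /* Derive the DMA transfer direction from the request; a request
+  * with no data does no transfer at all. */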
+ if (!req->data_len)
+ cmd->sc_data_direction = DMA_NONE;
+ else if (rq_data_dir(req) == WRITE)
+ cmd->sc_data_direction = DMA_TO_DEVICE;
+ else
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ cmd->transfersize = req->data_len;
+ cmd->allowed = req->retries;
+ cmd->timeout_per_command = req->timeout;
+ cmd->done = scsi_blk_pc_done;
+}
+
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
* happening now.
*/
if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
- struct scsi_driver *drv;
int ret;
/*
/*
* Initialize the actual SCSI command for this request.
*/
- if (req->rq_disk) {
+ if (req->flags & REQ_BLOCK_PC) {
+ scsi_setup_blk_pc_cmnd(cmd);
+ } else if (req->rq_disk) {
+ struct scsi_driver *drv;
+
drv = *(struct scsi_driver **)req->rq_disk->private_data;
if (unlikely(!drv->init_command(cmd))) {
scsi_release_buffers(cmd);
scsi_put_command(cmd);
goto kill;
}
- } else {
- memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
- cmd->cmd_len = req->cmd_len;
- if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else if (req->data_len)
- cmd->sc_data_direction = DMA_FROM_DEVICE;
- else
- cmd->sc_data_direction = DMA_NONE;
-
- cmd->transfersize = req->data_len;
- cmd->allowed = req->retries;
- cmd->timeout_per_command = req->timeout;
- cmd->done = scsi_generic_done;
}
}
__scsi_done(cmd);
}
+static void scsi_softirq_done(struct request *rq)
+{
+ struct scsi_cmnd *cmd = rq->completion_data;
+ /* Total time budget for the command: the initial issue plus each allowed retry. */
+ unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+ int disposition;
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "timing out command, waited %lus\n",
+ wait_for/HZ);
+ disposition = SUCCESS;
+ }
+
+ scsi_log_completion(cmd, disposition);
+
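+ /* Complete, retry, requeue or escalate to the error handler. */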
+ switch (disposition) {
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_retry_command(cmd);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ if (!scsi_eh_scmd_add(cmd, 0))
+ scsi_finish_command(cmd);
+ }
+}
+
/*
* Function: scsi_request_fn()
*
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-
- /*
- * ordered tags are superior to flush ordering
- */
- if (shost->ordered_tag)
- blk_queue_ordered(q, QUEUE_ORDERED_TAG);
- else if (shost->ordered_flush) {
- blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
- q->prepare_flush_fn = scsi_prepare_flush_fn;
- q->end_flush_fn = scsi_end_flush_fn;
- }
+ blk_queue_softirq_done(q, scsi_softirq_done);
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+struct work_queue_work {
+ struct work_struct work;
+ void (*fn)(void *);
+ void *data;
+};
+
+static void execute_in_process_context_work(void *data)
+{
+ void (*fn)(void *data);
+ struct work_queue_work *wqw = data;
+
+ fn = wqw->fn;
+ data = wqw->data;
+
+ kfree(wqw);
+
+ fn(data);
+}
+
+/**
+ * scsi_execute_in_process_context - reliably execute the routine in process context
+ * @fn: the function to execute
+ * @data: data to pass to the function
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns: 0 - function was executed
+ * 1 - function was scheduled for execution
+ * <0 - error
+ */
+int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
+{
+ struct work_queue_work *wqw;
+
+ if (!in_interrupt()) {
+ fn(data);
+ return 0;
+ }
+
+ wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
+
+ if (unlikely(!wqw)) {
+ printk(KERN_ERR "scsi_execute_in_process_context: failed to allocate work\n");
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
+ wqw->fn = fn;
+ wqw->data = data;
+ schedule_work(&wqw->work);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
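
Usage sketch (illustrative; my_teardown() is a made-up name): a release
path that may be entered from interrupt context can hand off work that
needs to sleep:

	static void my_teardown(void *data)
	{
		struct scsi_device *sdev = data;

		/* Runs in process context, so it may sleep. */
	}

	/* Returns 0 and runs my_teardown() immediately when called from
	 * process context; from interrupt context it returns 1 and defers
	 * the call via schedule_work(). */
	scsi_execute_in_process_context(my_teardown, sdev);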