2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25 * Bjorn Ekwall <bj0rn@blox.se>
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
32 * Converted cli() code to spinlocks, Ingo Molnar
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 * out_of_space hacks, D. Gilbert (dpg) 990608
39 #define REVISION "Revision: 1.00"
40 #define VERSION "Id: scsi.c 1.00 2000/09/26"
42 #include <linux/config.h>
43 #include <linux/module.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/string.h>
48 #include <linux/slab.h>
49 #include <linux/ioport.h>
50 #include <linux/kernel.h>
51 #include <linux/stat.h>
52 #include <linux/blk.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/init.h>
56 #include <linux/smp_lock.h>
57 #include <linux/completion.h>
59 #define __KERNEL_SYSCALLS__
61 #include <linux/unistd.h>
62 #include <linux/spinlock.h>
64 #include <asm/system.h>
67 #include <asm/uaccess.h>
71 #include "constants.h"
74 #include <linux/kmod.h>
77 #undef USE_STATIC_SCSI_MEMORY
79 struct proc_dir_entry *proc_scsi;
82 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
83 static void scsi_dump_status(int level);
87 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
91 * Definitions and constants.
94 #define MIN_RESET_DELAY (2*HZ)
96 /* Do not call reset on error if we just did a reset within 15 sec. */
97 #define MIN_RESET_PERIOD (15*HZ)
100 * Macro to determine the size of SCSI command. This macro takes vendor
101 * unique commands into account. SCSI commands in groups 6 and 7 are
102 * vendor unique and we will depend upon the command length being
103 * supplied correctly in cmd_len.
105 #define CDB_SIZE(SCpnt) ((((SCpnt->cmnd[0] >> 5) & 7) < 6) ? \
106 COMMAND_SIZE(SCpnt->cmnd[0]) : SCpnt->cmd_len)
/* Monotonically increasing id assigned to each command (SCpnt->pid). */
111 unsigned long scsi_pid;
112 Scsi_Cmnd *last_cmnd;
113 /* Command group 3 is reserved and should never be used. */
114 const unsigned char scsi_command_size[8] =
/* Per-command serial number; incremented in scsi_dispatch_cmd and kept
 * nonzero so 0 can mean "not dispatched / already completed". */
119 static unsigned long serial_number;
/* Singly-linked (via bh_next) queue of completed commands awaiting the
 * bottom-half handler; protected by scsi_bhqueue_lock below. */
120 static Scsi_Cmnd *scsi_bh_queue_head;
121 static Scsi_Cmnd *scsi_bh_queue_tail;
124 * Note - the initial logging level can be set here to log events at boot time.
125 * After the system is up, you may enable logging via the /proc interface.
127 unsigned int scsi_logging_level;
129 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
148 * Function prototypes.
150 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
151 void scsi_build_commandblocks(Scsi_Device * SDpnt);
154 * These are the interface to the old error handling code. It should go away
157 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
158 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
159 extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
162 * Private interface into the new error handling code.
164 extern int scsi_new_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
167 * Function: scsi_initialize_queue()
169 * Purpose: Selects queue handler function for a device.
171 * Arguments: SDpnt - device for which we need a handler function.
175 * Lock status: No locking assumed or required.
177 * Notes: Most devices will end up using scsi_request_fn for the
178 * handler function (at least as things are done now).
179 * The "block" feature basically ensures that only one of
180 * the blocked hosts is active at one time, mainly to work around
181 * buggy DMA chipsets where the memory gets starved.
182 * For this case, we have a special handler function, which
183 * does some checks and ultimately calls scsi_request_fn.
185 * The single_lun feature is a similar special case.
187 * We handle these things by stacking the handlers. The
188 * special case handlers simply check a few conditions,
189 * and return if they are not supposed to do anything.
190 * In the event that things are OK, then they call the next
191 * handler in the list - ultimately they call scsi_request_fn
192 * to do the dirty deed.
/*
 * Set up the block-layer request queue for one SCSI device and install
 * scsi_request_fn as its handler (see the notes above on handler stacking).
 * NOTE(review): SHpnt is unused in the visible lines; the rest of the
 * body is not shown in this excerpt.
 */
194 void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
196 request_queue_t *q = &SDpnt->request_queue;
198 blk_init_queue(q, scsi_request_fn);
/* Presumably: head of queue not "active", and sector throttling on —
 * confirm against blk_queue_headactive/blk_queue_throttle_sectors. */
199 blk_queue_headactive(q, 0);
200 blk_queue_throttle_sectors(q, 1);
/* Stash the device so scsi_request_fn can recover it from the queue. */
201 q->queuedata = (void *) SDpnt;
/* Module parameter: allows "scsi_logging_level=n" when loaded as a module. */
205 MODULE_PARM(scsi_logging_level, "i");
206 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
/*
 * Boot-time "scsi_logging=" handler: any nonzero value switches all
 * logging bits on (~0); zero turns logging off entirely.
 */
210 static int __init scsi_logging_setup(char *str)
214 if (get_option(&str, &tmp) == 1) {
215 scsi_logging_level = (tmp ? ~0 : 0);
/* Error branch (closing lines not visible here): report correct usage. */
218 printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
219 "(n should be 0 or non-zero)\n");
224 __setup("scsi_logging=", scsi_logging_setup);
229 * Issue a command and wait for it to complete
/*
 * Completion callback used by scsi_wait_req(): mark the request as done
 * and wake the task, if any, blocked in wait_for_completion().
 */
232 static void scsi_wait_done(Scsi_Cmnd * SCpnt)
236 req = &SCpnt->request;
237 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
239 if (req->waiting != NULL) {
240 complete(req->waiting);
245 * This lock protects the freelist for all devices on the system.
246 * We could make this finer grained by having a single lock per
247 * device if it is ever found that there is excessive contention
250 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
253 * Used to protect insertion into and removal from the queue of
254 * commands to be processed by the bottom half handler.
256 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
259 * Function: scsi_allocate_request
261 * Purpose: Allocate a request descriptor.
263 * Arguments: device - device for which we want a request
265 * Lock status: No locks assumed to be held. This function is SMP-safe.
267 * Returns: Pointer to request block.
269 * Notes: With the new queueing code, it becomes important
270 * to track the difference between a command and a
271 * request. A request is a pending item in the queue that
272 * has not yet reached the top of the queue.
275 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
277 Scsi_Request *SRpnt = NULL;
/* A NULL device is a caller bug severe enough to stop the kernel. */
280 panic("No device passed to scsi_allocate_request().\n");
/* GFP_ATOMIC: must not sleep here. NOTE(review): the kmalloc-failure
 * path is not visible in this excerpt — presumably returns NULL. */
282 SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
288 memset(SRpnt, 0, sizeof(Scsi_Request));
289 SRpnt->sr_device = device;
290 SRpnt->sr_host = device->host;
/* Magic value lets later code sanity-check the request structure. */
291 SRpnt->sr_magic = SCSI_REQ_MAGIC;
/* Direction is unknown until the caller fills in the actual command. */
292 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
298 * Function: scsi_release_request
300 * Purpose: Release a request descriptor.
302 * Arguments: device - device for which we want a request
304 * Lock status: No locks assumed to be held. This function is SMP-safe.
306 * Returns: Pointer to request block.
308 * Notes: With the new queueing code, it becomes important
309 * to track the difference between a command and a
310 * request. A request is a pending item in the queue that
311 * has not yet reached the top of the queue. We still need
312 * to free a request when we are done with it, of course.
314 void scsi_release_request(Scsi_Request * req)
316 if( req->sr_command != NULL )
/* Release the underlying command block first, then clear the pointer so
 * a stale reference cannot be reused. (The freeing of req itself is not
 * visible in this excerpt.) */
318 scsi_release_command(req->sr_command);
319 req->sr_command = NULL;
326 * Function: scsi_allocate_device
328 * Purpose: Allocate a command descriptor.
330 * Arguments: device - device for which we want a command descriptor
331 * wait - 1 if we should wait in the event that none
333 * interruptible - 1 if we should unblock and return NULL
334 * in the event that we must wait, and a signal
337 * Lock status: No locks assumed to be held. This function is SMP-safe.
339 * Returns: Pointer to command descriptor.
341 * Notes: Prior to the new queue code, this function was not SMP-safe.
343 * If the wait flag is true, and we are waiting for a free
344 * command block, this function will interrupt and return
345 * NULL in the event that a signal arrives that needs to
348 * This function is deprecated, and drivers should be
349 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
352 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
355 struct Scsi_Host *host;
356 Scsi_Cmnd *SCpnt = NULL;
361 panic("No device passed to scsi_allocate_device().\n");
/* The freelist for all devices is guarded by a single global lock. */
365 spin_lock_irqsave(&device_request_lock, flags);
369 if (!device->device_blocked) {
370 if (device->single_lun) {
372 * FIXME(eric) - this is not at all optimal. Given that
373 * single lun devices are rare and usually slow
374 * (i.e. CD changers), this is good enough for now, but
375 * we may want to come back and optimize this later.
377 * Scan through all of the devices attached to this
378 * host, and see if any are active or not. If so,
379 * we need to defer this command.
381 * We really need a busy counter per device. This would
382 * allow us to more easily figure out whether we should
383 * do anything here or not.
385 for (SDpnt = host->host_queue;
387 SDpnt = SDpnt->next) {
389 * Only look for other devices on the same bus
390 * with the same target ID.
392 if (SDpnt->channel != device->channel
393 || SDpnt->id != device->id
394 || SDpnt == device) {
/* A sibling LUN on the same target is busy — defer (branch body not
 * fully visible in this excerpt). */
397 if( atomic_read(&SDpnt->device_active) != 0)
404 * Some other device in this cluster is busy.
405 * If asked to wait, we need to wait, otherwise
413 * Now we can check for a free command block for this device.
/* Linear scan of the per-device freelist for an RQ_INACTIVE slot. */
415 for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
416 if (SCpnt->request.rq_status == RQ_INACTIVE)
421 * If we couldn't find a free command block, and we have been
422 * asked to wait, then do so.
429 * If we have been asked to wait for a free block, then
433 DECLARE_WAITQUEUE(wait, current);
436 * We need to wait for a free commandblock. We need to
437 * insert ourselves into the list before we release the
438 * lock. This way if a block were released the same
439 * microsecond that we released the lock, the call
440 * to schedule() wouldn't block (well, it might switch,
441 * but the current task will still be schedulable.
443 add_wait_queue(&device->scpnt_wait, &wait);
444 if( interruptable ) {
445 set_current_state(TASK_INTERRUPTIBLE);
447 set_current_state(TASK_UNINTERRUPTIBLE);
/* Drop the lock before sleeping; we re-take it after wakeup. */
450 spin_unlock_irqrestore(&device_request_lock, flags);
453 * This should block until a device command block
458 spin_lock_irqsave(&device_request_lock, flags);
460 remove_wait_queue(&device->scpnt_wait, &wait);
462 * FIXME - Isn't this redundant?? Someone
463 * else will have forced the state back to running.
465 set_current_state(TASK_RUNNING);
467 * In the event that a signal has arrived that we need
468 * to consider, then simply return NULL. Everyone
469 * that calls us should be prepared for this
470 * possibility, and pass the appropriate code back
473 if( interruptable ) {
474 if (signal_pending(current)) {
475 spin_unlock_irqrestore(&device_request_lock, flags);
480 spin_unlock_irqrestore(&device_request_lock, flags);
/* Claim the free block and reset its fields to a known state. */
485 SCpnt->request.rq_status = RQ_SCSI_BUSY;
486 SCpnt->request.waiting = NULL; /* And no one is waiting for this
/* Bump active counters while still protected by the lock above. */
488 atomic_inc(&SCpnt->host->host_active);
489 atomic_inc(&SCpnt->device->device_active);
491 SCpnt->buffer = NULL;
493 SCpnt->request_buffer = NULL;
494 SCpnt->request_bufflen = 0;
496 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
497 SCpnt->old_use_sg = 0;
498 SCpnt->transfersize = 0; /* No default transfer size */
501 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
502 SCpnt->sc_request = NULL;
503 SCpnt->sc_magic = SCSI_CMND_MAGIC;
506 SCpnt->underflow = 0; /* Do not flag underflow conditions */
507 SCpnt->old_underflow = 0;
509 SCpnt->state = SCSI_STATE_INITIALIZING;
510 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
512 spin_unlock_irqrestore(&device_request_lock, flags);
514 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
516 atomic_read(&SCpnt->host->host_active)));
/*
 * Return a command block to the per-device freelist, decrement the
 * host/device active counters, and wake both any tasks waiting for a
 * free block and (when appropriate) the error-handler thread.
 */
521 inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
526 spin_lock_irqsave(&device_request_lock, flags);
528 SDpnt = SCpnt->device;
/* Mark the slot free again so scsi_allocate_device can find it. */
530 SCpnt->request.rq_status = RQ_INACTIVE;
531 SCpnt->state = SCSI_STATE_UNUSED;
532 SCpnt->owner = SCSI_OWNER_NOBODY;
533 atomic_dec(&SCpnt->host->host_active);
534 atomic_dec(&SDpnt->device_active);
536 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
538 atomic_read(&SCpnt->host->host_active),
539 SCpnt->host->host_failed));
540 if (SCpnt->host->host_failed != 0) {
541 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
542 SCpnt->host->in_recovery,
543 SCpnt->host->eh_active));
546 * If the host is having troubles, then look to see if this was the last
547 * command that might have failed. If so, wake up the error handler.
549 if (SCpnt->host->in_recovery
550 && !SCpnt->host->eh_active
551 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
552 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
553 atomic_read(&SCpnt->host->eh_wait->count)));
554 up(SCpnt->host->eh_wait);
557 spin_unlock_irqrestore(&device_request_lock, flags);
560 * Wake up anyone waiting for this device. Do this after we
561 * have released the lock, as they will need it as soon as
/* Wakes sleepers in scsi_allocate_device() blocked on scpnt_wait. */
564 wake_up(&SDpnt->scpnt_wait);
568 * Function: scsi_release_command
570 * Purpose: Release a command block.
572 * Arguments: SCpnt - command block we are releasing.
574 * Notes: The command block can no longer be used by the caller once
575 * this funciton is called. This is in effect the inverse
576 * of scsi_allocate_device. Note that we also must perform
577 * a couple of additional tasks. We must first wake up any
578 * processes that might have blocked waiting for a command
579 * block, and secondly we must hit the queue handler function
580 * to make sure that the device is busy. Note - there is an
581 * option to not do this - there were instances where we could
582 * recurse too deeply and blow the stack if this happened
583 * when we were indirectly called from the request function
586 * The idea is that a lot of the mid-level internals gunk
587 * gets hidden in this function. Upper level drivers don't
588 * have any chickens to wave in the air to get things to
591 * This function is deprecated, and drivers should be
592 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
594 void scsi_release_command(Scsi_Cmnd * SCpnt)
/* Capture the device before the command block is given back. */
599 SDpnt = SCpnt->device;
601 __scsi_release_command(SCpnt);
604 * Finally, hit the queue request function to make sure that
605 * the device is actually busy if there are requests present.
606 * This won't block - if the device cannot take any more, life
/* Kick the queue so a pending request can use the freed block. */
609 q = &SDpnt->request_queue;
610 scsi_queue_next_request(q, NULL);
614 * Function: scsi_dispatch_command
616 * Purpose: Dispatch a command to the low-level driver.
618 * Arguments: SCpnt - command block we are dispatching.
/*
 * Hand one command to the low-level driver, via queuecommand() when the
 * host supports queueing, otherwise by calling command() synchronously
 * and polling for completion. Also assigns the serial number / pid and
 * honors the post-reset settle delay.
 */
622 int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
627 struct Scsi_Host *host;
629 unsigned long flags = 0;
630 unsigned long timeout;
/* Caller must NOT hold io_request_lock; we take it around LLD entry. */
632 ASSERT_LOCK(&io_request_lock, 0);
635 unsigned long *ret = 0;
/* MIPS-specific: read the return address from register $31. */
637 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
639 ret = __builtin_return_address(0);
645 /* Assign a unique nonzero serial_number. */
646 if (++serial_number == 0)
648 SCpnt->serial_number = serial_number;
649 SCpnt->pid = scsi_pid++;
652 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
653 * we can avoid the drive not being ready.
655 timeout = host->last_reset + MIN_RESET_DELAY;
657 if (host->resetting && time_before(jiffies, timeout)) {
658 int ticks_remaining = timeout - jiffies;
660 * NOTE: This may be executed from within an interrupt
661 * handler! This is bad, but for now, it'll do. The irq
662 * level of the interrupt handler has been masked out by the
663 * platform dependent interrupt handling code already, so the
664 * sti() here will not cause another call to the SCSI host's
665 * interrupt handler (assuming there is one irq-level per
/* Busy-wait out the remainder of the reset settle time. */
668 while (--ticks_remaining >= 0)
669 mdelay(1 + 999 / HZ);
/* Arm the per-command timer; handler depends on old vs new EH code. */
672 if (host->hostt->use_new_eh_code) {
673 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
675 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
680 * We will use a queued command if possible, otherwise we will emulate the
681 * queuing and calling of completion function ourselves.
683 SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
684 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
685 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
686 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
688 SCpnt->state = SCSI_STATE_QUEUED;
689 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
690 if (host->can_queue) {
691 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
692 host->hostt->queuecommand));
694 * Use the old error handling code if we haven't converted the driver
695 * to use the new one yet. Note - only the new queuecommand variant
696 * passes a meaningful return value.
698 if (host->hostt->use_new_eh_code) {
700 * Before we queue this command, check if the command
701 * length exceeds what the host adapter can handle.
703 if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
704 spin_lock_irqsave(&io_request_lock, flags);
705 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
706 spin_unlock_irqrestore(&io_request_lock, flags);
/* Host rejected the command: cancel timer and requeue it. */
708 scsi_delete_timer(SCpnt);
709 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
710 SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
/* CDB longer than the adapter supports: abort without dispatching. */
713 SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
714 SCpnt->result = (DID_ABORT << 16);
715 spin_lock_irqsave(&io_request_lock, flags);
717 spin_unlock_irqrestore(&io_request_lock, flags);
722 * Before we queue this command, check if the command
723 * length exceeds what the host adapter can handle.
725 if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
726 spin_lock_irqsave(&io_request_lock, flags);
727 host->hostt->queuecommand(SCpnt, scsi_old_done);
728 spin_unlock_irqrestore(&io_request_lock, flags);
730 SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
731 SCpnt->result = (DID_ABORT << 16);
732 spin_lock_irqsave(&io_request_lock, flags);
733 scsi_old_done(SCpnt);
734 spin_unlock_irqrestore(&io_request_lock, flags);
/* Non-queueing host: run the command synchronously via command(). */
741 SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
742 spin_lock_irqsave(&io_request_lock, flags);
743 temp = host->hostt->command(SCpnt);
744 SCpnt->result = temp;
746 spin_unlock_irqrestore(&io_request_lock, flags);
/* Poll for up to 4 seconds for the emulated completion. */
747 clock = jiffies + 4 * HZ;
748 while (time_before(jiffies, clock)) {
752 printk("done(host = %d, result = %04x) : routine at %p\n",
753 host->host_no, temp, host->hostt->command);
754 spin_lock_irqsave(&io_request_lock, flags);
756 if (host->hostt->use_new_eh_code) {
759 scsi_old_done(SCpnt);
761 spin_unlock_irqrestore(&io_request_lock, flags);
763 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
767 devfs_handle_t scsi_devfs_handle;
770 * scsi_do_cmd sends all the commands out to the low-level driver. It
771 * handles the specifics required for each low level driver - ie queued
772 * or non queued. It also prevents conflicts when different high level
773 * drivers go for the same host at the same time.
/*
 * Issue a request via scsi_do_req() and sleep until it completes.
 * scsi_wait_done() (above) completes the on-stack completion. The
 * command block is released here so the Scsi_Request can be reused.
 */
776 void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
777 void *buffer, unsigned bufflen,
778 int timeout, int retries)
780 DECLARE_COMPLETION(wait);
781 request_queue_t *q = &SRpnt->sr_device->request_queue;
783 SRpnt->sr_request.waiting = &wait;
784 SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
785 scsi_do_req (SRpnt, (void *) cmnd,
786 buffer, bufflen, scsi_wait_done, timeout, retries);
/* Unplug so the freshly-inserted request is actually dispatched. */
787 generic_unplug_device(q);
788 wait_for_completion(&wait);
789 SRpnt->sr_request.waiting = NULL;
790 if( SRpnt->sr_command != NULL )
792 scsi_release_command(SRpnt->sr_command);
793 SRpnt->sr_command = NULL;
799 * Function: scsi_do_req
801 * Purpose: Queue a SCSI request
803 * Arguments: SRpnt - command descriptor.
804 * cmnd - actual SCSI command to be performed.
805 * buffer - data buffer.
806 * bufflen - size of data buffer.
807 * done - completion function to be run.
808 * timeout - how long to let it run before timeout.
809 * retries - number of retries we allow.
811 * Lock status: With the new queueing code, this is SMP-safe, and no locks
812 * need be held upon entry. The old queueing code the lock was
813 * assumed to be held upon entry.
817 * Notes: Prior to the new queue code, this function was not SMP-safe.
818 * Also, this function is now only used for queueing requests
819 * for things like ioctls and character device requests - this
820 * is because we essentially just inject a request into the
821 * queue for the device. Normal block device handling manipulates
822 * the queue directly.
824 void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
825 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
826 int timeout, int retries)
828 Scsi_Device * SDpnt = SRpnt->sr_device;
829 struct Scsi_Host *host = SDpnt->host;
831 ASSERT_LOCK(&io_request_lock, 0);
/* Debug dump of the outgoing CDB (block guarded by logging macros
 * whose surrounding lines are not visible in this excerpt). */
836 int target = SDpnt->id;
837 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
838 printk("scsi_do_req (host = %d, channel = %d target = %d, "
839 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
841 "command : ", host->host_no, SDpnt->channel, target, buffer,
842 bufflen, done, timeout, retries);
843 for (i = 0; i < size; ++i)
844 printk("%02x ", ((unsigned char *) cmnd)[i]);
849 panic("Invalid or not present host.\n");
853 * If the upper level driver is reusing these things, then
854 * we should release the low-level block now. Another one will
855 * be allocated later when this request is getting queued.
857 if( SRpnt->sr_command != NULL )
859 scsi_release_command(SRpnt->sr_command);
860 SRpnt->sr_command = NULL;
864 * We must prevent reentrancy to the lowlevel host driver. This prevents
865 * it - we enter a loop until the host we want to talk to is not busy.
866 * Race conditions are prevented, as interrupts are disabled in between the
867 * time we check for the host being not busy, and the time we mark it busy
873 * Our own function scsi_done (which marks the host as not busy, disables
874 * the timeout counter, etc) will be called by us or by the
875 * scsi_hosts[host].queuecommand() function needs to also call
876 * the completion function for the high level driver.
/* Copy command and parameters into the request descriptor. */
879 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
880 sizeof(SRpnt->sr_cmnd));
881 SRpnt->sr_bufflen = bufflen;
882 SRpnt->sr_buffer = buffer;
883 SRpnt->sr_allowed = retries;
884 SRpnt->sr_done = done;
885 SRpnt->sr_timeout_per_command = timeout;
/* Derive CDB length from the opcode if the caller did not set it. */
887 if (SRpnt->sr_cmd_len == 0)
888 SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
891 * At this point, we merely set up the command, stick it in the normal
892 * request queue, and return. Eventually that request will come to the
893 * top of the list, and will be dispatched.
895 scsi_insert_special_req(SRpnt, 0);
897 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
901 * Function: scsi_init_cmd_from_req
903 * Purpose: Queue a SCSI command
904 * Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
906 * Arguments: SCpnt - command descriptor.
907 * SRpnt - Request from the queue.
909 * Lock status: None needed.
913 * Notes: Mainly transfer data from the request structure to the
914 * command structure. The request structure is allocated
915 * using the normal memory allocator, and requests can pile
916 * up to more or less any depth. The command structure represents
917 * a consumable resource, as these are allocated into a pool
918 * when the SCSI subsystem initializes. The preallocation is
919 * required so that in low-memory situations a disk I/O request
920 * won't cause the memory manager to try and write out a page.
921 * The request structure is generally used by ioctls and character
924 void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
926 struct Scsi_Host *host = SCpnt->host;
928 ASSERT_LOCK(&io_request_lock, 0);
930 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
/* Cross-link so the request can find its command later (and vice versa
 * via sc_request below). */
931 SRpnt->sr_command = SCpnt;
934 panic("Invalid or not present host.\n");
937 SCpnt->cmd_len = SRpnt->sr_cmd_len;
938 SCpnt->use_sg = SRpnt->sr_use_sg;
/* Copy the embedded struct request and the CDB bytes wholesale. */
940 memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
941 sizeof(SRpnt->sr_request));
942 memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
943 sizeof(SCpnt->data_cmnd));
944 SCpnt->reset_chain = NULL;
/* 0 = "not yet dispatched"; scsi_dispatch_cmd assigns the real one. */
945 SCpnt->serial_number = 0;
946 SCpnt->serial_number_at_timeout = 0;
947 SCpnt->bufflen = SRpnt->sr_bufflen;
948 SCpnt->buffer = SRpnt->sr_buffer;
951 SCpnt->allowed = SRpnt->sr_allowed;
952 SCpnt->done = SRpnt->sr_done;
953 SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
955 SCpnt->sc_data_direction = SRpnt->sr_data_direction;
957 SCpnt->sglist_len = SRpnt->sr_sglist_len;
958 SCpnt->underflow = SRpnt->sr_underflow;
960 SCpnt->sc_request = SRpnt;
962 memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
963 sizeof(SCpnt->cmnd));
964 /* Zero the sense buffer. Some host adapters automatically request
965 * sense on error. 0 is not a valid sense code.
967 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
968 SCpnt->request_buffer = SRpnt->sr_buffer;
969 SCpnt->request_bufflen = SRpnt->sr_bufflen;
/* Snapshot "old_*" copies so error handling can restore them. */
970 SCpnt->old_use_sg = SCpnt->use_sg;
971 if (SCpnt->cmd_len == 0)
972 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
973 SCpnt->old_cmd_len = SCpnt->cmd_len;
974 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
975 SCpnt->old_underflow = SCpnt->underflow;
977 /* Start the timer ticking. */
979 SCpnt->internal_timeout = NORMAL_TIMEOUT;
980 SCpnt->abort_reason = 0;
983 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
987 * Function: scsi_do_cmd
989 * Purpose: Queue a SCSI command
991 * Arguments: SCpnt - command descriptor.
992 * cmnd - actual SCSI command to be performed.
993 * buffer - data buffer.
994 * bufflen - size of data buffer.
995 * done - completion function to be run.
996 * timeout - how long to let it run before timeout.
997 * retries - number of retries we allow.
999 * Lock status: With the new queueing code, this is SMP-safe, and no locks
1000 * need be held upon entry. The old queueing code the lock was
1001 * assumed to be held upon entry.
1005 * Notes: Prior to the new queue code, this function was not SMP-safe.
1006 * Also, this function is now only used for queueing requests
1007 * for things like ioctls and character device requests - this
1008 * is because we essentially just inject a request into the
1009 * queue for the device. Normal block device handling manipulates
1010 * the queue directly.
1012 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
1013 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
1014 int timeout, int retries)
1016 struct Scsi_Host *host = SCpnt->host;
1018 ASSERT_LOCK(&io_request_lock, 0);
1020 SCpnt->pid = scsi_pid++;
1021 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
/* Debug dump of the outgoing CDB (guarded by logging macros whose
 * surrounding lines are not visible in this excerpt). */
1026 int target = SCpnt->target;
1027 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
1028 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
1029 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1031 "command : ", host->host_no, SCpnt->channel, target, buffer,
1032 bufflen, done, timeout, retries);
1033 for (i = 0; i < size; ++i)
1034 printk("%02x ", ((unsigned char *) cmnd)[i]);
1039 panic("Invalid or not present host.\n");
1042 * We must prevent reentrancy to the lowlevel host driver. This prevents
1043 * it - we enter a loop until the host we want to talk to is not busy.
1044 * Race conditions are prevented, as interrupts are disabled in between the
1045 * time we check for the host being not busy, and the time we mark it busy
1051 * Our own function scsi_done (which marks the host as not busy, disables
1052 * the timeout counter, etc) will be called by us or by the
1053 * scsi_hosts[host].queuecommand() function needs to also call
1054 * the completion function for the high level driver.
/* Copy command and parameters directly into the command block
 * (deprecated Scsi_Cmnd path; mirrors scsi_do_req above). */
1057 memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
1058 sizeof(SCpnt->data_cmnd));
1059 SCpnt->reset_chain = NULL;
1060 SCpnt->serial_number = 0;
1061 SCpnt->serial_number_at_timeout = 0;
1062 SCpnt->bufflen = bufflen;
1063 SCpnt->buffer = buffer;
1066 SCpnt->allowed = retries;
1068 SCpnt->timeout_per_command = timeout;
1070 memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1071 sizeof(SCpnt->cmnd));
1072 /* Zero the sense buffer. Some host adapters automatically request
1073 * sense on error. 0 is not a valid sense code.
1075 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1076 SCpnt->request_buffer = buffer;
1077 SCpnt->request_bufflen = bufflen;
/* Snapshot "old_*" copies so error handling can restore them. */
1078 SCpnt->old_use_sg = SCpnt->use_sg;
1079 if (SCpnt->cmd_len == 0)
1080 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1081 SCpnt->old_cmd_len = SCpnt->cmd_len;
1082 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1083 SCpnt->old_underflow = SCpnt->underflow;
1085 /* Start the timer ticking. */
1087 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1088 SCpnt->abort_reason = 0;
1092 * At this point, we merely set up the command, stick it in the normal
1093 * request queue, and return. Eventually that request will come to the
1094 * top of the list, and will be dispatched.
1096 scsi_insert_special_cmd(SCpnt, 0);
1098 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1102 * This function is the mid-level interrupt routine, which decides how
1103 * to handle error conditions. Each invocation of this function must
1104 * do one and *only* one of the following:
1106 * 1) Insert command in BH queue.
1107 * 2) Activate error handler for host.
1109 * FIXME(eric) - I am concerned about stack overflow (still). An
1110 * interrupt could come while we are processing the bottom queue,
1111 * which would cause another command to be stuffed onto the bottom
1112 * queue, and it would in turn be processed as that interrupt handler
1113 * is returning. Given a sufficiently steady rate of returning
1114 * commands, this could cause the stack to overflow. I am not sure
1115 * what is the most appropriate solution here - we should probably
1116 * keep a depth count, and not process any commands while we still
1117 * have a bottom handler active higher in the stack.
1119 * There is currently code in the bottom half handler to monitor
1120 * recursion in the bottom handler and report if it ever happens. If
1121 * this becomes a problem, it won't be hard to engineer something to
1122 * deal with it so that only the outer layer ever does any real
/*
 * Mid-level completion routine, called by low-level drivers (typically
 * from interrupt context). Cancels the command timer and appends the
 * command to the bottom-half queue under scsi_bhqueue_lock.
 * NOTE(review): the function continues past the visible lines.
 */
1125 void scsi_done(Scsi_Cmnd * SCpnt)
1127 unsigned long flags;
1131 * We don't have to worry about this one timing out any more.
1133 tstatus = scsi_delete_timer(SCpnt);
1136 * If we are unable to remove the timer, it means that the command
1137 * has already timed out. In this case, we have no choice but to
1138 * let the timeout function run, as we have no idea where in fact
1139 * that function could really be. It might be on another processor,
/* Record that completion raced with (and lost to) the timeout. */
1143 SCpnt->done_late = 1;
1146 /* Set the serial numbers back to zero */
1147 SCpnt->serial_number = 0;
1150 * First, see whether this command already timed out. If so, we ignore
1151 * the response. We treat it as if the command never finished.
1153 * Since serial_number is now 0, the error handler cound detect this
1154 * situation and avoid to call the low level driver abort routine.
1157 * FIXME(eric) - I believe that this test is now redundant, due to
1158 * the test of the return status of del_timer().
1160 if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1161 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1164 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1166 SCpnt->serial_number_at_timeout = 0;
1167 SCpnt->state = SCSI_STATE_BHQUEUE;
1168 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1169 SCpnt->bh_next = NULL;
1172 * Next, put this command in the BH queue.
1174 * We need a spinlock here, or compare and exchange if we can reorder incoming
1175 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1176 * before bh is serviced. -jj
1178 * We already have the io_request_lock here, since we are called from the
1179 * interrupt handler or the error handler. (DB)
1181 * This may be true at the moment, but I would like to wean all of the low
1182 * level drivers away from using io_request_lock. Technically they should
1183 * all use their own locking. I am adding a small spinlock to protect
1184 * this datastructure to make it safe for that day. (ERY)
/* Append to the tail of the singly-linked BH queue. */
1186 if (!scsi_bh_queue_head) {
1187 scsi_bh_queue_head = SCpnt;
1188 scsi_bh_queue_tail = SCpnt;
1190 scsi_bh_queue_tail->bh_next = SCpnt;
1191 scsi_bh_queue_tail = SCpnt;
1194 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1196 * Mark the bottom half handler to be run.
1202 * Procedure: scsi_bottom_half_handler
1204 * Purpose: Called after we have finished processing interrupts, it
1205 * performs post-interrupt handling for commands that may
1208 * Notes: This is called with all interrupts enabled. This should reduce
1209 * interrupt latency, stack depth, and reentrancy of the low-level
1212 * The io_request_lock is required in all the routine. There was a subtle
1213 * race condition when scsi_done is called after a command has already
1214 * timed out but before the time out is processed by the error handler.
1217 * I believe I have corrected this. We simply monitor the return status of
1218 * del_timer() - if this comes back as 0, it means that the timer has fired
1219 * and that a timeout is in progress. I have modified scsi_done() such
1220 * that in this instance the command is never inserted in the bottom
1221 * half queue. Thus the only time we hold the lock here is when
1222 * we wish to atomically remove the contents of the queue.
/*
 * scsi_bottom_half_handler() - drains the BH queue filled by scsi_done().
 * Atomically detaches the whole queue under scsi_bhqueue_lock, then walks
 * it and dispatches each command according to scsi_decide_disposition():
 * finish, retry, requeue (device busy), or hand off to the error handler.
 * NOTE(review): several case labels/braces are missing from this listing.
 */
1224 void scsi_bottom_half_handler(void)
1228 unsigned long flags;
/* Snapshot and empty the queue in one short critical section. */
1232 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1233 SCpnt = scsi_bh_queue_head;
1234 scsi_bh_queue_head = NULL;
1235 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1237 if (SCpnt == NULL) {
1240 SCnext = SCpnt->bh_next;
/* Capture bh_next before processing: the command may be reused/freed. */
1242 for (; SCpnt; SCpnt = SCnext) {
1243 SCnext = SCpnt->bh_next;
1245 switch (scsi_decide_disposition(SCpnt)) {
/* Command completed successfully (or unrecoverably): pass upward. */
1250 SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1251 SCpnt->host->host_failed,
1254 scsi_finish_command(SCpnt);
1258 * We only come in here if we want to retry a command. The
1259 * test to see whether the command should be retried should be
1260 * keeping track of the number of tries, so we don't end up looping,
1263 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1264 SCpnt->host->host_failed, SCpnt->result));
1266 scsi_retry_command(SCpnt);
1268 case ADD_TO_MLQUEUE:
1270 * This typically happens for a QUEUE_FULL message -
1271 * typically only when the queue depth is only
1272 * approximate for a given device. Adding a command
1273 * to the queue for the device will prevent further commands
1274 * from being sent to the device, so we shouldn't end up
1275 * with tons of things being sent down that shouldn't be.
1277 SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
1279 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1283 * Here we have a fatal error of some sort. Turn it over to
1284 * the error handler.
1286 SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1287 SCpnt, SCpnt->result,
1288 atomic_read(&SCpnt->host->host_active),
1289 SCpnt->host->host_busy,
1290 SCpnt->host->host_failed));
1293 * Dump the sense information too.
1295 if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1296 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
/* An error-handler thread exists for this host: mark the command failed
 * and account it in host_failed. */
1298 if (SCpnt->host->eh_wait != NULL) {
1299 SCpnt->host->host_failed++;
1300 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1301 SCpnt->state = SCSI_STATE_FAILED;
1302 SCpnt->host->in_recovery = 1;
1304 * If the host is having troubles, then look to see if this was the last
1305 * command that might have failed. If so, wake up the error handler.
1307 if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1308 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1309 atomic_read(&SCpnt->host->eh_wait->count)));
1310 up(SCpnt->host->eh_wait);
1314 * We only get here if the error recovery thread has died.
1316 scsi_finish_command(SCpnt);
1319 } /* for(; SCpnt...) */
1326 * Function: scsi_retry_command
1328 * Purpose: Send a command back to the low level to be retried.
1330 * Notes: This command is always executed in the context of the
1331 * bottom half handler, or the error handler thread. Low
1332 * level drivers should not become re-entrant as a result of
/*
 * scsi_retry_command() - resubmit a command to the low-level driver.
 * Restores the saved ("old_") copies of the command fields that the last
 * execution may have modified, clears stale sense data, and dispatches.
 * Returns the result of scsi_dispatch_cmd().
 */
1335 int scsi_retry_command(Scsi_Cmnd * SCpnt)
/* Restore the original CDB from the saved copy. */
1337 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1338 sizeof(SCpnt->data_cmnd));
/* Restore buffer, scatter-gather, length, direction and underflow state. */
1339 SCpnt->request_buffer = SCpnt->buffer;
1340 SCpnt->request_bufflen = SCpnt->bufflen;
1341 SCpnt->use_sg = SCpnt->old_use_sg;
1342 SCpnt->cmd_len = SCpnt->old_cmd_len;
1343 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1344 SCpnt->underflow = SCpnt->old_underflow;
1347 * Zero the sense information from the last time we tried
/* Stale sense data must not be misread as belonging to the retry. */
1350 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1352 return scsi_dispatch_cmd(SCpnt);
1356 * Function: scsi_finish_command
1358 * Purpose: Pass command off to upper layer for finishing of I/O
1359 * request, waking processes that are waiting on results,
/*
 * scsi_finish_command() - hand a completed command to the upper layer.
 * Decrements host/device busy counters under io_request_lock, clears the
 * blocked flags, tags the result with DRIVER_SENSE when sense data is
 * valid, and copies result/sense into the associated Scsi_Request (if any)
 * before the upper-level completion runs.
 * NOTE(review): the "host = SCpnt->host;" assignment and the final
 * completion call are missing from this listing.
 */
1362 void scsi_finish_command(Scsi_Cmnd * SCpnt)
1364 struct Scsi_Host *host;
1365 Scsi_Device *device;
1366 Scsi_Request * SRpnt;
1367 unsigned long flags;
/* Must be entered without io_request_lock held; we take it below. */
1369 ASSERT_LOCK(&io_request_lock, 0);
1372 device = SCpnt->device;
1375 * We need to protect the decrement, as otherwise a race condition
1376 * would exist. Fiddling with SCpnt isn't a problem as the
1377 * design only allows a single SCpnt to be active in only
1378 * one execution context, but the device and host structures are
1381 spin_lock_irqsave(&io_request_lock, flags);
1382 host->host_busy--; /* Indicate that we are free */
1383 device->device_busy--; /* Decrement device usage counter. */
1384 spin_unlock_irqrestore(&io_request_lock, flags);
1387 * Clear the flags which say that the device/host is no longer
1388 * capable of accepting new commands. These are set in scsi_queue.c
1389 * for both the queue full condition on a device, and for a
1390 * host full condition on the host.
1392 host->host_blocked = FALSE;
1393 device->device_blocked = FALSE;
1396 * If we have valid sense information, then some kind of recovery
1397 * must have taken place. Make a note of this.
1399 if (scsi_sense_valid(SCpnt)) {
1400 SCpnt->result |= (DRIVER_SENSE << 24);
1402 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1403 SCpnt->device->id, SCpnt->result));
1405 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1406 SCpnt->state = SCSI_STATE_FINISHED;
1408 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1409 SCpnt->use_sg = SCpnt->old_use_sg;
1412 * If there is an associated request structure, copy the data over before we call the
1413 * completion function.
1415 SRpnt = SCpnt->sc_request;
1416 if( SRpnt != NULL ) {
1417 SRpnt->sr_result = SRpnt->sr_command->result;
/* Sense data is only meaningful on a non-zero result. */
1418 if( SRpnt->sr_result != 0 ) {
1419 memcpy(SRpnt->sr_sense_buffer,
1420 SRpnt->sr_command->sense_buffer,
1421 sizeof(SRpnt->sr_sense_buffer));
1428 static int scsi_register_host(Scsi_Host_Template *);
1429 static int scsi_unregister_host(Scsi_Host_Template *);
1432 * Function: scsi_release_commandblocks()
1434 * Purpose: Release command blocks associated with a device.
1436 * Arguments: SDpnt - device
1440 * Lock status: No locking assumed or required.
/*
 * scsi_release_commandblocks() - free every Scsi_Cmnd on a device's
 * command-block list.  Walks device_queue under device_request_lock,
 * kfree()ing each entry, then marks the device as having no command
 * blocks and a zero queue depth.
 */
1444 void scsi_release_commandblocks(Scsi_Device * SDpnt)
1446 Scsi_Cmnd *SCpnt, *SCnext;
1447 unsigned long flags;
1449 spin_lock_irqsave(&device_request_lock, flags);
/* Advance device_queue and SCnext before freeing the current entry. */
1450 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1451 SDpnt->device_queue = SCnext = SCpnt->next;
1452 kfree((char *) SCpnt);
1454 SDpnt->has_cmdblocks = 0;
1455 SDpnt->queue_depth = 0;
1456 spin_unlock_irqrestore(&device_request_lock, flags);
1460 * Function: scsi_build_commandblocks()
1462 * Purpose: Allocate command blocks associated with a device.
1464 * Arguments: SDpnt - device
1468 * Lock status: No locking assumed or required.
/*
 * scsi_build_commandblocks() - allocate queue_depth Scsi_Cmnd structures
 * for a device and thread them onto device_queue, all under
 * device_request_lock.  On partial allocation failure the queue depth is
 * trimmed to what was actually obtained.
 */
1472 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1474 unsigned long flags;
1475 struct Scsi_Host *host = SDpnt->host;
1479 spin_lock_irqsave(&device_request_lock, flags);
/* Default the depth from the host template; never allow zero. */
1481 if (SDpnt->queue_depth == 0)
1483 SDpnt->queue_depth = host->cmd_per_lun;
1484 if (SDpnt->queue_depth == 0)
1485 SDpnt->queue_depth = 1; /* live to fight another day */
1487 SDpnt->device_queue = NULL;
1489 for (j = 0; j < SDpnt->queue_depth; j++) {
/* ISA-DMA-limited hosts need GFP_DMA memory for command blocks. */
1490 SCpnt = (Scsi_Cmnd *)
1491 kmalloc(sizeof(Scsi_Cmnd),
1493 (host->unchecked_isa_dma ? GFP_DMA : 0));
1495 break; /* If not, the next line will oops ... */
1496 memset(SCpnt, 0, sizeof(Scsi_Cmnd));
/* Identify the command block with its device's address. */
1498 SCpnt->device = SDpnt;
1499 SCpnt->target = SDpnt->id;
1500 SCpnt->lun = SDpnt->lun;
1501 SCpnt->channel = SDpnt->channel;
1502 SCpnt->request.rq_status = RQ_INACTIVE;
1504 SCpnt->old_use_sg = 0;
1505 SCpnt->old_cmd_len = 0;
1506 SCpnt->underflow = 0;
1507 SCpnt->old_underflow = 0;
1508 SCpnt->transfersize = 0;
1510 SCpnt->serial_number = 0;
1511 SCpnt->serial_number_at_timeout = 0;
1512 SCpnt->host_scribble = NULL;
/* Push onto the head of the device's free-command list. */
1513 SCpnt->next = SDpnt->device_queue;
1514 SDpnt->device_queue = SCpnt;
1515 SCpnt->state = SCSI_STATE_UNUSED;
1516 SCpnt->owner = SCSI_OWNER_NOBODY;
/* j < queue_depth means the loop broke on allocation failure. */
1518 if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1519 printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1520 SDpnt->queue_depth, j);
1521 SDpnt->queue_depth = j;
1522 SDpnt->has_cmdblocks = (0 != j);
1524 SDpnt->has_cmdblocks = 1;
1526 spin_unlock_irqrestore(&device_request_lock, flags);
/*
 * scsi_host_no_insert() - record a boot-time "scsihosts=" name/number pair.
 * Allocates a Scsi_Host_Name for `str`, appends it to scsi_host_no_list,
 * and raises max_scsi_hosts so number n stays reserved.
 * NOTE(review): `len` is computed on a missing line; error paths are
 * partially missing from this listing.
 */
1529 void __init scsi_host_no_insert(char *str, int n)
1531 Scsi_Host_Name *shn, *shn2;
1535 if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1536 if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
/* strncpy does not terminate; presumably a '\0' store follows on a
 * missing line — TODO confirm. */
1537 strncpy(shn->name, str, len);
1540 shn->host_registered = 0;
1541 shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
/* Append to the tail of the existing list, or start a new list. */
1543 if (scsi_host_no_list) {
1544 for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1549 scsi_host_no_list = shn;
1550 max_scsi_hosts = n+1;
/* Name allocation failed: discard the partially built entry. */
1553 kfree((char *) shn);
1557 #ifdef CONFIG_PROC_FS
/*
 * scsi_proc_info() - read handler for /proc/scsi/scsi.  Emits an
 * "Attached devices:" header followed by one line per host and the device
 * list of each host, honouring the classic procfs offset/length windowing
 * protocol via *start and the returned length.
 * NOTE(review): len/pos/begin bookkeeping lines are partially missing
 * from this listing.
 */
1558 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1561 struct Scsi_Host *HBA_ptr;
1567 * First, see if there are any attached devices or not.
/* Scan for at least one host with a non-empty device queue; HBA_ptr is
 * then non-NULL iff something is attached (used in the header below). */
1569 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1570 if (HBA_ptr->host_queue != NULL) {
1574 size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1577 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1579 size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1580 HBA_ptr->hostt->procname);
1584 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1585 proc_print_scsidevice(scd, buffer, &size, len);
/* Stop once we have produced enough output to satisfy the request. */
1593 if (pos > offset + length)
1599 *start = buffer + (offset - begin); /* Start of wanted data */
1600 len -= (offset - begin); /* Start slop */
1602 len = length; /* Ending slop */
/*
 * proc_scsi_gen_write() - write handler for /proc/scsi/scsi.  Copies the
 * user buffer into a freshly allocated page, then parses one of the
 * commands: "scsi dump N", "scsi log <token> N" (CONFIG_SCSI_LOGGING),
 * "scsi add-single-device H C I L", or "scsi remove-single-device H C I L".
 * The page is freed at the common exit label.
 * NOTE(review): many error-return lines and some loop bodies are missing
 * from this listing; comments describe only the visible code.
 */
1606 static int proc_scsi_gen_write(struct file * file, const char * buf,
1607 unsigned long length, void *data)
1609 struct Scsi_Device_Template *SDTpnt;
1611 struct Scsi_Host *HBA_ptr;
1613 int host, channel, id, lun;
/* Reject NULL input and writes larger than one page. */
1617 if (!buf || length>PAGE_SIZE)
1620 if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1622 if(copy_from_user(buffer, buf, length))
/* NUL-terminate the copied command string. */
1630 if (length < PAGE_SIZE)
1631 buffer[length] = '\0';
1632 else if (buffer[PAGE_SIZE-1])
/* Every command starts with the literal prefix "scsi ". */
1635 if (length < 11 || strncmp("scsi", buffer, 4))
1639 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1640 * to dump status of all scsi commands. The number is used to specify the level
1641 * of detail in the dump.
1643 if (!strncmp("dump", buffer + 5, 4)) {
1651 level = simple_strtoul(p, NULL, 0);
1652 scsi_dump_status(level);
1655 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1656 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1657 * llcomplete,hlqueue,hlcomplete]
1659 #ifdef CONFIG_SCSI_LOGGING /* { */
1661 if (!strncmp("log", buffer + 5, 3)) {
/* Advance p past the token word (delimited by space/tab/NUL). */
1667 while (*p != ' ' && *p != '\t' && *p != '\0') {
1672 if (strncmp(token, "all", 3) == 0) {
1674 * Turn on absolutely everything.
1676 scsi_logging_level = ~0;
1677 } else if (strncmp(token, "none", 4) == 0) {
1679 * Turn off absolutely everything.
1681 scsi_logging_level = 0;
1688 level = simple_strtoul(p, NULL, 0);
1691 * Now figure out what to do with it.
/* Map the token to the matching per-subsystem logging-level setter. */
1693 if (strcmp(token, "error") == 0) {
1694 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1695 } else if (strcmp(token, "timeout") == 0) {
1696 SCSI_SET_TIMEOUT_LOGGING(level);
1697 } else if (strcmp(token, "scan") == 0) {
1698 SCSI_SET_SCAN_BUS_LOGGING(level);
1699 } else if (strcmp(token, "mlqueue") == 0) {
1700 SCSI_SET_MLQUEUE_LOGGING(level);
1701 } else if (strcmp(token, "mlcomplete") == 0) {
1702 SCSI_SET_MLCOMPLETE_LOGGING(level);
1703 } else if (strcmp(token, "llqueue") == 0) {
1704 SCSI_SET_LLQUEUE_LOGGING(level);
1705 } else if (strcmp(token, "llcomplete") == 0) {
1706 SCSI_SET_LLCOMPLETE_LOGGING(level);
1707 } else if (strcmp(token, "hlqueue") == 0) {
1708 SCSI_SET_HLQUEUE_LOGGING(level);
1709 } else if (strcmp(token, "hlcomplete") == 0) {
1710 SCSI_SET_HLCOMPLETE_LOGGING(level);
1711 } else if (strcmp(token, "ioctl") == 0) {
1712 SCSI_SET_IOCTL_LOGGING(level);
1718 printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1720 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1723 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1724 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1725 * Consider this feature BETA.
1726 * CAUTION: This is not for hotplugging your peripherals. As
1727 * SCSI was not designed for this you could damage your
1729 * However perhaps it is legal to switch on an
1730 * already connected device. It is perhaps not
1731 * guaranteed this device doesn't corrupt an ongoing data transfer.
1733 if (!strncmp("add-single-device", buffer + 5, 17)) {
/* Parse the four whitespace-separated decimal coordinates. */
1736 host = simple_strtoul(p, &p, 0);
1737 channel = simple_strtoul(p + 1, &p, 0);
1738 id = simple_strtoul(p + 1, &p, 0);
1739 lun = simple_strtoul(p + 1, &p, 0);
1741 printk(KERN_INFO "scsi singledevice %d %d %d %d\n", host, channel,
/* Locate the host with the requested host number. */
1744 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1745 if (HBA_ptr->host_no == host) {
/* If the device already exists on this host, bail out. */
1753 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1754 if ((scd->channel == channel
1756 && scd->lun == lun)) {
1763 goto out; /* We do not yet support unplugging */
/* Targeted rescan of the single requested channel/id/lun. */
1765 scan_scsis(HBA_ptr, 1, channel, id, lun);
1767 /* FIXME (DB) This assumes that the queue_depth routines can be used
1768 in this context as well, while they were all designed to be
1769 called only once after the detect routine. (DB) */
1770 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1771 it is called before build_commandblocks() */
1777 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1778 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1780 * Consider this feature pre-BETA.
1782 * CAUTION: This is not for hotplugging your peripherals. As
1783 * SCSI was not designed for this you could damage your
1784 * hardware and thoroughly confuse the SCSI subsystem.
1787 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1790 host = simple_strtoul(p, &p, 0);
1791 channel = simple_strtoul(p + 1, &p, 0);
1792 id = simple_strtoul(p + 1, &p, 0);
1793 lun = simple_strtoul(p + 1, &p, 0);
1796 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1797 if (HBA_ptr->host_no == host) {
1805 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1806 if ((scd->channel == channel
1808 && scd->lun == lun)) {
1814 goto out; /* there is no such device attached */
/* Refuse to remove a device that is still open. */
1817 if (scd->access_count)
/* Ask every upper-level driver template to detach from the device. */
1820 SDTpnt = scsi_devicelist;
1821 while (SDTpnt != NULL) {
1823 (*SDTpnt->detach) (scd);
1824 SDTpnt = SDTpnt->next;
1827 if (scd->attached == 0) {
1829 * Nobody is using this device any more.
1830 * Free all of the command structures.
1832 if (HBA_ptr->hostt->revoke)
1833 HBA_ptr->hostt->revoke(scd);
1834 devfs_unregister (scd->de);
1835 scsi_release_commandblocks(scd);
1837 /* Now we can remove the device structure */
/* Unlink from the doubly linked host_queue list. */
1838 if (scd->next != NULL)
1839 scd->next->prev = scd->prev;
1841 if (scd->prev != NULL)
1842 scd->prev->next = scd->next;
1844 if (HBA_ptr->host_queue == scd) {
1845 HBA_ptr->host_queue = scd->next;
1847 blk_cleanup_queue(&scd->request_queue);
1848 kfree((char *) scd);
/* Common exit: release the command-buffer page. */
1856 free_page((unsigned long) buffer);
1862 * This entry point should be called by a driver if it is trying
1863 * to add a low level scsi driver to the system.
/*
 * scsi_register_host() - register a low-level driver template with the
 * mid-level.  Runs the template's detect() routine, links the template
 * into scsi_hosts, creates /proc entries, spawns an error-handler thread
 * per new-eh host, scans buses for devices, attaches upper-level drivers,
 * allocates command blocks, and resizes the DMA pool.
 * Returns non-zero on failure (per the visible "return 1" path).
 * NOTE(review): many closing braces and some statements (e.g. the
 * out_of_space handling) are missing from this listing.
 */
1865 static int scsi_register_host(Scsi_Host_Template * tpnt)
1868 struct Scsi_Host *shpnt;
1870 struct Scsi_Device_Template *sdtpnt;
1872 unsigned long flags;
1873 int out_of_space = 0;
1875 if (tpnt->next || !tpnt->detect)
1876 return 1; /* Must be already loaded, or
1877 * no detect routine available
1880 /* If max_sectors isn't set, default to max */
1881 if (!tpnt->max_sectors)
1882 tpnt->max_sectors = MAX_SECTORS;
/* Remember the host count so we can tell whether detect() registered any. */
1884 pcount = next_scsi_host;
1888 /* The detect routine must carefully spinunlock/spinlock if
1889 it enables interrupts, since all interrupt handlers do
1891 All lame drivers are going to fail due to the following
1892 spinlock. For the time being let's use it only for drivers
1893 using the new scsi code. NOTE: the detect routine could
1894 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1896 if (tpnt->use_new_eh_code) {
1897 spin_lock_irqsave(&io_request_lock, flags);
1898 tpnt->present = tpnt->detect(tpnt);
1899 spin_unlock_irqrestore(&io_request_lock, flags);
1901 tpnt->present = tpnt->detect(tpnt);
1903 if (tpnt->present) {
/* detect() claimed hosts but registered none itself. */
1904 if (pcount == next_scsi_host) {
1905 if (tpnt->present > 1) {
1906 printk(KERN_ERR "scsi: Failure to register low-level scsi driver");
1907 scsi_unregister_host(tpnt);
1911 * The low-level driver failed to register a driver.
1912 * We can do this now.
1914 if(scsi_register(tpnt, 0)==NULL)
1916 printk(KERN_ERR "scsi: register failed.\n");
1917 scsi_unregister_host(tpnt);
1921 tpnt->next = scsi_hosts; /* Add to the linked list */
1924 /* Add the new driver to /proc/scsi */
1925 #ifdef CONFIG_PROC_FS
1926 build_proc_dir_entries(tpnt);
1931 * Add the kernel threads for each host adapter that will
1932 * handle error correction.
1934 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1935 if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
1936 DECLARE_MUTEX_LOCKED(sem);
/* The thread up()s eh_notify once it has initialized. */
1938 shpnt->eh_notify = &sem;
1939 kernel_thread((int (*)(void *)) scsi_error_handler,
1943 * Now wait for the kernel error thread to initialize itself
1944 * as it might be needed when we scan the bus.
1947 shpnt->eh_notify = NULL;
/* Announce each newly registered host instance. */
1951 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1952 if (shpnt->hostt == tpnt) {
1954 name = tpnt->info(shpnt);
1958 printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
1959 shpnt->host_no, name);
1963 /* The next step is to call scan_scsis here. This generates the
1964 * Scsi_Devices entries
1966 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1967 if (shpnt->hostt == tpnt) {
1968 scan_scsis(shpnt, 0, 0, 0, 0);
1969 if (shpnt->select_queue_depths != NULL) {
1970 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
/* Give upper-level templates a chance to initialize for new devices. */
1975 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1976 if (sdtpnt->init && sdtpnt->dev_noticed)
1981 * Next we create the Scsi_Cmnd structures for this host
1983 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1984 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
1985 if (SDpnt->host->hostt == tpnt) {
1986 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1988 (*sdtpnt->attach) (SDpnt);
1989 if (SDpnt->attached) {
1990 scsi_build_commandblocks(SDpnt);
1991 if (0 == SDpnt->has_cmdblocks)
1998 * Now that we have all of the devices, resize the DMA pool,
2001 scsi_resize_dma_pool();
2004 /* This does any final handling that is required. */
2005 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2006 if (sdtpnt->finish && sdtpnt->nr_dev) {
2007 (*sdtpnt->finish) ();
2011 #if defined(USE_STATIC_SCSI_MEMORY)
2012 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2013 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2014 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2015 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
/* Error path: tear down whatever registration got done. */
2019 scsi_unregister_host(tpnt); /* easiest way to clean up?? */
2026 * Similarly, this entry point should be called by a loadable module if it
2027 * is trying to remove a low level scsi driver from the system.
/*
 * scsi_unregister_host() - remove a low-level driver template and all of
 * its hosts.  Verifies nothing is busy, forces devices offline, detaches
 * upper-level drivers, kills per-host error-handler threads, frees
 * command blocks and device structures, releases host resources
 * (irq/dma/io region), and finally unlinks the template from scsi_hosts
 * and /proc.  Returns 0/non-zero per the (partially missing) exit paths.
 * NOTE(review): many closing braces, `continue` statements and error
 * returns are missing from this listing.
 */
2029 static int scsi_unregister_host(Scsi_Host_Template * tpnt)
2032 int pcount0, pcount;
2035 Scsi_Device *SDpnt1;
2036 struct Scsi_Device_Template *sdtpnt;
2037 struct Scsi_Host *sh1;
2038 struct Scsi_Host *shpnt;
2039 char name[10]; /* host_no>=10^9? I don't think so. */
2041 /* get the big kernel lock, so we don't race with open() */
2045 * First verify that this host adapter is completely free with no pending
2048 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2049 for (SDpnt = shpnt->host_queue; SDpnt;
2050 SDpnt = SDpnt->next) {
/* A non-zero module use count means some device is still open. */
2051 if (SDpnt->host->hostt == tpnt
2052 && SDpnt->host->hostt->module
2053 && GET_USE_COUNT(SDpnt->host->hostt->module))
2056 * FIXME(eric) - We need to find a way to notify the
2057 * low level driver that we are shutting down - via the
2058 * special device entry that still needs to get added.
2060 * Is detach interface below good enough for this?
2066 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2067 * to help prevent race conditions where other hosts/processors could try and
2068 * get in and queue a command.
2070 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2071 for (SDpnt = shpnt->host_queue; SDpnt;
2072 SDpnt = SDpnt->next) {
2073 if (SDpnt->host->hostt == tpnt)
2074 SDpnt->online = FALSE;
/* Verify no command block of this template's hosts is still active. */
2079 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2080 if (shpnt->hostt != tpnt) {
2083 for (SDpnt = shpnt->host_queue; SDpnt;
2084 SDpnt = SDpnt->next) {
2086 * Loop over all of the commands associated with the device. If any of
2087 * them are busy, then set the state back to inactive and bail.
2089 for (SCpnt = SDpnt->device_queue; SCpnt;
2090 SCpnt = SCpnt->next) {
2091 online_status = SDpnt->online;
2092 SDpnt->online = FALSE;
2093 if (SCpnt->request.rq_status != RQ_INACTIVE) {
2094 printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2095 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2096 SCpnt->state, SCpnt->owner);
/* Roll back: clear the DISCONNECTING marks set so far on this host. */
2097 for (SDpnt1 = shpnt->host_queue; SDpnt1;
2098 SDpnt1 = SDpnt1->next) {
2099 for (SCpnt = SDpnt1->device_queue; SCpnt;
2100 SCpnt = SCpnt->next)
2101 if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2102 SCpnt->request.rq_status = RQ_INACTIVE;
2104 SDpnt->online = online_status;
2105 printk(KERN_ERR "Device busy???\n");
2109 * No, this device is really free. Mark it as such, and
2112 SCpnt->state = SCSI_STATE_DISCONNECTING;
2113 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2117 /* Next we detach the high level drivers from the Scsi_Device structures */
2119 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2120 if (shpnt->hostt != tpnt) {
2123 for (SDpnt = shpnt->host_queue; SDpnt;
2124 SDpnt = SDpnt->next) {
2125 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2127 (*sdtpnt->detach) (SDpnt);
2129 /* If something still attached, punt */
2130 if (SDpnt->attached) {
2131 printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
2134 devfs_unregister (SDpnt->de);
2139 * Next, kill the kernel error recovery thread for this host.
2141 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2142 if (shpnt->hostt == tpnt
2143 && shpnt->hostt->use_new_eh_code
2144 && shpnt->ehandler != NULL) {
2145 DECLARE_MUTEX_LOCKED(sem);
/* SIGHUP tells the error thread to exit; it up()s eh_notify on its way out. */
2147 shpnt->eh_notify = &sem;
2148 send_sig(SIGHUP, shpnt->ehandler, 1);
2150 shpnt->eh_notify = NULL;
2154 /* Next we free up the Scsi_Cmnd structures for this host */
2156 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2157 if (shpnt->hostt != tpnt) {
/* Pop devices off host_queue one at a time, freeing each. */
2160 for (SDpnt = shpnt->host_queue; SDpnt;
2161 SDpnt = shpnt->host_queue) {
2162 scsi_release_commandblocks(SDpnt);
2164 blk_cleanup_queue(&SDpnt->request_queue);
2165 /* Next free up the Scsi_Device structures for this host */
2166 shpnt->host_queue = SDpnt->next;
2167 kfree((char *) SDpnt);
2172 /* Next we go through and remove the instances of the individual hosts
2173 * that were detected */
2175 pcount0 = next_scsi_host;
2176 for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2178 if (shpnt->hostt != tpnt)
2180 pcount = next_scsi_host;
2181 /* Remove the /proc/scsi directory entry */
2182 sprintf(name,"%d",shpnt->host_no);
2183 remove_proc_entry(name, tpnt->proc_dir);
2185 (*tpnt->release) (shpnt);
2187 /* This is the default case for the release function.
2188 * It should do the right thing for most correctly
2189 * written host adapters.
2192 free_irq(shpnt->irq, NULL);
2193 if (shpnt->dma_channel != 0xff)
2194 free_dma(shpnt->dma_channel);
2195 if (shpnt->io_port && shpnt->n_io_port)
2196 release_region(shpnt->io_port, shpnt->n_io_port);
/* If release() did not unregister the host itself, do it now. */
2198 if (pcount == next_scsi_host)
2199 scsi_unregister(shpnt);
2204 * If there are absolutely no more hosts left, it is safe
2205 * to completely nuke the DMA pool. The resize operation will
2206 * do the right thing and free everything.
2209 scsi_resize_dma_pool();
2211 if (pcount0 != next_scsi_host)
2212 printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
2213 (next_scsi_host == 1) ? "" : "s");
2215 #if defined(USE_STATIC_SCSI_MEMORY)
2216 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2217 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2218 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2219 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2223 * Remove it from the linked list and /proc if all
2224 * hosts were successfully removed (ie preset == 0)
2226 if (!tpnt->present) {
2227 Scsi_Host_Template **SHTp = &scsi_hosts;
2228 Scsi_Host_Template *SHT;
2230 while ((SHT = *SHTp) != NULL) {
2233 remove_proc_entry(tpnt->proc_name, proc_scsi);
2249 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2252 * This entry point should be called by a loadable module if it is trying
2253 * add a high level scsi driver to the system.
/*
 * scsi_register_device_module() - register an upper-level driver template
 * (disk/tape/cdrom/generic).  Registers the template, lets it detect()
 * each known device, runs its init(), attaches it to matching devices,
 * allocates command blocks where needed, runs finish(), and resizes the
 * DMA pool.  Error path unregisters the template again.
 * NOTE(review): return statements and some braces are missing from this
 * listing.
 */
2255 static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2258 struct Scsi_Host *shpnt;
2259 int out_of_space = 0;
2264 scsi_register_device(tpnt);
2266 * First scan the devices that we know about, and see if we notice them.
2269 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2270 for (SDpnt = shpnt->host_queue; SDpnt;
2271 SDpnt = SDpnt->next) {
/* detect() marks (presumably non-zero) devices this driver wants. */
2273 SDpnt->detected = (*tpnt->detect) (SDpnt);
2278 * If any of the devices would match this driver, then perform the
2281 if (tpnt->init && tpnt->dev_noticed) {
/* init() failure: clear the detected flags and back the template out. */
2282 if ((*tpnt->init) ()) {
2283 for (shpnt = scsi_hostlist; shpnt;
2284 shpnt = shpnt->next) {
2285 for (SDpnt = shpnt->host_queue; SDpnt;
2286 SDpnt = SDpnt->next) {
2287 SDpnt->detected = 0;
2290 scsi_deregister_device(tpnt);
2296 * Now actually connect the devices to the new driver.
2298 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2299 for (SDpnt = shpnt->host_queue; SDpnt;
2300 SDpnt = SDpnt->next) {
2301 SDpnt->attached += SDpnt->detected;
2302 SDpnt->detected = 0;
2304 (*tpnt->attach) (SDpnt);
2306 * If this driver attached to the device, and don't have any
2307 * command blocks for this device, allocate some.
2309 if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2310 SDpnt->online = TRUE;
2311 scsi_build_commandblocks(SDpnt);
2312 if (0 == SDpnt->has_cmdblocks)
2319 * This does any final handling that is required.
2321 if (tpnt->finish && tpnt->nr_dev)
2324 scsi_resize_dma_pool();
/* Error path: remove the partially registered template. */
2328 scsi_unregister_device(tpnt); /* easiest way to clean up?? */
/*
 * scsi_unregister_device() - remove an upper-level driver template.
 * Refuses if the template's module is still in use; otherwise detaches
 * the template from every device, frees command blocks of devices left
 * with no attachments, and removes the template from the list.
 * NOTE(review): return statements are missing from this listing.
 */
2334 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2337 struct Scsi_Host *shpnt;
2341 * If we are busy, this is not going to fly.
2343 if (GET_USE_COUNT(tpnt->module) != 0)
2347 * Next, detach the devices from the driver.
2350 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2351 for (SDpnt = shpnt->host_queue; SDpnt;
2352 SDpnt = SDpnt->next) {
2354 (*tpnt->detach) (SDpnt);
/* attached == 0: no upper-level driver uses the device any more. */
2355 if (SDpnt->attached == 0) {
2356 SDpnt->online = FALSE;
2359 * Nobody is using this device any more. Free all of the
2360 * command structures.
2362 scsi_release_commandblocks(SDpnt);
2367 * Extract the template from the linked list.
2369 scsi_deregister_device(tpnt);
2374 * Final cleanup for the driver is done in the driver sources in the
2384 /* This function should be called by drivers which needs to register
2385 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2386 * main device/hosts register function /mathiasen
/*
 * scsi_register_module() - public dispatch point for registering either a
 * host-adapter template (MODULE_SCSI_HA) or an upper-level device driver
 * template (MODULE_SCSI_DEV) with the mid-level.  Other module types are
 * declared but not implemented here.
 */
2388 int scsi_register_module(int module_type, void *ptr)
2390 switch (module_type) {
2391 case MODULE_SCSI_HA:
2392 return scsi_register_host((Scsi_Host_Template *) ptr);
2394 /* Load upper level device handler of some kind */
2395 case MODULE_SCSI_DEV:
/* No host adapter present yet: ask kmod to load one first. */
2397 if (scsi_hosts == NULL)
2398 request_module("scsi_hostadapter");
2400 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2401 /* The rest of these are not yet implemented */
2403 /* Load constants.o */
2404 case MODULE_SCSI_CONST:
2406 /* Load specialized ioctl handler for some device. Intended for
2407 * cdroms that have non-SCSI2 audio command sets. */
2408 case MODULE_SCSI_IOCTL:
2415 /* Reverse the actions taken above
/*
 * scsi_unregister_module() - inverse of scsi_register_module(): routes the
 * template to the matching unregister routine by module type.  Unhandled
 * types fall through the (partially missing) default handling.
 */
2417 int scsi_unregister_module(int module_type, void *ptr)
2421 switch (module_type) {
2422 case MODULE_SCSI_HA:
2423 retval = scsi_unregister_host((Scsi_Host_Template *) ptr);
2425 case MODULE_SCSI_DEV:
2426 retval = scsi_unregister_device((struct Scsi_Device_Template *)ptr);
2428 /* The rest of these are not yet implemented. */
2429 case MODULE_SCSI_CONST:
2430 case MODULE_SCSI_IOCTL:
2437 #ifdef CONFIG_PROC_FS
2439 * Function: scsi_dump_status
2441 * Purpose: Brain dump of scsi system, used for problem solving.
2443 * Arguments: level - used to indicate level of detail.
2445 * Notes: The level isn't used at all yet, but we need to find some way
2446 * of sensibly logging varying degrees of information. A quick one-line
2447 * display of each command, plus the status would be most useful.
2449 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2450 * it all off if the user wants a lean and mean kernel. It would probably
2451 * also be useful to allow the user to specify one single host to be dumped.
2452 * A second argument to the function would be useful for that purpose.
2454 * FIXME - some formatting of the output into tables would be very handy.
/*
 * scsi_dump_status() - debugging brain-dump triggered by
 * "scsi dump N" writes to /proc/scsi/scsi.  Prints per-host counters,
 * one line per command block, and the pending block-device request lists.
 * The `level` argument is currently unused (see the FIXME above).
 * Compiled out unless CONFIG_SCSI_LOGGING is set.
 */
2456 static void scsi_dump_status(int level)
2458 #ifdef CONFIG_SCSI_LOGGING /* { */
2460 struct Scsi_Host *shpnt;
2463 printk(KERN_INFO "Dump of scsi host parameters:\n");
2465 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2466 printk(KERN_INFO " %d %d %d : %d %d\n",
2469 atomic_read(&shpnt->host_active),
2470 shpnt->host_blocked,
2471 shpnt->host_self_blocked);
2474 printk(KERN_INFO "\n\n");
2475 printk(KERN_INFO "Dump of scsi command parameters:\n");
2476 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2477 printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
2478 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2479 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
2480 /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
2481 printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
2484 SCpnt->host->host_no,
2489 kdevname(SCpnt->request.rq_dev),
2490 SCpnt->request.sector,
2491 SCpnt->request.nr_sectors,
2492 SCpnt->request.current_nr_sectors,
2493 SCpnt->request.rq_status,
2500 SCpnt->timeout_per_command,
2502 SCpnt->internal_timeout,
2505 SCpnt->sense_buffer[2],
/* Walk every block device's request queue and print pending requests. */
2511 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2512 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2513 /* Now dump the request lists for each block device */
2514 printk(KERN_INFO "Dump of pending block device requests\n");
2515 for (i = 0; i < MAX_BLKDEV; i++) {
2516 struct list_head * queue_head;
2518 queue_head = &blk_dev[i].request_queue.queue_head;
2519 if (!list_empty(queue_head)) {
2520 struct request *req;
2521 struct list_head * entry;
2523 printk(KERN_INFO "%d: ", i);
2524 entry = queue_head->next;
2526 req = blkdev_entry_to_request(entry);
2527 printk("(%s %d %ld %ld %ld) ",
2528 kdevname(req->rq_dev),
2532 req->current_nr_sectors);
2533 } while ((entry = entry->next) != queue_head);
2539 #endif /* CONFIG_SCSI_LOGGING */ /* } */
2541 #endif /* CONFIG_PROC_FS */
/*
 * scsi_host_no_init -- parse the "scsihosts=" boot string and assign
 * fixed host numbers to the named host adapters, in the order listed.
 * Each name is delimited by ':' or ',' in the string; next_no keeps the
 * running host number across the entries.
 * NOTE(review): the loop over the string, the local 'temp' declaration,
 * and the return are partly elided from this view.
 */
2543 static int __init scsi_host_no_init (char *str)
2545 static int next_no = 0;
/* Advance temp to the end of the current host-name token. */
2550 while (*temp && (*temp != ':') && (*temp != ','))
/* Record this name -> host-number binding in the global list. */
2556 scsi_host_no_insert(str, next_no);
/* Raw value of the "scsihosts=" option (boot command line or module
 * parameter); consumed by scsi_host_no_init() during init_scsi(). */
2563 static char *scsihosts;
2565 MODULE_PARM(scsihosts, "s");
2566 MODULE_DESCRIPTION("SCSI core");
2567 MODULE_LICENSE("GPL");
/*
 * scsi_setup -- __setup handler for "scsihosts=".  Presumably it just
 * stashes the string into 'scsihosts' for later parsing; the body is
 * elided from this view -- TODO confirm against the full source.
 */
2570 int __init scsi_setup(char *str)
2576 __setup("scsihosts=", scsi_setup);
/*
 * init_scsi -- module/boot-time initialization of the SCSI mid-layer:
 * DMA pool, /proc/scsi tree, devfs directory, host-number ordering from
 * "scsihosts=", and the completion bottom-half handler.
 * Returns 0 on success; the error-return paths are elided in this view.
 */
2579 static int __init init_scsi(void)
2581 struct proc_dir_entry *generic;
2583 printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
/* Reserve a minimal DMA-able buffer pool before any host registers. */
2585 if( scsi_init_minimal_dma_pool() != 0 )
2591 * This makes /proc/scsi and /proc/scsi/scsi visible.
2593 #ifdef CONFIG_PROC_FS
2594 proc_scsi = proc_mkdir("scsi", 0);
2596 printk (KERN_ERR "cannot init /proc/scsi\n");
2599 generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2601 printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
/* Undo the directory created above before bailing out. */
2602 remove_proc_entry("scsi", 0);
/* Writes to /proc/scsi/scsi (add/remove-single-device) go here. */
2605 generic->write_proc = proc_scsi_gen_write;
2608 scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
/* Apply the user-requested host numbering, if any was given. */
2610 printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
2611 scsi_host_no_init (scsihosts);
2613 * This is where the processing takes place for most everything
2614 * when commands are completed.
2616 init_bh(SCSI_BH, scsi_bottom_half_handler);
/*
 * exit_scsi -- module unload: tear down everything init_scsi() set up
 * (devfs directory, host-name/number list, /proc entries) and release
 * the DMA pool.  The body of the list-freeing loop is elided here.
 */
2621 static void __exit exit_scsi(void)
2623 Scsi_Host_Name *shn, *shn2 = NULL;
2627 devfs_unregister (scsi_devfs_handle);
/* Free the "scsihosts=" name list built by scsi_host_no_insert(). */
2628 for (shn = scsi_host_no_list;shn;shn = shn->next) {
2638 #ifdef CONFIG_PROC_FS
2639 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2640 remove_proc_entry ("scsi/scsi", 0);
2641 remove_proc_entry ("scsi", 0);
2645 * Free up the DMA pool.
2647 scsi_resize_dma_pool();
2651 module_init(init_scsi);
2652 module_exit(exit_scsi);
2655 * Function: scsi_get_host_dev()
2657 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2659 * Arguments: SHpnt - Host that needs a Scsi_Device
2661 * Lock status: None assumed.
2663 * Returns: The Scsi_Device or NULL
/*
 * scsi_get_host_dev -- allocate and initialize a pseudo Scsi_Device that
 * represents the host adapter itself (id == SHpnt->this_id), complete
 * with command blocks and a request queue, and mark it online.
 * Returns the new device, or NULL on allocation failure (the NULL check
 * and return statement are elided from this view -- confirm).
 */
2667 Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2669 Scsi_Device * SDpnt;
2672 * Attach a single Scsi_Device to the Scsi_Host - this should
2673 * be made to look like a "pseudo-device" that points to the
2674 * HA itself. For the moment, we include it at the head of
2675 * the host_queue itself - I don't think we want to show this
2676 * to the HA in select_queue_depths(), as this would probably confuse
2678 * Note - this device is not accessible from any high-level
2679 * drivers (including generics), which is probably not
2680 * optimal. We can add hooks later to attach
/* NOTE(review): the kmalloc flags argument is on an elided line. */
2682 SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2687 memset(SDpnt, 0, sizeof(Scsi_Device));
/* The pseudo-device targets the adapter's own SCSI id. */
2689 SDpnt->host = SHpnt;
2690 SDpnt->id = SHpnt->this_id;
/* One outstanding command at a time is enough for reset/ioctl use. */
2692 SDpnt->queue_depth = 1;
2694 scsi_build_commandblocks(SDpnt);
2696 scsi_initialize_queue(SDpnt, SHpnt);
2698 SDpnt->online = TRUE;
2701 * Initialize the object that we will use to wait for command blocks.
2703 init_waitqueue_head(&SDpnt->scpnt_wait);
2708 * Function: scsi_free_host_dev()
2710 * Purpose: Free a Scsi_Device (created by scsi_get_host_dev) that points to the host adapter itself.
2712 * Arguments: SHpnt - Host that needs a Scsi_Device
2714 * Lock status: None assumed.
/*
 * scsi_free_host_dev -- release a pseudo-device previously created by
 * scsi_get_host_dev(): tear down its request queue and its single
 * command block.  Panics if handed a device whose id is not the host
 * adapter's own id, i.e. a real device by mistake.
 * NOTE(review): the final kfree of SDpnt itself is on an elided line.
 */
2720 void scsi_free_host_dev(Scsi_Device * SDpnt)
2722 if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2724 panic("Attempt to delete wrong device\n");
2727 blk_cleanup_queue(&SDpnt->request_queue);
2730 * We only have a single SCpnt attached to this device. Free
2733 scsi_release_commandblocks(SDpnt);
2738 * Function: scsi_reset_provider_done_command
2740 * Purpose: Dummy done routine.
2742 * Notes: Some low level drivers will call scsi_done and end up here,
2743 * others won't bother.
2744 * We don't want the bogus command used for the bus/device
2745 * reset to find its way into the mid-layer so we intercept
/* Intentional no-op completion routine (see header comment above);
 * the return type and empty body are on elided lines. */
2749 scsi_reset_provider_done_command(Scsi_Cmnd *SCpnt)
2754 * Function: scsi_reset_provider
2756 * Purpose: Send requested reset to a bus or device at any phase.
2758 * Arguments: device - device to send reset to
2759 * flag - reset type (see scsi.h)
2761 * Returns: SUCCESS/FAILURE.
2763 * Notes: This is used by the SCSI Generic driver to provide
2764 * Bus/Device reset capability.
/*
 * scsi_reset_provider -- build a synthetic Scsi_Cmnd on the stack for
 * 'dev' and use it to issue a bus/device reset of type 'flag', routing
 * through either the new error-handling code or the old reset path
 * (the latter under io_request_lock).  Returns the reset result
 * (SUCCESS/FAILURE per the header comment); the 'rtn' declaration and
 * return statement are on elided lines.
 */
2767 scsi_reset_provider(Scsi_Device *dev, int flag)
2769 Scsi_Cmnd SC, *SCpnt = &SC;
/* Zero/neutral-initialize every field the EH code might look at, since
 * this command never went through the normal allocation path. */
2772 memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
2773 SCpnt->host = dev->host;
2774 SCpnt->device = dev;
2775 SCpnt->target = dev->id;
2776 SCpnt->lun = dev->lun;
2777 SCpnt->channel = dev->channel;
2778 SCpnt->request.rq_status = RQ_SCSI_BUSY;
2779 SCpnt->request.waiting = NULL;
2781 SCpnt->old_use_sg = 0;
2782 SCpnt->old_cmd_len = 0;
2783 SCpnt->underflow = 0;
2784 SCpnt->transfersize = 0;
2786 SCpnt->serial_number = 0;
2787 SCpnt->serial_number_at_timeout = 0;
2788 SCpnt->host_scribble = NULL;
2790 SCpnt->state = SCSI_STATE_INITIALIZING;
2791 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
2793 memset(&SCpnt->cmnd, '\0', sizeof(SCpnt->cmnd));
/* Completion must be intercepted -- see scsi_reset_provider_done_command. */
2795 SCpnt->scsi_done = scsi_reset_provider_done_command;
2797 SCpnt->reset_chain = NULL;
2799 SCpnt->buffer = NULL;
2801 SCpnt->request_buffer = NULL;
2802 SCpnt->request_bufflen = 0;
2804 SCpnt->internal_timeout = NORMAL_TIMEOUT;
2805 SCpnt->abort_reason = DID_ABORT;
2809 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
2810 SCpnt->sc_request = NULL;
2811 SCpnt->sc_magic = SCSI_CMND_MAGIC;
2814 * Sometimes the command can get back into the timer chain,
2815 * so use the pid as an identifier.
/* Dispatch: new EH path runs unlocked; old reset path requires
 * io_request_lock held, as it predates fine-grained locking. */
2819 if (dev->host->hostt->use_new_eh_code) {
2820 rtn = scsi_new_reset(SCpnt, flag);
2822 unsigned long flags;
2824 spin_lock_irqsave(&io_request_lock, flags);
2825 rtn = scsi_old_reset(SCpnt, flag);
2826 spin_unlock_irqrestore(&io_request_lock, flags);
/* Make sure the synthetic command is off the timer chain before the
 * on-stack Scsi_Cmnd goes out of scope. */
2829 scsi_delete_timer(SCpnt);
2834 * Overrides for Emacs so that we follow Linus's tabbing style.
2835 * Emacs will notice this stuff at the end of the file and automatically
2836 * adjust the settings for this buffer only. This must remain at the end
2838 * ---------------------------------------------------------------------------
2841 * c-brace-imaginary-offset: 0
2842 * c-brace-offset: -4
2843 * c-argdecl-indent: 4
2844 * c-label-offset: -4
2845 * c-continued-statement-offset: 4
2846 * c-continued-brace-offset: 0
2847 * indent-tabs-mode: nil