3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY
7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2002 Douglas Gilbert
12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
20 #include <linux/config.h>
/* Driver version identifiers. sg_version_num is what the
 * SG_GET_VERSION_NUM ioctl hands back to user space (see sg_ioctl). */
22 static char * sg_version_str = "Version: 3.1.24 (20020505)";
24 static int sg_version_num = 30124; /* 2 digits for each component */
26 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
27 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
28 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
29 * (otherwise the macros compile to empty statements).
30 * Then before running the program to be debugged enter:
31 * # echo "scsi log timeout 7" > /proc/scsi/scsi
32 * This will send copious output to the console and the log which
33 * is usually /var/log/messages. To turn off debugging enter:
34 * # echo "scsi log timeout 0" > /proc/scsi/scsi
35 * The 'timeout' token was chosen because it is relatively unused.
36 * The token 'hlcomplete' should be used but that triggers too
37 * much output from the sd device driver. To dump the current
38 * state of the SCSI mid level data structures enter:
39 * # echo "scsi dump 1" > /proc/scsi/scsi
40 * To dump the state of sg's data structures use:
41 * # cat /proc/scsi/sg/debug
44 #include <linux/module.h>
47 #include <linux/kernel.h>
48 #include <linux/sched.h>
49 #include <linux/string.h>
51 #include <linux/errno.h>
52 #include <linux/mtio.h>
53 #include <linux/ioctl.h>
54 #include <linux/fcntl.h>
55 #include <linux/init.h>
56 #include <linux/poll.h>
57 #include <linux/smp_lock.h>
60 #include <asm/uaccess.h>
61 #include <asm/system.h>
63 #include <linux/blk.h>
66 #include <scsi/scsi_ioctl.h>
70 #include <linux/proc_fs.h>
/* /proc/scsi/sg support entry points (defined later in the file). */
71 static int sg_proc_init(void);
72 static void sg_proc_cleanup(void);
75 #ifndef LINUX_VERSION_CODE
76 #include <linux/version.h>
77 #endif /* LINUX_VERSION_CODE */
/* Direct IO (kiobuf-based) support; disabled by default via
 * SG_ALLOW_DIO_DEF but compiled in unless SG_ALLOW_DIO_CODE is removed. */
79 #define SG_ALLOW_DIO_DEF 0
80 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
81 #ifdef SG_ALLOW_DIO_CODE
82 #include <linux/iobuf.h>
85 #define SG_NEW_KIOVEC 0 /* use alloc_kiovec(), not alloc_kiovec_sz() */
87 int sg_big_buff = SG_DEF_RESERVED_SIZE;
88 /* N.B. This variable is readable and writeable via
89 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
90 of this size (or less if there is not enough memory) will be reserved
91 for use by this file descriptor. [Deprecated usage: this variable is also
92 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
93 the kernel (i.e. it is not a module).] */
94 static int def_reserved_size = -1; /* picks up init parameter */
95 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
97 #define SG_SECTOR_SZ 512
98 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
100 #define SG_LOW_POOL_THRESHHOLD 30
101 #define SG_MAX_POOL_SECTORS 320 /* Max. number of pool sectors to take */
103 static int sg_pool_secs_avail = SG_MAX_POOL_SECTORS;
/* Tags recording which allocator a buffer came from, so sg_free()/
 * sg_low_free() can return it to the right place. */
105 #define SG_HEAP_PAGE 1 /* heap from kernel via get_free_pages() */
106 #define SG_HEAP_KMAL 2 /* heap from kernel via kmalloc() */
107 #define SG_HEAP_POOL 3 /* heap from scsi dma pool (mid-level) */
108 #define SG_USER_MEM 4 /* memory belongs to user space */
110 #define SG_DEV_ARR_LUMP 6 /* amount to over allocate sg_dev_arr by */
/* Scsi_Device_Template callbacks (upper-level driver registration). */
113 static int sg_init(void);
114 static int sg_attach(Scsi_Device *);
115 static void sg_finish(void);
116 static int sg_detect(Scsi_Device *);
117 static void sg_detach(Scsi_Device *);
119 static Scsi_Request * dummy_cmdp; /* only used for sizeof */
121 static rwlock_t sg_dev_arr_lock = RW_LOCK_UNLOCKED; /* Also used to lock
122 file descriptor list for device */
/* Registration record handed to the SCSI mid level; remaining initializer
 * fields are not visible in this extract (source appears sampled). */
124 static struct Scsi_Device_Template sg_template =
128 major:SCSI_GENERIC_MAJOR,
/* Per-request (or per-fd reserve) description of a data buffer, which is
 * either a single allocation or a scatter-gather list. */
137 typedef struct sg_scatter_hold /* holding area for scsi scatter gather info */
139 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
140 unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
141 unsigned bufflen; /* Size of (aggregate) data buffer */
142 unsigned b_malloc_len; /* actual len malloc'ed in buffer */
143 void * buffer; /* Data buffer or scatter list + mem_src_arr */
144 struct kiobuf * kiobp; /* for direct IO information */
145 char mapped; /* indicates kiobp has locked pages */
146 char buffer_mem_src; /* heap whereabouts of 'buffer' (SG_HEAP_*) */
147 unsigned char cmd_opcode; /* first byte of command */
148 } Sg_scatter_hold; /* 24 bytes long on i386 */
150 struct sg_device; /* forward declarations */
/* One outstanding command on a file descriptor; lives in the fd's
 * fixed req_arr[] and is linked via nextrp. */
153 typedef struct sg_request /* SG_MAX_QUEUE requests outstanding per file */
155 Scsi_Request * my_cmdp; /* != 0 when request with lower levels */
156 struct sg_request * nextrp; /* NULL -> tail request (slist) */
157 struct sg_fd * parentfp; /* NULL -> not in use */
158 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
159 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
160 unsigned char sense_b[sizeof(dummy_cmdp->sr_sense_buffer)];
161 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
162 char orphan; /* 1 -> drop on sight, 0 -> normal */
163 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
164 volatile char done; /* 0->before bh, 1->before read, 2->read */
165 } Sg_request; /* 168 bytes long on i386 */
/* State of one open() of an sg device node; fds of a device are chained
 * via nextfp from Sg_device.headfp (protected by sg_dev_arr_lock). */
167 typedef struct sg_fd /* holds the state of a file descriptor */
169 struct sg_fd * nextfp; /* NULL when last opened fd on this device */
170 struct sg_device * parentdp; /* owning device */
171 wait_queue_head_t read_wait; /* queue read until command done */
172 rwlock_t rq_list_lock; /* protect access to list in req_arr */
173 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
174 Sg_scatter_hold reserve; /* buffer held for this file descriptor */
175 unsigned save_scat_len; /* original length of trunc. scat. element */
176 Sg_request * headrp; /* head of request slist, NULL->empty */
177 struct fasync_struct * async_qp; /* used by asynchronous notification */
178 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
179 char low_dma; /* as in parent but possibly overridden to 1 */
180 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
181 volatile char closed; /* 1 -> fd closed but request(s) outstanding */
182 char fd_mem_src; /* heap whereabouts of this Sg_fd object */
183 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
184 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
185 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
186 char mmap_called; /* 0 -> mmap() never called on this fd */
187 } Sg_fd; /* 2760 bytes long on i386 */
/* Per-device state; instances are pointed to from the sg_dev_arr table. */
189 typedef struct sg_device /* holds the state of each scsi generic device */
191 Scsi_Device * device;
192 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
193 int sg_tablesize; /* adapter's max scatter-gather table size */
194 Sg_fd * headfp; /* first open fd belonging to this device */
196 kdev_t i_rdev; /* holds device major+minor number */
197 volatile char detached; /* 0->attached, 1->detached pending removal */
198 volatile char exclude; /* opened for exclusive access */
199 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
200 } Sg_device; /* 36 bytes long on i386 */
/* Forward declarations for the driver's internal helpers (all static). */
203 static int sg_fasync(int fd, struct file * filp, int mode);
204 static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt);
205 static int sg_start_req(Sg_request * srp);
206 static void sg_finish_rem_req(Sg_request * srp);
207 static int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
208 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
210 static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count,
212 static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count,
213 int blocking, int read_only, Sg_request ** o_srp);
214 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
215 unsigned char * cmnd, int timeout, int blocking);
216 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
217 int wr_xf, int * countp, unsigned char ** up);
218 static int sg_write_xfer(Sg_request * srp);
219 static int sg_read_xfer(Sg_request * srp);
220 static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer);
221 static void sg_remove_scat(Sg_scatter_hold * schp);
222 static char * sg_get_sgat_msa(Sg_scatter_hold * schp);
223 static void sg_build_reserve(Sg_fd * sfp, int req_size);
224 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
225 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
226 static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
228 static void sg_free(char * buff, int size, int mem_src);
229 static char * sg_low_malloc(int rqSz, int lowDma, int mem_src,
231 static void sg_low_free(char * buff, int size, int mem_src);
232 static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev);
233 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
234 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
235 static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id);
236 static Sg_request * sg_add_request(Sg_fd * sfp);
237 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
238 static int sg_res_in_use(Sg_fd * sfp);
239 static int sg_ms_to_jif(unsigned int msecs);
240 static inline unsigned sg_jif_to_ms(int jifs);
241 static int sg_allow_access(unsigned char opcode, char dev_type);
242 static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
243 static void sg_unmap_and(Sg_scatter_hold * schp, int free_also);
244 static Sg_device * sg_get_dev(int dev);
245 static inline int sg_alloc_kiovec(int nr, struct kiobuf **bufp, int *szp);
246 static inline void sg_free_kiovec(int nr, struct kiobuf **bufp, int *szp);
247 #ifdef CONFIG_PROC_FS
248 static int sg_last_dev(void);
/* Table of attached devices, grown in SG_DEV_ARR_LUMP increments and
 * guarded by sg_dev_arr_lock. */
251 static Sg_device ** sg_dev_arr = NULL;
/* Shorthand sizes for the user-visible structures of the old (sg_header)
 * and new (sg_io_hdr_t) interfaces. */
253 #define SZ_SG_HEADER sizeof(struct sg_header)
254 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
255 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
256 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
/* open() entry point: looks up the Sg_device for the minor number, pins
 * the owning host driver's module, honours O_EXCL semantics via
 * o_excl_wait, and attaches a fresh Sg_fd to filp->private_data.
 * NOTE(review): this extract is sampled; error labels and some returns
 * between the visible lines are missing from view. */
259 static int sg_open(struct inode * inode, struct file * filp)
261 int dev = MINOR(inode->i_rdev);
262 int flags = filp->f_flags;
268 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
269 sdp = sg_get_dev(dev);
270 if ((! sdp) || (! sdp->device))
275 /* This driver's module count bumped by fops_get in <linux/fs.h> */
276 /* Prevent the device driver from vanishing while we sleep */
277 if (sdp->device->host->hostt->module)
278 __MOD_INC_USE_COUNT(sdp->device->host->hostt->module);
279 sdp->device->access_count++;
281 if (! ((flags & O_NONBLOCK) ||
282 scsi_block_when_processing_errors(sdp->device))) {
284 /* we are in error recovery for this device */
288 if (flags & O_EXCL) {
289 if (O_RDONLY == (flags & O_ACCMODE)) {
290 retval = -EPERM; /* Can't lock it with read only access */
293 if (sdp->headfp && (flags & O_NONBLOCK))
/* Sleep until no other fd is open and we can claim exclusivity; the
 * condition expression sets sdp->exclude = 1 atomically on success. */
296 __wait_event_interruptible(sdp->o_excl_wait,
297 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)),
300 retval = res; /* -ERESTARTSYS because signal hit process */
304 else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
305 if (flags & O_NONBLOCK)
308 __wait_event_interruptible(sdp->o_excl_wait, (! sdp->exclude), res);
310 retval = res; /* -ERESTARTSYS because signal hit process */
318 if (! sdp->headfp) { /* no existing opens on this device */
320 sdp->sg_tablesize = sdp->device->host->sg_tablesize;
322 if ((sfp = sg_add_sfp(sdp, dev)))
323 filp->private_data = sfp;
325 if (flags & O_EXCL) sdp->exclude = 0; /* undo if error */
/* Error path: drop the references taken above. */
332 sdp->device->access_count--;
333 if ((! sdp->detached) && sdp->device->host->hostt->module)
334 __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
338 /* Following function was formerly called 'sg_close' */
/* release() entry point: detaches async notification, removes the Sg_fd
 * (sg_remove_sfp returns 1 when the whole Sg_device went away), drops the
 * references taken in sg_open and wakes any O_EXCL waiters. */
339 static int sg_release(struct inode * inode, struct file * filp)
345 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) {
349 SCSI_LOG_TIMEOUT(3, printk("sg_release: dev=%d\n", MINOR(sdp->i_rdev)));
350 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
351 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
352 if (! sdp->detached) {
353 sdp->device->access_count--;
354 if (sdp->device->host->hostt->module)
355 __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
358 wake_up_interruptible(&sdp->o_excl_wait);
/* read() entry point for the old (struct sg_header) interface. Picks the
 * matching completed request (optionally by pack_id when force_packid is
 * set), sleeps until one arrives unless O_NONBLOCK, then builds a
 * struct sg_header reply and copies header + data to user space.
 * Requests whose header.interface_id != '\0' are new-interface packets
 * and are delegated to sg_new_read().
 * NOTE(review): sampled extract; some intervening lines are missing. */
364 static ssize_t sg_read(struct file * filp, char * buf,
365 size_t count, loff_t *ppos)
371 int req_pack_id = -1;
372 struct sg_header old_hdr;
376 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
378 SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n",
379 MINOR(sdp->i_rdev), (int)count));
380 if (ppos != &filp->f_pos)
381 ; /* FIXME: Hmm. Seek to the right place, or fail? */
382 if ((k = verify_area(VERIFY_WRITE, buf, count)))
/* Peek at the user's header to learn which pack_id is wanted; a negative
 * reply_len marks a new-interface (sg_io_hdr_t) read. */
384 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
385 __copy_from_user(&old_hdr, buf, SZ_SG_HEADER);
386 if (old_hdr.reply_len < 0) {
387 if (count >= SZ_SG_IO_HDR) {
388 __copy_from_user(&new_hdr, buf, SZ_SG_IO_HDR);
389 req_pack_id = new_hdr.pack_id;
393 req_pack_id = old_hdr.pack_id;
395 srp = sg_get_rq_mark(sfp, req_pack_id);
396 if (! srp) { /* now wait on packet to arrive */
399 if (filp->f_flags & O_NONBLOCK)
402 res = 0; /* following is a macro that beats race condition */
403 __wait_event_interruptible(sfp->read_wait, (sdp->detached ||
404 (srp = sg_get_rq_mark(sfp, req_pack_id))), res);
409 return res; /* -ERESTARTSYS because signal hit process */
412 if (srp->header.interface_id != '\0')
413 return sg_new_read(sfp, buf, count, srp);
/* Translate the internal sg_io_hdr_t back into the legacy header. */
416 memset(&old_hdr, 0, SZ_SG_HEADER);
417 old_hdr.reply_len = (int)hp->timeout;
418 old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */
419 old_hdr.pack_id = hp->pack_id;
420 old_hdr.twelve_byte =
421 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
422 old_hdr.target_status = hp->masked_status;
423 old_hdr.host_status = hp->host_status;
424 old_hdr.driver_status = hp->driver_status;
425 if ((CHECK_CONDITION & hp->masked_status) ||
426 (DRIVER_SENSE & hp->driver_status))
427 memcpy(old_hdr.sense_buffer, srp->sense_b,
428 sizeof(old_hdr.sense_buffer));
429 switch (hp->host_status)
430 { /* This setup of 'result' is for backward compatibility and is best
431 ignored by the user who should use target, host + driver status */
433 case DID_PASSTHROUGH:
440 old_hdr.result = EBUSY;
447 old_hdr.result = EIO;
451 (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO;
454 old_hdr.result = EIO;
458 /* Now copy the result back to the user buffer. */
459 if (count >= SZ_SG_HEADER) {
460 __copy_to_user(buf, &old_hdr, SZ_SG_HEADER);
462 if (count > old_hdr.reply_len)
463 count = old_hdr.reply_len;
464 if (count > SZ_SG_HEADER)
465 sg_read_oxfer(srp, buf, count - SZ_SG_HEADER);
468 count = (old_hdr.result == 0) ? 0 : -EIO;
469 sg_finish_rem_req(srp);
/* Completes a read for the version 3 (sg_io_hdr_t) interface: copies
 * sense data (bounded by mx_sb_len and the actual sense length), sets
 * SG_INFO_CHECK when any status is non-zero, writes the header back to
 * the user and transfers the data payload via sg_read_xfer(). */
473 static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count,
476 sg_io_hdr_t * hp = &srp->header;
480 if (count < SZ_SG_IO_HDR) {
485 if ((hp->mx_sb_len > 0) && hp->sbp) {
486 if ((CHECK_CONDITION & hp->masked_status) ||
487 (DRIVER_SENSE & hp->driver_status)) {
488 int sb_len = sizeof(dummy_cmdp->sr_sense_buffer);
489 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
490 len = 8 + (int)srp->sense_b[7]; /* Additional sense length field */
491 len = (len > sb_len) ? sb_len : len;
492 if ((err = verify_area(VERIFY_WRITE, hp->sbp, len)))
494 __copy_to_user(hp->sbp, srp->sense_b, len);
498 if (hp->masked_status || hp->host_status || hp->driver_status)
499 hp->info |= SG_INFO_CHECK;
500 copy_to_user(buf, hp, SZ_SG_IO_HDR);
501 err = sg_read_xfer(srp);
503 sg_finish_rem_req(srp);
504 return (0 == err) ? count : err;
/* write() entry point for the old interface: parses a struct sg_header
 * followed by the SCSI command bytes, builds an internal sg_io_hdr_t
 * (several fields are deliberately abused to carry legacy values) and
 * queues the command via sg_common_write(). A negative reply_len in the
 * user's header routes the call to the new interface (sg_new_write).
 * NOTE(review): sampled extract; some intervening lines are missing. */
508 static ssize_t sg_write(struct file * filp, const char * buf,
509 size_t count, loff_t *ppos)
511 int mxsize, cmd_size, k;
512 int input_size, blocking;
513 unsigned char opcode;
517 struct sg_header old_hdr;
519 unsigned char cmnd[sizeof(dummy_cmdp->sr_cmnd)];
521 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
523 SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n",
524 MINOR(sdp->i_rdev), (int)count));
527 if (! ((filp->f_flags & O_NONBLOCK) ||
528 scsi_block_when_processing_errors(sdp->device)))
530 if (ppos != &filp->f_pos)
531 ; /* FIXME: Hmm. Seek to the right place, or fail? */
533 if ((k = verify_area(VERIFY_READ, buf, count)))
534 return k; /* protects following copy_from_user()s + get_user()s */
535 if (count < SZ_SG_HEADER)
537 __copy_from_user(&old_hdr, buf, SZ_SG_HEADER);
538 blocking = !(filp->f_flags & O_NONBLOCK);
539 if (old_hdr.reply_len < 0)
540 return sg_new_write(sfp, buf, count, blocking, 0, NULL);
541 if (count < (SZ_SG_HEADER + 6))
542 return -EIO; /* The minimum scsi command length is 6 bytes. */
544 if (! (srp = sg_add_request(sfp))) {
545 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
/* Determine the command length: explicit via SG_NEXT_CMD_LEN, otherwise
 * derived from the opcode's SCSI command group. */
549 __get_user(opcode, buf);
550 if (sfp->next_cmd_len > 0) {
551 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
552 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
553 sfp->next_cmd_len = 0;
554 sg_remove_request(sfp, srp);
557 cmd_size = sfp->next_cmd_len;
558 sfp->next_cmd_len = 0; /* reset so only this write() effected */
561 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
562 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
565 SCSI_LOG_TIMEOUT(4, printk("sg_write: scsi opcode=0x%02x, cmd_size=%d\n",
566 (int)opcode, cmd_size));
567 /* Determine buffer size. */
568 input_size = count - cmd_size;
569 mxsize = (input_size > old_hdr.reply_len) ? input_size :
571 mxsize -= SZ_SG_HEADER;
572 input_size -= SZ_SG_HEADER;
573 if (input_size < 0) {
574 sg_remove_request(sfp, srp);
575 return -EIO; /* User did not pass enough bytes for this command. */
578 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
579 hp->cmd_len = (unsigned char)cmd_size;
583 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
584 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
586 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV :
588 hp->dxfer_len = mxsize;
589 hp->dxferp = (unsigned char *)buf + cmd_size;
591 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
592 hp->flags = input_size; /* structure abuse ... */
593 hp->pack_id = old_hdr.pack_id;
595 __copy_from_user(cmnd, buf, cmd_size);
596 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
597 return (k < 0) ? k : count;
/* write() path for the version 3 (sg_io_hdr_t) interface. Validates the
 * header ('S' interface_id), enforces MMAP_IO constraints against the
 * reserve buffer, copies in the command and (for read-only fds) vets the
 * opcode via sg_allow_access() before calling sg_common_write().
 * On success *o_srp (if supplied, e.g. from SG_IO) receives the request. */
600 static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count,
601 int blocking, int read_only, Sg_request ** o_srp)
606 unsigned char cmnd[sizeof(dummy_cmdp->sr_cmnd)];
609 if (count < SZ_SG_IO_HDR)
611 if ((k = verify_area(VERIFY_READ, buf, count)))
612 return k; /* protects following copy_from_user()s + get_user()s */
614 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
615 if (! (srp = sg_add_request(sfp))) {
616 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
620 __copy_from_user(hp, buf, SZ_SG_IO_HDR);
621 if (hp->interface_id != 'S') {
622 sg_remove_request(sfp, srp);
625 if (hp->flags & SG_FLAG_MMAP_IO) {
626 if (hp->dxfer_len > sfp->reserve.bufflen) {
627 sg_remove_request(sfp, srp);
628 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
630 if (hp->flags & SG_FLAG_DIRECT_IO) {
631 sg_remove_request(sfp, srp);
632 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
634 if (sg_res_in_use(sfp)) {
635 sg_remove_request(sfp, srp);
636 return -EBUSY; /* reserve buffer already being used */
639 timeout = sg_ms_to_jif(srp->header.timeout);
640 if ((! hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof(cmnd))) {
641 sg_remove_request(sfp, srp);
644 if ((k = verify_area(VERIFY_READ, hp->cmdp, hp->cmd_len))) {
645 sg_remove_request(sfp, srp);
646 return k; /* protects following copy_from_user()s + get_user()s */
648 __copy_from_user(cmnd, hp->cmdp, hp->cmd_len);
650 (! sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
651 sg_remove_request(sfp, srp);
654 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
656 if (o_srp) *o_srp = srp;
/* Shared tail of both write paths: allocates data buffers
 * (sg_start_req/sg_write_xfer), obtains a Scsi_Request, fills it from
 * srp->data, maps the SG_DXFER_* direction onto SCSI_DATA_*, and hands
 * the command to the mid level with scsi_do_req(); completion arrives
 * later in sg_cmd_done_bh(). Returns 0 or a negative errno. */
660 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
661 unsigned char * cmnd, int timeout, int blocking)
664 Scsi_Request * SRpnt;
665 Sg_device * sdp = sfp->parentdp;
666 sg_io_hdr_t * hp = &srp->header;
669 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
671 hp->masked_status = 0;
675 hp->driver_status = 0;
678 printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
679 (int)cmnd[0], (int)hp->cmd_len));
681 if ((k = sg_start_req(srp))) {
682 SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
683 sg_finish_rem_req(srp);
684 return k; /* probably out of space --> ENOMEM */
686 if ((k = sg_write_xfer(srp))) {
687 SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
688 sg_finish_rem_req(srp);
692 sg_finish_rem_req(srp);
695 SRpnt = scsi_allocate_request(sdp->device);
697 SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
698 sg_finish_rem_req(srp);
702 srp->my_cmdp = SRpnt;
703 q = &SRpnt->sr_device->request_queue;
704 SRpnt->sr_request.rq_dev = sdp->i_rdev;
705 SRpnt->sr_request.rq_status = RQ_ACTIVE;
706 SRpnt->sr_sense_buffer[0] = 0;
707 SRpnt->sr_cmd_len = hp->cmd_len;
/* For SCSI-2 and older targets, fold the LUN into byte 1 of the CDB
 * unless the caller inhibited that with SG_FLAG_LUN_INHIBIT. */
708 if (! (hp->flags & SG_FLAG_LUN_INHIBIT)) {
709 if (sdp->device->scsi_level <= SCSI_2)
710 cmnd[1] = (cmnd[1] & 0x1f) | (sdp->device->lun << 5);
712 SRpnt->sr_use_sg = srp->data.k_use_sg;
713 SRpnt->sr_sglist_len = srp->data.sglist_len;
714 SRpnt->sr_bufflen = srp->data.bufflen;
715 SRpnt->sr_underflow = 0;
716 SRpnt->sr_buffer = srp->data.buffer;
717 switch (hp->dxfer_direction) {
718 case SG_DXFER_TO_FROM_DEV:
719 case SG_DXFER_FROM_DEV:
720 SRpnt->sr_data_direction = SCSI_DATA_READ; break;
721 case SG_DXFER_TO_DEV:
722 SRpnt->sr_data_direction = SCSI_DATA_WRITE; break;
723 case SG_DXFER_UNKNOWN:
724 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN; break;
726 SRpnt->sr_data_direction = SCSI_DATA_NONE; break;
/* Ownership of the buffer description has moved into SRpnt; clear the
 * local copy so it is not freed twice. */
728 srp->data.k_use_sg = 0;
729 srp->data.sglist_len = 0;
730 srp->data.bufflen = 0;
731 srp->data.buffer = NULL;
732 hp->duration = jiffies; /* unit jiffies now, millisecs after done */
733 /* Now send everything off to mid-level. The next time we hear about this
734 packet is when sg_cmd_done_bh() is called (i.e. a callback). */
735 scsi_do_req(SRpnt, (void *)cmnd,
736 (void *)SRpnt->sr_buffer, hp->dxfer_len,
737 sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
738 /* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
739 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,1)
740 generic_unplug_device(q);
/* ioctl() entry point: dispatches the SG_* and SCSI_IOCTL_* commands.
 * read_only reflects the open mode and gates commands that can issue
 * arbitrary SCSI CDBs. Unrecognized commands fall through to the SCSI
 * mid level's scsi_ioctl().
 * NOTE(review): sampled extract; several case labels and returns between
 * the visible lines are missing from view. */
745 static int sg_ioctl(struct inode * inode, struct file * filp,
746 unsigned int cmd_in, unsigned long arg)
748 int result, val, read_only;
752 unsigned long iflags;
754 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
756 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n",
757 MINOR(sdp->i_rdev), (int)cmd_in));
758 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
/* SG_IO: synchronous submit-and-wait built on sg_new_write/sg_new_read. */
764 int blocking = 1; /* ignore O_NONBLOCK flag */
768 if(! scsi_block_when_processing_errors(sdp->device) )
770 result = verify_area(VERIFY_WRITE, (void *)arg, SZ_SG_IO_HDR);
771 if (result) return result;
772 result = sg_new_write(sfp, (const char *)arg, SZ_SG_IO_HDR,
773 blocking, read_only, &srp);
774 if (result < 0) return result;
775 srp->sg_io_owned = 1;
777 result = 0; /* following macro to beat race condition */
778 __wait_event_interruptible(sfp->read_wait,
779 (sdp->detached || sfp->closed || srp->done), result);
783 return 0; /* request packet dropped already */
787 return result; /* -ERESTARTSYS because signal hit process */
790 result = sg_new_read(sfp, (char *)arg, SZ_SG_IO_HDR, srp);
791 return (result < 0) ? result : 0;
794 result = get_user(val, (int *)arg);
795 if (result) return result;
800 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
801 return sfp->timeout; /* strange ..., for backward compatibility */
802 case SG_SET_FORCE_LOW_DMA:
803 result = get_user(val, (int *)arg);
804 if (result) return result;
/* Switching DMA constraint: rebuild the reserve buffer so it honours the
 * new low_dma setting (only when not currently in use). */
807 if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
808 val = (int)sfp->reserve.bufflen;
809 sg_remove_scat(&sfp->reserve);
810 sg_build_reserve(sfp, val);
816 sfp->low_dma = sdp->device->host->unchecked_isa_dma;
820 return put_user((int)sfp->low_dma, (int *)arg);
822 result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(sg_scsi_id_t));
823 if (result) return result;
825 sg_scsi_id_t * sg_idp = (sg_scsi_id_t *)arg;
829 __put_user((int)sdp->device->host->host_no, &sg_idp->host_no);
830 __put_user((int)sdp->device->channel, &sg_idp->channel);
831 __put_user((int)sdp->device->id, &sg_idp->scsi_id);
832 __put_user((int)sdp->device->lun, &sg_idp->lun);
833 __put_user((int)sdp->device->type, &sg_idp->scsi_type);
834 __put_user((short)sdp->device->host->cmd_per_lun,
835 &sg_idp->h_cmd_per_lun);
836 __put_user((short)sdp->device->queue_depth,
837 &sg_idp->d_queue_depth);
838 __put_user(0, &sg_idp->unused[0]);
839 __put_user(0, &sg_idp->unused[1]);
842 case SG_SET_FORCE_PACK_ID:
843 result = get_user(val, (int *)arg);
844 if (result) return result;
845 sfp->force_packid = val ? 1 : 0;
848 result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
849 if (result) return result;
850 read_lock_irqsave(&sfp->rq_list_lock, iflags);
851 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
852 if ((1 == srp->done) && (! srp->sg_io_owned)) {
853 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
854 __put_user(srp->header.pack_id, (int *)arg);
858 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
859 __put_user(-1, (int *)arg);
861 case SG_GET_NUM_WAITING:
862 read_lock_irqsave(&sfp->rq_list_lock, iflags);
863 for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
864 if ((1 == srp->done) && (! srp->sg_io_owned))
867 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
868 return put_user(val, (int *)arg);
869 case SG_GET_SG_TABLESIZE:
870 return put_user(sdp->sg_tablesize, (int *)arg);
871 case SG_SET_RESERVED_SIZE:
872 result = get_user(val, (int *)arg);
873 if (result) return result;
874 //+Wilson04012004,from 2.4.24
878 if (val != sfp->reserve.bufflen) {
879 if (sg_res_in_use(sfp) || sfp->mmap_called)
881 sg_remove_scat(&sfp->reserve);
882 sg_build_reserve(sfp, val);
885 case SG_GET_RESERVED_SIZE:
886 val = (int)sfp->reserve.bufflen;
887 return put_user(val, (int *)arg);
888 case SG_SET_COMMAND_Q:
889 result = get_user(val, (int *)arg);
890 if (result) return result;
891 sfp->cmd_q = val ? 1 : 0;
893 case SG_GET_COMMAND_Q:
894 return put_user((int)sfp->cmd_q, (int *)arg);
895 case SG_SET_KEEP_ORPHAN:
896 result = get_user(val, (int *)arg);
897 if (result) return result;
898 sfp->keep_orphan = val;
900 case SG_GET_KEEP_ORPHAN:
901 return put_user((int)sfp->keep_orphan, (int *)arg);
902 case SG_NEXT_CMD_LEN:
903 result = get_user(val, (int *)arg);
904 if (result) return result;
905 sfp->next_cmd_len = (val > 0) ? val : 0;
907 case SG_GET_VERSION_NUM:
908 return put_user(sg_version_num, (int *)arg);
909 case SG_GET_ACCESS_COUNT:
910 val = (sdp->device ? sdp->device->access_count : 0);
911 return put_user(val, (int *)arg);
912 case SG_GET_REQUEST_TABLE:
913 result = verify_area(VERIFY_WRITE, (void *) arg,
914 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
915 if (result) return result;
917 sg_req_info_t rinfo[SG_MAX_QUEUE];
919 read_lock_irqsave(&sfp->rq_list_lock, iflags);
920 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
921 ++val, srp = srp ? srp->nextrp : srp) {
922 memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
924 rinfo[val].req_state = srp->done + 1;
925 rinfo[val].problem = srp->header.masked_status &
926 srp->header.host_status & srp->header.driver_status;
927 rinfo[val].duration = srp->done ?
928 srp->header.duration :
929 sg_jif_to_ms(jiffies - srp->header.duration);
930 rinfo[val].orphan = srp->orphan;
931 rinfo[val].sg_io_owned = srp->sg_io_owned;
932 rinfo[val].pack_id = srp->header.pack_id;
933 rinfo[val].usr_ptr = srp->header.usr_ptr;
936 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
937 __copy_to_user((void *)arg, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE);
940 case SG_EMULATED_HOST:
943 return put_user(sdp->device->host->hostt->emulated, (int *)arg);
/* SG_SCSI_RESET: translate the requested level into a SCSI_TRY_RESET_*
 * value; requires CAP_SYS_ADMIN + CAP_SYS_RAWIO. */
947 if (filp->f_flags & O_NONBLOCK) {
948 if (sdp->device->host->in_recovery)
951 else if (! scsi_block_when_processing_errors(sdp->device))
953 result = get_user(val, (int *)arg);
954 if (result) return result;
955 if (SG_SCSI_RESET_NOTHING == val)
957 #ifdef SCSI_TRY_RESET_DEVICE
960 case SG_SCSI_RESET_DEVICE:
961 val = SCSI_TRY_RESET_DEVICE;
963 case SG_SCSI_RESET_BUS:
964 val = SCSI_TRY_RESET_BUS;
966 case SG_SCSI_RESET_HOST:
967 val = SCSI_TRY_RESET_HOST;
972 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
974 return (scsi_reset_provider(sdp->device, val) == SUCCESS) ? 0 : -EIO;
976 SCSI_LOG_TIMEOUT(1, printk("sg_ioctl: SG_RESET_SCSI not supported\n"));
979 case SCSI_IOCTL_SEND_COMMAND:
/* Read-only fds may only send vetted opcodes (sg_allow_access). */
983 unsigned char opcode = WRITE_6;
984 Scsi_Ioctl_Command * siocp = (void *)arg;
986 copy_from_user(&opcode, siocp->data, 1);
987 if (! sg_allow_access(opcode, sdp->device->type))
990 return scsi_ioctl_send_command(sdp->device, (void *)arg);
992 result = get_user(val, (int *)arg);
993 if (result) return result;
994 sdp->sgdebug = (char)val;
996 case SCSI_IOCTL_GET_IDLUN:
997 case SCSI_IOCTL_GET_BUS_NUMBER:
998 case SCSI_IOCTL_PROBE_HOST:
999 case SG_GET_TRANSFORM:
1002 return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
1005 return -EPERM; /* don't know so take safe approach */
1006 return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
/* poll() entry point: POLLIN|POLLRDNORM when a non-SG_IO request has
 * completed (done == 1); POLLOUT|POLLWRNORM when another command may be
 * written (always when command queuing is off and nothing is pending,
 * otherwise while fewer than SG_MAX_QUEUE requests are outstanding). */
1010 static unsigned int sg_poll(struct file * filp, poll_table * wait)
1012 unsigned int res = 0;
1017 unsigned long iflags;
1019 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))
1022 poll_wait(filp, &sfp->read_wait, wait);
1023 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1024 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1025 /* if any read waiting, flag it */
1026 if ((0 == res) && (1 == srp->done) && (! srp->sg_io_owned))
1027 res = POLLIN | POLLRDNORM;
1030 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1034 else if (! sfp->cmd_q) {
1036 res |= POLLOUT | POLLWRNORM;
1038 else if (count < SG_MAX_QUEUE)
1039 res |= POLLOUT | POLLWRNORM;
1040 SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n",
1041 MINOR(sdp->i_rdev), (int)res));
/* fasync() entry point: (de)registers this filp on the fd's async queue
 * via fasync_helper(); also called from sg_release with fd == -1. */
1045 static int sg_fasync(int fd, struct file * filp, int mode)
1051 if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp)))
1053 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n",
1054 MINOR(sdp->i_rdev), mode));
1056 retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1057 return (retval < 0) ? retval : 0;
/* Adjusts page reference counts on the reserve buffer so it can be
 * mmap()ed safely: on start, bumps the count of every non-base page of
 * each allocation; on finish, drops those counts again. Handles both the
 * scatter-gather and the single-allocation forms of the buffer. */
1060 static void sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
1066 SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, "
1067 "scatg=%d\n", startFinish, rsv_schp->k_use_sg));
1068 /* N.B. correction _not_ applied to base page of each allocation */
1069 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
1070 struct scatterlist * sclp = rsv_schp->buffer;
1072 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
1073 for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
1074 page_ptr = (unsigned char *)sclp->address + m;
1075 page = virt_to_page(page_ptr);
1077 get_page(page); /* increment page count */
1079 if (page_count(page) > 0)
1080 put_page_testzero(page); /* decrement page count */
1085 else { /* reserve buffer is just a single allocation */
1086 for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
1087 page_ptr = (unsigned char *)rsv_schp->buffer + m;
1088 page = virt_to_page(page_ptr);
1090 get_page(page); /* increment page count */
1092 if (page_count(page) > 0)
1093 put_page_testzero(page); /* decrement page count */
/* nopage handler for mmap()ed sg reserve buffers.  Translates the faulting
 * address into an offset within this fd's reserve buffer and returns the
 * backing struct page (with its refcount raised).  Faults past the reserve
 * buffer, or with no fd attached to the vma, yield NOPAGE_SIGBUS. */
1099 static struct page * sg_vma_nopage(struct vm_area_struct *vma,
1100                                    unsigned long addr, int unused)
1103     struct page * page = NOPAGE_SIGBUS;
1104     void * page_ptr = NULL;
1105     unsigned long offset;
1106     Sg_scatter_hold * rsv_schp;
1108 if ((NULL == vma) || (! (sfp = (Sg_fd *)vma->vm_private_data)))
1110 rsv_schp = &sfp->reserve;
1111 offset = addr - vma->vm_start;
1112 if (offset >= rsv_schp->bufflen)
1114 SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1115 offset, rsv_schp->k_use_sg));
1116 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
     /* walk the sg list to find the element containing 'offset' */
1118 unsigned long sa = vma->vm_start;
1120 struct scatterlist * sclp = rsv_schp->buffer;
1122 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1124 len = vma->vm_end - sa;
1125 len = (len < sclp->length) ? len : sclp->length;
1127 page_ptr = (unsigned char *)sclp->address + offset;
1128 page = virt_to_page(page_ptr);
1129 get_page(page); /* increment page count */
1136 else { /* reserve buffer is just a single allocation */
1137 page_ptr = (unsigned char *)rsv_schp->buffer + offset;
1138 page = virt_to_page(page_ptr);
1139 get_page(page); /* increment page count */
/* vm operations installed by sg_mmap(); only the nopage fault handler is set. */
1144 static struct vm_operations_struct sg_mmap_vm_ops = {
1145     nopage : sg_vma_nopage,
/* mmap file operation: maps this fd's reserve buffer into user space.
 * Rejects requests with a non-zero offset or larger than the reserve buffer,
 * and refuses non page aligned backing memory.  On first call per fd the page
 * counts are corrected via sg_rb_correct4mmap(); the vma is marked
 * VM_RESERVED|VM_IO and wired to sg_mmap_vm_ops for fault-time mapping. */
1148 static int sg_mmap(struct file * filp, struct vm_area_struct *vma)
1151     unsigned long req_sz = vma->vm_end - vma->vm_start;
1152     Sg_scatter_hold * rsv_schp;
1154 if ((! filp) || (! vma) || (! (sfp = (Sg_fd *)filp->private_data)))
1156 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1157 (void *)vma->vm_start, (int)req_sz));
1159 return -EINVAL; /* want no offset */
1160 rsv_schp = &sfp->reserve;
1161 if (req_sz > rsv_schp->bufflen)
1162 return -ENOMEM; /* cannot map more than reserved buffer */
1164 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
     /* every sg element must be page aligned to be mappable */
1166 unsigned long sa = vma->vm_start;
1168 struct scatterlist * sclp = rsv_schp->buffer;
1170 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1172 if ((unsigned long)sclp->address & (PAGE_SIZE - 1))
1173 return -EFAULT; /* non page aligned memory ?? */
1174 len = vma->vm_end - sa;
1175 len = (len < sclp->length) ? len : sclp->length;
1179 else { /* reserve buffer is just a single allocation */
1180 if ((unsigned long)rsv_schp->buffer & (PAGE_SIZE - 1))
1181 return -EFAULT; /* non page aligned memory ?? */
1183 if (0 == sfp->mmap_called) {
1184 sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
1185 sfp->mmap_called = 1;
1187 vma->vm_flags |= (VM_RESERVED | VM_IO);
1188 vma->vm_private_data = sfp;
1189 vma->vm_ops = &sg_mmap_vm_ops;
1193 /* This function is a "bottom half" handler that is called by the
1194  * mid level when a command is completed (or has failed). */
/* Completion path: locates the Sg_device and the matching Sg_request for the
 * finished Scsi_Request, moves buffer ownership from the request to srp->data,
 * fills in srp->header status/sense fields, releases the Scsi_Request, handles
 * the already-closed-fd and orphaned-request cases, then wakes readers and
 * sends SIGPOLL/POLL_IN to async listeners.  Runs under sg_dev_arr_lock for
 * the lookup/ownership phase.  NOTE(review): several lines (returns, brace
 * closures, sfp lookup) are missing from this view — treat flow as indicative. */
1195 static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt)
1197     Scsi_Request * SRpnt = SCpnt->sc_request;
1198     int dev = MINOR(SRpnt->sr_request.rq_dev);
1199     Sg_device * sdp = NULL;
1201     Sg_request * srp = NULL;
1203 read_lock(&sg_dev_arr_lock);
1204 if (sg_dev_arr && (dev >= 0)) {
1205 if (dev < sg_template.dev_max)
1206 sdp = sg_dev_arr[dev];
1208 if ((NULL == sdp) || sdp->detached) {
     /* device vanished while the command was in flight */
1209 read_unlock(&sg_dev_arr_lock);
1210 SCSI_LOG_TIMEOUT(1, printk("sg...bh: dev=%d gone\n", dev));
1211 scsi_release_request(SRpnt);
1217 read_lock(&sfp->rq_list_lock);
1218 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1219 if (SRpnt == srp->my_cmdp)
1222 read_unlock(&sfp->rq_list_lock);
1228 read_unlock(&sg_dev_arr_lock);
1229 SCSI_LOG_TIMEOUT(1, printk("sg...bh: req missing, dev=%d\n", dev));
1230 scsi_release_request(SRpnt);
1234 /* First transfer ownership of data buffers to sg_device object. */
1235 srp->data.k_use_sg = SRpnt->sr_use_sg;
1236 srp->data.sglist_len = SRpnt->sr_sglist_len;
1237 srp->data.bufflen = SRpnt->sr_bufflen;
1238 srp->data.buffer = SRpnt->sr_buffer;
1239 /* now clear out request structure */
1240 SRpnt->sr_use_sg = 0;
1241 SRpnt->sr_sglist_len = 0;
1242 SRpnt->sr_bufflen = 0;
1243 SRpnt->sr_buffer = NULL;
1244 SRpnt->sr_underflow = 0;
1245 SRpnt->sr_request.rq_dev = MKDEV(0, 0); /* "sg" _disowns_ request blk */
1247 srp->my_cmdp = NULL;
1249 read_unlock(&sg_dev_arr_lock);
1251 SCSI_LOG_TIMEOUT(4, printk("sg...bh: dev=%d, pack_id=%d, res=0x%x\n",
1252 dev, srp->header.pack_id, (int)SRpnt->sr_result));
1253 srp->header.resid = SCpnt->resid;
1254 /* sg_unmap_and(&srp->data, 0); */ /* unmap locked pages a.s.a.p. */
1255 /* N.B. unit of duration changes here from jiffies to millisecs */
1256 srp->header.duration = sg_jif_to_ms(jiffies - (int)srp->header.duration);
1257 if (0 != SRpnt->sr_result) {
     /* command failed: capture sense data and decompose the result code */
1258 memcpy(srp->sense_b, SRpnt->sr_sense_buffer, sizeof(srp->sense_b));
1259 srp->header.status = 0xff & SRpnt->sr_result;
1260 srp->header.masked_status = status_byte(SRpnt->sr_result);
1261 srp->header.msg_status = msg_byte(SRpnt->sr_result);
1262 srp->header.host_status = host_byte(SRpnt->sr_result);
1263 srp->header.driver_status = driver_byte(SRpnt->sr_result);
1264 if ((sdp->sgdebug > 0) &&
1265 ((CHECK_CONDITION == srp->header.masked_status) ||
1266 (COMMAND_TERMINATED == srp->header.masked_status)))
1267 print_req_sense("sg_cmd_done_bh", SRpnt);
1269 /* Following if statement is a patch supplied by Eric Youngdale */
1270 if (driver_byte(SRpnt->sr_result) != 0
1271 && (SRpnt->sr_sense_buffer[0] & 0x7f) == 0x70
1272 && (SRpnt->sr_sense_buffer[2] & 0xf) == UNIT_ATTENTION
1273 && sdp->device->removable) {
1274 /* Detected disc change. Set the bit - this may be used if */
1275 /* there are filesystems using this device. */
1276 sdp->device->changed = 1;
1279 /* Rely on write phase to clean out srp status values, so no "else" */
1281 scsi_release_request(SRpnt);
1283 if (sfp->closed) { /* whoops this fd already released, cleanup */
1285 printk("sg...bh: already closed, freeing ...\n"));
1286 sg_finish_rem_req(srp);
1288 if (NULL == sfp->headrp) {
     /* last outstanding request on a closed fd: tear the fd down and
      * drop the module/device references taken at dirty-close time */
1290 printk("sg...bh: already closed, final cleanup\n"));
1291 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
1292 sdp->device->access_count--;
1293 if (sdp->device->host->hostt->module)
1294 __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
1296 if (sg_template.module)
1297 __MOD_DEC_USE_COUNT(sg_template.module);
1301 else if (srp && srp->orphan) {
1302 if (sfp->keep_orphan)
1303 srp->sg_io_owned = 0;
1305 sg_finish_rem_req(srp);
1310 /* Now wake up any sg_read() that is waiting for this packet. */
1311 wake_up_interruptible(&sfp->read_wait);
1312 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
/* file operations table for /dev/sg* nodes (remaining members are on lines
 * not visible in this view). */
1316 static struct file_operations sg_fops = {
1324     release: sg_release,
/* Mid-level "detect" callback: sg attaches to every SCSI device, so just
 * count the device as noticed. */
1329 static int sg_detect(Scsi_Device * scsidp)
1331     sg_template.dev_noticed++;
1335 /* Driver initialization */
/* One-shot setup: registers the char major (once, guarded by the static
 * sg_registered flag), then allocates and zeroes the sg_dev_arr device table
 * sized for the devices noticed so far plus SG_DEV_ARR_LUMP slack.  On
 * failure, dev_noticed is reset so a later attempt can retry.  Runs under
 * sg_dev_arr_lock.
 * NOTE(review): lines are missing from this view (returns, brace closures,
 * proc-fs init) — confirm against the full source. */
1336 static int sg_init(void)
1338     static int sg_registered = 0;
1339     unsigned long iflags;
1341 if ((sg_template.dev_noticed == 0) || sg_dev_arr)
1344 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1345 if(!sg_registered) {
1346 if (devfs_register_chrdev(SCSI_GENERIC_MAJOR,"sg",&sg_fops))
1348 printk(KERN_ERR "Unable to get major %d for generic SCSI device\n",
1349 SCSI_GENERIC_MAJOR);
1350 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1351 sg_template.dev_noticed = 0;
1357 SCSI_LOG_TIMEOUT(3, printk("sg_init\n"));
1358 sg_template.dev_max = sg_template.dev_noticed + SG_DEV_ARR_LUMP;
1359 sg_dev_arr = (Sg_device **)kmalloc(sg_template.dev_max *
1360 sizeof(Sg_device *), GFP_ATOMIC);
1361 if (NULL == sg_dev_arr) {
1362 printk(KERN_ERR "sg_init: no space for sg_dev_arr\n");
1363 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1364 sg_template.dev_noticed = 0;
1367 memset(sg_dev_arr, 0, sg_template.dev_max * sizeof(Sg_device *));
1368 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1369 #ifdef CONFIG_PROC_FS
1371 #endif /* CONFIG_PROC_FS */
/* Boot-command-line parser for "sg_def_reserved_size=n": stores the parsed
 * value in def_reserved_size, or prints usage on bad input. */
1376 static int __init sg_def_reserved_size_setup(char *str)
1380     if (get_option(&str, &tmp) == 1) {
1381         def_reserved_size = tmp;
1386     printk(KERN_WARNING "sg_def_reserved_size : usage "
1387 "sg_def_reserved_size=n (n could be 65536, 131072 or 262144)\n");
/* register the kernel-parameter handler above */
1392 __setup("sg_def_reserved_size=", sg_def_reserved_size_setup);
/* Mid-level "attach" callback: gives scsidp an sg device node.  Grows the
 * sg_dev_arr table in SG_DEV_ARR_LUMP steps when full, finds the first free
 * minor, allocates and initializes an Sg_device, registers a devfs "generic"
 * entry, and logs the attachment (TYPE_TAPE and similar types are silently
 * skipped in the final switch).  Table manipulation runs under
 * sg_dev_arr_lock with GFP_ATOMIC allocations. */
1396 static int sg_attach(Scsi_Device * scsidp)
1399     unsigned long iflags;
1402 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1403 if (sg_template.nr_dev >= sg_template.dev_max) { /* try to resize */
1404 Sg_device ** tmp_da;
1405 int tmp_dev_max = sg_template.nr_dev + SG_DEV_ARR_LUMP;
1407 tmp_da = (Sg_device **)kmalloc(tmp_dev_max *
1408 sizeof(Sg_device *), GFP_ATOMIC);
1409 if (NULL == tmp_da) {
1411 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1412 printk(KERN_ERR "sg_attach: device array cannot be resized\n");
1415 memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
1416 memcpy(tmp_da, sg_dev_arr, sg_template.dev_max * sizeof(Sg_device *));
1417 kfree((char *)sg_dev_arr);
1418 sg_dev_arr = tmp_da;
1419 sg_template.dev_max = tmp_dev_max;
     /* find first unused minor number */
1422 for(k = 0; k < sg_template.dev_max; k++)
1423 if(! sg_dev_arr[k]) break;
1424 if (k > MINORMASK) {
1426 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1427 printk(KERN_WARNING "Unable to attach sg device <%d, %d, %d, %d>"
1428 " type=%d, minor number exceed %d\n", scsidp->host->host_no,
1429 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type,
1433 if(k < sg_template.dev_max)
1434 sdp = (Sg_device *)kmalloc(sizeof(Sg_device), GFP_ATOMIC);
1439 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1440 printk(KERN_ERR "sg_attach: Sg_device cannot be allocated\n");
1444 SCSI_LOG_TIMEOUT(3, printk("sg_attach: dev=%d \n", k));
1445 sdp->device = scsidp;
1446 init_waitqueue_head(&sdp->o_excl_wait);
1451 sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;
1452 sdp->i_rdev = MKDEV(SCSI_GENERIC_MAJOR, k);
1453 sdp->de = devfs_register (scsidp->de, "generic", DEVFS_FL_DEFAULT,
1454 SCSI_GENERIC_MAJOR, k,
1455 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
1457 sg_template.nr_dev++;
1458 sg_dev_arr[k] = sdp;
1459 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1460 switch (scsidp->type) {
1465 case TYPE_TAPE: break;
1467 printk(KERN_NOTICE "Attached scsi generic sg%d at scsi%d, channel"
1468 " %d, id %d, lun %d, type %d\n", k, scsidp->host->host_no,
1469 scsidp->channel, scsidp->id, scsidp->lun, scsidp->type);
1474 /* Called at 'finish' of init process, after all attaches */
/* No per-driver finish work is needed; body (not visible here) is empty. */
1475 static void sg_finish(void)
/* Mid-level "detach" callback: removes the sg node backing scsidp.  Scans
 * sg_dev_arr for the matching Sg_device; if file descriptors are still open
 * ("dirty" detach) it aborts their outstanding requests, drops module/device
 * references, wakes readers with POLL_HUP and leaves per-fd cleanup to the
 * completion path; otherwise it simply unregisters and frees the slot.
 * A dirty detach is followed by a 2-tick scsi_sleep() to delay destruction. */
1478 static void sg_detach(Scsi_Device * scsidp)
1481     unsigned long iflags;
1488 if (NULL == sg_dev_arr)
1491 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1492 for (k = 0; k < sg_template.dev_max; k++) {
1493 sdp = sg_dev_arr[k];
1494 if ((NULL == sdp) || (sdp->device != scsidp))
1495 continue; /* dirty but lowers nesting */
1498 for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1500 for (srp = sfp->headrp; srp; srp = tsrp) {
1502 if (sfp->closed || (0 == srp->done))
1503 sg_finish_rem_req(srp);
1506 sdp->device->access_count--;
1507 if (sg_template.module)
1508 __MOD_DEC_USE_COUNT(sg_template.module);
1509 if (sdp->device->host->hostt->module)
1510 __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module);
1511 __sg_remove_sfp(sdp, sfp);
1515 wake_up_interruptible(&sfp->read_wait);
1516 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1519 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
1520 devfs_unregister (sdp->de);
1522 if (NULL == sdp->headfp) {
1524 sg_dev_arr[k] = NULL;
1527 else { /* nothing active, simple case */
1528 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
1529 devfs_unregister (sdp->de);
1531 sg_dev_arr[k] = NULL;
1534 sg_template.nr_dev--;
1535 sg_template.dev_noticed--; /* from <dan@lectra.fr> */
1538 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1540 scsi_sleep(2); /* dirty detach so delay device destruction */
/* Module metadata and the def_reserved_size load-time parameter. */
1543 MODULE_AUTHOR("Douglas Gilbert");
1544 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1546 #ifdef MODULE_LICENSE
1547 MODULE_LICENSE("GPL");
1550 MODULE_PARM(def_reserved_size, "i");
1551 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
/* Module entry point: apply the def_reserved_size override (if given) to
 * sg_big_buff, then register this driver with the SCSI mid level. */
1553 static int __init init_sg(void) {
1554     if (def_reserved_size >= 0)
1555         sg_big_buff = def_reserved_size;
1556     sg_template.module = THIS_MODULE;
1557     return scsi_register_module(MODULE_SCSI_DEV, &sg_template);
/* Module exit: unregister from the SCSI mid level and devfs, free the
 * device table and reset dev_max. */
1560 static void __exit exit_sg( void)
1562 #ifdef CONFIG_PROC_FS
1564 #endif /* CONFIG_PROC_FS */
1565     scsi_unregister_module(MODULE_SCSI_DEV, &sg_template);
1566     devfs_unregister_chrdev(SCSI_GENERIC_MAJOR, "sg");
1567     if(sg_dev_arr != NULL) {
1568         kfree((char *)sg_dev_arr);
1571     sg_template.dev_max = 0;
/* Set up the data buffer for a new request.  Strategy, in order:
 *   1. no buffer needed when dxfer_len<=0 or direction is SG_DXFER_NONE;
 *   2. direct I/O (sg_build_dir) when enabled, requested via
 *      SG_FLAG_DIRECT_IO, direction known, no iovec and no ISA DMA quirk;
 *   3. link the per-fd reserve buffer when free and large enough;
 *   4. otherwise build an indirect scatter list (sg_build_indi), tearing it
 *      down again on failure. */
1575 static int sg_start_req(Sg_request * srp)
1578     Sg_fd * sfp = srp->parentfp;
1579     sg_io_hdr_t * hp = &srp->header;
1580     int dxfer_len = (int)hp->dxfer_len;
1581     int dxfer_dir = hp->dxfer_direction;
1582     Sg_scatter_hold * req_schp = &srp->data;
1583     Sg_scatter_hold * rsv_schp = &sfp->reserve;
1585 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1586 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1588 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1589 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1590 (! sfp->parentdp->device->host->unchecked_isa_dma)) {
1591 res = sg_build_dir(srp, sfp, dxfer_len);
1592 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1595 if ((! sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1596 sg_link_reserve(sfp, srp, dxfer_len);
1598 res = sg_build_indi(req_schp, sfp, dxfer_len);
1600 sg_remove_scat(req_schp);
/* Tear down a completed/aborted request: unmap (and free) any direct-I/O
 * kiobuf, then either unlink the shared reserve buffer or free the private
 * scatter list, and finally remove the request from its fd's list. */
1607 static void sg_finish_rem_req(Sg_request * srp)
1609     Sg_fd * sfp = srp->parentfp;
1610     Sg_scatter_hold * req_schp = &srp->data;
1612 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n",
1613 (int)srp->res_used));
1614 sg_unmap_and(&srp->data, 1);
1616 sg_unlink_reserve(sfp, srp);
1618 sg_remove_scat(req_schp);
1619 sg_remove_request(sfp, srp);
/* Allocate and zero the scatter-gather bookkeeping buffer for schp: an array
 * of up to 'tablesize' struct scatterlist entries immediately followed by a
 * parallel array of per-element mem_src chars.  If the allocator returns a
 * smaller buffer, the element count is scaled down accordingly.
 * Returns the number of scatterlist elements actually available. */
1622 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
1625     int mem_src, ret_sz;
1626     int elem_sz = sizeof(struct scatterlist) + sizeof(char);
1627     /* scatter gather array, followed by mem_src_arr (array of chars) */
1628     int sg_bufflen = tablesize * elem_sz;
1629     int mx_sc_elems = tablesize;
1631 mem_src = SG_HEAP_KMAL;
1632 schp->buffer = sg_malloc(sfp, sg_bufflen, &ret_sz, &mem_src);
1635 else if (ret_sz != sg_bufflen) {
1636 sg_bufflen = ret_sz;
1637 mx_sc_elems = sg_bufflen / elem_sz;
1639 schp->buffer_mem_src = (char)mem_src;
1640 schp->sglist_len = sg_bufflen;
1641 memset(schp->buffer, 0, sg_bufflen);
1642 return mx_sc_elems; /* number of scat_gath elements allocated */
/* Unmap the direct-I/O kiobuf attached to schp (if any) and, presumably when
 * free_also is set, release the kiovec as well — the guarding lines are not
 * visible here.  Entire body is compiled only under SG_ALLOW_DIO_CODE. */
1645 static void sg_unmap_and(Sg_scatter_hold * schp, int free_also)
1647 #ifdef SG_ALLOW_DIO_CODE
1650 if (schp && schp->kiobp) {
1652 unmap_kiobuf(schp->kiobp);
1656 sg_free_kiovec(1, &schp->kiobp, &nbhs);
/* Attempt zero-copy ("direct") I/O: pin the user buffer with
 * map_user_kiobuf(), then either (a) describe it as one linear region when
 * all pinned pages are physically contiguous, or (b) build a scatter list
 * over the pinned pages, marking every element SG_USER_MEM so the copy
 * routines skip it.  Returns <0 on error, 0 on success, 1 to tell the caller
 * to fall back to indirect buffering (e.g. out of scatter elements).
 * Compiled only under SG_ALLOW_DIO_CODE. */
1663 static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1665 #ifdef SG_ALLOW_DIO_CODE
1666     int res, k, split, offset, num, mx_sc_elems, rem_sz;
1669     struct scatterlist * sclp;
1670     unsigned long addr, prev_addr;
1671     sg_io_hdr_t * hp = &srp->header;
1672     Sg_scatter_hold * schp = &srp->data;
1673     int sg_tablesize = sfp->parentdp->sg_tablesize;
1676 res = sg_alloc_kiovec(1, &schp->kiobp, &nbhs);
1678 SCSI_LOG_TIMEOUT(5, printk("sg_build_dir: sg_alloc_kiovec res=%d\n",
1682 res = map_user_kiobuf((SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0,
1683 schp->kiobp, (unsigned long)hp->dxferp, dxfer_len);
1686 printk("sg_build_dir: map_user_kiobuf res=%d\n", res));
1687 sg_unmap_and(schp, 1);
     /* detect whether the pinned pages are physically contiguous */
1692 prev_addr = (unsigned long) page_address(kp->maplist[0]);
1693 for (k = 1, split = 0; k < kp->nr_pages; ++k, prev_addr = addr) {
1694 addr = (unsigned long) page_address(kp->maplist[k]);
1695 if ((prev_addr + PAGE_SIZE) != addr) {
     /* contiguous case: present the mapping as a single linear buffer */
1702 schp->buffer = page_address(kp->maplist[0]) + kp->offset;
1703 schp->bufflen = dxfer_len;
1704 schp->buffer_mem_src = SG_USER_MEM;
1705 schp->b_malloc_len = dxfer_len;
1706 hp->info |= SG_INFO_DIRECT_IO;
     /* split case: build a scatter list over the pinned pages */
1709 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1710 if (mx_sc_elems <= 1) {
1711 sg_unmap_and(schp, 1);
1712 sg_remove_scat(schp);
1715 mem_src_arr = schp->buffer + (mx_sc_elems * sizeof(struct scatterlist));
1716 for (k = 0, sclp = schp->buffer, rem_sz = dxfer_len;
1717 (rem_sz > 0) && (k < mx_sc_elems);
1719 offset = (0 == k) ? kp->offset : 0;
1720 num = (rem_sz > (PAGE_SIZE - offset)) ? (PAGE_SIZE - offset) :
1722 sclp->address = page_address(kp->maplist[k]) + offset;
1723 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
1727 mem_src_arr[k] = SG_USER_MEM;
1730 printk("sg_build_dir: k=%d, a=0x%p, len=%d, ms=%d\n",
1731 k, sclp->address, num, mem_src_arr[k]));
1735 printk("sg_build_dir: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
1736 schp->bufflen = dxfer_len;
1737 if (rem_sz > 0) { /* must have failed */
1738 sg_unmap_and(schp, 1);
1739 sg_remove_scat(schp);
1740 return 1; /* out of scatter gather elements, try indirect */
1742 hp->info |= SG_INFO_DIRECT_IO;
1746 #endif /* SG_ALLOW_DIO_CODE */
/* Build an "indirect" kernel buffer of at least buff_size bytes for schp.
 * The size is rounded up to an SG_SECTOR_SZ boundary; a single allocation is
 * tried first for requests up to SG_SCATTER_SZ, otherwise (or when the single
 * attempt comes back short) a scatter-gather list of SG_SCATTER_SZ-sized
 * chunks is assembled via sg_build_sgat()/sg_malloc(). */
1749 static int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1751     int ret_sz, mem_src;
1752     int blk_size = buff_size;
1755 if ((blk_size < 0) || (! sfp))
1758 ++blk_size; /* don't know why */
1759 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1760 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1761 SCSI_LOG_TIMEOUT(4, printk("sg_build_indi: buff_size=%d, blk_size=%d\n",
1762 buff_size, blk_size));
1763 if (blk_size <= SG_SCATTER_SZ) {
1764 mem_src = SG_HEAP_PAGE;
1765 p = sg_malloc(sfp, blk_size, &ret_sz, &mem_src);
1768 if (blk_size == ret_sz) { /* got it on the first attempt */
1771 schp->bufflen = blk_size;
1772 schp->buffer_mem_src = (char)mem_src;
1773 schp->b_malloc_len = blk_size;
1778 mem_src = SG_HEAP_PAGE;
1779 p = sg_malloc(sfp, SG_SCATTER_SZ, &ret_sz, &mem_src);
1783 /* Want some local declarations, so start new block ... */
1784 { /* lets try and build a scatter gather list */
1785 struct scatterlist * sclp;
1788 int sg_tablesize = sfp->parentdp->sg_tablesize;
1792 /* N.B. ret_sz and mem_src carried into this block ... */
1793 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1794 if (mx_sc_elems < 0)
1795 return mx_sc_elems; /* most likely -ENOMEM */
1796 mem_src_arr = schp->buffer +
1797 (mx_sc_elems * sizeof(struct scatterlist));
1799 for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
1800 (rem_sz > 0) && (k < mx_sc_elems);
1801 ++k, rem_sz -= ret_sz, ++sclp) {
1805 num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
1806 mem_src = SG_HEAP_PAGE;
1807 p = sg_malloc(sfp, num, &ret_sz, &mem_src);
1812 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
1815 sclp->length = ret_sz;
1816 mem_src_arr[k] = mem_src;
1819 printk("sg_build_build: k=%d, a=0x%p, len=%d, ms=%d\n",
1820 k, sclp->address, ret_sz, mem_src));
1821 } /* end of for loop */
1824 printk("sg_build_indi: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
1825 schp->bufflen = blk_size;
1826 if (rem_sz > 0) /* must have failed */
/* Copy outgoing data from user space into the request's kernel buffer(s)
 * before the command is issued.  Handles both the single-buffer and
 * scatter-gather cases, iterating over the user iovec (sg_u_iovec) and
 * skipping any kernel element marked SG_USER_MEM (direct I/O — nothing to
 * copy).  No-op for directions with no host->device transfer or when
 * SG_FLAG_NO_DXFER/SG_FLAG_MMAP_IO is set on the new interface.
 * NOTE(review): the sg-list walk was replaced by a 2.4.24 backport
 * ("+Wilson04012004") loop; interior lines are missing from this view. */
1832 static int sg_write_xfer(Sg_request * srp)
1834     sg_io_hdr_t * hp = &srp->header;
1835     Sg_scatter_hold * schp = &srp->data;
1837     int j, k, onum, usglen, ksglen, res, ok;
1838     int iovec_count = (int)hp->iovec_count;
1839     int dxfer_dir = hp->dxfer_direction;
1842     int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1844 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1845 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
     /* old (v2) interface keeps the outgoing length in hp->flags */
1846 num_xfer = (int)(new_interface ? hp->dxfer_len : hp->flags);
1847 if (schp->bufflen < num_xfer)
1848 num_xfer = schp->bufflen;
1850 if ((num_xfer <= 0) ||
1851 (new_interface && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1855 printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1856 num_xfer, iovec_count, schp->k_use_sg));
1859 if ((k = verify_area(VERIFY_READ, hp->dxferp,
1860 SZ_SG_IOVEC * onum)))
1866 if (0 == schp->k_use_sg) { /* kernel has single buffer */
1867 if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */
1869 for (j = 0, p = schp->buffer; j < onum; ++j) {
1870 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1871 if (res) return res;
1872 usglen = (num_xfer > usglen) ? usglen : num_xfer;
1873 __copy_from_user(p, up, usglen);
1881 else { /* kernel using scatter gather list */
1882 struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
1883 char * mem_src_arr = sg_get_sgat_msa(schp);
1884 ksglen = (int)sclp->length;
1887 for (j = 0, k = 0; j < onum; ++j) {
1888 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1889 if (res) return res;
1892 // for (; k < schp->k_use_sg; ++k, ++sclp) {
1893 // ksglen = (int)sclp->length;
1894 // p = sclp->address;
1897 for ( ; p; ++sclp, ksglen = (int)sclp->length, p = sclp->address) { //+Wilson04012004,from 2.4.24
1898 ok = (SG_USER_MEM != mem_src_arr[k]);
1901 if (ksglen > usglen) {
1902 if (usglen >= num_xfer) {
1903 if (ok) __copy_from_user(p, up, num_xfer);
1906 if (ok) __copy_from_user(p, up, usglen);
1912 if (ksglen >= num_xfer) {
1913 if (ok) __copy_from_user(p, up, num_xfer);
1916 if (ok) __copy_from_user(p, up, ksglen);
1920 //+Wilson04012004,from 2.4.24
1922 if (k >= schp->k_use_sg)
/* Resolve the ind-th user data segment: either the whole dxferp buffer (no
 * iovec) — where the old write interface keeps its length in hp->flags — or
 * the ind-th sg_iovec copied from user space.  Verifies user access for the
 * requested direction (wr_xf: read from user, else write to user) and returns
 * the segment pointer in *up and its length in *countp. */
1931 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1932                       int wr_xf, int * countp, unsigned char ** up)
1934     int num_xfer = (int)hp->dxfer_len;
1940 p = (unsigned char *)hp->dxferp;
1941 if (wr_xf && ('\0' == hp->interface_id))
1942 count = (int)hp->flags; /* holds "old" input_size */
1947 __copy_from_user(&u_iovec,
1948 (unsigned char *)hp->dxferp + (ind * SZ_SG_IOVEC),
1950 p = (unsigned char *)u_iovec.iov_base;
1951 count = (int)u_iovec.iov_len;
1953 if ((k = verify_area(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count)))
1956 if (countp) *countp = count;
/* Return a pointer to the mem_src char array that sg_build_sgat() placed
 * immediately after the scatterlist entries in schp->buffer. */
1960 static char * sg_get_sgat_msa(Sg_scatter_hold * schp)
1962     int elem_sz = sizeof(struct scatterlist) + sizeof(char);
1963     int mx_sc_elems = schp->sglist_len / elem_sz;
1964     return schp->buffer + (sizeof(struct scatterlist) * mx_sc_elems);
/* Free everything held by schp: each scatter-gather element (via sg_free with
 * its recorded mem_src), then the sg list buffer itself; or, for a single
 * allocation, just the buffer.  Leaves schp zeroed. */
1967 static void sg_remove_scat(Sg_scatter_hold * schp)
1969     SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n",
1971 if (schp->buffer && schp->sglist_len) {
1973 struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
1974 char * mem_src_arr = sg_get_sgat_msa(schp);
1976 for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) {
1977 mem_src = mem_src_arr[k];
1979 printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
1980 k, sclp->address, sclp->length, mem_src));
1981 sg_free(sclp->address, sclp->length, mem_src);
1982 sclp->address = NULL;
1983 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
1988 sg_free(schp->buffer, schp->sglist_len, schp->buffer_mem_src);
1990 else if (schp->buffer)
1991 sg_free(schp->buffer, schp->b_malloc_len, schp->buffer_mem_src);
1992 memset(schp, 0, sizeof(*schp));
/* Mirror of sg_write_xfer for the device->host direction: after completion,
 * copy data from the kernel buffer(s) back out to the user iovec segments,
 * skipping SG_USER_MEM elements (direct I/O) and honoring
 * SG_FLAG_NO_DXFER/SG_FLAG_MMAP_IO on the new interface.
 * NOTE(review): same 2.4.24 backported sg-walk loop as sg_write_xfer;
 * interior lines are missing from this view. */
1995 static int sg_read_xfer(Sg_request * srp)
1997     sg_io_hdr_t * hp = &srp->header;
1998     Sg_scatter_hold * schp = &srp->data;
2000     int j, k, onum, usglen, ksglen, res, ok;
2001     int iovec_count = (int)hp->iovec_count;
2002     int dxfer_dir = hp->dxfer_direction;
2005     int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2007 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir) ||
2008 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2009 num_xfer = hp->dxfer_len;
2010 if (schp->bufflen < num_xfer)
2011 num_xfer = schp->bufflen;
2013 if ((num_xfer <= 0) ||
2014 (new_interface && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2018 printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2019 num_xfer, iovec_count, schp->k_use_sg));
2022 if ((k = verify_area(VERIFY_READ, hp->dxferp,
2023 SZ_SG_IOVEC * onum)))
2029 if (0 == schp->k_use_sg) { /* kernel has single buffer */
2030 if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */
2032 for (j = 0, p = schp->buffer; j < onum; ++j) {
2033 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2034 if (res) return res;
2035 usglen = (num_xfer > usglen) ? usglen : num_xfer;
2036 __copy_to_user(up, p, usglen);
2044 else { /* kernel using scatter gather list */
2045 struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
2046 char * mem_src_arr = sg_get_sgat_msa(schp);
2047 ksglen = (int)sclp->length;
2050 for (j = 0, k = 0; j < onum; ++j) {
2051 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2052 if (res) return res;
2055 // for (; k < schp->k_use_sg; ++k, ++sclp) {
2056 // ksglen = (int)sclp->length;
2057 // p = sclp->address;
2060 for ( ; p; ++sclp, ksglen = (int)sclp->length, p = sclp->address) { //+Wilson04012004,from 2.4.24
2061 ok = (SG_USER_MEM != mem_src_arr[k]);
2064 if (ksglen > usglen) {
2065 if (usglen >= num_xfer) {
2066 if (ok) __copy_to_user(up, p, num_xfer);
2069 if (ok) __copy_to_user(up, p, usglen);
2075 if (ksglen >= num_xfer) {
2076 if (ok) __copy_to_user(up, p, num_xfer);
2079 if (ok) __copy_to_user(up, p, ksglen);
2083 //+Wilson04012004,from 2.4.24
2085 if (k >= schp->k_use_sg)
/* Copy up to num_read_xfer bytes from the request's kernel buffer to the
 * user buffer outp (old read interface).  Walks the scatter list element by
 * element, or does one __copy_to_user for a single allocation. */
2094 static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer)
2096     Sg_scatter_hold * schp = &srp->data;
2098 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2100 if ((! outp) || (num_read_xfer <= 0))
2102 if(schp->k_use_sg > 0) {
2104 struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
2106 for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) {
2107 num = (int)sclp->length;
2108 if (num > num_read_xfer) {
2109 __copy_to_user(outp, sclp->address, num_read_xfer);
2113 __copy_to_user(outp, sclp->address, num);
2114 num_read_xfer -= num;
2115 if (num_read_xfer <= 0)
2122 __copy_to_user(outp, schp->buffer, num_read_xfer);
/* Allocate this fd's reserve buffer.  Tries req_size (at least PAGE_SIZE)
 * via sg_build_indi(), halving the request on failure until it would drop
 * below PAGE_SIZE/2. */
2125 static void sg_build_reserve(Sg_fd * sfp, int req_size)
2127     Sg_scatter_hold * schp = &sfp->reserve;
2129 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2131 if (req_size < PAGE_SIZE)
2132 req_size = PAGE_SIZE;
2133 if (0 == sg_build_indi(schp, sfp, req_size))
2136 sg_remove_scat(schp);
2137 req_size >>= 1; /* divide by 2 */
2138 } while (req_size > (PAGE_SIZE / 2));
/* Point a request's data (srp->data) at the first 'size' bytes of the fd's
 * reserve buffer instead of allocating new memory.  In the scatter-gather
 * case the element covering the cut-off has its length temporarily truncated
 * (the original length is stashed in sfp->save_scat_len for
 * sg_unlink_reserve() to restore).  Size is rounded up to even for the
 * aha1542 controller. */
2141 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2143     Sg_scatter_hold * req_schp = &srp->data;
2144     Sg_scatter_hold * rsv_schp = &sfp->reserve;
2147 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2148 size = (size + 1) & (~1); /* round to even for aha1542 */
2149 if (rsv_schp->k_use_sg > 0) {
2152 struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;
2154 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
2155 num = (int)sclp->length;
2158 req_schp->k_use_sg = 0;
2159 req_schp->buffer = sclp->address;
2162 sfp->save_scat_len = num;
2163 sclp->length = (unsigned)rem;
2164 req_schp->k_use_sg = k + 1;
2165 req_schp->sglist_len = rsv_schp->sglist_len;
2166 req_schp->buffer = rsv_schp->buffer;
2168 req_schp->bufflen = size;
2169 req_schp->buffer_mem_src = rsv_schp->buffer_mem_src;
2170 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2176 if (k >= rsv_schp->k_use_sg)
2177 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2180 req_schp->k_use_sg = 0;
2181 req_schp->bufflen = size;
2182 req_schp->buffer = rsv_schp->buffer;
2183 req_schp->buffer_mem_src = rsv_schp->buffer_mem_src;
2184 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
/* Undo sg_link_reserve(): restore the truncated scatterlist element's length
 * from sfp->save_scat_len and detach the request's view of the reserve
 * buffer, clearing srp->data and the saved length. */
2188 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2190     Sg_scatter_hold * req_schp = &srp->data;
2191     Sg_scatter_hold * rsv_schp = &sfp->reserve;
2193 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2194 (int)req_schp->k_use_sg));
2195 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2196 struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;
2198 if (sfp->save_scat_len > 0)
2199 (sclp + (req_schp->k_use_sg - 1))->length =
2200 (unsigned)sfp->save_scat_len;
2202 SCSI_LOG_TIMEOUT(1, printk(
2203 "sg_unlink_reserve: BAD save_scat_len\n"));
2205 req_schp->k_use_sg = 0;
2206 req_schp->bufflen = 0;
2207 req_schp->buffer = NULL;
2208 req_schp->sglist_len = 0;
2209 sfp->save_scat_len = 0;
/* Find a completed, non-SG_IO-owned request on this fd matching pack_id
 * (-1 matches any), mark it done=2 so no other reader can claim it, and
 * return it.  Runs under the fd's rq_list_lock. */
2213 static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2216     unsigned long iflags;
2218 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2219 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2220 /* look for requests that are ready + not SG_IO owned */
2221 if ((1 == resp->done) && (! resp->sg_io_owned) &&
2222 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2223 resp->done = 2; /* guard against other readers */
2227 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2231 #ifdef CONFIG_PROC_FS
/* proc-fs helper: return the nth request on this fd's list (0-based),
 * under the fd's rq_list_lock. */
2232 static Sg_request * sg_get_nth_request(Sg_fd * sfp, int nth)
2235     unsigned long iflags;
2238 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2239 for (k = 0, resp = sfp->headrp; resp && (k < nth);
2240 ++k, resp = resp->nextrp)
2242 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2247 /* always adds to end of list */
/* Allocate a slot from the fd's fixed req_arr (first slot when the list is
 * empty; otherwise a free slot, subject to cmd_q allowing queuing and the
 * SG_MAX_QUEUE cap), zero it, stamp header.duration with jiffies, and link
 * it at the tail.  Runs under the fd's rq_list_lock; returns NULL when no
 * slot is available. */
2248 static Sg_request * sg_add_request(Sg_fd * sfp)
2251     unsigned long iflags;
2253     Sg_request * rp = sfp->req_arr;
2255 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2258 memset(rp, 0, sizeof(Sg_request));
2264 if (0 == sfp->cmd_q)
2265 resp = NULL; /* command queuing disallowed */
2267 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2271 if (k < SG_MAX_QUEUE) {
2272 memset(rp, 0, sizeof(Sg_request));
2274 while (resp->nextrp)
2275 resp = resp->nextrp;
2284 resp->nextrp = NULL;
2285 resp->header.duration = jiffies;
2286 resp->my_cmdp = NULL;
2287 resp->data.kiobp = NULL;
2289 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2293 /* Return of 1 for found; 0 for not found */
/* Unlink srp from sfp's singly linked request list (head or interior) and
 * clear its parentfp back-pointer.  Runs under the fd's rq_list_lock. */
2294 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2296     Sg_request * prev_rp;
2298     unsigned long iflags;
2301 if ((! sfp) || (! srp) || (! sfp->headrp))
2303 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2304 prev_rp = sfp->headrp;
2305 if (srp == prev_rp) {
2306 sfp->headrp = prev_rp->nextrp;
2307 prev_rp->parentfp = NULL;
2311 while ((rp = prev_rp->nextrp)) {
2313 prev_rp->nextrp = rp->nextrp;
2314 rp->parentfp = NULL;
2321 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2325 #ifdef CONFIG_PROC_FS
/* proc-fs helper: return the nth open fd (Sg_fd) on this device (0-based),
 * under sg_dev_arr_lock. */
2326 static Sg_fd * sg_get_nth_sfp(Sg_device * sdp, int nth)
2329     unsigned long iflags;
2332 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2333 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2334 ++k, resp = resp->nextfp)
2336 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
/* Create and initialize an Sg_fd for a new open of device sdp: default
 * timeout/pack-id/command-queue/orphan settings, low_dma inherited from the
 * host's unchecked_isa_dma (unless forced), appended to the device's fd list
 * under sg_dev_arr_lock, then the reserve buffer is built (sg_big_buff). */
2341 static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev)
2344     unsigned long iflags;
2346 sfp = (Sg_fd *)sg_low_malloc(sizeof(Sg_fd), 0, SG_HEAP_KMAL, 0);
2349 memset(sfp, 0, sizeof(Sg_fd));
2350 sfp->fd_mem_src = SG_HEAP_KMAL;
2351 init_waitqueue_head(&sfp->read_wait);
2352 sfp->rq_list_lock = RW_LOCK_UNLOCKED;
2354 sfp->timeout = SG_DEFAULT_TIMEOUT;
2355 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2356 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2357 sdp->device->host->unchecked_isa_dma : 1;
2358 sfp->cmd_q = SG_DEF_COMMAND_Q;
2359 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2360 sfp->parentdp = sdp;
2361 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2364 else { /* add to tail of existing list */
2365 Sg_fd * pfp = sdp->headfp;
2370 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2371 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p, m_s=%d\n",
2372 sfp, (int)sfp->fd_mem_src));
2373 sg_build_reserve(sfp, sg_big_buff);
2374 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2375 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
/* Unlink sfp from sdp's fd list, undo any mmap page-count correction, free
 * the reserve buffer, and release the Sg_fd itself.  Caller is expected to
 * hold the appropriate locking (see sg_remove_sfp / sg_detach). */
2379 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2384     prev_fp = sdp->headfp;
2386     sdp->headfp = prev_fp->nextfp;
2388 while ((fp = prev_fp->nextfp)) {
2390 prev_fp->nextfp = fp->nextfp;
2396 if (sfp->reserve.bufflen > 0) {
2397 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2398 (int)sfp->reserve.bufflen, (int)sfp->reserve.k_use_sg));
2399 if (sfp->mmap_called)
2400 sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
2401 sg_remove_scat(&sfp->reserve);
2403 sfp->parentdp = NULL;
2404 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2405 sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->fd_mem_src);
2408 /* Returns 0 in normal case, 1 when detached and sdp object removed */
/* Close-time teardown of an fd.  Finishes all finished requests; when none
 * remain in flight, removes the fd (and, if the device was detached and this
 * was its last fd, clears its sg_dev_arr slot).  If writes are still pending
 * ("dirty"), the fd is only flagged closed and module/device reference counts
 * are raised so the completion bottom half can finish the cleanup later. */
2409 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2416     for (srp = sfp->headrp; srp; srp = tsrp) {
2419         sg_finish_rem_req(srp);
2424 unsigned long iflags;
2426 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2427 __sg_remove_sfp(sdp, sfp);
2428 if (sdp->detached && (NULL == sdp->headfp)) {
2431 maxd = sg_template.dev_max;
2432 for (k = 0; k < maxd; ++k) {
2433 if (sdp == sg_dev_arr[k])
2437 sg_dev_arr[k] = NULL;
2441 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2444 sfp->closed = 1; /* flag dirty state on this fd */
2445 sdp->device->access_count++;
2446 /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2447 if (sg_template.module)
2448 __MOD_INC_USE_COUNT(sg_template.module);
2449 if (sdp->device->host->hostt->module)
2450 __MOD_INC_USE_COUNT(sdp->device->host->hostt->module);
2451 SCSI_LOG_TIMEOUT(1, printk(
2452 "sg_remove_sfp: worrisome, %d writes pending\n", dirty));
/*
 * sg_res_in_use: report whether any request on this fd currently holds
 * the fd's reserve buffer.  Scans the request list under the rq_list
 * read lock; the (elided) return is based on where the scan stopped.
 */
2457 static int sg_res_in_use(Sg_fd * sfp)
2459 const Sg_request * srp;
2460 unsigned long iflags;
2462 read_lock_irqsave(&sfp->rq_list_lock, iflags);
/* stop at the first request flagged as using the reserve buffer */
2463 for (srp = sfp->headrp; srp; srp = srp->nextrp)
2464 if (srp->res_used) break;
2465 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2469 /* If retSzp==NULL want exact size or fail */
/*
 * sg_low_malloc: low-level allocator multiplexing three heaps:
 *   SG_HEAP_KMAL -> kmalloc, SG_HEAP_POOL -> scsi_malloc DMA pool,
 *   SG_HEAP_PAGE -> __get_free_pages.  With retSzp non-NULL the pool and
 *   page heaps may return a smaller buffer than requested (halving until
 *   something fits) and report the actual size via *retSzp.
 * NOTE(review): excerpt is line-sampled; NULL checks, returns and the
 * declaration of 'resSz' (used at 2529/2530) are among the elided lines.
 */
2470 static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp)
/* GFP_DMA restricts to ISA-reachable memory for unchecked_isa_dma hosts */
2473 int page_mask = lowDma ? (GFP_ATOMIC | GFP_DMA) : GFP_ATOMIC;
2477 if (SG_HEAP_KMAL == mem_src) {
2478 resp = kmalloc(rqSz, page_mask);
/* zero buffers handed to unprivileged users so stale kernel memory
 * cannot leak out through a read; privileged users skip the memset */
2480 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2481 memset(resp, 0, rqSz);
2482 if (retSzp) *retSzp = rqSz;
2486 if (SG_HEAP_POOL == mem_src) {
2487 int num_sect = rqSz / SG_SECTOR_SZ;
/* round a non-sector-multiple request up to whole sectors */
2489 if (0 != (rqSz & SG_SECTOR_MSK)) {
2493 rqSz = num_sect * SG_SECTOR_SZ;
2495 while (num_sect > 0) {
/* only dip into the DMA pool if it stays above its low-water mark */
2496 if ((num_sect <= sg_pool_secs_avail) &&
2497 (scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
2498 resp = scsi_malloc(rqSz);
2500 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2501 memset(resp, 0, rqSz);
2502 if (retSzp) *retSzp = rqSz;
2503 sg_pool_secs_avail -= num_sect;
2509 num_sect /= 2; /* try half as many */
2510 rqSz = num_sect * SG_SECTOR_SZ;
2513 else if (SG_HEAP_PAGE == mem_src) {
/* find the smallest page order covering rqSz */
2517 for (order = 0, a_size = PAGE_SIZE;
2518 a_size < rqSz; order++, a_size <<= 1)
2520 resp = (char *)__get_free_pages(page_mask, order);
/* if flexible sizing is allowed (retSzp), retry at lower orders */
2521 while ((! resp) && order && retSzp) {
2523 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2524 resp = (char *)__get_free_pages(page_mask, order); /* try half */
/* resSz is set on an elided line (presumably resSz = a_size) */
2528 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2529 memset(resp, 0, resSz);
2530 if (retSzp) *retSzp = resSz;
/* NOTE(review): "%df" looks like a typo for "%d" — the trailing 'f' is
 * printed literally; worth fixing in the full file */
2534 printk(KERN_ERR "sg_low_malloc: bad mem_src=%d, rqSz=%df\n",
/*
 * sg_malloc: policy layer above sg_low_malloc.  Chooses a heap based on
 * the requested size (pool for sub-page, page allocator otherwise) and,
 * when *mem_srcp allows flexibility, falls back across the other heaps
 * before giving up.  On success the heap actually used is written back
 * to *mem_srcp so the matching sg_free path can release it correctly.
 * NOTE(review): line-sampled excerpt — the switch on *mem_srcp and the
 * intermediate NULL checks between fallback attempts are elided.
 */
2539 static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
2544 if (retSzp) *retSzp = size;
2548 int low_dma = sfp->low_dma;
2549 int l_ms = -1; /* invalid value */
/* flexible request: start with the heap that best fits the size */
2554 l_ms = (size < PAGE_SIZE) ? SG_HEAP_POOL : SG_HEAP_PAGE;
2555 resp = sg_low_malloc(size, low_dma, l_ms, 0);
2558 resp = sg_low_malloc(size, low_dma, l_ms, &size);
/* first fallback: the other of pool/page */
2560 l_ms = (SG_HEAP_POOL == l_ms) ? SG_HEAP_PAGE : SG_HEAP_POOL;
2561 resp = sg_low_malloc(size, low_dma, l_ms, &size);
/* last resort: plain kmalloc */
2563 l_ms = SG_HEAP_KMAL;
2564 resp = sg_low_malloc(size, low_dma, l_ms, &size);
2567 if (resp && retSzp) *retSzp = size;
2570 l_ms = SG_HEAP_KMAL; /* was SG_HEAP_PAGE */
2571 resp = sg_low_malloc(size, low_dma, l_ms, 0);
2574 l_ms = SG_HEAP_POOL;
2575 resp = sg_low_malloc(size, low_dma, l_ms, &size);
2576 if (resp && retSzp) *retSzp = size;
2579 SCSI_LOG_TIMEOUT(1, printk("sg_malloc: bad ms=%d\n", *mem_srcp));
/* report which heap finally satisfied the request */
2582 if (resp) *mem_srcp = l_ms;
2584 SCSI_LOG_TIMEOUT(6, printk("sg_malloc: size=%d, ms=%d, ret=0x%p\n",
2585 size, *mem_srcp, resp));
/*
 * sg_alloc_kiovec: compatibility shim for kiobuf allocation — uses the
 * size-aware alloc_kiovec_sz() when available, plain alloc_kiovec()
 * otherwise.  The selecting #if/#else lines are elided in this excerpt.
 */
2589 static inline int sg_alloc_kiovec(int nr, struct kiobuf **bufp, int *szp)
2592 return alloc_kiovec_sz(nr, bufp, szp);
2594 return alloc_kiovec(nr, bufp);
/*
 * sg_low_free: release a buffer obtained from sg_low_malloc, dispatching
 * on mem_src so each heap's matching free routine is used.  The switch
 * statement and case labels are elided in this line-sampled excerpt.
 */
2598 static void sg_low_free(char * buff, int size, int mem_src)
/* SG_HEAP_POOL: return sectors to the scsi DMA pool and fix the count */
2604 int num_sect = size / SG_SECTOR_SZ;
2606 scsi_free(buff, size);
2607 sg_pool_secs_avail += num_sect;
/* SG_HEAP_KMAL */
2611 kfree(buff); /* size not used */
/* SG_HEAP_PAGE: recompute the order the allocation was made at */
2616 for (order = 0, a_size = PAGE_SIZE;
2617 a_size < size; order++, a_size <<= 1)
2619 free_pages((unsigned long)buff, order);
2623 break; /* nothing to do */
2625 printk(KERN_ERR "sg_low_free: bad mem_src=%d, buff=0x%p, rqSz=%d\n",
2626 mem_src, buff, size);
/*
 * sg_free: checked front end for sg_low_free — logs the call and ignores
 * NULL buffers and non-positive sizes (early return elided in excerpt).
 */
2631 static void sg_free(char * buff, int size, int mem_src)
2634 printk("sg_free: buff=0x%p, size=%d\n", buff, size));
2635 if ((! buff) || (size <= 0))
2638 sg_low_free(buff, size, mem_src);
/*
 * sg_free_kiovec: counterpart of sg_alloc_kiovec — size-aware
 * free_kiovec_sz() when available, classic free_kiovec() otherwise
 * (the selecting preprocessor lines are elided here).
 */
2641 static inline void sg_free_kiovec(int nr, struct kiobuf **bufp, int *szp)
2644 free_kiovec_sz(nr, bufp, szp);
2646 free_kiovec(nr, bufp);
/*
 * sg_ms_to_jif: convert a timeout in milliseconds to jiffies.  Huge
 * inputs clamp to INT_MAX; otherwise the multiply/divide order is chosen
 * so the intermediate (msecs * HZ) cannot overflow an int.
 */
2650 static int sg_ms_to_jif(unsigned int msecs)
2652 if ((UINT_MAX / 2U) < msecs)
2653 return INT_MAX; /* special case, set largest possible */
2655 return ((int)msecs < (INT_MAX / 1000)) ? (((int)msecs * HZ) / 1000)
2656 : (((int)msecs / 1000) * HZ);
/*
 * sg_jif_to_ms: convert jiffies to milliseconds, ordering the *1000/HZ
 * arithmetic to avoid unsigned overflow for large jiffy counts.  The
 * guard for non-positive jifs is on lines elided from this excerpt.
 */
2659 static inline unsigned sg_jif_to_ms(int jifs)
2664 unsigned int j = (unsigned int)jifs;
2665 return (j < (UINT_MAX / 1000)) ? ((j * 1000) / HZ) : ((j / HZ) * 1000);
/* Opcodes an unprivileged opener may send to a non-scanner device:
 * informational / read-only commands plus the common READ variants. */
2669 static unsigned char allow_ops[] = {TEST_UNIT_READY, REQUEST_SENSE,
2670 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2671 MODE_SENSE, MODE_SENSE_10, LOG_SENSE};
/*
 * sg_allow_access: permission filter for unprivileged users — scanners
 * are passed through unconditionally, everything else is checked against
 * the allow_ops whitelist above (return statements elided in excerpt).
 */
2673 static int sg_allow_access(unsigned char opcode, char dev_type)
2677 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
/* sizeof works here because allow_ops is an array of 1-byte elements */
2679 for (k = 0; k < sizeof(allow_ops); ++k) {
2680 if (opcode == allow_ops[k])
2687 #ifdef CONFIG_PROC_FS
/*
 * sg_last_dev: highest active sg device number plus one (origin 1),
 * found by scanning sg_dev_arr backwards under the read lock.  Used to
 * bound the /proc iteration loops below.
 */
2688 static int sg_last_dev()
2691 unsigned long iflags;
2693 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2694 for (k = sg_template.dev_max - 1; k >= 0; --k)
2695 if (sg_dev_arr[k] && sg_dev_arr[k]->device) break;
2696 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
/* k is -1 when nothing is active, giving 0 */
2697 return k + 1; /* origin 1 */
/*
 * sg_get_dev: bounds-checked lookup of the Sg_device for minor 'dev'.
 * Returns NULL when the array is absent, dev is negative or out of
 * range, or the slot is empty.
 */
2701 static Sg_device * sg_get_dev(int dev)
2703 Sg_device * sdp = NULL;
2704 unsigned long iflags;
2706 if (sg_dev_arr && (dev >= 0))
2708 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2709 if (dev < sg_template.dev_max)
2710 sdp = sg_dev_arr[dev];
2711 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2716 #ifdef CONFIG_PROC_FS
/* handle of the /proc/scsi/sg directory created by sg_proc_init */
2718 static struct proc_dir_entry * sg_proc_sgp = NULL;
2720 static char sg_proc_sg_dirname[] = "sg";
/* leaf names under /proc/scsi/sg; order must match sg_proc_leaf_reads
 * and sg_proc_leaf_writes below */
2721 static const char * sg_proc_leaf_names[] = {"allow_dio", "def_reserved_size",
2722 "debug", "devices", "device_hdr", "device_strs",
2723 "hosts", "host_hdr", "host_strs", "version"};
/*
 * Forward declarations for the /proc handlers.  Each leaf has a *_read
 * entry point (installed as read_proc) that delegates to a *_info
 * formatter via the SG_PROC_READ_FN macro; only allow_dio and
 * def_reserved_size are writable.
 */
2725 static int sg_proc_adio_read(char * buffer, char ** start, off_t offset,
2726 int size, int * eof, void * data);
2727 static int sg_proc_adio_info(char * buffer, int * len, off_t * begin,
2728 off_t offset, int size);
2729 static int sg_proc_adio_write(struct file * filp, const char * buffer,
2730 unsigned long count, void * data);
2731 static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset,
2732 int size, int * eof, void * data);
2733 static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin,
2734 off_t offset, int size);
2735 static int sg_proc_dressz_write(struct file * filp, const char * buffer,
2736 unsigned long count, void * data);
2737 static int sg_proc_debug_read(char * buffer, char ** start, off_t offset,
2738 int size, int * eof, void * data);
2739 static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
2740 off_t offset, int size);
2741 static int sg_proc_dev_read(char * buffer, char ** start, off_t offset,
2742 int size, int * eof, void * data);
2743 static int sg_proc_dev_info(char * buffer, int * len, off_t * begin,
2744 off_t offset, int size);
2745 static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset,
2746 int size, int * eof, void * data);
2747 static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin,
2748 off_t offset, int size);
2749 static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset,
2750 int size, int * eof, void * data);
2751 static int sg_proc_devstrs_info(char * buffer, int * len, off_t * begin,
2752 off_t offset, int size);
2753 static int sg_proc_host_read(char * buffer, char ** start, off_t offset,
2754 int size, int * eof, void * data);
2755 static int sg_proc_host_info(char * buffer, int * len, off_t * begin,
2756 off_t offset, int size);
2757 static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset,
2758 int size, int * eof, void * data);
2759 static int sg_proc_hosthdr_info(char * buffer, int * len, off_t * begin,
2760 off_t offset, int size);
2761 static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset,
2762 int size, int * eof, void * data);
2763 static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin,
2764 off_t offset, int size);
2765 static int sg_proc_version_read(char * buffer, char ** start, off_t offset,
2766 int size, int * eof, void * data);
2767 static int sg_proc_version_info(char * buffer, int * len, off_t * begin,
2768 off_t offset, int size);
/* dispatch tables: element k serves sg_proc_leaf_names[k] */
2769 static read_proc_t * sg_proc_leaf_reads[] = {
2770 sg_proc_adio_read, sg_proc_dressz_read, sg_proc_debug_read,
2771 sg_proc_dev_read, sg_proc_devhdr_read, sg_proc_devstrs_read,
2772 sg_proc_host_read, sg_proc_hosthdr_read, sg_proc_hoststrs_read,
2773 sg_proc_version_read};
/* 0 entries mark read-only leaves */
2774 static write_proc_t * sg_proc_leaf_writes[] = {
2775 sg_proc_adio_write, sg_proc_dressz_write, 0, 0, 0, 0, 0, 0, 0, 0};
/*
 * PRINT_PROC: append formatted output to 'buffer' at *len, handling the
 * /proc read windowing protocol (expects buffer, len, begin, offset and
 * size in the expansion scope).  Continuation lines between the visible
 * ones are elided in this excerpt.
 */
2777 #define PRINT_PROC(fmt,args...) \
2779 *len += sprintf(buffer + *len, fmt, ##args); \
/* enough output produced to satisfy this read window */
2780 if (*begin + *len > offset + size) \
/* output entirely before the window: discard and slide *begin forward */
2782 if (*begin + *len < offset) { \
/*
 * SG_PROC_READ_FN: boilerplate body shared by every *_read handler;
 * calls the given *_info formatter then computes the start pointer and
 * byte count that the read_proc contract requires.
 */
2788 #define SG_PROC_READ_FN(infofp) \
2792 *eof = infofp(buffer, &len, &begin, offset, size); \
2793 if (offset >= (begin + len)) \
2795 *start = buffer + offset - begin; \
2796 return (size < (begin + len - offset)) ? \
2797 size : begin + len - offset; \
/*
 * sg_proc_init: create the /proc/scsi/sg directory and one entry per
 * leaf, wiring in the read (and where present, write) handlers from the
 * dispatch tables.  Error-path lines are elided in this excerpt.
 */
2801 static int sg_proc_init()
2804 int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]);
2805 struct proc_dir_entry * pdep;
2809 sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
2810 S_IFDIR | S_IRUGO | S_IXUGO, proc_scsi);
2813 for (k = 0; k < leaves; ++k) {
/* writable leaves get owner-write permission on top of world-read */
2814 mask = sg_proc_leaf_writes[k] ? S_IRUGO | S_IWUSR : S_IRUGO;
2815 pdep = create_proc_entry(sg_proc_leaf_names[k], mask, sg_proc_sgp);
2817 pdep->read_proc = sg_proc_leaf_reads[k];
2818 if (sg_proc_leaf_writes[k])
2819 pdep->write_proc = sg_proc_leaf_writes[k];
/*
 * sg_proc_cleanup: tear down everything sg_proc_init created — each leaf
 * first, then the /proc/scsi/sg directory.  No-op if /proc/scsi or the
 * sg directory was never set up.
 */
2825 static void sg_proc_cleanup()
2828 int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]);
2830 if ((! proc_scsi) || (! sg_proc_sgp))
2832 for (k = 0; k < leaves; ++k)
2833 remove_proc_entry(sg_proc_leaf_names[k], sg_proc_sgp);
2834 remove_proc_entry(sg_proc_sg_dirname, proc_scsi);
/* /proc/scsi/sg/allow_dio reader: boilerplate via SG_PROC_READ_FN */
2837 static int sg_proc_adio_read(char * buffer, char ** start, off_t offset,
2838 int size, int * eof, void * data)
2839 { SG_PROC_READ_FN(sg_proc_adio_info); }
/* formatter: current sg_allow_dio flag as a single decimal line */
2841 static int sg_proc_adio_info(char * buffer, int * len, off_t * begin,
2842 off_t offset, int size)
2844 PRINT_PROC("%d\n", sg_allow_dio);
/*
 * allow_dio writer: privileged users set sg_allow_dio to 0/1 from the
 * first decimal token written.  The local buffer declaration and early
 * returns are elided in this excerpt.
 */
2848 static int sg_proc_adio_write(struct file * filp, const char * buffer,
2849 unsigned long count, void * data)
2854 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2856 num = (count < 10) ? count : 10;
/* NOTE(review): copy_from_user's return value is ignored here; a faulting
 * user buffer would leave 'buff' partially filled — worth checking */
2857 copy_from_user(buff, buffer, num);
2859 sg_allow_dio = simple_strtoul(buff, 0, 10) ? 1 : 0;
/* /proc/scsi/sg/def_reserved_size reader */
2863 static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset,
2864 int size, int * eof, void * data)
2865 { SG_PROC_READ_FN(sg_proc_dressz_info); }
/* formatter: current default reserve-buffer size (sg_big_buff) */
2867 static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin,
2868 off_t offset, int size)
2870 PRINT_PROC("%d\n", sg_big_buff);
/*
 * def_reserved_size writer: privileged users update sg_big_buff from the
 * written decimal value (validation against ULONG_MAX and the assignment
 * are on lines elided from this excerpt).
 */
2874 static int sg_proc_dressz_write(struct file * filp, const char * buffer,
2875 unsigned long count, void * data)
2878 unsigned long k = ULONG_MAX;
2881 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2883 num = (count < 10) ? count : 10;
/* NOTE(review): copy_from_user's return value is ignored — see adio_write */
2884 copy_from_user(buff, buffer, num);
2886 k = simple_strtoul(buff, 0, 10);
/* /proc/scsi/sg/debug reader: boilerplate wrapper around the big dump */
2894 static int sg_proc_debug_read(char * buffer, char ** start, off_t offset,
2895 int size, int * eof, void * data)
2896 { SG_PROC_READ_FN(sg_proc_debug_info); }
/*
 * sg_proc_debug_info: dump the driver's whole state — global counters,
 * then per device, per open fd, per outstanding request.  This is the
 * backend of 'cat /proc/scsi/sg/debug' mentioned in the file header.
 * NOTE(review): excerpt is line-sampled; loop braces and several locals'
 * declarations are elided between the numbered lines.
 */
2898 static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
2899 off_t offset, int size)
2902 const sg_io_hdr_t * hp;
2903 int j, max_dev, new_interface;
2905 if (NULL == sg_dev_arr) {
2906 PRINT_PROC("sg_dev_arr NULL, driver not initialized\n");
2909 max_dev = sg_last_dev();
2910 PRINT_PROC("dev_max(currently)=%d max_active_device=%d (origin 1)\n",
2911 sg_template.dev_max, max_dev);
2912 PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d "
2913 "def_reserved_size=%d\n",
2914 scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff);
/* per-device section */
2915 for (j = 0; j < max_dev; ++j) {
2916 if ((sdp = sg_get_dev(j))) {
2919 struct scsi_device * scsidp;
2920 int dev, k, m, blen, usg;
2922 scsidp = sdp->device;
2923 if (NULL == scsidp) {
2924 PRINT_PROC("device %d detached ??\n", j);
2927 dev = MINOR(sdp->i_rdev);
/* only report devices that have at least one fd open */
2929 if (sg_get_nth_sfp(sdp, 0)) {
2930 PRINT_PROC(" >>> device=sg%d ", dev);
2932 PRINT_PROC("detached pending close ");
2934 PRINT_PROC("scsi%d chan=%d id=%d lun=%d em=%d",
2935 scsidp->host->host_no, scsidp->channel,
2936 scsidp->id, scsidp->lun, scsidp->host->hostt->emulated);
2937 PRINT_PROC(" sg_tablesize=%d excl=%d\n", sdp->sg_tablesize,
/* per-fd section */
2940 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2941 PRINT_PROC("   FD(%d): timeout=%dms bufflen=%d "
2942 "(res)sgat=%d low_dma=%d\n", k + 1,
2943 sg_jif_to_ms(fp->timeout), fp->reserve.bufflen,
2944 (int)fp->reserve.k_use_sg, (int)fp->low_dma);
2945 PRINT_PROC("   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2946 (int)fp->cmd_q, (int)fp->force_packid,
2947 (int)fp->keep_orphan, (int)fp->closed);
/* per-request section */
2948 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
/* interface_id distinguishes the v3 sg_io_hdr from the old header */
2950 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2951 /* stop indenting so far ... */
/* annotate buffer source: mmap'ed / reserve / direct-io / user copy */
2952 PRINT_PROC(srp->res_used ? ((new_interface &&
2953 (SG_FLAG_MMAP_IO & hp->flags)) ? "     mmap>> " : "     rb>> ") :
2954 ((SG_INFO_DIRECT_IO_MASK & hp->info) ? "     dio>> " : "     "));
/* prefer live command state when the request is still active */
2955 blen = srp->my_cmdp ? srp->my_cmdp->sr_bufflen : srp->data.bufflen;
2956 usg = srp->my_cmdp ? srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
2957 PRINT_PROC(srp->done ? ((1 == srp->done) ?  "rcv:" : "fin:")
2958 : (srp->my_cmdp ? "act:" : "prior:"));
2959 PRINT_PROC(" id=%d blen=%d", srp->header.pack_id, blen);
2961 PRINT_PROC(" dur=%d", hp->duration);
/* active request: duration holds the start jiffy, so show elapsed */
2963 PRINT_PROC(" t_o/elap=%d/%d", new_interface ? hp->timeout :
2964 sg_jif_to_ms(fp->timeout),
2965 sg_jif_to_ms(hp->duration ? (jiffies - hp->duration) : 0));
2966 PRINT_PROC("ms sgat=%d op=0x%02x\n", usg, (int)srp->data.cmd_opcode);
2967 /* reset indenting */
2970 PRINT_PROC("     No requests active\n");
/* /proc/scsi/sg/devices reader */
2977 static int sg_proc_dev_read(char * buffer, char ** start, off_t offset,
2978 int size, int * eof, void * data)
2979 { SG_PROC_READ_FN(sg_proc_dev_info); }
/*
 * sg_proc_dev_info: one tab-separated line per sg device (columns match
 * the device_hdr leaf); detached or empty slots print a row of -1s so
 * line number keeps corresponding to minor number.
 */
2981 static int sg_proc_dev_info(char * buffer, int * len, off_t * begin,
2982 off_t offset, int size)
2986 struct scsi_device * scsidp;
2988 max_dev = sg_last_dev();
2989 for (j = 0; j < max_dev; ++j) {
2990 sdp = sg_get_dev(j);
2991 if (sdp && (scsidp = sdp->device) && (! sdp->detached))
2992 PRINT_PROC("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2993 scsidp->host->host_no, scsidp->channel, scsidp->id,
2994 scsidp->lun, (int)scsidp->type, (int)scsidp->access_count,
2995 (int)scsidp->queue_depth, (int)scsidp->device_busy,
2996 (int)scsidp->online);
2998 PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
/* /proc/scsi/sg/device_hdr reader */
3003 static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset,
3004 int size, int * eof, void * data)
3005 { SG_PROC_READ_FN(sg_proc_devhdr_info); }
/* column headings matching the rows produced by sg_proc_dev_info */
3007 static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin,
3008 off_t offset, int size)
3010 PRINT_PROC("host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
/* /proc/scsi/sg/device_strs reader */
3014 static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset,
3015 int size, int * eof, void * data)
3016 { SG_PROC_READ_FN(sg_proc_devstrs_info); }
/*
 * sg_proc_devstrs_info: vendor/model/rev INQUIRY strings, one line per
 * sg device, with a placeholder line for inactive/detached slots.
 */
3018 static int sg_proc_devstrs_info(char * buffer, int * len, off_t * begin,
3019 off_t offset, int size)
3023 struct scsi_device * scsidp;
3025 max_dev = sg_last_dev();
3026 for (j = 0; j < max_dev; ++j) {
3027 sdp = sg_get_dev(j);
3028 if (sdp && (scsidp = sdp->device) && (! sdp->detached))
/* fixed widths mirror the INQUIRY field lengths (8/16/4) */
3029 PRINT_PROC("%8.8s\t%16.16s\t%4.4s\n",
3030 scsidp->vendor, scsidp->model, scsidp->rev);
3032 PRINT_PROC("<no active device>\n");
/* /proc/scsi/sg/hosts reader */
3037 static int sg_proc_host_read(char * buffer, char ** start, off_t offset,
3038 int size, int * eof, void * data)
3039 { SG_PROC_READ_FN(sg_proc_host_info); }
/*
 * sg_proc_host_info: one line per SCSI host, padding gaps in host
 * numbering with -1 rows so line number tracks host_no.
 */
3041 static int sg_proc_host_info(char * buffer, int * len, off_t * begin,
3042 off_t offset, int size)
3044 struct Scsi_Host * shp;
3047 for (k = 0, shp = scsi_hostlist; shp; shp = shp->next, ++k) {
/* emit filler rows for unassigned host numbers below this host's */
3048 for ( ; k < shp->host_no; ++k)
3049 PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\n");
3050 PRINT_PROC("%u\t%hu\t%hd\t%hu\t%d\t%d\n",
3051 shp->unique_id, shp->host_busy, shp->cmd_per_lun,
3052 shp->sg_tablesize, (int)shp->unchecked_isa_dma,
3053 (int)shp->hostt->emulated);
/* /proc/scsi/sg/host_hdr reader */
3058 static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset,
3059 int size, int * eof, void * data)
3060 { SG_PROC_READ_FN(sg_proc_hosthdr_info); }
/* column headings matching the rows produced by sg_proc_host_info */
3062 static int sg_proc_hosthdr_info(char * buffer, int * len, off_t * begin,
3063 off_t offset, int size)
3065 PRINT_PROC("uid\tbusy\tcpl\tscatg\tisa\temul\n");
/* /proc/scsi/sg/host_strs reader */
3069 static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset,
3070 int size, int * eof, void * data)
3071 { SG_PROC_READ_FN(sg_proc_hoststrs_info); }
/* cap on the per-host description copied into the local buffer */
3073 #define SG_MAX_HOST_STR_LEN 256
/*
 * sg_proc_hoststrs_info: one descriptive line per SCSI host, taken from
 * the host template's info() callback when present, else its name.
 * Newlines inside the description are flattened so each host stays on
 * one /proc line.
 */
3075 static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin,
3076 off_t offset, int size)
3078 struct Scsi_Host * shp;
3080 char buff[SG_MAX_HOST_STR_LEN];
3083 for (k = 0, shp = scsi_hostlist; shp; shp = shp->next, ++k) {
/* filler rows for gaps in host numbering, as in sg_proc_host_info */
3084 for ( ; k < shp->host_no; ++k)
3085 PRINT_PROC("<no active host>\n");
/* strncpy may not NUL-terminate; the explicit terminator below fixes it */
3086 strncpy(buff, shp->hostt->info ? shp->hostt->info(shp) :
3087 (shp->hostt->name ? shp->hostt->name : "<no name>"),
3088 SG_MAX_HOST_STR_LEN);
3089 buff[SG_MAX_HOST_STR_LEN - 1] = '\0';
3090 for (cp = buff; *cp; ++cp) {
3092 *cp = ' '; /* suppress imbedded newlines */
3094 PRINT_PROC("%s\n", buff);
/* /proc/scsi/sg/version reader */
3099 static int sg_proc_version_read(char * buffer, char ** start, off_t offset,
3100 int size, int * eof, void * data)
3101 { SG_PROC_READ_FN(sg_proc_version_info); }
/* numeric version then human-readable string, tab separated */
3103 static int sg_proc_version_info(char * buffer, int * len, off_t * begin,
3104 off_t offset, int size)
3106 PRINT_PROC("%d\t%s\n", sg_version_num, sg_version_str);
3109 #endif /* CONFIG_PROC_FS */
/* register the driver's module entry/exit points (defined earlier) */
3112 module_init(init_sg);
3113 module_exit(exit_sg);