/*
 * bsg.c - block layer implementation of the sg v3 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 *  - Should this get merged, block/scsi_ioctl.c will be migrated into
 *    this file. To keep maintenance down, it's easier to have them
 *    separated right now.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>

static char bsg_version[] = "block layer sg (bsg) 0.4";
struct bsg_device {
	struct gendisk *disk;
	request_queue_t *queue;
	struct block_device *bdev;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	unsigned long *cmd_bitmap;
	struct bsg_command *cmd_map;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BDEVNAME_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 0,
	BSG_F_WRITE_PERM	= 1,
};
/*
 * command allocation bitmap defines
 */
#define BSG_CMDS_PAGE_ORDER	(1)
#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
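
/*
 * Worked example (assuming 4 KiB pages and a 64-bit long): an order-1
 * allocation gives BSG_CMDS_BYTES = 4096 * 2 = 8192 bytes for the
 * command map, BSG_CMDS_PER_LONG = 64 bits per bitmap word, and
 * BSG_CMDS = 8192 / sizeof(struct bsg_command) commands per device.
 */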
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

#define BSG_MAJOR	(240)
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr;

#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);
/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct sg_io_hdr hdr;
	struct sg_io_hdr __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long bitnr = bc - bd->cmd_map;
	unsigned long flags;

	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	__clear_bit(bitnr, bd->cmd_bitmap);
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;
	unsigned long *map;
	int free_nr;

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;
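
	/*
	 * Scan the bitmap a word at a time: skip fully used words (all
	 * ones), then let ffz() find the first zero bit in the first
	 * word that still has a free slot.
	 */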
	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
		free_nr += BSG_CMDS_PER_LONG;

	BUG_ON(*map == ~0UL);

	bd->queued_cmds++;
	free_nr += ffz(*map);
	__set_bit(free_nr, bd->cmd_bitmap);
	spin_unlock_irq(&bd->lock);

	bc = bd->cmd_map + free_nr;
	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
	return bc;
out:
	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
	spin_unlock_irq(&bd->lock);
	return bc;
}
static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}

static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

/*
 * get a new free command, blocking if needed and specified
 */
static struct bsg_command *bsg_get_command(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = __bsg_alloc_command(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	return bc;
}

/*
 * Check if sg_io_hdr from user is allowed and valid
 */
static int
bsg_validate_sghdr(request_queue_t *q, struct sg_io_hdr *hdr, int *rw)
{
	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	/*
	 * looks sane, if no data then it should be fine from our POV
	 */
	if (!hdr->dxfer_len)
		return 0;

	switch (hdr->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		*rw = READ;
		break;
	case SG_DXFER_TO_DEV:
		*rw = WRITE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
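
/*
 * Illustrative userspace sketch (not part of this driver; variable
 * names are arbitrary): a minimal sg_io_hdr that would pass the
 * validation above, issuing a 96-byte INQUIRY.
 *
 *	struct sg_io_hdr hdr;
 *	unsigned char cdb[6] = { INQUIRY, 0, 0, 0, 96, 0 };
 *	unsigned char buf[96];
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.interface_id = 'S';
 *	hdr.cmd_len = sizeof(cdb);
 *	hdr.cmdp = cdb;
 *	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	hdr.dxfer_len = sizeof(buf);
 *	hdr.dxferp = buf;
 *
 * A write() of this header queues the command; a later read() of
 * sizeof(hdr) bytes reaps the completion.
 */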

/*
 * map sg_io_hdr to a request. for scatter-gather sg_io_hdr, we map
 * each segment to a bio and string multiple bios to the request
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, int rw, struct sg_io_hdr *hdr)
{
	request_queue_t *q = bd->queue;
	struct sg_iovec iov;
	struct sg_iovec __user *u_iov;
	struct request *rq;
	int ret, i = 0;

	dprintk("map hdr %p/%d/%d\n", hdr->dxferp, hdr->dxfer_len,
		hdr->dxfer_direction);

	ret = bsg_validate_sghdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	ret = blk_fill_sghdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						     &bd->flags));
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	if (!hdr->iovec_count) {
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
		if (ret)
			goto fail;
		return rq;
	}

	u_iov = hdr->dxferp;
	for (ret = 0, i = 0; i < hdr->iovec_count; i++, u_iov++) {
		if (copy_from_user(&iov, u_iov, sizeof(iov))) {
			ret = -EFAULT;
			break;
		}

		if (!iov.iov_len || !iov.iov_base) {
			ret = -EINVAL;
			break;
		}

		ret = blk_rq_map_user(q, rq, iov.iov_base, iov.iov_len);
		if (ret)
			break;
	}

	if (!ret)
		return rq;
fail:
	dprintk("failed map at %d: %d\n", i, ret);
	blk_unmap_sghdr_rq(rq, hdr);
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p offset %d stat %d\n",
		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
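	/*
	 * hdr.duration temporarily holds the submit timestamp in jiffies;
	 * bsg_rq_end_io() converts it to elapsed milliseconds on completion.
	 */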
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, bd->disk, rq, 1, bsg_rq_end_io);
}

static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, state);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static struct bsg_command *
bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
{
	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
}

static struct bsg_command *
bsg_get_done_cmd_nosignals(struct bsg_device *bd)
{
	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		bc = bsg_get_done_cmd_nosignals(bd);

		/*
		 * we _must_ complete before restarting, because
		 * bsg_release can't handle this failing.
		 */
		if (PTR_ERR(bc) == -ERESTARTSYS)
			continue;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		tret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
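
/*
 * __bsg_read() takes its "reap one finished command" step as a
 * callback, so the same copy-out loop can be driven by different
 * done-command policies (see the bsg_get_done_cmd variants above).
 */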
typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);

static ssize_t
__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_hdr))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_hdr);
	while (nr_commands) {
		bc = get_bc(bd, iov);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_hdr);
		*bytes_read += sizeof(struct sg_io_hdr);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
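
/*
 * -ENOSPC, -ENODATA and -EAGAIN signal normal flow control (queue full,
 * nothing to reap, would block); only other errors are fatal to a
 * read/write.
 */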
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bsg_get_done_cmd,
			 bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_hdr))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_hdr);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;
		int rw = READ;

		bc = bsg_get_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_hdr __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, rw, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_hdr);
		*bytes_read += sizeof(struct sg_io_hdr);
	}

	if (rq)
		blk_unmap_sghdr_rq(rq, &bc->hdr);
	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
	return bytes_read;
}

static void bsg_free_device(struct bsg_device *bd)
{
	free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);

	kfree(bd->cmd_bitmap);
	kfree(bd);
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_command *cmd_map;
	unsigned long *cmd_bitmap;
	struct bsg_device *bd;
	int bits;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_CMDS;
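
	/*
	 * allocate one extra word so the bitmap covers BSG_CMDS bits
	 * even when BSG_CMDS isn't a multiple of BSG_CMDS_PER_LONG
	 */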
	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
	if (!cmd_bitmap)
		goto out_free_bd;
	bd->cmd_bitmap = cmd_bitmap;

	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    BSG_CMDS_PAGE_ORDER);
	if (!cmd_map)
		goto out_free_bitmap;
	bd->cmd_map = cmd_map;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;

out_free_bitmap:
	kfree(cmd_bitmap);
out_free_bd:
	kfree(bd);
	return NULL;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error detection is best-effort here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	bsg_free_device(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct gendisk *disk,
					 struct file *file)
{
	struct bsg_device *bd = NULL;
	unsigned char buf[32];

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->disk = disk;
	bd->queue = disk->queue;
	kobject_get(&disk->queue->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, disk->disk_name, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->disk, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
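
/*
 * Illustrative userspace sketch (not part of this driver; variable
 * names are arbitrary): waiting for a completion with poll(2).
 *
 *	struct pollfd pfd = { .fd = bsg_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(bsg_fd, &hdr, sizeof(hdr));	// reap one command
 */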

static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue > BSG_CMDS || queue < 1)
			return -EINVAL;

		bd->max_queue = queue;
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->disk, cmd, uarg);
	}
	default:
		/*
		 * block device ioctls
		 */
		return ioctl_by_bdev(bd->bdev, cmd, arg);
	}
}
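
/*
 * Illustrative userspace sketch (not part of this driver): resizing the
 * per-device queue depth via the ioctls above.
 *
 *	int depth = 4;
 *
 *	if (ioctl(bsg_fd, SG_SET_COMMAND_Q, &depth) < 0)
 *		perror("SG_SET_COMMAND_Q");
 *	ioctl(bsg_fd, SG_GET_COMMAND_Q, &depth);	// read it back
 */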

static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.ioctl		=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_disk(struct gendisk *disk)
{
	struct bsg_class_device *bcd = &disk->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&bcd->disk->queue->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	mutex_unlock(&bsg_mutex);
}

int bsg_register_disk(struct gendisk *disk)
{
	request_queue_t *q = disk->queue;
	struct bsg_class_device *bcd;
	dev_t dev;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &disk->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	dev = MKDEV(BSG_MAJOR, bsg_device_nr);
	bcd->minor = bsg_device_nr;
	bsg_device_nr++;
	bcd->disk = disk;
	bcd->class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev,
					     "%s", disk->disk_name);
	list_add_tail(&bcd->list, &bsg_class_list);
	sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
	mutex_unlock(&bsg_mutex);
	return 0;
}

static int __init bsg_init(void)
{
	int ret, i;

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);

	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
	if (ret) {
		class_destroy(bsg_class);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

subsys_initcall(bsg_init);