/*
 * linux/drivers/block/loop.c
 *
 * Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 * WARNING/FIXME:
 * - The block number as IV passing to low level transfer functions is broken:
 *   it passes the underlying device's block number instead of the
 *   offset. This makes it change for a given block when the file is
 *   moved/restored/copied and also doesn't work over NFS.
 * AV, Feb 12, 2000: we pass the logical block number now. It fixes the
 * problem above. Encryption modules that used to rely on the old scheme
 * should just call ->i_mapping->bmap() to calculate the physical block
 * number.
 */
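/*
 * Illustrative sketch only (not part of this driver): a legacy encryption
 * module that still wants the on-disk physical block could derive it from
 * the logical block roughly as below, assuming the backing filesystem
 * implements the address_space ->bmap() operation; "file" and
 * "logical_block" are stand-ins for whatever the module has at hand.
 *
 *	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
 *	int phys_block = 0;
 *
 *	if (mapping->a_ops->bmap)
 *		phys_block = mapping->a_ops->bmap(mapping, logical_block);
 *
 * Filesystems that lack ->bmap() (NFS, for one) cannot provide a stable
 * physical block, which is part of why the logical block is passed now.
 */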
#include <linux/config.h>
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <linux/loop.h>
#define MAJOR_NR LOOP_MAJOR

static int max_loop = 8;
static struct loop_device *loop_dev;
static int *loop_sizes;
static int *loop_blksizes;
static devfs_handle_t devfs_handle;	/* For the directory */
static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
			 char *loop_buf, int size, int real_block)
{
	if (raw_buf != loop_buf) {
		if (cmd == READ)
			memcpy(loop_buf, raw_buf, size);
		else
			memcpy(raw_buf, loop_buf, size);
	}
static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
			char *loop_buf, int size, int real_block)
{
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;  out = loop_buf;
	} else {
		in = loop_buf; out = raw_buf;
	}
	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];
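/*
 * XOR is its own inverse ((x ^ k) ^ k == x), so the same byte loop serves
 * both directions; only the choice of source and destination buffer
 * depends on whether cmd is READ or WRITE.
 */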
static int none_status(struct loop_device *lo, struct loop_info *info)
{
	lo->lo_flags |= LO_FLAGS_BH_REMAP;

static int xor_status(struct loop_device *lo, struct loop_info *info)
{
	if (info->lo_encrypt_key_size <= 0)

struct loop_func_table none_funcs = {
	number: LO_CRYPT_NONE,
	transfer: transfer_none,

struct loop_func_table xor_funcs = {
	number: LO_CRYPT_XOR,
	transfer: transfer_xor,

/* xfer_funcs[0] is special - its release function is never called */
struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {

#define MAX_DISK_SIZE 1024*1024*1024
static int compute_loop_size(struct loop_device *lo, struct dentry * lo_dentry, kdev_t lodev)
{
	if (S_ISREG(lo_dentry->d_inode->i_mode))
		return (lo_dentry->d_inode->i_size - lo->lo_offset) >> BLOCK_SIZE_BITS;
	if (blk_size[MAJOR(lodev)])
		return blk_size[MAJOR(lodev)][MINOR(lodev)] -
			(lo->lo_offset >> BLOCK_SIZE_BITS);
	return MAX_DISK_SIZE;
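/*
 * Rough worked example of the arithmetic above, assuming 1 KiB soft
 * blocks (BLOCK_SIZE_BITS == 10): a 100 MiB regular backing file with
 * lo_offset == 4096 yields (104857600 - 4096) >> 10 == 102396 blocks,
 * i.e. the offset simply shrinks the visible device.
 */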
static void figure_loop_size(struct loop_device *lo)
{
	loop_sizes[lo->lo_number] = compute_loop_size(lo,
					lo->lo_backing_file->f_dentry,

static int lo_send(struct loop_device *lo, struct buffer_head *bh, int bsize,

	struct file *file = lo->lo_backing_file;	/* kudos to NFsckingS */
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct address_space_operations *aops = mapping->a_ops;
	unsigned size, offset;

	down(&mapping->host->i_sem);
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & (PAGE_CACHE_SIZE - 1);

		int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;

		size = PAGE_CACHE_SIZE - offset;

		page = grab_cache_page(mapping, index);

		if (aops->prepare_write(file, page, offset, offset+size))

		flush_dcache_page(page);
		transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
		if (transfer_result) {
			/*
			 * The transfer failed, but we still write the data to
			 * keep prepare/commit calls balanced.
			 */
			printk(KERN_ERR "loop: transfer error block %ld\n", index);
			memset(kaddr + offset, 0, size);

		if (aops->commit_write(file, page, offset, offset+size))

		page_cache_release(page);

	up(&mapping->host->i_sem);

	page_cache_release(page);
	up(&mapping->host->i_sem);
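/*
 * The IV used above advances once per bsize-sized chunk of the backing
 * file. For example, with PAGE_CACHE_SIZE == 4096 and bsize == 1024,
 * page index 3 at offset 2048 gives IV = 3 * 4 + 2 = 14, i.e. the 15th
 * 1 KiB block of the file; lo_read_actor() below computes the same value,
 * so reads and writes of a given block always see the same IV.
 */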
struct lo_read_data {
	struct loop_device *lo;

static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	unsigned long count = desc->count;
	struct lo_read_data *p = (struct lo_read_data*)desc->buf;
	struct loop_device *lo = p->lo;
	int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;

	if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) {
		printk(KERN_ERR "loop: transfer error block %ld\n", page->index);
		desc->error = -EINVAL;

	desc->count = count - size;
	desc->written += size;

static int lo_receive(struct loop_device *lo, struct buffer_head *bh, int bsize,

	struct lo_read_data cookie;
	read_descriptor_t desc;

	cookie.data = bh->b_data;
	cookie.bsize = bsize;
	desc.count = bh->b_size;
	desc.buf = (char*)&cookie;

	spin_lock_irq(&lo->lo_lock);
	file = lo->lo_backing_file;
	spin_unlock_irq(&lo->lo_lock);
	do_generic_file_read(file, &pos, &desc, lo_read_actor);

static inline int loop_get_bs(struct loop_device *lo)
{
	if (blksize_size[MAJOR(lo->lo_device)])
		bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];

static inline unsigned long loop_get_iv(struct loop_device *lo,
					unsigned long sector)
{
	int bs = loop_get_bs(lo);
	unsigned long offset, IV;

	IV = sector / (bs >> 9) + lo->lo_offset / bs;
	offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
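/*
 * Example of the sector-based IV, assuming bs == 1024 (so bs >> 9 == 2
 * sectors per block): sector 2048 with lo_offset == 0 gives
 * IV = 2048 / 2 + 0 = 1024, matching the 1 KiB-block numbering that the
 * file-backed path derives from the page cache index above.
 */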
static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
{
	pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;

	if (rw == WRITE)
		ret = lo_send(lo, bh, loop_get_bs(lo), pos);
	else
		ret = lo_receive(lo, bh, loop_get_bs(lo), pos);
static void loop_end_io_transfer(struct buffer_head *bh, int uptodate);

static void loop_put_buffer(struct buffer_head *bh)
{
	/*
	 * check b_end_io, may just be a remapped bh and not an allocated one
	 */
	if (bh && bh->b_end_io == loop_end_io_transfer) {
		__free_page(bh->b_page);
		kmem_cache_free(bh_cachep, bh);
/*
 * Add buffer_head to back of pending list
 */
static void loop_add_bh(struct loop_device *lo, struct buffer_head *bh)
{
	spin_lock_irqsave(&lo->lo_lock, flags);
	if (lo->lo_bhtail) {
		lo->lo_bhtail->b_reqnext = bh;
		lo->lo_bhtail = bh;
	} else
		lo->lo_bh = lo->lo_bhtail = bh;
	spin_unlock_irqrestore(&lo->lo_lock, flags);

	up(&lo->lo_bh_mutex);
/*
 * Grab first pending buffer
 */
static struct buffer_head *loop_get_bh(struct loop_device *lo)
{
	struct buffer_head *bh;

	spin_lock_irq(&lo->lo_lock);
	if ((bh = lo->lo_bh)) {
		if (bh == lo->lo_bhtail)
			lo->lo_bhtail = NULL;
		lo->lo_bh = bh->b_reqnext;
		bh->b_reqnext = NULL;

	spin_unlock_irq(&lo->lo_lock);

/*
 * when buffer i/o has completed. if BH_Dirty is set, this was a WRITE
 * and lo->transfer stuff has already been done. if not, it was a READ
 * so queue it for the loop thread and let it do the transfer out of
 * b_end_io context (we don't want to do decrypt of a page with irqs
 * disabled)
 */
static void loop_end_io_transfer(struct buffer_head *bh, int uptodate)
{
	struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)];

	if (!uptodate || test_bit(BH_Dirty, &bh->b_state)) {
		struct buffer_head *rbh = bh->b_private;

		rbh->b_end_io(rbh, uptodate);
		if (atomic_dec_and_test(&lo->lo_pending))
			up(&lo->lo_bh_mutex);

static struct buffer_head *loop_get_buffer(struct loop_device *lo,
					   struct buffer_head *rbh)
{
	struct buffer_head *bh;

	/*
	 * for xfer_funcs that can operate on the same bh, do that
	 */
	if (lo->lo_flags & LO_FLAGS_BH_REMAP) {

	bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
		run_task_queue(&tq_disk);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);

	memset(bh, 0, sizeof(*bh));

	bh->b_size = rbh->b_size;
	bh->b_dev = rbh->b_rdev;
	bh->b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock);

	/*
	 * easy way out, although it does waste some memory for < PAGE_SIZE
	 * blocks... if highmem bounce buffering can get away with it,
	 * so can we :-)
	 */
	bh->b_page = alloc_page(GFP_NOIO);
		run_task_queue(&tq_disk);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);

	bh->b_data = page_address(bh->b_page);
	bh->b_end_io = loop_end_io_transfer;
	init_waitqueue_head(&bh->b_wait);

	bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9);
	spin_lock_irq(&lo->lo_lock);
	bh->b_rdev = lo->lo_device;
	spin_unlock_irq(&lo->lo_lock);
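/*
 * The remap above just shifts the request by the configured offset in
 * 512-byte sectors: with lo_offset == 1 MiB, a buffer aimed at sector N
 * of the loop device is submitted to sector N + 2048 of the underlying
 * device, while b_rdev is switched to that device under lo_lock.
 */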
static int loop_make_request(request_queue_t *q, int rw, struct buffer_head *rbh)
{
	struct buffer_head *bh = NULL;
	struct loop_device *lo;

	if (!buffer_locked(rbh))

	if (MINOR(rbh->b_rdev) >= max_loop)

	lo = &loop_dev[MINOR(rbh->b_rdev)];
	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_state != Lo_bound)

	atomic_inc(&lo->lo_pending);
	spin_unlock_irq(&lo->lo_lock);
	if (rw == WRITE) {
		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
	} else if (rw == READA) {
	} else if (rw != READ) {
		printk(KERN_ERR "loop: unknown command (%d)\n", rw);
	rbh = blk_queue_bounce(q, rw, rbh);

	/*
	 * file backed, queue for loop_thread to handle
	 */
	if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
		/*
		 * rbh locked at this point, noone else should clear
		 * the dirty flag
		 */
		set_bit(BH_Dirty, &rbh->b_state);
		loop_add_bh(lo, rbh);

	/*
	 * piggy old buffer on original, and submit for I/O
	 */
	bh = loop_get_buffer(lo, rbh);
	IV = loop_get_iv(lo, rbh->b_rsector);
		set_bit(BH_Dirty, &bh->b_state);
	if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data,

	generic_make_request(rw, bh);

	if (atomic_dec_and_test(&lo->lo_pending))
		up(&lo->lo_bh_mutex);

	buffer_IO_error(rbh);

	spin_unlock_irq(&lo->lo_lock);
static inline void loop_handle_bh(struct loop_device *lo,struct buffer_head *bh)
{
	/*
	 * For block backed loop, we know this is a READ
	 */
	if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
		int rw = !!test_and_clear_bit(BH_Dirty, &bh->b_state);

		ret = do_bh_filebacked(lo, bh, rw);
		bh->b_end_io(bh, !ret);
	} else {
		struct buffer_head *rbh = bh->b_private;
		unsigned long IV = loop_get_iv(lo, rbh->b_rsector);

		ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,

		rbh->b_end_io(rbh, !ret);

/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 */
static int loop_thread(void *data)
{
	struct loop_device *lo = data;
	struct buffer_head *bh;

	sprintf(current->comm, "loop%d", lo->lo_number);

	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked);
	flush_signals(current);
	spin_unlock_irq(&current->sigmask_lock);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_bound;
	atomic_inc(&lo->lo_pending);
	spin_unlock_irq(&lo->lo_lock);

	current->flags |= PF_NOIO;

	/*
	 * up sem, we are running
	 */

	down_interruptible(&lo->lo_bh_mutex);
	/*
	 * could be upped because of tear-down, not because of
	 * pending work
	 */
	if (!atomic_read(&lo->lo_pending))

	bh = loop_get_bh(lo);
	if (!bh) {
		printk("loop: missing bh\n");

	loop_handle_bh(lo, bh);

	/*
	 * upped both for pending work and tear-down, lo_pending
	 * will hit zero then
	 */
	if (atomic_dec_and_test(&lo->lo_pending))
static int loop_set_fd(struct loop_device *lo, struct file *lo_file, kdev_t dev,

	if (lo->lo_state != Lo_unbound)

	inode = file->f_dentry->d_inode;

	if (!(file->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	if (S_ISBLK(inode->i_mode)) {
		lo_device = inode->i_rdev;
		if (lo_device == dev) {

	} else if (S_ISREG(inode->i_mode)) {
		struct address_space_operations *aops = inode->i_mapping->a_ops;
		/*
		 * If we can't read - sorry. If we only can't write - well,
		 * it's going to be read-only.
		 */
		if (!aops->prepare_write || !aops->commit_write)
			lo_flags |= LO_FLAGS_READ_ONLY;

		lo_device = inode->i_dev;
		lo_flags |= LO_FLAGS_DO_BMAP;

	if (IS_RDONLY (inode) || is_read_only(lo_device)
	    || !(lo_file->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	set_device_ro(dev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_device = lo_device;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;

	figure_loop_size(lo);
	lo->old_gfp_mask = inode->i_mapping->gfp_mask;
	inode->i_mapping->gfp_mask = GFP_NOIO;

	if (blksize_size[MAJOR(lo_device)])
		bs = blksize_size[MAJOR(lo_device)][MINOR(lo_device)];

	set_blocksize(dev, bs);

	lo->lo_bh = lo->lo_bhtail = NULL;
	kernel_thread(loop_thread, lo, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
static int loop_release_xfer(struct loop_device *lo)
{
	if (lo->lo_encrypt_type) {
		struct loop_func_table *xfer = xfer_funcs[lo->lo_encrypt_type];

		if (xfer && xfer->release)
			err = xfer->release(lo);
		if (xfer && xfer->unlock)
			xfer->unlock(lo);
		lo->lo_encrypt_type = 0;
static int loop_init_xfer(struct loop_device *lo, int type, struct loop_info *i)
{
	struct loop_func_table *xfer = xfer_funcs[type];

	err = xfer->init(lo, i);

	lo->lo_encrypt_type = type;

static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
	struct file *filp = lo->lo_backing_file;
	int gfp = lo->old_gfp_mask;

	if (lo->lo_state != Lo_bound)

	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	if (atomic_dec_and_test(&lo->lo_pending))
		up(&lo->lo_bh_mutex);
	spin_unlock_irq(&lo->lo_lock);

	lo->lo_backing_file = NULL;

	loop_release_xfer(lo);
	lo->lo_encrypt_type = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_name, 0, LO_NAME_SIZE);
	loop_sizes[lo->lo_number] = 0;
	invalidate_bdev(bdev, 0);
	filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
	lo->lo_state = Lo_unbound;
static int loop_set_status(struct loop_device *lo, struct loop_info *arg)
{
	struct loop_info info;

	if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
	    !capable(CAP_SYS_ADMIN))

	if (lo->lo_state != Lo_bound)

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))

	if ((unsigned int) info.lo_encrypt_key_size > LO_KEY_SIZE)

	type = info.lo_encrypt_type;
	if (type >= MAX_LO_CRYPT || xfer_funcs[type] == NULL)

	if (type == LO_CRYPT_XOR && info.lo_encrypt_key_size == 0)

	err = loop_release_xfer(lo);

	err = loop_init_xfer(lo, type, &info);

	lo->lo_offset = info.lo_offset;
	strncpy(lo->lo_name, info.lo_name, LO_NAME_SIZE);

	lo->transfer = xfer_funcs[type]->transfer;
	lo->ioctl = xfer_funcs[type]->ioctl;
	lo->lo_encrypt_key_size = info.lo_encrypt_key_size;
	lo->lo_init[0] = info.lo_init[0];
	lo->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info.lo_encrypt_key,
		       info.lo_encrypt_key_size);
		lo->lo_key_owner = current->uid;

	figure_loop_size(lo);
static int loop_get_status(struct loop_device *lo, struct loop_info *arg)
{
	struct loop_info info;
	struct file *file = lo->lo_backing_file;

	if (lo->lo_state != Lo_bound)

	memset(&info, 0, sizeof(info));
	info.lo_number = lo->lo_number;
	info.lo_device = kdev_t_to_nr(file->f_dentry->d_inode->i_dev);
	info.lo_inode = file->f_dentry->d_inode->i_ino;
	info.lo_rdevice = kdev_t_to_nr(lo->lo_device);
	info.lo_offset = lo->lo_offset;
	info.lo_flags = lo->lo_flags;
	strncpy(info.lo_name, lo->lo_name, LO_NAME_SIZE);
	info.lo_encrypt_type = lo->lo_encrypt_type;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info.lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info.lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);

	return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
static int lo_ioctl(struct inode * inode, struct file * file,
		    unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo;

	if (MAJOR(inode->i_rdev) != MAJOR_NR) {
		printk(KERN_WARNING "lo_ioctl: pseudo-major != %d\n",

	dev = MINOR(inode->i_rdev);

	down(&lo->lo_ctl_mutex);

		err = loop_set_fd(lo, file, inode->i_rdev, arg);

		err = loop_clr_fd(lo, inode->i_bdev);

	case LOOP_SET_STATUS:
		err = loop_set_status(lo, (struct loop_info *) arg);

	case LOOP_GET_STATUS:
		err = loop_get_status(lo, (struct loop_info *) arg);

		if (lo->lo_state != Lo_bound) {

		err = put_user((unsigned long)loop_sizes[lo->lo_number] << 1, (unsigned long *) arg);

		if (lo->lo_state != Lo_bound) {

		err = put_user((u64)loop_sizes[lo->lo_number] << 10, (u64*)arg);

		err = blk_ioctl(inode->i_rdev, cmd, arg);

		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;

	up(&lo->lo_ctl_mutex);
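/*
 * For reference, the ioctl interface above is driven from user space
 * roughly like this (a minimal losetup-style sketch, error handling
 * omitted; the device and file names are placeholders):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int loopfd = open("/dev/loop0", O_RDWR);
 *	int filefd = open("/tmp/backing.img", O_RDWR);
 *	struct loop_info info;
 *
 *	ioctl(loopfd, LOOP_SET_FD, filefd);		// loop_set_fd()
 *	memset(&info, 0, sizeof(info));
 *	strncpy(info.lo_name, "backing.img", LO_NAME_SIZE);
 *	ioctl(loopfd, LOOP_SET_STATUS, &info);		// loop_set_status()
 *	...
 *	ioctl(loopfd, LOOP_CLR_FD, 0);			// loop_clr_fd()
 */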
static int lo_open(struct inode *inode, struct file *file)
{
	struct loop_device *lo;

	if (MAJOR(inode->i_rdev) != MAJOR_NR) {
		printk(KERN_WARNING "lo_open: pseudo-major != %d\n", MAJOR_NR);

	dev = MINOR(inode->i_rdev);

	down(&lo->lo_ctl_mutex);

	type = lo->lo_encrypt_type;
	if (type && xfer_funcs[type] && xfer_funcs[type]->lock)
		xfer_funcs[type]->lock(lo);

	up(&lo->lo_ctl_mutex);

static int lo_release(struct inode *inode, struct file *file)
{
	struct loop_device *lo;

	if (MAJOR(inode->i_rdev) != MAJOR_NR) {
		printk(KERN_WARNING "lo_release: pseudo-major != %d\n",

	dev = MINOR(inode->i_rdev);

	down(&lo->lo_ctl_mutex);
	type = lo->lo_encrypt_type;

	if (xfer_funcs[type] && xfer_funcs[type]->unlock)
		xfer_funcs[type]->unlock(lo);

	up(&lo->lo_ctl_mutex);
static struct block_device_operations lo_fops = {

/*
 * And now the modules code and kernel interface.
 */
MODULE_PARM(max_loop, "i");
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
MODULE_LICENSE("GPL");
int loop_register_transfer(struct loop_func_table *funcs)
{
	if ((unsigned)funcs->number >= MAX_LO_CRYPT || xfer_funcs[funcs->number])

	xfer_funcs[funcs->number] = funcs;

int loop_unregister_transfer(int number)
{
	struct loop_device *lo;

	if ((unsigned)number >= MAX_LO_CRYPT)

	for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
		int type = lo->lo_encrypt_type;

		if (type == number) {
			xfer_funcs[type]->release(lo);
			lo->lo_encrypt_type = 0;

	xfer_funcs[number] = NULL;

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
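/*
 * A separate encryption module would hook into the xfer_funcs table via
 * the exported helpers above roughly like this (sketch only; LO_CRYPT_IDEA
 * stands in for whatever slot the module really owns, and my_transfer is
 * whatever transfer callback it provides):
 *
 *	static struct loop_func_table my_funcs = {
 *		number:   LO_CRYPT_IDEA,
 *		transfer: my_transfer,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return loop_register_transfer(&my_funcs);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		loop_unregister_transfer(LO_CRYPT_IDEA);
 *	}
 *
 *	module_init(my_init);
 *	module_exit(my_exit);
 */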
int __init loop_init(void)
{
	if ((max_loop < 1) || (max_loop > 256)) {
		printk(KERN_WARNING "loop: invalid max_loop (must be between"
		       " 1 and 256), using default (8)\n");

	if (devfs_register_blkdev(MAJOR_NR, "loop", &lo_fops)) {
		printk(KERN_WARNING "Unable to get major number %d for loop"
		       " device\n", MAJOR_NR);

	loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);

	loop_sizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);

	loop_blksizes = kmalloc(max_loop * sizeof(int), GFP_KERNEL);

	blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);

	for (i = 0; i < max_loop; i++) {
		struct loop_device *lo = &loop_dev[i];
		memset(lo, 0, sizeof(struct loop_device));
		init_MUTEX(&lo->lo_ctl_mutex);
		init_MUTEX_LOCKED(&lo->lo_sem);
		init_MUTEX_LOCKED(&lo->lo_bh_mutex);
		spin_lock_init(&lo->lo_lock);

	memset(loop_sizes, 0, max_loop * sizeof(int));
	memset(loop_blksizes, 0, max_loop * sizeof(int));
	blk_size[MAJOR_NR] = loop_sizes;
	blksize_size[MAJOR_NR] = loop_blksizes;
	for (i = 0; i < max_loop; i++)
		register_disk(NULL, MKDEV(MAJOR_NR, i), 1, &lo_fops, 0);

	devfs_handle = devfs_mk_dir(NULL, "loop", NULL);
	devfs_register_series(devfs_handle, "%u", max_loop, DEVFS_FL_DEFAULT,
			      S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,

	printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);

	if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
		printk(KERN_WARNING "loop: cannot unregister blkdev\n");

	printk(KERN_ERR "loop: ran out of memory\n");
void loop_exit(void)
{
	devfs_unregister(devfs_handle);
	if (devfs_unregister_blkdev(MAJOR_NR, "loop"))
		printk(KERN_WARNING "loop: cannot unregister blkdev\n");

	kfree(loop_blksizes);

module_init(loop_init);
module_exit(loop_exit);
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);

__setup("max_loop=", max_loop_setup);
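/*
 * Usage reminder: when loop is built in, the limit is set at boot with
 * e.g. "max_loop=64" on the kernel command line via the __setup() hook
 * above; when built as a module the equivalent is
 * "modprobe loop max_loop=64" through the MODULE_PARM declared earlier.
 */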