2 * linux/drivers/block/loop.c
4 * Written by Theodore Ts'o, 3/29/93
6 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
7 * permitted under the GNU General Public License.
9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
21 * Loadable modules and other fixes by AK, 1998
23 * Made the real block number available to downstream transfer functions; this
24 * enables CBC (and related) encryption modes, which require a unique IV per data block.
25 * Reed H. Petty, rhp@draper.net
27 * Maximum number of loop devices is now dynamic via the max_loop module parameter.
28 * Russell Kroll <rkroll@exploits.org> 19990701
30 * Maximum number of loop devices when compiled in is now selectable by passing
31 * max_loop=<1-255> to the kernel on boot.
32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
34 * Completely rewrote request handling to be make_request_fn style and
35 * non-blocking, pushing work to a helper thread. Lots of fixes from
37 * Jens Axboe <axboe@suse.de>, Nov 2000
39 * Support up to 256 loop devices
40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
42 * Support for falling back on the write file operation when the address space
43 * operations prepare_write and/or commit_write are not available on the
44 * backing filesystem.
45 * Anton Altaparmakov, 16 Feb 2005
47 * Still To Fix:
48 * - Advisory locking is ignored here.
49 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
53 #include <linux/config.h>
54 #include <linux/module.h>
55 #include <linux/moduleparam.h>
56 #include <linux/sched.h>
58 #include <linux/file.h>
59 #include <linux/stat.h>
60 #include <linux/errno.h>
61 #include <linux/major.h>
62 #include <linux/wait.h>
63 #include <linux/blkdev.h>
64 #include <linux/blkpg.h>
65 #include <linux/init.h>
66 #include <linux/devfs_fs_kernel.h>
67 #include <linux/smp_lock.h>
68 #include <linux/swap.h>
69 #include <linux/slab.h>
70 #include <linux/loop.h>
71 #include <linux/suspend.h>
72 #include <linux/writeback.h>
73 #include <linux/buffer_head.h> /* for invalidate_bdev() */
74 #include <linux/completion.h>
75 #include <linux/highmem.h>
76 #include <linux/gfp.h>
77 #include <linux/vs_base.h>
78 #include <linux/vs_context.h>
80 #include <asm/uaccess.h>
82 static int max_loop = 8;
83 static struct loop_device *loop_dev;
84 static struct gendisk **disks;
89 static int transfer_none(struct loop_device *lo, int cmd,
90 struct page *raw_page, unsigned raw_off,
91 struct page *loop_page, unsigned loop_off,
92 int size, sector_t real_block)
94 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
95 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
97 if (cmd == READ)
98 memcpy(loop_buf, raw_buf, size);
99 else
100 memcpy(raw_buf, loop_buf, size);
102 kunmap_atomic(raw_buf, KM_USER0);
103 kunmap_atomic(loop_buf, KM_USER1);
108 static int transfer_xor(struct loop_device *lo, int cmd,
109 struct page *raw_page, unsigned raw_off,
110 struct page *loop_page, unsigned loop_off,
111 int size, sector_t real_block)
113 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
114 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
115 char *in, *out, *key;
116 int i, keysize;
118 if (cmd == READ) {
119 in = raw_buf;
120 out = loop_buf;
121 } else {
122 in = loop_buf;
123 out = raw_buf;
124 }
126 key = lo->lo_encrypt_key;
127 keysize = lo->lo_encrypt_key_size;
128 for (i = 0; i < size; i++)
129 *out++ = *in++ ^ key[(i & 511) % keysize];
131 kunmap_atomic(raw_buf, KM_USER0);
132 kunmap_atomic(loop_buf, KM_USER1);
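/*
 * Illustrative sketch (not part of the driver): transfer_xor() above applies a
 * repeating-key XOR whose keystream restarts at every 512-byte boundary (the
 * "i & 511" term). Because XOR is its own inverse, the same routine both
 * encrypts and decrypts. A minimal userspace equivalent, with a hypothetical
 * helper name, might look like this:
 */
#include <stddef.h>

static void xor_transform(unsigned char *buf, size_t size,
			  const unsigned char *key, size_t keysize)
{
	size_t i;

	/* Same keystream indexing as transfer_xor(): restart every sector. */
	for (i = 0; i < size; i++)
		buf[i] ^= key[(i & 511) % keysize];
}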
137 static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
139 if (unlikely(info->lo_encrypt_key_size <= 0))
140 return -EINVAL;
144 static struct loop_func_table none_funcs = {
145 .number = LO_CRYPT_NONE,
146 .transfer = transfer_none,
149 static struct loop_func_table xor_funcs = {
150 .number = LO_CRYPT_XOR,
151 .transfer = transfer_xor,
155 /* xfer_funcs[0] is special - its release function is never called */
156 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
161 static loff_t get_loop_size(struct loop_device *lo, struct file *file)
163 loff_t size, offset, loopsize;
165 /* Compute loopsize in bytes */
166 size = i_size_read(file->f_mapping->host);
167 offset = lo->lo_offset;
168 loopsize = size - offset;
169 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
170 loopsize = lo->lo_sizelimit;
173 * Unfortunately, if we want to do I/O on the device,
174 * the number of 512-byte sectors has to fit into a sector_t.
176 return loopsize >> 9;
180 figure_loop_size(struct loop_device *lo)
182 loff_t size = get_loop_size(lo, lo->lo_backing_file);
183 sector_t x = (sector_t)size;
185 if (unlikely((loff_t)x != size))
186 return -EFBIG;
188 set_capacity(disks[lo->lo_number], x);
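/*
 * Illustrative sketch (not part of the driver): get_loop_size() computes the
 * usable size as "backing file size - lo_offset", optionally clamped to
 * lo_sizelimit, and returns it in 512-byte sectors; figure_loop_size() then
 * rejects sizes that do not fit in sector_t. The same arithmetic in a small
 * standalone form, assuming a 32-bit sector type for illustration and using
 * hypothetical names:
 */
#include <stdint.h>

static int size_in_sectors(int64_t file_size, int64_t offset,
			   int64_t sizelimit, uint32_t *sectors)
{
	int64_t loopsize = file_size - offset;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	loopsize >>= 9;				/* bytes -> 512-byte sectors */
	if ((int64_t)(uint32_t)loopsize != loopsize)
		return -1;			/* would not fit in sector_t */
	*sectors = (uint32_t)loopsize;
	return 0;
}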
193 lo_do_transfer(struct loop_device *lo, int cmd,
194 struct page *rpage, unsigned roffs,
195 struct page *lpage, unsigned loffs,
196 int size, sector_t rblock)
198 if (unlikely(!lo->transfer))
199 return 0;
201 return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
205 * do_lo_send_aops - helper for writing data to a loop device
207 * This is the fast version for backing filesystems which implement the address
208 * space operations prepare_write and commit_write.
210 static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
211 int bsize, loff_t pos, struct page *page)
213 struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
214 struct address_space *mapping = file->f_mapping;
215 struct address_space_operations *aops = mapping->a_ops;
217 unsigned offset, bv_offs;
220 mutex_lock(&mapping->host->i_mutex);
221 index = pos >> PAGE_CACHE_SHIFT;
222 offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
223 bv_offs = bvec->bv_offset;
230 IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
231 size = PAGE_CACHE_SIZE - offset;
234 page = grab_cache_page(mapping, index);
237 ret = aops->prepare_write(file, page, offset,
240 if (ret == AOP_TRUNCATED_PAGE) {
241 page_cache_release(page);
246 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
247 bvec->bv_page, bv_offs, size, IV);
248 if (unlikely(transfer_result)) {
252 * The transfer failed, but we still write the data to
253 * keep prepare/commit calls balanced.
255 printk(KERN_ERR "loop: transfer error block %llu\n",
256 (unsigned long long)index);
257 kaddr = kmap_atomic(page, KM_USER0);
258 memset(kaddr + offset, 0, size);
259 kunmap_atomic(kaddr, KM_USER0);
261 flush_dcache_page(page);
262 ret = aops->commit_write(file, page, offset,
265 if (ret == AOP_TRUNCATED_PAGE) {
266 page_cache_release(page);
271 if (unlikely(transfer_result))
279 page_cache_release(page);
283 mutex_unlock(&mapping->host->i_mutex);
287 page_cache_release(page);
294 * __do_lo_send_write - helper for writing data to a loop device
296 * This helper just factors out common code between do_lo_send_direct_write()
297 * and do_lo_send_write().
299 static int __do_lo_send_write(struct file *file,
300 u8 __user *buf, const int len, loff_t pos)
303 mm_segment_t old_fs = get_fs();
306 bw = file->f_op->write(file, buf, len, &pos);
308 if (likely(bw == len))
309 return 0;
310 printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
311 (unsigned long long)pos, len);
318 * do_lo_send_direct_write - helper for writing data to a loop device
320 * This is the fast, non-transforming version for backing filesystems which do
321 * not implement the address space operations prepare_write and commit_write.
322 * It uses the write file operation which should be present on all writeable
323 * filesystems.
325 static int do_lo_send_direct_write(struct loop_device *lo,
326 struct bio_vec *bvec, int bsize, loff_t pos, struct page *page)
328 ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
329 (u8 __user *)kmap(bvec->bv_page) + bvec->bv_offset,
330 bvec->bv_len, pos);
331 kunmap(bvec->bv_page);
337 * do_lo_send_write - helper for writing data to a loop device
339 * This is the slow, transforming version for filesystems which do not
340 * implement the address space operations prepare_write and commit_write. It
341 * uses the write file operation which should be present on all writeable
342 * filesystems.
344 * Using fops->write is slower than using aops->{prepare,commit}_write in the
345 * transforming case because we need to double buffer the data, as we cannot do
346 * the transformations in place since we do not have direct access to the
347 * destination pages of the backing file.
349 static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
350 int bsize, loff_t pos, struct page *page)
352 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
353 bvec->bv_offset, bvec->bv_len, pos >> 9);
354 if (likely(!ret))
355 return __do_lo_send_write(lo->lo_backing_file,
356 (u8 __user *)page_address(page), bvec->bv_len,
357 pos);
358 printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
359 "length %i.\n", (unsigned long long)pos, bvec->bv_len);
365 static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
366 loff_t pos)
368 int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t,
369 struct page *page);
370 struct bio_vec *bvec;
371 struct page *page = NULL;
374 do_lo_send = do_lo_send_aops;
375 if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
376 do_lo_send = do_lo_send_direct_write;
377 if (lo->transfer != transfer_none) {
378 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
382 do_lo_send = do_lo_send_write;
385 bio_for_each_segment(bvec, bio, i) {
386 ret = do_lo_send(lo, bvec, bsize, pos, page);
398 printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
403 struct lo_read_data {
404 struct loop_device *lo;
411 lo_read_actor(read_descriptor_t *desc, struct page *page,
412 unsigned long offset, unsigned long size)
414 unsigned long count = desc->count;
415 struct lo_read_data *p = desc->arg.data;
416 struct loop_device *lo = p->lo;
419 IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
424 if (lo_do_transfer(lo, READ, page, offset, p->page, p->offset, size, IV)) {
426 printk(KERN_ERR "loop: transfer error block %ld\n",
427 page->index);
428 desc->error = -EINVAL;
431 flush_dcache_page(p->page);
433 desc->count = count - size;
434 desc->written += size;
440 do_lo_receive(struct loop_device *lo,
441 struct bio_vec *bvec, int bsize, loff_t pos)
443 struct lo_read_data cookie;
448 cookie.page = bvec->bv_page;
449 cookie.offset = bvec->bv_offset;
450 cookie.bsize = bsize;
451 file = lo->lo_backing_file;
452 retval = file->f_op->sendfile(file, &pos, bvec->bv_len,
453 lo_read_actor, &cookie);
454 return (retval < 0)? retval: 0;
458 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
460 struct bio_vec *bvec;
463 bio_for_each_segment(bvec, bio, i) {
464 ret = do_lo_receive(lo, bvec, bsize, pos);
472 static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
478 if (bio_rw(bio) == WRITE)
479 ret = lo_send(lo, bio, lo->lo_blocksize, pos);
480 else
481 ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
486 * Add bio to back of pending list
488 static void loop_add_bio(struct loop_device *lo, struct bio *bio)
490 if (lo->lo_biotail) {
491 lo->lo_biotail->bi_next = bio;
492 lo->lo_biotail = bio;
493 } else
494 lo->lo_bio = lo->lo_biotail = bio;
498 * Grab first pending buffer
500 static struct bio *loop_get_bio(struct loop_device *lo)
504 if ((bio = lo->lo_bio)) {
505 if (bio == lo->lo_biotail)
506 lo->lo_biotail = NULL;
507 lo->lo_bio = bio->bi_next;
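/*
 * Illustrative sketch (not part of the driver): loop_add_bio() and
 * loop_get_bio() keep pending bios on an intrusive singly linked FIFO,
 * chained through bi_next, with lo_bio as the head and lo_biotail as the
 * tail. The same pattern with a generic, hypothetical node type:
 */
#include <stddef.h>

struct node {
	struct node *next;
};

struct fifo {
	struct node *head;		/* plays the role of lo->lo_bio */
	struct node *tail;		/* plays the role of lo->lo_biotail */
};

/* Append at the tail, as loop_add_bio() does. */
static void fifo_add(struct fifo *q, struct node *n)
{
	n->next = NULL;
	if (q->tail) {
		q->tail->next = n;
		q->tail = n;
	} else
		q->head = q->tail = n;
}

/* Pop from the head, as loop_get_bio() does. */
static struct node *fifo_get(struct fifo *q)
{
	struct node *n = q->head;

	if (n) {
		if (n == q->tail)
			q->tail = NULL;
		q->head = n->next;
		n->next = NULL;
	}
	return n;
}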
514 static int loop_make_request(request_queue_t *q, struct bio *old_bio)
516 struct loop_device *lo = q->queuedata;
517 int rw = bio_rw(old_bio);
522 BUG_ON(!lo || (rw != READ && rw != WRITE));
524 spin_lock_irq(&lo->lo_lock);
525 if (lo->lo_state != Lo_bound)
526 goto out;
527 if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
528 goto out;
529 lo->lo_pending++;
530 loop_add_bio(lo, old_bio);
531 spin_unlock_irq(&lo->lo_lock);
532 complete(&lo->lo_bh_done);
533 return 0;
535 out:
536 if (lo->lo_pending == 0)
537 complete(&lo->lo_bh_done);
538 spin_unlock_irq(&lo->lo_lock);
539 bio_io_error(old_bio, old_bio->bi_size);
544 * kick off io on the underlying address space
546 static void loop_unplug(request_queue_t *q)
548 struct loop_device *lo = q->queuedata;
550 clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
551 blk_run_address_space(lo->lo_backing_file->f_mapping);
554 struct switch_request {
555 struct file *file;
556 struct completion wait;
559 static void do_loop_switch(struct loop_device *, struct switch_request *);
561 static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
563 if (unlikely(!bio->bi_bdev)) {
564 do_loop_switch(lo, bio->bi_private);
565 bio_put(bio);
566 } else {
567 int ret = do_bio_filebacked(lo, bio);
568 bio_endio(bio, bio->bi_size, ret);
573 * worker thread that handles reads/writes to file-backed loop devices,
574 * to avoid blocking in our make_request_fn. It also does loop decrypting
575 * on reads for block-backed loop devices, as that is too heavy to do from
576 * b_end_io context where irqs may be disabled.
578 static int loop_thread(void *data)
580 struct loop_device *lo = data;
583 daemonize("loop%d", lo->lo_number);
586 * loop can be used as part of an encrypted device; hence it must never
587 * be stopped (frozen), because it could be needed indirectly during
588 * suspend
590 current->flags |= PF_NOFREEZE;
592 set_user_nice(current, -20);
594 lo->lo_state = Lo_bound;
598 * complete it, we are running
600 complete(&lo->lo_done);
605 if (wait_for_completion_interruptible(&lo->lo_bh_done))
606 continue;
608 spin_lock_irq(&lo->lo_lock);
611 * could be completed because of tear-down, not pending work
613 if (unlikely(!lo->lo_pending)) {
614 spin_unlock_irq(&lo->lo_lock);
615 break;
616 }
618 bio = loop_get_bio(lo);
620 pending = lo->lo_pending;
621 spin_unlock_irq(&lo->lo_lock);
624 loop_handle_bio(lo, bio);
627 * upped both for pending work and tear-down; lo_pending
628 * will hit zero then
630 if (unlikely(!pending))
631 break;
634 complete(&lo->lo_done);
639 * loop_switch performs the hard work of switching a backing store.
640 * First it needs to flush existing IO; it does this by sending a magic
641 * BIO down the pipe. The completion of this BIO does the actual switch.
643 static int loop_switch(struct loop_device *lo, struct file *file)
645 struct switch_request w;
646 struct bio *bio = bio_alloc(GFP_KERNEL, 1);
649 init_completion(&w.wait);
651 bio->bi_private = &w;
653 loop_make_request(lo->lo_queue, bio);
654 wait_for_completion(&w.wait);
659 * Do the actual switch; called from the BIO completion routine
661 static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
663 struct file *file = p->file;
664 struct file *old_file = lo->lo_backing_file;
665 struct address_space *mapping = file->f_mapping;
667 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
668 lo->lo_backing_file = file;
669 lo->lo_blocksize = mapping->host->i_blksize;
670 lo->old_gfp_mask = mapping_gfp_mask(mapping);
671 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
672 complete(&p->wait);
677 * loop_change_fd switches the backing store of a loopback device to
678 * a new file. This is useful for operating system installers to free up
679 * the original file and in High Availability environments to switch to
680 * an alternative location for the content in case of server meltdown.
681 * This can only work if the loop device is used read-only, and if the
682 * new backing store is the same size and type as the old backing store.
684 static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
685 struct block_device *bdev, unsigned int arg)
687 struct file *file, *old_file;
692 if (lo->lo_state != Lo_bound)
695 /* the loop device has to be read-only */
697 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
698 goto out;
700 error = -EBADF;
701 file = fget(arg);
702 if (!file)
703 goto out;
705 inode = file->f_mapping->host;
706 old_file = lo->lo_backing_file;
710 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
713 /* new backing store needs to support loop (eg sendfile) */
714 if (!inode->i_fop->sendfile)
717 /* size of the new backing store needs to be the same */
718 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
719 goto out_putf;
722 error = loop_switch(lo, file);
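/*
 * Illustrative sketch (not part of the driver): loop_change_fd() is reached
 * through the LOOP_CHANGE_FD ioctl, whose argument is the file descriptor of
 * the new backing file. It only succeeds on a bound, read-only loop device
 * whose new backing store has the same size as the old one. A hedged
 * userspace usage example; paths and the helper name are placeholders:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

static int change_backing_file(const char *loopdev, const char *new_path)
{
	int ret = -1;
	int loop_fd = open(loopdev, O_RDONLY);
	int new_fd = open(new_path, O_RDONLY);

	if (loop_fd >= 0 && new_fd >= 0 &&
	    ioctl(loop_fd, LOOP_CHANGE_FD, new_fd) == 0)
		ret = 0;
	else
		perror("LOOP_CHANGE_FD");

	/* The driver takes its own reference on success, so both
	 * descriptors can be closed here either way. */
	if (new_fd >= 0)
		close(new_fd);
	if (loop_fd >= 0)
		close(loop_fd);
	return ret;
}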
735 static inline int is_loop_device(struct file *file)
737 struct inode *i = file->f_mapping->host;
739 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
742 static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
743 struct block_device *bdev, unsigned int arg)
745 struct file *file, *f;
747 struct address_space *mapping;
748 unsigned lo_blocksize;
753 /* This is safe, since we have a reference from open(). */
754 __module_get(THIS_MODULE);
756 error = -EBADF;
757 file = fget(arg);
758 if (!file)
759 goto out;
761 error = -EBUSY;
762 if (lo->lo_state != Lo_unbound)
763 goto out_putf;
765 /* Avoid recursion */
766 f = file;
767 while (is_loop_device(f)) {
768 struct loop_device *l;
770 if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev)
771 goto out_putf;
773 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
774 if (l->lo_state == Lo_unbound) {
775 error = -EINVAL;
776 goto out_putf;
777 }
778 f = l->lo_backing_file;
781 mapping = file->f_mapping;
782 inode = mapping->host;
784 if (!(file->f_mode & FMODE_WRITE))
785 lo_flags |= LO_FLAGS_READ_ONLY;
788 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
789 struct address_space_operations *aops = mapping->a_ops;
791 * If we can't read - sorry. If we only can't write - well,
792 * it's going to be read-only.
794 if (!file->f_op->sendfile)
795 goto out_putf;
796 if (aops->prepare_write && aops->commit_write)
797 lo_flags |= LO_FLAGS_USE_AOPS;
798 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
799 lo_flags |= LO_FLAGS_READ_ONLY;
801 lo_blocksize = inode->i_blksize;
807 size = get_loop_size(lo, file);
809 if ((loff_t)(sector_t)size != size) {
810 error = -EFBIG;
811 goto out_putf;
812 }
814 if (!(lo_file->f_mode & FMODE_WRITE))
815 lo_flags |= LO_FLAGS_READ_ONLY;
817 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
819 lo->lo_blocksize = lo_blocksize;
820 lo->lo_device = bdev;
821 lo->lo_flags = lo_flags;
822 lo->lo_backing_file = file;
825 lo->lo_sizelimit = 0;
826 lo->old_gfp_mask = mapping_gfp_mask(mapping);
827 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
829 lo->lo_bio = lo->lo_biotail = NULL;
832 * set queue make_request_fn, and add limits based on lower level
833 * device
835 blk_queue_make_request(lo->lo_queue, loop_make_request);
836 lo->lo_queue->queuedata = lo;
837 lo->lo_queue->unplug_fn = loop_unplug;
839 set_capacity(disks[lo->lo_number], size);
840 bd_set_size(bdev, size << 9);
842 set_blocksize(bdev, lo_blocksize);
844 error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
847 wait_for_completion(&lo->lo_done);
853 /* This is safe: open() is still holding a reference. */
854 module_put(THIS_MODULE);
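/*
 * Illustrative sketch (not part of the driver): loop_set_fd() above is
 * reached through the LOOP_SET_FD ioctl, whose argument is the descriptor of
 * the backing file; LOOP_CLR_FD (see loop_clr_fd() below) detaches it again.
 * A hedged, losetup-style userspace example; device and file paths are
 * placeholders:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int loop_fd = open("/dev/loop0", O_RDWR);
	int file_fd = open("/tmp/backing.img", O_RDWR);

	if (loop_fd < 0 || file_fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(loop_fd, LOOP_SET_FD, file_fd) < 0) {
		perror("LOOP_SET_FD");
		return 1;
	}

	/* ... use /dev/loop0 as an ordinary block device here ... */

	if (ioctl(loop_fd, LOOP_CLR_FD, 0) < 0)
		perror("LOOP_CLR_FD");

	close(file_fd);
	close(loop_fd);
	return 0;
}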
859 loop_release_xfer(struct loop_device *lo)
862 struct loop_func_table *xfer = lo->lo_encryption;
866 err = xfer->release(lo);
868 lo->lo_encryption = NULL;
869 module_put(xfer->owner);
875 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
876 const struct loop_info64 *i)
881 struct module *owner = xfer->owner;
883 if (!try_module_get(owner))
886 err = xfer->init(lo, i);
890 lo->lo_encryption = xfer;
895 static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
897 struct file *filp = lo->lo_backing_file;
898 gfp_t gfp = lo->old_gfp_mask;
900 if (lo->lo_state != Lo_bound)
901 return -ENXIO;
903 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
904 return -EBUSY;
909 spin_lock_irq(&lo->lo_lock);
910 lo->lo_state = Lo_rundown;
911 lo->lo_pending--;
912 if (!lo->lo_pending)
913 complete(&lo->lo_bh_done);
914 spin_unlock_irq(&lo->lo_lock);
916 wait_for_completion(&lo->lo_done);
918 lo->lo_backing_file = NULL;
920 loop_release_xfer(lo);
923 lo->lo_device = NULL;
924 lo->lo_encryption = NULL;
926 lo->lo_sizelimit = 0;
927 lo->lo_encrypt_key_size = 0;
929 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
930 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
931 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
932 invalidate_bdev(bdev, 0);
933 set_capacity(disks[lo->lo_number], 0);
934 bd_set_size(bdev, 0);
935 mapping_set_gfp_mask(filp->f_mapping, gfp);
936 lo->lo_state = Lo_unbound;
938 /* This is safe: open() is still holding a reference. */
939 module_put(THIS_MODULE);
944 loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
947 struct loop_func_table *xfer;
949 if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
950 !capable(CAP_SYS_ADMIN))
951 return -EPERM;
952 if (lo->lo_state != Lo_bound)
953 return -ENXIO;
954 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
955 return -EINVAL;
957 err = loop_release_xfer(lo);
961 if (info->lo_encrypt_type) {
962 unsigned int type = info->lo_encrypt_type;
964 if (type >= MAX_LO_CRYPT)
965 return -EINVAL;
966 xfer = xfer_funcs[type];
972 err = loop_init_xfer(lo, xfer, info);
976 if (lo->lo_offset != info->lo_offset ||
977 lo->lo_sizelimit != info->lo_sizelimit) {
978 lo->lo_offset = info->lo_offset;
979 lo->lo_sizelimit = info->lo_sizelimit;
980 if (figure_loop_size(lo))
981 return -EFBIG;
984 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
985 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
986 lo->lo_file_name[LO_NAME_SIZE-1] = 0;
987 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
991 lo->transfer = xfer->transfer;
992 lo->ioctl = xfer->ioctl;
994 lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
995 lo->lo_init[0] = info->lo_init[0];
996 lo->lo_init[1] = info->lo_init[1];
997 if (info->lo_encrypt_key_size) {
998 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
999 info->lo_encrypt_key_size);
1000 lo->lo_key_owner = current->uid;
1007 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1009 struct file *file = lo->lo_backing_file;
1013 if (lo->lo_state != Lo_bound)
1014 return -ENXIO;
1015 error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
1016 if (error)
1017 return error;
1018 memset(info, 0, sizeof(*info));
1019 info->lo_number = lo->lo_number;
1020 info->lo_device = huge_encode_dev(stat.dev);
1021 info->lo_inode = stat.ino;
1022 info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1023 info->lo_offset = lo->lo_offset;
1024 info->lo_sizelimit = lo->lo_sizelimit;
1025 info->lo_flags = lo->lo_flags;
1026 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1027 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1028 info->lo_encrypt_type =
1029 lo->lo_encryption ? lo->lo_encryption->number : 0;
1030 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1031 info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1032 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1033 lo->lo_encrypt_key_size);
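/*
 * Illustrative sketch (not part of the driver): loop_set_status() and
 * loop_get_status() back the LOOP_SET_STATUS64 / LOOP_GET_STATUS64 ioctls,
 * which exchange a struct loop_info64 with userspace. The fields most often
 * changed on an already-bound device are lo_offset and lo_sizelimit. A hedged
 * userspace example with a hypothetical helper name and placeholder values:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

static int set_loop_window(const char *loopdev)
{
	struct loop_info64 info;
	int fd = open(loopdev, O_RDWR);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	/* Read the current status first so other fields are preserved. */
	if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
		perror("LOOP_GET_STATUS64");
		close(fd);
		return -1;
	}
	info.lo_offset = 4096;			/* skip a 4 KiB header */
	info.lo_sizelimit = 1024 * 1024;	/* expose only 1 MiB */
	if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0) {
		perror("LOOP_SET_STATUS64");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}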
1039 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1041 memset(info64, 0, sizeof(*info64));
1042 info64->lo_number = info->lo_number;
1043 info64->lo_device = info->lo_device;
1044 info64->lo_inode = info->lo_inode;
1045 info64->lo_rdevice = info->lo_rdevice;
1046 info64->lo_offset = info->lo_offset;
1047 info64->lo_sizelimit = 0;
1048 info64->lo_encrypt_type = info->lo_encrypt_type;
1049 info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1050 info64->lo_flags = info->lo_flags;
1051 info64->lo_init[0] = info->lo_init[0];
1052 info64->lo_init[1] = info->lo_init[1];
1053 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1054 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1055 else
1056 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1057 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1061 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1063 memset(info, 0, sizeof(*info));
1064 info->lo_number = info64->lo_number;
1065 info->lo_device = info64->lo_device;
1066 info->lo_inode = info64->lo_inode;
1067 info->lo_rdevice = info64->lo_rdevice;
1068 info->lo_offset = info64->lo_offset;
1069 info->lo_encrypt_type = info64->lo_encrypt_type;
1070 info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1071 info->lo_flags = info64->lo_flags;
1072 info->lo_init[0] = info64->lo_init[0];
1073 info->lo_init[1] = info64->lo_init[1];
1074 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1075 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1076 else
1077 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1078 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1080 /* error in case values were truncated */
1081 if (info->lo_device != info64->lo_device ||
1082 info->lo_rdevice != info64->lo_rdevice ||
1083 info->lo_inode != info64->lo_inode ||
1084 info->lo_offset != info64->lo_offset)
1085 return -EOVERFLOW;
1091 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1093 struct loop_info info;
1094 struct loop_info64 info64;
1096 if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1097 return -EFAULT;
1098 loop_info64_from_old(&info, &info64);
1099 return loop_set_status(lo, &info64);
1103 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1105 struct loop_info64 info64;
1107 if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1108 return -EFAULT;
1109 return loop_set_status(lo, &info64);
1113 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1114 struct loop_info info;
1115 struct loop_info64 info64;
1121 err = loop_get_status(lo, &info64);
1122 if (!err)
1123 err = loop_info64_to_old(&info64, &info);
1124 if (!err && copy_to_user(arg, &info, sizeof(info)))
1125 err = -EFAULT;
1131 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1132 struct loop_info64 info64;
1138 err = loop_get_status(lo, &info64);
1139 if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1140 err = -EFAULT;
1145 static int lo_ioctl(struct inode * inode, struct file * file,
1146 unsigned int cmd, unsigned long arg)
1148 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1151 mutex_lock(&lo->lo_ctl_mutex);
1152 switch (cmd) {
1153 case LOOP_SET_FD:
1154 err = loop_set_fd(lo, file, inode->i_bdev, arg);
1156 case LOOP_CHANGE_FD:
1157 err = loop_change_fd(lo, file, inode->i_bdev, arg);
1158 break;
1159 case LOOP_CLR_FD:
1160 err = loop_clr_fd(lo, inode->i_bdev);
1162 case LOOP_SET_STATUS:
1163 err = loop_set_status_old(lo, (struct loop_info __user *) arg);
1165 case LOOP_GET_STATUS:
1166 err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1168 case LOOP_SET_STATUS64:
1169 err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
1171 case LOOP_GET_STATUS64:
1172 err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1174 default:
1175 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1176 }
1177 mutex_unlock(&lo->lo_ctl_mutex);
1181 static int lo_open(struct inode *inode, struct file *file)
1183 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1185 mutex_lock(&lo->lo_ctl_mutex);
1186 lo->lo_refcnt++;
1187 mutex_unlock(&lo->lo_ctl_mutex);
1192 static int lo_release(struct inode *inode, struct file *file)
1194 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1196 mutex_lock(&lo->lo_ctl_mutex);
1197 --lo->lo_refcnt;
1198 mutex_unlock(&lo->lo_ctl_mutex);
1203 static struct block_device_operations lo_fops = {
1204 .owner = THIS_MODULE,
1205 .open = lo_open,
1206 .release = lo_release,
1207 .ioctl = lo_ioctl,
1211 * And now the module's code and kernel interface.
1213 module_param(max_loop, int, 0);
1214 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
1215 MODULE_LICENSE("GPL");
1216 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1218 int loop_register_transfer(struct loop_func_table *funcs)
1220 unsigned int n = funcs->number;
1222 if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1223 return -EINVAL;
1224 xfer_funcs[n] = funcs;
1225 return 0;
1228 int loop_unregister_transfer(int number)
1230 unsigned int n = number;
1231 struct loop_device *lo;
1232 struct loop_func_table *xfer;
1234 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1235 return -EINVAL;
1237 xfer_funcs[n] = NULL;
1239 for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
1240 mutex_lock(&lo->lo_ctl_mutex);
1242 if (lo->lo_encryption == xfer)
1243 loop_release_xfer(lo);
1245 mutex_unlock(&lo->lo_ctl_mutex);
1251 EXPORT_SYMBOL(loop_register_transfer);
1252 EXPORT_SYMBOL(loop_unregister_transfer);
1254 static int __init loop_init(void)
1258 if (max_loop < 1 || max_loop > 256) {
1259 printk(KERN_WARNING "loop: invalid max_loop (must be between"
1260 " 1 and 256), using default (8)\n");
1264 if (register_blkdev(LOOP_MAJOR, "loop"))
1267 loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
1270 memset(loop_dev, 0, max_loop * sizeof(struct loop_device));
1272 disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL);
1276 for (i = 0; i < max_loop; i++) {
1277 disks[i] = alloc_disk(1);
1282 devfs_mk_dir("loop");
1284 for (i = 0; i < max_loop; i++) {
1285 struct loop_device *lo = &loop_dev[i];
1286 struct gendisk *disk = disks[i];
1288 memset(lo, 0, sizeof(*lo));
1289 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1292 mutex_init(&lo->lo_ctl_mutex);
1293 init_completion(&lo->lo_done);
1294 init_completion(&lo->lo_bh_done);
1296 spin_lock_init(&lo->lo_lock);
1297 disk->major = LOOP_MAJOR;
1298 disk->first_minor = i;
1299 disk->fops = &lo_fops;
1300 sprintf(disk->disk_name, "loop%d", i);
1301 sprintf(disk->devfs_name, "loop/%d", i);
1302 disk->private_data = lo;
1303 disk->queue = lo->lo_queue;
1306 /* We cannot fail after we call this, so another loop! */
1307 for (i = 0; i < max_loop; i++)
1308 add_disk(disks[i]);
1309 printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
1314 blk_cleanup_queue(loop_dev[i].lo_queue);
1315 devfs_remove("loop");
1324 unregister_blkdev(LOOP_MAJOR, "loop");
1325 printk(KERN_ERR "loop: ran out of memory\n");
1329 static void loop_exit(void)
1333 for (i = 0; i < max_loop; i++) {
1334 del_gendisk(disks[i]);
1335 blk_cleanup_queue(loop_dev[i].lo_queue);
1338 devfs_remove("loop");
1339 if (unregister_blkdev(LOOP_MAJOR, "loop"))
1340 printk(KERN_WARNING "loop: cannot unregister blkdev\n");
1346 module_init(loop_init);
1347 module_exit(loop_exit);
1350 static int __init max_loop_setup(char *str)
1352 max_loop = simple_strtol(str, NULL, 0);
1353 return 1;
1356 __setup("max_loop=", max_loop_setup);
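/*
 * Usage sketch for the max_loop parameter (the values below are examples,
 * not defaults): when loop is built as a module, "modprobe loop max_loop=64";
 * when built in, boot with "max_loop=32" on the kernel command line, which is
 * parsed by max_loop_setup() above. Out-of-range values fall back to the
 * default of 8 in loop_init().
 */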