/*
 * linux/drivers/block/loop.c
 *
 * Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}
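/*
 * The index expression key[(i & 511) % keysize] above restarts the key
 * stream every 512 bytes, so each 512-byte sector is transformed the
 * same way no matter how large the enclosing transfer is.  For example,
 * with keysize = 5, byte 511 uses key[511 % 5] = key[1], while byte 512
 * wraps via (512 & 511) = 0 back to key[0].
 */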
static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}
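/*
 * Worked example for get_size(): with a 1 GiB (1073741824 byte) backing
 * file, offset = 4096 and no sizelimit, loopsize is 1073741824 - 4096 =
 * 1073737728 bytes, and the device advertises 1073737728 >> 9 = 2097144
 * sectors.  A trailing partial sector is simply truncated away by the
 * shift.
 */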
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of the backing device, the logical block
	 * size of the loop device is not smaller than the backing
	 * device's, and the loop device needs no transfer transform.
	 *
	 * TODO: the above condition may be relaxed in the future, and
	 * direct I/O may then be switched at runtime, since most requests
	 * in sane applications should be PAGE_SIZE aligned.
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
				!(lo->lo_offset & dio_align) &&
				mapping->a_ops->direct_IO &&
				!lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * LO_FLAGS_DIRECT_IO is handled the same way as LO_FLAGS_READ_ONLY:
	 * both are set by the kernel, and losetup picks up the new value
	 * via ioctl(LOOP_GET_STATUS).
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
}

static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	if (unlikely((loff_t)x != size))
		return -EFBIG;
	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	return 0;
}

static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos, 0);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}
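/*
 * Both REQ_OP_DISCARD and REQ_OP_WRITE_ZEROES requests end up in the
 * helper below (see do_req_filebacked()): either way the byte range is
 * dropped from the backing file by punching a hole, and the filesystem
 * returns zeroes for subsequent reads of that range.
 */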
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	int ret;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
out:
	return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
		struct bio *bio = cmd->rq->bio;

		bio_advance(bio, cmd->ret);
		zero_fill_bio(bio);
	}

	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	cmd->ret = ret;
	blk_mq_complete_request(cmd->rq);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct bio_vec *bvec;
	struct bio *bio = cmd->rq->bio;
	struct file *file = lo->lo_backing_file;
	int ret;

	/* nomerge for loop request queue */
	WARN_ON(cmd->rq->bio != cmd->rq->biotail);

	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
		      bio_segments(bio), blk_rq_bytes(cmd->rq));
	/*
	 * This bio may be started from the middle of the 'bvec'
	 * because of bio splitting, so offset from the bvec must
	 * be passed to iov iterator
	 */
	iter.iov_offset = bio->bi_iter.bi_bvec_done;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;

	if (rw == WRITE)
		ret = call_write_iter(file, &cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &cmd->iocb, &iter);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
	return 0;
}
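/*
 * Completion flow for the aio path above: once the backing filesystem
 * finishes the kiocb it calls cmd->iocb.ki_complete, i.e.
 * lo_rw_aio_complete(), which stores the byte count in cmd->ret and
 * calls blk_mq_complete_request().  That in turn invokes the tag set's
 * .complete handler, lo_complete_rq(), which zero-fills any unread tail
 * of a short read and ends the request.
 */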
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by an io submit style function like lo_rw_aio(); one blocker
	 * is that lo_read_simple() needs to call flush_dcache_page after
	 * a page is written from the kernel, which isn't easy to handle
	 * in an io submit style function that submits all segments of
	 * the request at one time.  Direct read IO doesn't need to run
	 * flush_dcache_page().
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		return lo_discard(lo, rq, pos);
	case REQ_OP_WRITE:
		if (lo->transfer)
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (lo->transfer)
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
			lo->use_dio);
}

/*
 * Do the actual switch; called with the request queue frozen
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		return;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
	loop_update_dio(lo);
}

/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO; it does this by freezing the
 * request queue, which waits for all scheduled requests to complete.
 * The switch itself then happens while the queue is quiesced.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;

	w.file = file;

	/* freeze queue and wait for completion of scheduled requests */
	blk_mq_freeze_queue(lo->lo_queue);

	/* do the switch action */
	do_loop_switch(lo, &w);

	/* unfreeze */
	blk_mq_unfreeze_queue(lo->lo_queue);

	return 0;
}

/*
 * Helper to flush the IOs in loop while keeping the loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (lo->lo_state != Lo_bound)
		return 0;
	return loop_switch(lo, NULL);
}

static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)
{
	int rc;

	/*
	 * bd_mutex has been held already in release path, so don't
	 * acquire it if this function is called in such case.
	 *
	 * If the reread partition isn't from release path, lo_refcnt
	 * must be at least one and it can only become zero when the
	 * current holder is released.
	 */
	if (!atomic_read(&lo->lo_refcnt))
		rc = __blkdev_reread_part(bdev);
	else
		rc = blkdev_reread_part(bdev);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}
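/*
 * Userspace sketch of the ioctl handled below (illustrative only, error
 * handling omitted): the device must already be bound read-only, and the
 * replacement file must have the same size.
 *
 *	int lfd = open("/dev/loop0", O_RDWR);
 *	int newfd = open("copy-of-image.img", O_RDONLY);
 *	ioctl(lfd, LOOP_CHANGE_FD, newfd);
 */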
/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);
	return 0;

out_putf:
	fput(file);
out:
	return error;
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
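/*
 * The attribute group below is registered under the name "loop" on the
 * disk's device kobject, so these read-only attributes show up under
 * /sys/block/loopN/loop/, e.g. (output illustrative):
 *
 *	$ cat /sys/block/loop0/loop/backing_file
 *	/var/tmp/image.img
 */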
"1" : "0"); 761 } 762 763 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) 764 { 765 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); 766 767 return sprintf(buf, "%s\n", partscan ? "1" : "0"); 768 } 769 770 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) 771 { 772 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); 773 774 return sprintf(buf, "%s\n", dio ? "1" : "0"); 775 } 776 777 LOOP_ATTR_RO(backing_file); 778 LOOP_ATTR_RO(offset); 779 LOOP_ATTR_RO(sizelimit); 780 LOOP_ATTR_RO(autoclear); 781 LOOP_ATTR_RO(partscan); 782 LOOP_ATTR_RO(dio); 783 784 static struct attribute *loop_attrs[] = { 785 &loop_attr_backing_file.attr, 786 &loop_attr_offset.attr, 787 &loop_attr_sizelimit.attr, 788 &loop_attr_autoclear.attr, 789 &loop_attr_partscan.attr, 790 &loop_attr_dio.attr, 791 NULL, 792 }; 793 794 static struct attribute_group loop_attribute_group = { 795 .name = "loop", 796 .attrs= loop_attrs, 797 }; 798 799 static int loop_sysfs_init(struct loop_device *lo) 800 { 801 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 802 &loop_attribute_group); 803 } 804 805 static void loop_sysfs_exit(struct loop_device *lo) 806 { 807 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 808 &loop_attribute_group); 809 } 810 811 static void loop_config_discard(struct loop_device *lo) 812 { 813 struct file *file = lo->lo_backing_file; 814 struct inode *inode = file->f_mapping->host; 815 struct request_queue *q = lo->lo_queue; 816 817 /* 818 * We use punch hole to reclaim the free space used by the 819 * image a.k.a. discard. However we do not support discard if 820 * encryption is enabled, because it may give an attacker 821 * useful information. 822 */ 823 if ((!file->f_op->fallocate) || 824 lo->lo_encrypt_key_size) { 825 q->limits.discard_granularity = 0; 826 q->limits.discard_alignment = 0; 827 blk_queue_max_discard_sectors(q, 0); 828 blk_queue_max_write_zeroes_sectors(q, 0); 829 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 830 return; 831 } 832 833 q->limits.discard_granularity = inode->i_sb->s_blocksize; 834 q->limits.discard_alignment = 0; 835 836 blk_queue_max_discard_sectors(q, UINT_MAX >> 9); 837 blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); 838 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 839 } 840 841 static void loop_unprepare_queue(struct loop_device *lo) 842 { 843 kthread_flush_worker(&lo->worker); 844 kthread_stop(lo->worker_task); 845 } 846 847 static int loop_kthread_worker_fn(void *worker_ptr) 848 { 849 current->flags |= PF_LESS_THROTTLE; 850 return kthread_worker_fn(worker_ptr); 851 } 852 853 static int loop_prepare_queue(struct loop_device *lo) 854 { 855 kthread_init_worker(&lo->worker); 856 lo->worker_task = kthread_run(loop_kthread_worker_fn, 857 &lo->worker, "loop%d", lo->lo_number); 858 if (IS_ERR(lo->worker_task)) 859 return -ENOMEM; 860 set_user_nice(lo->worker_task, MIN_NICE); 861 return 0; 862 } 863 864 static int loop_set_fd(struct loop_device *lo, fmode_t mode, 865 struct block_device *bdev, unsigned int arg) 866 { 867 struct file *file, *f; 868 struct inode *inode; 869 struct address_space *mapping; 870 unsigned lo_blocksize; 871 int lo_flags = 0; 872 int error; 873 loff_t size; 874 875 /* This is safe, since we have a reference from open(). 
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file *file, *f;
	struct inode *inode;
	struct address_space *mapping;
	unsigned lo_blocksize;
	int lo_flags = 0;
	int error;
	loff_t size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo_flags |= LO_FLAGS_READ_ONLY;

	lo_blocksize = S_ISBLK(inode->i_mode) ?
		inode->i_bdev->bd_block_size : PAGE_SIZE;

	error = -EFBIG;
	size = get_loop_size(lo, file);
	if ((loff_t)(sector_t)size != size)
		goto out_putf;
	error = loop_prepare_queue(lo);
	if (error)
		goto out_putf;

	error = 0;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = false;
	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	loop_update_dio(lo);
	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);
	loop_sysfs_init(lo);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	return 0;

out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
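/*
 * The two helpers below pair up: loop_init_xfer() takes a module
 * reference on the transfer implementation (and runs its ->init hook),
 * and loop_release_xfer() runs ->release and drops that reference, so a
 * cryptoloop-style module cannot be unloaded while a device still uses
 * its transfer functions.
 */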
static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown when
	 * the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid on
	 * the loopback device whenever it appears. xfstests is notorious for
	 * failing tests because blkid via udev races with a losetup
	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_ctl_mutex);
		return 0;
	}

	if (filp == NULL)
		return -EINVAL;

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_size(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
		loop_reread_partitions(lo, bdev);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	loop_unprepare_queue(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}
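/*
 * Userspace sketch of adjusting a bound device via LOOP_SET_STATUS64
 * (illustrative only, error handling omitted); here the device is made
 * to start 1 MiB into the backing file:
 *
 *	struct loop_info64 info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.lo_offset = 1048576;
 *	ioctl(lfd, LOOP_SET_STATUS64, &info);
 */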
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	/* I/O needs to be drained during transfer transition */
	blk_mq_freeze_queue(lo->lo_queue);

	err = loop_release_xfer(lo);
	if (err)
		goto exit;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT) {
			err = -EINVAL;
			goto exit;
		}
		xfer = xfer_funcs[type];
		if (xfer == NULL) {
			err = -EINVAL;
			goto exit;
		}
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		goto exit;

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
			err = -EFBIG;
			goto exit;
		}
	}

	loop_config_discard(lo);

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);

exit:
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		loop_reread_partitions(lo, lo->lo_device);
	}

	return err;
}

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(&file->f_path, &stat,
			    STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}
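/*
 * The next two helpers convert between the legacy struct loop_info and
 * struct loop_info64.  The legacy structure stores lo_offset and the
 * device numbers in narrower types, so converting back can lose bits;
 * for example an offset of 2 GiB does not fit into the legacy int field,
 * and loop_info64_to_old() reports that as -EOVERFLOW rather than
 * silently truncating.
 */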
static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo)
{
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;
	error = -EINVAL;
out:
	return error;
}
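/*
 * The ioctl dispatcher below is the main configuration entry point.  The
 * state-changing commands (LOOP_SET_STATUS*, LOOP_SET_CAPACITY and
 * LOOP_SET_DIRECT_IO) require either a writable open of the device or
 * CAP_SYS_ADMIN; the LOOP_GET_STATUS* queries are unprivileged.
 */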
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo);
		break;
	case LOOP_SET_DIRECT_IO:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_dio(lo, arg);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}

#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;	/* ioctl r/o */
	compat_dev_t	lo_device;	/* ioctl r/o */
	compat_ulong_t	lo_inode;	/* ioctl r/o */
	compat_dev_t	lo_rdevice;	/* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;	/* ioctl w/o */
	compat_int_t	lo_flags;	/* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}

/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}

static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
		/* fall through */
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	atomic_inc(&lo->lo_refcnt);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	if (atomic_dec_return(&lo->lo_refcnt))
		return;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo);
		if (!err)
			return;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

	mutex_unlock(&lo->lo_ctl_mutex);
}

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
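/*
 * Illustrative module load with both parameters (values are examples):
 *
 *	modprobe loop max_loop=8 max_part=15
 *
 * creates loop0..loop7 up front and reserves minor space for 15
 * partitions per device (part_shift becomes 4, see loop_init()).
 */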
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct loop_device *lo = cmd->rq->q->queuedata;

	blk_mq_start_request(bd->rq);

	if (lo->lo_state != Lo_bound)
		return BLK_STS_IOERR;

	switch (req_op(cmd->rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		cmd->use_aio = false;
		break;
	default:
		cmd->use_aio = lo->use_dio;
		break;
	}

	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_STS_OK;
}
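/*
 * Per-request flow: loop_queue_rq() above never touches the backing file
 * itself; it queues cmd->work on the per-device kthread worker.  The
 * worker runs loop_queue_work() -> loop_handle_cmd() ->
 * do_req_filebacked() in process context, where blocking filesystem I/O
 * is allowed.  Non-aio requests are completed right there; aio requests
 * complete later via lo_rw_aio_complete().
 */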
static void loop_handle_cmd(struct loop_cmd *cmd)
{
	const bool write = op_is_write(req_op(cmd->rq));
	struct loop_device *lo = cmd->rq->q->queuedata;
	int ret = 0;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
		ret = -EIO;
		goto failed;
	}

	ret = do_req_filebacked(lo, cmd->rq);
failed:
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		cmd->ret = ret ? -EIO : 0;
		blk_mq_complete_request(cmd->rq);
	}
}

static void loop_queue_work(struct kthread_work *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);
}

static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	kthread_init_work(&cmd->work, loop_queue_work);

	return 0;
}

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq       = loop_queue_rq,
	.init_request	= loop_init_request,
	.complete	= lo_complete_rq,
};
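/*
 * Note that struct loop_cmd is not allocated per I/O: tag_set.cmd_size
 * in loop_add() below tells blk-mq to reserve room for it behind every
 * preallocated request, and blk_mq_rq_to_pdu() simply returns a pointer
 * into that request, so the submission path stays allocation-free.
 */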
1801 */ 1802 if (!part_shift) 1803 disk->flags |= GENHD_FL_NO_PART_SCAN; 1804 disk->flags |= GENHD_FL_EXT_DEVT; 1805 mutex_init(&lo->lo_ctl_mutex); 1806 atomic_set(&lo->lo_refcnt, 0); 1807 lo->lo_number = i; 1808 spin_lock_init(&lo->lo_lock); 1809 disk->major = LOOP_MAJOR; 1810 disk->first_minor = i << part_shift; 1811 disk->fops = &lo_fops; 1812 disk->private_data = lo; 1813 disk->queue = lo->lo_queue; 1814 sprintf(disk->disk_name, "loop%d", i); 1815 add_disk(disk); 1816 *l = lo; 1817 return lo->lo_number; 1818 1819 out_free_queue: 1820 blk_cleanup_queue(lo->lo_queue); 1821 out_cleanup_tags: 1822 blk_mq_free_tag_set(&lo->tag_set); 1823 out_free_idr: 1824 idr_remove(&loop_index_idr, i); 1825 out_free_dev: 1826 kfree(lo); 1827 out: 1828 return err; 1829 } 1830 1831 static void loop_remove(struct loop_device *lo) 1832 { 1833 blk_cleanup_queue(lo->lo_queue); 1834 del_gendisk(lo->lo_disk); 1835 blk_mq_free_tag_set(&lo->tag_set); 1836 put_disk(lo->lo_disk); 1837 kfree(lo); 1838 } 1839 1840 static int find_free_cb(int id, void *ptr, void *data) 1841 { 1842 struct loop_device *lo = ptr; 1843 struct loop_device **l = data; 1844 1845 if (lo->lo_state == Lo_unbound) { 1846 *l = lo; 1847 return 1; 1848 } 1849 return 0; 1850 } 1851 1852 static int loop_lookup(struct loop_device **l, int i) 1853 { 1854 struct loop_device *lo; 1855 int ret = -ENODEV; 1856 1857 if (i < 0) { 1858 int err; 1859 1860 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); 1861 if (err == 1) { 1862 *l = lo; 1863 ret = lo->lo_number; 1864 } 1865 goto out; 1866 } 1867 1868 /* lookup and return a specific i */ 1869 lo = idr_find(&loop_index_idr, i); 1870 if (lo) { 1871 *l = lo; 1872 ret = lo->lo_number; 1873 } 1874 out: 1875 return ret; 1876 } 1877 1878 static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1879 { 1880 struct loop_device *lo; 1881 struct kobject *kobj; 1882 int err; 1883 1884 mutex_lock(&loop_index_mutex); 1885 err = loop_lookup(&lo, MINOR(dev) >> part_shift); 1886 if (err < 0) 1887 err = loop_add(&lo, MINOR(dev) >> part_shift); 1888 if (err < 0) 1889 kobj = NULL; 1890 else 1891 kobj = get_disk(lo->lo_disk); 1892 mutex_unlock(&loop_index_mutex); 1893 1894 *part = 0; 1895 return kobj; 1896 } 1897 1898 static long loop_control_ioctl(struct file *file, unsigned int cmd, 1899 unsigned long parm) 1900 { 1901 struct loop_device *lo; 1902 int ret = -ENOSYS; 1903 1904 mutex_lock(&loop_index_mutex); 1905 switch (cmd) { 1906 case LOOP_CTL_ADD: 1907 ret = loop_lookup(&lo, parm); 1908 if (ret >= 0) { 1909 ret = -EEXIST; 1910 break; 1911 } 1912 ret = loop_add(&lo, parm); 1913 break; 1914 case LOOP_CTL_REMOVE: 1915 ret = loop_lookup(&lo, parm); 1916 if (ret < 0) 1917 break; 1918 mutex_lock(&lo->lo_ctl_mutex); 1919 if (lo->lo_state != Lo_unbound) { 1920 ret = -EBUSY; 1921 mutex_unlock(&lo->lo_ctl_mutex); 1922 break; 1923 } 1924 if (atomic_read(&lo->lo_refcnt) > 0) { 1925 ret = -EBUSY; 1926 mutex_unlock(&lo->lo_ctl_mutex); 1927 break; 1928 } 1929 lo->lo_disk->private_data = NULL; 1930 mutex_unlock(&lo->lo_ctl_mutex); 1931 idr_remove(&loop_index_idr, lo->lo_number); 1932 loop_remove(lo); 1933 break; 1934 case LOOP_CTL_GET_FREE: 1935 ret = loop_lookup(&lo, -1); 1936 if (ret >= 0) 1937 break; 1938 ret = loop_add(&lo, -1); 1939 } 1940 mutex_unlock(&loop_index_mutex); 1941 1942 return ret; 1943 } 1944 1945 static const struct file_operations loop_ctl_fops = { 1946 .open = nonseekable_open, 1947 .unlocked_ioctl = loop_control_ioctl, 1948 .compat_ioctl = loop_control_ioctl, 1949 .owner = THIS_MODULE, 1950 .llseek = 
static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	err = misc_register(&loop_misc);
	if (err < 0)
		return err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can decide the correct minor
		 * number if they want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto misc_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto misc_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
	return err;
}

static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif