/*
 * linux/drivers/block/loop.c
 *
 * Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non-blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
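	 *
	 * A quick worked example (hypothetical numbers, purely illustrative):
	 * with a 1 GiB backing file, offset = 4096 and no sizelimit, loopsize
	 * becomes 1073741824 - 4096 = 1073737728 bytes, so the function
	 * returns 1073737728 >> 9 = 2097144 sectors.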
158 */ 159 return loopsize >> 9; 160 } 161 162 static loff_t get_loop_size(struct loop_device *lo, struct file *file) 163 { 164 return get_size(lo->lo_offset, lo->lo_sizelimit, file); 165 } 166 167 static void __loop_update_dio(struct loop_device *lo, bool dio) 168 { 169 struct file *file = lo->lo_backing_file; 170 struct address_space *mapping = file->f_mapping; 171 struct inode *inode = mapping->host; 172 unsigned short sb_bsize = 0; 173 unsigned dio_align = 0; 174 bool use_dio; 175 176 if (inode->i_sb->s_bdev) { 177 sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev); 178 dio_align = sb_bsize - 1; 179 } 180 181 /* 182 * We support direct I/O only if lo_offset is aligned with the 183 * logical I/O size of backing device, and the logical block 184 * size of loop is bigger than the backing device's and the loop 185 * needn't transform transfer. 186 * 187 * TODO: the above condition may be loosed in the future, and 188 * direct I/O may be switched runtime at that time because most 189 * of requests in sane applications should be PAGE_SIZE aligned 190 */ 191 if (dio) { 192 if (queue_logical_block_size(lo->lo_queue) >= sb_bsize && 193 !(lo->lo_offset & dio_align) && 194 mapping->a_ops->direct_IO && 195 !lo->transfer) 196 use_dio = true; 197 else 198 use_dio = false; 199 } else { 200 use_dio = false; 201 } 202 203 if (lo->use_dio == use_dio) 204 return; 205 206 /* flush dirty pages before changing direct IO */ 207 vfs_fsync(file, 0); 208 209 /* 210 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with 211 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup 212 * will get updated by ioctl(LOOP_GET_STATUS) 213 */ 214 blk_mq_freeze_queue(lo->lo_queue); 215 lo->use_dio = use_dio; 216 if (use_dio) 217 lo->lo_flags |= LO_FLAGS_DIRECT_IO; 218 else 219 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; 220 blk_mq_unfreeze_queue(lo->lo_queue); 221 } 222 223 static int 224 figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) 225 { 226 loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); 227 sector_t x = (sector_t)size; 228 struct block_device *bdev = lo->lo_device; 229 230 if (unlikely((loff_t)x != size)) 231 return -EFBIG; 232 if (lo->lo_offset != offset) 233 lo->lo_offset = offset; 234 if (lo->lo_sizelimit != sizelimit) 235 lo->lo_sizelimit = sizelimit; 236 set_capacity(lo->lo_disk, x); 237 bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); 238 /* let user-space know about the new size */ 239 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 240 return 0; 241 } 242 243 static inline int 244 lo_do_transfer(struct loop_device *lo, int cmd, 245 struct page *rpage, unsigned roffs, 246 struct page *lpage, unsigned loffs, 247 int size, sector_t rblock) 248 { 249 int ret; 250 251 ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); 252 if (likely(!ret)) 253 return 0; 254 255 printk_ratelimited(KERN_ERR 256 "loop: Transfer error at byte offset %llu, length %i.\n", 257 (unsigned long long)rblock << 9, size); 258 return ret; 259 } 260 261 static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) 262 { 263 struct iov_iter i; 264 ssize_t bw; 265 266 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); 267 268 file_start_write(file); 269 bw = vfs_iter_write(file, &i, ppos, 0); 270 file_end_write(file); 271 272 if (likely(bw == bvec->bv_len)) 273 return 0; 274 275 printk_ratelimited(KERN_ERR 276 "loop: Write error at byte offset %llu, length %i.\n", 277 (unsigned long long)*ppos, bvec->bv_len); 278 if (bw >= 0) 279 bw 
= -EIO; 280 return bw; 281 } 282 283 static int lo_write_simple(struct loop_device *lo, struct request *rq, 284 loff_t pos) 285 { 286 struct bio_vec bvec; 287 struct req_iterator iter; 288 int ret = 0; 289 290 rq_for_each_segment(bvec, rq, iter) { 291 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); 292 if (ret < 0) 293 break; 294 cond_resched(); 295 } 296 297 return ret; 298 } 299 300 /* 301 * This is the slow, transforming version that needs to double buffer the 302 * data as it cannot do the transformations in place without having direct 303 * access to the destination pages of the backing file. 304 */ 305 static int lo_write_transfer(struct loop_device *lo, struct request *rq, 306 loff_t pos) 307 { 308 struct bio_vec bvec, b; 309 struct req_iterator iter; 310 struct page *page; 311 int ret = 0; 312 313 page = alloc_page(GFP_NOIO); 314 if (unlikely(!page)) 315 return -ENOMEM; 316 317 rq_for_each_segment(bvec, rq, iter) { 318 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, 319 bvec.bv_offset, bvec.bv_len, pos >> 9); 320 if (unlikely(ret)) 321 break; 322 323 b.bv_page = page; 324 b.bv_offset = 0; 325 b.bv_len = bvec.bv_len; 326 ret = lo_write_bvec(lo->lo_backing_file, &b, &pos); 327 if (ret < 0) 328 break; 329 } 330 331 __free_page(page); 332 return ret; 333 } 334 335 static int lo_read_simple(struct loop_device *lo, struct request *rq, 336 loff_t pos) 337 { 338 struct bio_vec bvec; 339 struct req_iterator iter; 340 struct iov_iter i; 341 ssize_t len; 342 343 rq_for_each_segment(bvec, rq, iter) { 344 iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); 345 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); 346 if (len < 0) 347 return len; 348 349 flush_dcache_page(bvec.bv_page); 350 351 if (len != bvec.bv_len) { 352 struct bio *bio; 353 354 __rq_for_each_bio(bio, rq) 355 zero_fill_bio(bio); 356 break; 357 } 358 cond_resched(); 359 } 360 361 return 0; 362 } 363 364 static int lo_read_transfer(struct loop_device *lo, struct request *rq, 365 loff_t pos) 366 { 367 struct bio_vec bvec, b; 368 struct req_iterator iter; 369 struct iov_iter i; 370 struct page *page; 371 ssize_t len; 372 int ret = 0; 373 374 page = alloc_page(GFP_NOIO); 375 if (unlikely(!page)) 376 return -ENOMEM; 377 378 rq_for_each_segment(bvec, rq, iter) { 379 loff_t offset = pos; 380 381 b.bv_page = page; 382 b.bv_offset = 0; 383 b.bv_len = bvec.bv_len; 384 385 iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); 386 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); 387 if (len < 0) { 388 ret = len; 389 goto out_free_page; 390 } 391 392 ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, 393 bvec.bv_offset, len, offset >> 9); 394 if (ret) 395 goto out_free_page; 396 397 flush_dcache_page(bvec.bv_page); 398 399 if (len != bvec.bv_len) { 400 struct bio *bio; 401 402 __rq_for_each_bio(bio, rq) 403 zero_fill_bio(bio); 404 break; 405 } 406 } 407 408 ret = 0; 409 out_free_page: 410 __free_page(page); 411 return ret; 412 } 413 414 static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) 415 { 416 /* 417 * We use punch hole to reclaim the free space used by the 418 * image a.k.a. discard. However we do not support discard if 419 * encryption is enabled, because it may give an attacker 420 * useful information. 
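	 *
	 * As an illustration (hypothetical request, not taken from the code):
	 * a 1 MiB REQ_OP_DISCARD starting at sector 0 of a loop device with
	 * lo_offset = 0 ends up below as fallocate(file, FALLOC_FL_PUNCH_HOLE |
	 * FALLOC_FL_KEEP_SIZE, 0, 1048576), which releases the backing blocks
	 * while leaving the file size untouched.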
421 */ 422 struct file *file = lo->lo_backing_file; 423 int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; 424 int ret; 425 426 if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { 427 ret = -EOPNOTSUPP; 428 goto out; 429 } 430 431 ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); 432 if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) 433 ret = -EIO; 434 out: 435 return ret; 436 } 437 438 static int lo_req_flush(struct loop_device *lo, struct request *rq) 439 { 440 struct file *file = lo->lo_backing_file; 441 int ret = vfs_fsync(file, 0); 442 if (unlikely(ret && ret != -EINVAL)) 443 ret = -EIO; 444 445 return ret; 446 } 447 448 static void lo_complete_rq(struct request *rq) 449 { 450 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 451 452 if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && 453 cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { 454 struct bio *bio = cmd->rq->bio; 455 456 bio_advance(bio, cmd->ret); 457 zero_fill_bio(bio); 458 } 459 460 blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); 461 } 462 463 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 464 { 465 struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); 466 467 cmd->ret = ret; 468 blk_mq_complete_request(cmd->rq); 469 } 470 471 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, 472 loff_t pos, bool rw) 473 { 474 struct iov_iter iter; 475 struct bio_vec *bvec; 476 struct bio *bio = cmd->rq->bio; 477 struct file *file = lo->lo_backing_file; 478 int ret; 479 480 /* nomerge for loop request queue */ 481 WARN_ON(cmd->rq->bio != cmd->rq->biotail); 482 483 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 484 iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, 485 bio_segments(bio), blk_rq_bytes(cmd->rq)); 486 /* 487 * This bio may be started from the middle of the 'bvec' 488 * because of bio splitting, so offset from the bvec must 489 * be passed to iov iterator 490 */ 491 iter.iov_offset = bio->bi_iter.bi_bvec_done; 492 493 cmd->iocb.ki_pos = pos; 494 cmd->iocb.ki_filp = file; 495 cmd->iocb.ki_complete = lo_rw_aio_complete; 496 cmd->iocb.ki_flags = IOCB_DIRECT; 497 498 if (rw == WRITE) 499 ret = call_write_iter(file, &cmd->iocb, &iter); 500 else 501 ret = call_read_iter(file, &cmd->iocb, &iter); 502 503 if (ret != -EIOCBQUEUED) 504 cmd->iocb.ki_complete(&cmd->iocb, ret, 0); 505 return 0; 506 } 507 508 static int do_req_filebacked(struct loop_device *lo, struct request *rq) 509 { 510 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 511 loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; 512 513 /* 514 * lo_write_simple and lo_read_simple should have been covered 515 * by io submit style function like lo_rw_aio(), one blocker 516 * is that lo_read_simple() need to call flush_dcache_page after 517 * the page is written from kernel, and it isn't easy to handle 518 * this in io submit style function which submits all segments 519 * of the req at one time. And direct read IO doesn't need to 520 * run flush_dcache_page(). 
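	 *
	 * Concretely (one hypothetical case, mirroring the switch below): with
	 * no transfer function set, a REQ_OP_READ whose cmd->use_aio is true
	 * is submitted through lo_rw_aio() as direct I/O against the backing
	 * file, while the same read without use_aio is served synchronously
	 * by lo_read_simple() via vfs_iter_read().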
521 */ 522 switch (req_op(rq)) { 523 case REQ_OP_FLUSH: 524 return lo_req_flush(lo, rq); 525 case REQ_OP_DISCARD: 526 case REQ_OP_WRITE_ZEROES: 527 return lo_discard(lo, rq, pos); 528 case REQ_OP_WRITE: 529 if (lo->transfer) 530 return lo_write_transfer(lo, rq, pos); 531 else if (cmd->use_aio) 532 return lo_rw_aio(lo, cmd, pos, WRITE); 533 else 534 return lo_write_simple(lo, rq, pos); 535 case REQ_OP_READ: 536 if (lo->transfer) 537 return lo_read_transfer(lo, rq, pos); 538 else if (cmd->use_aio) 539 return lo_rw_aio(lo, cmd, pos, READ); 540 else 541 return lo_read_simple(lo, rq, pos); 542 default: 543 WARN_ON_ONCE(1); 544 return -EIO; 545 break; 546 } 547 } 548 549 struct switch_request { 550 struct file *file; 551 struct completion wait; 552 }; 553 554 static inline void loop_update_dio(struct loop_device *lo) 555 { 556 __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) | 557 lo->use_dio); 558 } 559 560 /* 561 * Do the actual switch; called from the BIO completion routine 562 */ 563 static void do_loop_switch(struct loop_device *lo, struct switch_request *p) 564 { 565 struct file *file = p->file; 566 struct file *old_file = lo->lo_backing_file; 567 struct address_space *mapping; 568 569 /* if no new file, only flush of queued bios requested */ 570 if (!file) 571 return; 572 573 mapping = file->f_mapping; 574 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); 575 lo->lo_backing_file = file; 576 lo->old_gfp_mask = mapping_gfp_mask(mapping); 577 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 578 loop_update_dio(lo); 579 } 580 581 /* 582 * loop_switch performs the hard work of switching a backing store. 583 * First it needs to flush existing IO, it does this by sending a magic 584 * BIO down the pipe. The completion of this BIO does the actual switch. 585 */ 586 static int loop_switch(struct loop_device *lo, struct file *file) 587 { 588 struct switch_request w; 589 590 w.file = file; 591 592 /* freeze queue and wait for completion of scheduled requests */ 593 blk_mq_freeze_queue(lo->lo_queue); 594 595 /* do the switch action */ 596 do_loop_switch(lo, &w); 597 598 /* unfreeze */ 599 blk_mq_unfreeze_queue(lo->lo_queue); 600 601 return 0; 602 } 603 604 /* 605 * Helper to flush the IOs in loop, but keeping loop thread running 606 */ 607 static int loop_flush(struct loop_device *lo) 608 { 609 /* loop not yet configured, no running thread, nothing to flush */ 610 if (lo->lo_state != Lo_bound) 611 return 0; 612 return loop_switch(lo, NULL); 613 } 614 615 static void loop_reread_partitions(struct loop_device *lo, 616 struct block_device *bdev) 617 { 618 int rc; 619 620 /* 621 * bd_mutex has been held already in release path, so don't 622 * acquire it if this function is called in such case. 623 * 624 * If the reread partition isn't from release path, lo_refcnt 625 * must be at least one and it can only become zero when the 626 * current holder is released. 627 */ 628 if (!atomic_read(&lo->lo_refcnt)) 629 rc = __blkdev_reread_part(bdev); 630 else 631 rc = blkdev_reread_part(bdev); 632 if (rc) 633 pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", 634 __func__, lo->lo_number, lo->lo_file_name, rc); 635 } 636 637 /* 638 * loop_change_fd switched the backing store of a loopback device to 639 * a new file. This is useful for operating system installers to free up 640 * the original file and in High Availability environments to switch to 641 * an alternative location for the content in case of server meltdown. 
642 * This can only work if the loop device is used read-only, and if the 643 * new backing store is the same size and type as the old backing store. 644 */ 645 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, 646 unsigned int arg) 647 { 648 struct file *file, *old_file; 649 struct inode *inode; 650 int error; 651 652 error = -ENXIO; 653 if (lo->lo_state != Lo_bound) 654 goto out; 655 656 /* the loop device has to be read-only */ 657 error = -EINVAL; 658 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) 659 goto out; 660 661 error = -EBADF; 662 file = fget(arg); 663 if (!file) 664 goto out; 665 666 inode = file->f_mapping->host; 667 old_file = lo->lo_backing_file; 668 669 error = -EINVAL; 670 671 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 672 goto out_putf; 673 674 /* size of the new backing store needs to be the same */ 675 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 676 goto out_putf; 677 678 /* and ... switch */ 679 error = loop_switch(lo, file); 680 if (error) 681 goto out_putf; 682 683 fput(old_file); 684 if (lo->lo_flags & LO_FLAGS_PARTSCAN) 685 loop_reread_partitions(lo, bdev); 686 return 0; 687 688 out_putf: 689 fput(file); 690 out: 691 return error; 692 } 693 694 static inline int is_loop_device(struct file *file) 695 { 696 struct inode *i = file->f_mapping->host; 697 698 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; 699 } 700 701 /* loop sysfs attributes */ 702 703 static ssize_t loop_attr_show(struct device *dev, char *page, 704 ssize_t (*callback)(struct loop_device *, char *)) 705 { 706 struct gendisk *disk = dev_to_disk(dev); 707 struct loop_device *lo = disk->private_data; 708 709 return callback(lo, page); 710 } 711 712 #define LOOP_ATTR_RO(_name) \ 713 static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ 714 static ssize_t loop_attr_do_show_##_name(struct device *d, \ 715 struct device_attribute *attr, char *b) \ 716 { \ 717 return loop_attr_show(d, b, loop_attr_##_name##_show); \ 718 } \ 719 static struct device_attribute loop_attr_##_name = \ 720 __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); 721 722 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) 723 { 724 ssize_t ret; 725 char *p = NULL; 726 727 spin_lock_irq(&lo->lo_lock); 728 if (lo->lo_backing_file) 729 p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); 730 spin_unlock_irq(&lo->lo_lock); 731 732 if (IS_ERR_OR_NULL(p)) 733 ret = PTR_ERR(p); 734 else { 735 ret = strlen(p); 736 memmove(buf, p, ret); 737 buf[ret++] = '\n'; 738 buf[ret] = 0; 739 } 740 741 return ret; 742 } 743 744 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) 745 { 746 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); 747 } 748 749 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) 750 { 751 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); 752 } 753 754 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) 755 { 756 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); 757 758 return sprintf(buf, "%s\n", autoclear ? "1" : "0"); 759 } 760 761 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) 762 { 763 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); 764 765 return sprintf(buf, "%s\n", partscan ? "1" : "0"); 766 } 767 768 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) 769 { 770 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); 771 772 return sprintf(buf, "%s\n", dio ? 
"1" : "0"); 773 } 774 775 LOOP_ATTR_RO(backing_file); 776 LOOP_ATTR_RO(offset); 777 LOOP_ATTR_RO(sizelimit); 778 LOOP_ATTR_RO(autoclear); 779 LOOP_ATTR_RO(partscan); 780 LOOP_ATTR_RO(dio); 781 782 static struct attribute *loop_attrs[] = { 783 &loop_attr_backing_file.attr, 784 &loop_attr_offset.attr, 785 &loop_attr_sizelimit.attr, 786 &loop_attr_autoclear.attr, 787 &loop_attr_partscan.attr, 788 &loop_attr_dio.attr, 789 NULL, 790 }; 791 792 static struct attribute_group loop_attribute_group = { 793 .name = "loop", 794 .attrs= loop_attrs, 795 }; 796 797 static int loop_sysfs_init(struct loop_device *lo) 798 { 799 return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, 800 &loop_attribute_group); 801 } 802 803 static void loop_sysfs_exit(struct loop_device *lo) 804 { 805 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, 806 &loop_attribute_group); 807 } 808 809 static void loop_config_discard(struct loop_device *lo) 810 { 811 struct file *file = lo->lo_backing_file; 812 struct inode *inode = file->f_mapping->host; 813 struct request_queue *q = lo->lo_queue; 814 815 /* 816 * We use punch hole to reclaim the free space used by the 817 * image a.k.a. discard. However we do not support discard if 818 * encryption is enabled, because it may give an attacker 819 * useful information. 820 */ 821 if ((!file->f_op->fallocate) || 822 lo->lo_encrypt_key_size) { 823 q->limits.discard_granularity = 0; 824 q->limits.discard_alignment = 0; 825 blk_queue_max_discard_sectors(q, 0); 826 blk_queue_max_write_zeroes_sectors(q, 0); 827 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 828 return; 829 } 830 831 q->limits.discard_granularity = inode->i_sb->s_blocksize; 832 q->limits.discard_alignment = 0; 833 834 blk_queue_max_discard_sectors(q, UINT_MAX >> 9); 835 blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); 836 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 837 } 838 839 static void loop_unprepare_queue(struct loop_device *lo) 840 { 841 kthread_flush_worker(&lo->worker); 842 kthread_stop(lo->worker_task); 843 } 844 845 static int loop_kthread_worker_fn(void *worker_ptr) 846 { 847 current->flags |= PF_LESS_THROTTLE; 848 return kthread_worker_fn(worker_ptr); 849 } 850 851 static int loop_prepare_queue(struct loop_device *lo) 852 { 853 kthread_init_worker(&lo->worker); 854 lo->worker_task = kthread_run(loop_kthread_worker_fn, 855 &lo->worker, "loop%d", lo->lo_number); 856 if (IS_ERR(lo->worker_task)) 857 return -ENOMEM; 858 set_user_nice(lo->worker_task, MIN_NICE); 859 return 0; 860 } 861 862 static int loop_set_fd(struct loop_device *lo, fmode_t mode, 863 struct block_device *bdev, unsigned int arg) 864 { 865 struct file *file, *f; 866 struct inode *inode; 867 struct address_space *mapping; 868 int lo_flags = 0; 869 int error; 870 loff_t size; 871 872 /* This is safe, since we have a reference from open(). 
*/ 873 __module_get(THIS_MODULE); 874 875 error = -EBADF; 876 file = fget(arg); 877 if (!file) 878 goto out; 879 880 error = -EBUSY; 881 if (lo->lo_state != Lo_unbound) 882 goto out_putf; 883 884 /* Avoid recursion */ 885 f = file; 886 while (is_loop_device(f)) { 887 struct loop_device *l; 888 889 if (f->f_mapping->host->i_bdev == bdev) 890 goto out_putf; 891 892 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 893 if (l->lo_state == Lo_unbound) { 894 error = -EINVAL; 895 goto out_putf; 896 } 897 f = l->lo_backing_file; 898 } 899 900 mapping = file->f_mapping; 901 inode = mapping->host; 902 903 error = -EINVAL; 904 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 905 goto out_putf; 906 907 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || 908 !file->f_op->write_iter) 909 lo_flags |= LO_FLAGS_READ_ONLY; 910 911 error = -EFBIG; 912 size = get_loop_size(lo, file); 913 if ((loff_t)(sector_t)size != size) 914 goto out_putf; 915 error = loop_prepare_queue(lo); 916 if (error) 917 goto out_putf; 918 919 error = 0; 920 921 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); 922 923 lo->use_dio = false; 924 lo->lo_device = bdev; 925 lo->lo_flags = lo_flags; 926 lo->lo_backing_file = file; 927 lo->transfer = NULL; 928 lo->ioctl = NULL; 929 lo->lo_sizelimit = 0; 930 lo->old_gfp_mask = mapping_gfp_mask(mapping); 931 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 932 933 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 934 blk_queue_write_cache(lo->lo_queue, true, false); 935 936 loop_update_dio(lo); 937 set_capacity(lo->lo_disk, size); 938 bd_set_size(bdev, size << 9); 939 loop_sysfs_init(lo); 940 /* let user-space know about the new size */ 941 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 942 943 set_blocksize(bdev, S_ISBLK(inode->i_mode) ? 944 block_size(inode->i_bdev) : PAGE_SIZE); 945 946 lo->lo_state = Lo_bound; 947 if (part_shift) 948 lo->lo_flags |= LO_FLAGS_PARTSCAN; 949 if (lo->lo_flags & LO_FLAGS_PARTSCAN) 950 loop_reread_partitions(lo, bdev); 951 952 /* Grab the block_device to prevent its destruction after we 953 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). 954 */ 955 bdgrab(bdev); 956 return 0; 957 958 out_putf: 959 fput(file); 960 out: 961 /* This is safe: open() is still holding a reference. 
*/ 962 module_put(THIS_MODULE); 963 return error; 964 } 965 966 static int 967 loop_release_xfer(struct loop_device *lo) 968 { 969 int err = 0; 970 struct loop_func_table *xfer = lo->lo_encryption; 971 972 if (xfer) { 973 if (xfer->release) 974 err = xfer->release(lo); 975 lo->transfer = NULL; 976 lo->lo_encryption = NULL; 977 module_put(xfer->owner); 978 } 979 return err; 980 } 981 982 static int 983 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, 984 const struct loop_info64 *i) 985 { 986 int err = 0; 987 988 if (xfer) { 989 struct module *owner = xfer->owner; 990 991 if (!try_module_get(owner)) 992 return -EINVAL; 993 if (xfer->init) 994 err = xfer->init(lo, i); 995 if (err) 996 module_put(owner); 997 else 998 lo->lo_encryption = xfer; 999 } 1000 return err; 1001 } 1002 1003 static int loop_clr_fd(struct loop_device *lo) 1004 { 1005 struct file *filp = lo->lo_backing_file; 1006 gfp_t gfp = lo->old_gfp_mask; 1007 struct block_device *bdev = lo->lo_device; 1008 1009 if (lo->lo_state != Lo_bound) 1010 return -ENXIO; 1011 1012 /* 1013 * If we've explicitly asked to tear down the loop device, 1014 * and it has an elevated reference count, set it for auto-teardown when 1015 * the last reference goes away. This stops $!~#$@ udev from 1016 * preventing teardown because it decided that it needs to run blkid on 1017 * the loopback device whenever they appear. xfstests is notorious for 1018 * failing tests because blkid via udev races with a losetup 1019 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d 1020 * command to fail with EBUSY. 1021 */ 1022 if (atomic_read(&lo->lo_refcnt) > 1) { 1023 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 1024 mutex_unlock(&lo->lo_ctl_mutex); 1025 return 0; 1026 } 1027 1028 if (filp == NULL) 1029 return -EINVAL; 1030 1031 /* freeze request queue during the transition */ 1032 blk_mq_freeze_queue(lo->lo_queue); 1033 1034 spin_lock_irq(&lo->lo_lock); 1035 lo->lo_state = Lo_rundown; 1036 lo->lo_backing_file = NULL; 1037 spin_unlock_irq(&lo->lo_lock); 1038 1039 loop_release_xfer(lo); 1040 lo->transfer = NULL; 1041 lo->ioctl = NULL; 1042 lo->lo_device = NULL; 1043 lo->lo_encryption = NULL; 1044 lo->lo_offset = 0; 1045 lo->lo_sizelimit = 0; 1046 lo->lo_encrypt_key_size = 0; 1047 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 1048 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 1049 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 1050 blk_queue_logical_block_size(lo->lo_queue, 512); 1051 if (bdev) { 1052 bdput(bdev); 1053 invalidate_bdev(bdev); 1054 } 1055 set_capacity(lo->lo_disk, 0); 1056 loop_sysfs_exit(lo); 1057 if (bdev) { 1058 bd_set_size(bdev, 0); 1059 /* let user-space know about this change */ 1060 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); 1061 } 1062 mapping_set_gfp_mask(filp->f_mapping, gfp); 1063 lo->lo_state = Lo_unbound; 1064 /* This is safe: open() is still holding a reference. */ 1065 module_put(THIS_MODULE); 1066 blk_mq_unfreeze_queue(lo->lo_queue); 1067 1068 if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) 1069 loop_reread_partitions(lo, bdev); 1070 lo->lo_flags = 0; 1071 if (!part_shift) 1072 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1073 loop_unprepare_queue(lo); 1074 mutex_unlock(&lo->lo_ctl_mutex); 1075 /* 1076 * Need not hold lo_ctl_mutex to fput backing file. 1077 * Calling fput holding lo_ctl_mutex triggers a circular 1078 * lock dependency possibility warning as fput can take 1079 * bd_mutex which is usually taken before lo_ctl_mutex. 
1080 */ 1081 fput(filp); 1082 return 0; 1083 } 1084 1085 static int 1086 loop_set_status(struct loop_device *lo, const struct loop_info64 *info) 1087 { 1088 int err; 1089 struct loop_func_table *xfer; 1090 kuid_t uid = current_uid(); 1091 1092 if (lo->lo_encrypt_key_size && 1093 !uid_eq(lo->lo_key_owner, uid) && 1094 !capable(CAP_SYS_ADMIN)) 1095 return -EPERM; 1096 if (lo->lo_state != Lo_bound) 1097 return -ENXIO; 1098 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) 1099 return -EINVAL; 1100 1101 /* I/O need to be drained during transfer transition */ 1102 blk_mq_freeze_queue(lo->lo_queue); 1103 1104 err = loop_release_xfer(lo); 1105 if (err) 1106 goto exit; 1107 1108 if (info->lo_encrypt_type) { 1109 unsigned int type = info->lo_encrypt_type; 1110 1111 if (type >= MAX_LO_CRYPT) 1112 return -EINVAL; 1113 xfer = xfer_funcs[type]; 1114 if (xfer == NULL) 1115 return -EINVAL; 1116 } else 1117 xfer = NULL; 1118 1119 err = loop_init_xfer(lo, xfer, info); 1120 if (err) 1121 goto exit; 1122 1123 if (lo->lo_offset != info->lo_offset || 1124 lo->lo_sizelimit != info->lo_sizelimit) { 1125 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { 1126 err = -EFBIG; 1127 goto exit; 1128 } 1129 } 1130 1131 loop_config_discard(lo); 1132 1133 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); 1134 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); 1135 lo->lo_file_name[LO_NAME_SIZE-1] = 0; 1136 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; 1137 1138 if (!xfer) 1139 xfer = &none_funcs; 1140 lo->transfer = xfer->transfer; 1141 lo->ioctl = xfer->ioctl; 1142 1143 if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != 1144 (info->lo_flags & LO_FLAGS_AUTOCLEAR)) 1145 lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; 1146 1147 lo->lo_encrypt_key_size = info->lo_encrypt_key_size; 1148 lo->lo_init[0] = info->lo_init[0]; 1149 lo->lo_init[1] = info->lo_init[1]; 1150 if (info->lo_encrypt_key_size) { 1151 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, 1152 info->lo_encrypt_key_size); 1153 lo->lo_key_owner = uid; 1154 } 1155 1156 /* update dio if lo_offset or transfer is changed */ 1157 __loop_update_dio(lo, lo->use_dio); 1158 1159 exit: 1160 blk_mq_unfreeze_queue(lo->lo_queue); 1161 1162 if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && 1163 !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { 1164 lo->lo_flags |= LO_FLAGS_PARTSCAN; 1165 lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; 1166 loop_reread_partitions(lo, lo->lo_device); 1167 } 1168 1169 return err; 1170 } 1171 1172 static int 1173 loop_get_status(struct loop_device *lo, struct loop_info64 *info) 1174 { 1175 struct file *file = lo->lo_backing_file; 1176 struct kstat stat; 1177 int error; 1178 1179 if (lo->lo_state != Lo_bound) 1180 return -ENXIO; 1181 error = vfs_getattr(&file->f_path, &stat, 1182 STATX_INO, AT_STATX_SYNC_AS_STAT); 1183 if (error) 1184 return error; 1185 memset(info, 0, sizeof(*info)); 1186 info->lo_number = lo->lo_number; 1187 info->lo_device = huge_encode_dev(stat.dev); 1188 info->lo_inode = stat.ino; 1189 info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); 1190 info->lo_offset = lo->lo_offset; 1191 info->lo_sizelimit = lo->lo_sizelimit; 1192 info->lo_flags = lo->lo_flags; 1193 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); 1194 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); 1195 info->lo_encrypt_type = 1196 lo->lo_encryption ? 
lo->lo_encryption->number : 0; 1197 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { 1198 info->lo_encrypt_key_size = lo->lo_encrypt_key_size; 1199 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, 1200 lo->lo_encrypt_key_size); 1201 } 1202 return 0; 1203 } 1204 1205 static void 1206 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) 1207 { 1208 memset(info64, 0, sizeof(*info64)); 1209 info64->lo_number = info->lo_number; 1210 info64->lo_device = info->lo_device; 1211 info64->lo_inode = info->lo_inode; 1212 info64->lo_rdevice = info->lo_rdevice; 1213 info64->lo_offset = info->lo_offset; 1214 info64->lo_sizelimit = 0; 1215 info64->lo_encrypt_type = info->lo_encrypt_type; 1216 info64->lo_encrypt_key_size = info->lo_encrypt_key_size; 1217 info64->lo_flags = info->lo_flags; 1218 info64->lo_init[0] = info->lo_init[0]; 1219 info64->lo_init[1] = info->lo_init[1]; 1220 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1221 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); 1222 else 1223 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); 1224 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); 1225 } 1226 1227 static int 1228 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) 1229 { 1230 memset(info, 0, sizeof(*info)); 1231 info->lo_number = info64->lo_number; 1232 info->lo_device = info64->lo_device; 1233 info->lo_inode = info64->lo_inode; 1234 info->lo_rdevice = info64->lo_rdevice; 1235 info->lo_offset = info64->lo_offset; 1236 info->lo_encrypt_type = info64->lo_encrypt_type; 1237 info->lo_encrypt_key_size = info64->lo_encrypt_key_size; 1238 info->lo_flags = info64->lo_flags; 1239 info->lo_init[0] = info64->lo_init[0]; 1240 info->lo_init[1] = info64->lo_init[1]; 1241 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1242 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); 1243 else 1244 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); 1245 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); 1246 1247 /* error in case values were truncated */ 1248 if (info->lo_device != info64->lo_device || 1249 info->lo_rdevice != info64->lo_rdevice || 1250 info->lo_inode != info64->lo_inode || 1251 info->lo_offset != info64->lo_offset) 1252 return -EOVERFLOW; 1253 1254 return 0; 1255 } 1256 1257 static int 1258 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) 1259 { 1260 struct loop_info info; 1261 struct loop_info64 info64; 1262 1263 if (copy_from_user(&info, arg, sizeof (struct loop_info))) 1264 return -EFAULT; 1265 loop_info64_from_old(&info, &info64); 1266 return loop_set_status(lo, &info64); 1267 } 1268 1269 static int 1270 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) 1271 { 1272 struct loop_info64 info64; 1273 1274 if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) 1275 return -EFAULT; 1276 return loop_set_status(lo, &info64); 1277 } 1278 1279 static int 1280 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { 1281 struct loop_info info; 1282 struct loop_info64 info64; 1283 int err = 0; 1284 1285 if (!arg) 1286 err = -EINVAL; 1287 if (!err) 1288 err = loop_get_status(lo, &info64); 1289 if (!err) 1290 err = loop_info64_to_old(&info64, &info); 1291 if (!err && copy_to_user(arg, &info, sizeof(info))) 1292 err = -EFAULT; 1293 1294 return err; 1295 } 1296 1297 static int 1298 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { 1299 struct loop_info64 info64; 1300 
int err = 0; 1301 1302 if (!arg) 1303 err = -EINVAL; 1304 if (!err) 1305 err = loop_get_status(lo, &info64); 1306 if (!err && copy_to_user(arg, &info64, sizeof(info64))) 1307 err = -EFAULT; 1308 1309 return err; 1310 } 1311 1312 static int loop_set_capacity(struct loop_device *lo) 1313 { 1314 if (unlikely(lo->lo_state != Lo_bound)) 1315 return -ENXIO; 1316 1317 return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); 1318 } 1319 1320 static int loop_set_dio(struct loop_device *lo, unsigned long arg) 1321 { 1322 int error = -ENXIO; 1323 if (lo->lo_state != Lo_bound) 1324 goto out; 1325 1326 __loop_update_dio(lo, !!arg); 1327 if (lo->use_dio == !!arg) 1328 return 0; 1329 error = -EINVAL; 1330 out: 1331 return error; 1332 } 1333 1334 static int loop_set_block_size(struct loop_device *lo, unsigned long arg) 1335 { 1336 if (lo->lo_state != Lo_bound) 1337 return -ENXIO; 1338 1339 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) 1340 return -EINVAL; 1341 1342 blk_mq_freeze_queue(lo->lo_queue); 1343 1344 blk_queue_logical_block_size(lo->lo_queue, arg); 1345 loop_update_dio(lo); 1346 1347 blk_mq_unfreeze_queue(lo->lo_queue); 1348 1349 return 0; 1350 } 1351 1352 static int lo_ioctl(struct block_device *bdev, fmode_t mode, 1353 unsigned int cmd, unsigned long arg) 1354 { 1355 struct loop_device *lo = bdev->bd_disk->private_data; 1356 int err; 1357 1358 mutex_lock_nested(&lo->lo_ctl_mutex, 1); 1359 switch (cmd) { 1360 case LOOP_SET_FD: 1361 err = loop_set_fd(lo, mode, bdev, arg); 1362 break; 1363 case LOOP_CHANGE_FD: 1364 err = loop_change_fd(lo, bdev, arg); 1365 break; 1366 case LOOP_CLR_FD: 1367 /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ 1368 err = loop_clr_fd(lo); 1369 if (!err) 1370 goto out_unlocked; 1371 break; 1372 case LOOP_SET_STATUS: 1373 err = -EPERM; 1374 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1375 err = loop_set_status_old(lo, 1376 (struct loop_info __user *)arg); 1377 break; 1378 case LOOP_GET_STATUS: 1379 err = loop_get_status_old(lo, (struct loop_info __user *) arg); 1380 break; 1381 case LOOP_SET_STATUS64: 1382 err = -EPERM; 1383 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1384 err = loop_set_status64(lo, 1385 (struct loop_info64 __user *) arg); 1386 break; 1387 case LOOP_GET_STATUS64: 1388 err = loop_get_status64(lo, (struct loop_info64 __user *) arg); 1389 break; 1390 case LOOP_SET_CAPACITY: 1391 err = -EPERM; 1392 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1393 err = loop_set_capacity(lo); 1394 break; 1395 case LOOP_SET_DIRECT_IO: 1396 err = -EPERM; 1397 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1398 err = loop_set_dio(lo, arg); 1399 break; 1400 case LOOP_SET_BLOCK_SIZE: 1401 err = -EPERM; 1402 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) 1403 err = loop_set_block_size(lo, arg); 1404 break; 1405 default: 1406 err = lo->ioctl ? 
lo->ioctl(lo, cmd, arg) : -EINVAL; 1407 } 1408 mutex_unlock(&lo->lo_ctl_mutex); 1409 1410 out_unlocked: 1411 return err; 1412 } 1413 1414 #ifdef CONFIG_COMPAT 1415 struct compat_loop_info { 1416 compat_int_t lo_number; /* ioctl r/o */ 1417 compat_dev_t lo_device; /* ioctl r/o */ 1418 compat_ulong_t lo_inode; /* ioctl r/o */ 1419 compat_dev_t lo_rdevice; /* ioctl r/o */ 1420 compat_int_t lo_offset; 1421 compat_int_t lo_encrypt_type; 1422 compat_int_t lo_encrypt_key_size; /* ioctl w/o */ 1423 compat_int_t lo_flags; /* ioctl r/o */ 1424 char lo_name[LO_NAME_SIZE]; 1425 unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ 1426 compat_ulong_t lo_init[2]; 1427 char reserved[4]; 1428 }; 1429 1430 /* 1431 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info 1432 * - noinlined to reduce stack space usage in main part of driver 1433 */ 1434 static noinline int 1435 loop_info64_from_compat(const struct compat_loop_info __user *arg, 1436 struct loop_info64 *info64) 1437 { 1438 struct compat_loop_info info; 1439 1440 if (copy_from_user(&info, arg, sizeof(info))) 1441 return -EFAULT; 1442 1443 memset(info64, 0, sizeof(*info64)); 1444 info64->lo_number = info.lo_number; 1445 info64->lo_device = info.lo_device; 1446 info64->lo_inode = info.lo_inode; 1447 info64->lo_rdevice = info.lo_rdevice; 1448 info64->lo_offset = info.lo_offset; 1449 info64->lo_sizelimit = 0; 1450 info64->lo_encrypt_type = info.lo_encrypt_type; 1451 info64->lo_encrypt_key_size = info.lo_encrypt_key_size; 1452 info64->lo_flags = info.lo_flags; 1453 info64->lo_init[0] = info.lo_init[0]; 1454 info64->lo_init[1] = info.lo_init[1]; 1455 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1456 memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); 1457 else 1458 memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); 1459 memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); 1460 return 0; 1461 } 1462 1463 /* 1464 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace 1465 * - noinlined to reduce stack space usage in main part of driver 1466 */ 1467 static noinline int 1468 loop_info64_to_compat(const struct loop_info64 *info64, 1469 struct compat_loop_info __user *arg) 1470 { 1471 struct compat_loop_info info; 1472 1473 memset(&info, 0, sizeof(info)); 1474 info.lo_number = info64->lo_number; 1475 info.lo_device = info64->lo_device; 1476 info.lo_inode = info64->lo_inode; 1477 info.lo_rdevice = info64->lo_rdevice; 1478 info.lo_offset = info64->lo_offset; 1479 info.lo_encrypt_type = info64->lo_encrypt_type; 1480 info.lo_encrypt_key_size = info64->lo_encrypt_key_size; 1481 info.lo_flags = info64->lo_flags; 1482 info.lo_init[0] = info64->lo_init[0]; 1483 info.lo_init[1] = info64->lo_init[1]; 1484 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) 1485 memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); 1486 else 1487 memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); 1488 memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); 1489 1490 /* error in case values were truncated */ 1491 if (info.lo_device != info64->lo_device || 1492 info.lo_rdevice != info64->lo_rdevice || 1493 info.lo_inode != info64->lo_inode || 1494 info.lo_offset != info64->lo_offset || 1495 info.lo_init[0] != info64->lo_init[0] || 1496 info.lo_init[1] != info64->lo_init[1]) 1497 return -EOVERFLOW; 1498 1499 if (copy_to_user(arg, &info, sizeof(info))) 1500 return -EFAULT; 1501 return 0; 1502 } 1503 1504 static int 1505 loop_set_status_compat(struct loop_device *lo, 1506 const struct 
compat_loop_info __user *arg) 1507 { 1508 struct loop_info64 info64; 1509 int ret; 1510 1511 ret = loop_info64_from_compat(arg, &info64); 1512 if (ret < 0) 1513 return ret; 1514 return loop_set_status(lo, &info64); 1515 } 1516 1517 static int 1518 loop_get_status_compat(struct loop_device *lo, 1519 struct compat_loop_info __user *arg) 1520 { 1521 struct loop_info64 info64; 1522 int err = 0; 1523 1524 if (!arg) 1525 err = -EINVAL; 1526 if (!err) 1527 err = loop_get_status(lo, &info64); 1528 if (!err) 1529 err = loop_info64_to_compat(&info64, arg); 1530 return err; 1531 } 1532 1533 static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, 1534 unsigned int cmd, unsigned long arg) 1535 { 1536 struct loop_device *lo = bdev->bd_disk->private_data; 1537 int err; 1538 1539 switch(cmd) { 1540 case LOOP_SET_STATUS: 1541 mutex_lock(&lo->lo_ctl_mutex); 1542 err = loop_set_status_compat( 1543 lo, (const struct compat_loop_info __user *) arg); 1544 mutex_unlock(&lo->lo_ctl_mutex); 1545 break; 1546 case LOOP_GET_STATUS: 1547 mutex_lock(&lo->lo_ctl_mutex); 1548 err = loop_get_status_compat( 1549 lo, (struct compat_loop_info __user *) arg); 1550 mutex_unlock(&lo->lo_ctl_mutex); 1551 break; 1552 case LOOP_SET_CAPACITY: 1553 case LOOP_CLR_FD: 1554 case LOOP_GET_STATUS64: 1555 case LOOP_SET_STATUS64: 1556 arg = (unsigned long) compat_ptr(arg); 1557 case LOOP_SET_FD: 1558 case LOOP_CHANGE_FD: 1559 err = lo_ioctl(bdev, mode, cmd, arg); 1560 break; 1561 default: 1562 err = -ENOIOCTLCMD; 1563 break; 1564 } 1565 return err; 1566 } 1567 #endif 1568 1569 static int lo_open(struct block_device *bdev, fmode_t mode) 1570 { 1571 struct loop_device *lo; 1572 int err = 0; 1573 1574 mutex_lock(&loop_index_mutex); 1575 lo = bdev->bd_disk->private_data; 1576 if (!lo) { 1577 err = -ENXIO; 1578 goto out; 1579 } 1580 1581 atomic_inc(&lo->lo_refcnt); 1582 out: 1583 mutex_unlock(&loop_index_mutex); 1584 return err; 1585 } 1586 1587 static void lo_release(struct gendisk *disk, fmode_t mode) 1588 { 1589 struct loop_device *lo = disk->private_data; 1590 int err; 1591 1592 if (atomic_dec_return(&lo->lo_refcnt)) 1593 return; 1594 1595 mutex_lock(&lo->lo_ctl_mutex); 1596 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { 1597 /* 1598 * In autoclear mode, stop the loop thread 1599 * and remove configuration after last close. 1600 */ 1601 err = loop_clr_fd(lo); 1602 if (!err) 1603 return; 1604 } else { 1605 /* 1606 * Otherwise keep thread (if running) and config, 1607 * but flush possible ongoing bios in thread. 1608 */ 1609 loop_flush(lo); 1610 } 1611 1612 mutex_unlock(&lo->lo_ctl_mutex); 1613 } 1614 1615 static const struct block_device_operations lo_fops = { 1616 .owner = THIS_MODULE, 1617 .open = lo_open, 1618 .release = lo_release, 1619 .ioctl = lo_ioctl, 1620 #ifdef CONFIG_COMPAT 1621 .compat_ioctl = lo_compat_ioctl, 1622 #endif 1623 }; 1624 1625 /* 1626 * And now the modules code and kernel interface. 
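 *
 * Typical usage (hypothetical values): "modprobe loop max_loop=8 max_part=15"
 * pre-creates loop0..loop7 and, through part_shift, reserves minor numbers
 * for up to 15 partitions per device. With the default max_loop=0, only
 * CONFIG_BLK_DEV_LOOP_MIN_COUNT devices are created up front and further
 * devices can be requested through /dev/loop-control.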
1627 */ 1628 static int max_loop; 1629 module_param(max_loop, int, S_IRUGO); 1630 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); 1631 module_param(max_part, int, S_IRUGO); 1632 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); 1633 MODULE_LICENSE("GPL"); 1634 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); 1635 1636 int loop_register_transfer(struct loop_func_table *funcs) 1637 { 1638 unsigned int n = funcs->number; 1639 1640 if (n >= MAX_LO_CRYPT || xfer_funcs[n]) 1641 return -EINVAL; 1642 xfer_funcs[n] = funcs; 1643 return 0; 1644 } 1645 1646 static int unregister_transfer_cb(int id, void *ptr, void *data) 1647 { 1648 struct loop_device *lo = ptr; 1649 struct loop_func_table *xfer = data; 1650 1651 mutex_lock(&lo->lo_ctl_mutex); 1652 if (lo->lo_encryption == xfer) 1653 loop_release_xfer(lo); 1654 mutex_unlock(&lo->lo_ctl_mutex); 1655 return 0; 1656 } 1657 1658 int loop_unregister_transfer(int number) 1659 { 1660 unsigned int n = number; 1661 struct loop_func_table *xfer; 1662 1663 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) 1664 return -EINVAL; 1665 1666 xfer_funcs[n] = NULL; 1667 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); 1668 return 0; 1669 } 1670 1671 EXPORT_SYMBOL(loop_register_transfer); 1672 EXPORT_SYMBOL(loop_unregister_transfer); 1673 1674 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, 1675 const struct blk_mq_queue_data *bd) 1676 { 1677 struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1678 struct loop_device *lo = cmd->rq->q->queuedata; 1679 1680 blk_mq_start_request(bd->rq); 1681 1682 if (lo->lo_state != Lo_bound) 1683 return BLK_STS_IOERR; 1684 1685 switch (req_op(cmd->rq)) { 1686 case REQ_OP_FLUSH: 1687 case REQ_OP_DISCARD: 1688 case REQ_OP_WRITE_ZEROES: 1689 cmd->use_aio = false; 1690 break; 1691 default: 1692 cmd->use_aio = lo->use_dio; 1693 break; 1694 } 1695 1696 kthread_queue_work(&lo->worker, &cmd->work); 1697 1698 return BLK_STS_OK; 1699 } 1700 1701 static void loop_handle_cmd(struct loop_cmd *cmd) 1702 { 1703 const bool write = op_is_write(req_op(cmd->rq)); 1704 struct loop_device *lo = cmd->rq->q->queuedata; 1705 int ret = 0; 1706 1707 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1708 ret = -EIO; 1709 goto failed; 1710 } 1711 1712 ret = do_req_filebacked(lo, cmd->rq); 1713 failed: 1714 /* complete non-aio request */ 1715 if (!cmd->use_aio || ret) { 1716 cmd->ret = ret ? 
-EIO : 0; 1717 blk_mq_complete_request(cmd->rq); 1718 } 1719 } 1720 1721 static void loop_queue_work(struct kthread_work *work) 1722 { 1723 struct loop_cmd *cmd = 1724 container_of(work, struct loop_cmd, work); 1725 1726 loop_handle_cmd(cmd); 1727 } 1728 1729 static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq, 1730 unsigned int hctx_idx, unsigned int numa_node) 1731 { 1732 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 1733 1734 cmd->rq = rq; 1735 kthread_init_work(&cmd->work, loop_queue_work); 1736 1737 return 0; 1738 } 1739 1740 static const struct blk_mq_ops loop_mq_ops = { 1741 .queue_rq = loop_queue_rq, 1742 .init_request = loop_init_request, 1743 .complete = lo_complete_rq, 1744 }; 1745 1746 static int loop_add(struct loop_device **l, int i) 1747 { 1748 struct loop_device *lo; 1749 struct gendisk *disk; 1750 int err; 1751 1752 err = -ENOMEM; 1753 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1754 if (!lo) 1755 goto out; 1756 1757 lo->lo_state = Lo_unbound; 1758 1759 /* allocate id, if @id >= 0, we're requesting that specific id */ 1760 if (i >= 0) { 1761 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); 1762 if (err == -ENOSPC) 1763 err = -EEXIST; 1764 } else { 1765 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); 1766 } 1767 if (err < 0) 1768 goto out_free_dev; 1769 i = err; 1770 1771 err = -ENOMEM; 1772 lo->tag_set.ops = &loop_mq_ops; 1773 lo->tag_set.nr_hw_queues = 1; 1774 lo->tag_set.queue_depth = 128; 1775 lo->tag_set.numa_node = NUMA_NO_NODE; 1776 lo->tag_set.cmd_size = sizeof(struct loop_cmd); 1777 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 1778 lo->tag_set.driver_data = lo; 1779 1780 err = blk_mq_alloc_tag_set(&lo->tag_set); 1781 if (err) 1782 goto out_free_idr; 1783 1784 lo->lo_queue = blk_mq_init_queue(&lo->tag_set); 1785 if (IS_ERR_OR_NULL(lo->lo_queue)) { 1786 err = PTR_ERR(lo->lo_queue); 1787 goto out_cleanup_tags; 1788 } 1789 lo->lo_queue->queuedata = lo; 1790 1791 blk_queue_physical_block_size(lo->lo_queue, PAGE_SIZE); 1792 1793 /* 1794 * It doesn't make sense to enable merge because the I/O 1795 * submitted to backing file is handled page by page. 1796 */ 1797 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); 1798 1799 err = -ENOMEM; 1800 disk = lo->lo_disk = alloc_disk(1 << part_shift); 1801 if (!disk) 1802 goto out_free_queue; 1803 1804 /* 1805 * Disable partition scanning by default. The in-kernel partition 1806 * scanning can be requested individually per-device during its 1807 * setup. Userspace can always add and remove partitions from all 1808 * devices. The needed partition minors are allocated from the 1809 * extended minor space, the main loop device numbers will continue 1810 * to match the loop minors, regardless of the number of partitions 1811 * used. 1812 * 1813 * If max_part is given, partition scanning is globally enabled for 1814 * all loop devices. The minors for the main loop devices will be 1815 * multiples of max_part. 1816 * 1817 * Note: Global-for-all-devices, set-only-at-init, read-only module 1818 * parameteters like 'max_loop' and 'max_part' make things needlessly 1819 * complicated, are too static, inflexible and may surprise 1820 * userspace tools. Parameters like this in general should be avoided. 
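	 *
	 * Worked example of the resulting minor-number layout (assuming
	 * max_part=15, hence part_shift=4): loop0 owns minors 0-15, loop1
	 * owns minors 16-31, and in general loopN starts at minor
	 * N << part_shift, with the remaining minors of each disk available
	 * for its partitions.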
1821 */ 1822 if (!part_shift) 1823 disk->flags |= GENHD_FL_NO_PART_SCAN; 1824 disk->flags |= GENHD_FL_EXT_DEVT; 1825 mutex_init(&lo->lo_ctl_mutex); 1826 atomic_set(&lo->lo_refcnt, 0); 1827 lo->lo_number = i; 1828 spin_lock_init(&lo->lo_lock); 1829 disk->major = LOOP_MAJOR; 1830 disk->first_minor = i << part_shift; 1831 disk->fops = &lo_fops; 1832 disk->private_data = lo; 1833 disk->queue = lo->lo_queue; 1834 sprintf(disk->disk_name, "loop%d", i); 1835 add_disk(disk); 1836 *l = lo; 1837 return lo->lo_number; 1838 1839 out_free_queue: 1840 blk_cleanup_queue(lo->lo_queue); 1841 out_cleanup_tags: 1842 blk_mq_free_tag_set(&lo->tag_set); 1843 out_free_idr: 1844 idr_remove(&loop_index_idr, i); 1845 out_free_dev: 1846 kfree(lo); 1847 out: 1848 return err; 1849 } 1850 1851 static void loop_remove(struct loop_device *lo) 1852 { 1853 blk_cleanup_queue(lo->lo_queue); 1854 del_gendisk(lo->lo_disk); 1855 blk_mq_free_tag_set(&lo->tag_set); 1856 put_disk(lo->lo_disk); 1857 kfree(lo); 1858 } 1859 1860 static int find_free_cb(int id, void *ptr, void *data) 1861 { 1862 struct loop_device *lo = ptr; 1863 struct loop_device **l = data; 1864 1865 if (lo->lo_state == Lo_unbound) { 1866 *l = lo; 1867 return 1; 1868 } 1869 return 0; 1870 } 1871 1872 static int loop_lookup(struct loop_device **l, int i) 1873 { 1874 struct loop_device *lo; 1875 int ret = -ENODEV; 1876 1877 if (i < 0) { 1878 int err; 1879 1880 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); 1881 if (err == 1) { 1882 *l = lo; 1883 ret = lo->lo_number; 1884 } 1885 goto out; 1886 } 1887 1888 /* lookup and return a specific i */ 1889 lo = idr_find(&loop_index_idr, i); 1890 if (lo) { 1891 *l = lo; 1892 ret = lo->lo_number; 1893 } 1894 out: 1895 return ret; 1896 } 1897 1898 static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1899 { 1900 struct loop_device *lo; 1901 struct kobject *kobj; 1902 int err; 1903 1904 mutex_lock(&loop_index_mutex); 1905 err = loop_lookup(&lo, MINOR(dev) >> part_shift); 1906 if (err < 0) 1907 err = loop_add(&lo, MINOR(dev) >> part_shift); 1908 if (err < 0) 1909 kobj = NULL; 1910 else 1911 kobj = get_disk(lo->lo_disk); 1912 mutex_unlock(&loop_index_mutex); 1913 1914 *part = 0; 1915 return kobj; 1916 } 1917 1918 static long loop_control_ioctl(struct file *file, unsigned int cmd, 1919 unsigned long parm) 1920 { 1921 struct loop_device *lo; 1922 int ret = -ENOSYS; 1923 1924 mutex_lock(&loop_index_mutex); 1925 switch (cmd) { 1926 case LOOP_CTL_ADD: 1927 ret = loop_lookup(&lo, parm); 1928 if (ret >= 0) { 1929 ret = -EEXIST; 1930 break; 1931 } 1932 ret = loop_add(&lo, parm); 1933 break; 1934 case LOOP_CTL_REMOVE: 1935 ret = loop_lookup(&lo, parm); 1936 if (ret < 0) 1937 break; 1938 mutex_lock(&lo->lo_ctl_mutex); 1939 if (lo->lo_state != Lo_unbound) { 1940 ret = -EBUSY; 1941 mutex_unlock(&lo->lo_ctl_mutex); 1942 break; 1943 } 1944 if (atomic_read(&lo->lo_refcnt) > 0) { 1945 ret = -EBUSY; 1946 mutex_unlock(&lo->lo_ctl_mutex); 1947 break; 1948 } 1949 lo->lo_disk->private_data = NULL; 1950 mutex_unlock(&lo->lo_ctl_mutex); 1951 idr_remove(&loop_index_idr, lo->lo_number); 1952 loop_remove(lo); 1953 break; 1954 case LOOP_CTL_GET_FREE: 1955 ret = loop_lookup(&lo, -1); 1956 if (ret >= 0) 1957 break; 1958 ret = loop_add(&lo, -1); 1959 } 1960 mutex_unlock(&loop_index_mutex); 1961 1962 return ret; 1963 } 1964 1965 static const struct file_operations loop_ctl_fops = { 1966 .open = nonseekable_open, 1967 .unlocked_ioctl = loop_control_ioctl, 1968 .compat_ioctl = loop_control_ioctl, 1969 .owner = THIS_MODULE, 1970 .llseek = 
noop_llseek, 1971 }; 1972 1973 static struct miscdevice loop_misc = { 1974 .minor = LOOP_CTRL_MINOR, 1975 .name = "loop-control", 1976 .fops = &loop_ctl_fops, 1977 }; 1978 1979 MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); 1980 MODULE_ALIAS("devname:loop-control"); 1981 1982 static int __init loop_init(void) 1983 { 1984 int i, nr; 1985 unsigned long range; 1986 struct loop_device *lo; 1987 int err; 1988 1989 part_shift = 0; 1990 if (max_part > 0) { 1991 part_shift = fls(max_part); 1992 1993 /* 1994 * Adjust max_part according to part_shift as it is exported 1995 * to user space so that user can decide correct minor number 1996 * if [s]he want to create more devices. 1997 * 1998 * Note that -1 is required because partition 0 is reserved 1999 * for the whole disk. 2000 */ 2001 max_part = (1UL << part_shift) - 1; 2002 } 2003 2004 if ((1UL << part_shift) > DISK_MAX_PARTS) { 2005 err = -EINVAL; 2006 goto err_out; 2007 } 2008 2009 if (max_loop > 1UL << (MINORBITS - part_shift)) { 2010 err = -EINVAL; 2011 goto err_out; 2012 } 2013 2014 /* 2015 * If max_loop is specified, create that many devices upfront. 2016 * This also becomes a hard limit. If max_loop is not specified, 2017 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module 2018 * init time. Loop devices can be requested on-demand with the 2019 * /dev/loop-control interface, or be instantiated by accessing 2020 * a 'dead' device node. 2021 */ 2022 if (max_loop) { 2023 nr = max_loop; 2024 range = max_loop << part_shift; 2025 } else { 2026 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; 2027 range = 1UL << MINORBITS; 2028 } 2029 2030 err = misc_register(&loop_misc); 2031 if (err < 0) 2032 goto err_out; 2033 2034 2035 if (register_blkdev(LOOP_MAJOR, "loop")) { 2036 err = -EIO; 2037 goto misc_out; 2038 } 2039 2040 blk_register_region(MKDEV(LOOP_MAJOR, 0), range, 2041 THIS_MODULE, loop_probe, NULL, NULL); 2042 2043 /* pre-create number of devices given by config or max_loop */ 2044 mutex_lock(&loop_index_mutex); 2045 for (i = 0; i < nr; i++) 2046 loop_add(&lo, i); 2047 mutex_unlock(&loop_index_mutex); 2048 2049 printk(KERN_INFO "loop: module loaded\n"); 2050 return 0; 2051 2052 misc_out: 2053 misc_deregister(&loop_misc); 2054 err_out: 2055 return err; 2056 } 2057 2058 static int loop_exit_cb(int id, void *ptr, void *data) 2059 { 2060 struct loop_device *lo = ptr; 2061 2062 loop_remove(lo); 2063 return 0; 2064 } 2065 2066 static void __exit loop_exit(void) 2067 { 2068 unsigned long range; 2069 2070 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 2071 2072 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); 2073 idr_destroy(&loop_index_idr); 2074 2075 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 2076 unregister_blkdev(LOOP_MAJOR, "loop"); 2077 2078 misc_deregister(&loop_misc); 2079 } 2080 2081 module_init(loop_init); 2082 module_exit(loop_exit); 2083 2084 #ifndef MODULE 2085 static int __init max_loop_setup(char *str) 2086 { 2087 max_loop = simple_strtol(str, NULL, 0); 2088 return 1; 2089 } 2090 2091 __setup("max_loop=", max_loop_setup); 2092 #endif 2093
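/*
 * A minimal user-space sketch of exercising this driver (illustration only,
 * not part of the module; error handling is omitted and the image path is an
 * assumption). It asks loop-control for a free device, binds a backing file
 * with LOOP_SET_FD and detaches it again with LOOP_CLR_FD:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/loop.h>
 *
 *	int main(void)
 *	{
 *		int ctrl = open("/dev/loop-control", O_RDWR);
 *		int devnr = ioctl(ctrl, LOOP_CTL_GET_FREE);
 *		int backing = open("/tmp/disk.img", O_RDWR);
 *		char path[32];
 *		int loopfd;
 *
 *		snprintf(path, sizeof(path), "/dev/loop%d", devnr);
 *		loopfd = open(path, O_RDWR);
 *		ioctl(loopfd, LOOP_SET_FD, backing);
 *		// ... use the loop device as an ordinary block device ...
 *		ioctl(loopfd, LOOP_CLR_FD, 0);
 *		close(loopfd);
 *		close(backing);
 *		close(ctrl);
 *		return 0;
 *	}
 */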