/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
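
/*
 * Worked example of the sizing math above (illustrative only, not used by
 * the driver): a 1 GiB backing file attached with offset 4096 and no
 * sizelimit yields 1073741824 - 4096 = 1073737728 usable bytes, which
 * get_size() reports as 1073737728 >> 9 = 2097144 sectors of 512 bytes.
 * A non-zero sizelimit smaller than that would cap the result instead.
 */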

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of the backing device, the logical block size
	 * of the loop device is not smaller than the backing device's,
	 * and the loop device doesn't need to transform the data.
	 *
	 * TODO: the above condition may be relaxed in the future, and
	 * direct I/O may be switched at runtime at that time because most
	 * requests in sane applications should be PAGE_SIZE aligned.
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
		    !(lo->lo_offset & dio_align) &&
		    mapping->a_ops->direct_IO &&
		    !lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The flag of LO_FLAGS_DIRECT_IO is handled similarly to
	 * LO_FLAGS_READ_ONLY: both are set from the kernel, and losetup
	 * will get updated by ioctl(LOOP_GET_STATUS).
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
}
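
/*
 * Concrete illustration of the eligibility check above (not executed
 * anywhere): with a backing file on a device using 512-byte logical blocks,
 * sb_bsize = 512 and dio_align = 511, so any lo_offset that is a multiple
 * of 512 can keep direct I/O enabled.  With a 4096-byte logical block
 * backing device, an lo_offset of 512 fails the (lo_offset & dio_align)
 * test and the loop device silently falls back to buffered I/O.
 */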

static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
		 loff_t logical_blocksize)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	if (unlikely((loff_t)x != size))
		return -EFBIG;
	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
		lo->lo_logical_blocksize = logical_blocksize;
		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
		blk_queue_logical_block_size(lo->lo_queue,
					     lo->lo_logical_blocksize);
	}
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	return 0;
}

static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos, 0);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
			   loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
			     loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
			  loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
			    loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}

static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	int ret;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
out:
	return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
		struct bio *bio = cmd->rq->bio;

		bio_advance(bio, cmd->ret);
		zero_fill_bio(bio);
	}

	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	cmd->ret = ret;
	blk_mq_complete_request(cmd->rq);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct bio_vec *bvec;
	struct bio *bio = cmd->rq->bio;
	struct file *file = lo->lo_backing_file;
	int ret;

	/* nomerge for loop request queue */
	WARN_ON(cmd->rq->bio != cmd->rq->biotail);

	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
		      bio_segments(bio), blk_rq_bytes(cmd->rq));
	/*
	 * This bio may be started from the middle of the 'bvec'
	 * because of bio splitting, so offset from the bvec must
	 * be passed to iov iterator
	 */
	iter.iov_offset = bio->bi_iter.bi_bvec_done;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;

	if (rw == WRITE)
		ret = call_write_iter(file, &cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &cmd->iocb, &iter);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
	return 0;
}
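
/*
 * Illustrative note on the iov_offset handling above: if a bio was split
 * so that this request resumes 4096 bytes into its first bvec, then
 * bio->bi_iter.bi_bvec_done is 4096 and the iterator starts 4096 bytes
 * into that bvec; starting at offset 0 would re-read or re-write data
 * that the earlier split already covered.
 */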

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by an io submit style function like lo_rw_aio(); one blocker
	 * is that lo_read_simple() needs to call flush_dcache_page after
	 * the page is written from the kernel, and it isn't easy to handle
	 * this in an io submit style function which submits all segments
	 * of the req at one time. And direct read IO doesn't need to
	 * run flush_dcache_page().
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		return lo_discard(lo, rq, pos);
	case REQ_OP_WRITE:
		if (lo->transfer)
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (lo->transfer)
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
		break;
	}
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
			lo->use_dio);
}

/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		return;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
	loop_update_dio(lo);
}

/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO, it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;

	w.file = file;

	/* freeze queue and wait for completion of scheduled requests */
	blk_mq_freeze_queue(lo->lo_queue);

	/* do the switch action */
	do_loop_switch(lo, &w);

	/* unfreeze */
	blk_mq_unfreeze_queue(lo->lo_queue);

	return 0;
}

/*
 * Helper to flush the IOs in loop, but keeping loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (lo->lo_state != Lo_bound)
		return 0;
	return loop_switch(lo, NULL);
}

static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)
{
	int rc;

	/*
	 * bd_mutex has been held already in release path, so don't
	 * acquire it if this function is called in such case.
	 *
	 * If the reread partition isn't from release path, lo_refcnt
	 * must be at least one and it can only become zero when the
	 * current holder is released.
	 */
	if (!atomic_read(&lo->lo_refcnt))
		rc = __blkdev_reread_part(bdev);
	else
		rc = blkdev_reread_part(bdev);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);
	return 0;

 out_putf:
	fput(file);
 out:
	return error;
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sprintf(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sprintf(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs = loop_attrs,
};

static int loop_sysfs_init(struct loop_device *lo)
{
	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
				  &loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
			   &loop_attribute_group);
}

static void loop_config_discard(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;
	int lo_bits = 9;

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	if ((!file->f_op->fallocate) ||
	    lo->lo_encrypt_key_size) {
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_max_write_zeroes_sectors(q, 0);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		return;
	}

	q->limits.discard_granularity = inode->i_sb->s_blocksize;
	q->limits.discard_alignment = 0;
	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
		lo_bits = blksize_bits(lo->lo_logical_blocksize);

	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

static void loop_unprepare_queue(struct loop_device *lo)
{
	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);
}

static int loop_kthread_worker_fn(void *worker_ptr)
{
	current->flags |= PF_LESS_THROTTLE;
	return kthread_worker_fn(worker_ptr);
}

static int loop_prepare_queue(struct loop_device *lo)
{
	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(loop_kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
		return -ENOMEM;
	set_user_nice(lo->worker_task, MIN_NICE);
	return 0;
}

static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file *file, *f;
	struct inode *inode;
	struct address_space *mapping;
	unsigned lo_blocksize;
	int lo_flags = 0;
	int error;
	loff_t size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo_flags |= LO_FLAGS_READ_ONLY;

	lo_blocksize = S_ISBLK(inode->i_mode) ?
		inode->i_bdev->bd_block_size : PAGE_SIZE;

	error = -EFBIG;
	size = get_loop_size(lo, file);
	if ((loff_t)(sector_t)size != size)
		goto out_putf;
	error = loop_prepare_queue(lo);
	if (error)
		goto out_putf;

	error = 0;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = false;
	lo->lo_blocksize = lo_blocksize;
	lo->lo_logical_blocksize = 512;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	loop_update_dio(lo);
	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);
	loop_sysfs_init(lo);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
		loop_reread_partitions(lo, bdev);

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	return 0;

 out_putf:
	fput(file);
 out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}

static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

static int loop_clr_fd(struct loop_device *lo)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown when
	 * the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid on
	 * the loopback device whenever they appear. xfstests is notorious for
	 * failing tests because blkid via udev races with a losetup
	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&lo->lo_ctl_mutex);
		return 0;
	}

	if (filp == NULL)
		return -EINVAL;

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_size(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
		loop_reread_partitions(lo, bdev);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	loop_unprepare_queue(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}

static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();
	int lo_flags = lo->lo_flags;

	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	/* I/O need to be drained during transfer transition */
	blk_mq_freeze_queue(lo->lo_queue);

	err = loop_release_xfer(lo);
	if (err)
		goto exit;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		goto exit;

	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
			lo->lo_logical_blocksize = 512;
		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
		if (LO_INFO_BLOCKSIZE(info) != 512 &&
		    LO_INFO_BLOCKSIZE(info) != 1024 &&
		    LO_INFO_BLOCKSIZE(info) != 2048 &&
		    LO_INFO_BLOCKSIZE(info) != 4096)
			return -EINVAL;
		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
			return -EINVAL;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit ||
	    lo->lo_flags != lo_flags ||
	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
				     LO_INFO_BLOCKSIZE(info))) {
			err = -EFBIG;
			goto exit;
		}
	}

	loop_config_discard(lo);

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);

 exit:
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		loop_reread_partitions(lo, lo->lo_device);
	}

	return err;
}
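
/*
 * Minimal userspace sketch of exercising loop_set_status() through
 * LOOP_SET_STATUS64 (illustrative only; error handling omitted and
 * "loop_fd" is assumed to be an already-bound /dev/loopN file descriptor):
 *
 *	#include <linux/loop.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	struct loop_info64 info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.lo_offset = 1 << 20;	// skip a 1 MiB header in the image
 *	ioctl(loop_fd, LOOP_SET_STATUS64, &info);
 */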

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(&file->f_path, &stat,
			    STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

static int loop_set_capacity(struct loop_device *lo)
{
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
				lo->lo_logical_blocksize);
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;
	error = -EINVAL;
 out:
	return error;
}

static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo);
		break;
	case LOOP_SET_DIRECT_IO:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_dio(lo, arg);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&lo->lo_ctl_mutex);

out_unlocked:
	return err;
}

#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};

/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}

/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}

static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_set_status_compat(
			lo, (const struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_GET_STATUS:
		mutex_lock(&lo->lo_ctl_mutex);
		err = loop_get_status_compat(
			lo, (struct compat_loop_info __user *) arg);
		mutex_unlock(&lo->lo_ctl_mutex);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
		arg = (unsigned long) compat_ptr(arg);
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif

static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err = 0;

	mutex_lock(&loop_index_mutex);
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	atomic_inc(&lo->lo_refcnt);
out:
	mutex_unlock(&loop_index_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo = disk->private_data;
	int err;

	if (atomic_dec_return(&lo->lo_refcnt))
		return;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		err = loop_clr_fd(lo);
		if (!err)
			return;
	} else {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		loop_flush(lo);
	}

	mutex_unlock(&lo->lo_ctl_mutex);
}

static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};

/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);

int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);

static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct loop_device *lo = cmd->rq->q->queuedata;

	blk_mq_start_request(bd->rq);

	if (lo->lo_state != Lo_bound)
		return BLK_STS_IOERR;

	switch (req_op(cmd->rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		cmd->use_aio = false;
		break;
	default:
		cmd->use_aio = lo->use_dio;
		break;
	}

	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_STS_OK;
}

static void loop_handle_cmd(struct loop_cmd *cmd)
{
	const bool write = op_is_write(req_op(cmd->rq));
	struct loop_device *lo = cmd->rq->q->queuedata;
	int ret = 0;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
		ret = -EIO;
		goto failed;
	}

	ret = do_req_filebacked(lo, cmd->rq);
 failed:
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		cmd->ret = ret ? -EIO : 0;
		blk_mq_complete_request(cmd->rq);
	}
}

static void loop_queue_work(struct kthread_work *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);
}

static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	kthread_init_work(&cmd->work, loop_queue_work);

	return 0;
}

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq	= loop_queue_rq,
	.init_request	= loop_init_request,
	.complete	= lo_complete_rq,
};

static int loop_add(struct loop_device **l, int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_state = Lo_unbound;

	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	if (err < 0)
		goto out_free_dev;
	i = err;

	err = -ENOMEM;
	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);
	if (err)
		goto out_free_idr;

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR_OR_NULL(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;
	}
	lo->lo_queue->queuedata = lo;

	/*
	 * It doesn't make sense to enable merge because the I/O
	 * submitted to backing file is handled page by page.
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);

	err = -ENOMEM;
	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minor space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static, inflexible and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
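	/*
	 * Worked example of the minor number layout described above
	 * (illustrative): booting with max_part=15 gives part_shift = 4,
	 * so loop3 gets first_minor 3 << 4 = 48 and partitions
	 * loop3p1..loop3p15 use minors 49..63; anything beyond that is
	 * handed out from the extended (GENHD_FL_EXT_DEVT) minor space.
	 */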
	if (!part_shift)
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	mutex_init(&lo->lo_ctl_mutex);
	atomic_set(&lo->lo_refcnt, 0);
	lo->lo_number = i;
	spin_lock_init(&lo->lo_lock);
	disk->major = LOOP_MAJOR;
	disk->first_minor = i << part_shift;
	disk->fops = &lo_fops;
	disk->private_data = lo;
	disk->queue = lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	*l = lo;
	return lo->lo_number;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
out:
	return err;
}

static void loop_remove(struct loop_device *lo)
{
	blk_cleanup_queue(lo->lo_queue);
	del_gendisk(lo->lo_disk);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);
	kfree(lo);
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {
		*l = lo;
		return 1;
	}
	return 0;
}

static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;
	int ret = -ENODEV;

	if (i < 0) {
		int err;

		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
		if (err == 1) {
			*l = lo;
			ret = lo->lo_number;
		}
		goto out;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (lo) {
		*l = lo;
		ret = lo->lo_number;
	}
out:
	return ret;
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_index_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = NULL;
	else
		kobj = get_disk(lo->lo_disk);
	mutex_unlock(&loop_index_mutex);

	*part = 0;
	return kobj;
}

static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret = -ENOSYS;

	mutex_lock(&loop_index_mutex);
	switch (cmd) {
	case LOOP_CTL_ADD:
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		mutex_lock(&lo->lo_ctl_mutex);
		if (lo->lo_state != Lo_unbound) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		if (atomic_read(&lo->lo_refcnt) > 0) {
			ret = -EBUSY;
			mutex_unlock(&lo->lo_ctl_mutex);
			break;
		}
		lo->lo_disk->private_data = NULL;
		mutex_unlock(&lo->lo_ctl_mutex);
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_index_mutex);

	return ret;
}

static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");

static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	err = misc_register(&loop_misc);
	if (err < 0)
		return err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct minor number
		 * if [s]he want to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto misc_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto misc_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_index_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_index_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
	return err;
}

static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);

#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif