xref: /openbmc/linux/drivers/block/loop.c (revision 6c6b6f28)
1 /*
2  *  linux/drivers/block/loop.c
3  *
4  *  Written by Theodore Ts'o, 3/29/93
5  *
6  * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
7  * permitted under the GNU General Public License.
8  *
9  * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
10  * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
11  *
12  * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
13  * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
14  *
15  * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
16  *
17  * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
18  *
19  * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
20  *
21  * Loadable modules and other fixes by AK, 1998
22  *
23  * Make real block number available to downstream transfer functions, enables
24  * CBC (and relatives) mode encryption requiring unique IVs per data block.
25  * Reed H. Petty, rhp@draper.net
26  *
27  * Maximum number of loop devices now dynamic via max_loop module parameter.
28  * Russell Kroll <rkroll@exploits.org> 19990701
29  *
30  * Maximum number of loop devices when compiled-in now selectable by passing
31  * max_loop=<1-255> to the kernel on boot.
32  * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
33  *
34  * Completely rewrite request handling to be make_request_fn style and
35  * non blocking, pushing work to a helper thread. Lots of fixes from
36  * Al Viro too.
37  * Jens Axboe <axboe@suse.de>, Nov 2000
38  *
39  * Support up to 256 loop devices
40  * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
41  *
42  * Support for falling back on the write file operation when the address space
43  * operations write_begin is not available on the backing filesystem.
44  * Anton Altaparmakov, 16 Feb 2005
45  *
46  * Still To Fix:
47  * - Advisory locking is ignored here.
48  * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
49  *
50  */
51 
52 #include <linux/module.h>
53 #include <linux/moduleparam.h>
54 #include <linux/sched.h>
55 #include <linux/fs.h>
56 #include <linux/file.h>
57 #include <linux/stat.h>
58 #include <linux/errno.h>
59 #include <linux/major.h>
60 #include <linux/wait.h>
61 #include <linux/blkdev.h>
62 #include <linux/blkpg.h>
63 #include <linux/init.h>
64 #include <linux/swap.h>
65 #include <linux/slab.h>
66 #include <linux/compat.h>
67 #include <linux/suspend.h>
68 #include <linux/freezer.h>
69 #include <linux/mutex.h>
70 #include <linux/writeback.h>
71 #include <linux/completion.h>
72 #include <linux/highmem.h>
73 #include <linux/kthread.h>
74 #include <linux/splice.h>
75 #include <linux/sysfs.h>
76 #include <linux/miscdevice.h>
77 #include <linux/falloc.h>
78 #include <linux/uio.h>
79 #include "loop.h"
80 
81 #include <linux/uaccess.h>
82 
83 static DEFINE_IDR(loop_index_idr);
84 static DEFINE_MUTEX(loop_index_mutex);
85 
86 static int max_part;
87 static int part_shift;
88 
89 static int transfer_xor(struct loop_device *lo, int cmd,
90 			struct page *raw_page, unsigned raw_off,
91 			struct page *loop_page, unsigned loop_off,
92 			int size, sector_t real_block)
93 {
94 	char *raw_buf = kmap_atomic(raw_page) + raw_off;
95 	char *loop_buf = kmap_atomic(loop_page) + loop_off;
96 	char *in, *out, *key;
97 	int i, keysize;
98 
99 	if (cmd == READ) {
100 		in = raw_buf;
101 		out = loop_buf;
102 	} else {
103 		in = loop_buf;
104 		out = raw_buf;
105 	}
106 
107 	key = lo->lo_encrypt_key;
108 	keysize = lo->lo_encrypt_key_size;
109 	for (i = 0; i < size; i++)
110 		*out++ = *in++ ^ key[(i & 511) % keysize];
111 
112 	kunmap_atomic(loop_buf);
113 	kunmap_atomic(raw_buf);
114 	cond_resched();
115 	return 0;
116 }
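/*
 * Note: transfer_xor() above implements LO_CRYPT_XOR, a simple repeating-key
 * XOR obfuscation (not real encryption).  Because XOR is its own inverse, the
 * same loop serves both READ and WRITE.  A minimal sketch of the core
 * transform, stripped of the kmap/page details (hypothetical helper, not part
 * of this driver):
 *
 *	static void xor_buf(char *dst, const char *src, int size,
 *			    const char *key, int keysize)
 *	{
 *		int i;
 *
 *		for (i = 0; i < size; i++)
 *			dst[i] = src[i] ^ key[(i & 511) % keysize];
 *	}
 *
 * The "& 511" mirrors the driver: the key index wraps within 512 bytes, so
 * the key stream repeats with (at most) a sector-sized period.
 */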
117 
118 static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
119 {
120 	if (unlikely(info->lo_encrypt_key_size <= 0))
121 		return -EINVAL;
122 	return 0;
123 }
124 
125 static struct loop_func_table none_funcs = {
126 	.number = LO_CRYPT_NONE,
127 };
128 
129 static struct loop_func_table xor_funcs = {
130 	.number = LO_CRYPT_XOR,
131 	.transfer = transfer_xor,
132 	.init = xor_init
133 };
134 
135 /* xfer_funcs[0] is special - its release function is never called */
136 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
137 	&none_funcs,
138 	&xor_funcs
139 };
140 
141 static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
142 {
143 	loff_t loopsize;
144 
145 	/* Compute loopsize in bytes */
146 	loopsize = i_size_read(file->f_mapping->host);
147 	if (offset > 0)
148 		loopsize -= offset;
149 	/* offset is beyond i_size, weird but possible */
150 	if (loopsize < 0)
151 		return 0;
152 
153 	if (sizelimit > 0 && sizelimit < loopsize)
154 		loopsize = sizelimit;
155 	/*
156 	 * Unfortunately, if we want to do I/O on the device,
157 	 * the number of 512-byte sectors has to fit into a sector_t.
158 	 */
159 	return loopsize >> 9;
160 }
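/*
 * Worked example for get_size(): with a 1 GiB backing file
 * (i_size = 1073741824), offset = 4096 and sizelimit = 0, loopsize is
 * 1073737728 bytes and get_size() returns 1073737728 >> 9 = 2097144
 * 512-byte sectors.  A non-zero sizelimit simply caps loopsize before the
 * shift.  (Numbers are illustrative only.)
 */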
161 
162 static loff_t get_loop_size(struct loop_device *lo, struct file *file)
163 {
164 	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
165 }
166 
167 static void __loop_update_dio(struct loop_device *lo, bool dio)
168 {
169 	struct file *file = lo->lo_backing_file;
170 	struct address_space *mapping = file->f_mapping;
171 	struct inode *inode = mapping->host;
172 	unsigned short sb_bsize = 0;
173 	unsigned dio_align = 0;
174 	bool use_dio;
175 
176 	if (inode->i_sb->s_bdev) {
177 		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
178 		dio_align = sb_bsize - 1;
179 	}
180 
181 	/*
182 	 * We support direct I/O only if lo_offset is aligned with the
183 	 * logical I/O size of the backing device, the logical block size
184 	 * of the loop device is at least as large as the backing device's,
185 	 * and the loop device does not need a transfer transformation.
186 	 *
187 	 * TODO: the above condition may be loosened in the future, and
188 	 * direct I/O may be switched at runtime then, because most
189 	 * requests from sane applications should be PAGE_SIZE aligned.
190 	 */
191 	if (dio) {
192 		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
193 				!(lo->lo_offset & dio_align) &&
194 				mapping->a_ops->direct_IO &&
195 				!lo->transfer)
196 			use_dio = true;
197 		else
198 			use_dio = false;
199 	} else {
200 		use_dio = false;
201 	}
202 
203 	if (lo->use_dio == use_dio)
204 		return;
205 
206 	/* flush dirty pages before changing direct IO */
207 	vfs_fsync(file, 0);
208 
209 	/*
210 	 * The LO_FLAGS_DIRECT_IO flag is handled like LO_FLAGS_READ_ONLY:
211 	 * both are set by the kernel, and losetup picks up the new value
212 	 * via ioctl(LOOP_GET_STATUS).
213 	 */
214 	blk_mq_freeze_queue(lo->lo_queue);
215 	lo->use_dio = use_dio;
216 	if (use_dio)
217 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
218 	else
219 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
220 	blk_mq_unfreeze_queue(lo->lo_queue);
221 }
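/*
 * Userspace can request direct I/O on an already-bound device with the
 * LOOP_SET_DIRECT_IO ioctl (see loop_set_dio() below); whether it sticks
 * depends on the alignment checks in __loop_update_dio() above.
 * Illustrative sketch (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int fd = open("/dev/loop0", O_RDWR);
 *	ioctl(fd, LOOP_SET_DIRECT_IO, 1UL);	// 1 = enable, 0 = disable
 *
 * The resulting state is reported as LO_FLAGS_DIRECT_IO by LOOP_GET_STATUS
 * and via /sys/block/loop0/loop/dio.
 */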
222 
223 static int
224 figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
225 {
226 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
227 	sector_t x = (sector_t)size;
228 	struct block_device *bdev = lo->lo_device;
229 
230 	if (unlikely((loff_t)x != size))
231 		return -EFBIG;
232 	if (lo->lo_offset != offset)
233 		lo->lo_offset = offset;
234 	if (lo->lo_sizelimit != sizelimit)
235 		lo->lo_sizelimit = sizelimit;
236 	set_capacity(lo->lo_disk, x);
237 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
238 	/* let user-space know about the new size */
239 	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
240 	return 0;
241 }
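/*
 * figure_loop_size() is what ultimately backs the LOOP_SET_CAPACITY ioctl
 * (see loop_set_capacity() below): after growing or shrinking the backing
 * file, userspace can ask the loop device to re-read its size.
 * Illustrative sketch (assumes the backing file was already resized, error
 * handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int fd = open("/dev/loop0", O_RDWR);
 *	ioctl(fd, LOOP_SET_CAPACITY, 0);
 *
 * losetup exposes the same operation as "losetup -c /dev/loop0".
 */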
242 
243 static inline int
244 lo_do_transfer(struct loop_device *lo, int cmd,
245 	       struct page *rpage, unsigned roffs,
246 	       struct page *lpage, unsigned loffs,
247 	       int size, sector_t rblock)
248 {
249 	int ret;
250 
251 	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
252 	if (likely(!ret))
253 		return 0;
254 
255 	printk_ratelimited(KERN_ERR
256 		"loop: Transfer error at byte offset %llu, length %i.\n",
257 		(unsigned long long)rblock << 9, size);
258 	return ret;
259 }
260 
261 static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
262 {
263 	struct iov_iter i;
264 	ssize_t bw;
265 
266 	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
267 
268 	file_start_write(file);
269 	bw = vfs_iter_write(file, &i, ppos, 0);
270 	file_end_write(file);
271 
272 	if (likely(bw ==  bvec->bv_len))
273 		return 0;
274 
275 	printk_ratelimited(KERN_ERR
276 		"loop: Write error at byte offset %llu, length %i.\n",
277 		(unsigned long long)*ppos, bvec->bv_len);
278 	if (bw >= 0)
279 		bw = -EIO;
280 	return bw;
281 }
282 
283 static int lo_write_simple(struct loop_device *lo, struct request *rq,
284 		loff_t pos)
285 {
286 	struct bio_vec bvec;
287 	struct req_iterator iter;
288 	int ret = 0;
289 
290 	rq_for_each_segment(bvec, rq, iter) {
291 		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
292 		if (ret < 0)
293 			break;
294 		cond_resched();
295 	}
296 
297 	return ret;
298 }
299 
300 /*
301  * This is the slow, transforming version that needs to double buffer the
302  * data as it cannot do the transformations in place without having direct
303  * access to the destination pages of the backing file.
304  */
305 static int lo_write_transfer(struct loop_device *lo, struct request *rq,
306 		loff_t pos)
307 {
308 	struct bio_vec bvec, b;
309 	struct req_iterator iter;
310 	struct page *page;
311 	int ret = 0;
312 
313 	page = alloc_page(GFP_NOIO);
314 	if (unlikely(!page))
315 		return -ENOMEM;
316 
317 	rq_for_each_segment(bvec, rq, iter) {
318 		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
319 			bvec.bv_offset, bvec.bv_len, pos >> 9);
320 		if (unlikely(ret))
321 			break;
322 
323 		b.bv_page = page;
324 		b.bv_offset = 0;
325 		b.bv_len = bvec.bv_len;
326 		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
327 		if (ret < 0)
328 			break;
329 	}
330 
331 	__free_page(page);
332 	return ret;
333 }
334 
335 static int lo_read_simple(struct loop_device *lo, struct request *rq,
336 		loff_t pos)
337 {
338 	struct bio_vec bvec;
339 	struct req_iterator iter;
340 	struct iov_iter i;
341 	ssize_t len;
342 
343 	rq_for_each_segment(bvec, rq, iter) {
344 		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
345 		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
346 		if (len < 0)
347 			return len;
348 
349 		flush_dcache_page(bvec.bv_page);
350 
351 		if (len != bvec.bv_len) {
352 			struct bio *bio;
353 
354 			__rq_for_each_bio(bio, rq)
355 				zero_fill_bio(bio);
356 			break;
357 		}
358 		cond_resched();
359 	}
360 
361 	return 0;
362 }
363 
364 static int lo_read_transfer(struct loop_device *lo, struct request *rq,
365 		loff_t pos)
366 {
367 	struct bio_vec bvec, b;
368 	struct req_iterator iter;
369 	struct iov_iter i;
370 	struct page *page;
371 	ssize_t len;
372 	int ret = 0;
373 
374 	page = alloc_page(GFP_NOIO);
375 	if (unlikely(!page))
376 		return -ENOMEM;
377 
378 	rq_for_each_segment(bvec, rq, iter) {
379 		loff_t offset = pos;
380 
381 		b.bv_page = page;
382 		b.bv_offset = 0;
383 		b.bv_len = bvec.bv_len;
384 
385 		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
386 		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
387 		if (len < 0) {
388 			ret = len;
389 			goto out_free_page;
390 		}
391 
392 		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
393 			bvec.bv_offset, len, offset >> 9);
394 		if (ret)
395 			goto out_free_page;
396 
397 		flush_dcache_page(bvec.bv_page);
398 
399 		if (len != bvec.bv_len) {
400 			struct bio *bio;
401 
402 			__rq_for_each_bio(bio, rq)
403 				zero_fill_bio(bio);
404 			break;
405 		}
406 	}
407 
408 	ret = 0;
409 out_free_page:
410 	__free_page(page);
411 	return ret;
412 }
413 
414 static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
415 {
416 	/*
417 	 * We use punch hole to reclaim the free space used by the
418 	 * image a.k.a. discard. However we do not support discard if
419 	 * encryption is enabled, because it may give an attacker
420 	 * useful information.
421 	 */
422 	struct file *file = lo->lo_backing_file;
423 	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
424 	int ret;
425 
426 	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
427 		ret = -EOPNOTSUPP;
428 		goto out;
429 	}
430 
431 	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
432 	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
433 		ret = -EIO;
434  out:
435 	return ret;
436 }
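/*
 * Discard requests reach lo_discard() when something issues REQ_OP_DISCARD
 * or REQ_OP_WRITE_ZEROES against the loop device, e.g. fstrim on a
 * filesystem mounted from it, or BLKDISCARD directly.  Illustrative sketch
 * of the direct route (hypothetical range, error handling omitted):
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	uint64_t range[2] = { 0, 1024 * 1024 };	// start, length in bytes
 *	int fd = open("/dev/loop0", O_RDWR);
 *	ioctl(fd, BLKDISCARD, range);
 *
 * On the backing file this shows up as a punched hole (sparse region).
 */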
437 
438 static int lo_req_flush(struct loop_device *lo, struct request *rq)
439 {
440 	struct file *file = lo->lo_backing_file;
441 	int ret = vfs_fsync(file, 0);
442 	if (unlikely(ret && ret != -EINVAL))
443 		ret = -EIO;
444 
445 	return ret;
446 }
447 
448 static void lo_complete_rq(struct request *rq)
449 {
450 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
451 
452 	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
453 		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
454 		struct bio *bio = cmd->rq->bio;
455 
456 		bio_advance(bio, cmd->ret);
457 		zero_fill_bio(bio);
458 	}
459 
460 	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
461 }
462 
463 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
464 {
465 	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
466 
467 	cmd->ret = ret;
468 	blk_mq_complete_request(cmd->rq);
469 }
470 
471 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
472 		     loff_t pos, bool rw)
473 {
474 	struct iov_iter iter;
475 	struct bio_vec *bvec;
476 	struct bio *bio = cmd->rq->bio;
477 	struct file *file = lo->lo_backing_file;
478 	int ret;
479 
480 	/* nomerge for loop request queue */
481 	WARN_ON(cmd->rq->bio != cmd->rq->biotail);
482 
483 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
484 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
485 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
486 	/*
487 	 * This bio may be started from the middle of the 'bvec'
488 	 * because of bio splitting, so the offset within the bvec must
489 	 * be passed to the iov iterator.
490 	 */
491 	iter.iov_offset = bio->bi_iter.bi_bvec_done;
492 
493 	cmd->iocb.ki_pos = pos;
494 	cmd->iocb.ki_filp = file;
495 	cmd->iocb.ki_complete = lo_rw_aio_complete;
496 	cmd->iocb.ki_flags = IOCB_DIRECT;
497 
498 	if (rw == WRITE)
499 		ret = call_write_iter(file, &cmd->iocb, &iter);
500 	else
501 		ret = call_read_iter(file, &cmd->iocb, &iter);
502 
503 	if (ret != -EIOCBQUEUED)
504 		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
505 	return 0;
506 }
507 
508 static int do_req_filebacked(struct loop_device *lo, struct request *rq)
509 {
510 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
511 	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
512 
513 	/*
514 	 * lo_write_simple and lo_read_simple could in principle be covered
515 	 * by an io-submit style function like lo_rw_aio(); one blocker is
516 	 * that lo_read_simple() needs to call flush_dcache_page() after
517 	 * each page is written from the kernel, which is hard to handle in
518 	 * an io-submit style function that submits all segments of the
519 	 * request at once.  Direct read I/O does not need
520 	 * flush_dcache_page().
521 	 */
522 	switch (req_op(rq)) {
523 	case REQ_OP_FLUSH:
524 		return lo_req_flush(lo, rq);
525 	case REQ_OP_DISCARD:
526 	case REQ_OP_WRITE_ZEROES:
527 		return lo_discard(lo, rq, pos);
528 	case REQ_OP_WRITE:
529 		if (lo->transfer)
530 			return lo_write_transfer(lo, rq, pos);
531 		else if (cmd->use_aio)
532 			return lo_rw_aio(lo, cmd, pos, WRITE);
533 		else
534 			return lo_write_simple(lo, rq, pos);
535 	case REQ_OP_READ:
536 		if (lo->transfer)
537 			return lo_read_transfer(lo, rq, pos);
538 		else if (cmd->use_aio)
539 			return lo_rw_aio(lo, cmd, pos, READ);
540 		else
541 			return lo_read_simple(lo, rq, pos);
542 	default:
543 		WARN_ON_ONCE(1);
544 		return -EIO;
545 		break;
546 	}
547 }
548 
549 struct switch_request {
550 	struct file *file;
551 	struct completion wait;
552 };
553 
554 static inline void loop_update_dio(struct loop_device *lo)
555 {
556 	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
557 			lo->use_dio);
558 }
559 
560 /*
561  * Do the actual switch; called from the BIO completion routine
562  */
563 static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
564 {
565 	struct file *file = p->file;
566 	struct file *old_file = lo->lo_backing_file;
567 	struct address_space *mapping;
568 
569 	/* if no new file, only flush of queued bios requested */
570 	if (!file)
571 		return;
572 
573 	mapping = file->f_mapping;
574 	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
575 	lo->lo_backing_file = file;
576 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
577 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
578 	loop_update_dio(lo);
579 }
580 
581 /*
582  * loop_switch performs the hard work of switching a backing store.
583  * First it needs to flush existing IO, it does this by sending a magic
584  * BIO down the pipe. The completion of this BIO does the actual switch.
585  */
586 static int loop_switch(struct loop_device *lo, struct file *file)
587 {
588 	struct switch_request w;
589 
590 	w.file = file;
591 
592 	/* freeze queue and wait for completion of scheduled requests */
593 	blk_mq_freeze_queue(lo->lo_queue);
594 
595 	/* do the switch action */
596 	do_loop_switch(lo, &w);
597 
598 	/* unfreeze */
599 	blk_mq_unfreeze_queue(lo->lo_queue);
600 
601 	return 0;
602 }
603 
604 /*
605  * Helper to flush the IOs in loop, but keeping loop thread running
606  */
607 static int loop_flush(struct loop_device *lo)
608 {
609 	/* loop not yet configured, no running thread, nothing to flush */
610 	if (lo->lo_state != Lo_bound)
611 		return 0;
612 	return loop_switch(lo, NULL);
613 }
614 
615 static void loop_reread_partitions(struct loop_device *lo,
616 				   struct block_device *bdev)
617 {
618 	int rc;
619 
620 	/*
621 	 * bd_mutex has been held already in release path, so don't
622 	 * acquire it if this function is called in such case.
623 	 *
624 	 * If the reread partition isn't from release path, lo_refcnt
625 	 * must be at least one and it can only become zero when the
626 	 * current holder is released.
627 	 */
628 	if (!atomic_read(&lo->lo_refcnt))
629 		rc = __blkdev_reread_part(bdev);
630 	else
631 		rc = blkdev_reread_part(bdev);
632 	if (rc)
633 		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
634 			__func__, lo->lo_number, lo->lo_file_name, rc);
635 }
636 
637 /*
638  * loop_change_fd switches the backing store of a loopback device to
639  * a new file. This is useful for operating system installers to free up
640  * the original file and in High Availability environments to switch to
641  * an alternative location for the content in case of server meltdown.
642  * This can only work if the loop device is used read-only, and if the
643  * new backing store is the same size and type as the old backing store.
644  */
645 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
646 			  unsigned int arg)
647 {
648 	struct file	*file, *old_file;
649 	struct inode	*inode;
650 	int		error;
651 
652 	error = -ENXIO;
653 	if (lo->lo_state != Lo_bound)
654 		goto out;
655 
656 	/* the loop device has to be read-only */
657 	error = -EINVAL;
658 	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
659 		goto out;
660 
661 	error = -EBADF;
662 	file = fget(arg);
663 	if (!file)
664 		goto out;
665 
666 	inode = file->f_mapping->host;
667 	old_file = lo->lo_backing_file;
668 
669 	error = -EINVAL;
670 
671 	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
672 		goto out_putf;
673 
674 	/* size of the new backing store needs to be the same */
675 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
676 		goto out_putf;
677 
678 	/* and ... switch */
679 	error = loop_switch(lo, file);
680 	if (error)
681 		goto out_putf;
682 
683 	fput(old_file);
684 	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
685 		loop_reread_partitions(lo, bdev);
686 	return 0;
687 
688  out_putf:
689 	fput(file);
690  out:
691 	return error;
692 }
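/*
 * Illustrative LOOP_CHANGE_FD usage (the device must already be bound
 * read-only and the new file must have the same size; hypothetical paths,
 * error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int loop_fd = open("/dev/loop0", O_RDWR);
 *	int new_fd  = open("/path/to/new-backing.img", O_RDWR);
 *	ioctl(loop_fd, LOOP_CHANGE_FD, new_fd);
 *	close(new_fd);		// the driver holds its own reference via fget()
 */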
693 
694 static inline int is_loop_device(struct file *file)
695 {
696 	struct inode *i = file->f_mapping->host;
697 
698 	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
699 }
700 
701 /* loop sysfs attributes */
702 
703 static ssize_t loop_attr_show(struct device *dev, char *page,
704 			      ssize_t (*callback)(struct loop_device *, char *))
705 {
706 	struct gendisk *disk = dev_to_disk(dev);
707 	struct loop_device *lo = disk->private_data;
708 
709 	return callback(lo, page);
710 }
711 
712 #define LOOP_ATTR_RO(_name)						\
713 static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
714 static ssize_t loop_attr_do_show_##_name(struct device *d,		\
715 				struct device_attribute *attr, char *b)	\
716 {									\
717 	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
718 }									\
719 static struct device_attribute loop_attr_##_name =			\
720 	__ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL);
721 
722 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
723 {
724 	ssize_t ret;
725 	char *p = NULL;
726 
727 	spin_lock_irq(&lo->lo_lock);
728 	if (lo->lo_backing_file)
729 		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
730 	spin_unlock_irq(&lo->lo_lock);
731 
732 	if (IS_ERR_OR_NULL(p))
733 		ret = PTR_ERR(p);
734 	else {
735 		ret = strlen(p);
736 		memmove(buf, p, ret);
737 		buf[ret++] = '\n';
738 		buf[ret] = 0;
739 	}
740 
741 	return ret;
742 }
743 
744 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
745 {
746 	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
747 }
748 
749 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
750 {
751 	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
752 }
753 
754 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
755 {
756 	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
757 
758 	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
759 }
760 
761 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
762 {
763 	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
764 
765 	return sprintf(buf, "%s\n", partscan ? "1" : "0");
766 }
767 
768 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
769 {
770 	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
771 
772 	return sprintf(buf, "%s\n", dio ? "1" : "0");
773 }
774 
775 LOOP_ATTR_RO(backing_file);
776 LOOP_ATTR_RO(offset);
777 LOOP_ATTR_RO(sizelimit);
778 LOOP_ATTR_RO(autoclear);
779 LOOP_ATTR_RO(partscan);
780 LOOP_ATTR_RO(dio);
781 
782 static struct attribute *loop_attrs[] = {
783 	&loop_attr_backing_file.attr,
784 	&loop_attr_offset.attr,
785 	&loop_attr_sizelimit.attr,
786 	&loop_attr_autoclear.attr,
787 	&loop_attr_partscan.attr,
788 	&loop_attr_dio.attr,
789 	NULL,
790 };
791 
792 static struct attribute_group loop_attribute_group = {
793 	.name = "loop",
794 	.attrs= loop_attrs,
795 };
796 
797 static int loop_sysfs_init(struct loop_device *lo)
798 {
799 	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
800 				  &loop_attribute_group);
801 }
802 
803 static void loop_sysfs_exit(struct loop_device *lo)
804 {
805 	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
806 			   &loop_attribute_group);
807 }
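/*
 * The attribute group above is created on the disk's kobject, so a bound
 * device can be inspected without ioctls via files such as
 * /sys/block/loop0/loop/backing_file, .../loop/offset, .../loop/sizelimit,
 * .../loop/autoclear, .../loop/partscan and .../loop/dio.  The group exists
 * only between loop_sysfs_init() in loop_set_fd() and loop_sysfs_exit() in
 * loop_clr_fd().
 */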
808 
809 static void loop_config_discard(struct loop_device *lo)
810 {
811 	struct file *file = lo->lo_backing_file;
812 	struct inode *inode = file->f_mapping->host;
813 	struct request_queue *q = lo->lo_queue;
814 
815 	/*
816 	 * We use punch hole to reclaim the free space used by the
817 	 * image a.k.a. discard. However we do not support discard if
818 	 * encryption is enabled, because it may give an attacker
819 	 * useful information.
820 	 */
821 	if ((!file->f_op->fallocate) ||
822 	    lo->lo_encrypt_key_size) {
823 		q->limits.discard_granularity = 0;
824 		q->limits.discard_alignment = 0;
825 		blk_queue_max_discard_sectors(q, 0);
826 		blk_queue_max_write_zeroes_sectors(q, 0);
827 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
828 		return;
829 	}
830 
831 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
832 	q->limits.discard_alignment = 0;
833 
834 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
835 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
836 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
837 }
838 
839 static void loop_unprepare_queue(struct loop_device *lo)
840 {
841 	kthread_flush_worker(&lo->worker);
842 	kthread_stop(lo->worker_task);
843 }
844 
845 static int loop_kthread_worker_fn(void *worker_ptr)
846 {
847 	current->flags |= PF_LESS_THROTTLE;
848 	return kthread_worker_fn(worker_ptr);
849 }
850 
851 static int loop_prepare_queue(struct loop_device *lo)
852 {
853 	kthread_init_worker(&lo->worker);
854 	lo->worker_task = kthread_run(loop_kthread_worker_fn,
855 			&lo->worker, "loop%d", lo->lo_number);
856 	if (IS_ERR(lo->worker_task))
857 		return -ENOMEM;
858 	set_user_nice(lo->worker_task, MIN_NICE);
859 	return 0;
860 }
861 
862 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
863 		       struct block_device *bdev, unsigned int arg)
864 {
865 	struct file	*file, *f;
866 	struct inode	*inode;
867 	struct address_space *mapping;
868 	int		lo_flags = 0;
869 	int		error;
870 	loff_t		size;
871 
872 	/* This is safe, since we have a reference from open(). */
873 	__module_get(THIS_MODULE);
874 
875 	error = -EBADF;
876 	file = fget(arg);
877 	if (!file)
878 		goto out;
879 
880 	error = -EBUSY;
881 	if (lo->lo_state != Lo_unbound)
882 		goto out_putf;
883 
884 	/* Avoid recursion */
885 	f = file;
886 	while (is_loop_device(f)) {
887 		struct loop_device *l;
888 
889 		if (f->f_mapping->host->i_bdev == bdev)
890 			goto out_putf;
891 
892 		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
893 		if (l->lo_state == Lo_unbound) {
894 			error = -EINVAL;
895 			goto out_putf;
896 		}
897 		f = l->lo_backing_file;
898 	}
899 
900 	mapping = file->f_mapping;
901 	inode = mapping->host;
902 
903 	error = -EINVAL;
904 	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
905 		goto out_putf;
906 
907 	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
908 	    !file->f_op->write_iter)
909 		lo_flags |= LO_FLAGS_READ_ONLY;
910 
911 	error = -EFBIG;
912 	size = get_loop_size(lo, file);
913 	if ((loff_t)(sector_t)size != size)
914 		goto out_putf;
915 	error = loop_prepare_queue(lo);
916 	if (error)
917 		goto out_putf;
918 
919 	error = 0;
920 
921 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
922 
923 	lo->use_dio = false;
924 	lo->lo_device = bdev;
925 	lo->lo_flags = lo_flags;
926 	lo->lo_backing_file = file;
927 	lo->transfer = NULL;
928 	lo->ioctl = NULL;
929 	lo->lo_sizelimit = 0;
930 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
931 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
932 
933 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
934 		blk_queue_write_cache(lo->lo_queue, true, false);
935 
936 	loop_update_dio(lo);
937 	set_capacity(lo->lo_disk, size);
938 	bd_set_size(bdev, size << 9);
939 	loop_sysfs_init(lo);
940 	/* let user-space know about the new size */
941 	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
942 
943 	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
944 		      block_size(inode->i_bdev) : PAGE_SIZE);
945 
946 	lo->lo_state = Lo_bound;
947 	if (part_shift)
948 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
949 	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
950 		loop_reread_partitions(lo, bdev);
951 
952 	/* Grab the block_device to prevent its destruction after we
953 	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
954 	 */
955 	bdgrab(bdev);
956 	return 0;
957 
958  out_putf:
959 	fput(file);
960  out:
961 	/* This is safe: open() is still holding a reference. */
962 	module_put(THIS_MODULE);
963 	return error;
964 }
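/*
 * Illustrative sketch of how userspace binds a backing file, which is what
 * "losetup /dev/loop0 file.img" boils down to (hypothetical path, error
 * handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int file_fd = open("/var/tmp/test.img", O_RDWR);
 *	int loop_fd = open("/dev/loop0", O_RDWR);
 *	ioctl(loop_fd, LOOP_SET_FD, file_fd);
 *	close(file_fd);		// loop_set_fd() took its own reference via fget()
 *
 * The device is torn down again with ioctl(loop_fd, LOOP_CLR_FD, 0), handled
 * by loop_clr_fd() below.
 */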
965 
966 static int
967 loop_release_xfer(struct loop_device *lo)
968 {
969 	int err = 0;
970 	struct loop_func_table *xfer = lo->lo_encryption;
971 
972 	if (xfer) {
973 		if (xfer->release)
974 			err = xfer->release(lo);
975 		lo->transfer = NULL;
976 		lo->lo_encryption = NULL;
977 		module_put(xfer->owner);
978 	}
979 	return err;
980 }
981 
982 static int
983 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
984 	       const struct loop_info64 *i)
985 {
986 	int err = 0;
987 
988 	if (xfer) {
989 		struct module *owner = xfer->owner;
990 
991 		if (!try_module_get(owner))
992 			return -EINVAL;
993 		if (xfer->init)
994 			err = xfer->init(lo, i);
995 		if (err)
996 			module_put(owner);
997 		else
998 			lo->lo_encryption = xfer;
999 	}
1000 	return err;
1001 }
1002 
1003 static int loop_clr_fd(struct loop_device *lo)
1004 {
1005 	struct file *filp = lo->lo_backing_file;
1006 	gfp_t gfp = lo->old_gfp_mask;
1007 	struct block_device *bdev = lo->lo_device;
1008 
1009 	if (lo->lo_state != Lo_bound)
1010 		return -ENXIO;
1011 
1012 	/*
1013 	 * If we've explicitly asked to tear down the loop device,
1014 	 * and it has an elevated reference count, set it for auto-teardown when
1015 	 * the last reference goes away. This stops $!~#$@ udev from
1016 	 * preventing teardown because it decided that it needs to run blkid on
1017 	 * the loopback device whenever one appears. xfstests is notorious for
1018 	 * failing tests because blkid run via udev races with a "losetup <dev>;
1019 	 * mkfs; losetup -d <dev>" sequence, causing the losetup -d
1020 	 * command to fail with EBUSY.
1021 	 */
1022 	if (atomic_read(&lo->lo_refcnt) > 1) {
1023 		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1024 		mutex_unlock(&lo->lo_ctl_mutex);
1025 		return 0;
1026 	}
1027 
1028 	if (filp == NULL)
1029 		return -EINVAL;
1030 
1031 	/* freeze request queue during the transition */
1032 	blk_mq_freeze_queue(lo->lo_queue);
1033 
1034 	spin_lock_irq(&lo->lo_lock);
1035 	lo->lo_state = Lo_rundown;
1036 	lo->lo_backing_file = NULL;
1037 	spin_unlock_irq(&lo->lo_lock);
1038 
1039 	loop_release_xfer(lo);
1040 	lo->transfer = NULL;
1041 	lo->ioctl = NULL;
1042 	lo->lo_device = NULL;
1043 	lo->lo_encryption = NULL;
1044 	lo->lo_offset = 0;
1045 	lo->lo_sizelimit = 0;
1046 	lo->lo_encrypt_key_size = 0;
1047 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1048 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1049 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1050 	if (bdev) {
1051 		bdput(bdev);
1052 		invalidate_bdev(bdev);
1053 	}
1054 	set_capacity(lo->lo_disk, 0);
1055 	loop_sysfs_exit(lo);
1056 	if (bdev) {
1057 		bd_set_size(bdev, 0);
1058 		/* let user-space know about this change */
1059 		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1060 	}
1061 	mapping_set_gfp_mask(filp->f_mapping, gfp);
1062 	lo->lo_state = Lo_unbound;
1063 	/* This is safe: open() is still holding a reference. */
1064 	module_put(THIS_MODULE);
1065 	blk_mq_unfreeze_queue(lo->lo_queue);
1066 
1067 	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
1068 		loop_reread_partitions(lo, bdev);
1069 	lo->lo_flags = 0;
1070 	if (!part_shift)
1071 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1072 	loop_unprepare_queue(lo);
1073 	mutex_unlock(&lo->lo_ctl_mutex);
1074 	/*
1075 	 * Need not hold lo_ctl_mutex to fput backing file.
1076 	 * Calling fput holding lo_ctl_mutex triggers a circular
1077 	 * lock dependency possibility warning as fput can take
1078 	 * bd_mutex which is usually taken before lo_ctl_mutex.
1079 	 */
1080 	fput(filp);
1081 	return 0;
1082 }
1083 
1084 static int
1085 loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1086 {
1087 	int err;
1088 	struct loop_func_table *xfer;
1089 	kuid_t uid = current_uid();
1090 
1091 	if (lo->lo_encrypt_key_size &&
1092 	    !uid_eq(lo->lo_key_owner, uid) &&
1093 	    !capable(CAP_SYS_ADMIN))
1094 		return -EPERM;
1095 	if (lo->lo_state != Lo_bound)
1096 		return -ENXIO;
1097 	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1098 		return -EINVAL;
1099 
1100 	/* I/O needs to be drained during the transfer transition */
1101 	blk_mq_freeze_queue(lo->lo_queue);
1102 
1103 	err = loop_release_xfer(lo);
1104 	if (err)
1105 		goto exit;
1106 
1107 	if (info->lo_encrypt_type) {
1108 		unsigned int type = info->lo_encrypt_type;
1109 
1110 		/* don't return with lo_queue still frozen */
1111 		if (type >= MAX_LO_CRYPT || !(xfer = xfer_funcs[type])) {
1112 			err = -EINVAL;
1113 			goto exit;
1114 		}
1115 	} else
1116 		xfer = NULL;
1117 
1118 	err = loop_init_xfer(lo, xfer, info);
1119 	if (err)
1120 		goto exit;
1121 
1122 	if (lo->lo_offset != info->lo_offset ||
1123 	    lo->lo_sizelimit != info->lo_sizelimit) {
1124 		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
1125 			err = -EFBIG;
1126 			goto exit;
1127 		}
1128 	}
1129 
1130 	loop_config_discard(lo);
1131 
1132 	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1133 	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1134 	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1135 	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1136 
1137 	if (!xfer)
1138 		xfer = &none_funcs;
1139 	lo->transfer = xfer->transfer;
1140 	lo->ioctl = xfer->ioctl;
1141 
1142 	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
1143 	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
1144 		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
1145 
1146 	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1147 	lo->lo_init[0] = info->lo_init[0];
1148 	lo->lo_init[1] = info->lo_init[1];
1149 	if (info->lo_encrypt_key_size) {
1150 		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1151 		       info->lo_encrypt_key_size);
1152 		lo->lo_key_owner = uid;
1153 	}
1154 
1155 	/* update dio if lo_offset or transfer is changed */
1156 	__loop_update_dio(lo, lo->use_dio);
1157 
1158  exit:
1159 	blk_mq_unfreeze_queue(lo->lo_queue);
1160 
1161 	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
1162 	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
1163 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
1164 		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1165 		loop_reread_partitions(lo, lo->lo_device);
1166 	}
1167 
1168 	return err;
1169 }
1170 
1171 static int
1172 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1173 {
1174 	struct file *file = lo->lo_backing_file;
1175 	struct kstat stat;
1176 	int error;
1177 
1178 	if (lo->lo_state != Lo_bound)
1179 		return -ENXIO;
1180 	error = vfs_getattr(&file->f_path, &stat,
1181 			    STATX_INO, AT_STATX_SYNC_AS_STAT);
1182 	if (error)
1183 		return error;
1184 	memset(info, 0, sizeof(*info));
1185 	info->lo_number = lo->lo_number;
1186 	info->lo_device = huge_encode_dev(stat.dev);
1187 	info->lo_inode = stat.ino;
1188 	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1189 	info->lo_offset = lo->lo_offset;
1190 	info->lo_sizelimit = lo->lo_sizelimit;
1191 	info->lo_flags = lo->lo_flags;
1192 	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1193 	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1194 	info->lo_encrypt_type =
1195 		lo->lo_encryption ? lo->lo_encryption->number : 0;
1196 	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1197 		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1198 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1199 		       lo->lo_encrypt_key_size);
1200 	}
1201 	return 0;
1202 }
1203 
1204 static void
1205 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1206 {
1207 	memset(info64, 0, sizeof(*info64));
1208 	info64->lo_number = info->lo_number;
1209 	info64->lo_device = info->lo_device;
1210 	info64->lo_inode = info->lo_inode;
1211 	info64->lo_rdevice = info->lo_rdevice;
1212 	info64->lo_offset = info->lo_offset;
1213 	info64->lo_sizelimit = 0;
1214 	info64->lo_encrypt_type = info->lo_encrypt_type;
1215 	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1216 	info64->lo_flags = info->lo_flags;
1217 	info64->lo_init[0] = info->lo_init[0];
1218 	info64->lo_init[1] = info->lo_init[1];
1219 	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1220 		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1221 	else
1222 		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1223 	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1224 }
1225 
1226 static int
1227 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1228 {
1229 	memset(info, 0, sizeof(*info));
1230 	info->lo_number = info64->lo_number;
1231 	info->lo_device = info64->lo_device;
1232 	info->lo_inode = info64->lo_inode;
1233 	info->lo_rdevice = info64->lo_rdevice;
1234 	info->lo_offset = info64->lo_offset;
1235 	info->lo_encrypt_type = info64->lo_encrypt_type;
1236 	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1237 	info->lo_flags = info64->lo_flags;
1238 	info->lo_init[0] = info64->lo_init[0];
1239 	info->lo_init[1] = info64->lo_init[1];
1240 	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1241 		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1242 	else
1243 		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1244 	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1245 
1246 	/* error in case values were truncated */
1247 	if (info->lo_device != info64->lo_device ||
1248 	    info->lo_rdevice != info64->lo_rdevice ||
1249 	    info->lo_inode != info64->lo_inode ||
1250 	    info->lo_offset != info64->lo_offset)
1251 		return -EOVERFLOW;
1252 
1253 	return 0;
1254 }
1255 
1256 static int
1257 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1258 {
1259 	struct loop_info info;
1260 	struct loop_info64 info64;
1261 
1262 	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1263 		return -EFAULT;
1264 	loop_info64_from_old(&info, &info64);
1265 	return loop_set_status(lo, &info64);
1266 }
1267 
1268 static int
1269 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1270 {
1271 	struct loop_info64 info64;
1272 
1273 	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1274 		return -EFAULT;
1275 	return loop_set_status(lo, &info64);
1276 }
1277 
1278 static int
1279 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1280 	struct loop_info info;
1281 	struct loop_info64 info64;
1282 	int err = 0;
1283 
1284 	if (!arg)
1285 		err = -EINVAL;
1286 	if (!err)
1287 		err = loop_get_status(lo, &info64);
1288 	if (!err)
1289 		err = loop_info64_to_old(&info64, &info);
1290 	if (!err && copy_to_user(arg, &info, sizeof(info)))
1291 		err = -EFAULT;
1292 
1293 	return err;
1294 }
1295 
1296 static int
1297 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1298 	struct loop_info64 info64;
1299 	int err = 0;
1300 
1301 	if (!arg)
1302 		err = -EINVAL;
1303 	if (!err)
1304 		err = loop_get_status(lo, &info64);
1305 	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1306 		err = -EFAULT;
1307 
1308 	return err;
1309 }
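/*
 * Illustrative sketch of LOOP_SET_STATUS64, e.g. to expose only part of the
 * backing file by setting an offset after LOOP_SET_FD (hypothetical values,
 * error handling omitted; loop_fd is the /dev/loopN descriptor from the
 * LOOP_SET_FD sketch above):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	struct loop_info64 info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.lo_offset = 1024 * 1024;		// skip a 1 MiB header
 *	strncpy((char *)info.lo_file_name, "test.img", LO_NAME_SIZE - 1);
 *	ioctl(loop_fd, LOOP_SET_STATUS64, &info);
 *
 * loop_set_status() above then re-runs figure_loop_size() because lo_offset
 * changed, and LOOP_GET_STATUS64 reads the same structure back.
 */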
1310 
1311 static int loop_set_capacity(struct loop_device *lo)
1312 {
1313 	if (unlikely(lo->lo_state != Lo_bound))
1314 		return -ENXIO;
1315 
1316 	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
1317 }
1318 
1319 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1320 {
1321 	int error = -ENXIO;
1322 	if (lo->lo_state != Lo_bound)
1323 		goto out;
1324 
1325 	__loop_update_dio(lo, !!arg);
1326 	if (lo->use_dio == !!arg)
1327 		return 0;
1328 	error = -EINVAL;
1329  out:
1330 	return error;
1331 }
1332 
1333 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1334 	unsigned int cmd, unsigned long arg)
1335 {
1336 	struct loop_device *lo = bdev->bd_disk->private_data;
1337 	int err;
1338 
1339 	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
1340 	switch (cmd) {
1341 	case LOOP_SET_FD:
1342 		err = loop_set_fd(lo, mode, bdev, arg);
1343 		break;
1344 	case LOOP_CHANGE_FD:
1345 		err = loop_change_fd(lo, bdev, arg);
1346 		break;
1347 	case LOOP_CLR_FD:
1348 		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
1349 		err = loop_clr_fd(lo);
1350 		if (!err)
1351 			goto out_unlocked;
1352 		break;
1353 	case LOOP_SET_STATUS:
1354 		err = -EPERM;
1355 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1356 			err = loop_set_status_old(lo,
1357 					(struct loop_info __user *)arg);
1358 		break;
1359 	case LOOP_GET_STATUS:
1360 		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1361 		break;
1362 	case LOOP_SET_STATUS64:
1363 		err = -EPERM;
1364 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1365 			err = loop_set_status64(lo,
1366 					(struct loop_info64 __user *) arg);
1367 		break;
1368 	case LOOP_GET_STATUS64:
1369 		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1370 		break;
1371 	case LOOP_SET_CAPACITY:
1372 		err = -EPERM;
1373 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1374 			err = loop_set_capacity(lo);
1375 		break;
1376 	case LOOP_SET_DIRECT_IO:
1377 		err = -EPERM;
1378 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
1379 			err = loop_set_dio(lo, arg);
1380 		break;
1381 	default:
1382 		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1383 	}
1384 	mutex_unlock(&lo->lo_ctl_mutex);
1385 
1386 out_unlocked:
1387 	return err;
1388 }
1389 
1390 #ifdef CONFIG_COMPAT
1391 struct compat_loop_info {
1392 	compat_int_t	lo_number;      /* ioctl r/o */
1393 	compat_dev_t	lo_device;      /* ioctl r/o */
1394 	compat_ulong_t	lo_inode;       /* ioctl r/o */
1395 	compat_dev_t	lo_rdevice;     /* ioctl r/o */
1396 	compat_int_t	lo_offset;
1397 	compat_int_t	lo_encrypt_type;
1398 	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
1399 	compat_int_t	lo_flags;       /* ioctl r/o */
1400 	char		lo_name[LO_NAME_SIZE];
1401 	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1402 	compat_ulong_t	lo_init[2];
1403 	char		reserved[4];
1404 };
1405 
1406 /*
1407  * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1408  * - noinlined to reduce stack space usage in main part of driver
1409  */
1410 static noinline int
1411 loop_info64_from_compat(const struct compat_loop_info __user *arg,
1412 			struct loop_info64 *info64)
1413 {
1414 	struct compat_loop_info info;
1415 
1416 	if (copy_from_user(&info, arg, sizeof(info)))
1417 		return -EFAULT;
1418 
1419 	memset(info64, 0, sizeof(*info64));
1420 	info64->lo_number = info.lo_number;
1421 	info64->lo_device = info.lo_device;
1422 	info64->lo_inode = info.lo_inode;
1423 	info64->lo_rdevice = info.lo_rdevice;
1424 	info64->lo_offset = info.lo_offset;
1425 	info64->lo_sizelimit = 0;
1426 	info64->lo_encrypt_type = info.lo_encrypt_type;
1427 	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1428 	info64->lo_flags = info.lo_flags;
1429 	info64->lo_init[0] = info.lo_init[0];
1430 	info64->lo_init[1] = info.lo_init[1];
1431 	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1432 		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1433 	else
1434 		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1435 	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1436 	return 0;
1437 }
1438 
1439 /*
1440  * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1441  * - noinlined to reduce stack space usage in main part of driver
1442  */
1443 static noinline int
1444 loop_info64_to_compat(const struct loop_info64 *info64,
1445 		      struct compat_loop_info __user *arg)
1446 {
1447 	struct compat_loop_info info;
1448 
1449 	memset(&info, 0, sizeof(info));
1450 	info.lo_number = info64->lo_number;
1451 	info.lo_device = info64->lo_device;
1452 	info.lo_inode = info64->lo_inode;
1453 	info.lo_rdevice = info64->lo_rdevice;
1454 	info.lo_offset = info64->lo_offset;
1455 	info.lo_encrypt_type = info64->lo_encrypt_type;
1456 	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1457 	info.lo_flags = info64->lo_flags;
1458 	info.lo_init[0] = info64->lo_init[0];
1459 	info.lo_init[1] = info64->lo_init[1];
1460 	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1461 		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1462 	else
1463 		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1464 	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1465 
1466 	/* error in case values were truncated */
1467 	if (info.lo_device != info64->lo_device ||
1468 	    info.lo_rdevice != info64->lo_rdevice ||
1469 	    info.lo_inode != info64->lo_inode ||
1470 	    info.lo_offset != info64->lo_offset ||
1471 	    info.lo_init[0] != info64->lo_init[0] ||
1472 	    info.lo_init[1] != info64->lo_init[1])
1473 		return -EOVERFLOW;
1474 
1475 	if (copy_to_user(arg, &info, sizeof(info)))
1476 		return -EFAULT;
1477 	return 0;
1478 }
1479 
1480 static int
1481 loop_set_status_compat(struct loop_device *lo,
1482 		       const struct compat_loop_info __user *arg)
1483 {
1484 	struct loop_info64 info64;
1485 	int ret;
1486 
1487 	ret = loop_info64_from_compat(arg, &info64);
1488 	if (ret < 0)
1489 		return ret;
1490 	return loop_set_status(lo, &info64);
1491 }
1492 
1493 static int
1494 loop_get_status_compat(struct loop_device *lo,
1495 		       struct compat_loop_info __user *arg)
1496 {
1497 	struct loop_info64 info64;
1498 	int err = 0;
1499 
1500 	if (!arg)
1501 		err = -EINVAL;
1502 	if (!err)
1503 		err = loop_get_status(lo, &info64);
1504 	if (!err)
1505 		err = loop_info64_to_compat(&info64, arg);
1506 	return err;
1507 }
1508 
1509 static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1510 			   unsigned int cmd, unsigned long arg)
1511 {
1512 	struct loop_device *lo = bdev->bd_disk->private_data;
1513 	int err;
1514 
1515 	switch(cmd) {
1516 	case LOOP_SET_STATUS:
1517 		mutex_lock(&lo->lo_ctl_mutex);
1518 		err = loop_set_status_compat(
1519 			lo, (const struct compat_loop_info __user *) arg);
1520 		mutex_unlock(&lo->lo_ctl_mutex);
1521 		break;
1522 	case LOOP_GET_STATUS:
1523 		mutex_lock(&lo->lo_ctl_mutex);
1524 		err = loop_get_status_compat(
1525 			lo, (struct compat_loop_info __user *) arg);
1526 		mutex_unlock(&lo->lo_ctl_mutex);
1527 		break;
1528 	case LOOP_SET_CAPACITY:
1529 	case LOOP_CLR_FD:
1530 	case LOOP_GET_STATUS64:
1531 	case LOOP_SET_STATUS64:
1532 		arg = (unsigned long) compat_ptr(arg);
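		/* fall through: pointer argument already converted */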
1533 	case LOOP_SET_FD:
1534 	case LOOP_CHANGE_FD:
1535 		err = lo_ioctl(bdev, mode, cmd, arg);
1536 		break;
1537 	default:
1538 		err = -ENOIOCTLCMD;
1539 		break;
1540 	}
1541 	return err;
1542 }
1543 #endif
1544 
1545 static int lo_open(struct block_device *bdev, fmode_t mode)
1546 {
1547 	struct loop_device *lo;
1548 	int err = 0;
1549 
1550 	mutex_lock(&loop_index_mutex);
1551 	lo = bdev->bd_disk->private_data;
1552 	if (!lo) {
1553 		err = -ENXIO;
1554 		goto out;
1555 	}
1556 
1557 	atomic_inc(&lo->lo_refcnt);
1558 out:
1559 	mutex_unlock(&loop_index_mutex);
1560 	return err;
1561 }
1562 
1563 static void lo_release(struct gendisk *disk, fmode_t mode)
1564 {
1565 	struct loop_device *lo = disk->private_data;
1566 	int err;
1567 
1568 	if (atomic_dec_return(&lo->lo_refcnt))
1569 		return;
1570 
1571 	mutex_lock(&lo->lo_ctl_mutex);
1572 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1573 		/*
1574 		 * In autoclear mode, stop the loop thread
1575 		 * and remove configuration after last close.
1576 		 */
1577 		err = loop_clr_fd(lo);
1578 		if (!err)
1579 			return;
1580 	} else {
1581 		/*
1582 		 * Otherwise keep thread (if running) and config,
1583 		 * but flush possible ongoing bios in thread.
1584 		 */
1585 		loop_flush(lo);
1586 	}
1587 
1588 	mutex_unlock(&lo->lo_ctl_mutex);
1589 }
1590 
1591 static const struct block_device_operations lo_fops = {
1592 	.owner =	THIS_MODULE,
1593 	.open =		lo_open,
1594 	.release =	lo_release,
1595 	.ioctl =	lo_ioctl,
1596 #ifdef CONFIG_COMPAT
1597 	.compat_ioctl =	lo_compat_ioctl,
1598 #endif
1599 };
1600 
1601 /*
1602  * And now the modules code and kernel interface.
1603  */
1604 static int max_loop;
1605 module_param(max_loop, int, S_IRUGO);
1606 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1607 module_param(max_part, int, S_IRUGO);
1608 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1609 MODULE_LICENSE("GPL");
1610 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
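/*
 * Both parameters are read-only at runtime (S_IRUGO) and therefore have to
 * be given at load time, e.g. "modprobe loop max_loop=8 max_part=15", or for
 * a built-in driver "max_loop=8" on the kernel command line (see
 * max_loop_setup() at the end of this file).  max_part is rounded to a
 * power-of-two minus one in loop_init().
 */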
1611 
1612 int loop_register_transfer(struct loop_func_table *funcs)
1613 {
1614 	unsigned int n = funcs->number;
1615 
1616 	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1617 		return -EINVAL;
1618 	xfer_funcs[n] = funcs;
1619 	return 0;
1620 }
1621 
1622 static int unregister_transfer_cb(int id, void *ptr, void *data)
1623 {
1624 	struct loop_device *lo = ptr;
1625 	struct loop_func_table *xfer = data;
1626 
1627 	mutex_lock(&lo->lo_ctl_mutex);
1628 	if (lo->lo_encryption == xfer)
1629 		loop_release_xfer(lo);
1630 	mutex_unlock(&lo->lo_ctl_mutex);
1631 	return 0;
1632 }
1633 
1634 int loop_unregister_transfer(int number)
1635 {
1636 	unsigned int n = number;
1637 	struct loop_func_table *xfer;
1638 
1639 	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1640 		return -EINVAL;
1641 
1642 	xfer_funcs[n] = NULL;
1643 	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1644 	return 0;
1645 }
1646 
1647 EXPORT_SYMBOL(loop_register_transfer);
1648 EXPORT_SYMBOL(loop_unregister_transfer);
1649 
1650 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1651 		const struct blk_mq_queue_data *bd)
1652 {
1653 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1654 	struct loop_device *lo = cmd->rq->q->queuedata;
1655 
1656 	blk_mq_start_request(bd->rq);
1657 
1658 	if (lo->lo_state != Lo_bound)
1659 		return BLK_STS_IOERR;
1660 
1661 	switch (req_op(cmd->rq)) {
1662 	case REQ_OP_FLUSH:
1663 	case REQ_OP_DISCARD:
1664 	case REQ_OP_WRITE_ZEROES:
1665 		cmd->use_aio = false;
1666 		break;
1667 	default:
1668 		cmd->use_aio = lo->use_dio;
1669 		break;
1670 	}
1671 
1672 	kthread_queue_work(&lo->worker, &cmd->work);
1673 
1674 	return BLK_STS_OK;
1675 }
1676 
1677 static void loop_handle_cmd(struct loop_cmd *cmd)
1678 {
1679 	const bool write = op_is_write(req_op(cmd->rq));
1680 	struct loop_device *lo = cmd->rq->q->queuedata;
1681 	int ret = 0;
1682 
1683 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
1684 		ret = -EIO;
1685 		goto failed;
1686 	}
1687 
1688 	ret = do_req_filebacked(lo, cmd->rq);
1689  failed:
1690 	/* complete non-aio request */
1691 	if (!cmd->use_aio || ret) {
1692 		cmd->ret = ret ? -EIO : 0;
1693 		blk_mq_complete_request(cmd->rq);
1694 	}
1695 }
1696 
1697 static void loop_queue_work(struct kthread_work *work)
1698 {
1699 	struct loop_cmd *cmd =
1700 		container_of(work, struct loop_cmd, work);
1701 
1702 	loop_handle_cmd(cmd);
1703 }
1704 
1705 static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
1706 		unsigned int hctx_idx, unsigned int numa_node)
1707 {
1708 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1709 
1710 	cmd->rq = rq;
1711 	kthread_init_work(&cmd->work, loop_queue_work);
1712 
1713 	return 0;
1714 }
1715 
1716 static const struct blk_mq_ops loop_mq_ops = {
1717 	.queue_rq       = loop_queue_rq,
1718 	.init_request	= loop_init_request,
1719 	.complete	= lo_complete_rq,
1720 };
1721 
1722 static int loop_add(struct loop_device **l, int i)
1723 {
1724 	struct loop_device *lo;
1725 	struct gendisk *disk;
1726 	int err;
1727 
1728 	err = -ENOMEM;
1729 	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1730 	if (!lo)
1731 		goto out;
1732 
1733 	lo->lo_state = Lo_unbound;
1734 
1735 	/* allocate id, if @id >= 0, we're requesting that specific id */
1736 	if (i >= 0) {
1737 		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
1738 		if (err == -ENOSPC)
1739 			err = -EEXIST;
1740 	} else {
1741 		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
1742 	}
1743 	if (err < 0)
1744 		goto out_free_dev;
1745 	i = err;
1746 
1747 	err = -ENOMEM;
1748 	lo->tag_set.ops = &loop_mq_ops;
1749 	lo->tag_set.nr_hw_queues = 1;
1750 	lo->tag_set.queue_depth = 128;
1751 	lo->tag_set.numa_node = NUMA_NO_NODE;
1752 	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
1753 	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
1754 	lo->tag_set.driver_data = lo;
1755 
1756 	err = blk_mq_alloc_tag_set(&lo->tag_set);
1757 	if (err)
1758 		goto out_free_idr;
1759 
1760 	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
1761 	if (IS_ERR_OR_NULL(lo->lo_queue)) {
1762 		err = PTR_ERR(lo->lo_queue);
1763 		goto out_cleanup_tags;
1764 	}
1765 	lo->lo_queue->queuedata = lo;
1766 
1767 	blk_queue_physical_block_size(lo->lo_queue, PAGE_SIZE);
1768 
1769 	/*
1770 	 * It doesn't make sense to enable merging because the I/O
1771 	 * submitted to the backing file is handled page by page.
1772 	 */
1773 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
1774 
1775 	err = -ENOMEM;
1776 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
1777 	if (!disk)
1778 		goto out_free_queue;
1779 
1780 	/*
1781 	 * Disable partition scanning by default. The in-kernel partition
1782 	 * scanning can be requested individually per-device during its
1783 	 * setup. Userspace can always add and remove partitions from all
1784 	 * devices. The needed partition minors are allocated from the
1785 	 * extended minor space, the main loop device numbers will continue
1786 	 * to match the loop minors, regardless of the number of partitions
1787 	 * used.
1788 	 *
1789 	 * If max_part is given, partition scanning is globally enabled for
1790 	 * all loop devices. The minors for the main loop devices will be
1791 	 * multiples of max_part.
1792 	 *
1793 	 * Note: Global-for-all-devices, set-only-at-init, read-only module
1794 	 * parameters like 'max_loop' and 'max_part' make things needlessly
1795 	 * complicated, are too static, inflexible and may surprise
1796 	 * userspace tools. Parameters like this in general should be avoided.
1797 	 */
1798 	if (!part_shift)
1799 		disk->flags |= GENHD_FL_NO_PART_SCAN;
1800 	disk->flags |= GENHD_FL_EXT_DEVT;
1801 	mutex_init(&lo->lo_ctl_mutex);
1802 	atomic_set(&lo->lo_refcnt, 0);
1803 	lo->lo_number		= i;
1804 	spin_lock_init(&lo->lo_lock);
1805 	disk->major		= LOOP_MAJOR;
1806 	disk->first_minor	= i << part_shift;
1807 	disk->fops		= &lo_fops;
1808 	disk->private_data	= lo;
1809 	disk->queue		= lo->lo_queue;
1810 	sprintf(disk->disk_name, "loop%d", i);
1811 	add_disk(disk);
1812 	*l = lo;
1813 	return lo->lo_number;
1814 
1815 out_free_queue:
1816 	blk_cleanup_queue(lo->lo_queue);
1817 out_cleanup_tags:
1818 	blk_mq_free_tag_set(&lo->tag_set);
1819 out_free_idr:
1820 	idr_remove(&loop_index_idr, i);
1821 out_free_dev:
1822 	kfree(lo);
1823 out:
1824 	return err;
1825 }
1826 
1827 static void loop_remove(struct loop_device *lo)
1828 {
1829 	blk_cleanup_queue(lo->lo_queue);
1830 	del_gendisk(lo->lo_disk);
1831 	blk_mq_free_tag_set(&lo->tag_set);
1832 	put_disk(lo->lo_disk);
1833 	kfree(lo);
1834 }
1835 
1836 static int find_free_cb(int id, void *ptr, void *data)
1837 {
1838 	struct loop_device *lo = ptr;
1839 	struct loop_device **l = data;
1840 
1841 	if (lo->lo_state == Lo_unbound) {
1842 		*l = lo;
1843 		return 1;
1844 	}
1845 	return 0;
1846 }
1847 
1848 static int loop_lookup(struct loop_device **l, int i)
1849 {
1850 	struct loop_device *lo;
1851 	int ret = -ENODEV;
1852 
1853 	if (i < 0) {
1854 		int err;
1855 
1856 		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1857 		if (err == 1) {
1858 			*l = lo;
1859 			ret = lo->lo_number;
1860 		}
1861 		goto out;
1862 	}
1863 
1864 	/* lookup and return a specific i */
1865 	lo = idr_find(&loop_index_idr, i);
1866 	if (lo) {
1867 		*l = lo;
1868 		ret = lo->lo_number;
1869 	}
1870 out:
1871 	return ret;
1872 }
1873 
1874 static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1875 {
1876 	struct loop_device *lo;
1877 	struct kobject *kobj;
1878 	int err;
1879 
1880 	mutex_lock(&loop_index_mutex);
1881 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1882 	if (err < 0)
1883 		err = loop_add(&lo, MINOR(dev) >> part_shift);
1884 	if (err < 0)
1885 		kobj = NULL;
1886 	else
1887 		kobj = get_disk(lo->lo_disk);
1888 	mutex_unlock(&loop_index_mutex);
1889 
1890 	*part = 0;
1891 	return kobj;
1892 }
1893 
1894 static long loop_control_ioctl(struct file *file, unsigned int cmd,
1895 			       unsigned long parm)
1896 {
1897 	struct loop_device *lo;
1898 	int ret = -ENOSYS;
1899 
1900 	mutex_lock(&loop_index_mutex);
1901 	switch (cmd) {
1902 	case LOOP_CTL_ADD:
1903 		ret = loop_lookup(&lo, parm);
1904 		if (ret >= 0) {
1905 			ret = -EEXIST;
1906 			break;
1907 		}
1908 		ret = loop_add(&lo, parm);
1909 		break;
1910 	case LOOP_CTL_REMOVE:
1911 		ret = loop_lookup(&lo, parm);
1912 		if (ret < 0)
1913 			break;
1914 		mutex_lock(&lo->lo_ctl_mutex);
1915 		if (lo->lo_state != Lo_unbound) {
1916 			ret = -EBUSY;
1917 			mutex_unlock(&lo->lo_ctl_mutex);
1918 			break;
1919 		}
1920 		if (atomic_read(&lo->lo_refcnt) > 0) {
1921 			ret = -EBUSY;
1922 			mutex_unlock(&lo->lo_ctl_mutex);
1923 			break;
1924 		}
1925 		lo->lo_disk->private_data = NULL;
1926 		mutex_unlock(&lo->lo_ctl_mutex);
1927 		idr_remove(&loop_index_idr, lo->lo_number);
1928 		loop_remove(lo);
1929 		break;
1930 	case LOOP_CTL_GET_FREE:
1931 		ret = loop_lookup(&lo, -1);
1932 		if (ret >= 0)
1933 			break;
1934 		ret = loop_add(&lo, -1);
1935 	}
1936 	mutex_unlock(&loop_index_mutex);
1937 
1938 	return ret;
1939 }
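/*
 * Illustrative sketch of the /dev/loop-control interface used by modern
 * losetup to find or create devices on demand (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	char path[32];
 *	int ctrl = open("/dev/loop-control", O_RDWR);
 *	int idx = ioctl(ctrl, LOOP_CTL_GET_FREE);	// first unbound device
 *	snprintf(path, sizeof(path), "/dev/loop%d", idx);
 *	// path can now be opened and bound with LOOP_SET_FD as shown above
 *
 * LOOP_CTL_ADD and LOOP_CTL_REMOVE create or delete a specific index the
 * same way, returning -EEXIST or -EBUSY when that is not possible.
 */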
1940 
1941 static const struct file_operations loop_ctl_fops = {
1942 	.open		= nonseekable_open,
1943 	.unlocked_ioctl	= loop_control_ioctl,
1944 	.compat_ioctl	= loop_control_ioctl,
1945 	.owner		= THIS_MODULE,
1946 	.llseek		= noop_llseek,
1947 };
1948 
1949 static struct miscdevice loop_misc = {
1950 	.minor		= LOOP_CTRL_MINOR,
1951 	.name		= "loop-control",
1952 	.fops		= &loop_ctl_fops,
1953 };
1954 
1955 MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1956 MODULE_ALIAS("devname:loop-control");
1957 
1958 static int __init loop_init(void)
1959 {
1960 	int i, nr;
1961 	unsigned long range;
1962 	struct loop_device *lo;
1963 	int err;
1964 
1965 	part_shift = 0;
1966 	if (max_part > 0) {
1967 		part_shift = fls(max_part);
1968 
1969 		/*
1970 		 * Adjust max_part according to part_shift as it is exported
1971 		 * to user space so that user can decide correct minor number
1972 	 * to user space so that users can work out the correct minor number
1973 	 * if they want to create more devices.
1974 		 * Note that -1 is required because partition 0 is reserved
1975 		 * for the whole disk.
1976 		 */
1977 		max_part = (1UL << part_shift) - 1;
1978 	}
1979 
1980 	if ((1UL << part_shift) > DISK_MAX_PARTS) {
1981 		err = -EINVAL;
1982 		goto err_out;
1983 	}
1984 
1985 	if (max_loop > 1UL << (MINORBITS - part_shift)) {
1986 		err = -EINVAL;
1987 		goto err_out;
1988 	}
1989 
1990 	/*
1991 	 * If max_loop is specified, create that many devices upfront.
1992 	 * This also becomes a hard limit. If max_loop is not specified,
1993 	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1994 	 * init time. Loop devices can be requested on-demand with the
1995 	 * /dev/loop-control interface, or be instantiated by accessing
1996 	 * a 'dead' device node.
1997 	 */
1998 	if (max_loop) {
1999 		nr = max_loop;
2000 		range = max_loop << part_shift;
2001 	} else {
2002 		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
2003 		range = 1UL << MINORBITS;
2004 	}
2005 
2006 	err = misc_register(&loop_misc);
2007 	if (err < 0)
2008 		goto err_out;
2009 
2010 
2011 	if (register_blkdev(LOOP_MAJOR, "loop")) {
2012 		err = -EIO;
2013 		goto misc_out;
2014 	}
2015 
2016 	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
2017 				  THIS_MODULE, loop_probe, NULL, NULL);
2018 
2019 	/* pre-create number of devices given by config or max_loop */
2020 	mutex_lock(&loop_index_mutex);
2021 	for (i = 0; i < nr; i++)
2022 		loop_add(&lo, i);
2023 	mutex_unlock(&loop_index_mutex);
2024 
2025 	printk(KERN_INFO "loop: module loaded\n");
2026 	return 0;
2027 
2028 misc_out:
2029 	misc_deregister(&loop_misc);
2030 err_out:
2031 	return err;
2032 }
2033 
2034 static int loop_exit_cb(int id, void *ptr, void *data)
2035 {
2036 	struct loop_device *lo = ptr;
2037 
2038 	loop_remove(lo);
2039 	return 0;
2040 }
2041 
2042 static void __exit loop_exit(void)
2043 {
2044 	unsigned long range;
2045 
2046 	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
2047 
2048 	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
2049 	idr_destroy(&loop_index_idr);
2050 
2051 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
2052 	unregister_blkdev(LOOP_MAJOR, "loop");
2053 
2054 	misc_deregister(&loop_misc);
2055 }
2056 
2057 module_init(loop_init);
2058 module_exit(loop_exit);
2059 
2060 #ifndef MODULE
2061 static int __init max_loop_setup(char *str)
2062 {
2063 	max_loop = simple_strtol(str, NULL, 0);
2064 	return 1;
2065 }
2066 
2067 __setup("max_loop=", max_loop_setup);
2068 #endif
2069