xref: /openbmc/linux/drivers/md/dm.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 #include "dm-uevent.h"
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/buffer_head.h>
18 #include <linux/mempool.h>
19 #include <linux/slab.h>
20 #include <linux/idr.h>
21 #include <linux/hdreg.h>
22 #include <linux/delay.h>
23 
24 #include <trace/events/block.h>
25 
26 #define DM_MSG_PREFIX "core"
27 
28 /*
29  * Cookies are numeric values sent with CHANGE and REMOVE
30  * uevents while resuming, removing or renaming the device.
31  */
32 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
33 #define DM_COOKIE_LENGTH 24
34 
35 static const char *_name = DM_NAME;
36 
37 static unsigned int major = 0;
38 static unsigned int _major = 0;
39 
40 static DEFINE_SPINLOCK(_minor_lock);
41 /*
42  * For bio-based dm.
43  * One of these is allocated per bio.
44  */
45 struct dm_io {
46 	struct mapped_device *md;
47 	int error;
48 	atomic_t io_count;
49 	struct bio *bio;
50 	unsigned long start_time;
51 	spinlock_t endio_lock;
52 };
53 
54 /*
55  * For bio-based dm.
56  * One of these is allocated per target within a bio.  Hopefully
57  * this will be simplified out one day.
58  */
59 struct dm_target_io {
60 	struct dm_io *io;
61 	struct dm_target *ti;
62 	union map_info info;
63 };
64 
65 /*
66  * For request-based dm.
67  * One of these is allocated per request.
68  */
69 struct dm_rq_target_io {
70 	struct mapped_device *md;
71 	struct dm_target *ti;
72 	struct request *orig, clone;
73 	int error;
74 	union map_info info;
75 };
76 
77 /*
78  * For request-based dm.
79  * One of these is allocated per bio.
80  */
81 struct dm_rq_clone_bio_info {
82 	struct bio *orig;
83 	struct dm_rq_target_io *tio;
84 };
85 
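/*
 * Return the per-target map_info stashed in a bio-based clone's
 * bi_private, or NULL if there isn't one.
 */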
86 union map_info *dm_get_mapinfo(struct bio *bio)
87 {
88 	if (bio && bio->bi_private)
89 		return &((struct dm_target_io *)bio->bi_private)->info;
90 	return NULL;
91 }
92 
93 union map_info *dm_get_rq_mapinfo(struct request *rq)
94 {
95 	if (rq && rq->end_io_data)
96 		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
97 	return NULL;
98 }
99 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
100 
101 #define MINOR_ALLOCED ((void *)-1)
102 
103 /*
104  * Bits for the md->flags field.
105  */
106 #define DMF_BLOCK_IO_FOR_SUSPEND 0
107 #define DMF_SUSPENDED 1
108 #define DMF_FROZEN 2
109 #define DMF_FREEING 3
110 #define DMF_DELETING 4
111 #define DMF_NOFLUSH_SUSPENDING 5
112 
113 /*
114  * Work processed by per-device workqueue.
115  */
116 struct mapped_device {
117 	struct rw_semaphore io_lock;
118 	struct mutex suspend_lock;
119 	rwlock_t map_lock;
120 	atomic_t holders;
121 	atomic_t open_count;
122 
123 	unsigned long flags;
124 
125 	struct request_queue *queue;
126 	unsigned type;
127 	/* Protect queue and type against concurrent access. */
128 	struct mutex type_lock;
129 
130 	struct gendisk *disk;
131 	char name[16];
132 
133 	void *interface_ptr;
134 
135 	/*
136 	 * A list of ios that arrived while we were suspended.
137 	 */
138 	atomic_t pending[2];
139 	wait_queue_head_t wait;
140 	struct work_struct work;
141 	struct bio_list deferred;
142 	spinlock_t deferred_lock;
143 
144 	/*
145 	 * Processing queue (flush)
146 	 */
147 	struct workqueue_struct *wq;
148 
149 	/*
150 	 * The current mapping.
151 	 */
152 	struct dm_table *map;
153 
154 	/*
155 	 * io objects are allocated from here.
156 	 */
157 	mempool_t *io_pool;
158 	mempool_t *tio_pool;
159 
160 	struct bio_set *bs;
161 
162 	/*
163 	 * Event handling.
164 	 */
165 	atomic_t event_nr;
166 	wait_queue_head_t eventq;
167 	atomic_t uevent_seq;
168 	struct list_head uevent_list;
169 	spinlock_t uevent_lock; /* Protect access to uevent_list */
170 
171 	/*
172 	 * freeze/thaw support requires holding onto a super block
173 	 */
174 	struct super_block *frozen_sb;
175 	struct block_device *bdev;
176 
177 	/* forced geometry settings */
178 	struct hd_geometry geometry;
179 
180 	/* For saving the address of __make_request for request-based dm */
181 	make_request_fn *saved_make_request_fn;
182 
183 	/* sysfs handle */
184 	struct kobject kobj;
185 
186 	/* zero-length flush that will be cloned and submitted to targets */
187 	struct bio flush_bio;
188 };
189 
190 /*
191  * For mempool pre-allocation at table load time.
192  */
193 struct dm_md_mempools {
194 	mempool_t *io_pool;
195 	mempool_t *tio_pool;
196 	struct bio_set *bs;
197 };
198 
199 #define MIN_IOS 256
200 static struct kmem_cache *_io_cache;
201 static struct kmem_cache *_tio_cache;
202 static struct kmem_cache *_rq_tio_cache;
203 static struct kmem_cache *_rq_bio_info_cache;
204 
205 static int __init local_init(void)
206 {
207 	int r = -ENOMEM;
208 
209 	/* allocate a slab for the dm_ios */
210 	_io_cache = KMEM_CACHE(dm_io, 0);
211 	if (!_io_cache)
212 		return r;
213 
214 	/* allocate a slab for the target ios */
215 	_tio_cache = KMEM_CACHE(dm_target_io, 0);
216 	if (!_tio_cache)
217 		goto out_free_io_cache;
218 
219 	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
220 	if (!_rq_tio_cache)
221 		goto out_free_tio_cache;
222 
223 	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
224 	if (!_rq_bio_info_cache)
225 		goto out_free_rq_tio_cache;
226 
227 	r = dm_uevent_init();
228 	if (r)
229 		goto out_free_rq_bio_info_cache;
230 
231 	_major = major;
232 	r = register_blkdev(_major, _name);
233 	if (r < 0)
234 		goto out_uevent_exit;
235 
236 	if (!_major)
237 		_major = r;
238 
239 	return 0;
240 
241 out_uevent_exit:
242 	dm_uevent_exit();
243 out_free_rq_bio_info_cache:
244 	kmem_cache_destroy(_rq_bio_info_cache);
245 out_free_rq_tio_cache:
246 	kmem_cache_destroy(_rq_tio_cache);
247 out_free_tio_cache:
248 	kmem_cache_destroy(_tio_cache);
249 out_free_io_cache:
250 	kmem_cache_destroy(_io_cache);
251 
252 	return r;
253 }
254 
255 static void local_exit(void)
256 {
257 	kmem_cache_destroy(_rq_bio_info_cache);
258 	kmem_cache_destroy(_rq_tio_cache);
259 	kmem_cache_destroy(_tio_cache);
260 	kmem_cache_destroy(_io_cache);
261 	unregister_blkdev(_major, _name);
262 	dm_uevent_exit();
263 
264 	_major = 0;
265 
266 	DMINFO("cleaned up");
267 }
268 
269 static int (*_inits[])(void) __initdata = {
270 	local_init,
271 	dm_target_init,
272 	dm_linear_init,
273 	dm_stripe_init,
274 	dm_io_init,
275 	dm_kcopyd_init,
276 	dm_interface_init,
277 };
278 
279 static void (*_exits[])(void) = {
280 	local_exit,
281 	dm_target_exit,
282 	dm_linear_exit,
283 	dm_stripe_exit,
284 	dm_io_exit,
285 	dm_kcopyd_exit,
286 	dm_interface_exit,
287 };
288 
289 static int __init dm_init(void)
290 {
291 	const int count = ARRAY_SIZE(_inits);
292 
293 	int r, i;
294 
295 	for (i = 0; i < count; i++) {
296 		r = _inits[i]();
297 		if (r)
298 			goto bad;
299 	}
300 
301 	return 0;
302 
303       bad:
304 	while (i--)
305 		_exits[i]();
306 
307 	return r;
308 }
309 
310 static void __exit dm_exit(void)
311 {
312 	int i = ARRAY_SIZE(_exits);
313 
314 	while (i--)
315 		_exits[i]();
316 }
317 
318 /*
319  * Block device functions
320  */
321 int dm_deleting_md(struct mapped_device *md)
322 {
323 	return test_bit(DMF_DELETING, &md->flags);
324 }
325 
326 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
327 {
328 	struct mapped_device *md;
329 
330 	spin_lock(&_minor_lock);
331 
332 	md = bdev->bd_disk->private_data;
333 	if (!md)
334 		goto out;
335 
336 	if (test_bit(DMF_FREEING, &md->flags) ||
337 	    dm_deleting_md(md)) {
338 		md = NULL;
339 		goto out;
340 	}
341 
342 	dm_get(md);
343 	atomic_inc(&md->open_count);
344 
345 out:
346 	spin_unlock(&_minor_lock);
347 
348 	return md ? 0 : -ENXIO;
349 }
350 
351 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
352 {
353 	struct mapped_device *md = disk->private_data;
354 
355 	spin_lock(&_minor_lock);
356 
357 	atomic_dec(&md->open_count);
358 	dm_put(md);
359 
360 	spin_unlock(&_minor_lock);
361 
362 	return 0;
363 }
364 
365 int dm_open_count(struct mapped_device *md)
366 {
367 	return atomic_read(&md->open_count);
368 }
369 
370 /*
371  * Guarantees nothing is using the device before it's deleted.
372  */
373 int dm_lock_for_deletion(struct mapped_device *md)
374 {
375 	int r = 0;
376 
377 	spin_lock(&_minor_lock);
378 
379 	if (dm_open_count(md))
380 		r = -EBUSY;
381 	else
382 		set_bit(DMF_DELETING, &md->flags);
383 
384 	spin_unlock(&_minor_lock);
385 
386 	return r;
387 }
388 
389 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
390 {
391 	struct mapped_device *md = bdev->bd_disk->private_data;
392 
393 	return dm_get_geometry(md, geo);
394 }
395 
396 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
397 			unsigned int cmd, unsigned long arg)
398 {
399 	struct mapped_device *md = bdev->bd_disk->private_data;
400 	struct dm_table *map = dm_get_live_table(md);
401 	struct dm_target *tgt;
402 	int r = -ENOTTY;
403 
404 	if (!map || !dm_table_get_size(map))
405 		goto out;
406 
407 	/* We only support devices that have a single target */
408 	if (dm_table_get_num_targets(map) != 1)
409 		goto out;
410 
411 	tgt = dm_table_get_target(map, 0);
412 
413 	if (dm_suspended_md(md)) {
414 		r = -EAGAIN;
415 		goto out;
416 	}
417 
418 	if (tgt->type->ioctl)
419 		r = tgt->type->ioctl(tgt, cmd, arg);
420 
421 out:
422 	dm_table_put(map);
423 
424 	return r;
425 }
426 
427 static struct dm_io *alloc_io(struct mapped_device *md)
428 {
429 	return mempool_alloc(md->io_pool, GFP_NOIO);
430 }
431 
432 static void free_io(struct mapped_device *md, struct dm_io *io)
433 {
434 	mempool_free(io, md->io_pool);
435 }
436 
437 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
438 {
439 	mempool_free(tio, md->tio_pool);
440 }
441 
442 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
443 					    gfp_t gfp_mask)
444 {
445 	return mempool_alloc(md->tio_pool, gfp_mask);
446 }
447 
448 static void free_rq_tio(struct dm_rq_target_io *tio)
449 {
450 	mempool_free(tio, tio->md->tio_pool);
451 }
452 
453 static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
454 {
455 	return mempool_alloc(md->io_pool, GFP_ATOMIC);
456 }
457 
458 static void free_bio_info(struct dm_rq_clone_bio_info *info)
459 {
460 	mempool_free(info, info->tio->md->io_pool);
461 }
462 
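/* Total number of reads and writes currently in flight on this device. */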
463 static int md_in_flight(struct mapped_device *md)
464 {
465 	return atomic_read(&md->pending[READ]) +
466 	       atomic_read(&md->pending[WRITE]);
467 }
468 
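/* Account the start of an I/O: update in-flight counts and disk statistics. */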
469 static void start_io_acct(struct dm_io *io)
470 {
471 	struct mapped_device *md = io->md;
472 	int cpu;
473 	int rw = bio_data_dir(io->bio);
474 
475 	io->start_time = jiffies;
476 
477 	cpu = part_stat_lock();
478 	part_round_stats(cpu, &dm_disk(md)->part0);
479 	part_stat_unlock();
480 	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
481 }
482 
483 static void end_io_acct(struct dm_io *io)
484 {
485 	struct mapped_device *md = io->md;
486 	struct bio *bio = io->bio;
487 	unsigned long duration = jiffies - io->start_time;
488 	int pending, cpu;
489 	int rw = bio_data_dir(bio);
490 
491 	cpu = part_stat_lock();
492 	part_round_stats(cpu, &dm_disk(md)->part0);
493 	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
494 	part_stat_unlock();
495 
496 	/*
497 	 * After this is decremented the bio must not be touched if it is
498 	 * a flush.
499 	 */
500 	dm_disk(md)->part0.in_flight[rw] = pending =
501 		atomic_dec_return(&md->pending[rw]);
502 	pending += atomic_read(&md->pending[rw^0x1]);
503 
504 	/* nudge anyone waiting on suspend queue */
505 	if (!pending)
506 		wake_up(&md->wait);
507 }
508 
509 /*
510  * Add the bio to the list of deferred io.
511  */
512 static void queue_io(struct mapped_device *md, struct bio *bio)
513 {
514 	unsigned long flags;
515 
516 	spin_lock_irqsave(&md->deferred_lock, flags);
517 	bio_list_add(&md->deferred, bio);
518 	spin_unlock_irqrestore(&md->deferred_lock, flags);
519 	queue_work(md->wq, &md->work);
520 }
521 
522 /*
523  * Everyone (including functions in this file) should use this
524  * function to access the md->map field, and make sure they call
525  * dm_table_put() when finished.
526  */
527 struct dm_table *dm_get_live_table(struct mapped_device *md)
528 {
529 	struct dm_table *t;
530 	unsigned long flags;
531 
532 	read_lock_irqsave(&md->map_lock, flags);
533 	t = md->map;
534 	if (t)
535 		dm_table_get(t);
536 	read_unlock_irqrestore(&md->map_lock, flags);
537 
538 	return t;
539 }
540 
541 /*
542  * Get the geometry associated with a dm device
543  */
544 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
545 {
546 	*geo = md->geometry;
547 
548 	return 0;
549 }
550 
551 /*
552  * Set the geometry of a device.
553  */
554 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
555 {
556 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
557 
558 	if (geo->start > sz) {
559 		DMWARN("Start sector is beyond the geometry limits.");
560 		return -EINVAL;
561 	}
562 
563 	md->geometry = *geo;
564 
565 	return 0;
566 }
567 
568 /*-----------------------------------------------------------------
569  * CRUD START:
570  *   A more elegant solution is in the works that uses the queue
571  *   merge fn; unfortunately there are a couple of changes to
572  *   the block layer that I want to make for this first.  So, in the
573  *   interests of getting something for people to use, I give
574  *   you this clearly demarcated crap.
575  *---------------------------------------------------------------*/
576 
577 static int __noflush_suspending(struct mapped_device *md)
578 {
579 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
580 }
581 
582 /*
583  * Decrements the number of outstanding ios that a bio has been
584  * cloned into, completing the original io if necessary.
585  */
586 static void dec_pending(struct dm_io *io, int error)
587 {
588 	unsigned long flags;
589 	int io_error;
590 	struct bio *bio;
591 	struct mapped_device *md = io->md;
592 
593 	/* Push-back supersedes any I/O errors */
594 	if (unlikely(error)) {
595 		spin_lock_irqsave(&io->endio_lock, flags);
596 		if (!(io->error > 0 && __noflush_suspending(md)))
597 			io->error = error;
598 		spin_unlock_irqrestore(&io->endio_lock, flags);
599 	}
600 
601 	if (atomic_dec_and_test(&io->io_count)) {
602 		if (io->error == DM_ENDIO_REQUEUE) {
603 			/*
604 			 * Target requested pushing back the I/O.
605 			 */
606 			spin_lock_irqsave(&md->deferred_lock, flags);
607 			if (__noflush_suspending(md))
608 				bio_list_add_head(&md->deferred, io->bio);
609 			else
610 				/* noflush suspend was interrupted. */
611 				io->error = -EIO;
612 			spin_unlock_irqrestore(&md->deferred_lock, flags);
613 		}
614 
615 		io_error = io->error;
616 		bio = io->bio;
617 		end_io_acct(io);
618 		free_io(md, io);
619 
620 		if (io_error == DM_ENDIO_REQUEUE)
621 			return;
622 
623 		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
624 			/*
625 			 * Preflush done for flush with data, reissue
626 			 * without REQ_FLUSH.
627 			 */
628 			bio->bi_rw &= ~REQ_FLUSH;
629 			queue_io(md, bio);
630 		} else {
631 			/* done with normal IO or empty flush */
632 			trace_block_bio_complete(md->queue, bio, io_error);
633 			bio_endio(bio, io_error);
634 		}
635 	}
636 }
637 
638 static void clone_endio(struct bio *bio, int error)
639 {
640 	int r = 0;
641 	struct dm_target_io *tio = bio->bi_private;
642 	struct dm_io *io = tio->io;
643 	struct mapped_device *md = tio->io->md;
644 	dm_endio_fn endio = tio->ti->type->end_io;
645 
646 	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
647 		error = -EIO;
648 
649 	if (endio) {
650 		r = endio(tio->ti, bio, error, &tio->info);
651 		if (r < 0 || r == DM_ENDIO_REQUEUE)
652 			/*
653 			 * error and requeue request are handled
654 			 * in dec_pending().
655 			 */
656 			error = r;
657 		else if (r == DM_ENDIO_INCOMPLETE)
658 			/* The target will handle the io */
659 			return;
660 		else if (r) {
661 			DMWARN("unimplemented target endio return value: %d", r);
662 			BUG();
663 		}
664 	}
665 
666 	/*
667 	 * Store md for cleanup instead of tio, which is about to get freed.
668 	 */
669 	bio->bi_private = md->bs;
670 
671 	free_tio(md, tio);
672 	bio_put(bio);
673 	dec_pending(io, error);
674 }
675 
676 /*
677  * Partial completion handling for request-based dm
678  */
679 static void end_clone_bio(struct bio *clone, int error)
680 {
681 	struct dm_rq_clone_bio_info *info = clone->bi_private;
682 	struct dm_rq_target_io *tio = info->tio;
683 	struct bio *bio = info->orig;
684 	unsigned int nr_bytes = info->orig->bi_size;
685 
686 	bio_put(clone);
687 
688 	if (tio->error)
689 		/*
690 		 * An error has already been detected on the request.
691 		 * Once error occurred, just let clone->end_io() handle
692 		 * the remainder.
693 		 */
694 		return;
695 	else if (error) {
696 		/*
697 		 * Don't report the error to the upper layer yet.
698 		 * The error handling decision is made by the target driver
699 		 * when the request is completed.
700 		 */
701 		tio->error = error;
702 		return;
703 	}
704 
705 	/*
706 	 * I/O for the bio successfully completed.
707 	 * Report the data completion to the upper layer.
708 	 */
709 
710 	/*
711 	 * bios are processed from the head of the list.
712 	 * So the completing bio should always be rq->bio.
713 	 * If it's not, something is wrong.
714 	 */
715 	if (tio->orig->bio != bio)
716 		DMERR("bio completion is going in the middle of the request");
717 
718 	/*
719 	 * Update the original request.
720 	 * Do not use blk_end_request() here, because it may complete
721 	 * the original request before the clone, and break the ordering.
722 	 */
723 	blk_update_request(tio->orig, 0, nr_bytes);
724 }
725 
726 /*
727  * Don't touch any member of the md after calling this function because
728  * the md may be freed in dm_put() at the end of this function.
729  * Alternatively, do dm_get() before calling this function and dm_put() later.
730  */
731 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
732 {
733 	atomic_dec(&md->pending[rw]);
734 
735 	/* nudge anyone waiting on suspend queue */
736 	if (!md_in_flight(md))
737 		wake_up(&md->wait);
738 
739 	if (run_queue)
740 		blk_run_queue(md->queue);
741 
742 	/*
743 	 * dm_put() must be at the end of this function. See the comment above
744 	 */
745 	dm_put(md);
746 }
747 
748 static void free_rq_clone(struct request *clone)
749 {
750 	struct dm_rq_target_io *tio = clone->end_io_data;
751 
752 	blk_rq_unprep_clone(clone);
753 	free_rq_tio(tio);
754 }
755 
756 /*
757  * Complete the clone and the original request.
758  * Must be called without queue lock.
759  */
760 static void dm_end_request(struct request *clone, int error)
761 {
762 	int rw = rq_data_dir(clone);
763 	struct dm_rq_target_io *tio = clone->end_io_data;
764 	struct mapped_device *md = tio->md;
765 	struct request *rq = tio->orig;
766 
767 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
768 		rq->errors = clone->errors;
769 		rq->resid_len = clone->resid_len;
770 
771 		if (rq->sense)
772 			/*
773 			 * We are using the sense buffer of the original
774 			 * request.
775 			 * So setting the length of the sense data is enough.
776 			 */
777 			rq->sense_len = clone->sense_len;
778 	}
779 
780 	free_rq_clone(clone);
781 	blk_end_request_all(rq, error);
782 	rq_completed(md, rw, true);
783 }
784 
785 static void dm_unprep_request(struct request *rq)
786 {
787 	struct request *clone = rq->special;
788 
789 	rq->special = NULL;
790 	rq->cmd_flags &= ~REQ_DONTPREP;
791 
792 	free_rq_clone(clone);
793 }
794 
795 /*
796  * Requeue the original request of a clone.
797  */
798 void dm_requeue_unmapped_request(struct request *clone)
799 {
800 	int rw = rq_data_dir(clone);
801 	struct dm_rq_target_io *tio = clone->end_io_data;
802 	struct mapped_device *md = tio->md;
803 	struct request *rq = tio->orig;
804 	struct request_queue *q = rq->q;
805 	unsigned long flags;
806 
807 	dm_unprep_request(rq);
808 
809 	spin_lock_irqsave(q->queue_lock, flags);
810 	if (elv_queue_empty(q))
811 		blk_plug_device(q);
812 	blk_requeue_request(q, rq);
813 	spin_unlock_irqrestore(q->queue_lock, flags);
814 
815 	rq_completed(md, rw, 0);
816 }
817 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
818 
819 static void __stop_queue(struct request_queue *q)
820 {
821 	blk_stop_queue(q);
822 }
823 
824 static void stop_queue(struct request_queue *q)
825 {
826 	unsigned long flags;
827 
828 	spin_lock_irqsave(q->queue_lock, flags);
829 	__stop_queue(q);
830 	spin_unlock_irqrestore(q->queue_lock, flags);
831 }
832 
833 static void __start_queue(struct request_queue *q)
834 {
835 	if (blk_queue_stopped(q))
836 		blk_start_queue(q);
837 }
838 
839 static void start_queue(struct request_queue *q)
840 {
841 	unsigned long flags;
842 
843 	spin_lock_irqsave(q->queue_lock, flags);
844 	__start_queue(q);
845 	spin_unlock_irqrestore(q->queue_lock, flags);
846 }
847 
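/*
 * Complete, requeue or hand off the clone according to the return value
 * of the target's rq_end_io() (if any).
 */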
848 static void dm_done(struct request *clone, int error, bool mapped)
849 {
850 	int r = error;
851 	struct dm_rq_target_io *tio = clone->end_io_data;
852 	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
853 
854 	if (mapped && rq_end_io)
855 		r = rq_end_io(tio->ti, clone, error, &tio->info);
856 
857 	if (r <= 0)
858 		/* The target wants to complete the I/O */
859 		dm_end_request(clone, r);
860 	else if (r == DM_ENDIO_INCOMPLETE)
861 		/* The target will handle the I/O */
862 		return;
863 	else if (r == DM_ENDIO_REQUEUE)
864 		/* The target wants to requeue the I/O */
865 		dm_requeue_unmapped_request(clone);
866 	else {
867 		DMWARN("unimplemented target endio return value: %d", r);
868 		BUG();
869 	}
870 }
871 
872 /*
873  * Request completion handler for request-based dm
874  */
875 static void dm_softirq_done(struct request *rq)
876 {
877 	bool mapped = true;
878 	struct request *clone = rq->completion_data;
879 	struct dm_rq_target_io *tio = clone->end_io_data;
880 
881 	if (rq->cmd_flags & REQ_FAILED)
882 		mapped = false;
883 
884 	dm_done(clone, tio->error, mapped);
885 }
886 
887 /*
888  * Complete the clone and the original request with the error status
889  * through softirq context.
890  */
891 static void dm_complete_request(struct request *clone, int error)
892 {
893 	struct dm_rq_target_io *tio = clone->end_io_data;
894 	struct request *rq = tio->orig;
895 
896 	tio->error = error;
897 	rq->completion_data = clone;
898 	blk_complete_request(rq);
899 }
900 
901 /*
902  * Complete the unmapped clone and the original request with the error status
903  * through softirq context.
904  * Target's rq_end_io() function isn't called.
905  * This may be used when the target's map_rq() function fails.
906  */
907 void dm_kill_unmapped_request(struct request *clone, int error)
908 {
909 	struct dm_rq_target_io *tio = clone->end_io_data;
910 	struct request *rq = tio->orig;
911 
912 	rq->cmd_flags |= REQ_FAILED;
913 	dm_complete_request(clone, error);
914 }
915 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
916 
917 /*
918  * Called with the queue lock held
919  */
920 static void end_clone_request(struct request *clone, int error)
921 {
922 	/*
923 	 * This just cleans up the information of the queue in which
924 	 * the clone was dispatched.
925 	 * The clone is *NOT* actually freed here because it is allocated from
926 	 * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
927 	 */
928 	__blk_put_request(clone->q, clone);
929 
930 	/*
931 	 * Actual request completion is done in a softirq context which doesn't
932 	 * hold the queue lock.  Otherwise, deadlock could occur because:
933 	 *     - another request may be submitted by an upper-level driver
934 	 *       of the stack during the completion
935 	 *     - that submission, which requires the queue lock, may be done
936 	 *       against this queue
937 	 */
938 	dm_complete_request(clone, error);
939 }
940 
941 /*
942  * Return maximum size of I/O possible at the supplied sector up to the current
943  * target boundary.
944  */
945 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
946 {
947 	sector_t target_offset = dm_target_offset(ti, sector);
948 
949 	return ti->len - target_offset;
950 }
951 
952 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
953 {
954 	sector_t len = max_io_len_target_boundary(sector, ti);
955 
956 	/*
957 	 * Does the target need to split even further?
958 	 */
959 	if (ti->split_io) {
960 		sector_t boundary;
961 		sector_t offset = dm_target_offset(ti, sector);
962 		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
963 			   - offset;
964 		if (len > boundary)
965 			len = boundary;
966 	}
967 
968 	return len;
969 }
970 
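/*
 * Pass a clone to the target's map function and either dispatch it,
 * leave it to the target, or clean it up on error/requeue.
 */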
971 static void __map_bio(struct dm_target *ti, struct bio *clone,
972 		      struct dm_target_io *tio)
973 {
974 	int r;
975 	sector_t sector;
976 	struct mapped_device *md;
977 
978 	clone->bi_end_io = clone_endio;
979 	clone->bi_private = tio;
980 
981 	/*
982 	 * Map the clone.  If r == 0 we don't need to do
983 	 * anything; the target has assumed ownership of
984 	 * this io.
985 	 */
986 	atomic_inc(&tio->io->io_count);
987 	sector = clone->bi_sector;
988 	r = ti->type->map(ti, clone, &tio->info);
989 	if (r == DM_MAPIO_REMAPPED) {
990 		/* the bio has been remapped so dispatch it */
991 
992 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
993 				      tio->io->bio->bi_bdev->bd_dev, sector);
994 
995 		generic_make_request(clone);
996 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
997 		/* error the io and bail out, or requeue it if needed */
998 		md = tio->io->md;
999 		dec_pending(tio->io, r);
1000 		/*
1001 		 * Store bio_set for cleanup.
1002 		 */
1003 		clone->bi_private = md->bs;
1004 		bio_put(clone);
1005 		free_tio(md, tio);
1006 	} else if (r) {
1007 		DMWARN("unimplemented target map return value: %d", r);
1008 		BUG();
1009 	}
1010 }
1011 
1012 struct clone_info {
1013 	struct mapped_device *md;
1014 	struct dm_table *map;
1015 	struct bio *bio;
1016 	struct dm_io *io;
1017 	sector_t sector;
1018 	sector_t sector_count;
1019 	unsigned short idx;
1020 };
1021 
1022 static void dm_bio_destructor(struct bio *bio)
1023 {
1024 	struct bio_set *bs = bio->bi_private;
1025 
1026 	bio_free(bio, bs);
1027 }
1028 
1029 /*
1030  * Creates a little bio that just does part of a bvec.
1031  */
1032 static struct bio *split_bvec(struct bio *bio, sector_t sector,
1033 			      unsigned short idx, unsigned int offset,
1034 			      unsigned int len, struct bio_set *bs)
1035 {
1036 	struct bio *clone;
1037 	struct bio_vec *bv = bio->bi_io_vec + idx;
1038 
1039 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
1040 	clone->bi_destructor = dm_bio_destructor;
1041 	*clone->bi_io_vec = *bv;
1042 
1043 	clone->bi_sector = sector;
1044 	clone->bi_bdev = bio->bi_bdev;
1045 	clone->bi_rw = bio->bi_rw;
1046 	clone->bi_vcnt = 1;
1047 	clone->bi_size = to_bytes(len);
1048 	clone->bi_io_vec->bv_offset = offset;
1049 	clone->bi_io_vec->bv_len = clone->bi_size;
1050 	clone->bi_flags |= 1 << BIO_CLONED;
1051 
1052 	if (bio_integrity(bio)) {
1053 		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1054 		bio_integrity_trim(clone,
1055 				   bio_sector_offset(bio, idx, offset), len);
1056 	}
1057 
1058 	return clone;
1059 }
1060 
1061 /*
1062  * Creates a bio that consists of a range of complete bvecs.
1063  */
1064 static struct bio *clone_bio(struct bio *bio, sector_t sector,
1065 			     unsigned short idx, unsigned short bv_count,
1066 			     unsigned int len, struct bio_set *bs)
1067 {
1068 	struct bio *clone;
1069 
1070 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1071 	__bio_clone(clone, bio);
1072 	clone->bi_destructor = dm_bio_destructor;
1073 	clone->bi_sector = sector;
1074 	clone->bi_idx = idx;
1075 	clone->bi_vcnt = idx + bv_count;
1076 	clone->bi_size = to_bytes(len);
1077 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1078 
1079 	if (bio_integrity(bio)) {
1080 		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1081 
1082 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1083 			bio_integrity_trim(clone,
1084 					   bio_sector_offset(bio, idx, 0), len);
1085 	}
1086 
1087 	return clone;
1088 }
1089 
1090 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1091 				      struct dm_target *ti)
1092 {
1093 	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1094 
1095 	tio->io = ci->io;
1096 	tio->ti = ti;
1097 	memset(&tio->info, 0, sizeof(tio->info));
1098 
1099 	return tio;
1100 }
1101 
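/* Clone ci->bio once and map the clone to the given target. */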
1102 static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1103 				   unsigned request_nr, sector_t len)
1104 {
1105 	struct dm_target_io *tio = alloc_tio(ci, ti);
1106 	struct bio *clone;
1107 
1108 	tio->info.target_request_nr = request_nr;
1109 
1110 	/*
1111 	 * Discard requests require the bio's inline iovecs be initialized.
1112 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1113 	 * and discard, so no need for concern about wasted bvec allocations.
1114 	 */
1115 	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1116 	__bio_clone(clone, ci->bio);
1117 	clone->bi_destructor = dm_bio_destructor;
1118 	if (len) {
1119 		clone->bi_sector = ci->sector;
1120 		clone->bi_size = to_bytes(len);
1121 	}
1122 
1123 	__map_bio(ti, clone, tio);
1124 }
1125 
1126 static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1127 				    unsigned num_requests, sector_t len)
1128 {
1129 	unsigned request_nr;
1130 
1131 	for (request_nr = 0; request_nr < num_requests; request_nr++)
1132 		__issue_target_request(ci, ti, request_nr, len);
1133 }
1134 
1135 static int __clone_and_map_empty_flush(struct clone_info *ci)
1136 {
1137 	unsigned target_nr = 0;
1138 	struct dm_target *ti;
1139 
1140 	BUG_ON(bio_has_data(ci->bio));
1141 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1142 		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);
1143 
1144 	return 0;
1145 }
1146 
1147 /*
1148  * Perform all io with a single clone.
1149  */
1150 static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
1151 {
1152 	struct bio *clone, *bio = ci->bio;
1153 	struct dm_target_io *tio;
1154 
1155 	tio = alloc_tio(ci, ti);
1156 	clone = clone_bio(bio, ci->sector, ci->idx,
1157 			  bio->bi_vcnt - ci->idx, ci->sector_count,
1158 			  ci->md->bs);
1159 	__map_bio(ti, clone, tio);
1160 	ci->sector_count = 0;
1161 }
1162 
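/* Issue discard requests to each target that the discard range touches. */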
1163 static int __clone_and_map_discard(struct clone_info *ci)
1164 {
1165 	struct dm_target *ti;
1166 	sector_t len;
1167 
1168 	do {
1169 		ti = dm_table_find_target(ci->map, ci->sector);
1170 		if (!dm_target_is_valid(ti))
1171 			return -EIO;
1172 
1173 		/*
1174 		 * Even though the device advertised discard support,
1175 		 * reconfiguration might have changed that since the
1176 		 * check was performed.
1177 		 */
1178 		if (!ti->num_discard_requests)
1179 			return -EOPNOTSUPP;
1180 
1181 		len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1182 
1183 		__issue_target_requests(ci, ti, ti->num_discard_requests, len);
1184 
1185 		ci->sector += len;
1186 	} while (ci->sector_count -= len);
1187 
1188 	return 0;
1189 }
1190 
1191 static int __clone_and_map(struct clone_info *ci)
1192 {
1193 	struct bio *clone, *bio = ci->bio;
1194 	struct dm_target *ti;
1195 	sector_t len = 0, max;
1196 	struct dm_target_io *tio;
1197 
1198 	if (unlikely(bio->bi_rw & REQ_DISCARD))
1199 		return __clone_and_map_discard(ci);
1200 
1201 	ti = dm_table_find_target(ci->map, ci->sector);
1202 	if (!dm_target_is_valid(ti))
1203 		return -EIO;
1204 
1205 	max = max_io_len(ci->sector, ti);
1206 
1207 	if (ci->sector_count <= max) {
1208 		/*
1209 		 * Optimise for the simple case where we can do all of
1210 		 * the remaining io with a single clone.
1211 		 */
1212 		__clone_and_map_simple(ci, ti);
1213 
1214 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1215 		/*
1216 		 * There are some bvecs that don't span targets.
1217 		 * Do as many of these as possible.
1218 		 */
1219 		int i;
1220 		sector_t remaining = max;
1221 		sector_t bv_len;
1222 
1223 		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
1224 			bv_len = to_sector(bio->bi_io_vec[i].bv_len);
1225 
1226 			if (bv_len > remaining)
1227 				break;
1228 
1229 			remaining -= bv_len;
1230 			len += bv_len;
1231 		}
1232 
1233 		tio = alloc_tio(ci, ti);
1234 		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
1235 				  ci->md->bs);
1236 		__map_bio(ti, clone, tio);
1237 
1238 		ci->sector += len;
1239 		ci->sector_count -= len;
1240 		ci->idx = i;
1241 
1242 	} else {
1243 		/*
1244 		 * Handle a bvec that must be split between two or more targets.
1245 		 */
1246 		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1247 		sector_t remaining = to_sector(bv->bv_len);
1248 		unsigned int offset = 0;
1249 
1250 		do {
1251 			if (offset) {
1252 				ti = dm_table_find_target(ci->map, ci->sector);
1253 				if (!dm_target_is_valid(ti))
1254 					return -EIO;
1255 
1256 				max = max_io_len(ci->sector, ti);
1257 			}
1258 
1259 			len = min(remaining, max);
1260 
1261 			tio = alloc_tio(ci, ti);
1262 			clone = split_bvec(bio, ci->sector, ci->idx,
1263 					   bv->bv_offset + offset, len,
1264 					   ci->md->bs);
1265 
1266 			__map_bio(ti, clone, tio);
1267 
1268 			ci->sector += len;
1269 			ci->sector_count -= len;
1270 			offset += to_bytes(len);
1271 		} while (remaining -= len);
1272 
1273 		ci->idx++;
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 /*
1280  * Split the bio into several clones and submit it to targets.
1281  */
1282 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1283 {
1284 	struct clone_info ci;
1285 	int error = 0;
1286 
1287 	ci.map = dm_get_live_table(md);
1288 	if (unlikely(!ci.map)) {
1289 		bio_io_error(bio);
1290 		return;
1291 	}
1292 
1293 	ci.md = md;
1294 	ci.io = alloc_io(md);
1295 	ci.io->error = 0;
1296 	atomic_set(&ci.io->io_count, 1);
1297 	ci.io->bio = bio;
1298 	ci.io->md = md;
1299 	spin_lock_init(&ci.io->endio_lock);
1300 	ci.sector = bio->bi_sector;
1301 	ci.idx = bio->bi_idx;
1302 
1303 	start_io_acct(ci.io);
1304 	if (bio->bi_rw & REQ_FLUSH) {
1305 		ci.bio = &ci.md->flush_bio;
1306 		ci.sector_count = 0;
1307 		error = __clone_and_map_empty_flush(&ci);
1308 		/* dec_pending submits any data associated with flush */
1309 	} else {
1310 		ci.bio = bio;
1311 		ci.sector_count = bio_sectors(bio);
1312 		while (ci.sector_count && !error)
1313 			error = __clone_and_map(&ci);
1314 	}
1315 
1316 	/* drop the extra reference count */
1317 	dec_pending(ci.io, error);
1318 	dm_table_put(ci.map);
1319 }
1320 /*-----------------------------------------------------------------
1321  * CRUD END
1322  *---------------------------------------------------------------*/
1323 
1324 static int dm_merge_bvec(struct request_queue *q,
1325 			 struct bvec_merge_data *bvm,
1326 			 struct bio_vec *biovec)
1327 {
1328 	struct mapped_device *md = q->queuedata;
1329 	struct dm_table *map = dm_get_live_table(md);
1330 	struct dm_target *ti;
1331 	sector_t max_sectors;
1332 	int max_size = 0;
1333 
1334 	if (unlikely(!map))
1335 		goto out;
1336 
1337 	ti = dm_table_find_target(map, bvm->bi_sector);
1338 	if (!dm_target_is_valid(ti))
1339 		goto out_table;
1340 
1341 	/*
1342 	 * Find maximum amount of I/O that won't need splitting
1343 	 */
1344 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
1345 			  (sector_t) BIO_MAX_SECTORS);
1346 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1347 	if (max_size < 0)
1348 		max_size = 0;
1349 
1350 	/*
1351 	 * merge_bvec_fn() returns the number of bytes
1352 	 * it can accept at this offset;
1353 	 * max_size is the precomputed maximal I/O size.
1354 	 */
1355 	if (max_size && ti->type->merge)
1356 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
1357 	/*
1358 	 * If the target doesn't support the merge method and some of the devices
1359 	 * provided their merge_bvec method (we know this by looking at
1360 	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1361 	 * entries.  So always set max_size to 0, and the code below allows
1362 	 * just one page.
1363 	 */
1364 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1365 
1366 		max_size = 0;
1367 
1368 out_table:
1369 	dm_table_put(map);
1370 
1371 out:
1372 	/*
1373 	 * Always allow an entire first page
1374 	 */
1375 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1376 		max_size = biovec->bv_len;
1377 
1378 	return max_size;
1379 }
1380 
1381 /*
1382  * The request function that just remaps the bio built up by
1383  * dm_merge_bvec.
1384  */
1385 static int _dm_request(struct request_queue *q, struct bio *bio)
1386 {
1387 	int rw = bio_data_dir(bio);
1388 	struct mapped_device *md = q->queuedata;
1389 	int cpu;
1390 
1391 	down_read(&md->io_lock);
1392 
1393 	cpu = part_stat_lock();
1394 	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1395 	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1396 	part_stat_unlock();
1397 
1398 	/* if we're suspended, we have to queue this io for later */
1399 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1400 		up_read(&md->io_lock);
1401 
1402 		if (bio_rw(bio) != READA)
1403 			queue_io(md, bio);
1404 		else
1405 			bio_io_error(bio);
1406 		return 0;
1407 	}
1408 
1409 	__split_and_process_bio(md, bio);
1410 	up_read(&md->io_lock);
1411 	return 0;
1412 }
1413 
1414 static int dm_make_request(struct request_queue *q, struct bio *bio)
1415 {
1416 	struct mapped_device *md = q->queuedata;
1417 
1418 	return md->saved_make_request_fn(q, bio); /* call __make_request() */
1419 }
1420 
1421 static int dm_request_based(struct mapped_device *md)
1422 {
1423 	return blk_queue_stackable(md->queue);
1424 }
1425 
1426 static int dm_request(struct request_queue *q, struct bio *bio)
1427 {
1428 	struct mapped_device *md = q->queuedata;
1429 
1430 	if (dm_request_based(md))
1431 		return dm_make_request(q, bio);
1432 
1433 	return _dm_request(q, bio);
1434 }
1435 
1436 void dm_dispatch_request(struct request *rq)
1437 {
1438 	int r;
1439 
1440 	if (blk_queue_io_stat(rq->q))
1441 		rq->cmd_flags |= REQ_IO_STAT;
1442 
1443 	rq->start_time = jiffies;
1444 	r = blk_insert_cloned_request(rq->q, rq);
1445 	if (r)
1446 		dm_complete_request(rq, r);
1447 }
1448 EXPORT_SYMBOL_GPL(dm_dispatch_request);
1449 
1450 static void dm_rq_bio_destructor(struct bio *bio)
1451 {
1452 	struct dm_rq_clone_bio_info *info = bio->bi_private;
1453 	struct mapped_device *md = info->tio->md;
1454 
1455 	free_bio_info(info);
1456 	bio_free(bio, md->bs);
1457 }
1458 
1459 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1460 				 void *data)
1461 {
1462 	struct dm_rq_target_io *tio = data;
1463 	struct mapped_device *md = tio->md;
1464 	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1465 
1466 	if (!info)
1467 		return -ENOMEM;
1468 
1469 	info->orig = bio_orig;
1470 	info->tio = tio;
1471 	bio->bi_end_io = end_clone_bio;
1472 	bio->bi_private = info;
1473 	bio->bi_destructor = dm_rq_bio_destructor;
1474 
1475 	return 0;
1476 }
1477 
1478 static int setup_clone(struct request *clone, struct request *rq,
1479 		       struct dm_rq_target_io *tio)
1480 {
1481 	int r;
1482 
1483 	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1484 			      dm_rq_bio_constructor, tio);
1485 	if (r)
1486 		return r;
1487 
1488 	clone->cmd = rq->cmd;
1489 	clone->cmd_len = rq->cmd_len;
1490 	clone->sense = rq->sense;
1491 	clone->buffer = rq->buffer;
1492 	clone->end_io = end_clone_request;
1493 	clone->end_io_data = tio;
1494 
1495 	return 0;
1496 }
1497 
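/* Allocate a dm_rq_target_io and prepare the request clone embedded in it. */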
1498 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1499 				gfp_t gfp_mask)
1500 {
1501 	struct request *clone;
1502 	struct dm_rq_target_io *tio;
1503 
1504 	tio = alloc_rq_tio(md, gfp_mask);
1505 	if (!tio)
1506 		return NULL;
1507 
1508 	tio->md = md;
1509 	tio->ti = NULL;
1510 	tio->orig = rq;
1511 	tio->error = 0;
1512 	memset(&tio->info, 0, sizeof(tio->info));
1513 
1514 	clone = &tio->clone;
1515 	if (setup_clone(clone, rq, tio)) {
1516 		/* -ENOMEM */
1517 		free_rq_tio(tio);
1518 		return NULL;
1519 	}
1520 
1521 	return clone;
1522 }
1523 
1524 /*
1525  * Called with the queue lock held.
1526  */
1527 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1528 {
1529 	struct mapped_device *md = q->queuedata;
1530 	struct request *clone;
1531 
1532 	if (unlikely(rq->special)) {
1533 		DMWARN("Already has something in rq->special.");
1534 		return BLKPREP_KILL;
1535 	}
1536 
1537 	clone = clone_rq(rq, md, GFP_ATOMIC);
1538 	if (!clone)
1539 		return BLKPREP_DEFER;
1540 
1541 	rq->special = clone;
1542 	rq->cmd_flags |= REQ_DONTPREP;
1543 
1544 	return BLKPREP_OK;
1545 }
1546 
1547 /*
1548  * Returns:
1549  * 0  : the request has been processed (not requeued)
1550  * !0 : the request has been requeued
1551  */
1552 static int map_request(struct dm_target *ti, struct request *clone,
1553 		       struct mapped_device *md)
1554 {
1555 	int r, requeued = 0;
1556 	struct dm_rq_target_io *tio = clone->end_io_data;
1557 
1558 	/*
1559 	 * Hold the md reference here for the in-flight I/O.
1560 	 * We can't rely on the reference count taken by the device opener,
1561 	 * because the device may be closed during the request completion
1562 	 * when all bios are completed.
1563 	 * See the comment in rq_completed() too.
1564 	 */
1565 	dm_get(md);
1566 
1567 	tio->ti = ti;
1568 	r = ti->type->map_rq(ti, clone, &tio->info);
1569 	switch (r) {
1570 	case DM_MAPIO_SUBMITTED:
1571 		/* The target has taken the I/O to submit by itself later */
1572 		break;
1573 	case DM_MAPIO_REMAPPED:
1574 		/* The target has remapped the I/O so dispatch it */
1575 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1576 				     blk_rq_pos(tio->orig));
1577 		dm_dispatch_request(clone);
1578 		break;
1579 	case DM_MAPIO_REQUEUE:
1580 		/* The target wants to requeue the I/O */
1581 		dm_requeue_unmapped_request(clone);
1582 		requeued = 1;
1583 		break;
1584 	default:
1585 		if (r > 0) {
1586 			DMWARN("unimplemented target map return value: %d", r);
1587 			BUG();
1588 		}
1589 
1590 		/* The target wants to complete the I/O */
1591 		dm_kill_unmapped_request(clone, r);
1592 		break;
1593 	}
1594 
1595 	return requeued;
1596 }
1597 
1598 /*
1599  * q->request_fn for request-based dm.
1600  * Called with the queue lock held.
1601  */
1602 static void dm_request_fn(struct request_queue *q)
1603 {
1604 	struct mapped_device *md = q->queuedata;
1605 	struct dm_table *map = dm_get_live_table(md);
1606 	struct dm_target *ti;
1607 	struct request *rq, *clone;
1608 	sector_t pos;
1609 
1610 	/*
1611 	 * For suspend, check blk_queue_stopped() and increment
1612 	 * ->pending within a single queue_lock so that we do not increment
1613 	 * the number of in-flight I/Os after the queue is stopped in
1614 	 * dm_suspend().
1615 	 */
1616 	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1617 		rq = blk_peek_request(q);
1618 		if (!rq)
1619 			goto plug_and_out;
1620 
1621 		/* always use block 0 to find the target for flushes for now */
1622 		pos = 0;
1623 		if (!(rq->cmd_flags & REQ_FLUSH))
1624 			pos = blk_rq_pos(rq);
1625 
1626 		ti = dm_table_find_target(map, pos);
1627 		BUG_ON(!dm_target_is_valid(ti));
1628 
1629 		if (ti->type->busy && ti->type->busy(ti))
1630 			goto plug_and_out;
1631 
1632 		blk_start_request(rq);
1633 		clone = rq->special;
1634 		atomic_inc(&md->pending[rq_data_dir(clone)]);
1635 
1636 		spin_unlock(q->queue_lock);
1637 		if (map_request(ti, clone, md))
1638 			goto requeued;
1639 
1640 		BUG_ON(!irqs_disabled());
1641 		spin_lock(q->queue_lock);
1642 	}
1643 
1644 	goto out;
1645 
1646 requeued:
1647 	BUG_ON(!irqs_disabled());
1648 	spin_lock(q->queue_lock);
1649 
1650 plug_and_out:
1651 	if (!elv_queue_empty(q))
1652 		/* Some requests still remain, retry later */
1653 		blk_plug_device(q);
1654 
1655 out:
1656 	dm_table_put(map);
1657 
1658 	return;
1659 }
1660 
1661 int dm_underlying_device_busy(struct request_queue *q)
1662 {
1663 	return blk_lld_busy(q);
1664 }
1665 EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1666 
1667 static int dm_lld_busy(struct request_queue *q)
1668 {
1669 	int r;
1670 	struct mapped_device *md = q->queuedata;
1671 	struct dm_table *map = dm_get_live_table(md);
1672 
1673 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1674 		r = 1;
1675 	else
1676 		r = dm_table_any_busy_target(map);
1677 
1678 	dm_table_put(map);
1679 
1680 	return r;
1681 }
1682 
1683 static void dm_unplug_all(struct request_queue *q)
1684 {
1685 	struct mapped_device *md = q->queuedata;
1686 	struct dm_table *map = dm_get_live_table(md);
1687 
1688 	if (map) {
1689 		if (dm_request_based(md))
1690 			generic_unplug_device(q);
1691 
1692 		dm_table_unplug_all(map);
1693 		dm_table_put(map);
1694 	}
1695 }
1696 
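/* Congestion callback: ask the live table (or our own queue) whether we are congested. */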
1697 static int dm_any_congested(void *congested_data, int bdi_bits)
1698 {
1699 	int r = bdi_bits;
1700 	struct mapped_device *md = congested_data;
1701 	struct dm_table *map;
1702 
1703 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1704 		map = dm_get_live_table(md);
1705 		if (map) {
1706 			/*
1707 			 * Request-based dm cares only about its own queue when
1708 			 * queried about the congestion status of the request_queue.
1709 			 */
1710 			if (dm_request_based(md))
1711 				r = md->queue->backing_dev_info.state &
1712 				    bdi_bits;
1713 			else
1714 				r = dm_table_any_congested(map, bdi_bits);
1715 
1716 			dm_table_put(map);
1717 		}
1718 	}
1719 
1720 	return r;
1721 }
1722 
1723 /*-----------------------------------------------------------------
1724  * An IDR is used to keep track of allocated minor numbers.
1725  *---------------------------------------------------------------*/
1726 static DEFINE_IDR(_minor_idr);
1727 
1728 static void free_minor(int minor)
1729 {
1730 	spin_lock(&_minor_lock);
1731 	idr_remove(&_minor_idr, minor);
1732 	spin_unlock(&_minor_lock);
1733 }
1734 
1735 /*
1736  * See if the device with a specific minor # is free.
1737  */
1738 static int specific_minor(int minor)
1739 {
1740 	int r, m;
1741 
1742 	if (minor >= (1 << MINORBITS))
1743 		return -EINVAL;
1744 
1745 	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1746 	if (!r)
1747 		return -ENOMEM;
1748 
1749 	spin_lock(&_minor_lock);
1750 
1751 	if (idr_find(&_minor_idr, minor)) {
1752 		r = -EBUSY;
1753 		goto out;
1754 	}
1755 
1756 	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1757 	if (r)
1758 		goto out;
1759 
1760 	if (m != minor) {
1761 		idr_remove(&_minor_idr, m);
1762 		r = -EBUSY;
1763 		goto out;
1764 	}
1765 
1766 out:
1767 	spin_unlock(&_minor_lock);
1768 	return r;
1769 }
1770 
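/* Allocate the next available minor number. */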
1771 static int next_free_minor(int *minor)
1772 {
1773 	int r, m;
1774 
1775 	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1776 	if (!r)
1777 		return -ENOMEM;
1778 
1779 	spin_lock(&_minor_lock);
1780 
1781 	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1782 	if (r)
1783 		goto out;
1784 
1785 	if (m >= (1 << MINORBITS)) {
1786 		idr_remove(&_minor_idr, m);
1787 		r = -ENOSPC;
1788 		goto out;
1789 	}
1790 
1791 	*minor = m;
1792 
1793 out:
1794 	spin_unlock(&_minor_lock);
1795 	return r;
1796 }
1797 
1798 static const struct block_device_operations dm_blk_dops;
1799 
1800 static void dm_wq_work(struct work_struct *work);
1801 
1802 static void dm_init_md_queue(struct mapped_device *md)
1803 {
1804 	/*
1805 	 * Request-based dm devices cannot be stacked on top of bio-based dm
1806 	 * devices.  The type of this dm device has not been decided yet.
1807 	 * The type is decided at the first table loading time.
1808 	 * To prevent problematic device stacking, clear the queue flag
1809 	 * for request stacking support until then.
1810 	 *
1811 	 * This queue is new, so no concurrency on the queue_flags.
1812 	 */
1813 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1814 
1815 	md->queue->queuedata = md;
1816 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
1817 	md->queue->backing_dev_info.congested_data = md;
1818 	blk_queue_make_request(md->queue, dm_request);
1819 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1820 	md->queue->unplug_fn = dm_unplug_all;
1821 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1822 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
1823 }
1824 
1825 /*
1826  * Allocate and initialise a blank device with a given minor.
1827  */
1828 static struct mapped_device *alloc_dev(int minor)
1829 {
1830 	int r;
1831 	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1832 	void *old_md;
1833 
1834 	if (!md) {
1835 		DMWARN("unable to allocate device, out of memory.");
1836 		return NULL;
1837 	}
1838 
1839 	if (!try_module_get(THIS_MODULE))
1840 		goto bad_module_get;
1841 
1842 	/* get a minor number for the dev */
1843 	if (minor == DM_ANY_MINOR)
1844 		r = next_free_minor(&minor);
1845 	else
1846 		r = specific_minor(minor);
1847 	if (r < 0)
1848 		goto bad_minor;
1849 
1850 	md->type = DM_TYPE_NONE;
1851 	init_rwsem(&md->io_lock);
1852 	mutex_init(&md->suspend_lock);
1853 	mutex_init(&md->type_lock);
1854 	spin_lock_init(&md->deferred_lock);
1855 	rwlock_init(&md->map_lock);
1856 	atomic_set(&md->holders, 1);
1857 	atomic_set(&md->open_count, 0);
1858 	atomic_set(&md->event_nr, 0);
1859 	atomic_set(&md->uevent_seq, 0);
1860 	INIT_LIST_HEAD(&md->uevent_list);
1861 	spin_lock_init(&md->uevent_lock);
1862 
1863 	md->queue = blk_alloc_queue(GFP_KERNEL);
1864 	if (!md->queue)
1865 		goto bad_queue;
1866 
1867 	dm_init_md_queue(md);
1868 
1869 	md->disk = alloc_disk(1);
1870 	if (!md->disk)
1871 		goto bad_disk;
1872 
1873 	atomic_set(&md->pending[0], 0);
1874 	atomic_set(&md->pending[1], 0);
1875 	init_waitqueue_head(&md->wait);
1876 	INIT_WORK(&md->work, dm_wq_work);
1877 	init_waitqueue_head(&md->eventq);
1878 
1879 	md->disk->major = _major;
1880 	md->disk->first_minor = minor;
1881 	md->disk->fops = &dm_blk_dops;
1882 	md->disk->queue = md->queue;
1883 	md->disk->private_data = md;
1884 	sprintf(md->disk->disk_name, "dm-%d", minor);
1885 	add_disk(md->disk);
1886 	format_dev_t(md->name, MKDEV(_major, minor));
1887 
1888 	md->wq = alloc_workqueue("kdmflush",
1889 				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1890 	if (!md->wq)
1891 		goto bad_thread;
1892 
1893 	md->bdev = bdget_disk(md->disk, 0);
1894 	if (!md->bdev)
1895 		goto bad_bdev;
1896 
1897 	bio_init(&md->flush_bio);
1898 	md->flush_bio.bi_bdev = md->bdev;
1899 	md->flush_bio.bi_rw = WRITE_FLUSH;
1900 
1901 	/* Populate the mapping, nobody knows we exist yet */
1902 	spin_lock(&_minor_lock);
1903 	old_md = idr_replace(&_minor_idr, md, minor);
1904 	spin_unlock(&_minor_lock);
1905 
1906 	BUG_ON(old_md != MINOR_ALLOCED);
1907 
1908 	return md;
1909 
1910 bad_bdev:
1911 	destroy_workqueue(md->wq);
1912 bad_thread:
1913 	del_gendisk(md->disk);
1914 	put_disk(md->disk);
1915 bad_disk:
1916 	blk_cleanup_queue(md->queue);
1917 bad_queue:
1918 	free_minor(minor);
1919 bad_minor:
1920 	module_put(THIS_MODULE);
1921 bad_module_get:
1922 	kfree(md);
1923 	return NULL;
1924 }
1925 
1926 static void unlock_fs(struct mapped_device *md);
1927 
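/* Free a mapped_device and all resources associated with it. */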
1928 static void free_dev(struct mapped_device *md)
1929 {
1930 	int minor = MINOR(disk_devt(md->disk));
1931 
1932 	unlock_fs(md);
1933 	bdput(md->bdev);
1934 	destroy_workqueue(md->wq);
1935 	if (md->tio_pool)
1936 		mempool_destroy(md->tio_pool);
1937 	if (md->io_pool)
1938 		mempool_destroy(md->io_pool);
1939 	if (md->bs)
1940 		bioset_free(md->bs);
1941 	blk_integrity_unregister(md->disk);
1942 	del_gendisk(md->disk);
1943 	free_minor(minor);
1944 
1945 	spin_lock(&_minor_lock);
1946 	md->disk->private_data = NULL;
1947 	spin_unlock(&_minor_lock);
1948 
1949 	put_disk(md->disk);
1950 	blk_cleanup_queue(md->queue);
1951 	module_put(THIS_MODULE);
1952 	kfree(md);
1953 }
1954 
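/*
 * Take ownership of the mempools that were pre-allocated while the table
 * was being loaded.
 */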
1955 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1956 {
1957 	struct dm_md_mempools *p;
1958 
1959 	if (md->io_pool && md->tio_pool && md->bs)
1960 		/* the md already has necessary mempools */
1961 		goto out;
1962 
1963 	p = dm_table_get_md_mempools(t);
1964 	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
1965 
1966 	md->io_pool = p->io_pool;
1967 	p->io_pool = NULL;
1968 	md->tio_pool = p->tio_pool;
1969 	p->tio_pool = NULL;
1970 	md->bs = p->bs;
1971 	p->bs = NULL;
1972 
1973 out:
1974 	/* mempool binding is complete; the table no longer needs any mempools */
1975 	dm_table_free_md_mempools(t);
1976 }
1977 
1978 /*
1979  * Bind a table to the device.
1980  */
1981 static void event_callback(void *context)
1982 {
1983 	unsigned long flags;
1984 	LIST_HEAD(uevents);
1985 	struct mapped_device *md = (struct mapped_device *) context;
1986 
1987 	spin_lock_irqsave(&md->uevent_lock, flags);
1988 	list_splice_init(&md->uevent_list, &uevents);
1989 	spin_unlock_irqrestore(&md->uevent_lock, flags);
1990 
1991 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1992 
1993 	atomic_inc(&md->event_nr);
1994 	wake_up(&md->eventq);
1995 }
1996 
1997 /*
1998  * Protected by md->suspend_lock obtained by dm_swap_table().
1999  */
2000 static void __set_size(struct mapped_device *md, sector_t size)
2001 {
2002 	set_capacity(md->disk, size);
2003 
2004 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2005 }
2006 
2007 /*
2008  * Returns old map, which caller must destroy.
2009  */
2010 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2011 			       struct queue_limits *limits)
2012 {
2013 	struct dm_table *old_map;
2014 	struct request_queue *q = md->queue;
2015 	sector_t size;
2016 	unsigned long flags;
2017 
2018 	size = dm_table_get_size(t);
2019 
2020 	/*
2021 	 * Wipe any geometry if the size of the table changed.
2022 	 */
2023 	if (size != get_capacity(md->disk))
2024 		memset(&md->geometry, 0, sizeof(md->geometry));
2025 
2026 	__set_size(md, size);
2027 
2028 	dm_table_event_callback(t, event_callback, md);
2029 
2030 	/*
2031 	 * The queue hasn't been stopped yet if the old table type wasn't
2032 	 * request-based during suspension, so stop it now to prevent
2033 	 * I/O from being mapped before resume.
2034 	 * This must be done before setting the queue restrictions,
2035 	 * because request-based dm may start running just after they are set.
2036 	 */
2037 	if (dm_table_request_based(t) && !blk_queue_stopped(q))
2038 		stop_queue(q);
2039 
2040 	__bind_mempools(md, t);
2041 
2042 	write_lock_irqsave(&md->map_lock, flags);
2043 	old_map = md->map;
2044 	md->map = t;
2045 	dm_table_set_restrictions(t, q, limits);
2046 	write_unlock_irqrestore(&md->map_lock, flags);
2047 
2048 	return old_map;
2049 }
2050 
2051 /*
2052  * Returns unbound table for the caller to free.
2053  */
2054 static struct dm_table *__unbind(struct mapped_device *md)
2055 {
2056 	struct dm_table *map = md->map;
2057 	unsigned long flags;
2058 
2059 	if (!map)
2060 		return NULL;
2061 
2062 	dm_table_event_callback(map, NULL, NULL);
2063 	write_lock_irqsave(&md->map_lock, flags);
2064 	md->map = NULL;
2065 	write_unlock_irqrestore(&md->map_lock, flags);
2066 
2067 	return map;
2068 }
2069 
2070 /*
2071  * Constructor for a new device.
2072  */
2073 int dm_create(int minor, struct mapped_device **result)
2074 {
2075 	struct mapped_device *md;
2076 
2077 	md = alloc_dev(minor);
2078 	if (!md)
2079 		return -ENXIO;
2080 
2081 	dm_sysfs_init(md);
2082 
2083 	*result = md;
2084 	return 0;
2085 }
2086 
2087 /*
2088  * Functions to manage md->type.
2089  * All are required to hold md->type_lock.
2090  */
2091 void dm_lock_md_type(struct mapped_device *md)
2092 {
2093 	mutex_lock(&md->type_lock);
2094 }
2095 
2096 void dm_unlock_md_type(struct mapped_device *md)
2097 {
2098 	mutex_unlock(&md->type_lock);
2099 }
2100 
2101 void dm_set_md_type(struct mapped_device *md, unsigned type)
2102 {
2103 	md->type = type;
2104 }
2105 
2106 unsigned dm_get_md_type(struct mapped_device *md)
2107 {
2108 	return md->type;
2109 }
2110 
2111 /*
2112  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2113  */
2114 static int dm_init_request_based_queue(struct mapped_device *md)
2115 {
2116 	struct request_queue *q = NULL;
2117 
2118 	if (md->queue->elevator)
2119 		return 1;
2120 
2121 	/* Fully initialize the queue */
2122 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2123 	if (!q)
2124 		return 0;
2125 
2126 	md->queue = q;
2127 	md->saved_make_request_fn = md->queue->make_request_fn;
2128 	dm_init_md_queue(md);
2129 	blk_queue_softirq_done(md->queue, dm_softirq_done);
2130 	blk_queue_prep_rq(md->queue, dm_prep_fn);
2131 	blk_queue_lld_busy(md->queue, dm_lld_busy);
2132 
2133 	elv_register_queue(md->queue);
2134 
2135 	return 1;
2136 }
2137 
2138 /*
2139  * Set up the DM device's queue based on md's type
2140  */
2141 int dm_setup_md_queue(struct mapped_device *md)
2142 {
2143 	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2144 	    !dm_init_request_based_queue(md)) {
2145 		DMWARN("Cannot initialize queue for request-based mapped device");
2146 		return -EINVAL;
2147 	}
2148 
2149 	return 0;
2150 }
2151 
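/*
 * Look up a mapped_device by dev_t.  Returns NULL if the number is out
 * of range, the minor is only reserved (MINOR_ALLOCED) or does not match
 * the disk, or the device is being deleted or freed.  Does not take a
 * reference; see dm_get_md() for that.
 */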
2152 static struct mapped_device *dm_find_md(dev_t dev)
2153 {
2154 	struct mapped_device *md;
2155 	unsigned minor = MINOR(dev);
2156 
2157 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2158 		return NULL;
2159 
2160 	spin_lock(&_minor_lock);
2161 
2162 	md = idr_find(&_minor_idr, minor);
2163 	if (md && (md == MINOR_ALLOCED ||
2164 		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
2165 		   dm_deleting_md(md) ||
2166 		   test_bit(DMF_FREEING, &md->flags))) {
2167 		md = NULL;
2168 		goto out;
2169 	}
2170 
2171 out:
2172 	spin_unlock(&_minor_lock);
2173 
2174 	return md;
2175 }
2176 
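/*
 * Like dm_find_md(), but also takes a reference on the device.
 * The caller must drop it with dm_put() when finished.
 */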
2177 struct mapped_device *dm_get_md(dev_t dev)
2178 {
2179 	struct mapped_device *md = dm_find_md(dev);
2180 
2181 	if (md)
2182 		dm_get(md);
2183 
2184 	return md;
2185 }
2186 
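/*
 * Get/set the opaque per-device pointer (md->interface_ptr) that callers
 * such as the dm ioctl layer use to attach their own state to the device.
 */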
2187 void *dm_get_mdptr(struct mapped_device *md)
2188 {
2189 	return md->interface_ptr;
2190 }
2191 
2192 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2193 {
2194 	md->interface_ptr = ptr;
2195 }
2196 
2197 void dm_get(struct mapped_device *md)
2198 {
2199 	atomic_inc(&md->holders);
2200 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2201 }
2202 
2203 const char *dm_device_name(struct mapped_device *md)
2204 {
2205 	return md->name;
2206 }
2207 EXPORT_SYMBOL_GPL(dm_device_name);
2208 
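/*
 * Common teardown for dm_destroy() and dm_destroy_immediate(): mark the
 * device DMF_FREEING so no new references can be taken, run the targets'
 * pre/post-suspend hooks if the device is not already suspended,
 * optionally wait for the holder count to drop to zero, then unbind the
 * table and free the device.
 */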
2209 static void __dm_destroy(struct mapped_device *md, bool wait)
2210 {
2211 	struct dm_table *map;
2212 
2213 	might_sleep();
2214 
2215 	spin_lock(&_minor_lock);
2216 	map = dm_get_live_table(md);
2217 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2218 	set_bit(DMF_FREEING, &md->flags);
2219 	spin_unlock(&_minor_lock);
2220 
2221 	if (!dm_suspended_md(md)) {
2222 		dm_table_presuspend_targets(map);
2223 		dm_table_postsuspend_targets(map);
2224 	}
2225 
2226 	/*
2227 	 * Rarely, I/O requests may still be outstanding and completing.
2228 	 * Wait for all references to disappear.  No one may take a new
2229 	 * reference on the mapped_device once its state has become
2230 	 * DMF_FREEING.
2231 	 */
2232 	if (wait)
2233 		while (atomic_read(&md->holders))
2234 			msleep(1);
2235 	else if (atomic_read(&md->holders))
2236 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2237 		       dm_device_name(md), atomic_read(&md->holders));
2238 
2239 	dm_sysfs_exit(md);
2240 	dm_table_put(map);
2241 	dm_table_destroy(__unbind(md));
2242 	free_dev(md);
2243 }
2244 
2245 void dm_destroy(struct mapped_device *md)
2246 {
2247 	__dm_destroy(md, true);
2248 }
2249 
2250 void dm_destroy_immediate(struct mapped_device *md)
2251 {
2252 	__dm_destroy(md, false);
2253 }
2254 
2255 void dm_put(struct mapped_device *md)
2256 {
2257 	atomic_dec(&md->holders);
2258 }
2259 EXPORT_SYMBOL_GPL(dm_put);
2260 
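/*
 * Unplug the queue and wait until no I/O is in flight on the device.
 * With TASK_INTERRUPTIBLE the wait is aborted with -EINTR if a signal
 * arrives; otherwise returns 0 once md_in_flight() reports the device idle.
 */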
2261 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2262 {
2263 	int r = 0;
2264 	DECLARE_WAITQUEUE(wait, current);
2265 
2266 	dm_unplug_all(md->queue);
2267 
2268 	add_wait_queue(&md->wait, &wait);
2269 
2270 	while (1) {
2271 		set_current_state(interruptible);
2272 
2273 		smp_mb();
2274 		if (!md_in_flight(md))
2275 			break;
2276 
2277 		if (interruptible == TASK_INTERRUPTIBLE &&
2278 		    signal_pending(current)) {
2279 			r = -EINTR;
2280 			break;
2281 		}
2282 
2283 		io_schedule();
2284 	}
2285 	set_current_state(TASK_RUNNING);
2286 
2287 	remove_wait_queue(&md->wait, &wait);
2288 
2289 	return r;
2290 }
2291 
2292 /*
2293  * Process the deferred bios
2294  */
2295 static void dm_wq_work(struct work_struct *work)
2296 {
2297 	struct mapped_device *md = container_of(work, struct mapped_device,
2298 						work);
2299 	struct bio *c;
2300 
2301 	down_read(&md->io_lock);
2302 
2303 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2304 		spin_lock_irq(&md->deferred_lock);
2305 		c = bio_list_pop(&md->deferred);
2306 		spin_unlock_irq(&md->deferred_lock);
2307 
2308 		if (!c)
2309 			break;
2310 
2311 		up_read(&md->io_lock);
2312 
2313 		if (dm_request_based(md))
2314 			generic_make_request(c);
2315 		else
2316 			__split_and_process_bio(md, c);
2317 
2318 		down_read(&md->io_lock);
2319 	}
2320 
2321 	up_read(&md->io_lock);
2322 }
2323 
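/*
 * Clear DMF_BLOCK_IO_FOR_SUSPEND so incoming bios are no longer deferred,
 * and kick the per-device workqueue so dm_wq_work() processes the bios
 * that were deferred while I/O was blocked.
 */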
2324 static void dm_queue_flush(struct mapped_device *md)
2325 {
2326 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2327 	smp_mb__after_clear_bit();
2328 	queue_work(md->wq, &md->work);
2329 }
2330 
2331 /*
2332  * Swap in a new table, returning the old one for the caller to destroy.
2333  */
2334 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2335 {
2336 	struct dm_table *map = ERR_PTR(-EINVAL);
2337 	struct queue_limits limits;
2338 	int r;
2339 
2340 	mutex_lock(&md->suspend_lock);
2341 
2342 	/* device must be suspended */
2343 	if (!dm_suspended_md(md))
2344 		goto out;
2345 
2346 	r = dm_calculate_queue_limits(table, &limits);
2347 	if (r) {
2348 		map = ERR_PTR(r);
2349 		goto out;
2350 	}
2351 
2352 	map = __bind(md, table, &limits);
2353 
2354 out:
2355 	mutex_unlock(&md->suspend_lock);
2356 	return map;
2357 }
2358 
2359 /*
2360  * Functions to lock and unlock any filesystem running on the
2361  * device.
2362  */
2363 static int lock_fs(struct mapped_device *md)
2364 {
2365 	int r;
2366 
2367 	WARN_ON(md->frozen_sb);
2368 
2369 	md->frozen_sb = freeze_bdev(md->bdev);
2370 	if (IS_ERR(md->frozen_sb)) {
2371 		r = PTR_ERR(md->frozen_sb);
2372 		md->frozen_sb = NULL;
2373 		return r;
2374 	}
2375 
2376 	set_bit(DMF_FROZEN, &md->flags);
2377 
2378 	return 0;
2379 }
2380 
2381 static void unlock_fs(struct mapped_device *md)
2382 {
2383 	if (!test_bit(DMF_FROZEN, &md->flags))
2384 		return;
2385 
2386 	thaw_bdev(md->bdev, md->frozen_sb);
2387 	md->frozen_sb = NULL;
2388 	clear_bit(DMF_FROZEN, &md->flags);
2389 }
2390 
2391 /*
2392  * We need to be able to change a mapping table under a mounted
2393  * filesystem.  For example we might want to move some data in
2394  * the background.  Before the table can be swapped with
2395  * dm_swap_table, dm_suspend must be called to flush any in-flight
2396  * bios and ensure that any further I/O gets deferred.
2397  */
2398 /*
2399  * Suspend mechanism in request-based dm.
2400  *
2401  * 1. Flush all I/Os by lock_fs() if needed.
2402  * 2. Stop dispatching any I/O by stopping the request_queue.
2403  * 3. Wait for all in-flight I/Os to be completed or requeued.
2404  *
2405  * To abort suspend, start the request_queue.
2406  */
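/*
 * Illustrative caller sequence for replacing the table of a live device
 * (a sketch only; md and new_table are the caller's device and new table,
 * and a real caller, e.g. the dm ioctl path, adds error handling and
 * reference counting):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */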
2407 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2408 {
2409 	struct dm_table *map = NULL;
2410 	int r = 0;
2411 	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2412 	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2413 
2414 	mutex_lock(&md->suspend_lock);
2415 
2416 	if (dm_suspended_md(md)) {
2417 		r = -EINVAL;
2418 		goto out_unlock;
2419 	}
2420 
2421 	map = dm_get_live_table(md);
2422 
2423 	/*
2424 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2425 	 * This flag is cleared before dm_suspend returns.
2426 	 */
2427 	if (noflush)
2428 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2429 
2430 	/* This does not get reverted if there's an error later. */
2431 	dm_table_presuspend_targets(map);
2432 
2433 	/*
2434 	 * Flush I/O to the device.
2435 	 * Any I/O submitted after lock_fs() may not be flushed.
2436 	 * noflush takes precedence over do_lockfs.
2437 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2438 	 */
2439 	if (!noflush && do_lockfs) {
2440 		r = lock_fs(md);
2441 		if (r)
2442 			goto out;
2443 	}
2444 
2445 	/*
2446 	 * Here we must make sure that no processes are submitting requests
2447 	 * to target drivers i.e. no one may be executing
2448 	 * __split_and_process_bio. This is called from dm_request and
2449 	 * dm_wq_work.
2450 	 *
2451 	 * To get all processes out of __split_and_process_bio in dm_request,
2452 	 * we take the write lock. To prevent any process from reentering
2453 	 * __split_and_process_bio from dm_request and quiesce the thread
2454 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2455 	 * flush_workqueue(md->wq).
2456 	 */
2457 	down_write(&md->io_lock);
2458 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2459 	up_write(&md->io_lock);
2460 
2461 	/*
2462 	 * Stop md->queue before flushing md->wq in case request-based
2463 	 * dm defers requests to md->wq from md->queue.
2464 	 */
2465 	if (dm_request_based(md))
2466 		stop_queue(md->queue);
2467 
2468 	flush_workqueue(md->wq);
2469 
2470 	/*
2471 	 * At this point no more requests are entering target request routines.
2472 	 * We call dm_wait_for_completion to wait for all existing requests
2473 	 * to finish.
2474 	 */
2475 	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2476 
2477 	down_write(&md->io_lock);
2478 	if (noflush)
2479 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2480 	up_write(&md->io_lock);
2481 
2482 	/* were we interrupted? */
2483 	if (r < 0) {
2484 		dm_queue_flush(md);
2485 
2486 		if (dm_request_based(md))
2487 			start_queue(md->queue);
2488 
2489 		unlock_fs(md);
2490 		goto out; /* pushback list is already flushed, so skip flush */
2491 	}
2492 
2493 	/*
2494 	 * If dm_wait_for_completion returned 0, the device is completely
2495 	 * quiescent now. There is no request-processing activity. All new
2496 	 * requests are being added to md->deferred list.
2497 	 */
2498 
2499 	set_bit(DMF_SUSPENDED, &md->flags);
2500 
2501 	dm_table_postsuspend_targets(map);
2502 
2503 out:
2504 	dm_table_put(map);
2505 
2506 out_unlock:
2507 	mutex_unlock(&md->suspend_lock);
2508 	return r;
2509 }
2510 
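/*
 * Bring a suspended device back online: resume the targets, re-issue the
 * deferred bios (or restart the request_queue for request-based dm),
 * unfreeze the filesystem and clear DMF_SUSPENDED.
 */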
2511 int dm_resume(struct mapped_device *md)
2512 {
2513 	int r = -EINVAL;
2514 	struct dm_table *map = NULL;
2515 
2516 	mutex_lock(&md->suspend_lock);
2517 	if (!dm_suspended_md(md))
2518 		goto out;
2519 
2520 	map = dm_get_live_table(md);
2521 	if (!map || !dm_table_get_size(map))
2522 		goto out;
2523 
2524 	r = dm_table_resume_targets(map);
2525 	if (r)
2526 		goto out;
2527 
2528 	dm_queue_flush(md);
2529 
2530 	/*
2531 	 * Flushing deferred I/Os must be done after targets are resumed
2532 	 * so that mapping of targets can work correctly.
2533 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2534 	 */
2535 	if (dm_request_based(md))
2536 		start_queue(md->queue);
2537 
2538 	unlock_fs(md);
2539 
2540 	clear_bit(DMF_SUSPENDED, &md->flags);
2541 
2542 	dm_table_unplug_all(map);
2543 	r = 0;
2544 out:
2545 	dm_table_put(map);
2546 	mutex_unlock(&md->suspend_lock);
2547 
2548 	return r;
2549 }
2550 
2551 /*-----------------------------------------------------------------
2552  * Event notification.
2553  *---------------------------------------------------------------*/
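/*
 * Send a uevent for the device, exporting the cookie (if any) to
 * userspace in the DM_COOKIE environment variable.
 */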
2554 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2555 		       unsigned cookie)
2556 {
2557 	char udev_cookie[DM_COOKIE_LENGTH];
2558 	char *envp[] = { udev_cookie, NULL };
2559 
2560 	if (!cookie)
2561 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2562 	else {
2563 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2564 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2565 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2566 					  action, envp);
2567 	}
2568 }
2569 
2570 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2571 {
2572 	return atomic_add_return(1, &md->uevent_seq);
2573 }
2574 
2575 uint32_t dm_get_event_nr(struct mapped_device *md)
2576 {
2577 	return atomic_read(&md->event_nr);
2578 }
2579 
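/*
 * Block until the device's event counter no longer matches event_nr.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */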
2580 int dm_wait_event(struct mapped_device *md, int event_nr)
2581 {
2582 	return wait_event_interruptible(md->eventq,
2583 			(event_nr != atomic_read(&md->event_nr)));
2584 }
2585 
2586 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2587 {
2588 	unsigned long flags;
2589 
2590 	spin_lock_irqsave(&md->uevent_lock, flags);
2591 	list_add(elist, &md->uevent_list);
2592 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2593 }
2594 
2595 /*
2596  * The gendisk is only valid as long as you hold a reference
2597  * on 'md'.
2598  */
2599 struct gendisk *dm_disk(struct mapped_device *md)
2600 {
2601 	return md->disk;
2602 }
2603 
2604 struct kobject *dm_kobject(struct mapped_device *md)
2605 {
2606 	return &md->kobj;
2607 }
2608 
2609 /*
2610  * struct mapped_device should not be exported outside of dm.c,
2611  * so use this check to verify that kobj is part of the md structure.
2612  */
2613 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2614 {
2615 	struct mapped_device *md;
2616 
2617 	md = container_of(kobj, struct mapped_device, kobj);
2618 	if (&md->kobj != kobj)
2619 		return NULL;
2620 
2621 	if (test_bit(DMF_FREEING, &md->flags) ||
2622 	    dm_deleting_md(md))
2623 		return NULL;
2624 
2625 	dm_get(md);
2626 	return md;
2627 }
2628 
2629 int dm_suspended_md(struct mapped_device *md)
2630 {
2631 	return test_bit(DMF_SUSPENDED, &md->flags);
2632 }
2633 
2634 int dm_suspended(struct dm_target *ti)
2635 {
2636 	return dm_suspended_md(dm_table_get_md(ti->table));
2637 }
2638 EXPORT_SYMBOL_GPL(dm_suspended);
2639 
2640 int dm_noflush_suspending(struct dm_target *ti)
2641 {
2642 	return __noflush_suspending(dm_table_get_md(ti->table));
2643 }
2644 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2645 
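/*
 * Allocate the per-device mempools and bioset.  Bio-based and
 * request-based devices draw their io/tio structures from different
 * slab caches, so the pools are sized and typed according to 'type'.
 */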
2646 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2647 {
2648 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2649 
2650 	if (!pools)
2651 		return NULL;
2652 
2653 	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2654 			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2655 			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2656 	if (!pools->io_pool)
2657 		goto free_pools_and_out;
2658 
2659 	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2660 			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2661 			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2662 	if (!pools->tio_pool)
2663 		goto free_io_pool_and_out;
2664 
2665 	pools->bs = (type == DM_TYPE_BIO_BASED) ?
2666 		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2667 	if (!pools->bs)
2668 		goto free_tio_pool_and_out;
2669 
2670 	return pools;
2671 
2672 free_tio_pool_and_out:
2673 	mempool_destroy(pools->tio_pool);
2674 
2675 free_io_pool_and_out:
2676 	mempool_destroy(pools->io_pool);
2677 
2678 free_pools_and_out:
2679 	kfree(pools);
2680 
2681 	return NULL;
2682 }
2683 
2684 void dm_free_md_mempools(struct dm_md_mempools *pools)
2685 {
2686 	if (!pools)
2687 		return;
2688 
2689 	if (pools->io_pool)
2690 		mempool_destroy(pools->io_pool);
2691 
2692 	if (pools->tio_pool)
2693 		mempool_destroy(pools->tio_pool);
2694 
2695 	if (pools->bs)
2696 		bioset_free(pools->bs);
2697 
2698 	kfree(pools);
2699 }
2700 
2701 static const struct block_device_operations dm_blk_dops = {
2702 	.open = dm_blk_open,
2703 	.release = dm_blk_close,
2704 	.ioctl = dm_blk_ioctl,
2705 	.getgeo = dm_blk_getgeo,
2706 	.owner = THIS_MODULE
2707 };
2708 
2709 EXPORT_SYMBOL(dm_get_mapinfo);
2710 
2711 /*
2712  * module hooks
2713  */
2714 module_init(dm_init);
2715 module_exit(dm_exit);
2716 
2717 module_param(major, uint, 0);
2718 MODULE_PARM_DESC(major, "The major number of the device mapper");
2719 MODULE_DESCRIPTION(DM_NAME " driver");
2720 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2721 MODULE_LICENSE("GPL");
2722