xref: /openbmc/linux/drivers/md/dm-thin.c (revision 1f9f6a78)
1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10 
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/log2.h>
15 #include <linux/list.h>
16 #include <linux/rculist.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/sort.h>
21 #include <linux/rbtree.h>
22 
23 #define	DM_MSG_PREFIX	"thin"
24 
25 /*
26  * Tunable constants
27  */
28 #define ENDIO_HOOK_POOL_SIZE 1024
29 #define MAPPING_POOL_SIZE 1024
30 #define COMMIT_PERIOD HZ
31 #define NO_SPACE_TIMEOUT_SECS 60
32 
33 static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
34 
35 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
36 		"A percentage of time allocated for copy on write");
37 
38 /*
39  * The block size of the device holding pool data must be
40  * between 64KB and 1GB.
41  */
42 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
43 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
44 
45 /*
46  * Device id is restricted to 24 bits.
47  */
48 #define MAX_DEV_ID ((1 << 24) - 1)
49 
50 /*
51  * How do we handle breaking sharing of data blocks?
52  * =================================================
53  *
54  * We use a standard copy-on-write btree to store the mappings for the
55  * devices (note I'm talking about copy-on-write of the metadata here, not
56  * the data).  When you take an internal snapshot you clone the root node
57  * of the origin btree.  After this there is no concept of an origin or a
58  * snapshot.  They are just two device trees that happen to point to the
59  * same data blocks.
60  *
61  * When we get a write in we decide if it's to a shared data block using
62  * some timestamp magic.  If it is, we have to break sharing.
63  *
64  * Let's say we write to a shared block in what was the origin.  The
65  * steps are:
66  *
67  * i) plug further io to this physical block. (see bio_prison code).
68  *
69  * ii) quiesce any read io to that shared data block.  Obviously
70  * including all devices that share this block.  (see dm_deferred_set code)
71  *
72  * iii) copy the data block to a newly allocated block.  This step can be
73  * skipped if the io covers the whole block. (schedule_copy).
74  *
75  * iv) insert the new mapping into the origin's btree
76  * (process_prepared_mapping).  This act of inserting breaks some
77  * sharing of btree nodes between the two devices.  Breaking sharing only
78  * affects the btree of that specific device.  Btrees for the other
79  * devices that share the block never change.  The btree for the origin
80  * device as it was after the last commit is untouched, i.e. we're using
81  * persistent data structures in the functional programming sense.
82  *
83  * v) unplug io to this physical block, including the io that triggered
84  * the breaking of sharing.
85  *
86  * Steps (ii) and (iii) occur in parallel.
87  *
88  * The metadata _doesn't_ need to be committed before the io continues.  We
89  * get away with this because the io is always written to a _new_ block.
90  * If there's a crash, then:
91  *
92  * - The origin mapping will point to the old origin block (the shared
93  * one).  This will contain the data as it was before the io that triggered
94  * the breaking of sharing came in.
95  *
96  * - The snap mapping still points to the old block.  As it would after
97  * the commit.
98  *
99  * The downside of this scheme is that the timestamp magic isn't perfect:
100  * it will continue to think that the data block in the snapshot device is shared
101  * even after the write to the origin has broken sharing.  I suspect data
102  * blocks will typically be shared by many different devices, so we're
103  * breaking sharing n + 1 times, rather than n, where n is the number of
104  * devices that reference this data block.  At the moment I think the
105  * benefits far, far outweigh the disadvantages.
106  */
107 
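/*
 * A minimal sketch of the write path above, in terms of helpers defined
 * later in this file (see process_shared_bio() and break_sharing() for
 * the real code; locking, the block lookup and error handling are all
 * omitted here):
 *
 *	build_data_key(tc->td, lookup_result->block, &key);
 *	if (bio_detain(pool, &key, bio, &cell))        step (i): plug io
 *		return;
 *	alloc_data_block(tc, &data_block);
 *	schedule_internal_copy(tc, block, lookup_result->block,
 *			       data_block, cell, bio);  steps (ii)/(iii)
 *
 * Steps (iv) and (v) run later from the worker, in
 * process_prepared_mapping(), once the copy has completed and the
 * deferred set has quiesced.
 */
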
108 /*----------------------------------------------------------------*/
109 
110 /*
111  * Key building.
112  */
113 static void build_data_key(struct dm_thin_device *td,
114 			   dm_block_t b, struct dm_cell_key *key)
115 {
116 	key->virtual = 0;
117 	key->dev = dm_thin_dev_id(td);
118 	key->block_begin = b;
119 	key->block_end = b + 1ULL;
120 }
121 
122 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
123 			      struct dm_cell_key *key)
124 {
125 	key->virtual = 1;
126 	key->dev = dm_thin_dev_id(td);
127 	key->block_begin = b;
128 	key->block_end = b + 1ULL;
129 }
130 
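/*
 * Typical use of these keys (a sketch mirroring process_bio() below):
 * build a key for the virtual block a bio maps to, then try to detain
 * the bio in the prison so concurrent io to that block is serialised:
 *
 *	struct dm_cell_key key;
 *	struct dm_bio_prison_cell *cell;
 *
 *	build_virtual_key(tc->td, get_bio_block(tc, bio), &key);
 *	if (bio_detain(tc->pool, &key, bio, &cell))
 *		return;	    the block already has a holder
 */
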
131 /*----------------------------------------------------------------*/
132 
133 #define THROTTLE_THRESHOLD (1 * HZ)
134 
135 struct throttle {
136 	struct rw_semaphore lock;
137 	unsigned long threshold;
138 	bool throttle_applied;
139 };
140 
141 static void throttle_init(struct throttle *t)
142 {
143 	init_rwsem(&t->lock);
144 	t->throttle_applied = false;
145 }
146 
147 static void throttle_work_start(struct throttle *t)
148 {
149 	t->threshold = jiffies + THROTTLE_THRESHOLD;
150 }
151 
152 static void throttle_work_update(struct throttle *t)
153 {
154 	if (!t->throttle_applied && jiffies > t->threshold) {
155 		down_write(&t->lock);
156 		t->throttle_applied = true;
157 	}
158 }
159 
160 static void throttle_work_complete(struct throttle *t)
161 {
162 	if (t->throttle_applied) {
163 		t->throttle_applied = false;
164 		up_write(&t->lock);
165 	}
166 }
167 
168 static void throttle_lock(struct throttle *t)
169 {
170 	down_read(&t->lock);
171 }
172 
173 static void throttle_unlock(struct throttle *t)
174 {
175 	up_read(&t->lock);
176 }
177 
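/*
 * Intended usage: the worker brackets each pass with throttle_work_start()
 * and throttle_work_complete(), calling throttle_work_update() as it goes
 * (see do_worker() below).  If a pass outlives THROTTLE_THRESHOLD, the
 * semaphore is taken for write, stalling callers of throttle_lock() (the
 * bio submission paths) until the worker has drained its backlog.
 */
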
178 /*----------------------------------------------------------------*/
179 
180 /*
181  * A pool device ties together a metadata device and a data device.  It
182  * also provides the interface for creating and destroying internal
183  * devices.
184  */
185 struct dm_thin_new_mapping;
186 
187 /*
188  * The pool runs in 4 modes, ordered from least to most degraded for comparisons.
189  */
190 enum pool_mode {
191 	PM_WRITE,		/* metadata may be changed */
192 	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
193 	PM_READ_ONLY,		/* metadata may not be changed */
194 	PM_FAIL,		/* all I/O fails */
195 };
196 
197 struct pool_features {
198 	enum pool_mode mode;
199 
200 	bool zero_new_blocks:1;
201 	bool discard_enabled:1;
202 	bool discard_passdown:1;
203 	bool error_if_no_space:1;
204 };
205 
206 struct thin_c;
207 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
208 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
209 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
210 
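/*
 * These handlers are switched by set_pool_mode() (declared below)
 * according to the pool's pool_mode, e.g. process_bio() in PM_WRITE vs
 * process_bio_read_only() or process_bio_fail() in the degraded modes,
 * so a mode change is a pointer swap rather than a test on every bio.
 */
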
211 #define CELL_SORT_ARRAY_SIZE 8192
212 
213 struct pool {
214 	struct list_head list;
215 	struct dm_target *ti;	/* Only set if a pool target is bound */
216 
217 	struct mapped_device *pool_md;
218 	struct block_device *md_dev;
219 	struct dm_pool_metadata *pmd;
220 
221 	dm_block_t low_water_blocks;
222 	uint32_t sectors_per_block;
223 	int sectors_per_block_shift;
224 
225 	struct pool_features pf;
226 	bool low_water_triggered:1;	/* A dm event has been sent */
227 	bool suspended:1;
228 
229 	struct dm_bio_prison *prison;
230 	struct dm_kcopyd_client *copier;
231 
232 	struct workqueue_struct *wq;
233 	struct throttle throttle;
234 	struct work_struct worker;
235 	struct delayed_work waker;
236 	struct delayed_work no_space_timeout;
237 
238 	unsigned long last_commit_jiffies;
239 	unsigned ref_count;
240 
241 	spinlock_t lock;
242 	struct bio_list deferred_flush_bios;
243 	struct list_head prepared_mappings;
244 	struct list_head prepared_discards;
245 	struct list_head active_thins;
246 
247 	struct dm_deferred_set *shared_read_ds;
248 	struct dm_deferred_set *all_io_ds;
249 
250 	struct dm_thin_new_mapping *next_mapping;
251 	mempool_t *mapping_pool;
252 
253 	process_bio_fn process_bio;
254 	process_bio_fn process_discard;
255 
256 	process_cell_fn process_cell;
257 	process_cell_fn process_discard_cell;
258 
259 	process_mapping_fn process_prepared_mapping;
260 	process_mapping_fn process_prepared_discard;
261 
262 	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
263 };
264 
265 static enum pool_mode get_pool_mode(struct pool *pool);
266 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
267 
268 /*
269  * Target context for a pool.
270  */
271 struct pool_c {
272 	struct dm_target *ti;
273 	struct pool *pool;
274 	struct dm_dev *data_dev;
275 	struct dm_dev *metadata_dev;
276 	struct dm_target_callbacks callbacks;
277 
278 	dm_block_t low_water_blocks;
279 	struct pool_features requested_pf; /* Features requested during table load */
280 	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
281 };
282 
283 /*
284  * Target context for a thin.
285  */
286 struct thin_c {
287 	struct list_head list;
288 	struct dm_dev *pool_dev;
289 	struct dm_dev *origin_dev;
290 	sector_t origin_size;
291 	dm_thin_id dev_id;
292 
293 	struct pool *pool;
294 	struct dm_thin_device *td;
295 	struct mapped_device *thin_md;
296 
297 	bool requeue_mode:1;
298 	spinlock_t lock;
299 	struct list_head deferred_cells;
300 	struct bio_list deferred_bio_list;
301 	struct bio_list retry_on_resume_list;
302 	struct rb_root sort_bio_list; /* sorted list of deferred bios */
303 
304 	/*
305 	 * Ensures the thin is not destroyed until the worker has finished
306 	 * iterating the active_thins list.
307 	 */
308 	atomic_t refcount;
309 	struct completion can_destroy;
310 };
311 
312 /*----------------------------------------------------------------*/
313 
314 /*
315  * wake_worker() is used when new work is queued and when pool_resume is
316  * ready to continue deferred IO processing.
317  */
318 static void wake_worker(struct pool *pool)
319 {
320 	queue_work(pool->wq, &pool->worker);
321 }
322 
323 /*----------------------------------------------------------------*/
324 
325 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
326 		      struct dm_bio_prison_cell **cell_result)
327 {
328 	int r;
329 	struct dm_bio_prison_cell *cell_prealloc;
330 
331 	/*
332 	 * Allocate a cell from the prison's mempool.
333 	 * This might block but it can't fail.
334 	 */
335 	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
336 
337 	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
338 	if (r)
339 		/*
340 		 * We reused an old cell; we can get rid of
341 		 * the new one.
342 		 */
343 		dm_bio_prison_free_cell(pool->prison, cell_prealloc);
344 
345 	return r;
346 }
347 
348 static void cell_release(struct pool *pool,
349 			 struct dm_bio_prison_cell *cell,
350 			 struct bio_list *bios)
351 {
352 	dm_cell_release(pool->prison, cell, bios);
353 	dm_bio_prison_free_cell(pool->prison, cell);
354 }
355 
356 static void cell_visit_release(struct pool *pool,
357 			       void (*fn)(void *, struct dm_bio_prison_cell *),
358 			       void *context,
359 			       struct dm_bio_prison_cell *cell)
360 {
361 	dm_cell_visit_release(pool->prison, fn, context, cell);
362 	dm_bio_prison_free_cell(pool->prison, cell);
363 }
364 
365 static void cell_release_no_holder(struct pool *pool,
366 				   struct dm_bio_prison_cell *cell,
367 				   struct bio_list *bios)
368 {
369 	dm_cell_release_no_holder(pool->prison, cell, bios);
370 	dm_bio_prison_free_cell(pool->prison, cell);
371 }
372 
373 static void cell_error_with_code(struct pool *pool,
374 				 struct dm_bio_prison_cell *cell, int error_code)
375 {
376 	dm_cell_error(pool->prison, cell, error_code);
377 	dm_bio_prison_free_cell(pool->prison, cell);
378 }
379 
380 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
381 {
382 	cell_error_with_code(pool, cell, -EIO);
383 }
384 
385 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
386 {
387 	cell_error_with_code(pool, cell, 0);
388 }
389 
390 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
391 {
392 	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
393 }
394 
395 /*----------------------------------------------------------------*/
396 
397 /*
398  * A global list of pools, keyed by struct mapped_device.
399  */
400 static struct dm_thin_pool_table {
401 	struct mutex mutex;
402 	struct list_head pools;
403 } dm_thin_pool_table;
404 
405 static void pool_table_init(void)
406 {
407 	mutex_init(&dm_thin_pool_table.mutex);
408 	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
409 }
410 
411 static void __pool_table_insert(struct pool *pool)
412 {
413 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
414 	list_add(&pool->list, &dm_thin_pool_table.pools);
415 }
416 
417 static void __pool_table_remove(struct pool *pool)
418 {
419 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
420 	list_del(&pool->list);
421 }
422 
423 static struct pool *__pool_table_lookup(struct mapped_device *md)
424 {
425 	struct pool *pool = NULL, *tmp;
426 
427 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
428 
429 	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
430 		if (tmp->pool_md == md) {
431 			pool = tmp;
432 			break;
433 		}
434 	}
435 
436 	return pool;
437 }
438 
439 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
440 {
441 	struct pool *pool = NULL, *tmp;
442 
443 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
444 
445 	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
446 		if (tmp->md_dev == md_dev) {
447 			pool = tmp;
448 			break;
449 		}
450 	}
451 
452 	return pool;
453 }
454 
455 /*----------------------------------------------------------------*/
456 
457 struct dm_thin_endio_hook {
458 	struct thin_c *tc;
459 	struct dm_deferred_entry *shared_read_entry;
460 	struct dm_deferred_entry *all_io_entry;
461 	struct dm_thin_new_mapping *overwrite_mapping;
462 	struct rb_node rb_node;
463 };
464 
465 static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
466 {
467 	bio_list_merge(bios, master);
468 	bio_list_init(master);
469 }
470 
471 static void error_bio_list(struct bio_list *bios, int error)
472 {
473 	struct bio *bio;
474 
475 	while ((bio = bio_list_pop(bios)))
476 		bio_endio(bio, error);
477 }
478 
479 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
480 {
481 	struct bio_list bios;
482 	unsigned long flags;
483 
484 	bio_list_init(&bios);
485 
486 	spin_lock_irqsave(&tc->lock, flags);
487 	__merge_bio_list(&bios, master);
488 	spin_unlock_irqrestore(&tc->lock, flags);
489 
490 	error_bio_list(&bios, error);
491 }
492 
493 static void requeue_deferred_cells(struct thin_c *tc)
494 {
495 	struct pool *pool = tc->pool;
496 	unsigned long flags;
497 	struct list_head cells;
498 	struct dm_bio_prison_cell *cell, *tmp;
499 
500 	INIT_LIST_HEAD(&cells);
501 
502 	spin_lock_irqsave(&tc->lock, flags);
503 	list_splice_init(&tc->deferred_cells, &cells);
504 	spin_unlock_irqrestore(&tc->lock, flags);
505 
506 	list_for_each_entry_safe(cell, tmp, &cells, user_list)
507 		cell_requeue(pool, cell);
508 }
509 
510 static void requeue_io(struct thin_c *tc)
511 {
512 	struct bio_list bios;
513 	unsigned long flags;
514 
515 	bio_list_init(&bios);
516 
517 	spin_lock_irqsave(&tc->lock, flags);
518 	__merge_bio_list(&bios, &tc->deferred_bio_list);
519 	__merge_bio_list(&bios, &tc->retry_on_resume_list);
520 	spin_unlock_irqrestore(&tc->lock, flags);
521 
522 	error_bio_list(&bios, DM_ENDIO_REQUEUE);
523 	requeue_deferred_cells(tc);
524 }
525 
526 static void error_retry_list(struct pool *pool)
527 {
528 	struct thin_c *tc;
529 
530 	rcu_read_lock();
531 	list_for_each_entry_rcu(tc, &pool->active_thins, list)
532 		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
533 	rcu_read_unlock();
534 }
535 
536 /*
537  * This section of code contains the logic for processing a thin device's IO.
538  * Much of the code depends on pool object resources (lists, workqueues, etc.)
539  * but most is exclusively called from the thin target rather than the thin-pool
540  * target.
541  */
542 
543 static bool block_size_is_power_of_two(struct pool *pool)
544 {
545 	return pool->sectors_per_block_shift >= 0;
546 }
547 
548 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
549 {
550 	struct pool *pool = tc->pool;
551 	sector_t block_nr = bio->bi_iter.bi_sector;
552 
553 	if (block_size_is_power_of_two(pool))
554 		block_nr >>= pool->sectors_per_block_shift;
555 	else
556 		(void) sector_div(block_nr, pool->sectors_per_block);
557 
558 	return block_nr;
559 }
560 
561 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
562 {
563 	struct pool *pool = tc->pool;
564 	sector_t bi_sector = bio->bi_iter.bi_sector;
565 
566 	bio->bi_bdev = tc->pool_dev->bdev;
567 	if (block_size_is_power_of_two(pool))
568 		bio->bi_iter.bi_sector =
569 			(block << pool->sectors_per_block_shift) |
570 			(bi_sector & (pool->sectors_per_block - 1));
571 	else
572 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
573 				 sector_div(bi_sector, pool->sectors_per_block);
574 }
575 
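/*
 * Worked example of the two functions above: with 64KiB blocks
 * (sectors_per_block = 128, sectors_per_block_shift = 7), a bio at
 * sector 300 is in virtual block 2 (300 >> 7) at offset 44 within the
 * block (300 & 127).  If that virtual block maps to data block 5,
 * remap() sends the bio to sector (5 << 7) | 44 = 684 of the pool
 * device.  Non-power-of-two block sizes compute the same split with
 * sector_div() instead of shifts and masks.
 */
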
576 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
577 {
578 	bio->bi_bdev = tc->origin_dev->bdev;
579 }
580 
581 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
582 {
583 	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
584 		dm_thin_changed_this_transaction(tc->td);
585 }
586 
587 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
588 {
589 	struct dm_thin_endio_hook *h;
590 
591 	if (bio->bi_rw & REQ_DISCARD)
592 		return;
593 
594 	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
595 	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
596 }
597 
598 static void issue(struct thin_c *tc, struct bio *bio)
599 {
600 	struct pool *pool = tc->pool;
601 	unsigned long flags;
602 
603 	if (!bio_triggers_commit(tc, bio)) {
604 		generic_make_request(bio);
605 		return;
606 	}
607 
608 	/*
609 	 * Complete the bio with an error if earlier I/O caused changes to
610 	 * the metadata that can't be committed, e.g. due to I/O errors
611 	 * on the metadata device.
612 	 */
613 	if (dm_thin_aborted_changes(tc->td)) {
614 		bio_io_error(bio);
615 		return;
616 	}
617 
618 	/*
619 	 * Batch together any bios that trigger commits and then issue a
620 	 * single commit for them in process_deferred_bios().
621 	 */
622 	spin_lock_irqsave(&pool->lock, flags);
623 	bio_list_add(&pool->deferred_flush_bios, bio);
624 	spin_unlock_irqrestore(&pool->lock, flags);
625 }
626 
627 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
628 {
629 	remap_to_origin(tc, bio);
630 	issue(tc, bio);
631 }
632 
633 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
634 			    dm_block_t block)
635 {
636 	remap(tc, bio, block);
637 	issue(tc, bio);
638 }
639 
640 /*----------------------------------------------------------------*/
641 
642 /*
643  * Bio endio functions.
644  */
645 struct dm_thin_new_mapping {
646 	struct list_head list;
647 
648 	bool pass_discard:1;
649 	bool definitely_not_shared:1;
650 
651 	/*
652 	 * Track quiescing, copying and zeroing preparation actions.  When this
653 	 * counter hits zero the block is prepared and can be inserted into the
654 	 * btree.
655 	 */
656 	atomic_t prepare_actions;
657 
658 	int err;
659 	struct thin_c *tc;
660 	dm_block_t virt_block;
661 	dm_block_t data_block;
662 	struct dm_bio_prison_cell *cell, *cell2;
663 
664 	/*
665 	 * If the bio covers the whole area of a block then we can avoid
666 	 * zeroing or copying.  Instead this bio is hooked.  The bio will
667 	 * still be in the cell, so care has to be taken to avoid issuing
668 	 * the bio twice.
669 	 */
670 	struct bio *bio;
671 	bio_end_io_t *saved_bi_end_io;
672 };
673 
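/*
 * Lifecycle of prepare_actions (see schedule_copy() below): it starts at
 * 3, covering quiescing of the deferred set, the kcopyd copy/zero, and
 * an extra reference held while schedule_copy() itself runs (a partial
 * zero bumps it again).  Each finished action calls
 * complete_mapping_preparation(); when the count reaches zero the
 * mapping moves to pool->prepared_mappings and the worker is woken to
 * insert it into the btree.
 */
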
674 static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
675 {
676 	struct pool *pool = m->tc->pool;
677 
678 	if (atomic_dec_and_test(&m->prepare_actions)) {
679 		list_add_tail(&m->list, &pool->prepared_mappings);
680 		wake_worker(pool);
681 	}
682 }
683 
684 static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
685 {
686 	unsigned long flags;
687 	struct pool *pool = m->tc->pool;
688 
689 	spin_lock_irqsave(&pool->lock, flags);
690 	__complete_mapping_preparation(m);
691 	spin_unlock_irqrestore(&pool->lock, flags);
692 }
693 
694 static void copy_complete(int read_err, unsigned long write_err, void *context)
695 {
696 	struct dm_thin_new_mapping *m = context;
697 
698 	m->err = read_err || write_err ? -EIO : 0;
699 	complete_mapping_preparation(m);
700 }
701 
702 static void overwrite_endio(struct bio *bio, int err)
703 {
704 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
705 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
706 
707 	m->err = err;
708 	complete_mapping_preparation(m);
709 }
710 
711 /*----------------------------------------------------------------*/
712 
713 /*
714  * Workqueue.
715  */
716 
717 /*
718  * Prepared mapping jobs.
719  */
720 
721 /*
722  * This sends the bios in the cell, except the original holder, back
723  * to the thin's deferred_bio_list.
724  */
725 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
726 {
727 	struct pool *pool = tc->pool;
728 	unsigned long flags;
729 
730 	spin_lock_irqsave(&tc->lock, flags);
731 	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
732 	spin_unlock_irqrestore(&tc->lock, flags);
733 
734 	wake_worker(pool);
735 }
736 
737 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
738 
739 struct remap_info {
740 	struct thin_c *tc;
741 	struct bio_list defer_bios;
742 	struct bio_list issue_bios;
743 };
744 
745 static void __inc_remap_and_issue_cell(void *context,
746 				       struct dm_bio_prison_cell *cell)
747 {
748 	struct remap_info *info = context;
749 	struct bio *bio;
750 
751 	while ((bio = bio_list_pop(&cell->bios))) {
752 		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
753 			bio_list_add(&info->defer_bios, bio);
754 		else {
755 			inc_all_io_entry(info->tc->pool, bio);
756 
757 			/*
758 			 * We can't issue the bios with the bio prison lock
759 			 * held, so we add them to a list to issue on
760 			 * return from this function.
761 			 */
762 			bio_list_add(&info->issue_bios, bio);
763 		}
764 	}
765 }
766 
767 static void inc_remap_and_issue_cell(struct thin_c *tc,
768 				     struct dm_bio_prison_cell *cell,
769 				     dm_block_t block)
770 {
771 	struct bio *bio;
772 	struct remap_info info;
773 
774 	info.tc = tc;
775 	bio_list_init(&info.defer_bios);
776 	bio_list_init(&info.issue_bios);
777 
778 	/*
779 	 * We have to be careful to inc any bios we're about to issue
780 	 * before the cell is released, and avoid a race with new bios
781 	 * being added to the cell.
782 	 */
783 	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
784 			   &info, cell);
785 
786 	while ((bio = bio_list_pop(&info.defer_bios)))
787 		thin_defer_bio(tc, bio);
788 
789 	while ((bio = bio_list_pop(&info.issue_bios)))
790 		remap_and_issue(info.tc, bio, block);
791 }
792 
793 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
794 {
795 	if (m->bio) {
796 		m->bio->bi_end_io = m->saved_bi_end_io;
797 		atomic_inc(&m->bio->bi_remaining);
798 	}
799 	cell_error(m->tc->pool, m->cell);
800 	list_del(&m->list);
801 	mempool_free(m, m->tc->pool->mapping_pool);
802 }
803 
804 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
805 {
806 	struct thin_c *tc = m->tc;
807 	struct pool *pool = tc->pool;
808 	struct bio *bio;
809 	int r;
810 
811 	bio = m->bio;
812 	if (bio) {
813 		bio->bi_end_io = m->saved_bi_end_io;
814 		atomic_inc(&bio->bi_remaining);
815 	}
816 
817 	if (m->err) {
818 		cell_error(pool, m->cell);
819 		goto out;
820 	}
821 
822 	/*
823 	 * Commit the prepared block into the mapping btree.
824 	 * Any I/O for this block arriving after this point will get
825 	 * remapped to it directly.
826 	 */
827 	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
828 	if (r) {
829 		metadata_operation_failed(pool, "dm_thin_insert_block", r);
830 		cell_error(pool, m->cell);
831 		goto out;
832 	}
833 
834 	/*
835 	 * Release any bios held while the block was being provisioned.
836 	 * If we are processing a write bio that completely covers the block,
837 	 * we have already processed it, so we can ignore it now when processing
838 	 * the bios in the cell.
839 	 */
840 	if (bio) {
841 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
842 		bio_endio(bio, 0);
843 	} else {
844 		inc_all_io_entry(tc->pool, m->cell->holder);
845 		remap_and_issue(tc, m->cell->holder, m->data_block);
846 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
847 	}
848 
849 out:
850 	list_del(&m->list);
851 	mempool_free(m, pool->mapping_pool);
852 }
853 
854 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
855 {
856 	struct thin_c *tc = m->tc;
857 
858 	bio_io_error(m->bio);
859 	cell_defer_no_holder(tc, m->cell);
860 	cell_defer_no_holder(tc, m->cell2);
861 	mempool_free(m, tc->pool->mapping_pool);
862 }
863 
864 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
865 {
866 	struct thin_c *tc = m->tc;
867 
868 	inc_all_io_entry(tc->pool, m->bio);
869 	cell_defer_no_holder(tc, m->cell);
870 	cell_defer_no_holder(tc, m->cell2);
871 
872 	if (m->pass_discard) {
873 		if (m->definitely_not_shared)
874 			remap_and_issue(tc, m->bio, m->data_block);
875 		else {
876 			bool used = false;
877 			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
878 				bio_endio(m->bio, 0);
879 			else
880 				remap_and_issue(tc, m->bio, m->data_block);
881 		}
882 	} else
883 		bio_endio(m->bio, 0);
884 
885 	mempool_free(m, tc->pool->mapping_pool);
886 }
887 
888 static void process_prepared_discard(struct dm_thin_new_mapping *m)
889 {
890 	int r;
891 	struct thin_c *tc = m->tc;
892 
893 	r = dm_thin_remove_block(tc->td, m->virt_block);
894 	if (r)
895 		DMERR_LIMIT("dm_thin_remove_block() failed");
896 
897 	process_prepared_discard_passdown(m);
898 }
899 
900 static void process_prepared(struct pool *pool, struct list_head *head,
901 			     process_mapping_fn *fn)
902 {
903 	unsigned long flags;
904 	struct list_head maps;
905 	struct dm_thin_new_mapping *m, *tmp;
906 
907 	INIT_LIST_HEAD(&maps);
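	/*
	 * Splice the whole list off under the lock, then call the
	 * process_mapping_fn on each mapping with the lock dropped.
	 */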
908 	spin_lock_irqsave(&pool->lock, flags);
909 	list_splice_init(head, &maps);
910 	spin_unlock_irqrestore(&pool->lock, flags);
911 
912 	list_for_each_entry_safe(m, tmp, &maps, list)
913 		(*fn)(m);
914 }
915 
916 /*
917  * Deferred bio jobs.
918  */
919 static int io_overlaps_block(struct pool *pool, struct bio *bio)
920 {
921 	return bio->bi_iter.bi_size ==
922 		(pool->sectors_per_block << SECTOR_SHIFT);
923 }
924 
925 static int io_overwrites_block(struct pool *pool, struct bio *bio)
926 {
927 	return (bio_data_dir(bio) == WRITE) &&
928 		io_overlaps_block(pool, bio);
929 }
930 
931 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
932 			       bio_end_io_t *fn)
933 {
934 	*save = bio->bi_end_io;
935 	bio->bi_end_io = fn;
936 }
937 
938 static int ensure_next_mapping(struct pool *pool)
939 {
940 	if (pool->next_mapping)
941 		return 0;
942 
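	/*
	 * GFP_ATOMIC so this fails fast instead of sleeping: on failure
	 * the callers re-queue their bios/cells and wait for prepared
	 * mappings to complete, returning structs to the mempool.
	 */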
943 	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
944 
945 	return pool->next_mapping ? 0 : -ENOMEM;
946 }
947 
948 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
949 {
950 	struct dm_thin_new_mapping *m = pool->next_mapping;
951 
952 	BUG_ON(!pool->next_mapping);
953 
954 	memset(m, 0, sizeof(struct dm_thin_new_mapping));
955 	INIT_LIST_HEAD(&m->list);
956 	m->bio = NULL;
957 
958 	pool->next_mapping = NULL;
959 
960 	return m;
961 }
962 
963 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
964 		    sector_t begin, sector_t end)
965 {
966 	int r;
967 	struct dm_io_region to;
968 
969 	to.bdev = tc->pool_dev->bdev;
970 	to.sector = begin;
971 	to.count = end - begin;
972 
973 	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
974 	if (r < 0) {
975 		DMERR_LIMIT("dm_kcopyd_zero() failed");
976 		copy_complete(1, 1, m);
977 	}
978 }
979 
980 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
981 				      dm_block_t data_block,
982 				      struct dm_thin_new_mapping *m)
983 {
984 	struct pool *pool = tc->pool;
985 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
986 
987 	h->overwrite_mapping = m;
988 	m->bio = bio;
989 	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
990 	inc_all_io_entry(pool, bio);
991 	remap_and_issue(tc, bio, data_block);
992 }
993 
994 /*
995  * A partial copy also needs to zero the uncopied region.
996  */
997 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
998 			  struct dm_dev *origin, dm_block_t data_origin,
999 			  dm_block_t data_dest,
1000 			  struct dm_bio_prison_cell *cell, struct bio *bio,
1001 			  sector_t len)
1002 {
1003 	int r;
1004 	struct pool *pool = tc->pool;
1005 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1006 
1007 	m->tc = tc;
1008 	m->virt_block = virt_block;
1009 	m->data_block = data_dest;
1010 	m->cell = cell;
1011 
1012 	/*
1013 	 * quiesce action + copy action + an extra reference held for the
1014 	 * duration of this function (we may need to inc later for a
1015 	 * partial zero).
1016 	 */
1017 	atomic_set(&m->prepare_actions, 3);
1018 
1019 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1020 		complete_mapping_preparation(m); /* already quiesced */
1021 
1022 	/*
1023 	 * IO to pool_dev remaps to the pool target's data_dev.
1024 	 *
1025 	 * If the whole block of data is being overwritten, we can issue the
1026 	 * bio immediately. Otherwise we use kcopyd to clone the data first.
1027 	 */
1028 	if (io_overwrites_block(pool, bio))
1029 		remap_and_issue_overwrite(tc, bio, data_dest, m);
1030 	else {
1031 		struct dm_io_region from, to;
1032 
1033 		from.bdev = origin->bdev;
1034 		from.sector = data_origin * pool->sectors_per_block;
1035 		from.count = len;
1036 
1037 		to.bdev = tc->pool_dev->bdev;
1038 		to.sector = data_dest * pool->sectors_per_block;
1039 		to.count = len;
1040 
1041 		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1042 				   0, copy_complete, m);
1043 		if (r < 0) {
1044 			DMERR_LIMIT("dm_kcopyd_copy() failed");
1045 			copy_complete(1, 1, m);
1046 
1047 			/*
1048 			 * We allow the zero to be issued, to simplify the
1049 			 * error path.  Otherwise we'd need to start
1050 			 * worrying about decrementing the prepare_actions
1051 			 * counter.
1052 			 */
1053 		}
1054 
1055 		/*
1056 		 * Do we need to zero a tail region?
1057 		 */
1058 		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
1059 			atomic_inc(&m->prepare_actions);
1060 			ll_zero(tc, m,
1061 				data_dest * pool->sectors_per_block + len,
1062 				(data_dest + 1) * pool->sectors_per_block);
1063 		}
1064 	}
1065 
1066 	complete_mapping_preparation(m); /* drop our ref */
1067 }
1068 
1069 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1070 				   dm_block_t data_origin, dm_block_t data_dest,
1071 				   struct dm_bio_prison_cell *cell, struct bio *bio)
1072 {
1073 	schedule_copy(tc, virt_block, tc->pool_dev,
1074 		      data_origin, data_dest, cell, bio,
1075 		      tc->pool->sectors_per_block);
1076 }
1077 
1078 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1079 			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
1080 			  struct bio *bio)
1081 {
1082 	struct pool *pool = tc->pool;
1083 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
1084 
1085 	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
1086 	m->tc = tc;
1087 	m->virt_block = virt_block;
1088 	m->data_block = data_block;
1089 	m->cell = cell;
1090 
1091 	/*
1092 	 * If the whole block of data is being overwritten or we are not
1093 	 * zeroing pre-existing data, we can issue the bio immediately.
1094 	 * Otherwise we use kcopyd to zero the data first.
1095 	 */
1096 	if (!pool->pf.zero_new_blocks)
1097 		process_prepared_mapping(m);
1098 
1099 	else if (io_overwrites_block(pool, bio))
1100 		remap_and_issue_overwrite(tc, bio, data_block, m);
1101 
1102 	else
1103 		ll_zero(tc, m,
1104 			data_block * pool->sectors_per_block,
1105 			(data_block + 1) * pool->sectors_per_block);
1106 }
1107 
1108 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1109 				   dm_block_t data_dest,
1110 				   struct dm_bio_prison_cell *cell, struct bio *bio)
1111 {
1112 	struct pool *pool = tc->pool;
1113 	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
1114 	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
1115 
1116 	if (virt_block_end <= tc->origin_size)
1117 		schedule_copy(tc, virt_block, tc->origin_dev,
1118 			      virt_block, data_dest, cell, bio,
1119 			      pool->sectors_per_block);
1120 
1121 	else if (virt_block_begin < tc->origin_size)
1122 		schedule_copy(tc, virt_block, tc->origin_dev,
1123 			      virt_block, data_dest, cell, bio,
1124 			      tc->origin_size - virt_block_begin);
1125 
1126 	else
1127 		schedule_zero(tc, virt_block, data_dest, cell, bio);
1128 }
1129 
1130 /*
1131  * A non-zero return indicates read_only or fail_io mode.
1132  * Many callers don't care about the return value.
1133  */
1134 static int commit(struct pool *pool)
1135 {
1136 	int r;
1137 
1138 	if (get_pool_mode(pool) >= PM_READ_ONLY)
1139 		return -EINVAL;
1140 
1141 	r = dm_pool_commit_metadata(pool->pmd);
1142 	if (r)
1143 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1144 
1145 	return r;
1146 }
1147 
1148 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1149 {
1150 	unsigned long flags;
1151 
1152 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1153 		DMWARN("%s: reached low water mark for data device: sending event.",
1154 		       dm_device_name(pool->pool_md));
1155 		spin_lock_irqsave(&pool->lock, flags);
1156 		pool->low_water_triggered = true;
1157 		spin_unlock_irqrestore(&pool->lock, flags);
1158 		dm_table_event(pool->ti->table);
1159 	}
1160 }
1161 
1162 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1163 
1164 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1165 {
1166 	int r;
1167 	dm_block_t free_blocks;
1168 	struct pool *pool = tc->pool;
1169 
1170 	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
1171 		return -EINVAL;
1172 
1173 	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1174 	if (r) {
1175 		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1176 		return r;
1177 	}
1178 
1179 	check_low_water_mark(pool, free_blocks);
1180 
1181 	if (!free_blocks) {
1182 		/*
1183 		 * Try to commit to see if that will free up some
1184 		 * more space.
1185 		 */
1186 		r = commit(pool);
1187 		if (r)
1188 			return r;
1189 
1190 		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1191 		if (r) {
1192 			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1193 			return r;
1194 		}
1195 
1196 		if (!free_blocks) {
1197 			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1198 			return -ENOSPC;
1199 		}
1200 	}
1201 
1202 	r = dm_pool_alloc_data_block(pool->pmd, result);
1203 	if (r) {
1204 		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1205 		return r;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 /*
1212  * If we have run out of space, queue bios until the device is
1213  * resumed, presumably after having been reloaded with more space.
1214  */
1215 static void retry_on_resume(struct bio *bio)
1216 {
1217 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1218 	struct thin_c *tc = h->tc;
1219 	unsigned long flags;
1220 
1221 	spin_lock_irqsave(&tc->lock, flags);
1222 	bio_list_add(&tc->retry_on_resume_list, bio);
1223 	spin_unlock_irqrestore(&tc->lock, flags);
1224 }
1225 
1226 static int should_error_unserviceable_bio(struct pool *pool)
1227 {
1228 	enum pool_mode m = get_pool_mode(pool);
1229 
1230 	switch (m) {
1231 	case PM_WRITE:
1232 		/* Shouldn't get here */
1233 		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1234 		return -EIO;
1235 
1236 	case PM_OUT_OF_DATA_SPACE:
1237 		return pool->pf.error_if_no_space ? -ENOSPC : 0;
1238 
1239 	case PM_READ_ONLY:
1240 	case PM_FAIL:
1241 		return -EIO;
1242 	default:
1243 		/* Shouldn't get here */
1244 		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1245 		return -EIO;
1246 	}
1247 }
1248 
1249 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1250 {
1251 	int error = should_error_unserviceable_bio(pool);
1252 
1253 	if (error)
1254 		bio_endio(bio, error);
1255 	else
1256 		retry_on_resume(bio);
1257 }
1258 
1259 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1260 {
1261 	struct bio *bio;
1262 	struct bio_list bios;
1263 	int error;
1264 
1265 	error = should_error_unserviceable_bio(pool);
1266 	if (error) {
1267 		cell_error_with_code(pool, cell, error);
1268 		return;
1269 	}
1270 
1271 	bio_list_init(&bios);
1272 	cell_release(pool, cell, &bios);
1273 
1274 	while ((bio = bio_list_pop(&bios)))
1275 		retry_on_resume(bio);
1276 }
1277 
1278 static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1279 {
1280 	int r;
1281 	struct bio *bio = cell->holder;
1282 	struct pool *pool = tc->pool;
1283 	struct dm_bio_prison_cell *cell2;
1284 	struct dm_cell_key key2;
1285 	dm_block_t block = get_bio_block(tc, bio);
1286 	struct dm_thin_lookup_result lookup_result;
1287 	struct dm_thin_new_mapping *m;
1288 
1289 	if (tc->requeue_mode) {
1290 		cell_requeue(pool, cell);
1291 		return;
1292 	}
1293 
1294 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1295 	switch (r) {
1296 	case 0:
1297 		/*
1298 		 * Check nobody is fiddling with this pool block.  This can
1299 		 * happen if someone's in the process of breaking sharing
1300 		 * on this block.
1301 		 */
1302 		build_data_key(tc->td, lookup_result.block, &key2);
1303 		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1304 			cell_defer_no_holder(tc, cell);
1305 			break;
1306 		}
1307 
1308 		if (io_overlaps_block(pool, bio)) {
1309 			/*
1310 			 * IO may still be going to the destination block.  We must
1311 			 * quiesce before we can do the removal.
1312 			 */
1313 			m = get_next_mapping(pool);
1314 			m->tc = tc;
1315 			m->pass_discard = pool->pf.discard_passdown;
1316 			m->definitely_not_shared = !lookup_result.shared;
1317 			m->virt_block = block;
1318 			m->data_block = lookup_result.block;
1319 			m->cell = cell;
1320 			m->cell2 = cell2;
1321 			m->bio = bio;
1322 
1323 			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1324 				pool->process_prepared_discard(m);
1325 
1326 		} else {
1327 			inc_all_io_entry(pool, bio);
1328 			cell_defer_no_holder(tc, cell);
1329 			cell_defer_no_holder(tc, cell2);
1330 
1331 			/*
1332 			 * The DM core makes sure that the discard doesn't span
1333 			 * a block boundary, so a partial-block discard can simply
1334 			 * be passed down here (when the block isn't shared).
1335 			 */
1336 			if ((!lookup_result.shared) && pool->pf.discard_passdown)
1337 				remap_and_issue(tc, bio, lookup_result.block);
1338 			else
1339 				bio_endio(bio, 0);
1340 		}
1341 		break;
1342 
1343 	case -ENODATA:
1344 		/*
1345 		 * It isn't provisioned, just forget it.
1346 		 */
1347 		cell_defer_no_holder(tc, cell);
1348 		bio_endio(bio, 0);
1349 		break;
1350 
1351 	default:
1352 		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1353 			    __func__, r);
1354 		cell_defer_no_holder(tc, cell);
1355 		bio_io_error(bio);
1356 		break;
1357 	}
1358 }
1359 
1360 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1361 {
1362 	struct dm_bio_prison_cell *cell;
1363 	struct dm_cell_key key;
1364 	dm_block_t block = get_bio_block(tc, bio);
1365 
1366 	build_virtual_key(tc->td, block, &key);
1367 	if (bio_detain(tc->pool, &key, bio, &cell))
1368 		return;
1369 
1370 	process_discard_cell(tc, cell);
1371 }
1372 
1373 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1374 			  struct dm_cell_key *key,
1375 			  struct dm_thin_lookup_result *lookup_result,
1376 			  struct dm_bio_prison_cell *cell)
1377 {
1378 	int r;
1379 	dm_block_t data_block;
1380 	struct pool *pool = tc->pool;
1381 
1382 	r = alloc_data_block(tc, &data_block);
1383 	switch (r) {
1384 	case 0:
1385 		schedule_internal_copy(tc, block, lookup_result->block,
1386 				       data_block, cell, bio);
1387 		break;
1388 
1389 	case -ENOSPC:
1390 		retry_bios_on_resume(pool, cell);
1391 		break;
1392 
1393 	default:
1394 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1395 			    __func__, r);
1396 		cell_error(pool, cell);
1397 		break;
1398 	}
1399 }
1400 
1401 static void __remap_and_issue_shared_cell(void *context,
1402 					  struct dm_bio_prison_cell *cell)
1403 {
1404 	struct remap_info *info = context;
1405 	struct bio *bio;
1406 
1407 	while ((bio = bio_list_pop(&cell->bios))) {
1408 		if ((bio_data_dir(bio) == WRITE) ||
1409 		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
1410 			bio_list_add(&info->defer_bios, bio);
1411 		else {
1412 			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1413 
1414 			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1415 			inc_all_io_entry(info->tc->pool, bio);
1416 			bio_list_add(&info->issue_bios, bio);
1417 		}
1418 	}
1419 }
1420 
1421 static void remap_and_issue_shared_cell(struct thin_c *tc,
1422 					struct dm_bio_prison_cell *cell,
1423 					dm_block_t block)
1424 {
1425 	struct bio *bio;
1426 	struct remap_info info;
1427 
1428 	info.tc = tc;
1429 	bio_list_init(&info.defer_bios);
1430 	bio_list_init(&info.issue_bios);
1431 
1432 	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1433 			   &info, cell);
1434 
1435 	while ((bio = bio_list_pop(&info.defer_bios)))
1436 		thin_defer_bio(tc, bio);
1437 
1438 	while ((bio = bio_list_pop(&info.issue_bios)))
1439 		remap_and_issue(tc, bio, block);
1440 }
1441 
1442 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1443 			       dm_block_t block,
1444 			       struct dm_thin_lookup_result *lookup_result,
1445 			       struct dm_bio_prison_cell *virt_cell)
1446 {
1447 	struct dm_bio_prison_cell *data_cell;
1448 	struct pool *pool = tc->pool;
1449 	struct dm_cell_key key;
1450 
1451 	/*
1452 	 * If cell is already occupied, then sharing is already in the process
1453 	 * of being broken so we have nothing further to do here.
1454 	 */
1455 	build_data_key(tc->td, lookup_result->block, &key);
1456 	if (bio_detain(pool, &key, bio, &data_cell)) {
1457 		cell_defer_no_holder(tc, virt_cell);
1458 		return;
1459 	}
1460 
1461 	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1462 		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1463 		cell_defer_no_holder(tc, virt_cell);
1464 	} else {
1465 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1466 
1467 		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1468 		inc_all_io_entry(pool, bio);
1469 		remap_and_issue(tc, bio, lookup_result->block);
1470 
1471 		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1472 		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1473 	}
1474 }
1475 
1476 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1477 			    struct dm_bio_prison_cell *cell)
1478 {
1479 	int r;
1480 	dm_block_t data_block;
1481 	struct pool *pool = tc->pool;
1482 
1483 	/*
1484 	 * Remap empty bios (flushes) immediately, without provisioning.
1485 	 */
1486 	if (!bio->bi_iter.bi_size) {
1487 		inc_all_io_entry(pool, bio);
1488 		cell_defer_no_holder(tc, cell);
1489 
1490 		remap_and_issue(tc, bio, 0);
1491 		return;
1492 	}
1493 
1494 	/*
1495 	 * Fill read bios with zeroes and complete them immediately.
1496 	 */
1497 	if (bio_data_dir(bio) == READ) {
1498 		zero_fill_bio(bio);
1499 		cell_defer_no_holder(tc, cell);
1500 		bio_endio(bio, 0);
1501 		return;
1502 	}
1503 
1504 	r = alloc_data_block(tc, &data_block);
1505 	switch (r) {
1506 	case 0:
1507 		if (tc->origin_dev)
1508 			schedule_external_copy(tc, block, data_block, cell, bio);
1509 		else
1510 			schedule_zero(tc, block, data_block, cell, bio);
1511 		break;
1512 
1513 	case -ENOSPC:
1514 		retry_bios_on_resume(pool, cell);
1515 		break;
1516 
1517 	default:
1518 		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1519 			    __func__, r);
1520 		cell_error(pool, cell);
1521 		break;
1522 	}
1523 }
1524 
1525 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1526 {
1527 	int r;
1528 	struct pool *pool = tc->pool;
1529 	struct bio *bio = cell->holder;
1530 	dm_block_t block = get_bio_block(tc, bio);
1531 	struct dm_thin_lookup_result lookup_result;
1532 
1533 	if (tc->requeue_mode) {
1534 		cell_requeue(pool, cell);
1535 		return;
1536 	}
1537 
1538 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1539 	switch (r) {
1540 	case 0:
1541 		if (lookup_result.shared)
1542 			process_shared_bio(tc, bio, block, &lookup_result, cell);
1543 		else {
1544 			inc_all_io_entry(pool, bio);
1545 			remap_and_issue(tc, bio, lookup_result.block);
1546 			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1547 		}
1548 		break;
1549 
1550 	case -ENODATA:
1551 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
1552 			inc_all_io_entry(pool, bio);
1553 			cell_defer_no_holder(tc, cell);
1554 
1555 			if (bio_end_sector(bio) <= tc->origin_size)
1556 				remap_to_origin_and_issue(tc, bio);
1557 
1558 			else if (bio->bi_iter.bi_sector < tc->origin_size) {
1559 				zero_fill_bio(bio);
1560 				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1561 				remap_to_origin_and_issue(tc, bio);
1562 
1563 			} else {
1564 				zero_fill_bio(bio);
1565 				bio_endio(bio, 0);
1566 			}
1567 		} else
1568 			provision_block(tc, bio, block, cell);
1569 		break;
1570 
1571 	default:
1572 		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1573 			    __func__, r);
1574 		cell_defer_no_holder(tc, cell);
1575 		bio_io_error(bio);
1576 		break;
1577 	}
1578 }
1579 
1580 static void process_bio(struct thin_c *tc, struct bio *bio)
1581 {
1582 	struct pool *pool = tc->pool;
1583 	dm_block_t block = get_bio_block(tc, bio);
1584 	struct dm_bio_prison_cell *cell;
1585 	struct dm_cell_key key;
1586 
1587 	/*
1588 	 * If cell is already occupied, then the block is already
1589 	 * being provisioned so we have nothing further to do here.
1590 	 */
1591 	build_virtual_key(tc->td, block, &key);
1592 	if (bio_detain(pool, &key, bio, &cell))
1593 		return;
1594 
1595 	process_cell(tc, cell);
1596 }
1597 
1598 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1599 				    struct dm_bio_prison_cell *cell)
1600 {
1601 	int r;
1602 	int rw = bio_data_dir(bio);
1603 	dm_block_t block = get_bio_block(tc, bio);
1604 	struct dm_thin_lookup_result lookup_result;
1605 
1606 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1607 	switch (r) {
1608 	case 0:
1609 		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1610 			handle_unserviceable_bio(tc->pool, bio);
1611 			if (cell)
1612 				cell_defer_no_holder(tc, cell);
1613 		} else {
1614 			inc_all_io_entry(tc->pool, bio);
1615 			remap_and_issue(tc, bio, lookup_result.block);
1616 			if (cell)
1617 				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1618 		}
1619 		break;
1620 
1621 	case -ENODATA:
1622 		if (cell)
1623 			cell_defer_no_holder(tc, cell);
1624 		if (rw != READ) {
1625 			handle_unserviceable_bio(tc->pool, bio);
1626 			break;
1627 		}
1628 
1629 		if (tc->origin_dev) {
1630 			inc_all_io_entry(tc->pool, bio);
1631 			remap_to_origin_and_issue(tc, bio);
1632 			break;
1633 		}
1634 
1635 		zero_fill_bio(bio);
1636 		bio_endio(bio, 0);
1637 		break;
1638 
1639 	default:
1640 		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1641 			    __func__, r);
1642 		if (cell)
1643 			cell_defer_no_holder(tc, cell);
1644 		bio_io_error(bio);
1645 		break;
1646 	}
1647 }
1648 
1649 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1650 {
1651 	__process_bio_read_only(tc, bio, NULL);
1652 }
1653 
1654 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1655 {
1656 	__process_bio_read_only(tc, cell->holder, cell);
1657 }
1658 
1659 static void process_bio_success(struct thin_c *tc, struct bio *bio)
1660 {
1661 	bio_endio(bio, 0);
1662 }
1663 
1664 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1665 {
1666 	bio_io_error(bio);
1667 }
1668 
1669 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1670 {
1671 	cell_success(tc->pool, cell);
1672 }
1673 
1674 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1675 {
1676 	cell_error(tc->pool, cell);
1677 }
1678 
1679 /*
1680  * FIXME: should we also commit due to size of transaction, measured in
1681  * metadata blocks?
1682  */
1683 static int need_commit_due_to_time(struct pool *pool)
1684 {
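	/*
	 * The first test handles jiffies wrap: if jiffies has gone
	 * backwards relative to last_commit_jiffies, commit now rather
	 * than waiting out a full wrap for the second test to fire.
	 */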
1685 	return jiffies < pool->last_commit_jiffies ||
1686 	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1687 }
1688 
1689 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1690 #define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1691 
1692 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1693 {
1694 	struct rb_node **rbp, *parent;
1695 	struct dm_thin_endio_hook *pbd;
1696 	sector_t bi_sector = bio->bi_iter.bi_sector;
1697 
1698 	rbp = &tc->sort_bio_list.rb_node;
1699 	parent = NULL;
1700 	while (*rbp) {
1701 		parent = *rbp;
1702 		pbd = thin_pbd(parent);
1703 
1704 		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1705 			rbp = &(*rbp)->rb_left;
1706 		else
1707 			rbp = &(*rbp)->rb_right;
1708 	}
1709 
1710 	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1711 	rb_link_node(&pbd->rb_node, parent, rbp);
1712 	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
1713 }
1714 
1715 static void __extract_sorted_bios(struct thin_c *tc)
1716 {
1717 	struct rb_node *node;
1718 	struct dm_thin_endio_hook *pbd;
1719 	struct bio *bio;
1720 
1721 	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
1722 		pbd = thin_pbd(node);
1723 		bio = thin_bio(pbd);
1724 
1725 		bio_list_add(&tc->deferred_bio_list, bio);
1726 		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
1727 	}
1728 
1729 	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
1730 }
1731 
1732 static void __sort_thin_deferred_bios(struct thin_c *tc)
1733 {
1734 	struct bio *bio;
1735 	struct bio_list bios;
1736 
1737 	bio_list_init(&bios);
1738 	bio_list_merge(&bios, &tc->deferred_bio_list);
1739 	bio_list_init(&tc->deferred_bio_list);
1740 
1741 	/* Sort deferred_bio_list using rb-tree */
1742 	while ((bio = bio_list_pop(&bios)))
1743 		__thin_bio_rb_add(tc, bio);
1744 
1745 	/*
1746 	 * Transfer the sorted bios in sort_bio_list back to
1747 	 * deferred_bio_list to allow lockless submission of
1748 	 * all bios.
1749 	 */
1750 	__extract_sorted_bios(tc);
1751 }
1752 
1753 static void process_thin_deferred_bios(struct thin_c *tc)
1754 {
1755 	struct pool *pool = tc->pool;
1756 	unsigned long flags;
1757 	struct bio *bio;
1758 	struct bio_list bios;
1759 	struct blk_plug plug;
1760 	unsigned count = 0;
1761 
1762 	if (tc->requeue_mode) {
1763 		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
1764 		return;
1765 	}
1766 
1767 	bio_list_init(&bios);
1768 
1769 	spin_lock_irqsave(&tc->lock, flags);
1770 
1771 	if (bio_list_empty(&tc->deferred_bio_list)) {
1772 		spin_unlock_irqrestore(&tc->lock, flags);
1773 		return;
1774 	}
1775 
1776 	__sort_thin_deferred_bios(tc);
1777 
1778 	bio_list_merge(&bios, &tc->deferred_bio_list);
1779 	bio_list_init(&tc->deferred_bio_list);
1780 
1781 	spin_unlock_irqrestore(&tc->lock, flags);
1782 
1783 	blk_start_plug(&plug);
1784 	while ((bio = bio_list_pop(&bios))) {
1785 		/*
1786 		 * If we've got no free new_mapping structs, and processing
1787 		 * this bio might require one, we pause until there are some
1788 		 * prepared mappings to process.
1789 		 */
1790 		if (ensure_next_mapping(pool)) {
1791 			spin_lock_irqsave(&tc->lock, flags);
1792 			bio_list_add(&tc->deferred_bio_list, bio);
1793 			bio_list_merge(&tc->deferred_bio_list, &bios);
1794 			spin_unlock_irqrestore(&tc->lock, flags);
1795 			break;
1796 		}
1797 
1798 		if (bio->bi_rw & REQ_DISCARD)
1799 			pool->process_discard(tc, bio);
1800 		else
1801 			pool->process_bio(tc, bio);
1802 
1803 		if ((count++ & 127) == 0) {
1804 			throttle_work_update(&pool->throttle);
1805 			dm_pool_issue_prefetches(pool->pmd);
1806 		}
1807 	}
1808 	blk_finish_plug(&plug);
1809 }
1810 
1811 static int cmp_cells(const void *lhs, const void *rhs)
1812 {
1813 	struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1814 	struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1815 
1816 	BUG_ON(!lhs_cell->holder);
1817 	BUG_ON(!rhs_cell->holder);
1818 
1819 	if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1820 		return -1;
1821 
1822 	if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1823 		return 1;
1824 
1825 	return 0;
1826 }
1827 
1828 static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1829 {
1830 	unsigned count = 0;
1831 	struct dm_bio_prison_cell *cell, *tmp;
1832 
1833 	list_for_each_entry_safe(cell, tmp, cells, user_list) {
1834 		if (count >= CELL_SORT_ARRAY_SIZE)
1835 			break;
1836 
1837 		pool->cell_sort_array[count++] = cell;
1838 		list_del(&cell->user_list);
1839 	}
1840 
1841 	sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1842 
1843 	return count;
1844 }
1845 
1846 static void process_thin_deferred_cells(struct thin_c *tc)
1847 {
1848 	struct pool *pool = tc->pool;
1849 	unsigned long flags;
1850 	struct list_head cells;
1851 	struct dm_bio_prison_cell *cell;
1852 	unsigned i, j, count;
1853 
1854 	INIT_LIST_HEAD(&cells);
1855 
1856 	spin_lock_irqsave(&tc->lock, flags);
1857 	list_splice_init(&tc->deferred_cells, &cells);
1858 	spin_unlock_irqrestore(&tc->lock, flags);
1859 
1860 	if (list_empty(&cells))
1861 		return;
1862 
1863 	do {
1864 		count = sort_cells(tc->pool, &cells);
1865 
1866 		for (i = 0; i < count; i++) {
1867 			cell = pool->cell_sort_array[i];
1868 			BUG_ON(!cell->holder);
1869 
1870 			/*
1871 			 * If we've got no free new_mapping structs, and processing
1872 			 * this bio might require one, we pause until there are some
1873 			 * prepared mappings to process.
1874 			 */
1875 			if (ensure_next_mapping(pool)) {
1876 				for (j = i; j < count; j++)
1877 					list_add(&pool->cell_sort_array[j]->user_list, &cells);
1878 
1879 				spin_lock_irqsave(&tc->lock, flags);
1880 				list_splice(&cells, &tc->deferred_cells);
1881 				spin_unlock_irqrestore(&tc->lock, flags);
1882 				return;
1883 			}
1884 
1885 			if (cell->holder->bi_rw & REQ_DISCARD)
1886 				pool->process_discard_cell(tc, cell);
1887 			else
1888 				pool->process_cell(tc, cell);
1889 		}
1890 	} while (!list_empty(&cells));
1891 }
1892 
1893 static void thin_get(struct thin_c *tc);
1894 static void thin_put(struct thin_c *tc);
1895 
1896 /*
1897  * We can't hold rcu_read_lock() around code that can block.  So we
1898  * find a thin with the rcu lock held; bump a refcount; then drop
1899  * the lock.
1900  */
1901 static struct thin_c *get_first_thin(struct pool *pool)
1902 {
1903 	struct thin_c *tc = NULL;
1904 
1905 	rcu_read_lock();
1906 	if (!list_empty(&pool->active_thins)) {
1907 		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1908 		thin_get(tc);
1909 	}
1910 	rcu_read_unlock();
1911 
1912 	return tc;
1913 }
1914 
1915 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1916 {
1917 	struct thin_c *old_tc = tc;
1918 
1919 	rcu_read_lock();
1920 	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1921 		thin_get(tc);
1922 		thin_put(old_tc);
1923 		rcu_read_unlock();
1924 		return tc;
1925 	}
1926 	thin_put(old_tc);
1927 	rcu_read_unlock();
1928 
1929 	return NULL;
1930 }
1931 
1932 static void process_deferred_bios(struct pool *pool)
1933 {
1934 	unsigned long flags;
1935 	struct bio *bio;
1936 	struct bio_list bios;
1937 	struct thin_c *tc;
1938 
1939 	tc = get_first_thin(pool);
1940 	while (tc) {
1941 		process_thin_deferred_cells(tc);
1942 		process_thin_deferred_bios(tc);
1943 		tc = get_next_thin(pool, tc);
1944 	}
1945 
1946 	/*
1947 	 * If there are any deferred flush bios, we must commit
1948 	 * the metadata before issuing them.
1949 	 */
1950 	bio_list_init(&bios);
1951 	spin_lock_irqsave(&pool->lock, flags);
1952 	bio_list_merge(&bios, &pool->deferred_flush_bios);
1953 	bio_list_init(&pool->deferred_flush_bios);
1954 	spin_unlock_irqrestore(&pool->lock, flags);
1955 
1956 	if (bio_list_empty(&bios) &&
1957 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1958 		return;
1959 
1960 	if (commit(pool)) {
1961 		while ((bio = bio_list_pop(&bios)))
1962 			bio_io_error(bio);
1963 		return;
1964 	}
1965 	pool->last_commit_jiffies = jiffies;
1966 
1967 	while ((bio = bio_list_pop(&bios)))
1968 		generic_make_request(bio);
1969 }
1970 
1971 static void do_worker(struct work_struct *ws)
1972 {
1973 	struct pool *pool = container_of(ws, struct pool, worker);
1974 
1975 	throttle_work_start(&pool->throttle);
1976 	dm_pool_issue_prefetches(pool->pmd);
1977 	throttle_work_update(&pool->throttle);
1978 	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1979 	throttle_work_update(&pool->throttle);
1980 	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1981 	throttle_work_update(&pool->throttle);
1982 	process_deferred_bios(pool);
1983 	throttle_work_complete(&pool->throttle);
1984 }
1985 
1986 /*
1987  * We want to commit periodically so that not too much
1988  * unwritten data builds up.
1989  */
1990 static void do_waker(struct work_struct *ws)
1991 {
1992 	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1993 	wake_worker(pool);
1994 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1995 }
1996 
1997 /*
1998  * We're holding onto IO to allow userland time to react.  After the
1999  * timeout either the pool will have been resized (and thus back in
2000  * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
2001  */
2002 static void do_no_space_timeout(struct work_struct *ws)
2003 {
2004 	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2005 					 no_space_timeout);
2006 
2007 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
2008 		set_pool_mode(pool, PM_READ_ONLY);
2009 }
2010 
2011 /*----------------------------------------------------------------*/
2012 
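/*
 * pool_work is a small helper for running a function on the pool's
 * workqueue and waiting synchronously for it to complete.
 */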
2013 struct pool_work {
2014 	struct work_struct worker;
2015 	struct completion complete;
2016 };
2017 
2018 static struct pool_work *to_pool_work(struct work_struct *ws)
2019 {
2020 	return container_of(ws, struct pool_work, worker);
2021 }
2022 
2023 static void pool_work_complete(struct pool_work *pw)
2024 {
2025 	complete(&pw->complete);
2026 }
2027 
2028 static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2029 			   void (*fn)(struct work_struct *))
2030 {
2031 	INIT_WORK_ONSTACK(&pw->worker, fn);
2032 	init_completion(&pw->complete);
2033 	queue_work(pool->wq, &pw->worker);
2034 	wait_for_completion(&pw->complete);
2035 }
2036 
2037 /*----------------------------------------------------------------*/
2038 
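/*
 * noflush_work builds on pool_work to flip a thin device's
 * requeue_mode from the pool's worker thread, so the change cannot
 * race with the worker's normal bio processing.  The thin target's
 * suspend/resume paths later in this file use it.
 */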
2039 struct noflush_work {
2040 	struct pool_work pw;
2041 	struct thin_c *tc;
2042 };
2043 
2044 static struct noflush_work *to_noflush(struct work_struct *ws)
2045 {
2046 	return container_of(to_pool_work(ws), struct noflush_work, pw);
2047 }
2048 
2049 static void do_noflush_start(struct work_struct *ws)
2050 {
2051 	struct noflush_work *w = to_noflush(ws);
2052 	w->tc->requeue_mode = true;
2053 	requeue_io(w->tc);
2054 	pool_work_complete(&w->pw);
2055 }
2056 
2057 static void do_noflush_stop(struct work_struct *ws)
2058 {
2059 	struct noflush_work *w = to_noflush(ws);
2060 	w->tc->requeue_mode = false;
2061 	pool_work_complete(&w->pw);
2062 }
2063 
2064 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2065 {
2066 	struct noflush_work w;
2067 
2068 	w.tc = tc;
2069 	pool_work_wait(&w.pw, tc->pool, fn);
2070 }
2071 
2072 /*----------------------------------------------------------------*/
2073 
2074 static enum pool_mode get_pool_mode(struct pool *pool)
2075 {
2076 	return pool->pf.mode;
2077 }
2078 
2079 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2080 {
2081 	dm_table_event(pool->ti->table);
2082 	DMINFO("%s: switching pool to %s mode",
2083 	       dm_device_name(pool->pool_md), new_mode);
2084 }
2085 
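/*
 * Pool modes, from healthiest to most degraded: PM_WRITE,
 * PM_OUT_OF_DATA_SPACE, PM_READ_ONLY, PM_FAIL.  set_pool_mode()
 * installs the process_* function pointers that implement the new
 * mode; the guards below stop a damaged pool from being silently
 * upgraded.
 */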
2086 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2087 {
2088 	struct pool_c *pt = pool->ti->private;
2089 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2090 	enum pool_mode old_mode = get_pool_mode(pool);
2091 	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
2092 
2093 	/*
2094 	 * Never allow the pool to transition to PM_WRITE mode if user
2095 	 * intervention is required to verify metadata and data consistency.
2096 	 */
2097 	if (new_mode == PM_WRITE && needs_check) {
2098 		DMERR("%s: unable to switch pool to write mode until repaired.",
2099 		      dm_device_name(pool->pool_md));
2100 		if (old_mode != new_mode)
2101 			new_mode = old_mode;
2102 		else
2103 			new_mode = PM_READ_ONLY;
2104 	}
2105 	/*
2106 	 * If we were in PM_FAIL mode, rollback of metadata failed.  We're
2107 	 * not going to recover without a thin_repair.  So we never let the
2108 	 * pool move out of the old mode.
2109 	 */
2110 	if (old_mode == PM_FAIL)
2111 		new_mode = old_mode;
2112 
2113 	switch (new_mode) {
2114 	case PM_FAIL:
2115 		if (old_mode != new_mode)
2116 			notify_of_pool_mode_change(pool, "failure");
2117 		dm_pool_metadata_read_only(pool->pmd);
2118 		pool->process_bio = process_bio_fail;
2119 		pool->process_discard = process_bio_fail;
2120 		pool->process_cell = process_cell_fail;
2121 		pool->process_discard_cell = process_cell_fail;
2122 		pool->process_prepared_mapping = process_prepared_mapping_fail;
2123 		pool->process_prepared_discard = process_prepared_discard_fail;
2124 
2125 		error_retry_list(pool);
2126 		break;
2127 
2128 	case PM_READ_ONLY:
2129 		if (old_mode != new_mode)
2130 			notify_of_pool_mode_change(pool, "read-only");
2131 		dm_pool_metadata_read_only(pool->pmd);
2132 		pool->process_bio = process_bio_read_only;
2133 		pool->process_discard = process_bio_success;
2134 		pool->process_cell = process_cell_read_only;
2135 		pool->process_discard_cell = process_cell_success;
2136 		pool->process_prepared_mapping = process_prepared_mapping_fail;
2137 		pool->process_prepared_discard = process_prepared_discard_passdown;
2138 
2139 		error_retry_list(pool);
2140 		break;
2141 
2142 	case PM_OUT_OF_DATA_SPACE:
2143 		/*
2144 		 * Ideally we'd never hit this state; the low water mark
2145 		 * would trigger userland to extend the pool before we
2146 		 * completely run out of data space.  However, many small
2147 		 * IOs to unprovisioned space can consume data space at an
2148 		 * alarming rate.  Adjust your low water mark if you're
2149 		 * frequently seeing this mode.
2150 		 */
2151 		if (old_mode != new_mode)
2152 			notify_of_pool_mode_change(pool, "out-of-data-space");
2153 		pool->process_bio = process_bio_read_only;
2154 		pool->process_discard = process_discard_bio;
2155 		pool->process_cell = process_cell_read_only;
2156 		pool->process_discard_cell = process_discard_cell;
2157 		pool->process_prepared_mapping = process_prepared_mapping;
2158 		pool->process_prepared_discard = process_prepared_discard_passdown;
2159 
2160 		if (!pool->pf.error_if_no_space && no_space_timeout)
2161 			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2162 		break;
2163 
2164 	case PM_WRITE:
2165 		if (old_mode != new_mode)
2166 			notify_of_pool_mode_change(pool, "write");
2167 		dm_pool_metadata_read_write(pool->pmd);
2168 		pool->process_bio = process_bio;
2169 		pool->process_discard = process_discard_bio;
2170 		pool->process_cell = process_cell;
2171 		pool->process_discard_cell = process_discard_cell;
2172 		pool->process_prepared_mapping = process_prepared_mapping;
2173 		pool->process_prepared_discard = process_prepared_discard;
2174 		break;
2175 	}
2176 
2177 	pool->pf.mode = new_mode;
2178 	/*
2179 	 * The pool mode may have changed, sync it so bind_control_target()
2180 	 * doesn't cause an unexpected mode transition on resume.
2181 	 */
2182 	pt->adjusted_pf.mode = new_mode;
2183 }
2184 
2185 static void abort_transaction(struct pool *pool)
2186 {
2187 	const char *dev_name = dm_device_name(pool->pool_md);
2188 
2189 	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2190 	if (dm_pool_abort_metadata(pool->pmd)) {
2191 		DMERR("%s: failed to abort metadata transaction", dev_name);
2192 		set_pool_mode(pool, PM_FAIL);
2193 	}
2194 
2195 	if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2196 		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2197 		set_pool_mode(pool, PM_FAIL);
2198 	}
2199 }
2200 
2201 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2202 {
2203 	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2204 		    dm_device_name(pool->pool_md), op, r);
2205 
2206 	abort_transaction(pool);
2207 	set_pool_mode(pool, PM_READ_ONLY);
2208 }
2209 
2210 /*----------------------------------------------------------------*/
2211 
2212 /*
2213  * Mapping functions.
2214  */
2215 
2216 /*
2217  * Called only while mapping a thin bio to hand it over to the workqueue.
2218  */
2219 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2220 {
2221 	unsigned long flags;
2222 	struct pool *pool = tc->pool;
2223 
2224 	spin_lock_irqsave(&tc->lock, flags);
2225 	bio_list_add(&tc->deferred_bio_list, bio);
2226 	spin_unlock_irqrestore(&tc->lock, flags);
2227 
2228 	wake_worker(pool);
2229 }
2230 
2231 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2232 {
2233 	struct pool *pool = tc->pool;
2234 
2235 	throttle_lock(&pool->throttle);
2236 	thin_defer_bio(tc, bio);
2237 	throttle_unlock(&pool->throttle);
2238 }
2239 
2240 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2241 {
2242 	unsigned long flags;
2243 	struct pool *pool = tc->pool;
2244 
2245 	throttle_lock(&pool->throttle);
2246 	spin_lock_irqsave(&tc->lock, flags);
2247 	list_add_tail(&cell->user_list, &tc->deferred_cells);
2248 	spin_unlock_irqrestore(&tc->lock, flags);
2249 	throttle_unlock(&pool->throttle);
2250 
2251 	wake_worker(pool);
2252 }
2253 
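/*
 * Initialise the per-bio hook data that thin_endio() consumes.
 */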
2254 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2255 {
2256 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2257 
2258 	h->tc = tc;
2259 	h->shared_read_entry = NULL;
2260 	h->all_io_entry = NULL;
2261 	h->overwrite_mapping = NULL;
2262 }
2263 
2264 /*
2265  * Non-blocking function called from the thin target's map function.
2266  */
2267 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2268 {
2269 	int r;
2270 	struct thin_c *tc = ti->private;
2271 	dm_block_t block = get_bio_block(tc, bio);
2272 	struct dm_thin_device *td = tc->td;
2273 	struct dm_thin_lookup_result result;
2274 	struct dm_bio_prison_cell *virt_cell, *data_cell;
2275 	struct dm_cell_key key;
2276 
2277 	thin_hook_bio(tc, bio);
2278 
2279 	if (tc->requeue_mode) {
2280 		bio_endio(bio, DM_ENDIO_REQUEUE);
2281 		return DM_MAPIO_SUBMITTED;
2282 	}
2283 
2284 	if (get_pool_mode(tc->pool) == PM_FAIL) {
2285 		bio_io_error(bio);
2286 		return DM_MAPIO_SUBMITTED;
2287 	}
2288 
2289 	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
2290 		thin_defer_bio_with_throttle(tc, bio);
2291 		return DM_MAPIO_SUBMITTED;
2292 	}
2293 
2294 	/*
2295 	 * We must hold the virtual cell before doing the lookup, otherwise
2296 	 * there's a race with discard.
2297 	 */
2298 	build_virtual_key(tc->td, block, &key);
2299 	if (bio_detain(tc->pool, &key, bio, &virt_cell))
2300 		return DM_MAPIO_SUBMITTED;
2301 
2302 	r = dm_thin_find_block(td, block, 0, &result);
2303 
2304 	/*
2305 	 * Note that we defer readahead too.
2306 	 */
2307 	switch (r) {
2308 	case 0:
2309 		if (unlikely(result.shared)) {
2310 			/*
2311 			 * We have a race condition here between the
2312 			 * result.shared value returned by the lookup and
2313 			 * snapshot creation, which may cause new
2314 			 * sharing.
2315 			 *
2316 			 * To avoid this always quiesce the origin before
2317 			 * taking the snap.  You want to do this anyway to
2318 			 * ensure a consistent application view
2319 			 * (i.e. lockfs).
2320 			 *
2321 			 * More distant ancestors are irrelevant. The
2322 			 * shared flag will be set in their case.
2323 			 */
2324 			thin_defer_cell(tc, virt_cell);
2325 			return DM_MAPIO_SUBMITTED;
2326 		}
2327 
2328 		build_data_key(tc->td, result.block, &key);
2329 		if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2330 			cell_defer_no_holder(tc, virt_cell);
2331 			return DM_MAPIO_SUBMITTED;
2332 		}
2333 
2334 		inc_all_io_entry(tc->pool, bio);
2335 		cell_defer_no_holder(tc, data_cell);
2336 		cell_defer_no_holder(tc, virt_cell);
2337 
2338 		remap(tc, bio, result.block);
2339 		return DM_MAPIO_REMAPPED;
2340 
2341 	case -ENODATA:
2342 		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2343 			/*
2344 			 * This block isn't provisioned, and we have no way
2345 			 * of doing so.
2346 			 */
2347 			handle_unserviceable_bio(tc->pool, bio);
2348 			cell_defer_no_holder(tc, virt_cell);
2349 			return DM_MAPIO_SUBMITTED;
2350 		}
2351 		/* fall through */
2352 
2353 	case -EWOULDBLOCK:
2354 		thin_defer_cell(tc, virt_cell);
2355 		return DM_MAPIO_SUBMITTED;
2356 
2357 	default:
2358 		/*
2359 		 * Must always call bio_io_error on failure.
2360 		 * dm_thin_find_block can fail with -EINVAL if the
2361 		 * pool is switched to fail-io mode.
2362 		 */
2363 		bio_io_error(bio);
2364 		cell_defer_no_holder(tc, virt_cell);
2365 		return DM_MAPIO_SUBMITTED;
2366 	}
2367 }
2368 
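/*
 * Report the pool as congested while it is out of data space,
 * presumably so that upper layers back off rather than pile more bios
 * onto the retry lists; otherwise defer to the data device's
 * backing_dev_info.
 */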
2369 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2370 {
2371 	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2372 	struct request_queue *q;
2373 
2374 	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2375 		return 1;
2376 
2377 	q = bdev_get_queue(pt->data_dev->bdev);
2378 	return bdi_congested(&q->backing_dev_info, bdi_bits);
2379 }
2380 
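/*
 * Move any bios parked on each thin's retry_on_resume_list back onto
 * its deferred list; called from pool_resume().
 */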
2381 static void requeue_bios(struct pool *pool)
2382 {
2383 	unsigned long flags;
2384 	struct thin_c *tc;
2385 
2386 	rcu_read_lock();
2387 	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2388 		spin_lock_irqsave(&tc->lock, flags);
2389 		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2390 		bio_list_init(&tc->retry_on_resume_list);
2391 		spin_unlock_irqrestore(&tc->lock, flags);
2392 	}
2393 	rcu_read_unlock();
2394 }
2395 
2396 /*----------------------------------------------------------------
2397  * Binding of control targets to a pool object
2398  *--------------------------------------------------------------*/
2399 static bool data_dev_supports_discard(struct pool_c *pt)
2400 {
2401 	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2402 
2403 	return q && blk_queue_discard(q);
2404 }
2405 
2406 static bool is_factor(sector_t block_size, uint32_t n)
2407 {
2408 	return !sector_div(block_size, n);
2409 }
2410 
2411 /*
2412  * If discard_passdown was enabled verify that the data device
2413  * supports discards.  Disable discard_passdown if not.
2414  */
2415 static void disable_passdown_if_not_supported(struct pool_c *pt)
2416 {
2417 	struct pool *pool = pt->pool;
2418 	struct block_device *data_bdev = pt->data_dev->bdev;
2419 	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2420 	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
2421 	const char *reason = NULL;
2422 	char buf[BDEVNAME_SIZE];
2423 
2424 	if (!pt->adjusted_pf.discard_passdown)
2425 		return;
2426 
2427 	if (!data_dev_supports_discard(pt))
2428 		reason = "discard unsupported";
2429 
2430 	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2431 		reason = "max discard sectors smaller than a block";
2432 
2433 	else if (data_limits->discard_granularity > block_size)
2434 		reason = "discard granularity larger than a block";
2435 
2436 	else if (!is_factor(block_size, data_limits->discard_granularity))
2437 		reason = "discard granularity not a factor of block size";
2438 
2439 	if (reason) {
2440 		DMWARN("Data device (%s) %s: Disabling discard passdown.",
		       bdevname(data_bdev, buf), reason);
2441 		pt->adjusted_pf.discard_passdown = false;
2442 	}
2443 }
2444 
2445 static int bind_control_target(struct pool *pool, struct dm_target *ti)
2446 {
2447 	struct pool_c *pt = ti->private;
2448 
2449 	/*
2450 	 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2451 	 */
2452 	enum pool_mode old_mode = get_pool_mode(pool);
2453 	enum pool_mode new_mode = pt->adjusted_pf.mode;
2454 
2455 	/*
2456 	 * Don't change the pool's mode until set_pool_mode() below.
2457 	 * Otherwise the pool's process_* function pointers may
2458 	 * not match the desired pool mode.
2459 	 */
2460 	pt->adjusted_pf.mode = old_mode;
2461 
2462 	pool->ti = ti;
2463 	pool->pf = pt->adjusted_pf;
2464 	pool->low_water_blocks = pt->low_water_blocks;
2465 
2466 	set_pool_mode(pool, new_mode);
2467 
2468 	return 0;
2469 }
2470 
2471 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2472 {
2473 	if (pool->ti == ti)
2474 		pool->ti = NULL;
2475 }
2476 
2477 /*----------------------------------------------------------------
2478  * Pool creation
2479  *--------------------------------------------------------------*/
2480 /* Initialize pool features. */
2481 static void pool_features_init(struct pool_features *pf)
2482 {
2483 	pf->mode = PM_WRITE;
2484 	pf->zero_new_blocks = true;
2485 	pf->discard_enabled = true;
2486 	pf->discard_passdown = true;
2487 	pf->error_if_no_space = false;
2488 }
2489 
2490 static void __pool_destroy(struct pool *pool)
2491 {
2492 	__pool_table_remove(pool);
2493 
2494 	if (dm_pool_metadata_close(pool->pmd) < 0)
2495 		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2496 
2497 	dm_bio_prison_destroy(pool->prison);
2498 	dm_kcopyd_client_destroy(pool->copier);
2499 
2500 	if (pool->wq)
2501 		destroy_workqueue(pool->wq);
2502 
2503 	if (pool->next_mapping)
2504 		mempool_free(pool->next_mapping, pool->mapping_pool);
2505 	mempool_destroy(pool->mapping_pool);
2506 	dm_deferred_set_destroy(pool->shared_read_ds);
2507 	dm_deferred_set_destroy(pool->all_io_ds);
2508 	kfree(pool);
2509 }
2510 
2511 static struct kmem_cache *_new_mapping_cache;
2512 
2513 static struct pool *pool_create(struct mapped_device *pool_md,
2514 				struct block_device *metadata_dev,
2515 				unsigned long block_size,
2516 				int read_only, char **error)
2517 {
2518 	int r;
2519 	void *err_p;
2520 	struct pool *pool;
2521 	struct dm_pool_metadata *pmd;
2522 	bool format_device = !read_only;
2523 
2524 	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2525 	if (IS_ERR(pmd)) {
2526 		*error = "Error creating metadata object";
2527 		return (struct pool *)pmd;
2528 	}
2529 
2530 	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2531 	if (!pool) {
2532 		*error = "Error allocating memory for pool";
2533 		err_p = ERR_PTR(-ENOMEM);
2534 		goto bad_pool;
2535 	}
2536 
2537 	pool->pmd = pmd;
2538 	pool->sectors_per_block = block_size;
2539 	if (block_size & (block_size - 1))
2540 		pool->sectors_per_block_shift = -1;
2541 	else
2542 		pool->sectors_per_block_shift = __ffs(block_size);
2543 	pool->low_water_blocks = 0;
2544 	pool_features_init(&pool->pf);
2545 	pool->prison = dm_bio_prison_create();
2546 	if (!pool->prison) {
2547 		*error = "Error creating pool's bio prison";
2548 		err_p = ERR_PTR(-ENOMEM);
2549 		goto bad_prison;
2550 	}
2551 
2552 	pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2553 	if (IS_ERR(pool->copier)) {
2554 		r = PTR_ERR(pool->copier);
2555 		*error = "Error creating pool's kcopyd client";
2556 		err_p = ERR_PTR(r);
2557 		goto bad_kcopyd_client;
2558 	}
2559 
2560 	/*
2561 	 * Create singlethreaded workqueue that will service all devices
2562 	 * that use this metadata.
2563 	 */
2564 	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2565 	if (!pool->wq) {
2566 		*error = "Error creating pool's workqueue";
2567 		err_p = ERR_PTR(-ENOMEM);
2568 		goto bad_wq;
2569 	}
2570 
2571 	throttle_init(&pool->throttle);
2572 	INIT_WORK(&pool->worker, do_worker);
2573 	INIT_DELAYED_WORK(&pool->waker, do_waker);
2574 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2575 	spin_lock_init(&pool->lock);
2576 	bio_list_init(&pool->deferred_flush_bios);
2577 	INIT_LIST_HEAD(&pool->prepared_mappings);
2578 	INIT_LIST_HEAD(&pool->prepared_discards);
2579 	INIT_LIST_HEAD(&pool->active_thins);
2580 	pool->low_water_triggered = false;
2581 	pool->suspended = true;
2582 
2583 	pool->shared_read_ds = dm_deferred_set_create();
2584 	if (!pool->shared_read_ds) {
2585 		*error = "Error creating pool's shared read deferred set";
2586 		err_p = ERR_PTR(-ENOMEM);
2587 		goto bad_shared_read_ds;
2588 	}
2589 
2590 	pool->all_io_ds = dm_deferred_set_create();
2591 	if (!pool->all_io_ds) {
2592 		*error = "Error creating pool's all io deferred set";
2593 		err_p = ERR_PTR(-ENOMEM);
2594 		goto bad_all_io_ds;
2595 	}
2596 
2597 	pool->next_mapping = NULL;
2598 	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2599 						      _new_mapping_cache);
2600 	if (!pool->mapping_pool) {
2601 		*error = "Error creating pool's mapping mempool";
2602 		err_p = ERR_PTR(-ENOMEM);
2603 		goto bad_mapping_pool;
2604 	}
2605 
2606 	pool->ref_count = 1;
2607 	pool->last_commit_jiffies = jiffies;
2608 	pool->pool_md = pool_md;
2609 	pool->md_dev = metadata_dev;
2610 	__pool_table_insert(pool);
2611 
2612 	return pool;
2613 
2614 bad_mapping_pool:
2615 	dm_deferred_set_destroy(pool->all_io_ds);
2616 bad_all_io_ds:
2617 	dm_deferred_set_destroy(pool->shared_read_ds);
2618 bad_shared_read_ds:
2619 	destroy_workqueue(pool->wq);
2620 bad_wq:
2621 	dm_kcopyd_client_destroy(pool->copier);
2622 bad_kcopyd_client:
2623 	dm_bio_prison_destroy(pool->prison);
2624 bad_prison:
2625 	kfree(pool);
2626 bad_pool:
2627 	if (dm_pool_metadata_close(pmd))
2628 		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2629 
2630 	return err_p;
2631 }
2632 
2633 static void __pool_inc(struct pool *pool)
2634 {
2635 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2636 	pool->ref_count++;
2637 }
2638 
2639 static void __pool_dec(struct pool *pool)
2640 {
2641 	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2642 	BUG_ON(!pool->ref_count);
2643 	if (!--pool->ref_count)
2644 		__pool_destroy(pool);
2645 }
2646 
2647 static struct pool *__pool_find(struct mapped_device *pool_md,
2648 				struct block_device *metadata_dev,
2649 				unsigned long block_size, int read_only,
2650 				char **error, int *created)
2651 {
2652 	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2653 
2654 	if (pool) {
2655 		if (pool->pool_md != pool_md) {
2656 			*error = "metadata device already in use by a pool";
2657 			return ERR_PTR(-EBUSY);
2658 		}
2659 		__pool_inc(pool);
2660 
2661 	} else {
2662 		pool = __pool_table_lookup(pool_md);
2663 		if (pool) {
2664 			if (pool->md_dev != metadata_dev) {
2665 				*error = "different pool cannot replace a pool";
2666 				return ERR_PTR(-EINVAL);
2667 			}
2668 			__pool_inc(pool);
2669 
2670 		} else {
2671 			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2672 			*created = 1;
2673 		}
2674 	}
2675 
2676 	return pool;
2677 }
2678 
2679 /*----------------------------------------------------------------
2680  * Pool target methods
2681  *--------------------------------------------------------------*/
2682 static void pool_dtr(struct dm_target *ti)
2683 {
2684 	struct pool_c *pt = ti->private;
2685 
2686 	mutex_lock(&dm_thin_pool_table.mutex);
2687 
2688 	unbind_control_target(pt->pool, ti);
2689 	__pool_dec(pt->pool);
2690 	dm_put_device(ti, pt->metadata_dev);
2691 	dm_put_device(ti, pt->data_dev);
2692 	kfree(pt);
2693 
2694 	mutex_unlock(&dm_thin_pool_table.mutex);
2695 }
2696 
2697 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2698 			       struct dm_target *ti)
2699 {
2700 	int r;
2701 	unsigned argc;
2702 	const char *arg_name;
2703 
2704 	static struct dm_arg _args[] = {
2705 		{0, 4, "Invalid number of pool feature arguments"},
2706 	};
2707 
2708 	/*
2709 	 * No feature arguments supplied.
2710 	 */
2711 	if (!as->argc)
2712 		return 0;
2713 
2714 	r = dm_read_arg_group(_args, as, &argc, &ti->error);
2715 	if (r)
2716 		return -EINVAL;
2717 
2718 	while (argc && !r) {
2719 		arg_name = dm_shift_arg(as);
2720 		argc--;
2721 
2722 		if (!strcasecmp(arg_name, "skip_block_zeroing"))
2723 			pf->zero_new_blocks = false;
2724 
2725 		else if (!strcasecmp(arg_name, "ignore_discard"))
2726 			pf->discard_enabled = false;
2727 
2728 		else if (!strcasecmp(arg_name, "no_discard_passdown"))
2729 			pf->discard_passdown = false;
2730 
2731 		else if (!strcasecmp(arg_name, "read_only"))
2732 			pf->mode = PM_READ_ONLY;
2733 
2734 		else if (!strcasecmp(arg_name, "error_if_no_space"))
2735 			pf->error_if_no_space = true;
2736 
2737 		else {
2738 			ti->error = "Unrecognised pool feature requested";
2739 			r = -EINVAL;
2740 			break;
2741 		}
2742 	}
2743 
2744 	return r;
2745 }
2746 
2747 static void metadata_low_callback(void *context)
2748 {
2749 	struct pool *pool = context;
2750 
2751 	DMWARN("%s: reached low water mark for metadata device: sending event.",
2752 	       dm_device_name(pool->pool_md));
2753 
2754 	dm_table_event(pool->ti->table);
2755 }
2756 
2757 static sector_t get_dev_size(struct block_device *bdev)
2758 {
2759 	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2760 }
2761 
2762 static void warn_if_metadata_device_too_big(struct block_device *bdev)
2763 {
2764 	sector_t metadata_dev_size = get_dev_size(bdev);
2765 	char buffer[BDEVNAME_SIZE];
2766 
2767 	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2768 		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2769 		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2770 }
2771 
2772 static sector_t get_metadata_dev_size(struct block_device *bdev)
2773 {
2774 	sector_t metadata_dev_size = get_dev_size(bdev);
2775 
2776 	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2777 		metadata_dev_size = THIN_METADATA_MAX_SECTORS;
2778 
2779 	return metadata_dev_size;
2780 }
2781 
2782 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2783 {
2784 	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2785 
2786 	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
2787 
2788 	return metadata_dev_size;
2789 }
2790 
2791 /*
2792  * When a metadata threshold is crossed a dm event is triggered, and
2793  * userland should respond by growing the metadata device.  We could let
2794  * userland set the threshold, like we do with the data threshold, but I'm
2795  * not sure they know enough to do this well.
2796  */
2797 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2798 {
2799 	/*
2800 	 * 4M is ample for all ops with the possible exception of thin
2801 	 * device deletion which is harmless if it fails (just retry the
2802 	 * delete after you've grown the device).
2803 	 */
2804 	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2805 	return min((dm_block_t)1024ULL /* 4M */, quarter);
2806 }
2807 
2808 /*
2809  * thin-pool <metadata dev> <data dev>
2810  *	     <data block size (sectors)>
2811  *	     <low water mark (blocks)>
2812  *	     [<#feature args> [<arg>]*]
2813  *
2814  * Optional feature arguments are:
2815  *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
2816  *	     ignore_discard: disable discard
2817  *	     no_discard_passdown: don't pass discards down to the data device
2818  *	     read_only: Don't allow any changes to be made to the pool metadata.
2819  *	     error_if_no_space: error IOs, instead of queueing, if no space.
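 *
 * Example (names and sizes hypothetical): a 1GiB (2097152 sector) data
 * device with 64KiB (128 sector) blocks and a 100-block low water mark:
 *   dmsetup create pool --table \
 *     "0 2097152 thin-pool /dev/mapper/meta /dev/mapper/data 128 100"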
2820  */
2821 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2822 {
2823 	int r, pool_created = 0;
2824 	struct pool_c *pt;
2825 	struct pool *pool;
2826 	struct pool_features pf;
2827 	struct dm_arg_set as;
2828 	struct dm_dev *data_dev;
2829 	unsigned long block_size;
2830 	dm_block_t low_water_blocks;
2831 	struct dm_dev *metadata_dev;
2832 	fmode_t metadata_mode;
2833 
2834 	/*
2835 	 * FIXME Remove validation from scope of lock.
2836 	 */
2837 	mutex_lock(&dm_thin_pool_table.mutex);
2838 
2839 	if (argc < 4) {
2840 		ti->error = "Invalid argument count";
2841 		r = -EINVAL;
2842 		goto out_unlock;
2843 	}
2844 
2845 	as.argc = argc;
2846 	as.argv = argv;
2847 
2848 	/*
2849 	 * Set default pool features.
2850 	 */
2851 	pool_features_init(&pf);
2852 
2853 	dm_consume_args(&as, 4);
2854 	r = parse_pool_features(&as, &pf, ti);
2855 	if (r)
2856 		goto out_unlock;
2857 
2858 	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2859 	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2860 	if (r) {
2861 		ti->error = "Error opening metadata block device";
2862 		goto out_unlock;
2863 	}
2864 	warn_if_metadata_device_too_big(metadata_dev->bdev);
2865 
2866 	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2867 	if (r) {
2868 		ti->error = "Error getting data device";
2869 		goto out_metadata;
2870 	}
2871 
2872 	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2873 	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2874 	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2875 	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2876 		ti->error = "Invalid block size";
2877 		r = -EINVAL;
2878 		goto out;
2879 	}
2880 
2881 	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2882 		ti->error = "Invalid low water mark";
2883 		r = -EINVAL;
2884 		goto out;
2885 	}
2886 
2887 	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2888 	if (!pt) {
2889 		r = -ENOMEM;
2890 		goto out;
2891 	}
2892 
2893 	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2894 			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2895 	if (IS_ERR(pool)) {
2896 		r = PTR_ERR(pool);
2897 		goto out_free_pt;
2898 	}
2899 
2900 	/*
2901 	 * 'pool_created' reflects whether this is the first table load.
2902 	 * Top level discard support is not allowed to be changed after
2903 	 * initial load.  This would require a pool reload to trigger thin
2904 	 * device changes.
2905 	 */
2906 	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2907 		ti->error = "Discard support cannot be disabled once enabled";
2908 		r = -EINVAL;
2909 		goto out_flags_changed;
2910 	}
2911 
2912 	pt->pool = pool;
2913 	pt->ti = ti;
2914 	pt->metadata_dev = metadata_dev;
2915 	pt->data_dev = data_dev;
2916 	pt->low_water_blocks = low_water_blocks;
2917 	pt->adjusted_pf = pt->requested_pf = pf;
2918 	ti->num_flush_bios = 1;
2919 
2920 	/*
2921 	 * Only need to enable discards if the pool should pass
2922 	 * them down to the data device.  The thin device's discard
2923 	 * processing will cause mappings to be removed from the btree.
2924 	 */
2925 	ti->discard_zeroes_data_unsupported = true;
2926 	if (pf.discard_enabled && pf.discard_passdown) {
2927 		ti->num_discard_bios = 1;
2928 
2929 		/*
2930 		 * Setting 'discards_supported' circumvents the normal
2931 		 * stacking of discard limits (this keeps the pool and
2932 		 * thin devices' discard limits consistent).
2933 		 */
2934 		ti->discards_supported = true;
2935 	}
2936 	ti->private = pt;
2937 
2938 	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2939 						calc_metadata_threshold(pt),
2940 						metadata_low_callback,
2941 						pool);
2942 	if (r)
2943 		goto out_free_pt;
2944 
2945 	pt->callbacks.congested_fn = pool_is_congested;
2946 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2947 
2948 	mutex_unlock(&dm_thin_pool_table.mutex);
2949 
2950 	return 0;
2951 
2952 out_flags_changed:
2953 	__pool_dec(pool);
2954 out_free_pt:
2955 	kfree(pt);
2956 out:
2957 	dm_put_device(ti, data_dev);
2958 out_metadata:
2959 	dm_put_device(ti, metadata_dev);
2960 out_unlock:
2961 	mutex_unlock(&dm_thin_pool_table.mutex);
2962 
2963 	return r;
2964 }
2965 
2966 static int pool_map(struct dm_target *ti, struct bio *bio)
2967 {
2968 	int r;
2969 	struct pool_c *pt = ti->private;
2970 	struct pool *pool = pt->pool;
2971 	unsigned long flags;
2972 
2973 	/*
2974 	 * As this is a singleton target, ti->begin is always zero.
2975 	 */
2976 	spin_lock_irqsave(&pool->lock, flags);
2977 	bio->bi_bdev = pt->data_dev->bdev;
2978 	r = DM_MAPIO_REMAPPED;
2979 	spin_unlock_irqrestore(&pool->lock, flags);
2980 
2981 	return r;
2982 }
2983 
2984 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2985 {
2986 	int r;
2987 	struct pool_c *pt = ti->private;
2988 	struct pool *pool = pt->pool;
2989 	sector_t data_size = ti->len;
2990 	dm_block_t sb_data_size;
2991 
2992 	*need_commit = false;
2993 
2994 	(void) sector_div(data_size, pool->sectors_per_block);
2995 
2996 	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2997 	if (r) {
2998 		DMERR("%s: failed to retrieve data device size",
2999 		      dm_device_name(pool->pool_md));
3000 		return r;
3001 	}
3002 
3003 	if (data_size < sb_data_size) {
3004 		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3005 		      dm_device_name(pool->pool_md),
3006 		      (unsigned long long)data_size, (unsigned long long)sb_data_size);
3007 		return -EINVAL;
3008 
3009 	} else if (data_size > sb_data_size) {
3010 		if (dm_pool_metadata_needs_check(pool->pmd)) {
3011 			DMERR("%s: unable to grow the data device until repaired.",
3012 			      dm_device_name(pool->pool_md));
3013 			return 0;
3014 		}
3015 
3016 		if (sb_data_size)
3017 			DMINFO("%s: growing the data device from %llu to %llu blocks",
3018 			       dm_device_name(pool->pool_md),
3019 			       (unsigned long long)sb_data_size, (unsigned long long)data_size);
3020 		r = dm_pool_resize_data_dev(pool->pmd, data_size);
3021 		if (r) {
3022 			metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3023 			return r;
3024 		}
3025 
3026 		*need_commit = true;
3027 	}
3028 
3029 	return 0;
3030 }
3031 
3032 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3033 {
3034 	int r;
3035 	struct pool_c *pt = ti->private;
3036 	struct pool *pool = pt->pool;
3037 	dm_block_t metadata_dev_size, sb_metadata_dev_size;
3038 
3039 	*need_commit = false;
3040 
3041 	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3042 
3043 	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3044 	if (r) {
3045 		DMERR("%s: failed to retrieve metadata device size",
3046 		      dm_device_name(pool->pool_md));
3047 		return r;
3048 	}
3049 
3050 	if (metadata_dev_size < sb_metadata_dev_size) {
3051 		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3052 		      dm_device_name(pool->pool_md),
3053 		      (unsigned long long)metadata_dev_size, (unsigned long long)sb_metadata_dev_size);
3054 		return -EINVAL;
3055 
3056 	} else if (metadata_dev_size > sb_metadata_dev_size) {
3057 		if (dm_pool_metadata_needs_check(pool->pmd)) {
3058 			DMERR("%s: unable to grow the metadata device until repaired.",
3059 			      dm_device_name(pool->pool_md));
3060 			return 0;
3061 		}
3062 
3063 		warn_if_metadata_device_too_big(pool->md_dev);
3064 		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3065 		       dm_device_name(pool->pool_md),
3066 		       (unsigned long long)sb_metadata_dev_size, (unsigned long long)metadata_dev_size);
3067 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3068 		if (r) {
3069 			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3070 			return r;
3071 		}
3072 
3073 		*need_commit = true;
3074 	}
3075 
3076 	return 0;
3077 }
3078 
3079 /*
3080  * Retrieves the number of blocks of the data device from
3081  * the superblock and compares it to the actual device size,
3082  * thus resizing the data device in case it has grown.
3083  *
3084  * This both copes with opening preallocated data devices in the ctr
3085  * being followed by a resume
3086  * -and-
3087  * calling the resume method individually after userspace has
3088  * grown the data device in reaction to a table event.
3089  */
3090 static int pool_preresume(struct dm_target *ti)
3091 {
3092 	int r;
3093 	bool need_commit1, need_commit2;
3094 	struct pool_c *pt = ti->private;
3095 	struct pool *pool = pt->pool;
3096 
3097 	/*
3098 	 * Take control of the pool object.
3099 	 */
3100 	r = bind_control_target(pool, ti);
3101 	if (r)
3102 		return r;
3103 
3104 	r = maybe_resize_data_dev(ti, &need_commit1);
3105 	if (r)
3106 		return r;
3107 
3108 	r = maybe_resize_metadata_dev(ti, &need_commit2);
3109 	if (r)
3110 		return r;
3111 
3112 	if (need_commit1 || need_commit2)
3113 		(void) commit(pool);
3114 
3115 	return 0;
3116 }
3117 
3118 static void pool_suspend_active_thins(struct pool *pool)
3119 {
3120 	struct thin_c *tc;
3121 
3122 	/* Suspend all active thin devices */
3123 	tc = get_first_thin(pool);
3124 	while (tc) {
3125 		dm_internal_suspend_noflush(tc->thin_md);
3126 		tc = get_next_thin(pool, tc);
3127 	}
3128 }
3129 
3130 static void pool_resume_active_thins(struct pool *pool)
3131 {
3132 	struct thin_c *tc;
3133 
3134 	/* Resume all active thin devices */
3135 	tc = get_first_thin(pool);
3136 	while (tc) {
3137 		dm_internal_resume(tc->thin_md);
3138 		tc = get_next_thin(pool, tc);
3139 	}
3140 }
3141 
3142 static void pool_resume(struct dm_target *ti)
3143 {
3144 	struct pool_c *pt = ti->private;
3145 	struct pool *pool = pt->pool;
3146 	unsigned long flags;
3147 
3148 	/*
3149 	 * Must requeue active_thins' bios and then resume
3150 	 * active_thins _before_ clearing 'suspend' flag.
3151 	 */
3152 	requeue_bios(pool);
3153 	pool_resume_active_thins(pool);
3154 
3155 	spin_lock_irqsave(&pool->lock, flags);
3156 	pool->low_water_triggered = false;
3157 	pool->suspended = false;
3158 	spin_unlock_irqrestore(&pool->lock, flags);
3159 
3160 	do_waker(&pool->waker.work);
3161 }
3162 
3163 static void pool_presuspend(struct dm_target *ti)
3164 {
3165 	struct pool_c *pt = ti->private;
3166 	struct pool *pool = pt->pool;
3167 	unsigned long flags;
3168 
3169 	spin_lock_irqsave(&pool->lock, flags);
3170 	pool->suspended = true;
3171 	spin_unlock_irqrestore(&pool->lock, flags);
3172 
3173 	pool_suspend_active_thins(pool);
3174 }
3175 
3176 static void pool_presuspend_undo(struct dm_target *ti)
3177 {
3178 	struct pool_c *pt = ti->private;
3179 	struct pool *pool = pt->pool;
3180 	unsigned long flags;
3181 
3182 	pool_resume_active_thins(pool);
3183 
3184 	spin_lock_irqsave(&pool->lock, flags);
3185 	pool->suspended = false;
3186 	spin_unlock_irqrestore(&pool->lock, flags);
3187 }
3188 
3189 static void pool_postsuspend(struct dm_target *ti)
3190 {
3191 	struct pool_c *pt = ti->private;
3192 	struct pool *pool = pt->pool;
3193 
3194 	cancel_delayed_work(&pool->waker);
3195 	cancel_delayed_work(&pool->no_space_timeout);
3196 	flush_workqueue(pool->wq);
3197 	(void) commit(pool);
3198 }
3199 
3200 static int check_arg_count(unsigned argc, unsigned args_required)
3201 {
3202 	if (argc != args_required) {
3203 		DMWARN("Message received with %u arguments instead of %u.",
3204 		       argc, args_required);
3205 		return -EINVAL;
3206 	}
3207 
3208 	return 0;
3209 }
3210 
3211 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3212 {
3213 	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3214 	    *dev_id <= MAX_DEV_ID)
3215 		return 0;
3216 
3217 	if (warning)
3218 		DMWARN("Message received with invalid device id: %s", arg);
3219 
3220 	return -EINVAL;
3221 }
3222 
3223 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3224 {
3225 	dm_thin_id dev_id;
3226 	int r;
3227 
3228 	r = check_arg_count(argc, 2);
3229 	if (r)
3230 		return r;
3231 
3232 	r = read_dev_id(argv[1], &dev_id, 1);
3233 	if (r)
3234 		return r;
3235 
3236 	r = dm_pool_create_thin(pool->pmd, dev_id);
3237 	if (r) {
3238 		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3239 		       argv[1]);
3240 		return r;
3241 	}
3242 
3243 	return 0;
3244 }
3245 
3246 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3247 {
3248 	dm_thin_id dev_id;
3249 	dm_thin_id origin_dev_id;
3250 	int r;
3251 
3252 	r = check_arg_count(argc, 3);
3253 	if (r)
3254 		return r;
3255 
3256 	r = read_dev_id(argv[1], &dev_id, 1);
3257 	if (r)
3258 		return r;
3259 
3260 	r = read_dev_id(argv[2], &origin_dev_id, 1);
3261 	if (r)
3262 		return r;
3263 
3264 	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3265 	if (r) {
3266 		DMWARN("Creation of new snapshot %s of device %s failed.",
3267 		       argv[1], argv[2]);
3268 		return r;
3269 	}
3270 
3271 	return 0;
3272 }
3273 
3274 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3275 {
3276 	dm_thin_id dev_id;
3277 	int r;
3278 
3279 	r = check_arg_count(argc, 2);
3280 	if (r)
3281 		return r;
3282 
3283 	r = read_dev_id(argv[1], &dev_id, 1);
3284 	if (r)
3285 		return r;
3286 
3287 	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3288 	if (r)
3289 		DMWARN("Deletion of thin device %s failed.", argv[1]);
3290 
3291 	return r;
3292 }
3293 
3294 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3295 {
3296 	dm_thin_id old_id, new_id;
3297 	int r;
3298 
3299 	r = check_arg_count(argc, 3);
3300 	if (r)
3301 		return r;
3302 
3303 	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3304 		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3305 		return -EINVAL;
3306 	}
3307 
3308 	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3309 		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3310 		return -EINVAL;
3311 	}
3312 
3313 	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3314 	if (r) {
3315 		DMWARN("Failed to change transaction id from %s to %s.",
3316 		       argv[1], argv[2]);
3317 		return r;
3318 	}
3319 
3320 	return 0;
3321 }
3322 
3323 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3324 {
3325 	int r;
3326 
3327 	r = check_arg_count(argc, 1);
3328 	if (r)
3329 		return r;
3330 
3331 	(void) commit(pool);
3332 
3333 	r = dm_pool_reserve_metadata_snap(pool->pmd);
3334 	if (r)
3335 		DMWARN("reserve_metadata_snap message failed.");
3336 
3337 	return r;
3338 }
3339 
3340 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3341 {
3342 	int r;
3343 
3344 	r = check_arg_count(argc, 1);
3345 	if (r)
3346 		return r;
3347 
3348 	r = dm_pool_release_metadata_snap(pool->pmd);
3349 	if (r)
3350 		DMWARN("release_metadata_snap message failed.");
3351 
3352 	return r;
3353 }
3354 
3355 /*
3356  * Messages supported:
3357  *   create_thin	<dev_id>
3358  *   create_snap	<dev_id> <origin_id>
3359  *   delete		<dev_id>
3360  *   set_transaction_id <current_trans_id> <new_trans_id>
3361  *   reserve_metadata_snap
3362  *   release_metadata_snap
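 *
 * Messages are sent with dmsetup, e.g. (hypothetical device name):
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"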
3363  */
3364 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3365 {
3366 	int r = -EINVAL;
3367 	struct pool_c *pt = ti->private;
3368 	struct pool *pool = pt->pool;
3369 
3370 	if (!strcasecmp(argv[0], "create_thin"))
3371 		r = process_create_thin_mesg(argc, argv, pool);
3372 
3373 	else if (!strcasecmp(argv[0], "create_snap"))
3374 		r = process_create_snap_mesg(argc, argv, pool);
3375 
3376 	else if (!strcasecmp(argv[0], "delete"))
3377 		r = process_delete_mesg(argc, argv, pool);
3378 
3379 	else if (!strcasecmp(argv[0], "set_transaction_id"))
3380 		r = process_set_transaction_id_mesg(argc, argv, pool);
3381 
3382 	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3383 		r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3384 
3385 	else if (!strcasecmp(argv[0], "release_metadata_snap"))
3386 		r = process_release_metadata_snap_mesg(argc, argv, pool);
3387 
3388 	else
3389 		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3390 
3391 	if (!r)
3392 		(void) commit(pool);
3393 
3394 	return r;
3395 }
3396 
3397 static void emit_flags(struct pool_features *pf, char *result,
3398 		       unsigned sz, unsigned maxlen)
3399 {
3400 	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
3401 		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3402 		pf->error_if_no_space;
3403 	DMEMIT("%u ", count);
3404 
3405 	if (!pf->zero_new_blocks)
3406 		DMEMIT("skip_block_zeroing ");
3407 
3408 	if (!pf->discard_enabled)
3409 		DMEMIT("ignore_discard ");
3410 
3411 	if (!pf->discard_passdown)
3412 		DMEMIT("no_discard_passdown ");
3413 
3414 	if (pf->mode == PM_READ_ONLY)
3415 		DMEMIT("read_only ");
3416 
3417 	if (pf->error_if_no_space)
3418 		DMEMIT("error_if_no_space ");
3419 }
3420 
3421 /*
3422  * Status line is:
3423  *    <transaction id> <used metadata blocks>/<total metadata blocks>
3424  *    <used data blocks>/<total data blocks> <held metadata root>
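 *    <pool mode: out_of_data_space|ro|rw>
 *    <discard config: ignore_discard|discard_passdown|no_discard_passdown>
 *    <error_if_no_space|queue_if_no_space>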
3425  */
3426 static void pool_status(struct dm_target *ti, status_type_t type,
3427 			unsigned status_flags, char *result, unsigned maxlen)
3428 {
3429 	int r;
3430 	unsigned sz = 0;
3431 	uint64_t transaction_id;
3432 	dm_block_t nr_free_blocks_data;
3433 	dm_block_t nr_free_blocks_metadata;
3434 	dm_block_t nr_blocks_data;
3435 	dm_block_t nr_blocks_metadata;
3436 	dm_block_t held_root;
3437 	char buf[BDEVNAME_SIZE];
3438 	char buf2[BDEVNAME_SIZE];
3439 	struct pool_c *pt = ti->private;
3440 	struct pool *pool = pt->pool;
3441 
3442 	switch (type) {
3443 	case STATUSTYPE_INFO:
3444 		if (get_pool_mode(pool) == PM_FAIL) {
3445 			DMEMIT("Fail");
3446 			break;
3447 		}
3448 
3449 		/* Commit to ensure statistics aren't out-of-date */
3450 		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3451 			(void) commit(pool);
3452 
3453 		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3454 		if (r) {
3455 			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3456 			      dm_device_name(pool->pool_md), r);
3457 			goto err;
3458 		}
3459 
3460 		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3461 		if (r) {
3462 			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3463 			      dm_device_name(pool->pool_md), r);
3464 			goto err;
3465 		}
3466 
3467 		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
3468 		if (r) {
3469 			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3470 			      dm_device_name(pool->pool_md), r);
3471 			goto err;
3472 		}
3473 
3474 		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3475 		if (r) {
3476 			DMERR("%s: dm_pool_get_free_block_count returned %d",
3477 			      dm_device_name(pool->pool_md), r);
3478 			goto err;
3479 		}
3480 
3481 		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
3482 		if (r) {
3483 			DMERR("%s: dm_pool_get_data_dev_size returned %d",
3484 			      dm_device_name(pool->pool_md), r);
3485 			goto err;
3486 		}
3487 
3488 		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
3489 		if (r) {
3490 			DMERR("%s: dm_pool_get_metadata_snap returned %d",
3491 			      dm_device_name(pool->pool_md), r);
3492 			goto err;
3493 		}
3494 
3495 		DMEMIT("%llu %llu/%llu %llu/%llu ",
3496 		       (unsigned long long)transaction_id,
3497 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3498 		       (unsigned long long)nr_blocks_metadata,
3499 		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3500 		       (unsigned long long)nr_blocks_data);
3501 
3502 		if (held_root)
3503 			DMEMIT("%llu ", held_root);
3504 		else
3505 			DMEMIT("- ");
3506 
3507 		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3508 			DMEMIT("out_of_data_space ");
3509 		else if (pool->pf.mode == PM_READ_ONLY)
3510 			DMEMIT("ro ");
3511 		else
3512 			DMEMIT("rw ");
3513 
3514 		if (!pool->pf.discard_enabled)
3515 			DMEMIT("ignore_discard ");
3516 		else if (pool->pf.discard_passdown)
3517 			DMEMIT("discard_passdown ");
3518 		else
3519 			DMEMIT("no_discard_passdown ");
3520 
3521 		if (pool->pf.error_if_no_space)
3522 			DMEMIT("error_if_no_space ");
3523 		else
3524 			DMEMIT("queue_if_no_space ");
3525 
3526 		break;
3527 
3528 	case STATUSTYPE_TABLE:
3529 		DMEMIT("%s %s %lu %llu ",
3530 		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3531 		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3532 		       (unsigned long)pool->sectors_per_block,
3533 		       (unsigned long long)pt->low_water_blocks);
3534 		emit_flags(&pt->requested_pf, result, sz, maxlen);
3535 		break;
3536 	}
3537 	return;
3538 
3539 err:
3540 	DMEMIT("Error");
3541 }
3542 
3543 static int pool_iterate_devices(struct dm_target *ti,
3544 				iterate_devices_callout_fn fn, void *data)
3545 {
3546 	struct pool_c *pt = ti->private;
3547 
3548 	return fn(ti, pt->data_dev, 0, ti->len, data);
3549 }
3550 
3551 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3552 		      struct bio_vec *biovec, int max_size)
3553 {
3554 	struct pool_c *pt = ti->private;
3555 	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3556 
3557 	if (!q->merge_bvec_fn)
3558 		return max_size;
3559 
3560 	bvm->bi_bdev = pt->data_dev->bdev;
3561 
3562 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3563 }
3564 
3565 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
3566 {
3567 	struct pool *pool = pt->pool;
3568 	struct queue_limits *data_limits;
3569 
3570 	limits->max_discard_sectors = pool->sectors_per_block;
3571 
3572 	/*
3573 	 * discard_granularity is just a hint, and not enforced.
3574 	 */
3575 	if (pt->adjusted_pf.discard_passdown) {
3576 		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
3577 		limits->discard_granularity = max(data_limits->discard_granularity,
3578 						  pool->sectors_per_block << SECTOR_SHIFT);
3579 	} else
3580 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
3581 }
3582 
3583 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3584 {
3585 	struct pool_c *pt = ti->private;
3586 	struct pool *pool = pt->pool;
3587 	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3588 
3589 	/*
3590 	 * If max_sectors is smaller than pool->sectors_per_block adjust it
3591 	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
3592 	 * This is especially beneficial when the pool's data device is a RAID
3593 	 * device with a full stripe width that matches pool->sectors_per_block:
3594 	 * even though partial RAID stripe-sized IOs will be issued to a
3595 	 * single RAID stripe, when aggregated they will end on a full RAID
3596 	 * stripe boundary, which avoids cascading further partial stripe writes.
3597 	 */
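	/*
	 * Worked example (numbers purely illustrative): with
	 * sectors_per_block = 3072 and a stacked max_sectors of 1024, 1024
	 * is a power of two but not a factor of 3072, so it is first
	 * decremented to 1023 and then rounded down to 512, which does
	 * divide 3072.
	 */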
3598 	if (limits->max_sectors < pool->sectors_per_block) {
3599 		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3600 			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3601 				limits->max_sectors--;
3602 			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3603 		}
3604 	}
3605 
3606 	/*
3607 	 * If the system-determined stacked limits are compatible with the
3608 	 * pool's blocksize (io_opt is a factor) do not override them.
3609 	 */
3610 	if (io_opt_sectors < pool->sectors_per_block ||
3611 	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3612 		if (is_factor(pool->sectors_per_block, limits->max_sectors))
3613 			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3614 		else
3615 			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3616 		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3617 	}
3618 
3619 	/*
3620 	 * pt->adjusted_pf is a staging area for the actual features to use.
3621 	 * They get transferred to the live pool in bind_control_target()
3622 	 * called from pool_preresume().
3623 	 */
3624 	if (!pt->adjusted_pf.discard_enabled) {
3625 		/*
3626 		 * Must explicitly disallow stacking discard limits otherwise the
3627 		 * block layer will stack them if pool's data device has support.
3628 		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3629 		 * user to see that, so make sure to set all discard limits to 0.
3630 		 */
3631 		limits->discard_granularity = 0;
3632 		return;
3633 	}
3634 
3635 	disable_passdown_if_not_supported(pt);
3636 
3637 	set_discard_limits(pt, limits);
3638 }
3639 
3640 static struct target_type pool_target = {
3641 	.name = "thin-pool",
3642 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3643 		    DM_TARGET_IMMUTABLE,
3644 	.version = {1, 14, 0},
3645 	.module = THIS_MODULE,
3646 	.ctr = pool_ctr,
3647 	.dtr = pool_dtr,
3648 	.map = pool_map,
3649 	.presuspend = pool_presuspend,
3650 	.presuspend_undo = pool_presuspend_undo,
3651 	.postsuspend = pool_postsuspend,
3652 	.preresume = pool_preresume,
3653 	.resume = pool_resume,
3654 	.message = pool_message,
3655 	.status = pool_status,
3656 	.merge = pool_merge,
3657 	.iterate_devices = pool_iterate_devices,
3658 	.io_hints = pool_io_hints,
3659 };
3660 
3661 /*----------------------------------------------------------------
3662  * Thin target methods
3663  *--------------------------------------------------------------*/
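/*
 * thin_get()/thin_put() guard a thin_c against teardown: when the
 * refcount drops to zero, tc->can_destroy completes and thin_dtr()
 * proceeds to free the device.
 */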
3664 static void thin_get(struct thin_c *tc)
3665 {
3666 	atomic_inc(&tc->refcount);
3667 }
3668 
3669 static void thin_put(struct thin_c *tc)
3670 {
3671 	if (atomic_dec_and_test(&tc->refcount))
3672 		complete(&tc->can_destroy);
3673 }
3674 
3675 static void thin_dtr(struct dm_target *ti)
3676 {
3677 	struct thin_c *tc = ti->private;
3678 	unsigned long flags;
3679 
3680 	spin_lock_irqsave(&tc->pool->lock, flags);
3681 	list_del_rcu(&tc->list);
3682 	spin_unlock_irqrestore(&tc->pool->lock, flags);
3683 	synchronize_rcu();
3684 
3685 	thin_put(tc);
3686 	wait_for_completion(&tc->can_destroy);
3687 
3688 	mutex_lock(&dm_thin_pool_table.mutex);
3689 
3690 	__pool_dec(tc->pool);
3691 	dm_pool_close_thin_device(tc->td);
3692 	dm_put_device(ti, tc->pool_dev);
3693 	if (tc->origin_dev)
3694 		dm_put_device(ti, tc->origin_dev);
3695 	kfree(tc);
3696 
3697 	mutex_unlock(&dm_thin_pool_table.mutex);
3698 }
3699 
3700 /*
3701  * Thin target parameters:
3702  *
3703  * <pool_dev> <dev_id> [origin_dev]
3704  *
3705  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3706  * dev_id: the internal device identifier
3707  * origin_dev: a device external to the pool that should act as the origin
3708  *
3709  * If the pool device has discards disabled, they get disabled for the thin
3710  * device as well.
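 *
 * Example (hypothetical names; thin device 0 must first be created via
 * the pool's "create_thin 0" message):
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"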
3711  */
3712 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3713 {
3714 	int r;
3715 	struct thin_c *tc;
3716 	struct dm_dev *pool_dev, *origin_dev;
3717 	struct mapped_device *pool_md;
3718 	unsigned long flags;
3719 
3720 	mutex_lock(&dm_thin_pool_table.mutex);
3721 
3722 	if (argc != 2 && argc != 3) {
3723 		ti->error = "Invalid argument count";
3724 		r = -EINVAL;
3725 		goto out_unlock;
3726 	}
3727 
3728 	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3729 	if (!tc) {
3730 		ti->error = "Out of memory";
3731 		r = -ENOMEM;
3732 		goto out_unlock;
3733 	}
3734 	tc->thin_md = dm_table_get_md(ti->table);
3735 	spin_lock_init(&tc->lock);
3736 	INIT_LIST_HEAD(&tc->deferred_cells);
3737 	bio_list_init(&tc->deferred_bio_list);
3738 	bio_list_init(&tc->retry_on_resume_list);
3739 	tc->sort_bio_list = RB_ROOT;
3740 
3741 	if (argc == 3) {
3742 		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3743 		if (r) {
3744 			ti->error = "Error opening origin device";
3745 			goto bad_origin_dev;
3746 		}
3747 		tc->origin_dev = origin_dev;
3748 	}
3749 
3750 	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3751 	if (r) {
3752 		ti->error = "Error opening pool device";
3753 		goto bad_pool_dev;
3754 	}
3755 	tc->pool_dev = pool_dev;
3756 
3757 	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3758 		ti->error = "Invalid device id";
3759 		r = -EINVAL;
3760 		goto bad_common;
3761 	}
3762 
3763 	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3764 	if (!pool_md) {
3765 		ti->error = "Couldn't get pool mapped device";
3766 		r = -EINVAL;
3767 		goto bad_common;
3768 	}
3769 
3770 	tc->pool = __pool_table_lookup(pool_md);
3771 	if (!tc->pool) {
3772 		ti->error = "Couldn't find pool object";
3773 		r = -EINVAL;
3774 		goto bad_pool_lookup;
3775 	}
3776 	__pool_inc(tc->pool);
3777 
3778 	if (get_pool_mode(tc->pool) == PM_FAIL) {
3779 		ti->error = "Couldn't open thin device: pool is in fail mode";
3780 		r = -EINVAL;
3781 		goto bad_pool;
3782 	}
3783 
3784 	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3785 	if (r) {
3786 		ti->error = "Couldn't open thin internal device";
3787 		goto bad_pool;
3788 	}
3789 
3790 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3791 	if (r)
3792 		goto bad;
3793 
3794 	ti->num_flush_bios = 1;
3795 	ti->flush_supported = true;
3796 	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
3797 
3798 	/* If the pool supports discards, pass them on. */
3799 	ti->discard_zeroes_data_unsupported = true;
3800 	if (tc->pool->pf.discard_enabled) {
3801 		ti->discards_supported = true;
3802 		ti->num_discard_bios = 1;
3803 		/* Discard bios must be split on a block boundary */
3804 		ti->split_discard_bios = true;
3805 	}
3806 
3807 	mutex_unlock(&dm_thin_pool_table.mutex);
3808 
3809 	spin_lock_irqsave(&tc->pool->lock, flags);
3810 	if (tc->pool->suspended) {
3811 		spin_unlock_irqrestore(&tc->pool->lock, flags);
3812 		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
3813 		ti->error = "Unable to activate thin device while pool is suspended";
3814 		r = -EINVAL;
3815 		goto bad;
3816 	}
3817 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
3818 	spin_unlock_irqrestore(&tc->pool->lock, flags);
3819 	/*
3820 	 * This synchronize_rcu() call is needed here; without it a
3821 	 * wake_worker() call could find no bios to process because the
3822 	 * newly added tc isn't yet visible, leaving us dependent on the
3823 	 * periodic commit to call wake_worker() and adding latency.
3824 	 */
3825 	synchronize_rcu();
3826 
3827 	dm_put(pool_md);
3828 
3829 	atomic_set(&tc->refcount, 1);
3830 	init_completion(&tc->can_destroy);
3831 
3832 	return 0;
3833 
3834 bad:
3835 	dm_pool_close_thin_device(tc->td);
3836 bad_pool:
3837 	__pool_dec(tc->pool);
3838 bad_pool_lookup:
3839 	dm_put(pool_md);
3840 bad_common:
3841 	dm_put_device(ti, tc->pool_dev);
3842 bad_pool_dev:
3843 	if (tc->origin_dev)
3844 		dm_put_device(ti, tc->origin_dev);
3845 bad_origin_dev:
3846 	kfree(tc);
3847 out_unlock:
3848 	mutex_unlock(&dm_thin_pool_table.mutex);
3849 
3850 	return r;
3851 }
3852 
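/*
 * Rebase the bio's sector from the target's logical address space onto
 * the thin device before handing it to thin_bio_map() for the actual
 * block lookup.
 */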
3853 static int thin_map(struct dm_target *ti, struct bio *bio)
3854 {
3855 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
3856 
3857 	return thin_bio_map(ti, bio);
3858 }
3859 
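/*
 * End-of-io hook: drop any deferred-set entries this bio was holding.
 * Releasing the shared-read entry lets quiesced mappings complete their
 * preparation; releasing the all-io entry may hand prepared discards to
 * the worker.
 */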
3860 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
3861 {
3862 	unsigned long flags;
3863 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
3864 	struct list_head work;
3865 	struct dm_thin_new_mapping *m, *tmp;
3866 	struct pool *pool = h->tc->pool;
3867 
3868 	if (h->shared_read_entry) {
3869 		INIT_LIST_HEAD(&work);
3870 		dm_deferred_entry_dec(h->shared_read_entry, &work);
3871 
3872 		spin_lock_irqsave(&pool->lock, flags);
3873 		list_for_each_entry_safe(m, tmp, &work, list) {
3874 			list_del(&m->list);
3875 			__complete_mapping_preparation(m);
3876 		}
3877 		spin_unlock_irqrestore(&pool->lock, flags);
3878 	}
3879 
3880 	if (h->all_io_entry) {
3881 		INIT_LIST_HEAD(&work);
3882 		dm_deferred_entry_dec(h->all_io_entry, &work);
3883 		if (!list_empty(&work)) {
3884 			spin_lock_irqsave(&pool->lock, flags);
3885 			list_for_each_entry_safe(m, tmp, &work, list)
3886 				list_add_tail(&m->list, &pool->prepared_discards);
3887 			spin_unlock_irqrestore(&pool->lock, flags);
3888 			wake_worker(pool);
3889 		}
3890 	}
3891 
3892 	return 0;
3893 }
3894 
3895 static void thin_presuspend(struct dm_target *ti)
3896 {
3897 	struct thin_c *tc = ti->private;
3898 
3899 	if (dm_noflush_suspending(ti))
3900 		noflush_work(tc, do_noflush_start);
3901 }
3902 
3903 static void thin_postsuspend(struct dm_target *ti)
3904 {
3905 	struct thin_c *tc = ti->private;
3906 
3907 	/*
3908 	 * The dm_noflush_suspending flag has been cleared by now, so we
3909 	 * can't tell if this was a noflush suspend; we must always run this.
3910 	 */
3911 	noflush_work(tc, do_noflush_stop);
3912 }
3913 
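/*
 * Cache the external origin's size before resuming; it is used to bound
 * reads and copies from the origin when provisioning blocks.
 */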
3914 static int thin_preresume(struct dm_target *ti)
3915 {
3916 	struct thin_c *tc = ti->private;
3917 
3918 	if (tc->origin_dev)
3919 		tc->origin_size = get_dev_size(tc->origin_dev->bdev);
3920 
3921 	return 0;
3922 }
3923 
3924 /*
3925  * <nr mapped sectors> <highest mapped sector>
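 *
 * For example (values illustrative), a fully mapped 1GiB thin device
 * would report "2097152 2097151"; "-" is emitted for the highest mapped
 * sector when nothing is mapped.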
3926  */
3927 static void thin_status(struct dm_target *ti, status_type_t type,
3928 			unsigned status_flags, char *result, unsigned maxlen)
3929 {
3930 	int r;
3931 	ssize_t sz = 0;
3932 	dm_block_t mapped, highest;
3933 	char buf[BDEVNAME_SIZE];
3934 	struct thin_c *tc = ti->private;
3935 
3936 	if (get_pool_mode(tc->pool) == PM_FAIL) {
3937 		DMEMIT("Fail");
3938 		return;
3939 	}
3940 
3941 	if (!tc->td)
3942 		DMEMIT("-");
3943 	else {
3944 		switch (type) {
3945 		case STATUSTYPE_INFO:
3946 			r = dm_thin_get_mapped_count(tc->td, &mapped);
3947 			if (r) {
3948 				DMERR("dm_thin_get_mapped_count returned %d", r);
3949 				goto err;
3950 			}
3951 
3952 			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3953 			if (r < 0) {
3954 				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3955 				goto err;
3956 			}
3957 
3958 			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3959 			if (r)
3960 				DMEMIT("%llu", ((highest + 1) *
3961 						tc->pool->sectors_per_block) - 1);
3962 			else
3963 				DMEMIT("-");
3964 			break;
3965 
3966 		case STATUSTYPE_TABLE:
3967 			DMEMIT("%s %lu",
3968 			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3969 			       (unsigned long) tc->dev_id);
3970 			if (tc->origin_dev)
3971 				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3972 			break;
3973 		}
3974 	}
3975 
3976 	return;
3977 
3978 err:
3979 	DMEMIT("Error");
3980 }
3981 
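/*
 * Defer merge decisions to the pool's data device: if its queue has no
 * merge_bvec_fn anything up to max_size goes, otherwise ask it with
 * bi_bdev pointed at the pool device and the sector rebased by the
 * target offset.
 */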
3982 static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3983 		      struct bio_vec *biovec, int max_size)
3984 {
3985 	struct thin_c *tc = ti->private;
3986 	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
3987 
3988 	if (!q->merge_bvec_fn)
3989 		return max_size;
3990 
3991 	bvm->bi_bdev = tc->pool_dev->bdev;
3992 	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
3993 
3994 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3995 }
3996 
3997 static int thin_iterate_devices(struct dm_target *ti,
3998 				iterate_devices_callout_fn fn, void *data)
3999 {
4000 	sector_t blocks;
4001 	struct thin_c *tc = ti->private;
4002 	struct pool *pool = tc->pool;
4003 
4004 	/*
4005 	 * We can't call dm_pool_get_data_dev_size() since that blocks, so
4006 	 * we take a more convoluted path through the pool's target instead.
4007 	 */
4008 	if (!pool->ti)
4009 		return 0;	/* nothing is bound */
4010 
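	/*
	 * Round the bound pool target's length down to whole blocks; a
	 * partial trailing block is never reported to the callout.
	 */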
4011 	blocks = pool->ti->len;
4012 	(void) sector_div(blocks, pool->sectors_per_block);
4013 	if (blocks)
4014 		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4015 
4016 	return 0;
4017 }
4018 
4019 static struct target_type thin_target = {
4020 	.name = "thin",
4021 	.version = {1, 14, 0},
4022 	.module	= THIS_MODULE,
4023 	.ctr = thin_ctr,
4024 	.dtr = thin_dtr,
4025 	.map = thin_map,
4026 	.end_io = thin_endio,
4027 	.preresume = thin_preresume,
4028 	.presuspend = thin_presuspend,
4029 	.postsuspend = thin_postsuspend,
4030 	.status = thin_status,
4031 	.merge = thin_merge,
4032 	.iterate_devices = thin_iterate_devices,
4033 };
4034 
4035 /*----------------------------------------------------------------*/
4036 
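/*
 * Register both targets and create the mapping cache; on failure unwind
 * in reverse order.
 */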
4037 static int __init dm_thin_init(void)
4038 {
4039 	int r;
4040 
4041 	pool_table_init();
4042 
4043 	r = dm_register_target(&thin_target);
4044 	if (r)
4045 		return r;
4046 
4047 	r = dm_register_target(&pool_target);
4048 	if (r)
4049 		goto bad_pool_target;
4050 
4051 	r = -ENOMEM;
4052 
4053 	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4054 	if (!_new_mapping_cache)
4055 		goto bad_new_mapping_cache;
4056 
4057 	return 0;
4058 
4059 bad_new_mapping_cache:
4060 	dm_unregister_target(&pool_target);
4061 bad_pool_target:
4062 	dm_unregister_target(&thin_target);
4063 
4064 	return r;
4065 }
4066 
4067 static void dm_thin_exit(void)
4068 {
4069 	dm_unregister_target(&thin_target);
4070 	dm_unregister_target(&pool_target);
4071 
4072 	kmem_cache_destroy(_new_mapping_cache);
4073 }
4074 
4075 module_init(dm_thin_init);
4076 module_exit(dm_thin_exit);
4077 
4078 module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4079 MODULE_PARM_DESC(no_space_timeout, "Timeout, in seconds, for IO queued while the pool is out of data space");
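/*
 * no_space_timeout can also be changed at runtime, e.g. via
 * /sys/module/dm_thin_pool/parameters/no_space_timeout (path assumes the
 * usual dm-thin-pool module name); a value of 0 queues IO indefinitely.
 */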
4080 
4081 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4082 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4083 MODULE_LICENSE("GPL");
4084