// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST REQ_DRV

static const char *_name = DM_NAME;

static unsigned int major;
static unsigned int _major;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned int sector_count;
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

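/*
 * Return the per-bio-data area that a target requested via per_io_data_size.
 * The data is laid out immediately in front of the clone bio; the offset
 * differs depending on whether the dm_target_io is embedded in a dm_io
 * (the first clone) or was allocated separately.
 */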
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);

	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

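/*
 * Clamp a signed module parameter to [min, max]. If the stored value is out
 * of range it is corrected in place (best effort, via cmpxchg) and the
 * clamped value is returned.
 */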
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{
	unsigned int param = READ_ONCE(*module_param);
	unsigned int modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned int dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned int dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

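/*
 * Look up the block device to forward an ioctl to. Only single-target tables
 * are supported; if the target reports -ENOTCONN (e.g. no usable path yet)
 * the lookup is retried until a fatal signal is pending.
 */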
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *ti;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (map->num_targets != 1)
		return r;

	ti = dm_table_get_target(map, 0);
	if (!ti->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = ti->type->prepare_ioctl(ti, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		fsleep(10000);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static inline bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
	/*
	 * If REQ_PREFLUSH set, don't account payload, it will be
	 * submitted (and accounted) after this flush completes.
	 */
	if (bio_is_flush_with_data(bio))
		return 0;
	if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
		return io->sectors;
	return bio_sectors(bio);
}

static void dm_io_acct(struct dm_io *io, bool end)
{
	struct bio *bio = io->orig_bio;

	if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
		if (!end)
			bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
					   io->start_time);
		else
			bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
					 dm_io_sectors(io, bio),
					 io->start_time);
	}

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&io->md->stats))) {
		sector_t sector;

		if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
			sector = bio_end_sector(bio) - io->sector_offset;
		else
			sector = bio->bi_iter.bi_sector;

		dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
				    sector, dm_io_sectors(io, bio),
				    end, io->start_time, &io->stats_aux);
	}
}

static void __dm_start_io_acct(struct dm_io *io)
{
	dm_io_acct(io, false);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/*
	 * Ensure IO accounting is only ever started once.
	 */
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		return;

	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;
		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			spin_unlock_irqrestore(&io->lock, flags);
			return;
		}
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
	dm_io_acct(io, true);
}

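/*
 * Allocate a dm_io for @bio. A single bioset allocation provides the dm_io,
 * its embedded dm_target_io and the first clone bio. io_count starts at 2:
 * one reference for submission and one for completion.
 */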
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = BLK_STS_OK;

	/* one ref is for submission, the other is for completion */
	atomic_set(&io->io_count, 2);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;
	if (blk_queue_io_stat(md->queue))
		dm_io_set_flag(io, DM_IO_BLK_STAT);

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&md->stats)))
		dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

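/*
 * Allocate a clone bio (and its dm_target_io) for mapping to @ti. The first
 * clone of a dm_io reuses the tio embedded in the dm_io; any further clones
 * are allocated from the mapped device's bioset.
 */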
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{
	struct mapped_device *md = ci->io->md;
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;
	if (unlikely(ti->needs_bio_set_dev))
		bio_set_dev(clone, md->disk->part0);

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
		dev_t dev, blk_mode_t mode)
{
	struct table_device *td;
	struct block_device *bdev;
	u64 part_off;
	int r;

	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
	if (!td)
		return ERR_PTR(-ENOMEM);
	refcount_set(&td->count, 1);

	bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
	if (IS_ERR(bdev)) {
		r = PTR_ERR(bdev);
		goto out_free_td;
	}

	/*
	 * We can be called before the dm disk is added.  In that case we can't
	 * register the holder relation here.  It will be done once add_disk was
	 * called.
	 */
	if (md->disk->slave_dir) {
		r = bd_link_disk_holder(bdev, md->disk);
		if (r)
			goto out_blkdev_put;
	}

	td->dm_dev.mode = mode;
	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
	format_dev_t(td->dm_dev.name, dev);
	list_add(&td->list, &md->table_devices);
	return td;

out_blkdev_put:
	blkdev_put(bdev, _dm_claim_ptr);
out_free_td:
	kfree(td);
	return ERR_PTR(r);
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (md->disk->slave_dir)
		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
	blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
	put_dax(td->dm_dev.dax_dev);
	list_del(&td->list);
	kfree(td);
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      blk_mode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
			struct dm_dev **result)
{
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = open_table_device(md, dev, mode);
		if (IS_ERR(td)) {
			mutex_unlock(&md->table_devices_lock);
			return PTR_ERR(td);
		}
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count))
		close_table_device(td, md);
	mutex_unlock(&md->table_devices_lock);
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMERR("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
	struct mapped_device *md = io->md;

	if (first_stage) {
		struct dm_io *next = md->requeue_list;

		md->requeue_list = io;
		io->next = next;
	} else {
		bio_list_add_head(&md->deferred, io->orig_bio);
	}
}

static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{
	if (first_stage)
		queue_work(md->wq, &md->requeue_work);
	else
		queue_work(md->wq, &md->work);
}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with error if requeue disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
				     (bio->bi_opf & REQ_POLLED));
	struct mapped_device *md = io->md;
	bool requeued = false;

	if (handle_requeue || handle_polled_eagain) {
		unsigned long flags;

		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll split bio
			 * (io->orig_bio may only reflect a subset of the
			 * pre-split original) so clear REQ_POLLED.
			 */
			bio_clear_polled(bio);
		}

		/*
		 * Target requested pushing back the I/O or
		 * polled IO hit BLK_STS_AGAIN.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if ((__noflush_suspending(md) &&
		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
		    handle_polled_eagain || first_stage) {
			dm_requeue_add_io(io, first_stage);
			requeued = true;
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	if (requeued)
		dm_kick_requeue(md, first_stage);

	return requeued;
}

static void __dm_io_complete(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	struct mapped_device *md = io->md;
	blk_status_t io_error;
	bool requeued;

	requeued = dm_handle_requeue(io, first_stage);
	if (requeued && first_stage)
		return;

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io);
	else if (!io_error) {
		/*
		 * Must handle target that DM_MAPIO_SUBMITTED only to
		 * then bio_endio() rather than dm_submit_bio_remap()
		 */
		__dm_start_io_acct(io);
		dm_end_io_acct(io);
	}
	free_io(io);
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/* Return early if the original bio was requeued */
	if (requeued)
		return;

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

static void dm_wq_requeue_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						requeue_work);
	unsigned long flags;
	struct dm_io *io;

	/* reuse deferred lock to simplify dm_handle_requeue */
	spin_lock_irqsave(&md->deferred_lock, flags);
	io = md->requeue_list;
	md->requeue_list = NULL;
	spin_unlock_irqrestore(&md->deferred_lock, flags);

	while (io) {
		struct dm_io *next = io->next;

		dm_io_rewind(io, &md->disk->bio_split);

		io->next = NULL;
		__dm_io_complete(io, false);
		io = next;
		cond_resched();
	}
}

/*
 * Two staged requeue:
 *
 * 1) io->orig_bio points to the real original bio, and the part mapped to
 *    this io must be requeued, instead of other parts of the original bio.
 *
 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
 */
static void dm_io_complete(struct dm_io *io)
{
	bool first_requeue;

	/*
	 * Only dm_io that has been split needs two stage requeue, otherwise
	 * we may run into long bio clone chain during suspend and OOM could
	 * be triggered.
	 *
	 * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
	 * also aren't handled via the first stage requeue.
	 */
	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
		first_requeue = true;
	else
		first_requeue = false;

	__dm_io_complete(io, first_requeue);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
 */
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	return &md->queue->limits;
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = ti->type->end_io;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(bdev_is_zoned(bio->bi_bdev)))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);

		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
				 */
				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
					error = BLK_STS_IOERR;
				else
					error = BLK_STS_DM_REQUEUE;
			} else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMCRIT("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, bio)))
		up(&md->swap_bios_semaphore);

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
			     unsigned int max_granularity,
			     unsigned int max_sectors)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   bio_split_to_limits() isn't possible here.
	 */
	if (!max_granularity)
		return len;
	return min_t(sector_t, len,
		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
		    blk_chunk_sectors_left(target_offset, max_granularity)));
}

static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	return __max_io_len(ti, sector, ti->max_io_len, 0);
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
		sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 *    <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
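/*
 * Illustrative sketch (hypothetical target, not part of this file): a ->map
 * implementation that must not cross some internal boundary could do the
 * following, where sectors_until_boundary() and example_dev() stand in for
 * target-specific helpers:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned int max = sectors_until_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > max)
 *			dm_accept_partial_bio(bio, max);
 *		bio_set_dev(bio, example_dev(ti)->bdev);
 *		bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM core then resubmits the remainder (region 3 above) as a new bio.
 */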
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	unsigned int bio_sectors = bio_sectors(bio);

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bio_sectors > *tio->len_ptr);
	BUG_ON(n_sectors > bio_sectors);

	*tio->len_ptr -= bio_sectors - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;

	/*
	 * __split_and_process_bio() may have already saved mapped part
	 * for accounting but it is being reduced so update accordingly.
	 */
	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
	io->sectors = n_sectors;
	io->sector_offset = bio_sectors(io->orig_bio);
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Target should also enable ti->accounts_remapped_io
 */
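/*
 * Illustrative sketch (hypothetical target): the ->map method stashes the
 * clone and returns DM_MAPIO_SUBMITTED; a worker later hands the bio back to
 * DM core for submission and accounting:
 *
 *	ti->accounts_remapped_io = true;	(set in the target's ctr)
 *	...
 *	dm_submit_bio_remap(clone, NULL);	(from the deferred worker)
 *
 * Passing NULL for @tgt_clone submits @clone itself.
 */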
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->origin_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
	 */
	dm_start_io_acct(io, clone);

	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
			      tio->old_sector);
	submit_bio_noacct(tgt_clone);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_target *ti = tio->ti;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	int r;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.
	 */
	tio->old_sector = clone->bi_iter.bi_sector;

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, clone))) {
		int latch = get_swap_bios();

		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	if (static_branch_unlikely(&zoned_enabled)) {
		/*
		 * Check if the IO needs a special mapping due to zone append
		 * emulation on zoned target. In this case, dm_zone_map_bio()
		 * calls the target map operation.
		 */
		if (unlikely(dm_emulate_zone_append(md)))
			r = dm_zone_map_bio(tio);
		else
			r = ti->type->map(ti, clone);
	} else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* target has assumed ownership of this io */
		if (!ti->accounts_remapped_io)
			dm_start_io_acct(io, clone);
		break;
	case DM_MAPIO_REMAPPED:
		dm_submit_bio_remap(clone, NULL);
		break;
	case DM_MAPIO_KILL:
	case DM_MAPIO_REQUEUE:
		if (static_branch_unlikely(&swap_bios_enabled) &&
		    unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(clone);
		if (r == DM_MAPIO_KILL)
			dm_io_dec_pending(io, BLK_STS_IOERR);
		else
			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}
}

static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{
	struct dm_io *io = ci->io;

	if (ci->sector_count > len) {
		/*
		 * Split needed, save the mapped part for accounting.
		 * NOTE: dm_accept_partial_bio() will update accordingly.
		 */
		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
		io->sectors = len;
		io->sector_offset = bio_sectors(ci->bio);
	}
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned int num_bios,
				unsigned *len)
{
	struct bio *bio;
	int try;

	for (try = 0; try < 2; try++) {
		int bio_nr;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			bio = alloc_tio(ci, ti, bio_nr, len,
					try ? GFP_NOIO : GFP_NOWAIT);
			if (!bio)
				break;

			bio_list_add(blist, bio);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist)))
			free_tio(bio);
	}
}

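/*
 * Clone the bio num_bios times for @ti (e.g. one flush clone per target, or
 * a target's num_discard_bios), map each clone, and return the number of
 * clones that were actually issued.
 */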
static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				 unsigned int num_bios, unsigned int *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *clone;
	unsigned int ret = 0;

	switch (num_bios) {
	case 0:
		break;
	case 1:
		if (len)
			setup_split_accounting(ci, *len);
		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
		__map_bio(clone);
		ret = 1;
		break;
	default:
		if (len)
			setup_split_accounting(ci, *len);
		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
		while ((clone = bio_list_pop(&blist))) {
			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
			__map_bio(clone);
			ret += 1;
		}
		break;
	}

	return ret;
}

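/*
 * Send an empty (no payload) flush clone to every target in the table,
 * num_flush_bios clones per target. An on-stack bio is used purely as the
 * basis for the clones; io_count is adjusted for clones that were not issued.
 */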
static void __send_empty_flush(struct clone_info *ci)
{
	struct dm_table *t = ci->map;
	struct bio flush_bio;
	blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
	    (REQ_IDLE | REQ_SYNC))
		opf |= REQ_IDLE;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);

	ci->bio = &flush_bio;
	ci->sector_count = 0;
	ci->io->tio.clone.bi_iter.bi_size = 0;

	for (unsigned int i = 0; i < t->num_targets; i++) {
		unsigned int bios;
		struct dm_target *ti = dm_table_get_target(t, i);

		atomic_add(ti->num_flush_bios, &ci->io->io_count);
		bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
		atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
	}

	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following subtraction
	 */
	atomic_sub(1, &ci->io->io_count);

	bio_uninit(ci->bio);
}

static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
					unsigned int num_bios,
					unsigned int max_granularity,
					unsigned int max_sectors)
{
	unsigned int len, bios;

	len = min_t(sector_t, ci->sector_count,
		    __max_io_len(ti, ci->sector, max_granularity, max_sectors));

	atomic_add(num_bios, &ci->io->io_count);
	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
	/*
	 * alloc_io() takes one extra reference for submission, so the
	 * reference won't reach 0 without the following (+1) subtraction
	 */
	atomic_sub(num_bios - bios + 1, &ci->io->io_count);

	ci->sector += len;
	ci->sector_count -= len;
}

static bool is_abnormal_io(struct bio *bio)
{
	enum req_op op = bio_op(bio);

	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_SECURE_ERASE:
		case REQ_OP_WRITE_ZEROES:
			return true;
		default:
			break;
		}
	}

	return false;
}

static blk_status_t __process_abnormal_io(struct clone_info *ci,
					  struct dm_target *ti)
{
	unsigned int num_bios = 0;
	unsigned int max_granularity = 0;
	unsigned int max_sectors = 0;
	struct queue_limits *limits = dm_get_queue_limits(ti->table->md);

	switch (bio_op(ci->bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
		max_sectors = limits->max_discard_sectors;
		if (ti->max_discard_granularity)
			max_granularity = max_sectors;
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
		max_sectors = limits->max_secure_erase_sectors;
		if (ti->max_secure_erase_granularity)
			max_granularity = max_sectors;
		break;
	case REQ_OP_WRITE_ZEROES:
		num_bios = ti->num_write_zeroes_bios;
		max_sectors = limits->max_write_zeroes_sectors;
		if (ti->max_write_zeroes_granularity)
			max_granularity = max_sectors;
		break;
	default:
		break;
	}

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (unlikely(!num_bios))
		return BLK_STS_NOTSUPP;

	__send_changing_extent_only(ci, ti, num_bios,
				    max_granularity, max_sectors);
	return BLK_STS_OK;
}

/*
 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
 * associated with this bio, and this bio's bi_private needs to be
 * stored in dm_io->data before the reuse.
 *
 * bio->bi_private is owned by fs or upper layer, so block layer won't
 * touch it after splitting. Meantime it won't be changed by anyone after
 * bio is submitted. So this reuse is safe.
 */
static inline struct dm_io **dm_poll_list_head(struct bio *bio)
{
	return (struct dm_io **)&bio->bi_private;
}

static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
{
	struct dm_io **head = dm_poll_list_head(bio);

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		/*
		 * Save .bi_private into dm_io, so that we can reuse
		 * .bi_private as dm_io list head for storing dm_io list
		 */
		io->data = bio->bi_private;

		/* tell block layer to poll for completion */
		bio->bi_cookie = ~BLK_QC_T_NONE;

		io->next = NULL;
	} else {
		/*
		 * bio recursed due to split, reuse original poll list,
		 * and save bio->bi_private too.
		 */
		io->data = (*head)->data;
		io->next = *head;
	}

	*head = io;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
	struct bio *clone;
	struct dm_target *ti;
	unsigned int len;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (unlikely(!ti))
		return BLK_STS_IOERR;

	if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
	    unlikely(!dm_target_supports_nowait(ti->type)))
		return BLK_STS_NOTSUPP;

	if (unlikely(ci->is_abnormal_io))
		return __process_abnormal_io(ci, ti);

	/*
	 * Only support bio polling for normal IO, and the target io is
	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
	 */
	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);

	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
	setup_split_accounting(ci, len);
	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
	__map_bio(clone);

	ci->sector += len;
	ci->sector_count -= len;

	return BLK_STS_OK;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio, bool is_abnormal)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->bio = bio;
	ci->is_abnormal_io = is_abnormal;
	ci->submit_as_polled = false;
	ci->sector = bio->bi_iter.bi_sector;
	ci->sector_count = bio_sectors(bio);

	/* Shouldn't happen but sector_count was being set to 0 so... */
	if (static_branch_unlikely(&zoned_enabled) &&
	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
		ci->sector_count = 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void dm_split_and_process_bio(struct mapped_device *md,
				     struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	struct dm_io *io;
	blk_status_t error = BLK_STS_OK;
	bool is_abnormal;

	is_abnormal = is_abnormal_io(bio);
	if (unlikely(is_abnormal)) {
		/*
		 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
		 * otherwise associated queue_limits won't be imposed.
		 */
		bio = bio_split_to_limits(bio);
		if (!bio)
			return;
	}

	init_clone_info(&ci, md, map, bio, is_abnormal);
	io = ci.io;

	if (bio->bi_opf & REQ_PREFLUSH) {
		__send_empty_flush(&ci);
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

	error = __split_and_process_bio(&ci);
	if (error || !ci.sector_count)
		goto out;
	/*
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 */
	bio_trim(bio, io->sectors, ci.sector_count);
	trace_block_split(bio, bio->bi_iter.bi_sector);
	bio_inc_remaining(bio);
	submit_bio_noacct(bio);
out:
	/*
	 * Drop the extra reference count for non-POLLED bio, and hold one
	 * reference for POLLED bio, which will be released in dm_poll_bio
	 *
	 * Add every dm_io instance into the dm_io list head which is stored
	 * in bio->bi_private, so that dm_poll_bio can poll them all.
	 */
	if (error || !ci.submit_as_polled) {
		/*
		 * In case of submission failure, the extra reference for
		 * submitting io isn't consumed yet
		 */
		if (error)
			atomic_dec(&io->io_count);
		dm_io_dec_pending(io, error);
	} else
		dm_queue_poll_io(bio, io);
}

static void dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);
	if (unlikely(!map)) {
		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
			    dm_device_name(md));
		bio_io_error(bio);
		goto out;
	}

	/* If suspended, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		else
			queue_io(md, bio);
		goto out;
	}

	dm_split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
}

static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
			  unsigned int flags)
{
	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));

	/* don't poll if the mapped io is done */
	if (atomic_read(&io->io_count) > 1)
		bio_poll(&io->tio.clone, iob, flags);

	/* bio_poll holds the last reference */
	return atomic_read(&io->io_count) == 1;
}

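/*
 * Poll all dm_io instances linked from bio->bi_private (see
 * dm_queue_poll_io()). Completed entries drop their final reference; any
 * still-pending entries are relinked and 0 is returned so the caller keeps
 * polling. Returns 1 only once every dm_io for this bio has completed.
 */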
dm_poll_bio(struct bio * bio,struct io_comp_batch * iob,unsigned int flags)1860 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1861 unsigned int flags)
1862 {
1863 struct dm_io **head = dm_poll_list_head(bio);
1864 struct dm_io *list = *head;
1865 struct dm_io *tmp = NULL;
1866 struct dm_io *curr, *next;
1867
1868 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
1869 if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1870 return 0;
1871
1872 WARN_ON_ONCE(!list);
1873
1874 /*
1875 * Restore .bi_private before possibly completing dm_io.
1876 *
1877 * bio_poll() is only possible once @bio has been completely
1878 * submitted via submit_bio_noacct()'s depth-first submission.
1879 * So there is no dm_queue_poll_io() race associated with
1880 * clearing REQ_DM_POLL_LIST here.
1881 */
1882 bio->bi_opf &= ~REQ_DM_POLL_LIST;
1883 bio->bi_private = list->data;
1884
1885 for (curr = list, next = curr->next; curr; curr = next, next =
1886 curr ? curr->next : NULL) {
1887 if (dm_poll_dm_io(curr, iob, flags)) {
1888 /*
1889 * clone_endio() has already occurred, so no
1890 * error handling is needed here.
1891 */
1892 __dm_io_dec_pending(curr);
1893 } else {
1894 curr->next = tmp;
1895 tmp = curr;
1896 }
1897 }
1898
1899 /* Not done? */
1900 if (tmp) {
1901 bio->bi_opf |= REQ_DM_POLL_LIST;
1902 /* Reset bio->bi_private to dm_io list head */
1903 *head = tmp;
1904 return 0;
1905 }
1906 return 1;
1907 }
1908
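/*
 * dm_poll_bio() is reached via bio_poll() for REQ_POLLED bios.  A hedged
 * sketch of how a submitter typically drives completion polling (purely
 * illustrative; real callers live in the block layer and filesystems, and
 * "done" stands for whatever completion condition the caller tracks):
 *
 *	bio->bi_opf |= REQ_POLLED;
 *	submit_bio(bio);
 *	while (!done)
 *		bio_poll(bio, NULL, 0);
 */
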
1909 /*
1910 *---------------------------------------------------------------
1911 * An IDR is used to keep track of allocated minor numbers.
1912 *---------------------------------------------------------------
1913 */
1914 static void free_minor(int minor)
1915 {
1916 spin_lock(&_minor_lock);
1917 idr_remove(&_minor_idr, minor);
1918 spin_unlock(&_minor_lock);
1919 }
1920
1921 /*
1922 * See if the device with a specific minor # is free.
1923 */
1924 static int specific_minor(int minor)
1925 {
1926 int r;
1927
1928 if (minor >= (1 << MINORBITS))
1929 return -EINVAL;
1930
1931 idr_preload(GFP_KERNEL);
1932 spin_lock(&_minor_lock);
1933
1934 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1935
1936 spin_unlock(&_minor_lock);
1937 idr_preload_end();
1938 if (r < 0)
1939 return r == -ENOSPC ? -EBUSY : r;
1940 return 0;
1941 }
1942
1943 static int next_free_minor(int *minor)
1944 {
1945 int r;
1946
1947 idr_preload(GFP_KERNEL);
1948 spin_lock(&_minor_lock);
1949
1950 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1951
1952 spin_unlock(&_minor_lock);
1953 idr_preload_end();
1954 if (r < 0)
1955 return r;
1956 *minor = r;
1957 return 0;
1958 }
1959
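/*
 * Both minor allocators above use the same IDR idiom: preallocate outside
 * the spinlock with GFP_KERNEL, then allocate under the lock with
 * GFP_NOWAIT so the locked section cannot sleep.  A generic sketch of the
 * idiom (my_idr, my_lock and ptr are placeholder names):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */
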
1960 static const struct block_device_operations dm_blk_dops;
1961 static const struct block_device_operations dm_rq_blk_dops;
1962 static const struct dax_operations dm_dax_ops;
1963
1964 static void dm_wq_work(struct work_struct *work);
1965
1966 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
1967 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
1968 {
1969 dm_destroy_crypto_profile(q->crypto_profile);
1970 }
1971
1972 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1973
1974 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
1975 {
1976 }
1977 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1978
1979 static void cleanup_mapped_device(struct mapped_device *md)
1980 {
1981 if (md->wq)
1982 destroy_workqueue(md->wq);
1983 dm_free_md_mempools(md->mempools);
1984
1985 if (md->dax_dev) {
1986 dax_remove_host(md->disk);
1987 kill_dax(md->dax_dev);
1988 put_dax(md->dax_dev);
1989 md->dax_dev = NULL;
1990 }
1991
1992 dm_cleanup_zoned_dev(md);
1993 if (md->disk) {
1994 spin_lock(&_minor_lock);
1995 md->disk->private_data = NULL;
1996 spin_unlock(&_minor_lock);
1997 if (dm_get_md_type(md) != DM_TYPE_NONE) {
1998 struct table_device *td;
1999
2000 dm_sysfs_exit(md);
2001 list_for_each_entry(td, &md->table_devices, list) {
2002 bd_unlink_disk_holder(td->dm_dev.bdev,
2003 md->disk);
2004 }
2005
2006 /*
2007 * Hold the lock to make sure del_gendisk() won't run concurrently
2008 * with open_table_device()/close_table_device().
2009 */
2010 mutex_lock(&md->table_devices_lock);
2011 del_gendisk(md->disk);
2012 mutex_unlock(&md->table_devices_lock);
2013 }
2014 dm_queue_destroy_crypto_profile(md->queue);
2015 put_disk(md->disk);
2016 }
2017
2018 if (md->pending_io) {
2019 free_percpu(md->pending_io);
2020 md->pending_io = NULL;
2021 }
2022
2023 cleanup_srcu_struct(&md->io_barrier);
2024
2025 mutex_destroy(&md->suspend_lock);
2026 mutex_destroy(&md->type_lock);
2027 mutex_destroy(&md->table_devices_lock);
2028 mutex_destroy(&md->swap_bios_lock);
2029
2030 dm_mq_cleanup_mapped_device(md);
2031 }
2032
2033 /*
2034 * Allocate and initialise a blank device with a given minor.
2035 */
2036 static struct mapped_device *alloc_dev(int minor)
2037 {
2038 int r, numa_node_id = dm_get_numa_node();
2039 struct mapped_device *md;
2040 void *old_md;
2041
2042 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2043 if (!md) {
2044 DMERR("unable to allocate device, out of memory.");
2045 return NULL;
2046 }
2047
2048 if (!try_module_get(THIS_MODULE))
2049 goto bad_module_get;
2050
2051 /* get a minor number for the dev */
2052 if (minor == DM_ANY_MINOR)
2053 r = next_free_minor(&minor);
2054 else
2055 r = specific_minor(minor);
2056 if (r < 0)
2057 goto bad_minor;
2058
2059 r = init_srcu_struct(&md->io_barrier);
2060 if (r < 0)
2061 goto bad_io_barrier;
2062
2063 md->numa_node_id = numa_node_id;
2064 md->init_tio_pdu = false;
2065 md->type = DM_TYPE_NONE;
2066 mutex_init(&md->suspend_lock);
2067 mutex_init(&md->type_lock);
2068 mutex_init(&md->table_devices_lock);
2069 spin_lock_init(&md->deferred_lock);
2070 atomic_set(&md->holders, 1);
2071 atomic_set(&md->open_count, 0);
2072 atomic_set(&md->event_nr, 0);
2073 atomic_set(&md->uevent_seq, 0);
2074 INIT_LIST_HEAD(&md->uevent_list);
2075 INIT_LIST_HEAD(&md->table_devices);
2076 spin_lock_init(&md->uevent_lock);
2077
2078 /*
2079 * Default to bio-based until the DM table is loaded and md->type is
2080 * established. If a request-based table is loaded, blk-mq will
2081 * override accordingly.
2082 */
2083 md->disk = blk_alloc_disk(md->numa_node_id);
2084 if (!md->disk)
2085 goto bad;
2086 md->queue = md->disk->queue;
2087
2088 init_waitqueue_head(&md->wait);
2089 INIT_WORK(&md->work, dm_wq_work);
2090 INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2091 init_waitqueue_head(&md->eventq);
2092 init_completion(&md->kobj_holder.completion);
2093
2094 md->requeue_list = NULL;
2095 md->swap_bios = get_swap_bios();
2096 sema_init(&md->swap_bios_semaphore, md->swap_bios);
2097 mutex_init(&md->swap_bios_lock);
2098
2099 md->disk->major = _major;
2100 md->disk->first_minor = minor;
2101 md->disk->minors = 1;
2102 md->disk->flags |= GENHD_FL_NO_PART;
2103 md->disk->fops = &dm_blk_dops;
2104 md->disk->private_data = md;
2105 sprintf(md->disk->disk_name, "dm-%d", minor);
2106
2107 if (IS_ENABLED(CONFIG_FS_DAX)) {
2108 md->dax_dev = alloc_dax(md, &dm_dax_ops);
2109 if (IS_ERR(md->dax_dev)) {
2110 md->dax_dev = NULL;
2111 goto bad;
2112 }
2113 set_dax_nocache(md->dax_dev);
2114 set_dax_nomc(md->dax_dev);
2115 if (dax_add_host(md->dax_dev, md->disk))
2116 goto bad;
2117 }
2118
2119 format_dev_t(md->name, MKDEV(_major, minor));
2120
2121 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2122 if (!md->wq)
2123 goto bad;
2124
2125 md->pending_io = alloc_percpu(unsigned long);
2126 if (!md->pending_io)
2127 goto bad;
2128
2129 r = dm_stats_init(&md->stats);
2130 if (r < 0)
2131 goto bad;
2132
2133 /* Populate the mapping, nobody knows we exist yet */
2134 spin_lock(&_minor_lock);
2135 old_md = idr_replace(&_minor_idr, md, minor);
2136 spin_unlock(&_minor_lock);
2137
2138 BUG_ON(old_md != MINOR_ALLOCED);
2139
2140 return md;
2141
2142 bad:
2143 cleanup_mapped_device(md);
2144 bad_io_barrier:
2145 free_minor(minor);
2146 bad_minor:
2147 module_put(THIS_MODULE);
2148 bad_module_get:
2149 kvfree(md);
2150 return NULL;
2151 }
2152
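/*
 * alloc_dev() above relies on the kernel's usual goto-unwind ladder: each
 * failure jumps to a label that undoes only the steps already completed,
 * in reverse order.  A bare skeleton of the pattern (step1/step2 and their
 * undo helpers are placeholder names):
 *
 *	if (step1(md))
 *		goto bad_step1;
 *	if (step2(md))
 *		goto bad_step2;
 *	return md;
 * bad_step2:
 *	undo_step1(md);
 * bad_step1:
 *	kvfree(md);
 *	return NULL;
 */
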
2153 static void unlock_fs(struct mapped_device *md);
2154
2155 static void free_dev(struct mapped_device *md)
2156 {
2157 int minor = MINOR(disk_devt(md->disk));
2158
2159 unlock_fs(md);
2160
2161 cleanup_mapped_device(md);
2162
2163 WARN_ON_ONCE(!list_empty(&md->table_devices));
2164 dm_stats_cleanup(&md->stats);
2165 free_minor(minor);
2166
2167 module_put(THIS_MODULE);
2168 kvfree(md);
2169 }
2170
2171 /*
2172 * Bind a table to the device.
2173 */
2174 static void event_callback(void *context)
2175 {
2176 unsigned long flags;
2177 LIST_HEAD(uevents);
2178 struct mapped_device *md = context;
2179
2180 spin_lock_irqsave(&md->uevent_lock, flags);
2181 list_splice_init(&md->uevent_list, &uevents);
2182 spin_unlock_irqrestore(&md->uevent_lock, flags);
2183
2184 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2185
2186 atomic_inc(&md->event_nr);
2187 wake_up(&md->eventq);
2188 dm_issue_global_event();
2189 }
2190
2191 /*
2192 * Returns old map, which caller must destroy.
2193 */
2194 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2195 struct queue_limits *limits)
2196 {
2197 struct dm_table *old_map;
2198 sector_t size, old_size;
2199 int ret;
2200
2201 lockdep_assert_held(&md->suspend_lock);
2202
2203 size = dm_table_get_size(t);
2204
2205 old_size = dm_get_size(md);
2206 set_capacity(md->disk, size);
2207
2208 ret = dm_table_set_restrictions(t, md->queue, limits);
2209 if (ret) {
2210 set_capacity(md->disk, old_size);
2211 old_map = ERR_PTR(ret);
2212 goto out;
2213 }
2214
2215 /*
2216 * Wipe any geometry if the size of the table changed.
2217 */
2218 if (size != old_size)
2219 memset(&md->geometry, 0, sizeof(md->geometry));
2220
2221 dm_table_event_callback(t, event_callback, md);
2222
2223 if (dm_table_request_based(t)) {
2224 /*
2225 * Leverage the fact that request-based DM targets are
2226 * immutable singletons - used to optimize dm_mq_queue_rq.
2227 */
2228 md->immutable_target = dm_table_get_immutable_target(t);
2229
2230 /*
2231 * There is no need to reload with request-based dm because the
2232 * size of front_pad doesn't change.
2233 *
2234 * Note for future: If you are to reload bioset, prep-ed
2235 * requests in the queue may refer to bio from the old bioset,
2236 * so you must walk through the queue to unprep.
2237 */
2238 if (!md->mempools)
2239 md->mempools = t->mempools;
2240 else
2241 dm_free_md_mempools(t->mempools);
2242 } else {
2243 /*
2244 * The md may already have mempools that need changing.
2245 * If so, reload bioset because front_pad may have changed
2246 * because a different table was loaded.
2247 */
2248 dm_free_md_mempools(md->mempools);
2249 md->mempools = t->mempools;
2250 }
2251 t->mempools = NULL;
2252
2253 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2254 rcu_assign_pointer(md->map, (void *)t);
2255 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2256
2257 if (old_map)
2258 dm_sync_table(md);
2259 out:
2260 return old_map;
2261 }
2262
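/*
 * __bind() publishes the new table with rcu_assign_pointer() and then waits
 * for SRCU readers of the old table via dm_sync_table().  A hedged sketch of
 * the matching reader side (this is effectively what dm_get_live_table() and
 * dm_put_live_table() do; shown only to make the pairing explicit):
 *
 *	idx = srcu_read_lock(&md->io_barrier);
 *	map = srcu_dereference(md->map, &md->io_barrier);
 *	... use map ...
 *	srcu_read_unlock(&md->io_barrier, idx);
 */
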
2263 /*
2264 * Returns unbound table for the caller to free.
2265 */
2266 static struct dm_table *__unbind(struct mapped_device *md)
2267 {
2268 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2269
2270 if (!map)
2271 return NULL;
2272
2273 dm_table_event_callback(map, NULL, NULL);
2274 RCU_INIT_POINTER(md->map, NULL);
2275 dm_sync_table(md);
2276
2277 return map;
2278 }
2279
2280 /*
2281 * Constructor for a new device.
2282 */
2283 int dm_create(int minor, struct mapped_device **result)
2284 {
2285 struct mapped_device *md;
2286
2287 md = alloc_dev(minor);
2288 if (!md)
2289 return -ENXIO;
2290
2291 dm_ima_reset_data(md);
2292
2293 *result = md;
2294 return 0;
2295 }
2296
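/*
 * Hedged usage sketch for the constructor above.  The real sequencing is
 * driven from drivers/md/dm-ioctl.c; this is only to illustrate the holder
 * refcount protocol:
 *
 *	struct mapped_device *md;
 *
 *	if (dm_create(DM_ANY_MINOR, &md))
 *		return -ENXIO;
 *	... load a table, dm_setup_md_queue(), dm_resume() ...
 *	dm_put(md);         drop the reference taken at creation
 *	dm_destroy(md);     waits for remaining holders, then frees
 */
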
2297 /*
2298 * Functions to manage md->type.
2299 * All are required to hold md->type_lock.
2300 */
2301 void dm_lock_md_type(struct mapped_device *md)
2302 {
2303 mutex_lock(&md->type_lock);
2304 }
2305
2306 void dm_unlock_md_type(struct mapped_device *md)
2307 {
2308 mutex_unlock(&md->type_lock);
2309 }
2310
2311 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2312 {
2313 BUG_ON(!mutex_is_locked(&md->type_lock));
2314 md->type = type;
2315 }
2316
2317 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2318 {
2319 return md->type;
2320 }
2321
2322 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2323 {
2324 return md->immutable_target_type;
2325 }
2326
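/*
 * A hedged sketch of the md->type locking protocol the accessors above
 * implement.  This is roughly how table-load paths use it (illustrative
 * only; the authoritative caller is dm-ioctl):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	else if (dm_get_md_type(md) != dm_table_get_type(t))
 *		... reject the table, types cannot change ...
 *	dm_unlock_md_type(md);
 */
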
2327 /*
2328 * Setup the DM device's queue based on md's type
2329 */
2330 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2331 {
2332 enum dm_queue_mode type = dm_table_get_type(t);
2333 struct queue_limits limits;
2334 struct table_device *td;
2335 int r;
2336
2337 switch (type) {
2338 case DM_TYPE_REQUEST_BASED:
2339 md->disk->fops = &dm_rq_blk_dops;
2340 r = dm_mq_init_request_queue(md, t);
2341 if (r) {
2342 DMERR("Cannot initialize queue for request-based dm mapped device");
2343 return r;
2344 }
2345 break;
2346 case DM_TYPE_BIO_BASED:
2347 case DM_TYPE_DAX_BIO_BASED:
2348 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
2349 break;
2350 case DM_TYPE_NONE:
2351 WARN_ON_ONCE(true);
2352 break;
2353 }
2354
2355 r = dm_calculate_queue_limits(t, &limits);
2356 if (r) {
2357 DMERR("Cannot calculate initial queue limits");
2358 return r;
2359 }
2360 r = dm_table_set_restrictions(t, md->queue, &limits);
2361 if (r)
2362 return r;
2363
2364 /*
2365 * Hold the lock to make sure add_disk() and del_gendisk() won't run
2366 * concurrently with open_table_device() and close_table_device().
2367 */
2368 mutex_lock(&md->table_devices_lock);
2369 r = add_disk(md->disk);
2370 mutex_unlock(&md->table_devices_lock);
2371 if (r)
2372 return r;
2373
2374 /*
2375 * Register the holder relationship for devices added before the disk
2376 * was live.
2377 */
2378 list_for_each_entry(td, &md->table_devices, list) {
2379 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2380 if (r)
2381 goto out_undo_holders;
2382 }
2383
2384 r = dm_sysfs_init(md);
2385 if (r)
2386 goto out_undo_holders;
2387
2388 md->type = type;
2389 return 0;
2390
2391 out_undo_holders:
2392 list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2393 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2394 mutex_lock(&md->table_devices_lock);
2395 del_gendisk(md->disk);
2396 mutex_unlock(&md->table_devices_lock);
2397 return r;
2398 }
2399
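/*
 * The out_undo_holders path above uses the standard partial-failure idiom
 * for list setup: on error, walk back over only the entries already
 * processed.  A generic sketch (link_one/unlink_one are placeholder
 * operations):
 *
 *	list_for_each_entry(td, &head, list) {
 *		if (link_one(td))
 *			goto undo;
 *	}
 *	return 0;
 * undo:
 *	list_for_each_entry_continue_reverse(td, &head, list)
 *		unlink_one(td);
 */
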
2400 struct mapped_device *dm_get_md(dev_t dev)
2401 {
2402 struct mapped_device *md;
2403 unsigned int minor = MINOR(dev);
2404
2405 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2406 return NULL;
2407
2408 spin_lock(&_minor_lock);
2409
2410 md = idr_find(&_minor_idr, minor);
2411 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2412 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2413 md = NULL;
2414 goto out;
2415 }
2416 dm_get(md);
2417 out:
2418 spin_unlock(&_minor_lock);
2419
2420 return md;
2421 }
2422 EXPORT_SYMBOL_GPL(dm_get_md);
2423
2424 void *dm_get_mdptr(struct mapped_device *md)
2425 {
2426 return md->interface_ptr;
2427 }
2428
2429 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2430 {
2431 md->interface_ptr = ptr;
2432 }
2433
2434 void dm_get(struct mapped_device *md)
2435 {
2436 atomic_inc(&md->holders);
2437 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2438 }
2439
2440 int dm_hold(struct mapped_device *md)
2441 {
2442 spin_lock(&_minor_lock);
2443 if (test_bit(DMF_FREEING, &md->flags)) {
2444 spin_unlock(&_minor_lock);
2445 return -EBUSY;
2446 }
2447 dm_get(md);
2448 spin_unlock(&_minor_lock);
2449 return 0;
2450 }
2451 EXPORT_SYMBOL_GPL(dm_hold);
2452
2453 const char *dm_device_name(struct mapped_device *md)
2454 {
2455 return md->name;
2456 }
2457 EXPORT_SYMBOL_GPL(dm_device_name);
2458
2459 static void __dm_destroy(struct mapped_device *md, bool wait)
2460 {
2461 struct dm_table *map;
2462 int srcu_idx;
2463
2464 might_sleep();
2465
2466 spin_lock(&_minor_lock);
2467 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2468 set_bit(DMF_FREEING, &md->flags);
2469 spin_unlock(&_minor_lock);
2470
2471 blk_mark_disk_dead(md->disk);
2472
2473 /*
2474 * Take suspend_lock so that presuspend and postsuspend methods
2475 * do not race with internal suspend.
2476 */
2477 mutex_lock(&md->suspend_lock);
2478 map = dm_get_live_table(md, &srcu_idx);
2479 if (!dm_suspended_md(md)) {
2480 dm_table_presuspend_targets(map);
2481 set_bit(DMF_SUSPENDED, &md->flags);
2482 set_bit(DMF_POST_SUSPENDING, &md->flags);
2483 dm_table_postsuspend_targets(map);
2484 }
2485 /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2486 dm_put_live_table(md, srcu_idx);
2487 mutex_unlock(&md->suspend_lock);
2488
2489 /*
2490 * Rare, but there may still be I/O requests that need to complete.
2491 * Wait for all references to disappear.
2492 * No one should increment the reference count of the mapped_device
2493 * after the mapped_device state becomes DMF_FREEING.
2494 */
2495 if (wait)
2496 while (atomic_read(&md->holders))
2497 fsleep(1000);
2498 else if (atomic_read(&md->holders))
2499 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2500 dm_device_name(md), atomic_read(&md->holders));
2501
2502 dm_table_destroy(__unbind(md));
2503 free_dev(md);
2504 }
2505
2506 void dm_destroy(struct mapped_device *md)
2507 {
2508 __dm_destroy(md, true);
2509 }
2510
2511 void dm_destroy_immediate(struct mapped_device *md)
2512 {
2513 __dm_destroy(md, false);
2514 }
2515
2516 void dm_put(struct mapped_device *md)
2517 {
2518 atomic_dec(&md->holders);
2519 }
2520 EXPORT_SYMBOL_GPL(dm_put);
2521
2522 static bool dm_in_flight_bios(struct mapped_device *md)
2523 {
2524 int cpu;
2525 unsigned long sum = 0;
2526
2527 for_each_possible_cpu(cpu)
2528 sum += *per_cpu_ptr(md->pending_io, cpu);
2529
2530 return sum != 0;
2531 }
2532
2533 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2534 {
2535 int r = 0;
2536 DEFINE_WAIT(wait);
2537
2538 while (true) {
2539 prepare_to_wait(&md->wait, &wait, task_state);
2540
2541 if (!dm_in_flight_bios(md))
2542 break;
2543
2544 if (signal_pending_state(task_state, current)) {
2545 r = -ERESTARTSYS;
2546 break;
2547 }
2548
2549 io_schedule();
2550 }
2551 finish_wait(&md->wait, &wait);
2552
2553 smp_rmb();
2554
2555 return r;
2556 }
2557
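/*
 * dm_wait_for_bios_completion() above is the open-coded form of
 * wait_event(): prepare_to_wait(), re-check the condition, then sleep.
 * A minimal sketch of the idiom (wq and cond() are placeholders):
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (true) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (cond())
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */
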
2558 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2559 {
2560 int r = 0;
2561
2562 if (!queue_is_mq(md->queue))
2563 return dm_wait_for_bios_completion(md, task_state);
2564
2565 while (true) {
2566 if (!blk_mq_queue_inflight(md->queue))
2567 break;
2568
2569 if (signal_pending_state(task_state, current)) {
2570 r = -ERESTARTSYS;
2571 break;
2572 }
2573
2574 fsleep(5000);
2575 }
2576
2577 return r;
2578 }
2579
2580 /*
2581 * Process the deferred bios
2582 */
2583 static void dm_wq_work(struct work_struct *work)
2584 {
2585 struct mapped_device *md = container_of(work, struct mapped_device, work);
2586 struct bio *bio;
2587
2588 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2589 spin_lock_irq(&md->deferred_lock);
2590 bio = bio_list_pop(&md->deferred);
2591 spin_unlock_irq(&md->deferred_lock);
2592
2593 if (!bio)
2594 break;
2595
2596 submit_bio_noacct(bio);
2597 cond_resched();
2598 }
2599 }
2600
2601 static void dm_queue_flush(struct mapped_device *md)
2602 {
2603 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2604 smp_mb__after_atomic();
2605 queue_work(md->wq, &md->work);
2606 }
2607
2608 /*
2609 * Swap in a new table, returning the old one for the caller to destroy.
2610 */
2611 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2612 {
2613 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2614 struct queue_limits limits;
2615 int r;
2616
2617 mutex_lock(&md->suspend_lock);
2618
2619 /* device must be suspended */
2620 if (!dm_suspended_md(md))
2621 goto out;
2622
2623 /*
2624 * If the new table has no data devices, retain the existing limits.
2625 * This helps multipath with queue_if_no_path: if all paths disappear,
2626 * new I/O is queued based on these limits, and it can be issued once
2627 * some paths reappear.
2628 */
2629 if (dm_table_has_no_data_devices(table)) {
2630 live_map = dm_get_live_table_fast(md);
2631 if (live_map)
2632 limits = md->queue->limits;
2633 dm_put_live_table_fast(md);
2634 }
2635
2636 if (!live_map) {
2637 r = dm_calculate_queue_limits(table, &limits);
2638 if (r) {
2639 map = ERR_PTR(r);
2640 goto out;
2641 }
2642 }
2643
2644 map = __bind(md, table, &limits);
2645 dm_issue_global_event();
2646
2647 out:
2648 mutex_unlock(&md->suspend_lock);
2649 return map;
2650 }
2651
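/*
 * Hedged sketch of how a caller swaps tables with the helper above.  The
 * real sequencing lives in dm-ioctl's resume path; this is illustrative
 * only:
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */
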
2652 /*
2653 * Functions to lock and unlock any filesystem running on the
2654 * device.
2655 */
2656 static int lock_fs(struct mapped_device *md)
2657 {
2658 int r;
2659
2660 WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2661
2662 r = freeze_bdev(md->disk->part0);
2663 if (!r)
2664 set_bit(DMF_FROZEN, &md->flags);
2665 return r;
2666 }
2667
2668 static void unlock_fs(struct mapped_device *md)
2669 {
2670 if (!test_bit(DMF_FROZEN, &md->flags))
2671 return;
2672 thaw_bdev(md->disk->part0);
2673 clear_bit(DMF_FROZEN, &md->flags);
2674 }
2675
2676 /*
2677 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2678 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2679 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2680 *
2681 * If __dm_suspend returns 0, the device is completely quiescent
2682 * now. There is no request-processing activity. All new requests
2683 * are being added to md->deferred list.
2684 */
2685 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2686 unsigned int suspend_flags, unsigned int task_state,
2687 int dmf_suspended_flag)
2688 {
2689 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2690 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2691 int r;
2692
2693 lockdep_assert_held(&md->suspend_lock);
2694
2695 /*
2696 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2697 * This flag is cleared before dm_suspend returns.
2698 */
2699 if (noflush)
2700 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2701 else
2702 DMDEBUG("%s: suspending with flush", dm_device_name(md));
2703
2704 /*
2705 * This gets reverted if there's an error later and the targets
2706 * provide the .presuspend_undo hook.
2707 */
2708 dm_table_presuspend_targets(map);
2709
2710 /*
2711 * Flush I/O to the device.
2712 * Any I/O submitted after lock_fs() may not be flushed.
2713 * noflush takes precedence over do_lockfs.
2714 * (lock_fs() flushes I/Os and waits for them to complete.)
2715 */
2716 if (!noflush && do_lockfs) {
2717 r = lock_fs(md);
2718 if (r) {
2719 dm_table_presuspend_undo_targets(map);
2720 return r;
2721 }
2722 }
2723
2724 /*
2725 * Here we must make sure that no processes are submitting requests
2726 * to target drivers i.e. no one may be executing
2727 * dm_split_and_process_bio from dm_submit_bio.
2728 *
2729 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2730 * we take the write lock. To prevent any process from reentering
2731 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2732 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2733 * flush_workqueue(md->wq).
2734 */
2735 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2736 if (map)
2737 synchronize_srcu(&md->io_barrier);
2738
2739 /*
2740 * Stop md->queue before flushing md->wq in case request-based
2741 * dm defers requests to md->wq from md->queue.
2742 */
2743 if (dm_request_based(md))
2744 dm_stop_queue(md->queue);
2745
2746 flush_workqueue(md->wq);
2747
2748 /*
2749 * At this point no more requests are entering target request routines.
2750 * We call dm_wait_for_completion to wait for all existing requests
2751 * to finish.
2752 */
2753 r = dm_wait_for_completion(md, task_state);
2754 if (!r)
2755 set_bit(dmf_suspended_flag, &md->flags);
2756
2757 if (noflush)
2758 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2759 if (map)
2760 synchronize_srcu(&md->io_barrier);
2761
2762 /* were we interrupted ? */
2763 if (r < 0) {
2764 dm_queue_flush(md);
2765
2766 if (dm_request_based(md))
2767 dm_start_queue(md->queue);
2768
2769 unlock_fs(md);
2770 dm_table_presuspend_undo_targets(map);
2771 /* pushback list is already flushed, so skip flush */
2772 }
2773
2774 return r;
2775 }
2776
2777 /*
2778 * We need to be able to change a mapping table under a mounted
2779 * filesystem. For example we might want to move some data in
2780 * the background. Before the table can be swapped with
2781 * dm_bind_table, dm_suspend must be called to flush any in
2782 * flight bios and ensure that any further io gets deferred.
2783 */
2784 /*
2785 * Suspend mechanism in request-based dm.
2786 *
2787 * 1. Flush all I/Os by lock_fs() if needed.
2788 * 2. Stop dispatching any I/O by stopping the request_queue.
2789 * 3. Wait for all in-flight I/Os to be completed or requeued.
2790 *
2791 * To abort suspend, start the request_queue.
2792 */
2793 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
2794 {
2795 struct dm_table *map = NULL;
2796 int r = 0;
2797
2798 retry:
2799 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2800
2801 if (dm_suspended_md(md)) {
2802 r = -EINVAL;
2803 goto out_unlock;
2804 }
2805
2806 if (dm_suspended_internally_md(md)) {
2807 /* already internally suspended, wait for internal resume */
2808 mutex_unlock(&md->suspend_lock);
2809 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2810 if (r)
2811 return r;
2812 goto retry;
2813 }
2814
2815 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2816 if (!map) {
2817 /* avoid deadlock with fs/namespace.c:do_mount() */
2818 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
2819 }
2820
2821 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2822 if (r)
2823 goto out_unlock;
2824
2825 set_bit(DMF_POST_SUSPENDING, &md->flags);
2826 dm_table_postsuspend_targets(map);
2827 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2828
2829 out_unlock:
2830 mutex_unlock(&md->suspend_lock);
2831 return r;
2832 }
2833
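/*
 * Hedged examples of the suspend_flags handled above (the values come from
 * userspace via dm-ioctl; shown only to make the flag semantics concrete):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);   freeze the fs and flush I/O
 *	dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);  queue I/O instead of flushing
 */
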
2834 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2835 {
2836 if (map) {
2837 int r = dm_table_resume_targets(map);
2838
2839 if (r)
2840 return r;
2841 }
2842
2843 dm_queue_flush(md);
2844
2845 /*
2846 * Flushing deferred I/Os must be done after targets are resumed
2847 * so that mapping of targets can work correctly.
2848 * Request-based dm is queueing the deferred I/Os in its request_queue.
2849 */
2850 if (dm_request_based(md))
2851 dm_start_queue(md->queue);
2852
2853 unlock_fs(md);
2854
2855 return 0;
2856 }
2857
2858 int dm_resume(struct mapped_device *md)
2859 {
2860 int r;
2861 struct dm_table *map = NULL;
2862
2863 retry:
2864 r = -EINVAL;
2865 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2866
2867 if (!dm_suspended_md(md))
2868 goto out;
2869
2870 if (dm_suspended_internally_md(md)) {
2871 /* already internally suspended, wait for internal resume */
2872 mutex_unlock(&md->suspend_lock);
2873 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2874 if (r)
2875 return r;
2876 goto retry;
2877 }
2878
2879 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2880 if (!map || !dm_table_get_size(map))
2881 goto out;
2882
2883 r = __dm_resume(md, map);
2884 if (r)
2885 goto out;
2886
2887 clear_bit(DMF_SUSPENDED, &md->flags);
2888 out:
2889 mutex_unlock(&md->suspend_lock);
2890
2891 return r;
2892 }
2893
2894 /*
2895 * Internal suspend/resume works like userspace-driven suspend. It waits
2896 * until all bios finish and prevents issuing new bios to the target drivers.
2897 * It may be used only from the kernel.
2898 */
2899
2900 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
2901 {
2902 struct dm_table *map = NULL;
2903
2904 lockdep_assert_held(&md->suspend_lock);
2905
2906 if (md->internal_suspend_count++)
2907 return; /* nested internal suspend */
2908
2909 if (dm_suspended_md(md)) {
2910 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2911 return; /* nest suspend */
2912 }
2913
2914 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2915
2916 /*
2917 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2918 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2919 * would require changing .presuspend to return an error -- avoid this
2920 * until there is a need for more elaborate variants of internal suspend.
2921 */
2922 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2923 DMF_SUSPENDED_INTERNALLY);
2924
2925 set_bit(DMF_POST_SUSPENDING, &md->flags);
2926 dm_table_postsuspend_targets(map);
2927 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2928 }
2929
2930 static void __dm_internal_resume(struct mapped_device *md)
2931 {
2932 int r;
2933 struct dm_table *map;
2934
2935 BUG_ON(!md->internal_suspend_count);
2936
2937 if (--md->internal_suspend_count)
2938 return; /* resume from nested internal suspend */
2939
2940 if (dm_suspended_md(md))
2941 goto done; /* resume from nested suspend */
2942
2943 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2944 r = __dm_resume(md, map);
2945 if (r) {
2946 /*
2947 * If a preresume method of some target failed, we are in a
2948 * tricky situation. We can't return an error to the caller. We
2949 * can't fake success because then the "resume" and
2950 * "postsuspend" methods would not be paired correctly, and it
2951 * would break various targets, for example it would cause list
2952 * corruption in the "origin" target.
2953 *
2954 * So, we fake normal suspend here, to make sure that the
2955 * "resume" and "postsuspend" methods will be paired correctly.
2956 */
2957 DMERR("Preresume method failed: %d", r);
2958 set_bit(DMF_SUSPENDED, &md->flags);
2959 }
2960 done:
2961 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2962 smp_mb__after_atomic();
2963 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2964 }
2965
2966 void dm_internal_suspend_noflush(struct mapped_device *md)
2967 {
2968 mutex_lock(&md->suspend_lock);
2969 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2970 mutex_unlock(&md->suspend_lock);
2971 }
2972 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2973
2974 void dm_internal_resume(struct mapped_device *md)
2975 {
2976 mutex_lock(&md->suspend_lock);
2977 __dm_internal_resume(md);
2978 mutex_unlock(&md->suspend_lock);
2979 }
2980 EXPORT_SYMBOL_GPL(dm_internal_resume);
2981
2982 /*
2983 * Fast variants of internal suspend/resume hold md->suspend_lock,
2984 * which prevents interaction with userspace-driven suspend.
2985 */
2986
2987 void dm_internal_suspend_fast(struct mapped_device *md)
2988 {
2989 mutex_lock(&md->suspend_lock);
2990 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2991 return;
2992
2993 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2994 synchronize_srcu(&md->io_barrier);
2995 flush_workqueue(md->wq);
2996 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2997 }
2998 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2999
3000 void dm_internal_resume_fast(struct mapped_device *md)
3001 {
3002 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3003 goto done;
3004
3005 dm_queue_flush(md);
3006
3007 done:
3008 mutex_unlock(&md->suspend_lock);
3009 }
3010 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3011
3012 /*
3013 *---------------------------------------------------------------
3014 * Event notification.
3015 *---------------------------------------------------------------
3016 */
3017 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3018 unsigned int cookie, bool need_resize_uevent)
3019 {
3020 int r;
3021 unsigned int noio_flag;
3022 char udev_cookie[DM_COOKIE_LENGTH];
3023 char *envp[3] = { NULL, NULL, NULL };
3024 char **envpp = envp;
3025 if (cookie) {
3026 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3027 DM_COOKIE_ENV_VAR_NAME, cookie);
3028 *envpp++ = udev_cookie;
3029 }
3030 if (need_resize_uevent) {
3031 *envpp++ = "RESIZE=1";
3032 }
3033
3034 noio_flag = memalloc_noio_save();
3035
3036 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3037
3038 memalloc_noio_restore(noio_flag);
3039
3040 return r;
3041 }
3042
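/*
 * dm_kobject_uevent() above wraps uevent emission in the standard
 * "no I/O allocations" scope.  A generic sketch of that scope (any
 * allocation made inside it is implicitly GFP_NOIO):
 *
 *	unsigned int noio_flag = memalloc_noio_save();
 *
 *	... allocate / emit the uevent ...
 *	memalloc_noio_restore(noio_flag);
 */
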
3043 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3044 {
3045 return atomic_add_return(1, &md->uevent_seq);
3046 }
3047
3048 uint32_t dm_get_event_nr(struct mapped_device *md)
3049 {
3050 return atomic_read(&md->event_nr);
3051 }
3052
3053 int dm_wait_event(struct mapped_device *md, int event_nr)
3054 {
3055 return wait_event_interruptible(md->eventq,
3056 (event_nr != atomic_read(&md->event_nr)));
3057 }
3058
3059 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3060 {
3061 unsigned long flags;
3062
3063 spin_lock_irqsave(&md->uevent_lock, flags);
3064 list_add(elist, &md->uevent_list);
3065 spin_unlock_irqrestore(&md->uevent_lock, flags);
3066 }
3067
3068 /*
3069 * The gendisk is only valid as long as you have a reference
3070 * count on 'md'.
3071 */
3072 struct gendisk *dm_disk(struct mapped_device *md)
3073 {
3074 return md->disk;
3075 }
3076 EXPORT_SYMBOL_GPL(dm_disk);
3077
3078 struct kobject *dm_kobject(struct mapped_device *md)
3079 {
3080 return &md->kobj_holder.kobj;
3081 }
3082
3083 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3084 {
3085 struct mapped_device *md;
3086
3087 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3088
3089 spin_lock(&_minor_lock);
3090 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3091 md = NULL;
3092 goto out;
3093 }
3094 dm_get(md);
3095 out:
3096 spin_unlock(&_minor_lock);
3097
3098 return md;
3099 }
3100
3101 int dm_suspended_md(struct mapped_device *md)
3102 {
3103 return test_bit(DMF_SUSPENDED, &md->flags);
3104 }
3105
3106 static int dm_post_suspending_md(struct mapped_device *md)
3107 {
3108 return test_bit(DMF_POST_SUSPENDING, &md->flags);
3109 }
3110
3111 int dm_suspended_internally_md(struct mapped_device *md)
3112 {
3113 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3114 }
3115
3116 int dm_test_deferred_remove_flag(struct mapped_device *md)
3117 {
3118 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3119 }
3120
3121 int dm_suspended(struct dm_target *ti)
3122 {
3123 return dm_suspended_md(ti->table->md);
3124 }
3125 EXPORT_SYMBOL_GPL(dm_suspended);
3126
3127 int dm_post_suspending(struct dm_target *ti)
3128 {
3129 return dm_post_suspending_md(ti->table->md);
3130 }
3131 EXPORT_SYMBOL_GPL(dm_post_suspending);
3132
3133 int dm_noflush_suspending(struct dm_target *ti)
3134 {
3135 return __noflush_suspending(ti->table->md);
3136 }
3137 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3138
3139 void dm_free_md_mempools(struct dm_md_mempools *pools)
3140 {
3141 if (!pools)
3142 return;
3143
3144 bioset_exit(&pools->bs);
3145 bioset_exit(&pools->io_bs);
3146
3147 kfree(pools);
3148 }
3149
3150 struct dm_pr {
3151 u64 old_key;
3152 u64 new_key;
3153 u32 flags;
3154 bool abort;
3155 bool fail_early;
3156 int ret;
3157 enum pr_type type;
3158 struct pr_keys *read_keys;
3159 struct pr_held_reservation *rsv;
3160 };
3161
3162 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3163 struct dm_pr *pr)
3164 {
3165 struct mapped_device *md = bdev->bd_disk->private_data;
3166 struct dm_table *table;
3167 struct dm_target *ti;
3168 int ret = -ENOTTY, srcu_idx;
3169
3170 table = dm_get_live_table(md, &srcu_idx);
3171 if (!table || !dm_table_get_size(table))
3172 goto out;
3173
3174 /* We only support devices that have a single target */
3175 if (table->num_targets != 1)
3176 goto out;
3177 ti = dm_table_get_target(table, 0);
3178
3179 if (dm_suspended_md(md)) {
3180 ret = -EAGAIN;
3181 goto out;
3182 }
3183
3184 ret = -EINVAL;
3185 if (!ti->type->iterate_devices)
3186 goto out;
3187
3188 ti->type->iterate_devices(ti, fn, pr);
3189 ret = 0;
3190 out:
3191 dm_put_live_table(md, srcu_idx);
3192 return ret;
3193 }
3194
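/*
 * dm_call_pr() above drives ti->type->iterate_devices() with the __dm_pr_*
 * callbacks that follow.  Their shared contract, as a hedged sketch
 * (__dm_pr_example and do_pr_op are hypothetical names): record the outcome
 * in pr->ret and return non-zero to stop iterating over further paths.
 *
 *	static int __dm_pr_example(struct dm_target *ti, struct dm_dev *dev,
 *				   sector_t start, sector_t len, void *data)
 *	{
 *		struct dm_pr *pr = data;
 *
 *		pr->ret = do_pr_op(dev->bdev, pr);
 *		return pr->ret ? -1 : 0;
 *	}
 */
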
3195 /*
3196 * For register / unregister we need to manually call out to every path.
3197 */
3198 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3199 sector_t start, sector_t len, void *data)
3200 {
3201 struct dm_pr *pr = data;
3202 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3203 int ret;
3204
3205 if (!ops || !ops->pr_register) {
3206 pr->ret = -EOPNOTSUPP;
3207 return -1;
3208 }
3209
3210 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3211 if (!ret)
3212 return 0;
3213
3214 if (!pr->ret)
3215 pr->ret = ret;
3216
3217 if (pr->fail_early)
3218 return -1;
3219
3220 return 0;
3221 }
3222
3223 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3224 u32 flags)
3225 {
3226 struct dm_pr pr = {
3227 .old_key = old_key,
3228 .new_key = new_key,
3229 .flags = flags,
3230 .fail_early = true,
3231 .ret = 0,
3232 };
3233 int ret;
3234
3235 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3236 if (ret) {
3237 /* Didn't even get to register a path */
3238 return ret;
3239 }
3240
3241 if (!pr.ret)
3242 return 0;
3243 ret = pr.ret;
3244
3245 if (!new_key)
3246 return ret;
3247
3248 /* unregister all paths if we failed to register any path */
3249 pr.old_key = new_key;
3250 pr.new_key = 0;
3251 pr.flags = 0;
3252 pr.fail_early = false;
3253 (void) dm_call_pr(bdev, __dm_pr_register, &pr);
3254 return ret;
3255 }
3256
3257
3258 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3259 sector_t start, sector_t len, void *data)
3260 {
3261 struct dm_pr *pr = data;
3262 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3263
3264 if (!ops || !ops->pr_reserve) {
3265 pr->ret = -EOPNOTSUPP;
3266 return -1;
3267 }
3268
3269 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3270 if (!pr->ret)
3271 return -1;
3272
3273 return 0;
3274 }
3275
3276 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3277 u32 flags)
3278 {
3279 struct dm_pr pr = {
3280 .old_key = key,
3281 .flags = flags,
3282 .type = type,
3283 .fail_early = false,
3284 .ret = 0,
3285 };
3286 int ret;
3287
3288 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3289 if (ret)
3290 return ret;
3291
3292 return pr.ret;
3293 }
3294
3295 /*
3296 * If there is a non-All Registrants type of reservation, the release must be
3297 * sent down the holding path. For the cases where there is no reservation, or
3298 * the path is not the holder, the device will also return success, so we must
3299 * try each path to make sure we hit the correct one.
3300 */
3301 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3302 sector_t start, sector_t len, void *data)
3303 {
3304 struct dm_pr *pr = data;
3305 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3306
3307 if (!ops || !ops->pr_release) {
3308 pr->ret = -EOPNOTSUPP;
3309 return -1;
3310 }
3311
3312 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3313 if (pr->ret)
3314 return -1;
3315
3316 return 0;
3317 }
3318
3319 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3320 {
3321 struct dm_pr pr = {
3322 .old_key = key,
3323 .type = type,
3324 .fail_early = false,
3325 };
3326 int ret;
3327
3328 ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3329 if (ret)
3330 return ret;
3331
3332 return pr.ret;
3333 }
3334
3335 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3336 sector_t start, sector_t len, void *data)
3337 {
3338 struct dm_pr *pr = data;
3339 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3340
3341 if (!ops || !ops->pr_preempt) {
3342 pr->ret = -EOPNOTSUPP;
3343 return -1;
3344 }
3345
3346 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3347 pr->abort);
3348 if (!pr->ret)
3349 return -1;
3350
3351 return 0;
3352 }
3353
3354 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3355 enum pr_type type, bool abort)
3356 {
3357 struct dm_pr pr = {
3358 .new_key = new_key,
3359 .old_key = old_key,
3360 .type = type,
3361 .fail_early = false,
3362 };
3363 int ret;
3364
3365 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3366 if (ret)
3367 return ret;
3368
3369 return pr.ret;
3370 }
3371
3372 static int dm_pr_clear(struct block_device *bdev, u64 key)
3373 {
3374 struct mapped_device *md = bdev->bd_disk->private_data;
3375 const struct pr_ops *ops;
3376 int r, srcu_idx;
3377
3378 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3379 if (r < 0)
3380 goto out;
3381
3382 ops = bdev->bd_disk->fops->pr_ops;
3383 if (ops && ops->pr_clear)
3384 r = ops->pr_clear(bdev, key);
3385 else
3386 r = -EOPNOTSUPP;
3387 out:
3388 dm_unprepare_ioctl(md, srcu_idx);
3389 return r;
3390 }
3391
3392 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
3393 sector_t start, sector_t len, void *data)
3394 {
3395 struct dm_pr *pr = data;
3396 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3397
3398 if (!ops || !ops->pr_read_keys) {
3399 pr->ret = -EOPNOTSUPP;
3400 return -1;
3401 }
3402
3403 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
3404 if (!pr->ret)
3405 return -1;
3406
3407 return 0;
3408 }
3409
3410 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
3411 {
3412 struct dm_pr pr = {
3413 .read_keys = keys,
3414 };
3415 int ret;
3416
3417 ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3418 if (ret)
3419 return ret;
3420
3421 return pr.ret;
3422 }
3423
3424 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
3425 sector_t start, sector_t len, void *data)
3426 {
3427 struct dm_pr *pr = data;
3428 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3429
3430 if (!ops || !ops->pr_read_reservation) {
3431 pr->ret = -EOPNOTSUPP;
3432 return -1;
3433 }
3434
3435 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
3436 if (!pr->ret)
3437 return -1;
3438
3439 return 0;
3440 }
3441
3442 static int dm_pr_read_reservation(struct block_device *bdev,
3443 struct pr_held_reservation *rsv)
3444 {
3445 struct dm_pr pr = {
3446 .rsv = rsv,
3447 };
3448 int ret;
3449
3450 ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3451 if (ret)
3452 return ret;
3453
3454 return pr.ret;
3455 }
3456
3457 static const struct pr_ops dm_pr_ops = {
3458 .pr_register = dm_pr_register,
3459 .pr_reserve = dm_pr_reserve,
3460 .pr_release = dm_pr_release,
3461 .pr_preempt = dm_pr_preempt,
3462 .pr_clear = dm_pr_clear,
3463 .pr_read_keys = dm_pr_read_keys,
3464 .pr_read_reservation = dm_pr_read_reservation,
3465 };
3466
3467 static const struct block_device_operations dm_blk_dops = {
3468 .submit_bio = dm_submit_bio,
3469 .poll_bio = dm_poll_bio,
3470 .open = dm_blk_open,
3471 .release = dm_blk_close,
3472 .ioctl = dm_blk_ioctl,
3473 .getgeo = dm_blk_getgeo,
3474 .report_zones = dm_blk_report_zones,
3475 .pr_ops = &dm_pr_ops,
3476 .owner = THIS_MODULE
3477 };
3478
3479 static const struct block_device_operations dm_rq_blk_dops = {
3480 .open = dm_blk_open,
3481 .release = dm_blk_close,
3482 .ioctl = dm_blk_ioctl,
3483 .getgeo = dm_blk_getgeo,
3484 .pr_ops = &dm_pr_ops,
3485 .owner = THIS_MODULE
3486 };
3487
3488 static const struct dax_operations dm_dax_ops = {
3489 .direct_access = dm_dax_direct_access,
3490 .zero_page_range = dm_dax_zero_page_range,
3491 .recovery_write = dm_dax_recovery_write,
3492 };
3493
3494 /*
3495 * module hooks
3496 */
3497 module_init(dm_init);
3498 module_exit(dm_exit);
3499
3500 module_param(major, uint, 0);
3501 MODULE_PARM_DESC(major, "The major number of the device mapper");
3502
3503 module_param(reserved_bio_based_ios, uint, 0644);
3504 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3505
3506 module_param(dm_numa_node, int, 0644);
3507 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3508
3509 module_param(swap_bios, int, 0644);
3510 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3511
3512 MODULE_DESCRIPTION(DM_NAME " driver");
3513 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3514 MODULE_LICENSE("GPL");
3515