--- dm-thin.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2)
+++ dm-thin.c (2a0fbffb1e50939a969d5efe495667a3aa0f72f7)
 /*
  * Copyright (C) 2011-2012 Red Hat UK.
  *
  * This file is released under the GPL.
  */

 #include "dm-thin-metadata.h"
 #include "dm-bio-prison.h"

--- 239 unchanged lines hidden ---

 	unsigned long last_commit_jiffies;
 	unsigned ref_count;

 	spinlock_t lock;
 	struct bio_list deferred_flush_bios;
 	struct list_head prepared_mappings;
 	struct list_head prepared_discards;
+	struct list_head prepared_discards_pt2;
 	struct list_head active_thins;

 	struct dm_deferred_set *shared_read_ds;
 	struct dm_deferred_set *all_io_ds;

 	struct dm_thin_new_mapping *next_mapping;
 	mempool_t *mapping_pool;

 	process_bio_fn process_bio;
 	process_bio_fn process_discard;

 	process_cell_fn process_cell;
 	process_cell_fn process_discard_cell;

 	process_mapping_fn process_prepared_mapping;
 	process_mapping_fn process_prepared_discard;
+	process_mapping_fn process_prepared_discard_pt2;

 	struct dm_bio_prison_cell **cell_sort_array;
 };

 static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);

 /*
--- 37 unchanged lines hidden ---
 	 * iterating the active_thins list.
 	 */
 	atomic_t refcount;
 	struct completion can_destroy;
 };

 /*----------------------------------------------------------------*/

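All deferred work in the pool is routed through the process_* function pointers declared in struct pool above, so the worker thread never branches on pool mode itself; set_discard_callbacks() and set_pool_mode() (further down in this diff) install the passdown or no-passdown variants. A stripped-down user-space model of that dispatch shape, with illustrative struct and function names that are not the kernel's:

#include <stdio.h>

struct mapping;				/* stand-in for dm_thin_new_mapping */
typedef void (*process_mapping_fn)(struct mapping *m);

struct pool_model {
	process_mapping_fn process_prepared_discard;
	process_mapping_fn process_prepared_discard_pt2;
};

static void pt1(struct mapping *m) { (void)m; printf("pt1: issue passdown\n"); }
static void pt2(struct mapping *m) { (void)m; printf("pt2: release blocks\n"); }

int main(void)
{
	struct pool_model pool = {
		/* roughly what set_discard_callbacks() does with passdown on */
		.process_prepared_discard = pt1,
		.process_prepared_discard_pt2 = pt2,
	};

	/* the worker just calls through whatever hooks are installed */
	pool.process_prepared_discard(NULL);
	pool.process_prepared_discard_pt2(NULL);
	return 0;
}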
-/**
- * __blkdev_issue_discard_async - queue a discard with async completion
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- * @parent_bio:	parent discard bio that all sub discards get chained to
- *
- * Description:
- *    Asynchronously issue a discard request for the sectors in question.
- */
-static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
-					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
-					struct bio *parent_bio)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
-	struct bio *bio;
-
-	if (!q || !nr_sects)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	/*
-	 * Required bio_put occurs in bio_endio thanks to bio_chain below
-	 */
-	bio = bio_alloc(gfp_mask, 1);
-	if (!bio)
-		return -ENOMEM;
-
-	bio_chain(bio, parent_bio);
-
-	bio->bi_iter.bi_sector = sector;
-	bio->bi_bdev = bdev;
-	bio->bi_iter.bi_size = nr_sects << 9;
-
-	submit_bio(type, bio);
-
-	return 0;
-}
-
 static bool block_size_is_power_of_two(struct pool *pool)
 {
 	return pool->sectors_per_block_shift >= 0;
 }

 static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
 {
 	return block_size_is_power_of_two(pool) ?
 		(b << pool->sectors_per_block_shift) :
 		(b * pool->sectors_per_block);
 }

-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
-			 struct bio *parent_bio)
-{
-	sector_t s = block_to_sectors(tc->pool, data_b);
-	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
-
-	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
-					    GFP_NOWAIT, 0, parent_bio);
-}
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+	struct thin_c *tc;
+	struct blk_plug plug;
+	struct bio *parent_bio;
+	struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
+{
+	BUG_ON(!parent);
+
+	op->tc = tc;
+	blk_start_plug(&op->plug);
+	op->parent_bio = parent;
+	op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
+{
+	struct thin_c *tc = op->tc;
+	sector_t s = block_to_sectors(tc->pool, data_b);
+	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
+
+	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+				      GFP_NOWAIT, 0, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+	if (op->bio) {
+		/*
+		 * Even if one of the calls to issue_discard failed, we
+		 * need to wait for the chain to complete.
+		 */
+		bio_chain(op->bio, op->parent_bio);
+		bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
+		submit_bio(op->bio);
+	}
+
+	blk_finish_plug(&op->plug);
+
+	/*
+	 * Even if r is set, there could be sub discards in flight that we
+	 * need to wait for.
+	 */
+	if (r && !op->parent_bio->bi_error)
+		op->parent_bio->bi_error = r;
+	bio_endio(op->parent_bio);
+}

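The discard_op triple added above is the new issuing pattern: begin_discard() opens a block plug and records the parent bio, each issue_discard() call folds another block range into op->bio via __blkdev_issue_discard(), and end_discard() chains the accumulated bio to the parent and completes it. The sketch below shows the calling convention; it mirrors the else-branch of process_prepared_discard_passdown_pt1() later in this diff, with an illustrative wrapper name and range arguments:

/* Sketch only: how a caller drives the discard_op helpers. */
static void discard_one_range(struct thin_c *tc, struct bio *parent,
			      dm_block_t data_b, dm_block_t data_e)
{
	struct discard_op op;
	int r;

	begin_discard(&op, tc, parent);		/* plug + remember parent */
	r = issue_discard(&op, data_b, data_e);	/* accumulate into op->bio */
	end_discard(&op, r);			/* chain to parent, end it */
}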
 /*----------------------------------------------------------------*/

 /*
  * wake_worker() is used when new work is queued and when pool_resume is
  * ready to continue deferred IO processing.
  */
 static void wake_worker(struct pool *pool)
 {
--- 222 unchanged lines hidden ---
 	error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
 	rcu_read_unlock();
 }

 static void error_retry_list(struct pool *pool)
 {
 	int error = get_pool_io_error_code(pool);

-	return error_retry_list_with_code(pool, error);
+	error_retry_list_with_code(pool, error);
 }

 /*
  * This section of code contains the logic for processing a thin device's IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
  * but most is exclusively called from the thin target rather than the thin-pool
  * target.
  */
--- 56 unchanged lines hidden ---

 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 {
 	bio->bi_bdev = tc->origin_dev->bdev;
 }

 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 		dm_thin_changed_this_transaction(tc->td);
 }

 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
 {
 	struct dm_thin_endio_hook *h;

-	if (bio->bi_rw & REQ_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD)
 		return;

 	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
 }

 static void issue(struct thin_c *tc, struct bio *bio)
 {
--- 146 unchanged lines hidden ---

 static void __inc_remap_and_issue_cell(void *context,
 				       struct dm_bio_prison_cell *cell)
 {
 	struct remap_info *info = context;
 	struct bio *bio;

 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+		if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			inc_all_io_entry(info->tc->pool, bio);

 			/*
 			 * We can't issue the bios with the bio prison lock
 			 * held, so we add them to a list to issue on
 			 * return from this function.
--- 113 unchanged lines hidden ---
 		bio_io_error(m->bio);
 	} else
 		bio_endio(m->bio);

 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, tc->pool->mapping_pool);
 }

-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
+						   struct bio *discard_parent)
 {
 	/*
 	 * We've already unmapped this range of blocks, but before we
 	 * passdown we have to check that these blocks are now unused.
 	 */
-	int r;
+	int r = 0;
 	bool used = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+	struct discard_op op;

+	begin_discard(&op, tc, discard_parent);
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
 			r = dm_pool_block_is_used(pool->pmd, b, &used);
 			if (r)
-				return r;
+				goto out;

 			if (!used)
 				break;
 		}

 		if (b == end)
 			break;

 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
 			r = dm_pool_block_is_used(pool->pmd, e, &used);
 			if (r)
-				return r;
+				goto out;

 			if (used)
 				break;
 		}

-		r = issue_discard(tc, b, e, m->bio);
+		r = issue_discard(&op, b, e);
 		if (r)
-			return r;
+			goto out;

 		b = e;
 	}
-
-	return 0;
+out:
+	end_discard(&op, r);
 }

-static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+	unsigned long flags;
+	struct pool *pool = m->tc->pool;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_add_tail(&m->list, &pool->prepared_discards_pt2);
+	spin_unlock_irqrestore(&pool->lock, flags);
+	wake_worker(pool);
+}
+
+static void passdown_endio(struct bio *bio)
+{
+	/*
+	 * It doesn't matter if the passdown discard failed, we still want
+	 * to unmap (we ignore err).
+	 */
+	queue_passdown_pt2(bio->bi_private);
+}
+
+static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 {
1058 struct thin_c *tc = m->tc;
1059 struct pool *pool = tc->pool;
1077 int r;
1078 struct thin_c *tc = m->tc;
1079 struct pool *pool = tc->pool;
1080 struct bio *discard_parent;
1081 dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
1060
1082
1083 /*
1084 * Only this thread allocates blocks, so we can be sure that the
1085 * newly unmapped blocks will not be allocated before the end of
1086 * the function.
1087 */
1061 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
 	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
-	if (r)
+	if (r) {
 		metadata_operation_failed(pool, "dm_thin_remove_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
1096
1065 else if (m->maybe_shared)
1066 r = passdown_double_checking_shared_status(m);
1067 else
1068 r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
1097 discard_parent = bio_alloc(GFP_NOIO, 1);
1098 if (!discard_parent) {
1099 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
1100 dm_device_name(tc->pool->pool_md));
1101 queue_passdown_pt2(m);
1069
1102
1103 } else {
1104 discard_parent->bi_end_io = passdown_endio;
1105 discard_parent->bi_private = m;
1106
1107 if (m->maybe_shared)
1108 passdown_double_checking_shared_status(m, discard_parent);
1109 else {
1110 struct discard_op op;
1111
1112 begin_discard(&op, tc, discard_parent);
1113 r = issue_discard(&op, m->data_block, data_end);
1114 end_discard(&op, r);
1115 }
1116 }
1117
 	/*
-	 * Even if r is set, there could be sub discards in flight that we
-	 * need to wait for.
+	 * Increment the unmapped blocks.  This prevents a race between the
+	 * passdown io and reallocation of freed blocks.
 	 */
-	m->bio->bi_error = r;
-	bio_endio(m->bio);
+	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
+}
+
+static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+	int r;
+	struct thin_c *tc = m->tc;
+	struct pool *pool = tc->pool;
+
+	/*
+	 * The passdown has completed, so now we can decrement all those
+	 * unmapped blocks.
+	 */
+	r = dm_pool_dec_data_range(pool->pmd, m->data_block,
+				   m->data_block + (m->virt_end - m->virt_begin));
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
+		bio_io_error(m->bio);
+	} else
+		bio_endio(m->bio);
+
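Splitting the passdown into pt1/pt2 closes the race the single-stage version had: pt1 removes the thin-device mapping and issues the passdown discard, then re-pins the data blocks with dm_pool_inc_data_range() (safe because only this worker thread allocates blocks); pt2, queued from passdown_endio(), drops that pin with dm_pool_dec_data_range(), so the blocks become reallocatable only after the discard has actually reached the underlying device. A user-space toy of that pinning protocol, assuming a single block and plain counters standing in for the pool metadata (names are illustrative):

#include <assert.h>

/* Toy model: 0 means the allocator may hand the data block out again. */
static int block_refs;

int main(void)
{
	block_refs = 1;		/* referenced by the thin-device mapping */

	/* pt1: unmap, then pin the block for the in-flight passdown */
	block_refs--;		/* dm_thin_remove_range() drops the mapping */
	block_refs++;		/* dm_pool_inc_data_range() re-pins it */

	/* passdown discard in flight: block must not be reallocatable */
	assert(block_refs > 0);

	/* pt2 (after passdown_endio): safe to release for reuse */
	block_refs--;		/* dm_pool_dec_data_range() */
	assert(block_refs == 0);
	return 0;
}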
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }

 static void process_prepared(struct pool *pool, struct list_head *head,
 			     process_mapping_fn *fn)
 {
 	unsigned long flags;
--- 405 unchanged lines hidden ---
 	m->virt_end = virt_cell->key.block_end;
 	m->cell = virt_cell;
 	m->bio = virt_cell->holder;

 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
 		pool->process_prepared_discard(m);
 }

-/*
- * __bio_inc_remaining() is used to defer parent bios's end_io until
- * we _know_ all chained sub range discard bios have completed.
- */
-static inline void __bio_inc_remaining(struct bio *bio)
-{
-	bio->bi_flags |= (1 << BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
 				 struct bio *bio)
 {
 	struct pool *pool = tc->pool;

 	int r;
 	bool maybe_shared;
 	struct dm_cell_key data_key;
--- 33 unchanged lines hidden ---
 		m->virt_begin = virt_begin;
 		m->virt_end = virt_end;
 		m->data_block = data_begin;
 		m->cell = data_cell;
 		m->bio = bio;

 		/*
 		 * The parent bio must not complete before sub discard bios are
-		 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
+		 * chained to it (see end_discard's bio_chain)!
 		 *
 		 * This per-mapping bi_remaining increment is paired with
 		 * the implicit decrement that occurs via bio_endio() in
-		 * process_prepared_discard_{passdown,no_passdown}.
+		 * end_discard().
 		 */
-		__bio_inc_remaining(bio);
+		bio_inc_remaining(bio);
 		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
 			pool->process_prepared_discard(m);

 		begin = virt_end;
 	}
 }

 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
--- 77 unchanged lines hidden ---
 static void __remap_and_issue_shared_cell(void *context,
 					  struct dm_bio_prison_cell *cell)
 {
 	struct remap_info *info = context;
 	struct bio *bio;

 	while ((bio = bio_list_pop(&cell->bios))) {
 		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
+		    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		     bio_op(bio) == REQ_OP_DISCARD))
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;

 			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
 			inc_all_io_entry(info->tc->pool, bio);
 			bio_list_add(&info->issue_bios, bio);
 		}
--- 372 unchanged lines hidden ---
 		if (ensure_next_mapping(pool)) {
 			spin_lock_irqsave(&tc->lock, flags);
 			bio_list_add(&tc->deferred_bio_list, bio);
 			bio_list_merge(&tc->deferred_bio_list, &bios);
 			spin_unlock_irqrestore(&tc->lock, flags);
 			break;
 		}

-		if (bio->bi_rw & REQ_DISCARD)
+		if (bio_op(bio) == REQ_OP_DISCARD)
 			pool->process_discard(tc, bio);
 		else
 			pool->process_bio(tc, bio);

 		if ((count++ & 127) == 0) {
 			throttle_work_update(&pool->throttle);
 			dm_pool_issue_prefetches(pool->pmd);
 		}
--- 70 unchanged lines hidden ---
 			list_add(&pool->cell_sort_array[j]->user_list, &cells);

 			spin_lock_irqsave(&tc->lock, flags);
 			list_splice(&cells, &tc->deferred_cells);
 			spin_unlock_irqrestore(&tc->lock, flags);
 			return;
 		}

-			if (cell->holder->bi_rw & REQ_DISCARD)
+			if (bio_op(cell->holder) == REQ_OP_DISCARD)
 				pool->process_discard_cell(tc, cell);
 			else
 				pool->process_cell(tc, cell);
 		}
 	} while (!list_empty(&cells));
 }

 static void thin_get(struct thin_c *tc);
--- 80 unchanged lines hidden ---

 	throttle_work_start(&pool->throttle);
 	dm_pool_issue_prefetches(pool->pmd);
 	throttle_work_update(&pool->throttle);
 	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
 	throttle_work_update(&pool->throttle);
 	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
 	throttle_work_update(&pool->throttle);
+	process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
+	throttle_work_update(&pool->throttle);
 	process_deferred_bios(pool);
 	throttle_work_complete(&pool->throttle);
 }

 /*
  * We want to commit periodically so that not too much
  * unwritten data builds up.
  */
--- 112 unchanged lines hidden ---
 }

 static void set_discard_callbacks(struct pool *pool)
 {
 	struct pool_c *pt = pool->ti->private;

 	if (passdown_enabled(pt)) {
 		pool->process_discard_cell = process_discard_cell_passdown;
-		pool->process_prepared_discard = process_prepared_discard_passdown;
+		pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
+		pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
 	} else {
 		pool->process_discard_cell = process_discard_cell_no_passdown;
 		pool->process_prepared_discard = process_prepared_discard_no_passdown;
 	}
 }

 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 {
--- 196 unchanged lines hidden ---
 		return DM_MAPIO_SUBMITTED;
 	}

 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		bio_io_error(bio);
 		return DM_MAPIO_SUBMITTED;
 	}

-	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+	    bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
 	}

 	/*
 	 * We must hold the virtual cell before doing the lookup, otherwise
 	 * there's a race with discard.
 	 */
--- 256 unchanged lines hidden ---
 	throttle_init(&pool->throttle);
 	INIT_WORK(&pool->worker, do_worker);
 	INIT_DELAYED_WORK(&pool->waker, do_waker);
 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
+	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
 	INIT_LIST_HEAD(&pool->active_thins);
 	pool->low_water_triggered = false;
 	pool->suspended = true;
 	pool->out_of_data_space = false;

 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
 		*error = "Error creating pool's shared read deferred set";
--- 1042 unchanged lines hidden ---
 	 * device. DM core has already set this up.
 	 */
 }

 static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
 	.map = pool_map,
 	.presuspend = pool_presuspend,
 	.presuspend_undo = pool_presuspend_undo,
 	.postsuspend = pool_postsuspend,
 	.preresume = pool_preresume,
--- 357 unchanged lines hidden ---
 		return;

 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 	limits->max_discard_sectors = 2048 * 1024 * 16;	/* 16G */
 }

 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
 	.map = thin_map,
 	.end_io = thin_endio,
 	.preresume = thin_preresume,
 	.presuspend = thin_presuspend,
 	.postsuspend = thin_postsuspend,
--- 54 unchanged lines hidden ---