Lines matching refs:pool
232 struct pool { struct
290 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
292 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
294 return pool->pf.mode; in get_pool_mode()
297 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
307 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
310 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
316 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
318 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
327 struct pool *pool; member
346 struct pool *pool; member
367 static bool block_size_is_power_of_two(struct pool *pool) in block_size_is_power_of_two() argument
369 return pool->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
372 static sector_t block_to_sectors(struct pool *pool, dm_block_t b) in block_to_sectors() argument
374 return block_size_is_power_of_two(pool) ? in block_to_sectors()
375 (b << pool->sectors_per_block_shift) : in block_to_sectors()
376 (b * pool->sectors_per_block); in block_to_sectors()
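
block_to_sectors() above picks between a shift and a multiply depending on whether the pool's block size is a power of two (sectors_per_block_shift is -1 otherwise). A minimal userspace sketch of the same arithmetic; struct pool_geom and the sample geometries are illustrative stand-ins, not the driver's types:

#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long long dm_block_t;

/* Illustrative stand-in for struct pool: only the two fields the
 * conversion helpers consult. */
struct pool_geom {
    sector_t sectors_per_block;
    int sectors_per_block_shift;    /* -1 when not a power of two */
};

static int block_size_is_power_of_two(const struct pool_geom *p)
{
    return p->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(const struct pool_geom *p, dm_block_t b)
{
    /* shift when the block size is a power of two, multiply otherwise */
    return block_size_is_power_of_two(p) ?
        (b << p->sectors_per_block_shift) :
        (b * p->sectors_per_block);
}

int main(void)
{
    struct pool_geom pow2 = { 128, 7 };   /* 128 = 1 << 7 */
    struct pool_geom odd  = { 96, -1 };

    printf("%llu %llu\n",
           block_to_sectors(&pow2, 3),    /* 384 */
           block_to_sectors(&odd, 3));    /* 288 */
    return 0;
}

The shift is computed once at pool creation: pool_create() (kernel lines 2964-2968 below) stores __ffs(block_size) when the size is a power of two and -1 otherwise.
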
401 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
402 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
436 static void wake_worker(struct pool *pool) in wake_worker() argument
438 queue_work(pool->wq, &pool->worker); in wake_worker()
443 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
453 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); in bio_detain()
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
461 dm_bio_prison_free_cell(pool->prison, cell_prealloc); in bio_detain()
466 static void cell_release(struct pool *pool, in cell_release() argument
470 dm_cell_release(pool->prison, cell, bios); in cell_release()
471 dm_bio_prison_free_cell(pool->prison, cell); in cell_release()
474 static void cell_visit_release(struct pool *pool, in cell_visit_release() argument
479 dm_cell_visit_release(pool->prison, fn, context, cell); in cell_visit_release()
480 dm_bio_prison_free_cell(pool->prison, cell); in cell_visit_release()
483 static void cell_release_no_holder(struct pool *pool, in cell_release_no_holder() argument
487 dm_cell_release_no_holder(pool->prison, cell, bios); in cell_release_no_holder()
488 dm_bio_prison_free_cell(pool->prison, cell); in cell_release_no_holder()
491 static void cell_error_with_code(struct pool *pool, in cell_error_with_code() argument
494 dm_cell_error(pool->prison, cell, error_code); in cell_error_with_code()
495 dm_bio_prison_free_cell(pool->prison, cell); in cell_error_with_code()
498 static blk_status_t get_pool_io_error_code(struct pool *pool) in get_pool_io_error_code() argument
500 return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; in get_pool_io_error_code()
503 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_error() argument
505 cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); in cell_error()
508 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_success() argument
510 cell_error_with_code(pool, cell, 0); in cell_success()
513 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_requeue() argument
515 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); in cell_requeue()
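
The wrappers above all follow one pattern around dm-bio-prison: bio_detain() preallocates a cell before calling into the prison (the allocation may block, so it cannot happen under the prison lock) and frees it again when the bio was detained in an existing cell, while cell_error(), cell_success() and cell_requeue() are just cell_error_with_code() with an I/O error status, 0, and BLK_STS_DM_REQUEUE respectively. A toy, single-threaded model of the detain contract; the prison, cell and integer key types here are invented simplifications:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: a "prison" maps an integer key to a cell that queues the
 * I/Os waiting on that key.  No locking here. */
struct cell {
    int key;
    int nr_waiters;
    struct cell *next;
};

struct prison {
    struct cell *cells;
};

/*
 * Returns 0 and installs @prealloc if @key was free (the caller becomes
 * the holder); returns 1 if an existing cell absorbed the I/O, in which
 * case the caller must free @prealloc -- the contract bio_detain() wraps.
 */
static int prison_detain(struct prison *p, int key, struct cell *prealloc,
                         struct cell **out)
{
    struct cell *c;

    for (c = p->cells; c; c = c->next) {
        if (c->key == key) {
            c->nr_waiters++;
            *out = c;
            return 1;
        }
    }
    prealloc->key = key;
    prealloc->next = p->cells;
    p->cells = prealloc;
    *out = prealloc;
    return 0;
}

static int detain(struct prison *p, int key, struct cell **out)
{
    /* allocate up front, as bio_detain() does with GFP_NOIO ... */
    struct cell *prealloc = calloc(1, sizeof(*prealloc));
    int r;

    if (!prealloc)
        return -1;
    r = prison_detain(p, key, prealloc, out);
    if (r)  /* ... and discard it when an old cell was reused */
        free(prealloc);
    return r;
}

int main(void)
{
    struct prison p = { 0 };
    struct cell *c;

    printf("%d\n", detain(&p, 42, &c)); /* 0: we hold the cell */
    printf("%d\n", detain(&p, 42, &c)); /* 1: queued behind the holder */
    free(p.cells);
    return 0;
}
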
539 static void __pool_table_insert(struct pool *pool) in __pool_table_insert() argument
542 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
545 static void __pool_table_remove(struct pool *pool) in __pool_table_remove() argument
548 list_del(&pool->list); in __pool_table_remove()
551 static struct pool *__pool_table_lookup(struct mapped_device *md) in __pool_table_lookup()
553 struct pool *pool = NULL, *tmp; in __pool_table_lookup() local
559 pool = tmp; in __pool_table_lookup()
564 return pool; in __pool_table_lookup()
567 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) in __pool_table_lookup_metadata_dev()
569 struct pool *pool = NULL, *tmp; in __pool_table_lookup_metadata_dev() local
575 pool = tmp; in __pool_table_lookup_metadata_dev()
580 return pool; in __pool_table_lookup_metadata_dev()
626 struct pool *pool = tc->pool; in requeue_deferred_cells() local
637 cell_requeue(pool, cell); in requeue_deferred_cells()
655 static void error_retry_list_with_code(struct pool *pool, blk_status_t error) in error_retry_list_with_code() argument
660 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
665 static void error_retry_list(struct pool *pool) in error_retry_list() argument
667 error_retry_list_with_code(pool, get_pool_io_error_code(pool)); in error_retry_list()
679 struct pool *pool = tc->pool; in get_bio_block() local
682 if (block_size_is_power_of_two(pool)) in get_bio_block()
683 block_nr >>= pool->sectors_per_block_shift; in get_bio_block()
685 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
696 struct pool *pool = tc->pool; in get_bio_block_range() local
700 b += pool->sectors_per_block - 1ull; /* so we round up */ in get_bio_block_range()
702 if (block_size_is_power_of_two(pool)) { in get_bio_block_range()
703 b >>= pool->sectors_per_block_shift; in get_bio_block_range()
704 e >>= pool->sectors_per_block_shift; in get_bio_block_range()
706 (void) sector_div(b, pool->sectors_per_block); in get_bio_block_range()
707 (void) sector_div(e, pool->sectors_per_block); in get_bio_block_range()
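
get_bio_block_range() rounds the begin up and the end down, so the resulting range only ever covers whole blocks; a partially covered edge block is left alone, which is exactly what a discard wants. A runnable sketch of that rounding, using plain division for the non-power-of-two case that sector_div() handles in the driver:

#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long long dm_block_t;

static void bio_block_range(sector_t begin, sector_t end,
                            sector_t sectors_per_block,
                            dm_block_t *b, dm_block_t *e)
{
    *b = (begin + sectors_per_block - 1) / sectors_per_block; /* round up */
    *e = end / sectors_per_block;                             /* round down */
    if (*e < *b)
        *e = *b;    /* a small bio may cover no whole block at all */
}

int main(void)
{
    dm_block_t b, e;

    /* sectors [100, 900) with 128-sector blocks -> whole blocks [1, 7) */
    bio_block_range(100, 900, 128, &b, &e);
    printf("[%llu, %llu)\n", b, e);
    return 0;
}
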
720 struct pool *pool = tc->pool; in remap() local
724 if (block_size_is_power_of_two(pool)) in remap()
726 (block << pool->sectors_per_block_shift) | in remap()
727 (bi_sector & (pool->sectors_per_block - 1)); in remap()
729 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
730 sector_div(bi_sector, pool->sectors_per_block); in remap()
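
remap() keeps the bio's offset within its block and substitutes the mapped data block: mask-and-or on the power-of-two path, divide/modulo otherwise (sector_div() divides in place and returns the remainder). The same computation as a pure function; the names are illustrative:

#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long long dm_block_t;

static sector_t remap_sector(sector_t virt_sector, dm_block_t data_block,
                             sector_t sectors_per_block, int shift)
{
    if (shift >= 0)     /* power-of-two block size: mask and or */
        return (data_block << shift) |
               (virt_sector & (sectors_per_block - 1));
    /* otherwise: multiply, plus the remainder sector_div() yields */
    return data_block * sectors_per_block +
           (virt_sector % sectors_per_block);
}

int main(void)
{
    /* virtual sector 700 sits 60 sectors into its 128-sector block;
     * mapped to data block 9 it lands at 9*128 + 60 = 1212 */
    printf("%llu\n", remap_sector(700, 9, 128, 7));   /* 1212 */
    printf("%llu\n", remap_sector(700, 9, 96, -1));   /* 892 */
    return 0;
}
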
744 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
752 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); in inc_all_io_entry()
757 struct pool *pool = tc->pool; in issue() local
778 spin_lock_irq(&pool->lock); in issue()
779 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
780 spin_unlock_irq(&pool->lock); in issue()
832 struct pool *pool = m->tc->pool; in __complete_mapping_preparation() local
835 list_add_tail(&m->list, &pool->prepared_mappings); in __complete_mapping_preparation()
836 wake_worker(pool); in __complete_mapping_preparation()
843 struct pool *pool = m->tc->pool; in complete_mapping_preparation() local
845 spin_lock_irqsave(&pool->lock, flags); in complete_mapping_preparation()
847 spin_unlock_irqrestore(&pool->lock, flags); in complete_mapping_preparation()
885 struct pool *pool = tc->pool; in cell_defer_no_holder() local
890 cell_release_no_holder(pool, cell, &bios); in cell_defer_no_holder()
896 wake_worker(pool); in cell_defer_no_holder()
918 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
946 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
958 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
960 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
965 struct pool *pool = tc->pool; in complete_overwrite_bio() local
990 spin_lock_irq(&pool->lock); in complete_overwrite_bio()
991 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
992 spin_unlock_irq(&pool->lock); in complete_overwrite_bio()
998 struct pool *pool = tc->pool; in process_prepared_mapping() local
1003 cell_error(pool, m->cell); in process_prepared_mapping()
1014 metadata_operation_failed(pool, "dm_thin_insert_block", r); in process_prepared_mapping()
1015 cell_error(pool, m->cell); in process_prepared_mapping()
1029 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1036 mempool_free(m, &pool->mapping_pool); in process_prepared_mapping()
1047 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1069 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1075 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1090 struct pool *pool = tc->pool; in passdown_double_checking_shared_status() local
1098 r = dm_pool_block_is_shared(pool->pmd, b, &shared); in passdown_double_checking_shared_status()
1111 r = dm_pool_block_is_shared(pool->pmd, e, &shared); in passdown_double_checking_shared_status()
1132 struct pool *pool = m->tc->pool; in queue_passdown_pt2() local
1134 spin_lock_irqsave(&pool->lock, flags); in queue_passdown_pt2()
1135 list_add_tail(&m->list, &pool->prepared_discards_pt2); in queue_passdown_pt2()
1136 spin_unlock_irqrestore(&pool->lock, flags); in queue_passdown_pt2()
1137 wake_worker(pool); in queue_passdown_pt2()
1154 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1() local
1165 metadata_operation_failed(pool, "dm_thin_remove_range", r); in process_prepared_discard_passdown_pt1()
1168 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1176 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); in process_prepared_discard_passdown_pt1()
1178 metadata_operation_failed(pool, "dm_pool_inc_data_range", r); in process_prepared_discard_passdown_pt1()
1181 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1203 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2() local
1209 r = dm_pool_dec_data_range(pool->pmd, m->data_block, in process_prepared_discard_passdown_pt2()
1212 metadata_operation_failed(pool, "dm_pool_dec_data_range", r); in process_prepared_discard_passdown_pt2()
1218 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt2()
1221 static void process_prepared(struct pool *pool, struct list_head *head, in process_prepared() argument
1228 spin_lock_irq(&pool->lock); in process_prepared()
1230 spin_unlock_irq(&pool->lock); in process_prepared()
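
process_prepared() splices the shared list into a private one under pool->lock and only then runs the per-mapping callback, keeping the spinlock hold time constant no matter how slow the callbacks are. A userspace sketch of that splice-then-process pattern, with a pthread mutex standing in for the spinlock and a singly linked list for list_head:

#include <stdio.h>
#include <pthread.h>

struct mapping { int id; struct mapping *next; };

static struct mapping *prepared;        /* shared, lock-protected */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_prepared(void (*fn)(struct mapping *))
{
    struct mapping *head, *m, *next;

    pthread_mutex_lock(&lock);
    head = prepared;                    /* list_splice_init() equivalent */
    prepared = NULL;
    pthread_mutex_unlock(&lock);

    for (m = head; m; m = next) {       /* callbacks run unlocked */
        next = m->next;
        fn(m);
    }
}

static void complete(struct mapping *m) { printf("mapping %d done\n", m->id); }

int main(void)
{
    struct mapping m1 = { 1, NULL }, m2 = { 2, &m1 };

    prepared = &m2;
    process_prepared(complete);
    return 0;
}
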
1239 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1242 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
1245 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1248 io_overlaps_block(pool, bio); in io_overwrites_block()
1258 static int ensure_next_mapping(struct pool *pool) in ensure_next_mapping() argument
1260 if (pool->next_mapping) in ensure_next_mapping()
1263 pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC); in ensure_next_mapping()
1265 return pool->next_mapping ? 0 : -ENOMEM; in ensure_next_mapping()
1268 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) in get_next_mapping() argument
1270 struct dm_thin_new_mapping *m = pool->next_mapping; in get_next_mapping()
1272 BUG_ON(!pool->next_mapping); in get_next_mapping()
1278 pool->next_mapping = NULL; in get_next_mapping()
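
ensure_next_mapping() and get_next_mapping() above implement a one-deep preallocation cache: the fallible allocation happens up front, so the later get can assert success (the BUG_ON). A minimal sketch of the same contract; struct new_mapping is a placeholder for the much larger struct dm_thin_new_mapping:

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

struct new_mapping {
    unsigned long long virt_block;
    unsigned long long data_block;
};

/* One-deep cache mirroring pool->next_mapping: ensure() may fail, get()
 * never does -- callers must ensure() first. */
static struct new_mapping *next_mapping;

static int ensure_next_mapping(void)
{
    if (next_mapping)
        return 0;
    next_mapping = malloc(sizeof(*next_mapping));
    return next_mapping ? 0 : -1;   /* -ENOMEM in the driver */
}

static struct new_mapping *get_next_mapping(void)
{
    struct new_mapping *m = next_mapping;

    assert(m);      /* stands in for BUG_ON(!pool->next_mapping) */
    next_mapping = NULL;
    return m;
}

int main(void)
{
    if (ensure_next_mapping() == 0) {
        struct new_mapping *m = get_next_mapping();
        m->virt_block = 1;
        m->data_block = 9;
        printf("%llu -> %llu\n", m->virt_block, m->data_block);
        free(m);
    }
    return 0;
}
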
1292 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1299 struct pool *pool = tc->pool; in remap_and_issue_overwrite() local
1305 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1318 struct pool *pool = tc->pool; in schedule_copy() local
1319 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_copy()
1334 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) in schedule_copy()
1343 if (io_overwrites_block(pool, bio)) in schedule_copy()
1349 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1353 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1356 dm_kcopyd_copy(pool->copier, &from, 1, &to, in schedule_copy()
1362 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1365 data_dest * pool->sectors_per_block + len, in schedule_copy()
1366 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
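
When schedule_copy() copies fewer sectors than a full block into freshly allocated space and zero_new_blocks is set, it zeroes the tail of the destination block (kernel lines 1362-1366 above). A worked example of that interval arithmetic; the numbers are arbitrary:

#include <stdio.h>

int main(void)
{
    unsigned long long sectors_per_block = 128, data_dest = 5, len = 40;
    unsigned long long zero_begin = data_dest * sectors_per_block + len;
    unsigned long long zero_end   = (data_dest + 1) * sectors_per_block;

    /* the copy fills sectors [640, 680); ll_zero() covers [680, 768) */
    printf("zero [%llu, %llu)\n", zero_begin, zero_end);
    return 0;
}
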
1379 tc->pool->sectors_per_block); in schedule_internal_copy()
1386 struct pool *pool = tc->pool; in schedule_zero() local
1387 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_zero()
1401 if (pool->pf.zero_new_blocks) { in schedule_zero()
1402 if (io_overwrites_block(pool, bio)) in schedule_zero()
1405 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1406 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1415 struct pool *pool = tc->pool; in schedule_external_copy() local
1416 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1417 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1422 pool->sectors_per_block); in schedule_external_copy()
1433 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1435 static void requeue_bios(struct pool *pool);
1442 static bool is_read_only(struct pool *pool) in is_read_only() argument
1444 return is_read_only_pool_mode(get_pool_mode(pool)); in is_read_only()
1447 static void check_for_metadata_space(struct pool *pool) in check_for_metadata_space() argument
1453 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); in check_for_metadata_space()
1459 if (ooms_reason && !is_read_only(pool)) { in check_for_metadata_space()
1461 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE); in check_for_metadata_space()
1465 static void check_for_data_space(struct pool *pool) in check_for_data_space() argument
1470 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) in check_for_data_space()
1473 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); in check_for_data_space()
1478 set_pool_mode(pool, PM_WRITE); in check_for_data_space()
1479 requeue_bios(pool); in check_for_data_space()
1487 static int commit(struct pool *pool) in commit() argument
1491 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) in commit()
1494 r = dm_pool_commit_metadata(pool->pmd); in commit()
1496 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); in commit()
1498 check_for_metadata_space(pool); in commit()
1499 check_for_data_space(pool); in commit()
1505 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) in check_low_water_mark() argument
1507 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { in check_low_water_mark()
1509 dm_device_name(pool->pool_md)); in check_low_water_mark()
1510 spin_lock_irq(&pool->lock); in check_low_water_mark()
1511 pool->low_water_triggered = true; in check_low_water_mark()
1512 spin_unlock_irq(&pool->lock); in check_low_water_mark()
1513 dm_table_event(pool->ti->table); in check_low_water_mark()
1521 struct pool *pool = tc->pool; in alloc_data_block() local
1523 if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) in alloc_data_block()
1526 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1528 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1532 check_low_water_mark(pool, free_blocks); in alloc_data_block()
1539 r = commit(pool); in alloc_data_block()
1543 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1545 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1550 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1555 r = dm_pool_alloc_data_block(pool->pmd, result); in alloc_data_block()
1558 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1560 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); in alloc_data_block()
1564 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1566 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r); in alloc_data_block()
1572 r = commit(pool); in alloc_data_block()
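
alloc_data_block() retries after a commit because blocks freed within the open transaction only become allocatable once that transaction is committed; if the count is still zero afterwards, the pool switches to PM_OUT_OF_DATA_SPACE. A toy model of the commit-then-retry flow; the counters and return codes are stand-ins for dm_pool_get_free_block_count() and dm_pool_alloc_data_block():

#include <stdio.h>

/* Toy allocator state: blocks freed in the open transaction become
 * allocatable only after a commit, as in dm-thin metadata. */
static unsigned long long free_blocks;
static unsigned long long freed_this_txn = 4;

static int commit(void)
{
    free_blocks += freed_this_txn;  /* a commit publishes pending frees */
    freed_this_txn = 0;
    return 0;
}

/* On apparent exhaustion, commit and re-check once before declaring the
 * pool out of data space. */
static int alloc_data_block(unsigned long long *result)
{
    if (!free_blocks) {
        if (commit())
            return -1;      /* metadata failure */
        if (!free_blocks)
            return -28;     /* -ENOSPC */
    }
    *result = --free_blocks;
    return 0;
}

int main(void)
{
    unsigned long long b;

    while (alloc_data_block(&b) == 0)
        printf("allocated block %llu\n", b);
    printf("out of data space\n");
    return 0;
}
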
1594 static blk_status_t should_error_unserviceable_bio(struct pool *pool) in should_error_unserviceable_bio() argument
1596 enum pool_mode m = get_pool_mode(pool); in should_error_unserviceable_bio()
1605 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; in should_error_unserviceable_bio()
1618 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1620 blk_status_t error = should_error_unserviceable_bio(pool); in handle_unserviceable_bio()
1629 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) in retry_bios_on_resume() argument
1635 error = should_error_unserviceable_bio(pool); in retry_bios_on_resume()
1637 cell_error_with_code(pool, cell, error); in retry_bios_on_resume()
1642 cell_release(pool, cell, &bios); in retry_bios_on_resume()
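
retry_bios_on_resume() asks should_error_unserviceable_bio() for a verdict: a nonzero status fails the whole cell immediately, while 0 means release the cell and park each bio for retry once the pool is resumed. A simplified single-flag model; struct bio, the parked chain and the deferred list here are toy stand-ins:

#include <stdio.h>

struct bio { int id; struct bio *next; };

static struct bio *deferred;    /* per-thin retry list in the driver */

static void retry_on_resume(struct bio *b)
{
    b->next = deferred;
    deferred = b;
}

static void retry_bios_on_resume(struct bio *parked, int error_if_no_space)
{
    struct bio *b, *next;

    for (b = parked; b; b = next) {
        next = b->next;         /* grab before relinking */
        if (error_if_no_space)
            printf("bio %d -> fail with no-space\n", b->id);
        else
            retry_on_resume(b); /* serviced again after resume */
    }
}

int main(void)
{
    struct bio b1 = { 1, NULL }, b2 = { 2, &b1 };

    retry_bios_on_resume(&b2, 1);   /* error_if_no_space: fail now */
    retry_bios_on_resume(&b2, 0);   /* otherwise: park for retry */
    printf("deferred head: bio %d\n", deferred->id);
    return 0;
}
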
1651 struct pool *pool = tc->pool; in process_discard_cell_no_passdown() local
1652 struct dm_thin_new_mapping *m = get_next_mapping(pool); in process_discard_cell_no_passdown()
1664 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in process_discard_cell_no_passdown()
1665 pool->process_prepared_discard(m); in process_discard_cell_no_passdown()
1671 struct pool *pool = tc->pool; in break_up_discard_bio() local
1698 r = ensure_next_mapping(pool); in break_up_discard_bio()
1708 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1718 m = get_next_mapping(pool); in break_up_discard_bio()
1736 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in break_up_discard_bio()
1737 pool->process_prepared_discard(m); in break_up_discard_bio()
1789 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) { in process_discard_bio()
1800 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1810 struct pool *pool = tc->pool; in break_sharing() local
1820 retry_bios_on_resume(pool, cell); in break_sharing()
1826 cell_error(pool, cell); in break_sharing()
1844 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1845 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1862 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1878 struct pool *pool = tc->pool; in process_shared_bio() local
1886 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1897 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); in process_shared_bio()
1898 inc_all_io_entry(pool, bio); in process_shared_bio()
1911 struct pool *pool = tc->pool; in provision_block() local
1917 inc_all_io_entry(pool, bio); in provision_block()
1944 retry_bios_on_resume(pool, cell); in provision_block()
1950 cell_error(pool, cell); in provision_block()
1958 struct pool *pool = tc->pool; in process_cell() local
1964 cell_requeue(pool, cell); in process_cell()
1974 inc_all_io_entry(pool, bio); in process_cell()
1982 inc_all_io_entry(pool, bio); in process_cell()
2012 struct pool *pool = tc->pool; in process_bio() local
2022 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2040 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2044 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2055 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2060 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2101 cell_success(tc->pool, cell); in process_cell_success()
2106 cell_error(tc->pool, cell); in process_cell_fail()
2113 static int need_commit_due_to_time(struct pool *pool) in need_commit_due_to_time() argument
2115 return !time_in_range(jiffies, pool->last_commit_jiffies, in need_commit_due_to_time()
2116 pool->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
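
need_commit_due_to_time() uses time_in_range() so the comparison against last_commit_jiffies survives wrap-around of the jiffies counter. With unsigned arithmetic the same wrap-safe test is a subtraction, as in this sketch (COMMIT_PERIOD's tick value here is made up):

#include <stdio.h>

#define COMMIT_PERIOD 100   /* ticks; the driver measures in jiffies */

static int need_commit_due_to_time(unsigned long now, unsigned long last_commit)
{
    /* unsigned subtraction is wrap-safe, like time_in_range() */
    return now - last_commit > COMMIT_PERIOD;
}

int main(void)
{
    printf("%d\n", need_commit_due_to_time(150, 100));  /* 0: too soon */
    printf("%d\n", need_commit_due_to_time(250, 100));  /* 1: overdue */
    return 0;
}
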
2185 struct pool *pool = tc->pool; in process_thin_deferred_bios() local
2220 if (ensure_next_mapping(pool)) { in process_thin_deferred_bios()
2229 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2231 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2234 throttle_work_update(&pool->throttle); in process_thin_deferred_bios()
2235 dm_pool_issue_prefetches(pool->pmd); in process_thin_deferred_bios()
2259 static unsigned int sort_cells(struct pool *pool, struct list_head *cells) in sort_cells() argument
2268 pool->cell_sort_array[count++] = cell; in sort_cells()
2272 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); in sort_cells()
2279 struct pool *pool = tc->pool; in process_thin_deferred_cells() local
2294 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2297 cell = pool->cell_sort_array[i]; in process_thin_deferred_cells()
2305 if (ensure_next_mapping(pool)) { in process_thin_deferred_cells()
2307 list_add(&pool->cell_sort_array[j]->user_list, &cells); in process_thin_deferred_cells()
2316 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2318 pool->process_cell(tc, cell); in process_thin_deferred_cells()
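
sort_cells() copies the deferred cells into pool->cell_sort_array and sorts them with cmp_cells() (not in this listing; it compares the holder bios' start sectors) so the worker processes them in ascending disk order. An equivalent userspace sort over a simplified cell:

#include <stdio.h>
#include <stdlib.h>

struct cell { unsigned long long bi_sector; };

/* qsort hands us pointers to the array elements, which are themselves
 * cell pointers -- hence the double indirection, as in the driver. */
static int cmp_cells(const void *lhs, const void *rhs)
{
    const struct cell *l = *(const struct cell * const *)lhs;
    const struct cell *r = *(const struct cell * const *)rhs;

    if (l->bi_sector < r->bi_sector)
        return -1;
    return l->bi_sector > r->bi_sector;
}

int main(void)
{
    struct cell a = { 900 }, b = { 100 }, c = { 500 };
    struct cell *sort_array[] = { &a, &b, &c };
    size_t i, count = 3;

    qsort(sort_array, count, sizeof(sort_array[0]), cmp_cells);
    for (i = 0; i < count; i++)
        printf("%llu\n", sort_array[i]->bi_sector); /* 100 500 900 */
    return 0;
}
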
2332 static struct thin_c *get_first_thin(struct pool *pool) in get_first_thin() argument
2337 if (!list_empty(&pool->active_thins)) { in get_first_thin()
2338 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2346 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
2351 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2363 static void process_deferred_bios(struct pool *pool) in process_deferred_bios() argument
2369 tc = get_first_thin(pool); in process_deferred_bios()
2373 tc = get_next_thin(pool, tc); in process_deferred_bios()
2383 spin_lock_irq(&pool->lock); in process_deferred_bios()
2384 bio_list_merge(&bios, &pool->deferred_flush_bios); in process_deferred_bios()
2385 bio_list_init(&pool->deferred_flush_bios); in process_deferred_bios()
2387 bio_list_merge(&bio_completions, &pool->deferred_flush_completions); in process_deferred_bios()
2388 bio_list_init(&pool->deferred_flush_completions); in process_deferred_bios()
2389 spin_unlock_irq(&pool->lock); in process_deferred_bios()
2392 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) in process_deferred_bios()
2395 if (commit(pool)) { in process_deferred_bios()
2402 pool->last_commit_jiffies = jiffies; in process_deferred_bios()
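
process_deferred_bios() parks flush/FUA bios on pool->deferred_flush_bios (see issue() at kernel lines 778-780 above) and only lets them through once the metadata is committed; when no flushes are waiting and the transaction is clean, or COMMIT_PERIOD has not yet elapsed, it returns without committing. A sketch of that gate, reconstructed from the fragments above; the flag-based interface is invented for compactness:

#include <stdio.h>

#define COMMIT_PERIOD 100

static int commit_if_needed(int flushes_waiting, int txn_dirty,
                            unsigned long now, unsigned long *last_commit)
{
    if (!flushes_waiting &&
        !(txn_dirty && now - *last_commit > COMMIT_PERIOD))
        return 0;   /* nothing forces a commit yet */

    /* dm_pool_commit_metadata() would run here */
    *last_commit = now;
    return 1;       /* parked flush bios may now be issued */
}

int main(void)
{
    unsigned long last = 0;

    printf("%d\n", commit_if_needed(0, 1, 50, &last));  /* 0: too soon */
    printf("%d\n", commit_if_needed(1, 0, 60, &last));  /* 1: flush waits */
    printf("%d\n", commit_if_needed(0, 1, 200, &last)); /* 1: period hit */
    return 0;
}
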
2421 struct pool *pool = container_of(ws, struct pool, worker); in do_worker() local
2423 throttle_work_start(&pool->throttle); in do_worker()
2424 dm_pool_issue_prefetches(pool->pmd); in do_worker()
2425 throttle_work_update(&pool->throttle); in do_worker()
2426 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); in do_worker()
2427 throttle_work_update(&pool->throttle); in do_worker()
2428 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); in do_worker()
2429 throttle_work_update(&pool->throttle); in do_worker()
2430 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2); in do_worker()
2431 throttle_work_update(&pool->throttle); in do_worker()
2432 process_deferred_bios(pool); in do_worker()
2433 throttle_work_complete(&pool->throttle); in do_worker()
2442 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); in do_waker() local
2444 wake_worker(pool); in do_waker()
2445 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); in do_waker()
2455 struct pool *pool = container_of(to_delayed_work(ws), struct pool, in do_no_space_timeout() local
2458 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { in do_no_space_timeout()
2459 pool->pf.error_if_no_space = true; in do_no_space_timeout()
2460 notify_of_pool_mode_change(pool); in do_no_space_timeout()
2461 error_retry_list_with_code(pool, BLK_STS_NOSPC); in do_no_space_timeout()
2482 static void pool_work_wait(struct pool_work *pw, struct pool *pool, in pool_work_wait() argument
2487 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2525 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2530 static void set_discard_callbacks(struct pool *pool) in set_discard_callbacks() argument
2532 struct pool_c *pt = pool->ti->private; in set_discard_callbacks()
2535 pool->process_discard_cell = process_discard_cell_passdown; in set_discard_callbacks()
2536 pool->process_prepared_discard = process_prepared_discard_passdown_pt1; in set_discard_callbacks()
2537 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2; in set_discard_callbacks()
2539 pool->process_discard_cell = process_discard_cell_no_passdown; in set_discard_callbacks()
2540 pool->process_prepared_discard = process_prepared_discard_no_passdown; in set_discard_callbacks()
2544 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) in set_pool_mode() argument
2546 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2547 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); in set_pool_mode()
2548 enum pool_mode old_mode = get_pool_mode(pool); in set_pool_mode()
2557 dm_device_name(pool->pool_md)); in set_pool_mode()
2573 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2574 pool->process_bio = process_bio_fail; in set_pool_mode()
2575 pool->process_discard = process_bio_fail; in set_pool_mode()
2576 pool->process_cell = process_cell_fail; in set_pool_mode()
2577 pool->process_discard_cell = process_cell_fail; in set_pool_mode()
2578 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2579 pool->process_prepared_discard = process_prepared_discard_fail; in set_pool_mode()
2581 error_retry_list(pool); in set_pool_mode()
2586 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2587 pool->process_bio = process_bio_read_only; in set_pool_mode()
2588 pool->process_discard = process_bio_success; in set_pool_mode()
2589 pool->process_cell = process_cell_read_only; in set_pool_mode()
2590 pool->process_discard_cell = process_cell_success; in set_pool_mode()
2591 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2592 pool->process_prepared_discard = process_prepared_discard_success; in set_pool_mode()
2594 error_retry_list(pool); in set_pool_mode()
2606 pool->out_of_data_space = true; in set_pool_mode()
2607 pool->process_bio = process_bio_read_only; in set_pool_mode()
2608 pool->process_discard = process_discard_bio; in set_pool_mode()
2609 pool->process_cell = process_cell_read_only; in set_pool_mode()
2610 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2611 set_discard_callbacks(pool); in set_pool_mode()
2613 if (!pool->pf.error_if_no_space && no_space_timeout) in set_pool_mode()
2614 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); in set_pool_mode()
2619 cancel_delayed_work_sync(&pool->no_space_timeout); in set_pool_mode()
2620 pool->out_of_data_space = false; in set_pool_mode()
2621 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; in set_pool_mode()
2622 dm_pool_metadata_read_write(pool->pmd); in set_pool_mode()
2623 pool->process_bio = process_bio; in set_pool_mode()
2624 pool->process_discard = process_discard_bio; in set_pool_mode()
2625 pool->process_cell = process_cell; in set_pool_mode()
2626 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2627 set_discard_callbacks(pool); in set_pool_mode()
2631 pool->pf.mode = new_mode; in set_pool_mode()
2639 notify_of_pool_mode_change(pool); in set_pool_mode()
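
set_pool_mode() is a strategy switch: each mode installs its own process_bio/process_discard/process_cell/process_prepared_* handlers, so the hot paths never test the mode and simply call through the function pointers. A compressed sketch with two modes and a single callback; the driver installs half a dozen per mode, as the fragments above show:

#include <stdio.h>

enum pool_mode { PM_WRITE, PM_FAIL };

struct pool_ops {
    void (*process_bio)(int bio_id);
};

static void process_bio(int bio_id)      { printf("bio %d: mapped\n", bio_id); }
static void process_bio_fail(int bio_id) { printf("bio %d: -EIO\n", bio_id); }

static struct pool_ops ops;

static void set_pool_mode(enum pool_mode mode)
{
    /* swap the strategy once; callers never branch on the mode */
    ops.process_bio = (mode == PM_FAIL) ? process_bio_fail : process_bio;
}

int main(void)
{
    set_pool_mode(PM_WRITE);
    ops.process_bio(1);
    set_pool_mode(PM_FAIL);
    ops.process_bio(2);
    return 0;
}
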
2642 static void abort_transaction(struct pool *pool) in abort_transaction() argument
2644 const char *dev_name = dm_device_name(pool->pool_md); in abort_transaction()
2647 if (dm_pool_abort_metadata(pool->pmd)) { in abort_transaction()
2649 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2652 if (dm_pool_metadata_set_needs_check(pool->pmd)) { in abort_transaction()
2654 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2658 static void metadata_operation_failed(struct pool *pool, const char *op, int r) in metadata_operation_failed() argument
2661 dm_device_name(pool->pool_md), op, r); in metadata_operation_failed()
2663 abort_transaction(pool); in metadata_operation_failed()
2664 set_pool_mode(pool, PM_READ_ONLY); in metadata_operation_failed()
2678 struct pool *pool = tc->pool; in thin_defer_bio() local
2684 wake_worker(pool); in thin_defer_bio()
2689 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle() local
2691 throttle_lock(&pool->throttle); in thin_defer_bio_with_throttle()
2693 throttle_unlock(&pool->throttle); in thin_defer_bio_with_throttle()
2698 struct pool *pool = tc->pool; in thin_defer_cell() local
2700 throttle_lock(&pool->throttle); in thin_defer_cell()
2704 throttle_unlock(&pool->throttle); in thin_defer_cell()
2706 wake_worker(pool); in thin_defer_cell()
2741 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2756 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2786 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2791 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2815 static void requeue_bios(struct pool *pool) in requeue_bios() argument
2820 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2845 struct pool *pool = pt->pool; in disable_discard_passdown_if_not_supported() local
2856 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_discard_passdown_if_not_supported()
2865 static int bind_control_target(struct pool *pool, struct dm_target *ti) in bind_control_target() argument
2872 enum pool_mode old_mode = get_pool_mode(pool); in bind_control_target()
2882 pool->ti = ti; in bind_control_target()
2883 pool->pf = pt->adjusted_pf; in bind_control_target()
2884 pool->low_water_blocks = pt->low_water_blocks; in bind_control_target()
2886 set_pool_mode(pool, new_mode); in bind_control_target()
2891 static void unbind_control_target(struct pool *pool, struct dm_target *ti) in unbind_control_target() argument
2893 if (pool->ti == ti) in unbind_control_target()
2894 pool->ti = NULL; in unbind_control_target()
2912 static void __pool_destroy(struct pool *pool) in __pool_destroy() argument
2914 __pool_table_remove(pool); in __pool_destroy()
2916 vfree(pool->cell_sort_array); in __pool_destroy()
2917 if (dm_pool_metadata_close(pool->pmd) < 0) in __pool_destroy()
2920 dm_bio_prison_destroy(pool->prison); in __pool_destroy()
2921 dm_kcopyd_client_destroy(pool->copier); in __pool_destroy()
2923 cancel_delayed_work_sync(&pool->waker); in __pool_destroy()
2924 cancel_delayed_work_sync(&pool->no_space_timeout); in __pool_destroy()
2925 if (pool->wq) in __pool_destroy()
2926 destroy_workqueue(pool->wq); in __pool_destroy()
2928 if (pool->next_mapping) in __pool_destroy()
2929 mempool_free(pool->next_mapping, &pool->mapping_pool); in __pool_destroy()
2930 mempool_exit(&pool->mapping_pool); in __pool_destroy()
2931 dm_deferred_set_destroy(pool->shared_read_ds); in __pool_destroy()
2932 dm_deferred_set_destroy(pool->all_io_ds); in __pool_destroy()
2933 kfree(pool); in __pool_destroy()
2938 static struct pool *pool_create(struct mapped_device *pool_md, in pool_create()
2946 struct pool *pool; in pool_create() local
2953 return (struct pool *)pmd; in pool_create()
2956 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in pool_create()
2957 if (!pool) { in pool_create()
2963 pool->pmd = pmd; in pool_create()
2964 pool->sectors_per_block = block_size; in pool_create()
2966 pool->sectors_per_block_shift = -1; in pool_create()
2968 pool->sectors_per_block_shift = __ffs(block_size); in pool_create()
2969 pool->low_water_blocks = 0; in pool_create()
2970 pool_features_init(&pool->pf); in pool_create()
2971 pool->prison = dm_bio_prison_create(); in pool_create()
2972 if (!pool->prison) { in pool_create()
2978 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in pool_create()
2979 if (IS_ERR(pool->copier)) { in pool_create()
2980 r = PTR_ERR(pool->copier); in pool_create()
2990 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in pool_create()
2991 if (!pool->wq) { in pool_create()
2997 throttle_init(&pool->throttle); in pool_create()
2998 INIT_WORK(&pool->worker, do_worker); in pool_create()
2999 INIT_DELAYED_WORK(&pool->waker, do_waker); in pool_create()
3000 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); in pool_create()
3001 spin_lock_init(&pool->lock); in pool_create()
3002 bio_list_init(&pool->deferred_flush_bios); in pool_create()
3003 bio_list_init(&pool->deferred_flush_completions); in pool_create()
3004 INIT_LIST_HEAD(&pool->prepared_mappings); in pool_create()
3005 INIT_LIST_HEAD(&pool->prepared_discards); in pool_create()
3006 INIT_LIST_HEAD(&pool->prepared_discards_pt2); in pool_create()
3007 INIT_LIST_HEAD(&pool->active_thins); in pool_create()
3008 pool->low_water_triggered = false; in pool_create()
3009 pool->suspended = true; in pool_create()
3010 pool->out_of_data_space = false; in pool_create()
3012 pool->shared_read_ds = dm_deferred_set_create(); in pool_create()
3013 if (!pool->shared_read_ds) { in pool_create()
3019 pool->all_io_ds = dm_deferred_set_create(); in pool_create()
3020 if (!pool->all_io_ds) { in pool_create()
3026 pool->next_mapping = NULL; in pool_create()
3027 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE, in pool_create()
3035 pool->cell_sort_array = in pool_create()
3037 sizeof(*pool->cell_sort_array))); in pool_create()
3038 if (!pool->cell_sort_array) { in pool_create()
3044 pool->ref_count = 1; in pool_create()
3045 pool->last_commit_jiffies = jiffies; in pool_create()
3046 pool->pool_md = pool_md; in pool_create()
3047 pool->md_dev = metadata_dev; in pool_create()
3048 pool->data_dev = data_dev; in pool_create()
3049 __pool_table_insert(pool); in pool_create()
3051 return pool; in pool_create()
3054 mempool_exit(&pool->mapping_pool); in pool_create()
3056 dm_deferred_set_destroy(pool->all_io_ds); in pool_create()
3058 dm_deferred_set_destroy(pool->shared_read_ds); in pool_create()
3060 destroy_workqueue(pool->wq); in pool_create()
3062 dm_kcopyd_client_destroy(pool->copier); in pool_create()
3064 dm_bio_prison_destroy(pool->prison); in pool_create()
3066 kfree(pool); in pool_create()
3074 static void __pool_inc(struct pool *pool) in __pool_inc() argument
3077 pool->ref_count++; in __pool_inc()
3080 static void __pool_dec(struct pool *pool) in __pool_dec() argument
3083 BUG_ON(!pool->ref_count); in __pool_dec()
3084 if (!--pool->ref_count) in __pool_dec()
3085 __pool_destroy(pool); in __pool_dec()
3088 static struct pool *__pool_find(struct mapped_device *pool_md, in __pool_find()
3094 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); in __pool_find() local
3096 if (pool) { in __pool_find()
3097 if (pool->pool_md != pool_md) { in __pool_find()
3101 if (pool->data_dev != data_dev) { in __pool_find()
3105 __pool_inc(pool); in __pool_find()
3108 pool = __pool_table_lookup(pool_md); in __pool_find()
3109 if (pool) { in __pool_find()
3110 if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) { in __pool_find()
3114 __pool_inc(pool); in __pool_find()
3117 pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error); in __pool_find()
3122 return pool; in __pool_find()
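
__pool_find() looks the pool up by metadata device first and by mapped_device second, rejects mismatched handle combinations, takes a reference (__pool_inc()) on a hit, and falls back to pool_create() on a double miss. A single-entry toy model of that find-or-create-with-refcount contract; the string handles stand in for the block_device and mapped_device pointers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pool {
    const char *pool_md;    /* stand-ins for mapped_device / bdev */
    const char *md_dev;
    unsigned ref_count;
};

static struct pool *the_pool;   /* one-entry "table" for brevity */

static struct pool *pool_find(const char *pool_md, const char *md_dev)
{
    if (the_pool) {
        if (strcmp(the_pool->pool_md, pool_md) ||
            strcmp(the_pool->md_dev, md_dev))
            return NULL;        /* handles disagree: reject */
        the_pool->ref_count++;  /* __pool_inc() */
        return the_pool;
    }
    the_pool = calloc(1, sizeof(*the_pool));
    if (!the_pool)
        return NULL;
    the_pool->pool_md = pool_md;
    the_pool->md_dev = md_dev;
    the_pool->ref_count = 1;    /* pool_create() path */
    return the_pool;
}

int main(void)
{
    struct pool *p   = pool_find("dm-0", "sdb1");
    struct pool *q   = pool_find("dm-0", "sdb1");
    struct pool *bad = pool_find("dm-1", "sdb1");

    printf("%u %s\n", p->ref_count, bad ? "bug" : "rejected");
    (void)q;
    free(the_pool);
    return 0;
}
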
3136 unbind_control_target(pt->pool, ti); in pool_dtr()
3137 __pool_dec(pt->pool); in pool_dtr()
3197 struct pool *pool = context; in metadata_low_callback() local
3200 dm_device_name(pool->pool_md)); in metadata_low_callback()
3202 dm_table_event(pool->ti->table); in metadata_low_callback()
3218 struct pool *pool = context; in metadata_pre_commit_callback() local
3220 return blkdev_issue_flush(pool->data_dev); in metadata_pre_commit_callback()
3291 struct pool *pool; in pool_ctr() local
3367 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, in pool_ctr()
3369 if (IS_ERR(pool)) { in pool_ctr()
3370 r = PTR_ERR(pool); in pool_ctr()
3380 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
3386 pt->pool = pool; in pool_ctr()
3412 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
3415 pool); in pool_ctr()
3421 dm_pool_register_pre_commit_callback(pool->pmd, in pool_ctr()
3422 metadata_pre_commit_callback, pool); in pool_ctr()
3429 __pool_dec(pool); in pool_ctr()
3445 struct pool *pool = pt->pool; in pool_map() local
3450 spin_lock_irq(&pool->lock); in pool_map()
3452 spin_unlock_irq(&pool->lock); in pool_map()
3461 struct pool *pool = pt->pool; in maybe_resize_data_dev() local
3467 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3469 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3472 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3478 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3483 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3485 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3491 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3493 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3495 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); in maybe_resize_data_dev()
3509 struct pool *pool = pt->pool; in maybe_resize_metadata_dev() local
3514 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3516 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3519 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3525 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3530 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3532 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3536 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3538 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3541 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE) in maybe_resize_metadata_dev()
3542 set_pool_mode(pool, PM_WRITE); in maybe_resize_metadata_dev()
3544 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3546 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); in maybe_resize_metadata_dev()
3572 struct pool *pool = pt->pool; in pool_preresume() local
3577 r = bind_control_target(pool, ti); in pool_preresume()
3590 (void) commit(pool); in pool_preresume()
3597 if (r && get_pool_mode(pool) == PM_FAIL) in pool_preresume()
3603 static void pool_suspend_active_thins(struct pool *pool) in pool_suspend_active_thins() argument
3608 tc = get_first_thin(pool); in pool_suspend_active_thins()
3611 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3615 static void pool_resume_active_thins(struct pool *pool) in pool_resume_active_thins() argument
3620 tc = get_first_thin(pool); in pool_resume_active_thins()
3623 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
3630 struct pool *pool = pt->pool; in pool_resume() local
3636 requeue_bios(pool); in pool_resume()
3637 pool_resume_active_thins(pool); in pool_resume()
3639 spin_lock_irq(&pool->lock); in pool_resume()
3640 pool->low_water_triggered = false; in pool_resume()
3641 pool->suspended = false; in pool_resume()
3642 spin_unlock_irq(&pool->lock); in pool_resume()
3644 do_waker(&pool->waker.work); in pool_resume()
3650 struct pool *pool = pt->pool; in pool_presuspend() local
3652 spin_lock_irq(&pool->lock); in pool_presuspend()
3653 pool->suspended = true; in pool_presuspend()
3654 spin_unlock_irq(&pool->lock); in pool_presuspend()
3656 pool_suspend_active_thins(pool); in pool_presuspend()
3662 struct pool *pool = pt->pool; in pool_presuspend_undo() local
3664 pool_resume_active_thins(pool); in pool_presuspend_undo()
3666 spin_lock_irq(&pool->lock); in pool_presuspend_undo()
3667 pool->suspended = false; in pool_presuspend_undo()
3668 spin_unlock_irq(&pool->lock); in pool_presuspend_undo()
3674 struct pool *pool = pt->pool; in pool_postsuspend() local
3676 cancel_delayed_work_sync(&pool->waker); in pool_postsuspend()
3677 cancel_delayed_work_sync(&pool->no_space_timeout); in pool_postsuspend()
3678 flush_workqueue(pool->wq); in pool_postsuspend()
3679 (void) commit(pool); in pool_postsuspend()
3705 static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool) in process_create_thin_mesg() argument
3718 r = dm_pool_create_thin(pool->pmd, dev_id); in process_create_thin_mesg()
3728 static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool) in process_create_snap_mesg() argument
3746 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); in process_create_snap_mesg()
3756 static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool) in process_delete_mesg() argument
3769 r = dm_pool_delete_thin_device(pool->pmd, dev_id); in process_delete_mesg()
3776 static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool) in process_set_transaction_id_mesg() argument
3795 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); in process_set_transaction_id_mesg()
3805 static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool) in process_reserve_metadata_snap_mesg() argument
3813 (void) commit(pool); in process_reserve_metadata_snap_mesg()
3815 r = dm_pool_reserve_metadata_snap(pool->pmd); in process_reserve_metadata_snap_mesg()
3822 static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool) in process_release_metadata_snap_mesg() argument
3830 r = dm_pool_release_metadata_snap(pool->pmd); in process_release_metadata_snap_mesg()
3851 struct pool *pool = pt->pool; in pool_message() local
3853 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) { in pool_message()
3855 dm_device_name(pool->pool_md)); in pool_message()
3860 r = process_create_thin_mesg(argc, argv, pool); in pool_message()
3863 r = process_create_snap_mesg(argc, argv, pool); in pool_message()
3866 r = process_delete_mesg(argc, argv, pool); in pool_message()
3869 r = process_set_transaction_id_mesg(argc, argv, pool); in pool_message()
3872 r = process_reserve_metadata_snap_mesg(argc, argv, pool); in pool_message()
3875 r = process_release_metadata_snap_mesg(argc, argv, pool); in pool_message()
3881 (void) commit(pool); in pool_message()
3931 struct pool *pool = pt->pool; in pool_status() local
3935 if (get_pool_mode(pool) == PM_FAIL) { in pool_status()
3942 (void) commit(pool); in pool_status()
3944 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); in pool_status()
3947 dm_device_name(pool->pool_md), r); in pool_status()
3951 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); in pool_status()
3954 dm_device_name(pool->pool_md), r); in pool_status()
3958 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); in pool_status()
3961 dm_device_name(pool->pool_md), r); in pool_status()
3965 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); in pool_status()
3968 dm_device_name(pool->pool_md), r); in pool_status()
3972 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); in pool_status()
3975 dm_device_name(pool->pool_md), r); in pool_status()
3979 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); in pool_status()
3982 dm_device_name(pool->pool_md), r); in pool_status()
3998 mode = get_pool_mode(pool); in pool_status()
4006 if (!pool->pf.discard_enabled) in pool_status()
4008 else if (pool->pf.discard_passdown) in pool_status()
4013 if (pool->pf.error_if_no_space) in pool_status()
4018 if (dm_pool_metadata_needs_check(pool->pmd)) in pool_status()
4031 (unsigned long)pool->sectors_per_block, in pool_status()
4057 struct pool *pool = pt->pool; in pool_io_hints() local
4069 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
4070 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
4081 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
4082 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
4083 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
4086 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4087 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
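
pool_io_hints() pins io_min/io_opt to the pool block size and, when the stacked queue's max_sectors is smaller than a block, shrinks it until it divides the block size so a bio never straddles a thin block. The loop body is not part of this listing; the decrement search below is a simplified stand-in for whatever adjustment the driver applies there, with is_factor() reconstructed from its call sites:

#include <stdio.h>

static int is_factor(unsigned block_size, unsigned n)
{
    return n && !(block_size % n);
}

static unsigned clamp_max_sectors(unsigned sectors_per_block,
                                  unsigned max_sectors)
{
    if (max_sectors < sectors_per_block)
        while (!is_factor(sectors_per_block, max_sectors))
            max_sectors--;      /* settle on an even divisor */
    return max_sectors;
}

int main(void)
{
    /* 96-sector blocks with a 64-sector queue limit -> 48 */
    printf("%u\n", clamp_max_sectors(96, 64));
    return 0;
}
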
4153 spin_lock_irq(&tc->pool->lock); in thin_dtr()
4155 spin_unlock_irq(&tc->pool->lock); in thin_dtr()
4163 __pool_dec(tc->pool); in thin_dtr()
4248 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4249 if (!tc->pool) { in thin_ctr()
4254 __pool_inc(tc->pool); in thin_ctr()
4256 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4262 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4268 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4279 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4287 spin_lock_irq(&tc->pool->lock); in thin_ctr()
4288 if (tc->pool->suspended) { in thin_ctr()
4289 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4297 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4298 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4314 __pool_dec(tc->pool); in thin_ctr()
4344 struct pool *pool = h->tc->pool; in thin_endio() local
4350 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4355 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4362 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4364 list_add_tail(&m->list, &pool->prepared_discards); in thin_endio()
4365 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4366 wake_worker(pool); in thin_endio()
4417 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4439 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4442 tc->pool->sectors_per_block) - 1); in thin_status()
4472 struct pool *pool = tc->pool; in thin_iterate_devices() local
4478 if (!pool->ti) in thin_iterate_devices()
4481 blocks = pool->ti->len; in thin_iterate_devices()
4482 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4484 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4492 struct pool *pool = tc->pool; in thin_io_hints() local
4494 if (pool->pf.discard_enabled) { in thin_io_hints()
4495 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in thin_io_hints()
4496 limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE; in thin_io_hints()