Lines matching "era" in drivers/md/dm-era-target.c (the device-mapper era target)

15 #define DM_MSG_PREFIX "era"
47 * after digesting into the era array.
216 DMERR("Era metadata version %u found, but only versions between %u and %u supported.", in check_metadata_version()
283 * We preallocate 2 writesets. When an era rolls over we switch between them.
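The comment above describes a double-buffering scheme: two writesets are allocated up front so that rolling an era over never needs an allocation. A minimal sketch of how the swap might look; struct writeset's contents and the field names here are assumptions condensed from the fragments in this listing.

/* Hypothetical condensation of the two-writeset scheme; the real
 * struct writeset also carries on-disk metadata (e.g. a bitset root). */
struct writeset {
	unsigned long *bits;		/* blocks written during this era */
};

struct era_metadata_sketch {
	struct writeset writesets[2];	/* preallocated pair */
	struct writeset *current_writeset;
};

/* On rollover, whichever writeset is not in use becomes current. */
static struct writeset *next_writeset(struct era_metadata_sketch *md)
{
	return (md->current_writeset == &md->writesets[0]) ?
		&md->writesets[1] : &md->writesets[0];
}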
467 DMERR("couldn't create era array"); in create_fresh_metadata()
674 * Writesets get 'digested' into the main era array.
682 uint32_t era; member
698 uint64_t key = d->era; in metadata_digest_remove_writeset()
759 d->era = key; in metadata_digest_lookup_writeset()
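Lines 682 through 759 show that a digest tracks the era whose writeset it is consuming, and that this era number doubles as the key under which archived writesets are stored. A hedged sketch of the removal step once a writeset has been fully digested; the tree-info and root field names are assumptions, while dm_btree_remove() is the real persistent-data API for deleting a key from a btree.

#include "persistent-data/dm-btree.h"

/* Field names (writeset_tree_info/writeset_tree_root) are assumed. */
static int remove_archived_writeset(struct era_metadata *md, uint32_t era)
{
	uint64_t key = era;	/* archived writesets are keyed by era */

	return dm_btree_remove(&md->writeset_tree_info,
			       md->writeset_tree_root, &key,
			       &md->writeset_tree_root);
}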
962 DMERR("%s: new era failed", __func__); in metadata_era_rollover()
1030 * Metadata snapshots allow userland to access era data.
1044 DMERR("%s: era rollover failed", __func__); in metadata_take_snap()
1078 DMERR("%s: couldn't inc era tree root", __func__); in metadata_take_snap()
1126 DMERR("%s: error deleting era array clone", __func__); in metadata_drop_snap()
1141 uint32_t era; member
1165 s->era = md->current_era; in metadata_get_stats()
1172 struct era { struct
1212 static bool block_size_is_power_of_two(struct era *era) in block_size_is_power_of_two() argument
1214 return era->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
1217 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1221 if (!block_size_is_power_of_two(era)) in get_block()
1222 (void) sector_div(block_nr, era->sectors_per_block); in get_block()
1224 block_nr >>= era->sectors_per_block_shift; in get_block()
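get_block() above divides the bio's sector number by the block size, taking a shift fast path when block_size_is_power_of_two() holds (a shift of -1 marks the general case). The same arithmetic as standalone C, with names local to the example:

#include <stdint.h>

/*
 * Standalone restatement of get_block(): map a sector to a block
 * number, shifting when the block size is a power of two and falling
 * back to a full division otherwise.
 */
static uint64_t sector_to_block(uint64_t sector,
				uint32_t sectors_per_block,
				int sectors_per_block_shift)
{
	if (sectors_per_block_shift < 0)
		return sector / sectors_per_block;	/* arbitrary size */
	return sector >> sectors_per_block_shift;	/* power of two */
}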
1229 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1231 bio_set_dev(bio, era->origin_dev->bdev); in remap_to_origin()
1239 static void wake_worker(struct era *era) in wake_worker() argument
1241 if (!atomic_read(&era->suspended)) in wake_worker()
1242 queue_work(era->wq, &era->worker); in wake_worker()
1245 static void process_old_eras(struct era *era) in process_old_eras() argument
1249 if (!era->digest.step) in process_old_eras()
1252 r = era->digest.step(era->md, &era->digest); in process_old_eras()
1255 era->digest.step = NULL; in process_old_eras()
1257 } else if (era->digest.step) in process_old_eras()
1258 wake_worker(era); in process_old_eras()
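process_old_eras() is a bounded-work loop: one digest step per worker pass, with the step function cleared on error and the worker re-queued while work remains. A reconstruction of the whole function; the lines that did not match "era" (the error message in particular) are assumptions.

static void process_old_eras_sketch(struct era *era)
{
	int r;

	if (!era->digest.step)
		return;				/* no digest in progress */

	r = era->digest.step(era->md, &era->digest);
	if (r < 0) {
		DMERR("%s: digest step failed", __func__); /* assumed text */
		era->digest.step = NULL;	/* abandon on error */

	} else if (era->digest.step)
		wake_worker(era);		/* more steps remain */
}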
1261 static void process_deferred_bios(struct era *era) in process_deferred_bios() argument
1269 struct writeset *ws = era->md->current_writeset; in process_deferred_bios()
1274 spin_lock(&era->deferred_lock); in process_deferred_bios()
1275 bio_list_merge(&deferred_bios, &era->deferred_bios); in process_deferred_bios()
1276 bio_list_init(&era->deferred_bios); in process_deferred_bios()
1277 spin_unlock(&era->deferred_lock); in process_deferred_bios()
1283 r = writeset_test_and_set(&era->md->bitset_info, ws, in process_deferred_bios()
1284 get_block(era, bio)); in process_deferred_bios()
1298 r = metadata_commit(era->md); in process_deferred_bios()
1314 set_bit(get_block(era, bio), ws->bits); in process_deferred_bios()
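The ordering visible in these fragments matters: the block is marked in the on-disk writeset, the metadata is committed, and only then is the in-core bit set and the bio released, so no write completes before the era metadata recording it is durable. A condensed per-bio sketch using names from the fragments; the real function batches bios and commits once, and the error paths here are assumptions.

r = writeset_test_and_set(&era->md->bitset_info, ws,
			  get_block(era, bio));
if (r < 0) {
	bio_io_error(bio);		/* couldn't record the write */
} else {
	r = metadata_commit(era->md);
	if (r)
		bio_io_error(bio);	/* commit failed: don't ack */
	else {
		set_bit(get_block(era, bio), ws->bits);
		submit_bio_noacct(bio);	/* safe to let the write proceed */
	}
}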
1321 static void process_rpc_calls(struct era *era) in process_rpc_calls() argument
1329 spin_lock(&era->rpc_lock); in process_rpc_calls()
1330 list_splice_init(&era->rpc_calls, &calls); in process_rpc_calls()
1331 spin_unlock(&era->rpc_lock); in process_rpc_calls()
1334 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); in process_rpc_calls()
1339 r = metadata_commit(era->md); in process_rpc_calls()
1349 static void kick_off_digest(struct era *era) in kick_off_digest() argument
1351 if (era->md->archived_writesets) { in kick_off_digest()
1352 era->md->archived_writesets = false; in kick_off_digest()
1353 metadata_digest_start(era->md, &era->digest); in kick_off_digest()
1359 struct era *era = container_of(ws, struct era, worker); in do_work() local
1361 kick_off_digest(era); in do_work()
1362 process_old_eras(era); in do_work()
1363 process_deferred_bios(era); in do_work()
1364 process_rpc_calls(era); in do_work()
1367 static void defer_bio(struct era *era, struct bio *bio) in defer_bio() argument
1369 spin_lock(&era->deferred_lock); in defer_bio()
1370 bio_list_add(&era->deferred_bios, bio); in defer_bio()
1371 spin_unlock(&era->deferred_lock); in defer_bio()
1373 wake_worker(era); in defer_bio()
1379 static int perform_rpc(struct era *era, struct rpc *rpc) in perform_rpc() argument
1384 spin_lock(&era->rpc_lock); in perform_rpc()
1385 list_add(&rpc->list, &era->rpc_calls); in perform_rpc()
1386 spin_unlock(&era->rpc_lock); in perform_rpc()
1388 wake_worker(era); in perform_rpc()
1394 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md)) in in_worker0() argument
1401 return perform_rpc(era, &rpc); in in_worker0()
1404 static int in_worker1(struct era *era, in in_worker1() argument
1413 return perform_rpc(era, &rpc); in in_worker1()
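in_worker0()/in_worker1() wrap perform_rpc(), which makes the worker thread the single mutator of the metadata: callers queue a struct rpc and block until the worker has run it and committed. The struct layout below is inferred from the fn0/fn1/arg/result uses in process_rpc_calls() above; the completion-based wait is an assumption, though it is the idiomatic kernel mechanism for this.

#include <linux/completion.h>

/* Assumed layout, inferred from process_rpc_calls() above. */
struct rpc {
	struct list_head list;
	int (*fn0)(struct era_metadata *md);
	int (*fn1)(struct era_metadata *md, void *arg);
	void *arg;
	int result;
	struct completion complete;
};

static int perform_rpc_sketch(struct era *era, struct rpc *rpc)
{
	rpc->result = 0;
	init_completion(&rpc->complete);

	spin_lock(&era->rpc_lock);
	list_add(&rpc->list, &era->rpc_calls);
	spin_unlock(&era->rpc_lock);

	wake_worker(era);
	wait_for_completion(&rpc->complete);	/* worker fills result */

	return rpc->result;
}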
1416 static void start_worker(struct era *era) in start_worker() argument
1418 atomic_set(&era->suspended, 0); in start_worker()
1421 static void stop_worker(struct era *era) in stop_worker() argument
1423 atomic_set(&era->suspended, 1); in stop_worker()
1424 drain_workqueue(era->wq); in stop_worker()
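In stop_worker() the order is deliberate, as the annotated restatement below notes: the suspended flag is raised first so wake_worker() stops queueing, and only then is the workqueue drained, guaranteeing the worker is idle once suspend completes.

/* The two lines above, annotated: flag first, then drain. */
atomic_set(&era->suspended, 1);	/* wake_worker() now does nothing */
drain_workqueue(era->wq);	/* waits for queued and running work */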
1432 static void era_destroy(struct era *era) in era_destroy() argument
1434 if (era->md) in era_destroy()
1435 metadata_close(era->md); in era_destroy()
1437 if (era->wq) in era_destroy()
1438 destroy_workqueue(era->wq); in era_destroy()
1440 if (era->origin_dev) in era_destroy()
1441 dm_put_device(era->ti, era->origin_dev); in era_destroy()
1443 if (era->metadata_dev) in era_destroy()
1444 dm_put_device(era->ti, era->metadata_dev); in era_destroy()
1446 kfree(era); in era_destroy()
1449 static dm_block_t calc_nr_blocks(struct era *era) in calc_nr_blocks() argument
1451 return dm_sector_div_up(era->ti->len, era->sectors_per_block); in calc_nr_blocks()
1469 struct era *era; in era_ctr() local
1477 era = kzalloc(sizeof(*era), GFP_KERNEL); in era_ctr()
1478 if (!era) { in era_ctr()
1479 ti->error = "Error allocating era structure"; in era_ctr()
1483 era->ti = ti; in era_ctr()
1486 &era->metadata_dev); in era_ctr()
1489 era_destroy(era); in era_ctr()
1494 &era->origin_dev); in era_ctr()
1497 era_destroy(era); in era_ctr()
1501 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); in era_ctr()
1504 era_destroy(era); in era_ctr()
1508 r = dm_set_target_max_io_len(ti, era->sectors_per_block); in era_ctr()
1511 era_destroy(era); in era_ctr()
1515 if (!valid_block_size(era->sectors_per_block)) { in era_ctr()
1517 era_destroy(era); in era_ctr()
1520 if (era->sectors_per_block & (era->sectors_per_block - 1)) in era_ctr()
1521 era->sectors_per_block_shift = -1; in era_ctr()
1523 era->sectors_per_block_shift = __ffs(era->sectors_per_block); in era_ctr()
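The constructor classifies the block size once: x & (x - 1) is zero exactly when x is a power of two, and __ffs() then yields the shift (e.g. __ffs(8) == 3). The same check in standalone C, using __builtin_ctz() as a userspace stand-in for the kernel's __ffs():

#include <stdint.h>

/* Returns log2(sectors_per_block) for a power of two, else -1 so that
 * callers such as get_block() fall back to division. The argument is
 * assumed nonzero, since valid_block_size() has already run. */
static int block_size_shift(uint32_t sectors_per_block)
{
	if (sectors_per_block & (sectors_per_block - 1))
		return -1;			/* not a power of two */
	return __builtin_ctz(sectors_per_block);
}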
1525 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); in era_ctr()
1528 era_destroy(era); in era_ctr()
1531 era->md = md; in era_ctr()
1533 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in era_ctr()
1534 if (!era->wq) { in era_ctr()
1536 era_destroy(era); in era_ctr()
1539 INIT_WORK(&era->worker, do_work); in era_ctr()
1541 spin_lock_init(&era->deferred_lock); in era_ctr()
1542 bio_list_init(&era->deferred_bios); in era_ctr()
1544 spin_lock_init(&era->rpc_lock); in era_ctr()
1545 INIT_LIST_HEAD(&era->rpc_calls); in era_ctr()
1547 ti->private = era; in era_ctr()
1563 struct era *era = ti->private; in era_map() local
1564 dm_block_t block = get_block(era, bio); in era_map()
1569 * block is marked in this era. in era_map()
1571 remap_to_origin(era, bio); in era_map()
1578 !metadata_current_marked(era->md, block)) { in era_map()
1579 defer_bio(era, bio); in era_map()
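era_map() remaps every bio to the origin up front; only a write to a block not yet marked in the current era is detoured through the worker, so steady-state I/O never takes a lock. A hedged reconstruction; the flush test and the DM_MAPIO_* return values are assumptions based on standard device-mapper conventions.

static int era_map_sketch(struct dm_target *ti, struct bio *bio)
{
	struct era *era = ti->private;
	dm_block_t block = get_block(era, bio);

	/* All bios eventually go to the origin device. */
	remap_to_origin(era, bio);

	/* A write to a block not yet marked in this era must pass
	 * through the worker so the writeset is updated first. */
	if (!(bio->bi_opf & REQ_PREFLUSH) &&
	    bio_data_dir(bio) == WRITE &&
	    !metadata_current_marked(era->md, block)) {
		defer_bio(era, bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}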
1589 struct era *era = ti->private; in era_postsuspend() local
1591 r = in_worker0(era, metadata_era_archive); in era_postsuspend()
1593 DMERR("%s: couldn't archive current era", __func__); in era_postsuspend()
1597 stop_worker(era); in era_postsuspend()
1599 r = metadata_commit(era->md); in era_postsuspend()
1609 struct era *era = ti->private; in era_preresume() local
1610 dm_block_t new_size = calc_nr_blocks(era); in era_preresume()
1612 if (era->nr_blocks != new_size) { in era_preresume()
1613 r = metadata_resize(era->md, &new_size); in era_preresume()
1619 r = metadata_commit(era->md); in era_preresume()
1625 era->nr_blocks = new_size; in era_preresume()
1628 start_worker(era); in era_preresume()
1630 r = in_worker0(era, metadata_era_rollover); in era_preresume()
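era_preresume() ties the pieces together: grow the metadata if the target was resized, commit, remember the new size, restart the worker, then roll the era over so the resumed device begins a fresh era. A reconstruction with assumed error handling for the lines the match elides.

static int era_preresume_sketch(struct dm_target *ti)
{
	int r;
	struct era *era = ti->private;
	dm_block_t new_size = calc_nr_blocks(era);

	if (era->nr_blocks != new_size) {
		r = metadata_resize(era->md, &new_size);
		if (r)
			return r;

		r = metadata_commit(era->md);
		if (r)
			return r;

		era->nr_blocks = new_size;
	}

	start_worker(era);

	return in_worker0(era, metadata_era_rollover);	/* fresh era */
}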
1643 * <current era> <held metadata root | '-'>
1649 struct era *era = ti->private; in era_status() local
1656 r = in_worker1(era, metadata_get_stats, &stats); in era_status()
1664 (unsigned int) stats.era); in era_status()
1673 format_dev_t(buf, era->metadata_dev->bdev->bd_dev); in era_status()
1675 format_dev_t(buf, era->origin_dev->bdev->bd_dev); in era_status()
1676 DMEMIT("%s %u", buf, era->sectors_per_block); in era_status()
1693 struct era *era = ti->private; in era_message() local
1701 return in_worker0(era, metadata_checkpoint); in era_message()
1704 return in_worker0(era, metadata_take_snap); in era_message()
1707 return in_worker0(era, metadata_drop_snap); in era_message()
1721 struct era *era = ti->private; in era_iterate_devices() local
1723 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); in era_iterate_devices()
1728 struct era *era = ti->private; in era_io_hints() local
1733 * era device's blocksize (io_opt is a factor) do not override them. in era_io_hints()
1735 if (io_opt_sectors < era->sectors_per_block || in era_io_hints()
1736 do_div(io_opt_sectors, era->sectors_per_block)) { in era_io_hints()
1738 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); in era_io_hints()
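The io_hints test leans on do_div(), which divides its first argument in place and returns the remainder: io_opt is kept only if it is already at least one era block and an exact multiple of it. Reconstructed below; resetting io_min before setting io_opt is an assumption about the elided line.

static void era_io_hints_sketch(struct dm_target *ti,
				struct queue_limits *limits)
{
	struct era *era = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/* A nonzero remainder means io_opt is not a multiple of the
	 * block size, so override the stacked hints with ours. */
	if (io_opt_sectors < era->sectors_per_block ||
	    do_div(io_opt_sectors, era->sectors_per_block)) {
		blk_limits_io_min(limits, 0);	/* assumed */
		blk_limits_io_opt(limits,
				  era->sectors_per_block << SECTOR_SHIFT);
	}
}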
1745 .name = "era",
1758 module_dm(era);
1760 MODULE_DESCRIPTION(DM_NAME " era target");