Lines Matching defs:cache

10 #include "free-space-cache.h"
141 void btrfs_get_block_group(struct btrfs_block_group *cache)
143 refcount_inc(&cache->refs);
146 void btrfs_put_block_group(struct btrfs_block_group *cache)
148 if (refcount_dec_and_test(&cache->refs)) {
149 WARN_ON(cache->pinned > 0);
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
159 WARN_ON(cache->reserved > 0);
166 if (WARN_ON(!list_empty(&cache->discard_list)))
167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
168 cache);
170 kfree(cache->free_space_ctl);
171 kfree(cache->physical_map);
172 kfree(cache);
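
btrfs_get_block_group() and btrfs_put_block_group() above are plain reference counting: refcount_inc() on get, and on the final refcount_dec_and_test() the free space ctl, the physical map and the block group itself are freed. A minimal caller sketch (not kernel code), assuming the lookup helper that appears later in this listing, btrfs_lookup_block_group(), which returns the group with a reference held:

    /* Sketch only: typical take/drop pattern around a block group lookup. */
    static u64 read_bg_length(struct btrfs_fs_info *fs_info, u64 bytenr)
    {
            struct btrfs_block_group *bg;
            u64 length;

            bg = btrfs_lookup_block_group(fs_info, bytenr);
            if (!bg)
                    return 0;
            length = bg->length;
            /* The final put frees free_space_ctl, physical_map and the bg. */
            btrfs_put_block_group(bg);
            return length;
    }
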
177 * This adds the block group to the fs_info rb tree for the block group cache
184 struct btrfs_block_group *cache;
194 cache = rb_entry(parent, struct btrfs_block_group, cache_node);
195 if (block_group->start < cache->start) {
197 } else if (block_group->start > cache->start) {
222 struct btrfs_block_group *cache, *ret = NULL;
230 cache = rb_entry(n, struct btrfs_block_group, cache_node);
231 end = cache->start + cache->length - 1;
232 start = cache->start;
236 ret = cache;
240 ret = cache;
245 ret = cache;
275 struct btrfs_block_group *cache)
277 struct btrfs_fs_info *fs_info = cache->fs_info;
283 if (RB_EMPTY_NODE(&cache->cache_node)) {
284 const u64 next_bytenr = cache->start + cache->length;
287 btrfs_put_block_group(cache);
290 node = rb_next(&cache->cache_node);
291 btrfs_put_block_group(cache);
293 cache = rb_entry(node, struct btrfs_block_group, cache_node);
294 btrfs_get_block_group(cache);
296 cache = NULL;
298 return cache;
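
The block around source lines 275-298 is the iterator step (btrfs_next_block_group() in the kernel source): it drops the reference on the group it was handed and returns the next group in the rb tree with a fresh reference, or NULL at the end. A hedged sketch of the resulting loop, assuming btrfs_lookup_first_block_group() as the entry point (not shown in this listing):

    static void walk_block_groups(struct btrfs_fs_info *fs_info)
    {
            struct btrfs_block_group *bg;

            for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
                 bg = btrfs_next_block_group(bg)) {
                    /* bg holds a reference here; per-group work goes here.
                     * Breaking out early would require btrfs_put_block_group(bg).
                     */
            }
            /* No trailing put: the step function already dropped the last
             * reference when it returned NULL.
             */
    }
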
405 struct btrfs_block_group *cache)
409 spin_lock(&cache->lock);
410 if (!cache->caching_ctl) {
411 spin_unlock(&cache->lock);
415 ctl = cache->caching_ctl;
417 spin_unlock(&cache->lock);
437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
440 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
446 caching_ctl = btrfs_get_caching_control(cache);
458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
460 (cache->free_space_ctl->free_space >= num_bytes)));
465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
477 caching_ctl = btrfs_get_caching_control(cache);
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
507 * Add a free space range to the in memory free space cache of a block group.
509 * locations are not added to the free space cache.
515 * added to the block group's free space cache.
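
These fragments are from the comment of btrfs_add_new_free_space(): ranges overlapping excluded extents (for example super block stripes) are clipped out, and only the remainder goes into the in-memory free space cache. The call sites later in this listing (source lines 2398 and 2846) pass NULL as the last argument, which appears to be an optional out-pointer for the number of bytes actually added. A sketch mirroring those call sites:

    /* Sketch: seed the free space cache of a block group with no used
     * bytes, as the call around source line 2398 does.
     */
    static int populate_empty_bg(struct btrfs_block_group *cache)
    {
            return btrfs_add_new_free_space(cache, cache->start,
                                            cache->start + cache->length,
                                            NULL);
    }
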
867 * We failed to load the space cache, set ourselves to
878 * can't actually cache from the free space tree as our commit root and
918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
920 struct btrfs_fs_info *fs_info = cache->fs_info;
924 /* Allocator for zoned filesystems does not use the cache at all */
935 caching_ctl->block_group = cache;
940 spin_lock(&cache->lock);
941 if (cache->cached != BTRFS_CACHE_NO) {
944 caching_ctl = cache->caching_ctl;
947 spin_unlock(&cache->lock);
950 WARN_ON(cache->caching_ctl);
951 cache->caching_ctl = caching_ctl;
952 cache->cached = BTRFS_CACHE_STARTED;
953 spin_unlock(&cache->lock);
960 btrfs_get_block_group(cache);
965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
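
btrfs_cache_block_group() installs a caching_ctl and moves the group from BTRFS_CACHE_NO to BTRFS_CACHE_STARTED; with wait set it then calls btrfs_caching_ctl_wait_done(), which converts BTRFS_CACHE_ERROR into -EIO (source line 469). The pattern visible at source lines 3643-3644, written out as a small helper (a sketch, not kernel code):

    static int ensure_block_group_cached(struct btrfs_block_group *cache)
    {
            /* Already done (the done check also covers the error state). */
            if (btrfs_block_group_done(cache))
                    return 0;
            /* Start caching and wait; returns -EIO if caching failed. */
            return btrfs_cache_block_group(cache, true);
    }
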
1113 * Make sure our free space cache IO is done before removing the
1335 * Mark block group @cache read-only, so later write won't happen to block
1336 * group @cache.
1347 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1349 struct btrfs_space_info *sinfo = cache->space_info;
1354 spin_lock(&cache->lock);
1356 if (cache->swap_extents) {
1361 if (cache->ro) {
1362 cache->ro++;
1367 num_bytes = cache->length - cache->reserved - cache->pinned -
1368 cache->bytes_super - cache->zone_unusable - cache->used;
1392 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1399 if (btrfs_is_zoned(cache->fs_info)) {
1401 sinfo->bytes_readonly += cache->zone_unusable;
1402 sinfo->bytes_zone_unusable -= cache->zone_unusable;
1403 cache->zone_unusable = 0;
1405 cache->ro++;
1406 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1409 spin_unlock(&cache->lock);
1411 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1412 btrfs_info(cache->fs_info,
1413 "unable to make block group %llu ro", cache->start);
1414 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
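
inc_block_group_ro() only marks the group read-only if the bytes that are not already used, reserved, pinned, counted as super stripes or zone-unusable can be covered by the rest of the space_info (or by overcommit). The same quantity shows up again on the rw path at source lines 3031-3033; as a stand-alone helper (a sketch of the arithmetic only, using fields shown in this listing):

    /* Bytes the space_info loses from its writable pool when this block
     * group flips read-only: everything not already spoken for.
     */
    static u64 bg_unreserved_bytes(const struct btrfs_block_group *bg)
    {
            return bg->length - bg->reserved - bg->pinned -
                   bg->bytes_super - bg->zone_unusable - bg->used;
    }
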
2163 static int exclude_super_stripes(struct btrfs_block_group *cache)
2165 struct btrfs_fs_info *fs_info = cache->fs_info;
2172 if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
2173 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
2174 cache->bytes_super += stripe_len;
2175 ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
2176 cache->start + stripe_len - 1,
2184 ret = btrfs_rmap_block(fs_info, cache->start,
2194 cache->start);
2200 cache->start + cache->length - logical[nr]);
2202 cache->bytes_super += len;
2220 struct btrfs_block_group *cache;
2222 cache = kzalloc(sizeof(*cache), GFP_NOFS);
2223 if (!cache)
2226 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
2228 if (!cache->free_space_ctl) {
2229 kfree(cache);
2233 cache->start = start;
2235 cache->fs_info = fs_info;
2236 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
2238 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
2240 refcount_set(&cache->refs, 1);
2241 spin_lock_init(&cache->lock);
2242 init_rwsem(&cache->data_rwsem);
2243 INIT_LIST_HEAD(&cache->list);
2244 INIT_LIST_HEAD(&cache->cluster_list);
2245 INIT_LIST_HEAD(&cache->bg_list);
2246 INIT_LIST_HEAD(&cache->ro_list);
2247 INIT_LIST_HEAD(&cache->discard_list);
2248 INIT_LIST_HEAD(&cache->dirty_list);
2249 INIT_LIST_HEAD(&cache->io_list);
2250 INIT_LIST_HEAD(&cache->active_bg_list);
2251 btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
2252 atomic_set(&cache->frozen, 0);
2253 mutex_init(&cache->free_space_lock);
2255 return cache;
2317 struct btrfs_block_group *cache;
2323 cache = btrfs_create_block_group_cache(info, key->objectid);
2324 if (!cache)
2327 cache->length = key->offset;
2328 cache->used = btrfs_stack_block_group_used(bgi);
2329 cache->commit_used = cache->used;
2330 cache->flags = btrfs_stack_block_group_flags(bgi);
2331 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
2333 set_free_space_tree_thresholds(cache);
2337 * When we mount with old space cache, we need to
2341 * truncate the old free space cache inode and
2344 * the new space cache info onto disk.
2347 cache->disk_cache_state = BTRFS_DC_CLEAR;
2349 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2350 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2353 cache->start);
2358 ret = btrfs_load_block_group_zone_info(cache, false);
2361 cache->start);
2370 ret = exclude_super_stripes(cache);
2373 btrfs_free_excluded_extents(cache);
2390 btrfs_calc_zone_unusable(cache);
2392 btrfs_free_excluded_extents(cache);
2393 } else if (cache->length == cache->used) {
2394 cache->cached = BTRFS_CACHE_FINISHED;
2395 btrfs_free_excluded_extents(cache);
2396 } else if (cache->used == 0) {
2397 cache->cached = BTRFS_CACHE_FINISHED;
2398 ret = btrfs_add_new_free_space(cache, cache->start,
2399 cache->start + cache->length, NULL);
2400 btrfs_free_excluded_extents(cache);
2405 ret = btrfs_add_block_group_cache(info, cache);
2407 btrfs_remove_free_space_cache(cache);
2410 trace_btrfs_add_block_group(info, cache, 0);
2411 btrfs_add_bg_to_space_info(info, cache);
2413 set_avail_alloc_bits(info, cache->flags);
2414 if (btrfs_chunk_writeable(info, cache->start)) {
2415 if (cache->used == 0) {
2416 ASSERT(list_empty(&cache->bg_list));
2418 btrfs_discard_queue_work(&info->discard_ctl, cache);
2420 btrfs_mark_bg_unused(cache);
2423 inc_block_group_ro(cache, 1);
2428 btrfs_put_block_group(cache);
2451 /* Fill dummy cache as FULL */
2459 * We may have some valid block group cache added already, in
2488 struct btrfs_block_group *cache;
2553 cache = list_first_entry(&space_info->block_groups[i],
2556 btrfs_sysfs_add_block_group_type(cache);
2569 list_for_each_entry(cache,
2572 inc_block_group_ro(cache, 1);
2573 list_for_each_entry(cache,
2576 inc_block_group_ro(cache, 1);
2807 struct btrfs_block_group *cache;
2812 cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2813 if (!cache)
2821 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
2823 cache->length = size;
2824 set_free_space_tree_thresholds(cache);
2825 cache->flags = type;
2826 cache->cached = BTRFS_CACHE_FINISHED;
2827 cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2830 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2832 ret = btrfs_load_block_group_zone_info(cache, true);
2834 btrfs_put_block_group(cache);
2838 ret = exclude_super_stripes(cache);
2841 btrfs_free_excluded_extents(cache);
2842 btrfs_put_block_group(cache);
2846 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
2847 btrfs_free_excluded_extents(cache);
2849 btrfs_put_block_group(cache);
2858 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2859 ASSERT(cache->space_info);
2861 ret = btrfs_add_block_group_cache(fs_info, cache);
2863 btrfs_remove_free_space_cache(cache);
2864 btrfs_put_block_group(cache);
2872 trace_btrfs_add_block_group(fs_info, cache, 1);
2873 btrfs_add_bg_to_space_info(fs_info, cache);
2877 if (btrfs_should_fragment_free_space(cache)) {
2878 cache->space_info->bytes_used += size >> 1;
2879 fragment_free_space(cache);
2883 list_add_tail(&cache->bg_list, &trans->new_bgs);
2888 return cache;
2895 * @cache: the destination block group
2900 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2903 struct btrfs_fs_info *fs_info = cache->fs_info;
2918 ret = inc_block_group_ro(cache, 0);
2932 * block group cache has started writing. If it already started,
2954 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2955 if (alloc_flags != cache->flags) {
2969 ret = inc_block_group_ro(cache, 0);
2981 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
2984 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2992 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
2996 ret = inc_block_group_ro(cache, 0);
3000 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3001 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
3013 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
3015 struct btrfs_space_info *sinfo = cache->space_info;
3018 BUG_ON(!cache->ro);
3021 spin_lock(&cache->lock);
3022 if (!--cache->ro) {
3023 if (btrfs_is_zoned(cache->fs_info)) {
3025 cache->zone_unusable =
3026 (cache->alloc_offset - cache->used) +
3027 (cache->length - cache->zone_capacity);
3028 sinfo->bytes_zone_unusable += cache->zone_unusable;
3029 sinfo->bytes_readonly -= cache->zone_unusable;
3031 num_bytes = cache->length - cache->reserved -
3032 cache->pinned - cache->bytes_super -
3033 cache->zone_unusable - cache->used;
3035 list_del_init(&cache->ro_list);
3037 spin_unlock(&cache->lock);
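
btrfs_inc_block_group_ro() and btrfs_dec_block_group_ro() are the public pair around the static inc_block_group_ro() earlier in this listing; ro is a counter (cache->ro++ / --cache->ro), so nested users stack. A hedged usage sketch; the second argument on the inc side (whether chunk allocation may be used to make room) is an assumption about the full signature, which this listing truncates at source line 2900:

    static int with_block_group_ro(struct btrfs_block_group *bg)
    {
            int ret;

            /* Second argument is an assumption; see note above. */
            ret = btrfs_inc_block_group_ro(bg, true);
            if (ret)
                    return ret;

            /* ... work that must not race with new allocations in bg ... */

            btrfs_dec_block_group_ro(bg);
            return 0;
    }
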
3043 struct btrfs_block_group *cache)
3058 * We cannot use cache->used directly outside of the spin lock, as it
3061 spin_lock(&cache->lock);
3062 old_commit_used = cache->commit_used;
3063 used = cache->used;
3065 if (cache->commit_used == used) {
3066 spin_unlock(&cache->lock);
3069 cache->commit_used = used;
3070 spin_unlock(&cache->lock);
3072 key.objectid = cache->start;
3074 key.offset = cache->length;
3087 cache->global_root_id);
3088 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
3103 spin_lock(&cache->lock);
3104 cache->commit_used = old_commit_used;
3105 spin_unlock(&cache->lock);
3164 * from here on out we know not to trust this cache when we load up next
3172 * super cache generation to 0 so we know to invalidate the
3173 * cache, but then we'd have to keep track of the block groups
3174 * that fail this way so we know we _have_ to reset this cache
3175 * before the next commit or risk reading stale cache. So to
3219 * We hit an ENOSPC when setting up the cache in this transaction, just
3220 * skip doing the setup, we've already cleared the cache so we're safe.
3231 * cache.
3249 * Our cache requires contiguous chunks so that we don't modify a bunch
3250 * of metadata or split extents when writing the cache out, which means
3279 struct btrfs_block_group *cache, *tmp;
3292 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3294 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3295 cache_save_setup(cache, trans, path);
3303 * Transaction commit does final block group cache writeback during a critical
3305 * order for the cache to actually match the block group, but can introduce a
3308 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3317 struct btrfs_block_group *cache;
3349 * writing out the cache
3355 cache = list_first_entry(&dirty, struct btrfs_block_group,
3362 if (!list_empty(&cache->io_list)) {
3363 list_del_init(&cache->io_list);
3364 btrfs_wait_cache_io(trans, cache, path);
3365 btrfs_put_block_group(cache);
3370 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3378 list_del_init(&cache->dirty_list);
3383 cache_save_setup(cache, trans, path);
3385 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3386 cache->io_ctl.inode = NULL;
3387 ret = btrfs_write_out_cache(trans, cache, path);
3388 if (ret == 0 && cache->io_ctl.inode) {
3396 list_add_tail(&cache->io_list, io);
3399 * If we failed to write the cache, the
3406 ret = update_block_group_item(trans, path, cache);
3419 if (list_empty(&cache->dirty_list)) {
3420 list_add_tail(&cache->dirty_list,
3422 btrfs_get_block_group(cache);
3433 btrfs_put_block_group(cache);
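
The comment at source lines 3303-3308 explains the split between the two dirty-block-group writers: btrfs_start_dirty_block_groups() runs before the commit's critical section so most block group cache IO is already under way, and the function excerpted next (btrfs_write_dirty_block_groups() in the kernel source) runs inside the critical section to write whatever became dirty in the meantime. Roughly, in the commit path (a sketch; the real call sites live in the transaction commit code, not shown here):

    ret = btrfs_start_dirty_block_groups(trans);   /* before the critical section */
    if (ret)
            return ret;
    /* ... enter the transaction commit critical section ... */
    ret = btrfs_write_dirty_block_groups(trans);   /* final, consistent pass */
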
3483 struct btrfs_block_group *cache;
3499 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3511 cache = list_first_entry(&cur_trans->dirty_bgs,
3520 if (!list_empty(&cache->io_list)) {
3522 list_del_init(&cache->io_list);
3523 btrfs_wait_cache_io(trans, cache, path);
3524 btrfs_put_block_group(cache);
3532 list_del_init(&cache->dirty_list);
3536 cache_save_setup(cache, trans, path);
3542 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3543 cache->io_ctl.inode = NULL;
3544 ret = btrfs_write_out_cache(trans, cache, path);
3545 if (ret == 0 && cache->io_ctl.inode) {
3547 list_add_tail(&cache->io_list, io);
3550 * If we failed to write the cache, the
3557 ret = update_block_group_item(trans, path, cache);
3561 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3574 ret = update_block_group_item(trans, path, cache);
3582 btrfs_put_block_group(cache);
3593 cache = list_first_entry(io, struct btrfs_block_group,
3595 list_del_init(&cache->io_list);
3596 btrfs_wait_cache_io(trans, cache, path);
3597 btrfs_put_block_group(cache);
3608 struct btrfs_block_group *cache = NULL;
3629 cache = btrfs_lookup_block_group(info, bytenr);
3630 if (!cache) {
3634 space_info = cache->space_info;
3635 factor = btrfs_bg_type_to_factor(cache->flags);
3638 * If this block group has free space cache written out, we
3643 if (!alloc && !btrfs_block_group_done(cache))
3644 btrfs_cache_block_group(cache, true);
3646 byte_in_group = bytenr - cache->start;
3647 WARN_ON(byte_in_group > cache->length);
3650 spin_lock(&cache->lock);
3653 cache->disk_cache_state < BTRFS_DC_CLEAR)
3654 cache->disk_cache_state = BTRFS_DC_CLEAR;
3656 old_val = cache->used;
3657 num_bytes = min(total, cache->length - byte_in_group);
3660 cache->used = old_val;
3661 cache->reserved -= num_bytes;
3665 spin_unlock(&cache->lock);
3669 cache->used = old_val;
3670 cache->pinned += num_bytes;
3676 reclaim = should_reclaim_block_group(cache, num_bytes);
3678 spin_unlock(&cache->lock);
3687 if (list_empty(&cache->dirty_list)) {
3688 list_add_tail(&cache->dirty_list,
3691 btrfs_get_block_group(cache);
3699 * cache writeout.
3703 btrfs_mark_bg_unused(cache);
3705 btrfs_mark_bg_to_reclaim(cache);
3708 btrfs_put_block_group(cache);
3721 * @cache: The cache we are manipulating
3731 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3735 struct btrfs_space_info *space_info = cache->space_info;
3740 spin_lock(&cache->lock);
3741 if (cache->ro) {
3746 if (btrfs_block_group_should_use_size_class(cache)) {
3748 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
3752 cache->reserved += num_bytes;
3754 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3756 btrfs_space_info_update_bytes_may_use(cache->fs_info,
3759 cache->delalloc_bytes += num_bytes;
3766 btrfs_try_granting_tickets(cache->fs_info, space_info);
3768 spin_unlock(&cache->lock);
3776 * @cache: The cache we are manipulating
3785 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3788 struct btrfs_space_info *space_info = cache->space_info;
3791 spin_lock(&cache->lock);
3792 if (cache->ro)
3794 cache->reserved -= num_bytes;
3799 cache->delalloc_bytes -= num_bytes;
3800 spin_unlock(&cache->lock);
3802 btrfs_try_granting_tickets(cache->fs_info, space_info);
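
btrfs_add_reserved_bytes() and btrfs_free_reserved_bytes() bracket an allocation attempt: space is reserved in the chosen block group under cache->lock (failing when the group is read-only, the check at source line 3741), and handed back if the extent is not used after all. This listing truncates both parameter lists, so everything after the block group pointer below is an assumption taken from the kernel source (ram_bytes, num_bytes, a delalloc flag, and a size-class override on the add side):

    static int reserve_then_back_out(struct btrfs_block_group *bg, u64 num_bytes)
    {
            int ret;

            /* Trailing arguments are assumptions; see note above. */
            ret = btrfs_add_reserved_bytes(bg, num_bytes, num_bytes, 0, false);
            if (ret)
                    return ret;     /* e.g. fails when bg->ro */

            /* ... attempt to record the extent; assume it failed ... */

            btrfs_free_reserved_bytes(bg, num_bytes, 0);
            return 0;
    }
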
3906 * then adds back the entry to the block group cache).
4477 void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4479 atomic_inc(&cache->frozen);
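
btrfs_freeze_block_group() bumps cache->frozen (initialized to 0 at source line 2252) so the block group's backing state is kept around while scrub or device replace is still looking at it; the counterpart btrfs_unfreeze_block_group() (not matched by this search) drops the count and performs any deferred cleanup. Usage sketch:

    /* Keep @bg from being torn down while inspecting it. */
    btrfs_freeze_block_group(bg);
    /* ... read-only inspection of @bg ... */
    btrfs_unfreeze_block_group(bg);
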