/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H

struct btrfs_trans_handle;
struct btrfs_root;
enum btrfs_reserve_flush_enum;

/*
 * Types of block reserves
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_EMPTY,
	BTRFS_BLOCK_RSV_TEMP,
};

struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	bool full;
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;

	/*
	 * Qgroup equivalent of @size and @reserved.
	 *
	 * Unlike the normal @size/@reserved of an inode rsv, qgroup doesn't
	 * care about things like csum size or how many tree blocks will be
	 * needed for the reservation.
	 *
	 * Qgroup cares more about the net change in extent usage.
	 *
	 * So even if a newly inserted file extent causes a leaf split and a
	 * level increase in the worst case, reserving one nodesize per file
	 * extent is already more than enough (see the illustrative note after
	 * this struct).
	 *
	 * In short, qgroup_rsv_size/qgroup_rsv_reserved is the upper limit of
	 * the qgroup metadata reservation that may be needed.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};
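
/*
 * Illustrative note on the qgroup fields above (a rough sketch, not a
 * definition from this header): for an inode reserve, the qgroup side
 * typically tracks about one nodesize per outstanding extent, while @size
 * additionally has to cover worst cases such as csum items and tree
 * insertions, so @size is usually the larger of the two. Conceptually:
 *
 *	qgroup_rsv_size ~= outstanding_extents * nodesize
 */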

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			      struct btrfs_block_rsv *block_rsv,
			      u64 num_bytes, u64 *qgroup_to_release);
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv);
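
/*
 * Illustrative usage sketch (an assumed typical call flow, not something
 * defined by this header): a temporary reserve is allocated, filled from
 * the space info, consumed, and then released:
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret) {
 *		... consume the reservation ...
 *		btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
 *	}
 *	btrfs_free_block_rsv(fs_info, rsv);
 *
 * Here num_bytes is whatever the caller needs to reserve, and
 * BTRFS_RESERVE_FLUSH_ALL is one value of enum btrfs_reserve_flush_enum
 * (declared above, defined elsewhere).
 */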

/*
 * Give back a reservation of @blocksize bytes: put @blocksize back into
 * @block_rsv and release any excess above the reserve's size (e.g. when a
 * tree block reservation obtained with btrfs_use_block_rsv() ends up not
 * being used).
 */
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
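
/*
 * Illustrative pairing (an assumed caller pattern, not something defined by
 * this header): a tree block reservation taken with btrfs_use_block_rsv()
 * can be handed back on an error path:
 *
 *	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
 *	if (IS_ERR(block_rsv))
 *		return ERR_CAST(block_rsv);
 *	... allocate and set up the tree block ...
 *	if (that failed)
 *		btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
 */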

/*
 * Fast path to check if the reserve is full; may be used outside of locks,
 * with care.
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	return data_race(rsv->full);
}
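
/*
 * Illustrative only (an assumed caller pattern, not something defined by
 * this header): a heuristic can do the cheap lockless check first and fall
 * back to the locked helpers below when it needs an exact value:
 *
 *	if (btrfs_block_rsv_full(&fs_info->global_block_rsv))
 *		return;
 */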

/*
 * Get the reserved amount of a block reserve in a context where getting a
 * stale value is acceptable, instead of accessing it directly and triggering
 * a data race warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->reserved;
	spin_unlock(&rsv->lock);

	return ret;
}

/*
 * Get the size of a block reserve in a context where getting a stale value
 * is acceptable, instead of accessing it directly and triggering a data race
 * warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->size;
	spin_unlock(&rsv->lock);

	return ret;
}
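
/*
 * Illustrative sketch (assumed usage, not something defined by this
 * header): readers that only need a rough snapshot, e.g. to decide whether
 * to kick off flushing or for reporting, can do:
 *
 *	u64 reserved = btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv);
 *	u64 size = btrfs_block_rsv_size(&fs_info->delayed_refs_rsv);
 *
 *	if (reserved < size)
 *		... consider refilling the reserve ...
 *
 * Both values may be stale by the time they are compared; callers must
 * tolerate that.
 */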

#endif /* BTRFS_BLOCK_RSV_H */