/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include "misc.h"

struct extent_changeset;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range.  It must not be cleared during page release
	 * or page invalidation while an ordered extent is in flight; that is
	 * left to the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked
	 * as a new delalloc range, use this flag when clearing the range to
	 * indicate that the VFS inode's byte count should be incremented and
	 * the inode's new delalloc byte count decremented, atomically, to
	 * prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
};
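
/*
 * Illustrative sketch, not part of the header proper: ENUM_BIT() (from
 * "misc.h") gives each entry above its own power-of-two value, so the bits
 * compose into plain masks that the set/clear/test helpers below accept.
 * The combination shown here is arbitrary.
 *
 *	u32 bits = EXTENT_DELALLOC | EXTENT_UPTODATE;
 *
 *	if (bits & EXTENT_DELALLOC)
 *		;	// the range has pending delayed allocation
 */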

#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/*
 * The extent state bits above are reused (redefined) below for the device
 * allocation tree only.  They must not reuse EXTENT_LOCKED, EXTENT_BOUNDARY,
 * EXTENT_CLEAR_META_RESV or EXTENT_CLEAR_DATA_RESV, because those bits have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)
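
/*
 * A minimal usage sketch for the chunk aliases above, assuming a device
 * allocation state tree such as btrfs_device::alloc_state (the 'device'
 * variable below is hypothetical):
 *
 *	// Remember that this byte range of the device backs a chunk.
 *	set_extent_bits(&device->alloc_state, start, end, CHUNK_ALLOCATED);
 *
 *	// Drop both the allocated and trimmed status for the range.
 *	clear_extent_bits(&device->alloc_state, start, end, CHUNK_STATE_MASK);
 */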

/* Owner identifiers for an extent io tree, stored in extent_io_tree::owner. */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	/* RB tree of extent_state records covering byte ranges. */
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	/* Inode associated with this tree, or NULL. */
	struct btrfs_inode *inode;

	/* Who owns this io tree; should be one of the IO_TREE_* values. */
	u8 owner;

	/* Protects the tree of extent states. */
	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	/* Wait queue for tasks waiting on bits in this range. */
	wait_queue_head_t wq;
	/* Reference count; the state is freed when it drops to zero. */
	refcount_t refs;
	/* Bitmask of EXTENT_* bits currently set on this range. */
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	/* Entry on the debug list used to detect leaked extent states. */
	struct list_head leak_list;
#endif
};

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
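
/*
 * Lifecycle sketch, assuming a caller that embeds its own tree (the
 * surrounding code is hypothetical): initialize once with one of the
 * IO_TREE_* owners above, use the bit helpers below on byte ranges, then
 * release the tree to drop any extent states still in it.
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST);
 *	// ... set, test and clear bits on ranges of the tree ...
 *	extent_io_tree_release(&tree);
 */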

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached);

int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);
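
/*
 * Usage sketch for count_range_bits(), under the assumption that it returns
 * the number of bytes in [*start, search_end] that have all the given bits
 * set (capped at max_bytes) and updates *start to the first matching offset;
 * a non-zero 'contig' restricts the count to one contiguous run.  The
 * 'io_tree' below stands in for whichever tree the caller owns.
 *
 *	u64 found_start = 0;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(io_tree, &found_start, (u64)-1, (u64)-1,
 *				 EXTENT_DELALLOC, 0, NULL);
 */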

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
				  GFP_NOFS, NULL);
}
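
/*
 * Typical lock/unlock pattern, sketched with a hypothetical btrfs inode:
 * the cached state filled in by lock_extent() lets unlock_extent() clear
 * EXTENT_LOCKED without a second tree search, and the clear path is expected
 * to drop the cached reference.
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(&inode->io_tree, start, end, &cached);
 *	// ... operate on the byte range [start, end] ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */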

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start,
					 u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT);
}

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
}
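
/*
 * The two helpers above differ only in allocation context; a sketch of when
 * each might be chosen (the call sites are hypothetical):
 *
 *	// Normal filesystem context that may sleep: GFP_NOFS.
 *	ret = set_extent_bits(tree, start, end, EXTENT_DIRTY);
 *
 *	// Context that must not sleep, e.g. while holding a spinlock:
 *	// GFP_NOWAIT, so the return value still needs handling by the caller.
 *	ret = set_extent_bits_nowait(tree, start, end, EXTENT_DIRTY);
 */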

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, u32 extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | extra_bits,
			      cached_state, GFP_NOFS);
}
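
/*
 * Sketch of marking a freshly dirtied range as delalloc, loosely modeled on
 * a buffered write path (the inode variable and the choice of extra bits are
 * illustrative only):
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_delalloc(&inode->io_tree, start, end,
 *			    EXTENT_DELALLOC_NEW, &cached);
 *	// ... later, drop the extra reference held by the cached state ...
 *	free_extent_state(cached);
 */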

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DEFRAG,
			      cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, GFP_NOFS);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
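
/*
 * Search sketch: walking every range that has a given bit set, assuming
 * find_first_extent_bit() returns 0 when it finds a range and non-zero once
 * the search runs past the last matching extent state.
 *
 *	u64 cur = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		// ... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */
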
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		     struct extent_state **cached_state);

#endif /* BTRFS_EXTENT_IO_TREE_H */