/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
/*
 * Must be cleared only during ordered extent completion or on error paths
 * where we did not manage to submit bios and create the ordered extents for
 * the range. It must not be cleared during page release or page invalidation
 * (if there is an ordered extent in flight), as that is left to the ordered
 * extent completion.
 */
#define EXTENT_DELALLOC_NEW	(1U << 14)
/*
 * When an ordered extent successfully completes for a region marked as a new
 * delalloc range, use this flag when clearing a new delalloc range to indicate
 * that the VFS inode's byte count should be incremented and the inode's new
 * delalloc bytes decremented, atomically, to prevent races with stat(2).
 */
#define EXTENT_ADD_INODE_BYTES  (1U << 15)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES)

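/*
 * Example (illustrative sketch, not part of the API): callers tag a byte
 * range with one or more of the bits above and later clear them; passing
 * EXTENT_DO_ACCOUNTING on clear also releases the range's reserved space.
 * 'io_tree' below is a stand-in for any struct extent_io_tree, e.g. an
 * inode's io tree:
 *
 *	set_extent_bits(io_tree, start, end, EXTENT_DELALLOC);
 *	...
 *	clear_extent_bit(io_tree, start, end,
 *			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL);
 */
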
/*
 * Bits redefined from the set above for use only in the device allocation
 * tree. EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV /
 * EXTENT_CLEAR_DATA_RESV must not be used here because they have special
 * meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)

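/*
 * Example (sketch): a device's allocation state tree (owner
 * IO_TREE_DEVICE_ALLOC_STATE) tracks which byte ranges of the device hold
 * chunks; allocation and discard paths flip these bits. Assuming 'device'
 * is a struct btrfs_device with an 'alloc_state' io tree:
 *
 *	set_extent_bits(&device->alloc_state, start, end, CHUNK_ALLOCATED);
 *	...
 *	set_extent_bits(&device->alloc_state, start, end, CHUNK_TRIMMED);
 */
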
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Owner of this io tree, must be one of the IO_TREE_* values */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* Add new members after this point, keeping the fields above first */
	wait_queue_head_t wq;		/* waiters on this range's bits */
	refcount_t refs;
	u32 state;			/* bitmask of EXTENT_* bits */

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);

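/*
 * Example (sketch): a tree is initialized once with its owner id and an
 * optional back pointer to private data, then released when the owning
 * structure is torn down:
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);
 *	...
 *	extent_io_tree_release(&tree);
 */
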
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);

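/*
 * Example (sketch): the common pattern is to lock a byte range, operate on
 * it and unlock (unlock_extent() is defined below); try_lock_extent()
 * returns 1 if the range was locked and 0 otherwise, for callers that must
 * not block on a contended range:
 *
 *	lock_extent(tree, start, end);
 *	...
 *	unlock_extent(tree, start, end);
 */
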
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig);

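/*
 * Example (sketch): count how many bytes in a range carry a bit; *start is
 * advanced to the first matching offset, and a nonzero 'contig' stops the
 * count at the first gap:
 *
 *	u64 first = 0;
 *	u64 dirty_bytes = count_range_bits(tree, &first, (u64)-1, (u64)-1,
 *					   EXTENT_DIRTY, 0);
 */
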
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

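/*
 * Example (sketch): callers touching the same range repeatedly keep a
 * cached extent_state so the unlock can skip the rbtree search; the unlock
 * helper drops the reference taken by lock_extent_bits():
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	...
 *	unlock_extent_cached(tree, start, end, &cached);
 */
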
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, unsigned exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
			      NULL);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL,
			      mask, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, u32 extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      0, NULL, cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      0, NULL, cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, NULL,
			      GFP_NOFS, NULL);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      cached_state, mask, NULL);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
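
/*
 * Example (sketch): walking every range that has a given bit set;
 * find_first_extent_bit() returns 0 and fills [*start_ret, *end_ret] when
 * a matching range exists at or after 'start':
 *
 *	u64 found_start, found_end, cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      CHUNK_ALLOCATED, NULL)) {
 *		...
 *		cur = found_end + 1;
 *	}
 */
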
int extent_invalidate_folio(struct extent_io_tree *tree,
			    struct folio *folio, size_t offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);

/* This should be reworked in the future and put elsewhere. */
struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */