/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include "misc.h"

struct extent_changeset;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range.  Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight);
	 * that is left to ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked
	 * as a new delalloc range, use this flag when clearing the new
	 * delalloc range to indicate that the VFS inode's number of bytes
	 * should be incremented and the inode's new delalloc bytes
	 * decremented, in an atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * This must be last.
	 *
	 * A bit that does not represent a state but a request for NOWAIT
	 * semantics, e.g. when allocating memory; it must be masked out from
	 * the other bits.  See the usage sketch after this enum.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
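
/*
 * Usage sketch (illustrative, not part of the original header): EXTENT_NOWAIT
 * is OR'ed into the bits passed to the set/clear helpers declared below to
 * request non-blocking memory allocations; the helpers mask it out before
 * treating the remaining bits as state.  The io_tree, start, end and cached
 * variables are assumed to come from the caller.
 *
 *	ret = set_extent_bit(io_tree, start, end,
 *			     EXTENT_DELALLOC | EXTENT_NOWAIT, &cached);
 *	if (ret)
 *		... fall back to a blocking attempt without EXTENT_NOWAIT ...
 */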

#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/*
 * Aliases for bits defined above, used only in the device allocation tree.
 * They must not alias EXTENT_LOCKED, EXTENT_BOUNDARY, EXTENT_CLEAR_META_RESV
 * or EXTENT_CLEAR_DATA_RESV, because those have special meaning to the bit
 * manipulation functions.  See the usage sketch after these defines.
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)
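
/*
 * Usage sketch (illustrative, not part of the original header): the device
 * allocation tree (owner IO_TREE_DEVICE_ALLOC_STATE below) is driven through
 * the generic helpers with the CHUNK_* aliases.  The device->alloc_state tree
 * is an assumption taken from struct btrfs_device, which is declared
 * elsewhere in btrfs.
 *
 *	set_extent_bit(&device->alloc_state, start, start + num_bytes - 1,
 *		       CHUNK_ALLOCATED, NULL);
 *	...
 *	clear_extent_bits(&device->alloc_state, start, start + num_bytes - 1,
 *			  CHUNK_ALLOCATED);
 */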

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	/* Inode associated with this tree, or NULL. */
	struct btrfs_inode *inode;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
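
/*
 * Usage sketch (illustrative, not part of the original header): a tree is
 * initialized once with its owner tag and torn down with
 * extent_io_tree_release().  The inode->io_tree field is an assumption taken
 * from struct btrfs_inode, declared elsewhere in btrfs.
 *
 *	extent_io_tree_init(fs_info, &inode->io_tree, IO_TREE_INODE_IO);
 *	...
 *	extent_io_tree_release(&inode->io_tree);
 */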

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached);

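/*
 * Usage sketch (illustrative, not part of the original header):
 * try_lock_extent() is the non-blocking variant of lock_extent(), used for
 * example on nowait I/O paths.  The io_tree, nowait flag and cached pointer
 * are placeholders for whatever the caller has at hand.
 *
 *	if (nowait) {
 *		if (!try_lock_extent(io_tree, start, end, &cached))
 *			return -EAGAIN;
 *	} else {
 *		lock_extent(io_tree, start, end, &cached);
 *	}
 */
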
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);
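
/*
 * Usage sketch (illustrative, not part of the original header): count how
 * many bytes in [start, end] have the given bits set, updating found_start
 * to the first matching offset; with contig set the count stops at the first
 * gap.  The io_tree, start and end are assumed to come from the caller.
 *
 *	u64 found_start = start;
 *	u64 delalloc_bytes;
 *
 *	delalloc_bytes = count_range_bits(io_tree, &found_start, end,
 *					  end + 1 - start, EXTENT_DELALLOC,
 *					  1, NULL);
 */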

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
}
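
/*
 * Usage sketch (illustrative, not part of the original header): the common
 * locking pattern pairs lock_extent() with unlock_extent() on the same range
 * and threads a cached extent_state through both calls so the second tree
 * search can be skipped.  The io_tree, start and end come from the caller.
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(io_tree, start, end, &cached);
 *	... operate on [start, end] ...
 *	unlock_extent(io_tree, start, end, &cached);
 */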

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);
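
/*
 * Usage sketch (illustrative, not part of the original header): the _record_
 * variant additionally logs which ranges actually changed into an
 * extent_changeset, e.g. for qgroup data reservation accounting.  The
 * extent_changeset_alloc() helper and the io_tree are assumptions taken from
 * elsewhere in btrfs, not declared in this header.
 *
 *	struct extent_changeset *reserved = extent_changeset_alloc();
 *
 *	if (!reserved)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(io_tree, start, start + len - 1,
 *				     EXTENT_QGROUP_RESERVED, reserved);
 */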

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		     struct extent_state **cached_state);
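
/*
 * Usage sketch (illustrative, not part of the original header): walk every
 * range with a given bit set, as the pinned/excluded extent trees are
 * typically scanned.  The tree pointer is assumed to come from the caller.
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				     EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */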

#endif /* BTRFS_EXTENT_IO_TREE_H */