/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
/*
 * Must be cleared only during ordered extent completion, or on error paths if
 * we did not manage to submit bios and create the ordered extents for the
 * range. Should not be cleared during page release and page invalidation (if
 * there is an ordered extent in flight), as that is left for ordered extent
 * completion.
 */
#define EXTENT_DELALLOC_NEW	(1U << 14)
/*
 * When an ordered extent successfully completes for a region marked as a new
 * delalloc range, set this flag when clearing that range to indicate that the
 * VFS inode's number of bytes should be incremented and the inode's new
 * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
 */
#define EXTENT_ADD_INODE_BYTES	(1U << 15)

/*
 * Set during truncate when we're clearing an entire range and we just want the
 * extent states to go away.
 */
#define EXTENT_CLEAR_ALL_BITS	(1U << 16)

#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/*
 * Aliases for bits above that are used only in the device allocation tree.
 * They must not reuse EXTENT_LOCKED, EXTENT_BOUNDARY, EXTENT_CLEAR_META_RESV
 * or EXTENT_CLEAR_DATA_RESV, because those have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;

	/* Who owns this io tree, should be one of the IO_TREE_* values */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
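/*
 * Usage sketch (illustrative only, not part of this API): the typical
 * pattern is lock, work, unlock, passing a cached state so the unlock
 * does not have to search the tree again. "inode" here is assumed to
 * be a struct btrfs_inode the caller already holds:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(&inode->io_tree, start, end, &cached);
 *	... operate on the range [start, end], end inclusive ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 *
 * unlock_extent() (declared below) is expected to consume the cached
 * reference, so "cached" must not be used after the call.
 */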
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_atomic(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start,
					 u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT);
}

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
				  u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
					u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
				   u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, u32 extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | extra_bits,
			      cached_state, GFP_NOFS);
}
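/*
 * Usage sketch (illustrative only; error handling and the surrounding
 * space reservation logic are omitted): mark a reserved range as
 * delalloc, keeping a cached state for the eventual clear. "io_tree"
 * is assumed to be an IO_TREE_INODE_IO tree:
 *
 *	struct extent_state *cached = NULL;
 *	int ret;
 *
 *	ret = set_extent_delalloc(io_tree, start, end, 0, &cached);
 *	if (ret == 0)
 *		... hand [start, end] to the writeback machinery ...
 *	free_extent_state(cached);	(a no-op if cached is still NULL)
 */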
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DEFRAG,
			      cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
				 u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
				      u64 end, struct extent_state **cached_state,
				      gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits);

#endif /* BTRFS_EXTENT_IO_TREE_H */