/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Bits reused from the set above that are used only in the device allocation
 * tree.  They must not alias EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV because those have special
 * meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
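/*
 * Illustrative sketch only (not part of the original header): a typical
 * caller locks a byte range before doing I/O on it and drops the lock with
 * one of the unlock_extent*() helpers declared further below.  The BTRFS_I()
 * accessor and the per-inode io_tree field are assumed from the rest of
 * btrfs:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, &cached);
 *	... operate on the byte range [start, end], end inclusive ...
 *	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end, &cached);
 */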
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
					      u64 start, u64 end,
					      struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
				  u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
				   u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
				 u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
				      u64 end,
				      struct extent_state **cached_state,
				      gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}
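/*
 * Illustrative sketch only (not part of the original header): buffered write
 * paths mark a reserved range as delalloc, and the bit is later cleared
 * together with the EXTENT_DO_ACCOUNTING bits when the range is written out
 * or dropped.  Names are assumed from the rest of btrfs and error handling
 * is omitted:
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 0, &cached);
 *	... later, when releasing the range ...
 *	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
 *			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, &cached);
 */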
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);

/* This should be reworked in the future and put elsewhere. */
struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */