/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero.  When it is set
 * the btrfs file release call will flush out any new data the
 * application may have written before commit.
 */
enum {
	BTRFS_INODE_ORDERED_DATA_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_IN_DELALLOC_LIST,
	BTRFS_INODE_READDIO_NEED_LOCK,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
};

/* in memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/* key used to find this inode on disk.  This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/* special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/*
	 * Keep track of where the inode has extent items mapped in order to
	 * make sure the i_size adjustments are accurate
	 */
	struct extent_io_tree file_extent_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/* list of all the delalloc inodes in the FS.  There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/* full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * transid of the trans_handle that last modified this inode
	 */
	u64 last_trans;

	/*
	 * transid that last logged this inode
	 */
	u64 logged_trans;

	/*
	 * log transid when this inode was last modified
	 */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range). This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/* the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged.  See tree-log.c for all the
	 * details
	 */
	u64 last_unlink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums.  This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * Counters to keep track of the number of extent items we may use due
	 * to delalloc and such.  outstanding_extents is the number of extent
	 * items we think we'll end up using, and the metadata reserved for
	 * them is tracked in block_rsv below.
	 */
	unsigned outstanding_extents;

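	/* Metadata reservation held for this inode, mainly used for delalloc. */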
	struct btrfs_block_rsv block_rsv;

	/*
	 * Cached values of inode properties
	 */
	unsigned prop_compress;		/* per-file compression algorithm */
	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set
	 */
	unsigned defrag_compress;

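	/* Holds delayed inode item and dir index updates, see delayed-inode.c. */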
	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec64 i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	/*
	 * To avoid races between lockless (i_mutex not held) direct IO writes
	 * and concurrent fsync requests. Direct IO writes must acquire read
	 * access on this semaphore for creating an extent map and its
	 * corresponding ordered extent. The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
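	/*
	 * Sketch of the locking pattern described above:
	 *
	 *   down_read(&inode->dio_sem);    lockless DIO write: create the
	 *   ...                            extent map and ordered extent
	 *   up_read(&inode->dio_sem);
	 *
	 *   down_write(&inode->dio_sem);   fast fsync: collect ordered
	 *   ...                            extents and extent maps
	 *   up_write(&inode->dio_sem);
	 */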
	struct rw_semaphore dio_sem;

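	/* The embedded VFS inode. */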
	struct inode vfs_inode;
};

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}

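/*
 * Hash used to insert a btrfs inode into the VFS inode hash table.  Inode
 * numbers alone are not unique across subvolumes, so mix in the objectid of
 * the root (subvolume) that owns the inode.
 */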
static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}

static inline void btrfs_insert_inode_hash(struct inode *inode)
{
	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

	__insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}

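/*
 * Update both the in-memory i_size and the size that will be stored in the
 * inode item on disk.
 */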
static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

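/*
 * Return true for the inodes backing the free space caches: any inode owned
 * by the tree root other than the btree inode (block group free space cache
 * inodes) and the per-subvolume free inode number cache inode.
 */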
static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (root == root->fs_info->tree_root &&
	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
		return true;
	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}

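/* Everything except the btree inode counts as a data inode. */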
static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

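/*
 * Adjust the number of extent items we expect to use for this inode.  The
 * caller must hold inode->lock.  Free space inodes only skip the tracepoint,
 * their counter is still updated.
 */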
static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod);
}

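/*
 * Return 1 if the given transaction's log already fully covers this inode,
 * which lets fsync avoid logging it again.
 */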
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range). So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		smp_mb();
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}

#define BTRFS_DIO_ORIG_BIO_SUBMITTED	0x1

struct btrfs_dio_private {
	struct inode *inode;
	unsigned long flags;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	/* orig_bio is our btrfs_io_bio */
	struct bio *orig_bio;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/*
	 * The original bio may be split into several sub-bios; this callback
	 * is run during the endio of those sub-bios.
	 */
	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
			blk_status_t);
};

/*
 * Disable the DIO read nolock optimization, so that new DIO readers are
 * forced to grab i_mutex. This is used to avoid an endless wait in truncate
 * caused by lockless DIO reads that keep starting.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
	smp_mb();
}

static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
	smp_mb__before_atomic();
	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
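
/*
 * Typical usage around a truncate (sketch):
 *
 *   btrfs_inode_block_unlocked_dio(inode);
 *   inode_dio_wait(&inode->vfs_inode);
 *   ...shrink the file...
 *   btrfs_inode_resume_unlocked_dio(inode);
 */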

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes

static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_super_block *sb = root->fs_info->super_copy;
	const u16 csum_size = btrfs_super_csum_size(sb);

	/*
	 * Print the objectid of the special trees as a signed value, a
	 * negative number is more meaningful than its huge unsigned
	 * equivalent.
	 */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
}

#endif