xref: /openbmc/linux/fs/btrfs/inode.c (revision a2cab953)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <crypto/hash.h>
7 #include <linux/kernel.h>
8 #include <linux/bio.h>
9 #include <linux/blk-cgroup.h>
10 #include <linux/file.h>
11 #include <linux/fs.h>
12 #include <linux/pagemap.h>
13 #include <linux/highmem.h>
14 #include <linux/time.h>
15 #include <linux/init.h>
16 #include <linux/string.h>
17 #include <linux/backing-dev.h>
18 #include <linux/writeback.h>
19 #include <linux/compat.h>
20 #include <linux/xattr.h>
21 #include <linux/posix_acl.h>
22 #include <linux/falloc.h>
23 #include <linux/slab.h>
24 #include <linux/ratelimit.h>
25 #include <linux/btrfs.h>
26 #include <linux/blkdev.h>
27 #include <linux/posix_acl_xattr.h>
28 #include <linux/uio.h>
29 #include <linux/magic.h>
30 #include <linux/iversion.h>
31 #include <linux/swap.h>
32 #include <linux/migrate.h>
33 #include <linux/sched/mm.h>
34 #include <linux/iomap.h>
35 #include <asm/unaligned.h>
36 #include <linux/fsverity.h>
37 #include "misc.h"
38 #include "ctree.h"
39 #include "disk-io.h"
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "print-tree.h"
43 #include "ordered-data.h"
44 #include "xattr.h"
45 #include "tree-log.h"
46 #include "volumes.h"
47 #include "compression.h"
48 #include "locking.h"
49 #include "free-space-cache.h"
50 #include "props.h"
51 #include "qgroup.h"
52 #include "delalloc-space.h"
53 #include "block-group.h"
54 #include "space-info.h"
55 #include "zoned.h"
56 #include "subpage.h"
57 #include "inode-item.h"
58 
59 struct btrfs_iget_args {
60 	u64 ino;
61 	struct btrfs_root *root;
62 };
63 
64 struct btrfs_dio_data {
65 	ssize_t submitted;
66 	struct extent_changeset *data_reserved;
67 	bool data_space_reserved;
68 	bool nocow_done;
69 };
70 
71 struct btrfs_dio_private {
72 	struct inode *inode;
73 
74 	/*
75 	 * Since DIO can use anonymous page, we cannot use page_offset() to
76 	 * grab the file offset, thus need a dedicated member for file offset.
77 	 */
78 	u64 file_offset;
79 	/* Used for bio::bi_size */
80 	u32 bytes;
81 
82 	/*
83 	 * References to this structure. There is one reference per in-flight
84 	 * bio plus one while we're still setting up.
85 	 */
86 	refcount_t refs;
87 
88 	/* Array of checksums */
89 	u8 *csums;
90 
91 	/* This must be last */
92 	struct bio bio;
93 };
94 
95 static struct bio_set btrfs_dio_bioset;
96 
97 struct btrfs_rename_ctx {
98 	/* Output field. Stores the index number of the old directory entry. */
99 	u64 index;
100 };
101 
102 static const struct inode_operations btrfs_dir_inode_operations;
103 static const struct inode_operations btrfs_symlink_inode_operations;
104 static const struct inode_operations btrfs_special_inode_operations;
105 static const struct inode_operations btrfs_file_inode_operations;
106 static const struct address_space_operations btrfs_aops;
107 static const struct file_operations btrfs_dir_file_operations;
108 
109 static struct kmem_cache *btrfs_inode_cachep;
110 struct kmem_cache *btrfs_trans_handle_cachep;
111 struct kmem_cache *btrfs_path_cachep;
112 struct kmem_cache *btrfs_free_space_cachep;
113 struct kmem_cache *btrfs_free_space_bitmap_cachep;
114 
115 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
116 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
117 static noinline int cow_file_range(struct btrfs_inode *inode,
118 				   struct page *locked_page,
119 				   u64 start, u64 end, int *page_started,
120 				   unsigned long *nr_written, int unlock,
121 				   u64 *done_offset);
122 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
123 				       u64 len, u64 orig_start, u64 block_start,
124 				       u64 block_len, u64 orig_block_len,
125 				       u64 ram_bytes, int compress_type,
126 				       int type);
127 
128 /*
129  * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
130  *
131  * ilock_flags can have the following bit set:
132  *
133  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
134  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
135  *		     return -EAGAIN
136  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
137  */
138 int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
139 {
140 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
141 		if (ilock_flags & BTRFS_ILOCK_TRY) {
142 			if (!inode_trylock_shared(inode))
143 				return -EAGAIN;
144 			else
145 				return 0;
146 		}
147 		inode_lock_shared(inode);
148 	} else {
149 		if (ilock_flags & BTRFS_ILOCK_TRY) {
150 			if (!inode_trylock(inode))
151 				return -EAGAIN;
152 			else
153 				return 0;
154 		}
155 		inode_lock(inode);
156 	}
157 	if (ilock_flags & BTRFS_ILOCK_MMAP)
158 		down_write(&BTRFS_I(inode)->i_mmap_lock);
159 	return 0;
160 }
161 
162 /*
163  * btrfs_inode_unlock - unock inode i_rwsem
164  *
165  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
166  * to decide whether the lock acquired is shared or exclusive.
167  */
168 void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
169 {
170 	if (ilock_flags & BTRFS_ILOCK_MMAP)
171 		up_write(&BTRFS_I(inode)->i_mmap_lock);
172 	if (ilock_flags & BTRFS_ILOCK_SHARED)
173 		inode_unlock_shared(inode);
174 	else
175 		inode_unlock(inode);
176 }
177 
178 /*
179  * Cleanup all submitted ordered extents in specified range to handle errors
180  * from the btrfs_run_delalloc_range() callback.
181  *
182  * NOTE: caller must ensure that when an error happens, it can not call
183  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
184  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
185  * to be released, which we want to happen only when finishing the ordered
186  * extent (btrfs_finish_ordered_io()).
187  */
188 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
189 						 struct page *locked_page,
190 						 u64 offset, u64 bytes)
191 {
192 	unsigned long index = offset >> PAGE_SHIFT;
193 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
194 	u64 page_start, page_end;
195 	struct page *page;
196 
197 	if (locked_page) {
198 		page_start = page_offset(locked_page);
199 		page_end = page_start + PAGE_SIZE - 1;
200 	}
201 
202 	while (index <= end_index) {
203 		/*
204 		 * For locked page, we will call end_extent_writepage() on it
205 		 * in run_delalloc_range() for the error handling.  That
206 		 * end_extent_writepage() function will call
207 		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
208 		 * run the ordered extent accounting.
209 		 *
210 		 * Here we can't just clear the Ordered bit, or
211 		 * btrfs_mark_ordered_io_finished() would skip the accounting
212 		 * for the page range, and the ordered extent will never finish.
213 		 */
214 		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
215 			index++;
216 			continue;
217 		}
218 		page = find_get_page(inode->vfs_inode.i_mapping, index);
219 		index++;
220 		if (!page)
221 			continue;
222 
223 		/*
224 		 * Here we just clear all Ordered bits for every page in the
225 		 * range, then btrfs_mark_ordered_io_finished() will handle
226 		 * the ordered extent accounting for the range.
227 		 */
228 		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
229 					       offset, bytes);
230 		put_page(page);
231 	}
232 
233 	if (locked_page) {
234 		/* The locked page covers the full range, nothing needs to be done */
235 		if (bytes + offset <= page_start + PAGE_SIZE)
236 			return;
237 		/*
238 		 * In case this page belongs to the delalloc range being
239 		 * instantiated then skip it, since the first page of a range is
240 		 * going to be properly cleaned up by the caller of
241 		 * run_delalloc_range
242 		 */
243 		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
244 			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
245 			offset = page_offset(locked_page) + PAGE_SIZE;
246 		}
247 	}
248 
249 	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
250 }
251 
252 static int btrfs_dirty_inode(struct inode *inode);
253 
254 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
255 				     struct btrfs_new_inode_args *args)
256 {
257 	int err;
258 
259 	if (args->default_acl) {
260 		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
261 				      ACL_TYPE_DEFAULT);
262 		if (err)
263 			return err;
264 	}
265 	if (args->acl) {
266 		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
267 		if (err)
268 			return err;
269 	}
270 	if (!args->default_acl && !args->acl)
271 		cache_no_acl(args->inode);
272 	return btrfs_xattr_security_init(trans, args->inode, args->dir,
273 					 &args->dentry->d_name);
274 }
275 
276 /*
277  * this does all the hard work for inserting an inline extent into
278  * the btree.  The caller should have done a btrfs_drop_extents so that
279  * no overlapping inline items exist in the btree
280  */
281 static int insert_inline_extent(struct btrfs_trans_handle *trans,
282 				struct btrfs_path *path,
283 				struct btrfs_inode *inode, bool extent_inserted,
284 				size_t size, size_t compressed_size,
285 				int compress_type,
286 				struct page **compressed_pages,
287 				bool update_i_size)
288 {
289 	struct btrfs_root *root = inode->root;
290 	struct extent_buffer *leaf;
291 	struct page *page = NULL;
292 	char *kaddr;
293 	unsigned long ptr;
294 	struct btrfs_file_extent_item *ei;
295 	int ret;
296 	size_t cur_size = size;
297 	u64 i_size;
298 
299 	ASSERT((compressed_size > 0 && compressed_pages) ||
300 	       (compressed_size == 0 && !compressed_pages));
301 
302 	if (compressed_size && compressed_pages)
303 		cur_size = compressed_size;
304 
305 	if (!extent_inserted) {
306 		struct btrfs_key key;
307 		size_t datasize;
308 
309 		key.objectid = btrfs_ino(inode);
310 		key.offset = 0;
311 		key.type = BTRFS_EXTENT_DATA_KEY;
312 
313 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
314 		ret = btrfs_insert_empty_item(trans, root, path, &key,
315 					      datasize);
316 		if (ret)
317 			goto fail;
318 	}
319 	leaf = path->nodes[0];
320 	ei = btrfs_item_ptr(leaf, path->slots[0],
321 			    struct btrfs_file_extent_item);
322 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
323 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
324 	btrfs_set_file_extent_encryption(leaf, ei, 0);
325 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
326 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
327 	ptr = btrfs_file_extent_inline_start(ei);
328 
329 	if (compress_type != BTRFS_COMPRESS_NONE) {
330 		struct page *cpage;
331 		int i = 0;
332 		while (compressed_size > 0) {
333 			cpage = compressed_pages[i];
334 			cur_size = min_t(unsigned long, compressed_size,
335 				       PAGE_SIZE);
336 
337 			kaddr = kmap_local_page(cpage);
338 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
339 			kunmap_local(kaddr);
340 
341 			i++;
342 			ptr += cur_size;
343 			compressed_size -= cur_size;
344 		}
345 		btrfs_set_file_extent_compression(leaf, ei,
346 						  compress_type);
347 	} else {
348 		page = find_get_page(inode->vfs_inode.i_mapping, 0);
349 		btrfs_set_file_extent_compression(leaf, ei, 0);
350 		kaddr = kmap_local_page(page);
351 		write_extent_buffer(leaf, kaddr, ptr, size);
352 		kunmap_local(kaddr);
353 		put_page(page);
354 	}
355 	btrfs_mark_buffer_dirty(leaf);
356 	btrfs_release_path(path);
357 
358 	/*
359 	 * We align size to sectorsize for inline extents just for simplicity
360 	 * sake.
361 	 */
362 	ret = btrfs_inode_set_file_extent_range(inode, 0,
363 					ALIGN(size, root->fs_info->sectorsize));
364 	if (ret)
365 		goto fail;
366 
367 	/*
368 	 * We're an inline extent, so nobody can extend the file past i_size
369 	 * without locking a page we already have locked.
370 	 *
371 	 * We must do any i_size and inode updates before we unlock the pages.
372 	 * Otherwise we could end up racing with unlink.
373 	 */
374 	i_size = i_size_read(&inode->vfs_inode);
375 	if (update_i_size && size > i_size) {
376 		i_size_write(&inode->vfs_inode, size);
377 		i_size = size;
378 	}
379 	inode->disk_i_size = i_size;
380 
381 fail:
382 	return ret;
383 }
384 
385 
386 /*
387  * conditionally insert an inline extent into the file.  This
388  * does the checks required to make sure the data is small enough
389  * to fit as an inline extent.
390  */
391 static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
392 					  size_t compressed_size,
393 					  int compress_type,
394 					  struct page **compressed_pages,
395 					  bool update_i_size)
396 {
397 	struct btrfs_drop_extents_args drop_args = { 0 };
398 	struct btrfs_root *root = inode->root;
399 	struct btrfs_fs_info *fs_info = root->fs_info;
400 	struct btrfs_trans_handle *trans;
401 	u64 data_len = (compressed_size ?: size);
402 	int ret;
403 	struct btrfs_path *path;
404 
405 	/*
406 	 * We can create an inline extent if it ends at or beyond the current
407 	 * i_size, is no larger than a sector (decompressed), and the (possibly
408 	 * compressed) data fits in a leaf and the configured maximum inline
409 	 * size.
410 	 */
411 	if (size < i_size_read(&inode->vfs_inode) ||
412 	    size > fs_info->sectorsize ||
413 	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
414 	    data_len > fs_info->max_inline)
415 		return 1;
416 
417 	path = btrfs_alloc_path();
418 	if (!path)
419 		return -ENOMEM;
420 
421 	trans = btrfs_join_transaction(root);
422 	if (IS_ERR(trans)) {
423 		btrfs_free_path(path);
424 		return PTR_ERR(trans);
425 	}
426 	trans->block_rsv = &inode->block_rsv;
427 
428 	drop_args.path = path;
429 	drop_args.start = 0;
430 	drop_args.end = fs_info->sectorsize;
431 	drop_args.drop_cache = true;
432 	drop_args.replace_extent = true;
433 	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
434 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
435 	if (ret) {
436 		btrfs_abort_transaction(trans, ret);
437 		goto out;
438 	}
439 
440 	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
441 				   size, compressed_size, compress_type,
442 				   compressed_pages, update_i_size);
443 	if (ret && ret != -ENOSPC) {
444 		btrfs_abort_transaction(trans, ret);
445 		goto out;
446 	} else if (ret == -ENOSPC) {
447 		ret = 1;
448 		goto out;
449 	}
450 
451 	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
452 	ret = btrfs_update_inode(trans, root, inode);
453 	if (ret && ret != -ENOSPC) {
454 		btrfs_abort_transaction(trans, ret);
455 		goto out;
456 	} else if (ret == -ENOSPC) {
457 		ret = 1;
458 		goto out;
459 	}
460 
461 	btrfs_set_inode_full_sync(inode);
462 out:
463 	/*
464 	 * Don't forget to free the reserved space, as for inlined extent
465 	 * it won't count as data extent, free them directly here.
466 	 * And at reserve time, it's always aligned to page size, so
467 	 * just free one page here.
468 	 */
469 	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
470 	btrfs_free_path(path);
471 	btrfs_end_transaction(trans);
472 	return ret;
473 }
474 
475 struct async_extent {
476 	u64 start;
477 	u64 ram_size;
478 	u64 compressed_size;
479 	struct page **pages;
480 	unsigned long nr_pages;
481 	int compress_type;
482 	struct list_head list;
483 };
484 
485 struct async_chunk {
486 	struct inode *inode;
487 	struct page *locked_page;
488 	u64 start;
489 	u64 end;
490 	blk_opf_t write_flags;
491 	struct list_head extents;
492 	struct cgroup_subsys_state *blkcg_css;
493 	struct btrfs_work work;
494 	struct async_cow *async_cow;
495 };
496 
497 struct async_cow {
498 	atomic_t num_chunks;
499 	struct async_chunk chunks[];
500 };
501 
502 static noinline int add_async_extent(struct async_chunk *cow,
503 				     u64 start, u64 ram_size,
504 				     u64 compressed_size,
505 				     struct page **pages,
506 				     unsigned long nr_pages,
507 				     int compress_type)
508 {
509 	struct async_extent *async_extent;
510 
511 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
512 	BUG_ON(!async_extent); /* -ENOMEM */
513 	async_extent->start = start;
514 	async_extent->ram_size = ram_size;
515 	async_extent->compressed_size = compressed_size;
516 	async_extent->pages = pages;
517 	async_extent->nr_pages = nr_pages;
518 	async_extent->compress_type = compress_type;
519 	list_add_tail(&async_extent->list, &cow->extents);
520 	return 0;
521 }
522 
523 /*
524  * Check if the inode needs to be submitted to compression, based on mount
525  * options, defragmentation, properties or heuristics.
526  */
527 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
528 				      u64 end)
529 {
530 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
531 
532 	if (!btrfs_inode_can_compress(inode)) {
533 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
534 			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
535 			btrfs_ino(inode));
536 		return 0;
537 	}
538 	/*
539 	 * Special check for subpage.
540 	 *
541 	 * We lock the full page then run each delalloc range in the page, thus
542 	 * for the following case, we will hit some subpage specific corner case:
543 	 *
544 	 * 0		32K		64K
545 	 * |	|///////|	|///////|
546 	 *		\- A		\- B
547 	 *
548 	 * In above case, both range A and range B will try to unlock the full
549 	 * page [0, 64K), causing the one finished later will have page
550 	 * unlocked already, triggering various page lock requirement BUG_ON()s.
551 	 *
552 	 * So here we add an artificial limit that subpage compression can only
553 	 * if the range is fully page aligned.
554 	 *
555 	 * In theory we only need to ensure the first page is fully covered, but
556 	 * the tailing partial page will be locked until the full compression
557 	 * finishes, delaying the write of other range.
558 	 *
559 	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
560 	 * first to prevent any submitted async extent to unlock the full page.
561 	 * By this, we can ensure for subpage case that only the last async_cow
562 	 * will unlock the full page.
563 	 */
564 	if (fs_info->sectorsize < PAGE_SIZE) {
565 		if (!PAGE_ALIGNED(start) ||
566 		    !PAGE_ALIGNED(end + 1))
567 			return 0;
568 	}
569 
570 	/* force compress */
571 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
572 		return 1;
573 	/* defrag ioctl */
574 	if (inode->defrag_compress)
575 		return 1;
576 	/* bad compression ratios */
577 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
578 		return 0;
579 	if (btrfs_test_opt(fs_info, COMPRESS) ||
580 	    inode->flags & BTRFS_INODE_COMPRESS ||
581 	    inode->prop_compress)
582 		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
583 	return 0;
584 }
585 
586 static inline void inode_should_defrag(struct btrfs_inode *inode,
587 		u64 start, u64 end, u64 num_bytes, u32 small_write)
588 {
589 	/* If this is a small write inside eof, kick off a defrag */
590 	if (num_bytes < small_write &&
591 	    (start > 0 || end + 1 < inode->disk_i_size))
592 		btrfs_add_inode_defrag(NULL, inode, small_write);
593 }
594 
595 /*
596  * we create compressed extents in two phases.  The first
597  * phase compresses a range of pages that have already been
598  * locked (both pages and state bits are locked).
599  *
600  * This is done inside an ordered work queue, and the compression
601  * is spread across many cpus.  The actual IO submission is step
602  * two, and the ordered work queue takes care of making sure that
603  * happens in the same order things were put onto the queue by
604  * writepages and friends.
605  *
606  * If this code finds it can't get good compression, it puts an
607  * entry onto the work queue to write the uncompressed bytes.  This
608  * makes sure that both compressed inodes and uncompressed inodes
609  * are written in the same order that the flusher thread sent them
610  * down.
611  */
612 static noinline int compress_file_range(struct async_chunk *async_chunk)
613 {
614 	struct inode *inode = async_chunk->inode;
615 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
616 	u64 blocksize = fs_info->sectorsize;
617 	u64 start = async_chunk->start;
618 	u64 end = async_chunk->end;
619 	u64 actual_end;
620 	u64 i_size;
621 	int ret = 0;
622 	struct page **pages = NULL;
623 	unsigned long nr_pages;
624 	unsigned long total_compressed = 0;
625 	unsigned long total_in = 0;
626 	int i;
627 	int will_compress;
628 	int compress_type = fs_info->compress_type;
629 	int compressed_extents = 0;
630 	int redirty = 0;
631 
632 	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
633 			SZ_16K);
634 
635 	/*
636 	 * We need to save i_size before now because it could change in between
637 	 * us evaluating the size and assigning it.  This is because we lock and
638 	 * unlock the page in truncate and fallocate, and then modify the i_size
639 	 * later on.
640 	 *
641 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
642 	 * does that for us.
643 	 */
644 	barrier();
645 	i_size = i_size_read(inode);
646 	barrier();
647 	actual_end = min_t(u64, i_size, end + 1);
648 again:
649 	will_compress = 0;
650 	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
651 	nr_pages = min_t(unsigned long, nr_pages,
652 			BTRFS_MAX_COMPRESSED / PAGE_SIZE);
653 
654 	/*
655 	 * we don't want to send crud past the end of i_size through
656 	 * compression, that's just a waste of CPU time.  So, if the
657 	 * end of the file is before the start of our current
658 	 * requested range of bytes, we bail out to the uncompressed
659 	 * cleanup code that can deal with all of this.
660 	 *
661 	 * It isn't really the fastest way to fix things, but this is a
662 	 * very uncommon corner.
663 	 */
664 	if (actual_end <= start)
665 		goto cleanup_and_bail_uncompressed;
666 
667 	total_compressed = actual_end - start;
668 
669 	/*
670 	 * Skip compression for a small file range(<=blocksize) that
671 	 * isn't an inline extent, since it doesn't save disk space at all.
672 	 */
673 	if (total_compressed <= blocksize &&
674 	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
675 		goto cleanup_and_bail_uncompressed;
676 
677 	/*
678 	 * For subpage case, we require full page alignment for the sector
679 	 * aligned range.
680 	 * Thus we must also check against @actual_end, not just @end.
681 	 */
682 	if (blocksize < PAGE_SIZE) {
683 		if (!PAGE_ALIGNED(start) ||
684 		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
685 			goto cleanup_and_bail_uncompressed;
686 	}
687 
688 	total_compressed = min_t(unsigned long, total_compressed,
689 			BTRFS_MAX_UNCOMPRESSED);
690 	total_in = 0;
691 	ret = 0;
692 
693 	/*
694 	 * we do compression for mount -o compress and when the
695 	 * inode has not been flagged as nocompress.  This flag can
696 	 * change at any time if we discover bad compression ratios.
697 	 */
698 	if (inode_need_compress(BTRFS_I(inode), start, end)) {
699 		WARN_ON(pages);
700 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
701 		if (!pages) {
702 			/* just bail out to the uncompressed code */
703 			nr_pages = 0;
704 			goto cont;
705 		}
706 
707 		if (BTRFS_I(inode)->defrag_compress)
708 			compress_type = BTRFS_I(inode)->defrag_compress;
709 		else if (BTRFS_I(inode)->prop_compress)
710 			compress_type = BTRFS_I(inode)->prop_compress;
711 
712 		/*
713 		 * we need to call clear_page_dirty_for_io on each
714 		 * page in the range.  Otherwise applications with the file
715 		 * mmap'd can wander in and change the page contents while
716 		 * we are compressing them.
717 		 *
718 		 * If the compression fails for any reason, we set the pages
719 		 * dirty again later on.
720 		 *
721 		 * Note that the remaining part is redirtied, the start pointer
722 		 * has moved, the end is the original one.
723 		 */
724 		if (!redirty) {
725 			extent_range_clear_dirty_for_io(inode, start, end);
726 			redirty = 1;
727 		}
728 
729 		/* Compression level is applied here and only here */
730 		ret = btrfs_compress_pages(
731 			compress_type | (fs_info->compress_level << 4),
732 					   inode->i_mapping, start,
733 					   pages,
734 					   &nr_pages,
735 					   &total_in,
736 					   &total_compressed);
737 
738 		if (!ret) {
739 			unsigned long offset = offset_in_page(total_compressed);
740 			struct page *page = pages[nr_pages - 1];
741 
742 			/* zero the tail end of the last page, we might be
743 			 * sending it down to disk
744 			 */
745 			if (offset)
746 				memzero_page(page, offset, PAGE_SIZE - offset);
747 			will_compress = 1;
748 		}
749 	}
750 cont:
751 	/*
752 	 * Check cow_file_range() for why we don't even try to create inline
753 	 * extent for subpage case.
754 	 */
755 	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
756 		/* lets try to make an inline extent */
757 		if (ret || total_in < actual_end) {
758 			/* we didn't compress the entire range, try
759 			 * to make an uncompressed inline extent.
760 			 */
761 			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
762 						    0, BTRFS_COMPRESS_NONE,
763 						    NULL, false);
764 		} else {
765 			/* try making a compressed inline extent */
766 			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
767 						    total_compressed,
768 						    compress_type, pages,
769 						    false);
770 		}
771 		if (ret <= 0) {
772 			unsigned long clear_flags = EXTENT_DELALLOC |
773 				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
774 				EXTENT_DO_ACCOUNTING;
775 			unsigned long page_error_op;
776 
777 			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
778 
779 			/*
780 			 * inline extent creation worked or returned error,
781 			 * we don't need to create any more async work items.
782 			 * Unlock and free up our temp pages.
783 			 *
784 			 * We use DO_ACCOUNTING here because we need the
785 			 * delalloc_release_metadata to be done _after_ we drop
786 			 * our outstanding extent for clearing delalloc for this
787 			 * range.
788 			 */
789 			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
790 						     NULL,
791 						     clear_flags,
792 						     PAGE_UNLOCK |
793 						     PAGE_START_WRITEBACK |
794 						     page_error_op |
795 						     PAGE_END_WRITEBACK);
796 
797 			/*
798 			 * Ensure we only free the compressed pages if we have
799 			 * them allocated, as we can still reach here with
800 			 * inode_need_compress() == false.
801 			 */
802 			if (pages) {
803 				for (i = 0; i < nr_pages; i++) {
804 					WARN_ON(pages[i]->mapping);
805 					put_page(pages[i]);
806 				}
807 				kfree(pages);
808 			}
809 			return 0;
810 		}
811 	}
812 
813 	if (will_compress) {
814 		/*
815 		 * we aren't doing an inline extent round the compressed size
816 		 * up to a block size boundary so the allocator does sane
817 		 * things
818 		 */
819 		total_compressed = ALIGN(total_compressed, blocksize);
820 
821 		/*
822 		 * one last check to make sure the compression is really a
823 		 * win, compare the page count read with the blocks on disk,
824 		 * compression must free at least one sector size
825 		 */
826 		total_in = round_up(total_in, fs_info->sectorsize);
827 		if (total_compressed + blocksize <= total_in) {
828 			compressed_extents++;
829 
830 			/*
831 			 * The async work queues will take care of doing actual
832 			 * allocation on disk for these compressed pages, and
833 			 * will submit them to the elevator.
834 			 */
835 			add_async_extent(async_chunk, start, total_in,
836 					total_compressed, pages, nr_pages,
837 					compress_type);
838 
839 			if (start + total_in < end) {
840 				start += total_in;
841 				pages = NULL;
842 				cond_resched();
843 				goto again;
844 			}
845 			return compressed_extents;
846 		}
847 	}
848 	if (pages) {
849 		/*
850 		 * the compression code ran but failed to make things smaller,
851 		 * free any pages it allocated and our page pointer array
852 		 */
853 		for (i = 0; i < nr_pages; i++) {
854 			WARN_ON(pages[i]->mapping);
855 			put_page(pages[i]);
856 		}
857 		kfree(pages);
858 		pages = NULL;
859 		total_compressed = 0;
860 		nr_pages = 0;
861 
862 		/* flag the file so we don't compress in the future */
863 		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
864 		    !(BTRFS_I(inode)->prop_compress)) {
865 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
866 		}
867 	}
868 cleanup_and_bail_uncompressed:
869 	/*
870 	 * No compression, but we still need to write the pages in the file
871 	 * we've been given so far.  redirty the locked page if it corresponds
872 	 * to our extent and set things up for the async work queue to run
873 	 * cow_file_range to do the normal delalloc dance.
874 	 */
875 	if (async_chunk->locked_page &&
876 	    (page_offset(async_chunk->locked_page) >= start &&
877 	     page_offset(async_chunk->locked_page)) <= end) {
878 		__set_page_dirty_nobuffers(async_chunk->locked_page);
879 		/* unlocked later on in the async handlers */
880 	}
881 
882 	if (redirty)
883 		extent_range_redirty_for_io(inode, start, end);
884 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
885 			 BTRFS_COMPRESS_NONE);
886 	compressed_extents++;
887 
888 	return compressed_extents;
889 }
890 
891 static void free_async_extent_pages(struct async_extent *async_extent)
892 {
893 	int i;
894 
895 	if (!async_extent->pages)
896 		return;
897 
898 	for (i = 0; i < async_extent->nr_pages; i++) {
899 		WARN_ON(async_extent->pages[i]->mapping);
900 		put_page(async_extent->pages[i]);
901 	}
902 	kfree(async_extent->pages);
903 	async_extent->nr_pages = 0;
904 	async_extent->pages = NULL;
905 }
906 
907 static int submit_uncompressed_range(struct btrfs_inode *inode,
908 				     struct async_extent *async_extent,
909 				     struct page *locked_page)
910 {
911 	u64 start = async_extent->start;
912 	u64 end = async_extent->start + async_extent->ram_size - 1;
913 	unsigned long nr_written = 0;
914 	int page_started = 0;
915 	int ret;
916 
917 	/*
918 	 * Call cow_file_range() to run the delalloc range directly, since we
919 	 * won't go to NOCOW or async path again.
920 	 *
921 	 * Also we call cow_file_range() with @unlock_page == 0, so that we
922 	 * can directly submit them without interruption.
923 	 */
924 	ret = cow_file_range(inode, locked_page, start, end, &page_started,
925 			     &nr_written, 0, NULL);
926 	/* Inline extent inserted, page gets unlocked and everything is done */
927 	if (page_started) {
928 		ret = 0;
929 		goto out;
930 	}
931 	if (ret < 0) {
932 		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
933 		if (locked_page) {
934 			const u64 page_start = page_offset(locked_page);
935 			const u64 page_end = page_start + PAGE_SIZE - 1;
936 
937 			btrfs_page_set_error(inode->root->fs_info, locked_page,
938 					     page_start, PAGE_SIZE);
939 			set_page_writeback(locked_page);
940 			end_page_writeback(locked_page);
941 			end_extent_writepage(locked_page, ret, page_start, page_end);
942 			unlock_page(locked_page);
943 		}
944 		goto out;
945 	}
946 
947 	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
948 	/* All pages will be unlocked, including @locked_page */
949 out:
950 	kfree(async_extent);
951 	return ret;
952 }
953 
954 static int submit_one_async_extent(struct btrfs_inode *inode,
955 				   struct async_chunk *async_chunk,
956 				   struct async_extent *async_extent,
957 				   u64 *alloc_hint)
958 {
959 	struct extent_io_tree *io_tree = &inode->io_tree;
960 	struct btrfs_root *root = inode->root;
961 	struct btrfs_fs_info *fs_info = root->fs_info;
962 	struct btrfs_key ins;
963 	struct page *locked_page = NULL;
964 	struct extent_map *em;
965 	int ret = 0;
966 	u64 start = async_extent->start;
967 	u64 end = async_extent->start + async_extent->ram_size - 1;
968 
969 	/*
970 	 * If async_chunk->locked_page is in the async_extent range, we need to
971 	 * handle it.
972 	 */
973 	if (async_chunk->locked_page) {
974 		u64 locked_page_start = page_offset(async_chunk->locked_page);
975 		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
976 
977 		if (!(start >= locked_page_end || end <= locked_page_start))
978 			locked_page = async_chunk->locked_page;
979 	}
980 	lock_extent(io_tree, start, end, NULL);
981 
982 	/* We have fall back to uncompressed write */
983 	if (!async_extent->pages)
984 		return submit_uncompressed_range(inode, async_extent, locked_page);
985 
986 	ret = btrfs_reserve_extent(root, async_extent->ram_size,
987 				   async_extent->compressed_size,
988 				   async_extent->compressed_size,
989 				   0, *alloc_hint, &ins, 1, 1);
990 	if (ret) {
991 		free_async_extent_pages(async_extent);
992 		/*
993 		 * Here we used to try again by going back to non-compressed
994 		 * path for ENOSPC.  But we can't reserve space even for
995 		 * compressed size, how could it work for uncompressed size
996 		 * which requires larger size?  So here we directly go error
997 		 * path.
998 		 */
999 		goto out_free;
1000 	}
1001 
1002 	/* Here we're doing allocation and writeback of the compressed pages */
1003 	em = create_io_em(inode, start,
1004 			  async_extent->ram_size,	/* len */
1005 			  start,			/* orig_start */
1006 			  ins.objectid,			/* block_start */
1007 			  ins.offset,			/* block_len */
1008 			  ins.offset,			/* orig_block_len */
1009 			  async_extent->ram_size,	/* ram_bytes */
1010 			  async_extent->compress_type,
1011 			  BTRFS_ORDERED_COMPRESSED);
1012 	if (IS_ERR(em)) {
1013 		ret = PTR_ERR(em);
1014 		goto out_free_reserve;
1015 	}
1016 	free_extent_map(em);
1017 
1018 	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
1019 				       async_extent->ram_size,	/* num_bytes */
1020 				       async_extent->ram_size,	/* ram_bytes */
1021 				       ins.objectid,		/* disk_bytenr */
1022 				       ins.offset,		/* disk_num_bytes */
1023 				       0,			/* offset */
1024 				       1 << BTRFS_ORDERED_COMPRESSED,
1025 				       async_extent->compress_type);
1026 	if (ret) {
1027 		btrfs_drop_extent_map_range(inode, start, end, false);
1028 		goto out_free_reserve;
1029 	}
1030 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1031 
1032 	/* Clear dirty, set writeback and unlock the pages. */
1033 	extent_clear_unlock_delalloc(inode, start, end,
1034 			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
1035 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1036 	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
1037 			    async_extent->ram_size,	/* num_bytes */
1038 			    ins.objectid,		/* disk_bytenr */
1039 			    ins.offset,			/* compressed_len */
1040 			    async_extent->pages,	/* compressed_pages */
1041 			    async_extent->nr_pages,
1042 			    async_chunk->write_flags,
1043 			    async_chunk->blkcg_css, true)) {
1044 		const u64 start = async_extent->start;
1045 		const u64 end = start + async_extent->ram_size - 1;
1046 
1047 		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);
1048 
1049 		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
1050 					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
1051 		free_async_extent_pages(async_extent);
1052 	}
1053 	*alloc_hint = ins.objectid + ins.offset;
1054 	kfree(async_extent);
1055 	return ret;
1056 
1057 out_free_reserve:
1058 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1059 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1060 out_free:
1061 	extent_clear_unlock_delalloc(inode, start, end,
1062 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
1063 				     EXTENT_DELALLOC_NEW |
1064 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1065 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1066 				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
1067 	free_async_extent_pages(async_extent);
1068 	kfree(async_extent);
1069 	return ret;
1070 }
1071 
1072 /*
1073  * Phase two of compressed writeback.  This is the ordered portion of the code,
1074  * which only gets called in the order the work was queued.  We walk all the
1075  * async extents created by compress_file_range and send them down to the disk.
1076  */
1077 static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
1078 {
1079 	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
1080 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1081 	struct async_extent *async_extent;
1082 	u64 alloc_hint = 0;
1083 	int ret = 0;
1084 
1085 	while (!list_empty(&async_chunk->extents)) {
1086 		u64 extent_start;
1087 		u64 ram_size;
1088 
1089 		async_extent = list_entry(async_chunk->extents.next,
1090 					  struct async_extent, list);
1091 		list_del(&async_extent->list);
1092 		extent_start = async_extent->start;
1093 		ram_size = async_extent->ram_size;
1094 
1095 		ret = submit_one_async_extent(inode, async_chunk, async_extent,
1096 					      &alloc_hint);
1097 		btrfs_debug(fs_info,
1098 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1099 			    inode->root->root_key.objectid,
1100 			    btrfs_ino(inode), extent_start, ram_size, ret);
1101 	}
1102 }
1103 
1104 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1105 				      u64 num_bytes)
1106 {
1107 	struct extent_map_tree *em_tree = &inode->extent_tree;
1108 	struct extent_map *em;
1109 	u64 alloc_hint = 0;
1110 
1111 	read_lock(&em_tree->lock);
1112 	em = search_extent_mapping(em_tree, start, num_bytes);
1113 	if (em) {
1114 		/*
1115 		 * if block start isn't an actual block number then find the
1116 		 * first block in this inode and use that as a hint.  If that
1117 		 * block is also bogus then just don't worry about it.
1118 		 */
1119 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1120 			free_extent_map(em);
1121 			em = search_extent_mapping(em_tree, 0, 0);
1122 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
1123 				alloc_hint = em->block_start;
1124 			if (em)
1125 				free_extent_map(em);
1126 		} else {
1127 			alloc_hint = em->block_start;
1128 			free_extent_map(em);
1129 		}
1130 	}
1131 	read_unlock(&em_tree->lock);
1132 
1133 	return alloc_hint;
1134 }
1135 
1136 /*
1137  * when extent_io.c finds a delayed allocation range in the file,
1138  * the call backs end up in this code.  The basic idea is to
1139  * allocate extents on disk for the range, and create ordered data structs
1140  * in ram to track those extents.
1141  *
1142  * locked_page is the page that writepage had locked already.  We use
1143  * it to make sure we don't do extra locks or unlocks.
1144  *
1145  * *page_started is set to one if we unlock locked_page and do everything
1146  * required to start IO on it.  It may be clean and already done with
1147  * IO when we return.
1148  *
1149  * When unlock == 1, we unlock the pages in successfully allocated regions.
1150  * When unlock == 0, we leave them locked for writing them out.
1151  *
1152  * However, we unlock all the pages except @locked_page in case of failure.
1153  *
1154  * In summary, page locking state will be as follow:
1155  *
1156  * - page_started == 1 (return value)
1157  *     - All the pages are unlocked. IO is started.
1158  *     - Note that this can happen only on success
1159  * - unlock == 1
1160  *     - All the pages except @locked_page are unlocked in any case
1161  * - unlock == 0
1162  *     - On success, all the pages are locked for writing out them
1163  *     - On failure, all the pages except @locked_page are unlocked
1164  *
1165  * When a failure happens in the second or later iteration of the
1166  * while-loop, the ordered extents created in previous iterations are kept
1167  * intact. So, the caller must clean them up by calling
1168  * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
1169  * example.
1170  */
1171 static noinline int cow_file_range(struct btrfs_inode *inode,
1172 				   struct page *locked_page,
1173 				   u64 start, u64 end, int *page_started,
1174 				   unsigned long *nr_written, int unlock,
1175 				   u64 *done_offset)
1176 {
1177 	struct btrfs_root *root = inode->root;
1178 	struct btrfs_fs_info *fs_info = root->fs_info;
1179 	u64 alloc_hint = 0;
1180 	u64 orig_start = start;
1181 	u64 num_bytes;
1182 	unsigned long ram_size;
1183 	u64 cur_alloc_size = 0;
1184 	u64 min_alloc_size;
1185 	u64 blocksize = fs_info->sectorsize;
1186 	struct btrfs_key ins;
1187 	struct extent_map *em;
1188 	unsigned clear_bits;
1189 	unsigned long page_ops;
1190 	bool extent_reserved = false;
1191 	int ret = 0;
1192 
1193 	if (btrfs_is_free_space_inode(inode)) {
1194 		ret = -EINVAL;
1195 		goto out_unlock;
1196 	}
1197 
1198 	num_bytes = ALIGN(end - start + 1, blocksize);
1199 	num_bytes = max(blocksize,  num_bytes);
1200 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1201 
1202 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1203 
1204 	/*
1205 	 * Due to the page size limit, for subpage we can only trigger the
1206 	 * writeback for the dirty sectors of page, that means data writeback
1207 	 * is doing more writeback than what we want.
1208 	 *
1209 	 * This is especially unexpected for some call sites like fallocate,
1210 	 * where we only increase i_size after everything is done.
1211 	 * This means we can trigger inline extent even if we didn't want to.
1212 	 * So here we skip inline extent creation completely.
1213 	 */
1214 	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
1215 		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
1216 				       end + 1);
1217 
1218 		/* lets try to make an inline extent */
1219 		ret = cow_file_range_inline(inode, actual_end, 0,
1220 					    BTRFS_COMPRESS_NONE, NULL, false);
1221 		if (ret == 0) {
1222 			/*
1223 			 * We use DO_ACCOUNTING here because we need the
1224 			 * delalloc_release_metadata to be run _after_ we drop
1225 			 * our outstanding extent for clearing delalloc for this
1226 			 * range.
1227 			 */
1228 			extent_clear_unlock_delalloc(inode, start, end,
1229 				     locked_page,
1230 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1231 				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1232 				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1233 				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
1234 			*nr_written = *nr_written +
1235 			     (end - start + PAGE_SIZE) / PAGE_SIZE;
1236 			*page_started = 1;
1237 			/*
1238 			 * locked_page is locked by the caller of
1239 			 * writepage_delalloc(), not locked by
1240 			 * __process_pages_contig().
1241 			 *
1242 			 * We can't let __process_pages_contig() to unlock it,
1243 			 * as it doesn't have any subpage::writers recorded.
1244 			 *
1245 			 * Here we manually unlock the page, since the caller
1246 			 * can't use page_started to determine if it's an
1247 			 * inline extent or a compressed extent.
1248 			 */
1249 			unlock_page(locked_page);
1250 			goto out;
1251 		} else if (ret < 0) {
1252 			goto out_unlock;
1253 		}
1254 	}
1255 
1256 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1257 
1258 	/*
1259 	 * Relocation relies on the relocated extents to have exactly the same
1260 	 * size as the original extents. Normally writeback for relocation data
1261 	 * extents follows a NOCOW path because relocation preallocates the
1262 	 * extents. However, due to an operation such as scrub turning a block
1263 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1264 	 * an extent allocated during COW has exactly the requested size and can
1265 	 * not be split into smaller extents, otherwise relocation breaks and
1266 	 * fails during the stage where it updates the bytenr of file extent
1267 	 * items.
1268 	 */
1269 	if (btrfs_is_data_reloc_root(root))
1270 		min_alloc_size = num_bytes;
1271 	else
1272 		min_alloc_size = fs_info->sectorsize;
1273 
1274 	while (num_bytes > 0) {
1275 		cur_alloc_size = num_bytes;
1276 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1277 					   min_alloc_size, 0, alloc_hint,
1278 					   &ins, 1, 1);
1279 		if (ret < 0)
1280 			goto out_unlock;
1281 		cur_alloc_size = ins.offset;
1282 		extent_reserved = true;
1283 
1284 		ram_size = ins.offset;
1285 		em = create_io_em(inode, start, ins.offset, /* len */
1286 				  start, /* orig_start */
1287 				  ins.objectid, /* block_start */
1288 				  ins.offset, /* block_len */
1289 				  ins.offset, /* orig_block_len */
1290 				  ram_size, /* ram_bytes */
1291 				  BTRFS_COMPRESS_NONE, /* compress_type */
1292 				  BTRFS_ORDERED_REGULAR /* type */);
1293 		if (IS_ERR(em)) {
1294 			ret = PTR_ERR(em);
1295 			goto out_reserve;
1296 		}
1297 		free_extent_map(em);
1298 
1299 		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
1300 					       ins.objectid, cur_alloc_size, 0,
1301 					       1 << BTRFS_ORDERED_REGULAR,
1302 					       BTRFS_COMPRESS_NONE);
1303 		if (ret)
1304 			goto out_drop_extent_cache;
1305 
1306 		if (btrfs_is_data_reloc_root(root)) {
1307 			ret = btrfs_reloc_clone_csums(inode, start,
1308 						      cur_alloc_size);
1309 			/*
1310 			 * Only drop cache here, and process as normal.
1311 			 *
1312 			 * We must not allow extent_clear_unlock_delalloc()
1313 			 * at out_unlock label to free meta of this ordered
1314 			 * extent, as its meta should be freed by
1315 			 * btrfs_finish_ordered_io().
1316 			 *
1317 			 * So we must continue until @start is increased to
1318 			 * skip current ordered extent.
1319 			 */
1320 			if (ret)
1321 				btrfs_drop_extent_map_range(inode, start,
1322 							    start + ram_size - 1,
1323 							    false);
1324 		}
1325 
1326 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1327 
1328 		/*
1329 		 * We're not doing compressed IO, don't unlock the first page
1330 		 * (which the caller expects to stay locked), don't clear any
1331 		 * dirty bits and don't set any writeback bits
1332 		 *
1333 		 * Do set the Ordered (Private2) bit so we know this page was
1334 		 * properly setup for writepage.
1335 		 */
1336 		page_ops = unlock ? PAGE_UNLOCK : 0;
1337 		page_ops |= PAGE_SET_ORDERED;
1338 
1339 		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
1340 					     locked_page,
1341 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1342 					     page_ops);
1343 		if (num_bytes < cur_alloc_size)
1344 			num_bytes = 0;
1345 		else
1346 			num_bytes -= cur_alloc_size;
1347 		alloc_hint = ins.objectid + ins.offset;
1348 		start += cur_alloc_size;
1349 		extent_reserved = false;
1350 
1351 		/*
1352 		 * btrfs_reloc_clone_csums() error, since start is increased
1353 		 * extent_clear_unlock_delalloc() at out_unlock label won't
1354 		 * free metadata of current ordered extent, we're OK to exit.
1355 		 */
1356 		if (ret)
1357 			goto out_unlock;
1358 	}
1359 out:
1360 	return ret;
1361 
1362 out_drop_extent_cache:
1363 	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
1364 out_reserve:
1365 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1366 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1367 out_unlock:
1368 	/*
1369 	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
1370 	 * caller to write out the successfully allocated region and retry.
1371 	 */
1372 	if (done_offset && ret == -EAGAIN) {
1373 		if (orig_start < start)
1374 			*done_offset = start - 1;
1375 		else
1376 			*done_offset = start;
1377 		return ret;
1378 	} else if (ret == -EAGAIN) {
1379 		/* Convert to -ENOSPC since the caller cannot retry. */
1380 		ret = -ENOSPC;
1381 	}
1382 
1383 	/*
1384 	 * Now, we have three regions to clean up:
1385 	 *
1386 	 * |-------(1)----|---(2)---|-------------(3)----------|
1387 	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1388 	 *
1389 	 * We process each region below.
1390 	 */
1391 
1392 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1393 		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1394 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1395 
1396 	/*
1397 	 * For the range (1). We have already instantiated the ordered extents
1398 	 * for this region. They are cleaned up by
1399 	 * btrfs_cleanup_ordered_extents() in e.g,
1400 	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
1401 	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
1402 	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
1403 	 * function.
1404 	 *
1405 	 * However, in case of unlock == 0, we still need to unlock the pages
1406 	 * (except @locked_page) to ensure all the pages are unlocked.
1407 	 */
1408 	if (!unlock && orig_start < start) {
1409 		if (!locked_page)
1410 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1411 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1412 					     locked_page, 0, page_ops);
1413 	}
1414 
1415 	/*
1416 	 * For the range (2). If we reserved an extent for our delalloc range
1417 	 * (or a subrange) and failed to create the respective ordered extent,
1418 	 * then it means that when we reserved the extent we decremented the
1419 	 * extent's size from the data space_info's bytes_may_use counter and
1420 	 * incremented the space_info's bytes_reserved counter by the same
1421 	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
1422 	 * to decrement again the data space_info's bytes_may_use counter,
1423 	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
1424 	 */
1425 	if (extent_reserved) {
1426 		extent_clear_unlock_delalloc(inode, start,
1427 					     start + cur_alloc_size - 1,
1428 					     locked_page,
1429 					     clear_bits,
1430 					     page_ops);
1431 		start += cur_alloc_size;
1432 		if (start >= end)
1433 			return ret;
1434 	}
1435 
1436 	/*
1437 	 * For the range (3). We never touched the region. In addition to the
1438 	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1439 	 * space_info's bytes_may_use counter, reserved in
1440 	 * btrfs_check_data_free_space().
1441 	 */
1442 	extent_clear_unlock_delalloc(inode, start, end, locked_page,
1443 				     clear_bits | EXTENT_CLEAR_DATA_RESV,
1444 				     page_ops);
1445 	return ret;
1446 }
1447 
1448 /*
1449  * work queue call back to started compression on a file and pages
1450  */
1451 static noinline void async_cow_start(struct btrfs_work *work)
1452 {
1453 	struct async_chunk *async_chunk;
1454 	int compressed_extents;
1455 
1456 	async_chunk = container_of(work, struct async_chunk, work);
1457 
1458 	compressed_extents = compress_file_range(async_chunk);
1459 	if (compressed_extents == 0) {
1460 		btrfs_add_delayed_iput(async_chunk->inode);
1461 		async_chunk->inode = NULL;
1462 	}
1463 }
1464 
1465 /*
1466  * work queue call back to submit previously compressed pages
1467  */
1468 static noinline void async_cow_submit(struct btrfs_work *work)
1469 {
1470 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1471 						     work);
1472 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1473 	unsigned long nr_pages;
1474 
1475 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1476 		PAGE_SHIFT;
1477 
1478 	/*
1479 	 * ->inode could be NULL if async_chunk_start has failed to compress,
1480 	 * in which case we don't have anything to submit, yet we need to
1481 	 * always adjust ->async_delalloc_pages as its paired with the init
1482 	 * happening in cow_file_range_async
1483 	 */
1484 	if (async_chunk->inode)
1485 		submit_compressed_extents(async_chunk);
1486 
1487 	/* atomic_sub_return implies a barrier */
1488 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1489 	    5 * SZ_1M)
1490 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1491 }
1492 
1493 static noinline void async_cow_free(struct btrfs_work *work)
1494 {
1495 	struct async_chunk *async_chunk;
1496 	struct async_cow *async_cow;
1497 
1498 	async_chunk = container_of(work, struct async_chunk, work);
1499 	if (async_chunk->inode)
1500 		btrfs_add_delayed_iput(async_chunk->inode);
1501 	if (async_chunk->blkcg_css)
1502 		css_put(async_chunk->blkcg_css);
1503 
1504 	async_cow = async_chunk->async_cow;
1505 	if (atomic_dec_and_test(&async_cow->num_chunks))
1506 		kvfree(async_cow);
1507 }
1508 
1509 static int cow_file_range_async(struct btrfs_inode *inode,
1510 				struct writeback_control *wbc,
1511 				struct page *locked_page,
1512 				u64 start, u64 end, int *page_started,
1513 				unsigned long *nr_written)
1514 {
1515 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1516 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1517 	struct async_cow *ctx;
1518 	struct async_chunk *async_chunk;
1519 	unsigned long nr_pages;
1520 	u64 cur_end;
1521 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1522 	int i;
1523 	bool should_compress;
1524 	unsigned nofs_flag;
1525 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1526 
1527 	unlock_extent(&inode->io_tree, start, end, NULL);
1528 
1529 	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
1530 	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
1531 		num_chunks = 1;
1532 		should_compress = false;
1533 	} else {
1534 		should_compress = true;
1535 	}
1536 
1537 	nofs_flag = memalloc_nofs_save();
1538 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1539 	memalloc_nofs_restore(nofs_flag);
1540 
1541 	if (!ctx) {
1542 		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
1543 			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1544 			EXTENT_DO_ACCOUNTING;
1545 		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
1546 					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;
1547 
1548 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1549 					     clear_bits, page_ops);
1550 		return -ENOMEM;
1551 	}
1552 
1553 	async_chunk = ctx->chunks;
1554 	atomic_set(&ctx->num_chunks, num_chunks);
1555 
1556 	for (i = 0; i < num_chunks; i++) {
1557 		if (should_compress)
1558 			cur_end = min(end, start + SZ_512K - 1);
1559 		else
1560 			cur_end = end;
1561 
1562 		/*
1563 		 * igrab is called higher up in the call chain, take only the
1564 		 * lightweight reference for the callback lifetime
1565 		 */
1566 		ihold(&inode->vfs_inode);
1567 		async_chunk[i].async_cow = ctx;
1568 		async_chunk[i].inode = &inode->vfs_inode;
1569 		async_chunk[i].start = start;
1570 		async_chunk[i].end = cur_end;
1571 		async_chunk[i].write_flags = write_flags;
1572 		INIT_LIST_HEAD(&async_chunk[i].extents);
1573 
1574 		/*
1575 		 * The locked_page comes all the way from writepage and its
1576 		 * the original page we were actually given.  As we spread
1577 		 * this large delalloc region across multiple async_chunk
1578 		 * structs, only the first struct needs a pointer to locked_page
1579 		 *
1580 		 * This way we don't need racey decisions about who is supposed
1581 		 * to unlock it.
1582 		 */
1583 		if (locked_page) {
1584 			/*
1585 			 * Depending on the compressibility, the pages might or
1586 			 * might not go through async.  We want all of them to
1587 			 * be accounted against wbc once.  Let's do it here
1588 			 * before the paths diverge.  wbc accounting is used
1589 			 * only for foreign writeback detection and doesn't
1590 			 * need full accuracy.  Just account the whole thing
1591 			 * against the first page.
1592 			 */
1593 			wbc_account_cgroup_owner(wbc, locked_page,
1594 						 cur_end - start);
1595 			async_chunk[i].locked_page = locked_page;
1596 			locked_page = NULL;
1597 		} else {
1598 			async_chunk[i].locked_page = NULL;
1599 		}
1600 
1601 		if (blkcg_css != blkcg_root_css) {
1602 			css_get(blkcg_css);
1603 			async_chunk[i].blkcg_css = blkcg_css;
1604 		} else {
1605 			async_chunk[i].blkcg_css = NULL;
1606 		}
1607 
1608 		btrfs_init_work(&async_chunk[i].work, async_cow_start,
1609 				async_cow_submit, async_cow_free);
1610 
1611 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1612 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1613 
1614 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1615 
1616 		*nr_written += nr_pages;
1617 		start = cur_end + 1;
1618 	}
1619 	*page_started = 1;
1620 	return 0;
1621 }
1622 
1623 static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
1624 				       struct page *locked_page, u64 start,
1625 				       u64 end, int *page_started,
1626 				       unsigned long *nr_written)
1627 {
1628 	u64 done_offset = end;
1629 	int ret;
1630 	bool locked_page_done = false;
1631 
1632 	while (start <= end) {
1633 		ret = cow_file_range(inode, locked_page, start, end, page_started,
1634 				     nr_written, 0, &done_offset);
1635 		if (ret && ret != -EAGAIN)
1636 			return ret;
1637 
1638 		if (*page_started) {
1639 			ASSERT(ret == 0);
1640 			return 0;
1641 		}
1642 
1643 		if (ret == 0)
1644 			done_offset = end;
1645 
1646 		if (done_offset == start) {
1647 			wait_on_bit_io(&inode->root->fs_info->flags,
1648 				       BTRFS_FS_NEED_ZONE_FINISH,
1649 				       TASK_UNINTERRUPTIBLE);
1650 			continue;
1651 		}
1652 
1653 		if (!locked_page_done) {
1654 			__set_page_dirty_nobuffers(locked_page);
1655 			account_page_redirty(locked_page);
1656 		}
1657 		locked_page_done = true;
1658 		extent_write_locked_range(&inode->vfs_inode, start, done_offset);
1659 
1660 		start = done_offset + 1;
1661 	}
1662 
1663 	*page_started = 1;
1664 
1665 	return 0;
1666 }
1667 
1668 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1669 					u64 bytenr, u64 num_bytes, bool nowait)
1670 {
1671 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
1672 	struct btrfs_ordered_sum *sums;
1673 	int ret;
1674 	LIST_HEAD(list);
1675 
1676 	ret = btrfs_lookup_csums_range(csum_root, bytenr,
1677 				       bytenr + num_bytes - 1, &list, 0,
1678 				       nowait);
1679 	if (ret == 0 && list_empty(&list))
1680 		return 0;
1681 
1682 	while (!list_empty(&list)) {
1683 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1684 		list_del(&sums->list);
1685 		kfree(sums);
1686 	}
1687 	if (ret < 0)
1688 		return ret;
1689 	return 1;
1690 }
1691 
1692 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1693 			   const u64 start, const u64 end,
1694 			   int *page_started, unsigned long *nr_written)
1695 {
1696 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1697 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1698 	const u64 range_bytes = end + 1 - start;
1699 	struct extent_io_tree *io_tree = &inode->io_tree;
1700 	u64 range_start = start;
1701 	u64 count;
1702 
1703 	/*
1704 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1705 	 * made we had not enough available data space and therefore we did not
1706 	 * reserve data space for it, since we though we could do NOCOW for the
1707 	 * respective file range (either there is prealloc extent or the inode
1708 	 * has the NOCOW bit set).
1709 	 *
1710 	 * However when we need to fallback to COW mode (because for example the
1711 	 * block group for the corresponding extent was turned to RO mode by a
1712 	 * scrub or relocation) we need to do the following:
1713 	 *
1714 	 * 1) We increment the bytes_may_use counter of the data space info.
1715 	 *    If COW succeeds, it allocates a new data extent and after doing
1716 	 *    that it decrements the space info's bytes_may_use counter and
1717 	 *    increments its bytes_reserved counter by the same amount (we do
1718 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1719 	 *    bytes_may_use counter to compensate (when space is reserved at
1720 	 *    buffered write time, the bytes_may_use counter is incremented);
1721 	 *
1722 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1723 	 *    that if the COW path fails for any reason, it decrements (through
1724 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1725 	 *    data space info, which we incremented in the step above.
1726 	 *
1727 	 * If we need to fallback to cow and the inode corresponds to a free
1728 	 * space cache inode or an inode of the data relocation tree, we must
1729 	 * also increment bytes_may_use of the data space_info for the same
1730 	 * reason. Space caches and relocated data extents always get a prealloc
1731 	 * extent for them, however scrub or balance may have set the block
1732 	 * group that contains that extent to RO mode and therefore force COW
1733 	 * when starting writeback.
1734 	 */
1735 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1736 				 EXTENT_NORESERVE, 0);
1737 	if (count > 0 || is_space_ino || is_reloc_ino) {
1738 		u64 bytes = count;
1739 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1740 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1741 
1742 		if (is_space_ino || is_reloc_ino)
1743 			bytes = range_bytes;
1744 
1745 		spin_lock(&sinfo->lock);
1746 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1747 		spin_unlock(&sinfo->lock);
1748 
1749 		if (count > 0)
1750 			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1751 					 NULL);
1752 	}
1753 
1754 	return cow_file_range(inode, locked_page, start, end, page_started,
1755 			      nr_written, 1, NULL);
1756 }
1757 
1758 struct can_nocow_file_extent_args {
1759 	/* Input fields. */
1760 
1761 	/* Start file offset of the range we want to NOCOW. */
1762 	u64 start;
1763 	/* End file offset (inclusive) of the range we want to NOCOW. */
1764 	u64 end;
1765 	bool writeback_path;
1766 	bool strict;
1767 	/*
1768 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1769 	 * anymore.
1770 	 */
1771 	bool free_path;
1772 
1773 	/* Output fields. Only set when can_nocow_file_extent() returns 1. */
1774 
1775 	u64 disk_bytenr;
1776 	u64 disk_num_bytes;
1777 	u64 extent_offset;
1778 	/* Number of bytes that can be written to in NOCOW mode. */
1779 	u64 num_bytes;
1780 };
1781 
1782 /*
1783  * Check if we can NOCOW the file extent that the path points to.
1784  * This function may return with the path released, so the caller should check
1785  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1786  *
1787  * Returns: < 0 on error
1788  *            0 if we can not NOCOW
1789  *            1 if we can NOCOW
1790  */
1791 static int can_nocow_file_extent(struct btrfs_path *path,
1792 				 struct btrfs_key *key,
1793 				 struct btrfs_inode *inode,
1794 				 struct can_nocow_file_extent_args *args)
1795 {
1796 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1797 	struct extent_buffer *leaf = path->nodes[0];
1798 	struct btrfs_root *root = inode->root;
1799 	struct btrfs_file_extent_item *fi;
1800 	u64 extent_end;
1801 	u8 extent_type;
1802 	int can_nocow = 0;
1803 	int ret = 0;
1804 	bool nowait = path->nowait;
1805 
1806 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1807 	extent_type = btrfs_file_extent_type(leaf, fi);
1808 
1809 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1810 		goto out;
1811 
1812 	/* Can't access these fields unless we know it's not an inline extent. */
1813 	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1814 	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1815 	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1816 
1817 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1818 	    extent_type == BTRFS_FILE_EXTENT_REG)
1819 		goto out;
1820 
1821 	/*
1822 	 * If the extent was created before the generation where the last snapshot
1823 	 * for its subvolume was created, then this implies the extent is shared,
1824 	 * hence we must COW.
1825 	 */
1826 	if (!args->strict &&
1827 	    btrfs_file_extent_generation(leaf, fi) <=
1828 	    btrfs_root_last_snapshot(&root->root_item))
1829 		goto out;
1830 
1831 	/* An explicit hole, must COW. */
1832 	if (args->disk_bytenr == 0)
1833 		goto out;
1834 
1835 	/* Compressed/encrypted/encoded extents must be COWed. */
1836 	if (btrfs_file_extent_compression(leaf, fi) ||
1837 	    btrfs_file_extent_encryption(leaf, fi) ||
1838 	    btrfs_file_extent_other_encoding(leaf, fi))
1839 		goto out;
1840 
1841 	extent_end = btrfs_file_extent_end(path);
1842 
1843 	/*
1844 	 * The following checks can be expensive, as they need to take other
1845 	 * locks and do btree or rbtree searches, so release the path to avoid
1846 	 * blocking other tasks for too long.
1847 	 */
1848 	btrfs_release_path(path);
1849 
1850 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1851 				    key->offset - args->extent_offset,
1852 				    args->disk_bytenr, false, path);
1853 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1854 	if (ret != 0)
1855 		goto out;
1856 
1857 	if (args->free_path) {
1858 		/*
1859 		 * We don't need the path anymore, plus through the
1860 		 * csum_exist_in_range() call below we will end up allocating
1861 		 * another path. So free the path to avoid unnecessary extra
1862 		 * memory usage.
1863 		 */
1864 		btrfs_free_path(path);
1865 		path = NULL;
1866 	}
1867 
1868 	/* If there are pending snapshots for this root, we must COW. */
1869 	if (args->writeback_path && !is_freespace_inode &&
1870 	    atomic_read(&root->snapshot_force_cow))
1871 		goto out;
1872 
1873 	args->disk_bytenr += args->extent_offset;
1874 	args->disk_bytenr += args->start - key->offset;
1875 	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1876 
1877 	/*
1878 	 * Force COW if csums exist in the range. This ensures that csums for a
1879 	 * given extent are either valid or do not exist.
1880 	 */
1881 	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
1882 				  nowait);
1883 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1884 	if (ret != 0)
1885 		goto out;
1886 
1887 	can_nocow = 1;
1888  out:
1889 	if (args->free_path && path)
1890 		btrfs_free_path(path);
1891 
1892 	return ret < 0 ? ret : can_nocow;
1893 }
1894 
1895 /*
1896  * when nowcow writeback call back.  This checks for snapshots or COW copies
1897  * of the extents that exist in the file, and COWs the file as required.
1898  *
1899  * If no cow copies or snapshots exist, we write directly to the existing
1900  * blocks on disk
1901  */
1902 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1903 				       struct page *locked_page,
1904 				       const u64 start, const u64 end,
1905 				       int *page_started,
1906 				       unsigned long *nr_written)
1907 {
1908 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1909 	struct btrfs_root *root = inode->root;
1910 	struct btrfs_path *path;
1911 	u64 cow_start = (u64)-1;
1912 	u64 cur_offset = start;
1913 	int ret;
1914 	bool check_prev = true;
1915 	u64 ino = btrfs_ino(inode);
1916 	struct btrfs_block_group *bg;
1917 	bool nocow = false;
1918 	struct can_nocow_file_extent_args nocow_args = { 0 };
1919 
1920 	path = btrfs_alloc_path();
1921 	if (!path) {
1922 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1923 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1924 					     EXTENT_DO_ACCOUNTING |
1925 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1926 					     PAGE_START_WRITEBACK |
1927 					     PAGE_END_WRITEBACK);
1928 		return -ENOMEM;
1929 	}
1930 
1931 	nocow_args.end = end;
1932 	nocow_args.writeback_path = true;
1933 
1934 	while (1) {
1935 		struct btrfs_key found_key;
1936 		struct btrfs_file_extent_item *fi;
1937 		struct extent_buffer *leaf;
1938 		u64 extent_end;
1939 		u64 ram_bytes;
1940 		u64 nocow_end;
1941 		int extent_type;
1942 
1943 		nocow = false;
1944 
1945 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1946 					       cur_offset, 0);
1947 		if (ret < 0)
1948 			goto error;
1949 
1950 		/*
1951 		 * If there is no extent for our range when doing the initial
1952 		 * search, then go back to the previous slot as it will be the
1953 		 * one containing the search offset
1954 		 */
1955 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1956 			leaf = path->nodes[0];
1957 			btrfs_item_key_to_cpu(leaf, &found_key,
1958 					      path->slots[0] - 1);
1959 			if (found_key.objectid == ino &&
1960 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1961 				path->slots[0]--;
1962 		}
1963 		check_prev = false;
1964 next_slot:
1965 		/* Go to next leaf if we have exhausted the current one */
1966 		leaf = path->nodes[0];
1967 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1968 			ret = btrfs_next_leaf(root, path);
1969 			if (ret < 0) {
1970 				if (cow_start != (u64)-1)
1971 					cur_offset = cow_start;
1972 				goto error;
1973 			}
1974 			if (ret > 0)
1975 				break;
1976 			leaf = path->nodes[0];
1977 		}
1978 
1979 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1980 
1981 		/* Didn't find anything for our INO */
1982 		if (found_key.objectid > ino)
1983 			break;
1984 		/*
1985 		 * Keep searching until we find an EXTENT_ITEM or there are no
1986 		 * more extents for this inode
1987 		 */
1988 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
1989 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
1990 			path->slots[0]++;
1991 			goto next_slot;
1992 		}
1993 
1994 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
1995 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1996 		    found_key.offset > end)
1997 			break;
1998 
1999 		/*
2000 		 * If the found extent starts after requested offset, then
2001 		 * adjust extent_end to be right before this extent begins
2002 		 */
2003 		if (found_key.offset > cur_offset) {
2004 			extent_end = found_key.offset;
2005 			extent_type = 0;
2006 			goto out_check;
2007 		}
2008 
2009 		/*
2010 		 * Found extent which begins before our range and potentially
2011 		 * intersect it
2012 		 */
2013 		fi = btrfs_item_ptr(leaf, path->slots[0],
2014 				    struct btrfs_file_extent_item);
2015 		extent_type = btrfs_file_extent_type(leaf, fi);
2016 		/* If this is triggered then we have a memory corruption. */
2017 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2018 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2019 			ret = -EUCLEAN;
2020 			goto error;
2021 		}
2022 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2023 		extent_end = btrfs_file_extent_end(path);
2024 
2025 		/*
2026 		 * If the extent we got ends before our current offset, skip to
2027 		 * the next extent.
2028 		 */
2029 		if (extent_end <= cur_offset) {
2030 			path->slots[0]++;
2031 			goto next_slot;
2032 		}
2033 
2034 		nocow_args.start = cur_offset;
2035 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2036 		if (ret < 0) {
2037 			if (cow_start != (u64)-1)
2038 				cur_offset = cow_start;
2039 			goto error;
2040 		} else if (ret == 0) {
2041 			goto out_check;
2042 		}
2043 
2044 		ret = 0;
2045 		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2046 		if (bg)
2047 			nocow = true;
2048 out_check:
2049 		/*
2050 		 * If nocow is false then record the beginning of the range
2051 		 * that needs to be COWed
2052 		 */
2053 		if (!nocow) {
2054 			if (cow_start == (u64)-1)
2055 				cow_start = cur_offset;
2056 			cur_offset = extent_end;
2057 			if (cur_offset > end)
2058 				break;
2059 			if (!path->nodes[0])
2060 				continue;
2061 			path->slots[0]++;
2062 			goto next_slot;
2063 		}
2064 
2065 		/*
2066 		 * COW range from cow_start to found_key.offset - 1. As the key
2067 		 * will contain the beginning of the first extent that can be
2068 		 * NOCOW, following one which needs to be COW'ed
2069 		 */
2070 		if (cow_start != (u64)-1) {
2071 			ret = fallback_to_cow(inode, locked_page,
2072 					      cow_start, found_key.offset - 1,
2073 					      page_started, nr_written);
2074 			if (ret)
2075 				goto error;
2076 			cow_start = (u64)-1;
2077 		}
2078 
2079 		nocow_end = cur_offset + nocow_args.num_bytes - 1;
2080 
2081 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2082 			u64 orig_start = found_key.offset - nocow_args.extent_offset;
2083 			struct extent_map *em;
2084 
2085 			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2086 					  orig_start,
2087 					  nocow_args.disk_bytenr, /* block_start */
2088 					  nocow_args.num_bytes, /* block_len */
2089 					  nocow_args.disk_num_bytes, /* orig_block_len */
2090 					  ram_bytes, BTRFS_COMPRESS_NONE,
2091 					  BTRFS_ORDERED_PREALLOC);
2092 			if (IS_ERR(em)) {
2093 				ret = PTR_ERR(em);
2094 				goto error;
2095 			}
2096 			free_extent_map(em);
2097 			ret = btrfs_add_ordered_extent(inode,
2098 					cur_offset, nocow_args.num_bytes,
2099 					nocow_args.num_bytes,
2100 					nocow_args.disk_bytenr,
2101 					nocow_args.num_bytes, 0,
2102 					1 << BTRFS_ORDERED_PREALLOC,
2103 					BTRFS_COMPRESS_NONE);
2104 			if (ret) {
2105 				btrfs_drop_extent_map_range(inode, cur_offset,
2106 							    nocow_end, false);
2107 				goto error;
2108 			}
2109 		} else {
2110 			ret = btrfs_add_ordered_extent(inode, cur_offset,
2111 						       nocow_args.num_bytes,
2112 						       nocow_args.num_bytes,
2113 						       nocow_args.disk_bytenr,
2114 						       nocow_args.num_bytes,
2115 						       0,
2116 						       1 << BTRFS_ORDERED_NOCOW,
2117 						       BTRFS_COMPRESS_NONE);
2118 			if (ret)
2119 				goto error;
2120 		}
2121 
2122 		if (nocow) {
2123 			btrfs_dec_nocow_writers(bg);
2124 			nocow = false;
2125 		}
2126 
2127 		if (btrfs_is_data_reloc_root(root))
2128 			/*
2129 			 * Error handled later, as we must prevent
2130 			 * extent_clear_unlock_delalloc() in error handler
2131 			 * from freeing metadata of created ordered extent.
2132 			 */
2133 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
2134 						      nocow_args.num_bytes);
2135 
2136 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2137 					     locked_page, EXTENT_LOCKED |
2138 					     EXTENT_DELALLOC |
2139 					     EXTENT_CLEAR_DATA_RESV,
2140 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2141 
2142 		cur_offset = extent_end;
2143 
2144 		/*
2145 		 * btrfs_reloc_clone_csums() error, now we're OK to call error
2146 		 * handler, as metadata for created ordered extent will only
2147 		 * be freed by btrfs_finish_ordered_io().
2148 		 */
2149 		if (ret)
2150 			goto error;
2151 		if (cur_offset > end)
2152 			break;
2153 	}
2154 	btrfs_release_path(path);
2155 
2156 	if (cur_offset <= end && cow_start == (u64)-1)
2157 		cow_start = cur_offset;
2158 
2159 	if (cow_start != (u64)-1) {
2160 		cur_offset = end;
2161 		ret = fallback_to_cow(inode, locked_page, cow_start, end,
2162 				      page_started, nr_written);
2163 		if (ret)
2164 			goto error;
2165 	}
2166 
2167 error:
2168 	if (nocow)
2169 		btrfs_dec_nocow_writers(bg);
2170 
2171 	if (ret && cur_offset < end)
2172 		extent_clear_unlock_delalloc(inode, cur_offset, end,
2173 					     locked_page, EXTENT_LOCKED |
2174 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2175 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2176 					     PAGE_START_WRITEBACK |
2177 					     PAGE_END_WRITEBACK);
2178 	btrfs_free_path(path);
2179 	return ret;
2180 }
2181 
2182 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2183 {
2184 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2185 		if (inode->defrag_bytes &&
2186 		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
2187 				   0, NULL))
2188 			return false;
2189 		return true;
2190 	}
2191 	return false;
2192 }
2193 
2194 /*
2195  * Function to process delayed allocation (create CoW) for ranges which are
2196  * being touched for the first time.
2197  */
2198 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2199 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
2200 		struct writeback_control *wbc)
2201 {
2202 	int ret;
2203 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2204 
2205 	/*
2206 	 * The range must cover part of the @locked_page, or the returned
2207 	 * @page_started can confuse the caller.
2208 	 */
2209 	ASSERT(!(end <= page_offset(locked_page) ||
2210 		 start >= page_offset(locked_page) + PAGE_SIZE));
2211 
2212 	if (should_nocow(inode, start, end)) {
2213 		/*
2214 		 * Normally on a zoned device we're only doing COW writes, but
2215 		 * in case of relocation on a zoned filesystem we have taken
2216 		 * precaution, that we're only writing sequentially. It's safe
2217 		 * to use run_delalloc_nocow() here, like for  regular
2218 		 * preallocated inodes.
2219 		 */
2220 		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
2221 		ret = run_delalloc_nocow(inode, locked_page, start, end,
2222 					 page_started, nr_written);
2223 	} else if (!btrfs_inode_can_compress(inode) ||
2224 		   !inode_need_compress(inode, start, end)) {
2225 		if (zoned)
2226 			ret = run_delalloc_zoned(inode, locked_page, start, end,
2227 						 page_started, nr_written);
2228 		else
2229 			ret = cow_file_range(inode, locked_page, start, end,
2230 					     page_started, nr_written, 1, NULL);
2231 	} else {
2232 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
2233 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
2234 					   page_started, nr_written);
2235 	}
2236 	ASSERT(ret <= 0);
2237 	if (ret)
2238 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2239 					      end - start + 1);
2240 	return ret;
2241 }
2242 
2243 void btrfs_split_delalloc_extent(struct inode *inode,
2244 				 struct extent_state *orig, u64 split)
2245 {
2246 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2247 	u64 size;
2248 
2249 	/* not delalloc, ignore it */
2250 	if (!(orig->state & EXTENT_DELALLOC))
2251 		return;
2252 
2253 	size = orig->end - orig->start + 1;
2254 	if (size > fs_info->max_extent_size) {
2255 		u32 num_extents;
2256 		u64 new_size;
2257 
2258 		/*
2259 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2260 		 * applies here, just in reverse.
2261 		 */
2262 		new_size = orig->end - split + 1;
2263 		num_extents = count_max_extents(fs_info, new_size);
2264 		new_size = split - orig->start;
2265 		num_extents += count_max_extents(fs_info, new_size);
2266 		if (count_max_extents(fs_info, size) >= num_extents)
2267 			return;
2268 	}
2269 
2270 	spin_lock(&BTRFS_I(inode)->lock);
2271 	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
2272 	spin_unlock(&BTRFS_I(inode)->lock);
2273 }
2274 
2275 /*
2276  * Handle merged delayed allocation extents so we can keep track of new extents
2277  * that are just merged onto old extents, such as when we are doing sequential
2278  * writes, so we can properly account for the metadata space we'll need.
2279  */
2280 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
2281 				 struct extent_state *other)
2282 {
2283 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2284 	u64 new_size, old_size;
2285 	u32 num_extents;
2286 
2287 	/* not delalloc, ignore it */
2288 	if (!(other->state & EXTENT_DELALLOC))
2289 		return;
2290 
2291 	if (new->start > other->start)
2292 		new_size = new->end - other->start + 1;
2293 	else
2294 		new_size = other->end - new->start + 1;
2295 
2296 	/* we're not bigger than the max, unreserve the space and go */
2297 	if (new_size <= fs_info->max_extent_size) {
2298 		spin_lock(&BTRFS_I(inode)->lock);
2299 		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2300 		spin_unlock(&BTRFS_I(inode)->lock);
2301 		return;
2302 	}
2303 
2304 	/*
2305 	 * We have to add up either side to figure out how many extents were
2306 	 * accounted for before we merged into one big extent.  If the number of
2307 	 * extents we accounted for is <= the amount we need for the new range
2308 	 * then we can return, otherwise drop.  Think of it like this
2309 	 *
2310 	 * [ 4k][MAX_SIZE]
2311 	 *
2312 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2313 	 * need 2 outstanding extents, on one side we have 1 and the other side
2314 	 * we have 1 so they are == and we can return.  But in this case
2315 	 *
2316 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2317 	 *
2318 	 * Each range on their own accounts for 2 extents, but merged together
2319 	 * they are only 3 extents worth of accounting, so we need to drop in
2320 	 * this case.
2321 	 */
2322 	old_size = other->end - other->start + 1;
2323 	num_extents = count_max_extents(fs_info, old_size);
2324 	old_size = new->end - new->start + 1;
2325 	num_extents += count_max_extents(fs_info, old_size);
2326 	if (count_max_extents(fs_info, new_size) >= num_extents)
2327 		return;
2328 
2329 	spin_lock(&BTRFS_I(inode)->lock);
2330 	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2331 	spin_unlock(&BTRFS_I(inode)->lock);
2332 }
2333 
2334 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2335 				      struct inode *inode)
2336 {
2337 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2338 
2339 	spin_lock(&root->delalloc_lock);
2340 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
2341 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
2342 			      &root->delalloc_inodes);
2343 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2344 			&BTRFS_I(inode)->runtime_flags);
2345 		root->nr_delalloc_inodes++;
2346 		if (root->nr_delalloc_inodes == 1) {
2347 			spin_lock(&fs_info->delalloc_root_lock);
2348 			BUG_ON(!list_empty(&root->delalloc_root));
2349 			list_add_tail(&root->delalloc_root,
2350 				      &fs_info->delalloc_roots);
2351 			spin_unlock(&fs_info->delalloc_root_lock);
2352 		}
2353 	}
2354 	spin_unlock(&root->delalloc_lock);
2355 }
2356 
2357 
2358 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2359 				struct btrfs_inode *inode)
2360 {
2361 	struct btrfs_fs_info *fs_info = root->fs_info;
2362 
2363 	if (!list_empty(&inode->delalloc_inodes)) {
2364 		list_del_init(&inode->delalloc_inodes);
2365 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2366 			  &inode->runtime_flags);
2367 		root->nr_delalloc_inodes--;
2368 		if (!root->nr_delalloc_inodes) {
2369 			ASSERT(list_empty(&root->delalloc_inodes));
2370 			spin_lock(&fs_info->delalloc_root_lock);
2371 			BUG_ON(list_empty(&root->delalloc_root));
2372 			list_del_init(&root->delalloc_root);
2373 			spin_unlock(&fs_info->delalloc_root_lock);
2374 		}
2375 	}
2376 }
2377 
2378 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2379 				     struct btrfs_inode *inode)
2380 {
2381 	spin_lock(&root->delalloc_lock);
2382 	__btrfs_del_delalloc_inode(root, inode);
2383 	spin_unlock(&root->delalloc_lock);
2384 }
2385 
2386 /*
2387  * Properly track delayed allocation bytes in the inode and to maintain the
2388  * list of inodes that have pending delalloc work to be done.
2389  */
2390 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
2391 			       u32 bits)
2392 {
2393 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2394 
2395 	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2396 		WARN_ON(1);
2397 	/*
2398 	 * set_bit and clear bit hooks normally require _irqsave/restore
2399 	 * but in this case, we are only testing for the DELALLOC
2400 	 * bit, which is only set or cleared with irqs on
2401 	 */
2402 	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2403 		struct btrfs_root *root = BTRFS_I(inode)->root;
2404 		u64 len = state->end + 1 - state->start;
2405 		u32 num_extents = count_max_extents(fs_info, len);
2406 		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
2407 
2408 		spin_lock(&BTRFS_I(inode)->lock);
2409 		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
2410 		spin_unlock(&BTRFS_I(inode)->lock);
2411 
2412 		/* For sanity tests */
2413 		if (btrfs_is_testing(fs_info))
2414 			return;
2415 
2416 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2417 					 fs_info->delalloc_batch);
2418 		spin_lock(&BTRFS_I(inode)->lock);
2419 		BTRFS_I(inode)->delalloc_bytes += len;
2420 		if (bits & EXTENT_DEFRAG)
2421 			BTRFS_I(inode)->defrag_bytes += len;
2422 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2423 					 &BTRFS_I(inode)->runtime_flags))
2424 			btrfs_add_delalloc_inodes(root, inode);
2425 		spin_unlock(&BTRFS_I(inode)->lock);
2426 	}
2427 
2428 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2429 	    (bits & EXTENT_DELALLOC_NEW)) {
2430 		spin_lock(&BTRFS_I(inode)->lock);
2431 		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
2432 			state->start;
2433 		spin_unlock(&BTRFS_I(inode)->lock);
2434 	}
2435 }
2436 
2437 /*
2438  * Once a range is no longer delalloc this function ensures that proper
2439  * accounting happens.
2440  */
2441 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
2442 				 struct extent_state *state, u32 bits)
2443 {
2444 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
2445 	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
2446 	u64 len = state->end + 1 - state->start;
2447 	u32 num_extents = count_max_extents(fs_info, len);
2448 
2449 	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2450 		spin_lock(&inode->lock);
2451 		inode->defrag_bytes -= len;
2452 		spin_unlock(&inode->lock);
2453 	}
2454 
2455 	/*
2456 	 * set_bit and clear bit hooks normally require _irqsave/restore
2457 	 * but in this case, we are only testing for the DELALLOC
2458 	 * bit, which is only set or cleared with irqs on
2459 	 */
2460 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2461 		struct btrfs_root *root = inode->root;
2462 		bool do_list = !btrfs_is_free_space_inode(inode);
2463 
2464 		spin_lock(&inode->lock);
2465 		btrfs_mod_outstanding_extents(inode, -num_extents);
2466 		spin_unlock(&inode->lock);
2467 
2468 		/*
2469 		 * We don't reserve metadata space for space cache inodes so we
2470 		 * don't need to call delalloc_release_metadata if there is an
2471 		 * error.
2472 		 */
2473 		if (bits & EXTENT_CLEAR_META_RESV &&
2474 		    root != fs_info->tree_root)
2475 			btrfs_delalloc_release_metadata(inode, len, false);
2476 
2477 		/* For sanity tests. */
2478 		if (btrfs_is_testing(fs_info))
2479 			return;
2480 
2481 		if (!btrfs_is_data_reloc_root(root) &&
2482 		    do_list && !(state->state & EXTENT_NORESERVE) &&
2483 		    (bits & EXTENT_CLEAR_DATA_RESV))
2484 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2485 
2486 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2487 					 fs_info->delalloc_batch);
2488 		spin_lock(&inode->lock);
2489 		inode->delalloc_bytes -= len;
2490 		if (do_list && inode->delalloc_bytes == 0 &&
2491 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2492 					&inode->runtime_flags))
2493 			btrfs_del_delalloc_inode(root, inode);
2494 		spin_unlock(&inode->lock);
2495 	}
2496 
2497 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2498 	    (bits & EXTENT_DELALLOC_NEW)) {
2499 		spin_lock(&inode->lock);
2500 		ASSERT(inode->new_delalloc_bytes >= len);
2501 		inode->new_delalloc_bytes -= len;
2502 		if (bits & EXTENT_ADD_INODE_BYTES)
2503 			inode_add_bytes(&inode->vfs_inode, len);
2504 		spin_unlock(&inode->lock);
2505 	}
2506 }
2507 
2508 /*
2509  * in order to insert checksums into the metadata in large chunks,
2510  * we wait until bio submission time.   All the pages in the bio are
2511  * checksummed and sums are attached onto the ordered extent record.
2512  *
2513  * At IO completion time the cums attached on the ordered extent record
2514  * are inserted into the btree
2515  */
2516 static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
2517 					   u64 dio_file_offset)
2518 {
2519 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
2520 }
2521 
2522 /*
2523  * Split an extent_map at [start, start + len]
2524  *
2525  * This function is intended to be used only for extract_ordered_extent().
2526  */
2527 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
2528 			  u64 pre, u64 post)
2529 {
2530 	struct extent_map_tree *em_tree = &inode->extent_tree;
2531 	struct extent_map *em;
2532 	struct extent_map *split_pre = NULL;
2533 	struct extent_map *split_mid = NULL;
2534 	struct extent_map *split_post = NULL;
2535 	int ret = 0;
2536 	unsigned long flags;
2537 
2538 	/* Sanity check */
2539 	if (pre == 0 && post == 0)
2540 		return 0;
2541 
2542 	split_pre = alloc_extent_map();
2543 	if (pre)
2544 		split_mid = alloc_extent_map();
2545 	if (post)
2546 		split_post = alloc_extent_map();
2547 	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
2548 		ret = -ENOMEM;
2549 		goto out;
2550 	}
2551 
2552 	ASSERT(pre + post < len);
2553 
2554 	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
2555 	write_lock(&em_tree->lock);
2556 	em = lookup_extent_mapping(em_tree, start, len);
2557 	if (!em) {
2558 		ret = -EIO;
2559 		goto out_unlock;
2560 	}
2561 
2562 	ASSERT(em->len == len);
2563 	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
2564 	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
2565 	ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
2566 	ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
2567 	ASSERT(!list_empty(&em->list));
2568 
2569 	flags = em->flags;
2570 	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
2571 
2572 	/* First, replace the em with a new extent_map starting from * em->start */
2573 	split_pre->start = em->start;
2574 	split_pre->len = (pre ? pre : em->len - post);
2575 	split_pre->orig_start = split_pre->start;
2576 	split_pre->block_start = em->block_start;
2577 	split_pre->block_len = split_pre->len;
2578 	split_pre->orig_block_len = split_pre->block_len;
2579 	split_pre->ram_bytes = split_pre->len;
2580 	split_pre->flags = flags;
2581 	split_pre->compress_type = em->compress_type;
2582 	split_pre->generation = em->generation;
2583 
2584 	replace_extent_mapping(em_tree, em, split_pre, 1);
2585 
2586 	/*
2587 	 * Now we only have an extent_map at:
2588 	 *     [em->start, em->start + pre] if pre != 0
2589 	 *     [em->start, em->start + em->len - post] if pre == 0
2590 	 */
2591 
2592 	if (pre) {
2593 		/* Insert the middle extent_map */
2594 		split_mid->start = em->start + pre;
2595 		split_mid->len = em->len - pre - post;
2596 		split_mid->orig_start = split_mid->start;
2597 		split_mid->block_start = em->block_start + pre;
2598 		split_mid->block_len = split_mid->len;
2599 		split_mid->orig_block_len = split_mid->block_len;
2600 		split_mid->ram_bytes = split_mid->len;
2601 		split_mid->flags = flags;
2602 		split_mid->compress_type = em->compress_type;
2603 		split_mid->generation = em->generation;
2604 		add_extent_mapping(em_tree, split_mid, 1);
2605 	}
2606 
2607 	if (post) {
2608 		split_post->start = em->start + em->len - post;
2609 		split_post->len = post;
2610 		split_post->orig_start = split_post->start;
2611 		split_post->block_start = em->block_start + em->len - post;
2612 		split_post->block_len = split_post->len;
2613 		split_post->orig_block_len = split_post->block_len;
2614 		split_post->ram_bytes = split_post->len;
2615 		split_post->flags = flags;
2616 		split_post->compress_type = em->compress_type;
2617 		split_post->generation = em->generation;
2618 		add_extent_mapping(em_tree, split_post, 1);
2619 	}
2620 
2621 	/* Once for us */
2622 	free_extent_map(em);
2623 	/* Once for the tree */
2624 	free_extent_map(em);
2625 
2626 out_unlock:
2627 	write_unlock(&em_tree->lock);
2628 	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
2629 out:
2630 	free_extent_map(split_pre);
2631 	free_extent_map(split_mid);
2632 	free_extent_map(split_post);
2633 
2634 	return ret;
2635 }
2636 
2637 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
2638 					   struct bio *bio, loff_t file_offset)
2639 {
2640 	struct btrfs_ordered_extent *ordered;
2641 	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
2642 	u64 file_len;
2643 	u64 len = bio->bi_iter.bi_size;
2644 	u64 end = start + len;
2645 	u64 ordered_end;
2646 	u64 pre, post;
2647 	int ret = 0;
2648 
2649 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
2650 	if (WARN_ON_ONCE(!ordered))
2651 		return BLK_STS_IOERR;
2652 
2653 	/* No need to split */
2654 	if (ordered->disk_num_bytes == len)
2655 		goto out;
2656 
2657 	/* We cannot split once end_bio'd ordered extent */
2658 	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
2659 		ret = -EINVAL;
2660 		goto out;
2661 	}
2662 
2663 	/* We cannot split a compressed ordered extent */
2664 	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
2665 		ret = -EINVAL;
2666 		goto out;
2667 	}
2668 
2669 	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
2670 	/* bio must be in one ordered extent */
2671 	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
2672 		ret = -EINVAL;
2673 		goto out;
2674 	}
2675 
2676 	/* Checksum list should be empty */
2677 	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
2678 		ret = -EINVAL;
2679 		goto out;
2680 	}
2681 
2682 	file_len = ordered->num_bytes;
2683 	pre = start - ordered->disk_bytenr;
2684 	post = ordered_end - end;
2685 
2686 	ret = btrfs_split_ordered_extent(ordered, pre, post);
2687 	if (ret)
2688 		goto out;
2689 	ret = split_zoned_em(inode, file_offset, file_len, pre, post);
2690 
2691 out:
2692 	btrfs_put_ordered_extent(ordered);
2693 
2694 	return errno_to_blk_status(ret);
2695 }
2696 
2697 void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num)
2698 {
2699 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2700 	struct btrfs_inode *bi = BTRFS_I(inode);
2701 	blk_status_t ret;
2702 
2703 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
2704 		ret = extract_ordered_extent(bi, bio,
2705 				page_offset(bio_first_bvec_all(bio)->bv_page));
2706 		if (ret) {
2707 			btrfs_bio_end_io(btrfs_bio(bio), ret);
2708 			return;
2709 		}
2710 	}
2711 
2712 	/*
2713 	 * If we need to checksum, and the I/O is not issued by fsync and
2714 	 * friends, that is ->sync_writers != 0, defer the submission to a
2715 	 * workqueue to parallelize it.
2716 	 *
2717 	 * Csum items for reloc roots have already been cloned at this point,
2718 	 * so they are handled as part of the no-checksum case.
2719 	 */
2720 	if (!(bi->flags & BTRFS_INODE_NODATASUM) &&
2721 	    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
2722 	    !btrfs_is_data_reloc_root(bi->root)) {
2723 		if (!atomic_read(&bi->sync_writers) &&
2724 		    btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
2725 					btrfs_submit_bio_start))
2726 			return;
2727 
2728 		ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
2729 		if (ret) {
2730 			btrfs_bio_end_io(btrfs_bio(bio), ret);
2731 			return;
2732 		}
2733 	}
2734 	btrfs_submit_bio(fs_info, bio, mirror_num);
2735 }
2736 
2737 void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
2738 			int mirror_num, enum btrfs_compression_type compress_type)
2739 {
2740 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2741 	blk_status_t ret;
2742 
2743 	if (compress_type != BTRFS_COMPRESS_NONE) {
2744 		/*
2745 		 * btrfs_submit_compressed_read will handle completing the bio
2746 		 * if there were any errors, so just return here.
2747 		 */
2748 		btrfs_submit_compressed_read(inode, bio, mirror_num);
2749 		return;
2750 	}
2751 
2752 	/* Save the original iter for read repair */
2753 	btrfs_bio(bio)->iter = bio->bi_iter;
2754 
2755 	/*
2756 	 * Lookup bio sums does extra checks around whether we need to csum or
2757 	 * not, which is why we ignore skip_sum here.
2758 	 */
2759 	ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2760 	if (ret) {
2761 		btrfs_bio_end_io(btrfs_bio(bio), ret);
2762 		return;
2763 	}
2764 
2765 	btrfs_submit_bio(fs_info, bio, mirror_num);
2766 }
2767 
2768 /*
2769  * given a list of ordered sums record them in the inode.  This happens
2770  * at IO completion time based on sums calculated at bio submission time.
2771  */
2772 static int add_pending_csums(struct btrfs_trans_handle *trans,
2773 			     struct list_head *list)
2774 {
2775 	struct btrfs_ordered_sum *sum;
2776 	struct btrfs_root *csum_root = NULL;
2777 	int ret;
2778 
2779 	list_for_each_entry(sum, list, list) {
2780 		trans->adding_csums = true;
2781 		if (!csum_root)
2782 			csum_root = btrfs_csum_root(trans->fs_info,
2783 						    sum->bytenr);
2784 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2785 		trans->adding_csums = false;
2786 		if (ret)
2787 			return ret;
2788 	}
2789 	return 0;
2790 }
2791 
2792 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2793 					 const u64 start,
2794 					 const u64 len,
2795 					 struct extent_state **cached_state)
2796 {
2797 	u64 search_start = start;
2798 	const u64 end = start + len - 1;
2799 
2800 	while (search_start < end) {
2801 		const u64 search_len = end - search_start + 1;
2802 		struct extent_map *em;
2803 		u64 em_len;
2804 		int ret = 0;
2805 
2806 		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2807 		if (IS_ERR(em))
2808 			return PTR_ERR(em);
2809 
2810 		if (em->block_start != EXTENT_MAP_HOLE)
2811 			goto next;
2812 
2813 		em_len = em->len;
2814 		if (em->start < search_start)
2815 			em_len -= search_start - em->start;
2816 		if (em_len > search_len)
2817 			em_len = search_len;
2818 
2819 		ret = set_extent_bit(&inode->io_tree, search_start,
2820 				     search_start + em_len - 1,
2821 				     EXTENT_DELALLOC_NEW, cached_state,
2822 				     GFP_NOFS);
2823 next:
2824 		search_start = extent_map_end(em);
2825 		free_extent_map(em);
2826 		if (ret)
2827 			return ret;
2828 	}
2829 	return 0;
2830 }
2831 
2832 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2833 			      unsigned int extra_bits,
2834 			      struct extent_state **cached_state)
2835 {
2836 	WARN_ON(PAGE_ALIGNED(end));
2837 
2838 	if (start >= i_size_read(&inode->vfs_inode) &&
2839 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2840 		/*
2841 		 * There can't be any extents following eof in this case so just
2842 		 * set the delalloc new bit for the range directly.
2843 		 */
2844 		extra_bits |= EXTENT_DELALLOC_NEW;
2845 	} else {
2846 		int ret;
2847 
2848 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2849 						    end + 1 - start,
2850 						    cached_state);
2851 		if (ret)
2852 			return ret;
2853 	}
2854 
2855 	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
2856 				   cached_state);
2857 }
2858 
2859 /* see btrfs_writepage_start_hook for details on why this is required */
2860 struct btrfs_writepage_fixup {
2861 	struct page *page;
2862 	struct inode *inode;
2863 	struct btrfs_work work;
2864 };
2865 
2866 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2867 {
2868 	struct btrfs_writepage_fixup *fixup;
2869 	struct btrfs_ordered_extent *ordered;
2870 	struct extent_state *cached_state = NULL;
2871 	struct extent_changeset *data_reserved = NULL;
2872 	struct page *page;
2873 	struct btrfs_inode *inode;
2874 	u64 page_start;
2875 	u64 page_end;
2876 	int ret = 0;
2877 	bool free_delalloc_space = true;
2878 
2879 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
2880 	page = fixup->page;
2881 	inode = BTRFS_I(fixup->inode);
2882 	page_start = page_offset(page);
2883 	page_end = page_offset(page) + PAGE_SIZE - 1;
2884 
2885 	/*
2886 	 * This is similar to page_mkwrite, we need to reserve the space before
2887 	 * we take the page lock.
2888 	 */
2889 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2890 					   PAGE_SIZE);
2891 again:
2892 	lock_page(page);
2893 
2894 	/*
2895 	 * Before we queued this fixup, we took a reference on the page.
2896 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2897 	 * address space.
2898 	 */
2899 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2900 		/*
2901 		 * Unfortunately this is a little tricky, either
2902 		 *
2903 		 * 1) We got here and our page had already been dealt with and
2904 		 *    we reserved our space, thus ret == 0, so we need to just
2905 		 *    drop our space reservation and bail.  This can happen the
2906 		 *    first time we come into the fixup worker, or could happen
2907 		 *    while waiting for the ordered extent.
2908 		 * 2) Our page was already dealt with, but we happened to get an
2909 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2910 		 *    this case we obviously don't have anything to release, but
2911 		 *    because the page was already dealt with we don't want to
2912 		 *    mark the page with an error, so make sure we're resetting
2913 		 *    ret to 0.  This is why we have this check _before_ the ret
2914 		 *    check, because we do not want to have a surprise ENOSPC
2915 		 *    when the page was already properly dealt with.
2916 		 */
2917 		if (!ret) {
2918 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2919 			btrfs_delalloc_release_space(inode, data_reserved,
2920 						     page_start, PAGE_SIZE,
2921 						     true);
2922 		}
2923 		ret = 0;
2924 		goto out_page;
2925 	}
2926 
2927 	/*
2928 	 * We can't mess with the page state unless it is locked, so now that
2929 	 * it is locked bail if we failed to make our space reservation.
2930 	 */
2931 	if (ret)
2932 		goto out_page;
2933 
2934 	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2935 
2936 	/* already ordered? We're done */
2937 	if (PageOrdered(page))
2938 		goto out_reserved;
2939 
2940 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2941 	if (ordered) {
2942 		unlock_extent(&inode->io_tree, page_start, page_end,
2943 			      &cached_state);
2944 		unlock_page(page);
2945 		btrfs_start_ordered_extent(ordered, 1);
2946 		btrfs_put_ordered_extent(ordered);
2947 		goto again;
2948 	}
2949 
2950 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2951 					&cached_state);
2952 	if (ret)
2953 		goto out_reserved;
2954 
2955 	/*
2956 	 * Everything went as planned, we're now the owner of a dirty page with
2957 	 * delayed allocation bits set and space reserved for our COW
2958 	 * destination.
2959 	 *
2960 	 * The page was dirty when we started, nothing should have cleaned it.
2961 	 */
2962 	BUG_ON(!PageDirty(page));
2963 	free_delalloc_space = false;
2964 out_reserved:
2965 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2966 	if (free_delalloc_space)
2967 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2968 					     PAGE_SIZE, true);
2969 	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2970 out_page:
2971 	if (ret) {
2972 		/*
2973 		 * We hit ENOSPC or other errors.  Update the mapping and page
2974 		 * to reflect the errors and clean the page.
2975 		 */
2976 		mapping_set_error(page->mapping, ret);
2977 		end_extent_writepage(page, ret, page_start, page_end);
2978 		clear_page_dirty_for_io(page);
2979 		SetPageError(page);
2980 	}
2981 	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
2982 	unlock_page(page);
2983 	put_page(page);
2984 	kfree(fixup);
2985 	extent_changeset_free(data_reserved);
2986 	/*
2987 	 * As a precaution, do a delayed iput in case it would be the last iput
2988 	 * that could need flushing space. Recursing back to fixup worker would
2989 	 * deadlock.
2990 	 */
2991 	btrfs_add_delayed_iput(&inode->vfs_inode);
2992 }
2993 
2994 /*
2995  * There are a few paths in the higher layers of the kernel that directly
2996  * set the page dirty bit without asking the filesystem if it is a
2997  * good idea.  This causes problems because we want to make sure COW
2998  * properly happens and the data=ordered rules are followed.
2999  *
3000  * In our case any range that doesn't have the ORDERED bit set
3001  * hasn't been properly setup for IO.  We kick off an async process
3002  * to fix it up.  The async helper will wait for ordered extents, set
3003  * the delalloc bit and make it safe to write the page.
3004  */
3005 int btrfs_writepage_cow_fixup(struct page *page)
3006 {
3007 	struct inode *inode = page->mapping->host;
3008 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3009 	struct btrfs_writepage_fixup *fixup;
3010 
3011 	/* This page has ordered extent covering it already */
3012 	if (PageOrdered(page))
3013 		return 0;
3014 
3015 	/*
3016 	 * PageChecked is set below when we create a fixup worker for this page,
3017 	 * don't try to create another one if we're already PageChecked()
3018 	 *
3019 	 * The extent_io writepage code will redirty the page if we send back
3020 	 * EAGAIN.
3021 	 */
3022 	if (PageChecked(page))
3023 		return -EAGAIN;
3024 
3025 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
3026 	if (!fixup)
3027 		return -EAGAIN;
3028 
3029 	/*
3030 	 * We are already holding a reference to this inode from
3031 	 * write_cache_pages.  We need to hold it because the space reservation
3032 	 * takes place outside of the page lock, and we can't trust
3033 	 * page->mapping outside of the page lock.
3034 	 */
3035 	ihold(inode);
3036 	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
3037 	get_page(page);
3038 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
3039 	fixup->page = page;
3040 	fixup->inode = inode;
3041 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
3042 
3043 	return -EAGAIN;
3044 }
3045 
3046 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
3047 				       struct btrfs_inode *inode, u64 file_pos,
3048 				       struct btrfs_file_extent_item *stack_fi,
3049 				       const bool update_inode_bytes,
3050 				       u64 qgroup_reserved)
3051 {
3052 	struct btrfs_root *root = inode->root;
3053 	const u64 sectorsize = root->fs_info->sectorsize;
3054 	struct btrfs_path *path;
3055 	struct extent_buffer *leaf;
3056 	struct btrfs_key ins;
3057 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
3058 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
3059 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
3060 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
3061 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
3062 	struct btrfs_drop_extents_args drop_args = { 0 };
3063 	int ret;
3064 
3065 	path = btrfs_alloc_path();
3066 	if (!path)
3067 		return -ENOMEM;
3068 
3069 	/*
3070 	 * we may be replacing one extent in the tree with another.
3071 	 * The new extent is pinned in the extent map, and we don't want
3072 	 * to drop it from the cache until it is completely in the btree.
3073 	 *
3074 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
3075 	 * the caller is expected to unpin it and allow it to be merged
3076 	 * with the others.
3077 	 */
3078 	drop_args.path = path;
3079 	drop_args.start = file_pos;
3080 	drop_args.end = file_pos + num_bytes;
3081 	drop_args.replace_extent = true;
3082 	drop_args.extent_item_size = sizeof(*stack_fi);
3083 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3084 	if (ret)
3085 		goto out;
3086 
3087 	if (!drop_args.extent_inserted) {
3088 		ins.objectid = btrfs_ino(inode);
3089 		ins.offset = file_pos;
3090 		ins.type = BTRFS_EXTENT_DATA_KEY;
3091 
3092 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
3093 					      sizeof(*stack_fi));
3094 		if (ret)
3095 			goto out;
3096 	}
3097 	leaf = path->nodes[0];
3098 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3099 	write_extent_buffer(leaf, stack_fi,
3100 			btrfs_item_ptr_offset(leaf, path->slots[0]),
3101 			sizeof(struct btrfs_file_extent_item));
3102 
3103 	btrfs_mark_buffer_dirty(leaf);
3104 	btrfs_release_path(path);
3105 
3106 	/*
3107 	 * If we dropped an inline extent here, we know the range where it is
3108 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3109 	 * number of bytes only for that range containing the inline extent.
3110 	 * The remaining of the range will be processed when clearning the
3111 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
3112 	 */
3113 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3114 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3115 
3116 		inline_size = drop_args.bytes_found - inline_size;
3117 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3118 		drop_args.bytes_found -= inline_size;
3119 		num_bytes -= sectorsize;
3120 	}
3121 
3122 	if (update_inode_bytes)
3123 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3124 
3125 	ins.objectid = disk_bytenr;
3126 	ins.offset = disk_num_bytes;
3127 	ins.type = BTRFS_EXTENT_ITEM_KEY;
3128 
3129 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3130 	if (ret)
3131 		goto out;
3132 
3133 	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3134 					       file_pos - offset,
3135 					       qgroup_reserved, &ins);
3136 out:
3137 	btrfs_free_path(path);
3138 
3139 	return ret;
3140 }
3141 
3142 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3143 					 u64 start, u64 len)
3144 {
3145 	struct btrfs_block_group *cache;
3146 
3147 	cache = btrfs_lookup_block_group(fs_info, start);
3148 	ASSERT(cache);
3149 
3150 	spin_lock(&cache->lock);
3151 	cache->delalloc_bytes -= len;
3152 	spin_unlock(&cache->lock);
3153 
3154 	btrfs_put_block_group(cache);
3155 }
3156 
3157 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3158 					     struct btrfs_ordered_extent *oe)
3159 {
3160 	struct btrfs_file_extent_item stack_fi;
3161 	bool update_inode_bytes;
3162 	u64 num_bytes = oe->num_bytes;
3163 	u64 ram_bytes = oe->ram_bytes;
3164 
3165 	memset(&stack_fi, 0, sizeof(stack_fi));
3166 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3167 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3168 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3169 						   oe->disk_num_bytes);
3170 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3171 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
3172 		num_bytes = oe->truncated_len;
3173 		ram_bytes = num_bytes;
3174 	}
3175 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3176 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3177 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3178 	/* Encryption and other encoding is reserved and all 0 */
3179 
3180 	/*
3181 	 * For delalloc, when completing an ordered extent we update the inode's
3182 	 * bytes when clearing the range in the inode's io tree, so pass false
3183 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3184 	 * except if the ordered extent was truncated.
3185 	 */
3186 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3187 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3188 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3189 
3190 	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3191 					   oe->file_offset, &stack_fi,
3192 					   update_inode_bytes, oe->qgroup_rsv);
3193 }
3194 
3195 /*
3196  * As ordered data IO finishes, this gets called so we can finish
3197  * an ordered extent if the range of bytes in the file it covers are
3198  * fully written.
3199  */
3200 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3201 {
3202 	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3203 	struct btrfs_root *root = inode->root;
3204 	struct btrfs_fs_info *fs_info = root->fs_info;
3205 	struct btrfs_trans_handle *trans = NULL;
3206 	struct extent_io_tree *io_tree = &inode->io_tree;
3207 	struct extent_state *cached_state = NULL;
3208 	u64 start, end;
3209 	int compress_type = 0;
3210 	int ret = 0;
3211 	u64 logical_len = ordered_extent->num_bytes;
3212 	bool freespace_inode;
3213 	bool truncated = false;
3214 	bool clear_reserved_extent = true;
3215 	unsigned int clear_bits = EXTENT_DEFRAG;
3216 
3217 	start = ordered_extent->file_offset;
3218 	end = start + ordered_extent->num_bytes - 1;
3219 
3220 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3221 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3222 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3223 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3224 		clear_bits |= EXTENT_DELALLOC_NEW;
3225 
3226 	freespace_inode = btrfs_is_free_space_inode(inode);
3227 	if (!freespace_inode)
3228 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3229 
3230 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3231 		ret = -EIO;
3232 		goto out;
3233 	}
3234 
3235 	/* A valid bdev implies a write on a sequential zone */
3236 	if (ordered_extent->bdev) {
3237 		btrfs_rewrite_logical_zoned(ordered_extent);
3238 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3239 					ordered_extent->disk_num_bytes);
3240 	}
3241 
3242 	btrfs_free_io_failure_record(inode, start, end);
3243 
3244 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3245 		truncated = true;
3246 		logical_len = ordered_extent->truncated_len;
3247 		/* Truncated the entire extent, don't bother adding */
3248 		if (!logical_len)
3249 			goto out;
3250 	}
3251 
3252 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3253 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3254 
3255 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3256 		if (freespace_inode)
3257 			trans = btrfs_join_transaction_spacecache(root);
3258 		else
3259 			trans = btrfs_join_transaction(root);
3260 		if (IS_ERR(trans)) {
3261 			ret = PTR_ERR(trans);
3262 			trans = NULL;
3263 			goto out;
3264 		}
3265 		trans->block_rsv = &inode->block_rsv;
3266 		ret = btrfs_update_inode_fallback(trans, root, inode);
3267 		if (ret) /* -ENOMEM or corruption */
3268 			btrfs_abort_transaction(trans, ret);
3269 		goto out;
3270 	}
3271 
3272 	clear_bits |= EXTENT_LOCKED;
3273 	lock_extent(io_tree, start, end, &cached_state);
3274 
3275 	if (freespace_inode)
3276 		trans = btrfs_join_transaction_spacecache(root);
3277 	else
3278 		trans = btrfs_join_transaction(root);
3279 	if (IS_ERR(trans)) {
3280 		ret = PTR_ERR(trans);
3281 		trans = NULL;
3282 		goto out;
3283 	}
3284 
3285 	trans->block_rsv = &inode->block_rsv;
3286 
3287 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3288 		compress_type = ordered_extent->compress_type;
3289 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3290 		BUG_ON(compress_type);
3291 		ret = btrfs_mark_extent_written(trans, inode,
3292 						ordered_extent->file_offset,
3293 						ordered_extent->file_offset +
3294 						logical_len);
3295 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3296 						  ordered_extent->disk_num_bytes);
3297 	} else {
3298 		BUG_ON(root == fs_info->tree_root);
3299 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3300 		if (!ret) {
3301 			clear_reserved_extent = false;
3302 			btrfs_release_delalloc_bytes(fs_info,
3303 						ordered_extent->disk_bytenr,
3304 						ordered_extent->disk_num_bytes);
3305 		}
3306 	}
3307 	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
3308 			   ordered_extent->num_bytes, trans->transid);
3309 	if (ret < 0) {
3310 		btrfs_abort_transaction(trans, ret);
3311 		goto out;
3312 	}
3313 
3314 	ret = add_pending_csums(trans, &ordered_extent->list);
3315 	if (ret) {
3316 		btrfs_abort_transaction(trans, ret);
3317 		goto out;
3318 	}
3319 
3320 	/*
3321 	 * If this is a new delalloc range, clear its new delalloc flag to
3322 	 * update the inode's number of bytes. This needs to be done first
3323 	 * before updating the inode item.
3324 	 */
3325 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3326 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3327 		clear_extent_bit(&inode->io_tree, start, end,
3328 				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3329 				 &cached_state);
3330 
3331 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3332 	ret = btrfs_update_inode_fallback(trans, root, inode);
3333 	if (ret) { /* -ENOMEM or corruption */
3334 		btrfs_abort_transaction(trans, ret);
3335 		goto out;
3336 	}
3337 	ret = 0;
3338 out:
3339 	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3340 			 &cached_state);
3341 
3342 	if (trans)
3343 		btrfs_end_transaction(trans);
3344 
3345 	if (ret || truncated) {
3346 		u64 unwritten_start = start;
3347 
3348 		/*
3349 		 * If we failed to finish this ordered extent for any reason we
3350 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3351 		 * extent, and mark the inode with the error if it wasn't
3352 		 * already set.  Any error during writeback would have already
3353 		 * set the mapping error, so we need to set it if we're the ones
3354 		 * marking this ordered extent as failed.
3355 		 */
3356 		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3357 					     &ordered_extent->flags))
3358 			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3359 
3360 		if (truncated)
3361 			unwritten_start += logical_len;
3362 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3363 
3364 		/* Drop extent maps for the part of the extent we didn't write. */
3365 		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
3366 
3367 		/*
3368 		 * If the ordered extent had an IOERR or something else went
3369 		 * wrong we need to return the space for this ordered extent
3370 		 * back to the allocator.  We only free the extent in the
3371 		 * truncated case if we didn't write out the extent at all.
3372 		 *
3373 		 * If we made it past insert_reserved_file_extent before we
3374 		 * errored out then we don't need to do this as the accounting
3375 		 * has already been done.
3376 		 */
3377 		if ((ret || !logical_len) &&
3378 		    clear_reserved_extent &&
3379 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3380 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3381 			/*
3382 			 * Discard the range before returning it back to the
3383 			 * free space pool
3384 			 */
3385 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3386 				btrfs_discard_extent(fs_info,
3387 						ordered_extent->disk_bytenr,
3388 						ordered_extent->disk_num_bytes,
3389 						NULL);
3390 			btrfs_free_reserved_extent(fs_info,
3391 					ordered_extent->disk_bytenr,
3392 					ordered_extent->disk_num_bytes, 1);
3393 		}
3394 	}
3395 
3396 	/*
3397 	 * This needs to be done to make sure anybody waiting knows we are done
3398 	 * updating everything for this ordered extent.
3399 	 */
3400 	btrfs_remove_ordered_extent(inode, ordered_extent);
3401 
3402 	/* once for us */
3403 	btrfs_put_ordered_extent(ordered_extent);
3404 	/* once for the tree */
3405 	btrfs_put_ordered_extent(ordered_extent);
3406 
3407 	return ret;
3408 }
3409 
3410 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
3411 					  struct page *page, u64 start,
3412 					  u64 end, bool uptodate)
3413 {
3414 	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
3415 
3416 	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
3417 }
3418 
3419 /*
3420  * Verify the checksum for a single sector without any extra action that depend
3421  * on the type of I/O.
3422  */
3423 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3424 			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
3425 {
3426 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3427 	char *kaddr;
3428 
3429 	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3430 
3431 	shash->tfm = fs_info->csum_shash;
3432 
3433 	kaddr = kmap_local_page(page) + pgoff;
3434 	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3435 	kunmap_local(kaddr);
3436 
3437 	if (memcmp(csum, csum_expected, fs_info->csum_size))
3438 		return -EIO;
3439 	return 0;
3440 }
3441 
3442 static u8 *btrfs_csum_ptr(const struct btrfs_fs_info *fs_info, u8 *csums, u64 offset)
3443 {
3444 	u64 offset_in_sectors = offset >> fs_info->sectorsize_bits;
3445 
3446 	return csums + offset_in_sectors * fs_info->csum_size;
3447 }
3448 
3449 /*
3450  * check_data_csum - verify checksum of one sector of uncompressed data
3451  * @inode:	inode
3452  * @bbio:	btrfs_bio which contains the csum
3453  * @bio_offset:	offset to the beginning of the bio (in bytes)
3454  * @page:	page where is the data to be verified
3455  * @pgoff:	offset inside the page
3456  *
3457  * The length of such check is always one sector size.
3458  *
3459  * When csum mismatch is detected, we will also report the error and fill the
3460  * corrupted range with zero. (Thus it needs the extra parameters)
3461  */
3462 int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
3463 			  u32 bio_offset, struct page *page, u32 pgoff)
3464 {
3465 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3466 	u32 len = fs_info->sectorsize;
3467 	u8 *csum_expected;
3468 	u8 csum[BTRFS_CSUM_SIZE];
3469 
3470 	ASSERT(pgoff + len <= PAGE_SIZE);
3471 
3472 	csum_expected = btrfs_csum_ptr(fs_info, bbio->csum, bio_offset);
3473 
3474 	if (btrfs_check_sector_csum(fs_info, page, pgoff, csum, csum_expected))
3475 		goto zeroit;
3476 	return 0;
3477 
3478 zeroit:
3479 	btrfs_print_data_csum_error(BTRFS_I(inode),
3480 				    bbio->file_offset + bio_offset,
3481 				    csum, csum_expected, bbio->mirror_num);
3482 	if (bbio->device)
3483 		btrfs_dev_stat_inc_and_print(bbio->device,
3484 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
3485 	memzero_page(page, pgoff, len);
3486 	return -EIO;
3487 }
3488 
3489 /*
3490  * When reads are done, we need to check csums to verify the data is correct.
3491  * if there's a match, we allow the bio to finish.  If not, the code in
3492  * extent_io.c will try to find good copies for us.
3493  *
3494  * @bio_offset:	offset to the beginning of the bio (in bytes)
3495  * @start:	file offset of the range start
3496  * @end:	file offset of the range end (inclusive)
3497  *
3498  * Return a bitmap where bit set means a csum mismatch, and bit not set means
3499  * csum match.
3500  */
3501 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
3502 				    u32 bio_offset, struct page *page,
3503 				    u64 start, u64 end)
3504 {
3505 	struct inode *inode = page->mapping->host;
3506 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3507 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3508 	struct btrfs_root *root = BTRFS_I(inode)->root;
3509 	const u32 sectorsize = root->fs_info->sectorsize;
3510 	u32 pg_off;
3511 	unsigned int result = 0;
3512 
3513 	/*
3514 	 * This only happens for NODATASUM or compressed read.
3515 	 * Normally this should be covered by above check for compressed read
3516 	 * or the next check for NODATASUM.  Just do a quicker exit here.
3517 	 */
3518 	if (bbio->csum == NULL)
3519 		return 0;
3520 
3521 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3522 		return 0;
3523 
3524 	if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)))
3525 		return 0;
3526 
3527 	ASSERT(page_offset(page) <= start &&
3528 	       end <= page_offset(page) + PAGE_SIZE - 1);
3529 	for (pg_off = offset_in_page(start);
3530 	     pg_off < offset_in_page(end);
3531 	     pg_off += sectorsize, bio_offset += sectorsize) {
3532 		u64 file_offset = pg_off + page_offset(page);
3533 		int ret;
3534 
3535 		if (btrfs_is_data_reloc_root(root) &&
3536 		    test_range_bit(io_tree, file_offset,
3537 				   file_offset + sectorsize - 1,
3538 				   EXTENT_NODATASUM, 1, NULL)) {
3539 			/* Skip the range without csum for data reloc inode */
3540 			clear_extent_bits(io_tree, file_offset,
3541 					  file_offset + sectorsize - 1,
3542 					  EXTENT_NODATASUM);
3543 			continue;
3544 		}
3545 		ret = btrfs_check_data_csum(inode, bbio, bio_offset, page, pg_off);
3546 		if (ret < 0) {
3547 			const int nr_bit = (pg_off - offset_in_page(start)) >>
3548 				     root->fs_info->sectorsize_bits;
3549 
3550 			result |= (1U << nr_bit);
3551 		}
3552 	}
3553 	return result;
3554 }
3555 
3556 /*
3557  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3558  *
3559  * @inode: The inode we want to perform iput on
3560  *
3561  * This function uses the generic vfs_inode::i_count to track whether we should
3562  * just decrement it (in case it's > 1) or if this is the last iput then link
3563  * the inode to the delayed iput machinery. Delayed iputs are processed at
3564  * transaction commit time/superblock commit/cleaner kthread.
3565  */
3566 void btrfs_add_delayed_iput(struct inode *inode)
3567 {
3568 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3569 	struct btrfs_inode *binode = BTRFS_I(inode);
3570 
3571 	if (atomic_add_unless(&inode->i_count, -1, 1))
3572 		return;
3573 
3574 	atomic_inc(&fs_info->nr_delayed_iputs);
3575 	spin_lock(&fs_info->delayed_iput_lock);
3576 	ASSERT(list_empty(&binode->delayed_iput));
3577 	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3578 	spin_unlock(&fs_info->delayed_iput_lock);
3579 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3580 		wake_up_process(fs_info->cleaner_kthread);
3581 }
3582 
3583 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3584 				    struct btrfs_inode *inode)
3585 {
3586 	list_del_init(&inode->delayed_iput);
3587 	spin_unlock(&fs_info->delayed_iput_lock);
3588 	iput(&inode->vfs_inode);
3589 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3590 		wake_up(&fs_info->delayed_iputs_wait);
3591 	spin_lock(&fs_info->delayed_iput_lock);
3592 }
3593 
3594 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3595 				   struct btrfs_inode *inode)
3596 {
3597 	if (!list_empty(&inode->delayed_iput)) {
3598 		spin_lock(&fs_info->delayed_iput_lock);
3599 		if (!list_empty(&inode->delayed_iput))
3600 			run_delayed_iput_locked(fs_info, inode);
3601 		spin_unlock(&fs_info->delayed_iput_lock);
3602 	}
3603 }
3604 
3605 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3606 {
3607 
3608 	spin_lock(&fs_info->delayed_iput_lock);
3609 	while (!list_empty(&fs_info->delayed_iputs)) {
3610 		struct btrfs_inode *inode;
3611 
3612 		inode = list_first_entry(&fs_info->delayed_iputs,
3613 				struct btrfs_inode, delayed_iput);
3614 		run_delayed_iput_locked(fs_info, inode);
3615 		cond_resched_lock(&fs_info->delayed_iput_lock);
3616 	}
3617 	spin_unlock(&fs_info->delayed_iput_lock);
3618 }
3619 
3620 /**
3621  * Wait for flushing all delayed iputs
3622  *
3623  * @fs_info:  the filesystem
3624  *
3625  * This will wait on any delayed iputs that are currently running with KILLABLE
3626  * set.  Once they are all done running we will return, unless we are killed in
3627  * which case we return EINTR. This helps in user operations like fallocate etc
3628  * that might get blocked on the iputs.
3629  *
3630  * Return EINTR if we were killed, 0 if nothing's pending
3631  */
3632 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3633 {
3634 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3635 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3636 	if (ret)
3637 		return -EINTR;
3638 	return 0;
3639 }
3640 
3641 /*
3642  * This creates an orphan entry for the given inode in case something goes wrong
3643  * in the middle of an unlink.
3644  */
3645 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3646 		     struct btrfs_inode *inode)
3647 {
3648 	int ret;
3649 
3650 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3651 	if (ret && ret != -EEXIST) {
3652 		btrfs_abort_transaction(trans, ret);
3653 		return ret;
3654 	}
3655 
3656 	return 0;
3657 }
3658 
3659 /*
3660  * We have done the delete so we can go ahead and remove the orphan item for
3661  * this particular inode.
3662  */
3663 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3664 			    struct btrfs_inode *inode)
3665 {
3666 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3667 }
3668 
3669 /*
3670  * this cleans up any orphans that may be left on the list from the last use
3671  * of this root.
3672  */
3673 int btrfs_orphan_cleanup(struct btrfs_root *root)
3674 {
3675 	struct btrfs_fs_info *fs_info = root->fs_info;
3676 	struct btrfs_path *path;
3677 	struct extent_buffer *leaf;
3678 	struct btrfs_key key, found_key;
3679 	struct btrfs_trans_handle *trans;
3680 	struct inode *inode;
3681 	u64 last_objectid = 0;
3682 	int ret = 0, nr_unlink = 0;
3683 
3684 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3685 		return 0;
3686 
3687 	path = btrfs_alloc_path();
3688 	if (!path) {
3689 		ret = -ENOMEM;
3690 		goto out;
3691 	}
3692 	path->reada = READA_BACK;
3693 
3694 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3695 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3696 	key.offset = (u64)-1;
3697 
3698 	while (1) {
3699 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3700 		if (ret < 0)
3701 			goto out;
3702 
3703 		/*
3704 		 * if ret == 0 means we found what we were searching for, which
3705 		 * is weird, but possible, so only screw with path if we didn't
3706 		 * find the key and see if we have stuff that matches
3707 		 */
3708 		if (ret > 0) {
3709 			ret = 0;
3710 			if (path->slots[0] == 0)
3711 				break;
3712 			path->slots[0]--;
3713 		}
3714 
3715 		/* pull out the item */
3716 		leaf = path->nodes[0];
3717 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3718 
3719 		/* make sure the item matches what we want */
3720 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3721 			break;
3722 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3723 			break;
3724 
3725 		/* release the path since we're done with it */
3726 		btrfs_release_path(path);
3727 
3728 		/*
3729 		 * this is where we are basically btrfs_lookup, without the
3730 		 * crossing root thing.  we store the inode number in the
3731 		 * offset of the orphan item.
3732 		 */
3733 
3734 		if (found_key.offset == last_objectid) {
3735 			btrfs_err(fs_info,
3736 				  "Error removing orphan entry, stopping orphan cleanup");
3737 			ret = -EINVAL;
3738 			goto out;
3739 		}
3740 
3741 		last_objectid = found_key.offset;
3742 
3743 		found_key.objectid = found_key.offset;
3744 		found_key.type = BTRFS_INODE_ITEM_KEY;
3745 		found_key.offset = 0;
3746 		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3747 		ret = PTR_ERR_OR_ZERO(inode);
3748 		if (ret && ret != -ENOENT)
3749 			goto out;
3750 
3751 		if (ret == -ENOENT && root == fs_info->tree_root) {
3752 			struct btrfs_root *dead_root;
3753 			int is_dead_root = 0;
3754 
3755 			/*
3756 			 * This is an orphan in the tree root. Currently these
3757 			 * could come from 2 sources:
3758 			 *  a) a root (snapshot/subvolume) deletion in progress
3759 			 *  b) a free space cache inode
3760 			 * We need to distinguish those two, as the orphan item
3761 			 * for a root must not get deleted before the deletion
3762 			 * of the snapshot/subvolume's tree completes.
3763 			 *
3764 			 * btrfs_find_orphan_roots() ran before us, which has
3765 			 * found all deleted roots and loaded them into
3766 			 * fs_info->fs_roots_radix. So here we can find if an
3767 			 * orphan item corresponds to a deleted root by looking
3768 			 * up the root from that radix tree.
3769 			 */
3770 
3771 			spin_lock(&fs_info->fs_roots_radix_lock);
3772 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3773 							 (unsigned long)found_key.objectid);
3774 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3775 				is_dead_root = 1;
3776 			spin_unlock(&fs_info->fs_roots_radix_lock);
3777 
3778 			if (is_dead_root) {
3779 				/* prevent this orphan from being found again */
3780 				key.offset = found_key.objectid - 1;
3781 				continue;
3782 			}
3783 
3784 		}
3785 
3786 		/*
3787 		 * If we have an inode with links, there are a couple of
3788 		 * possibilities:
3789 		 *
3790 		 * 1. We were halfway through creating fsverity metadata for the
3791 		 * file. In that case, the orphan item represents incomplete
3792 		 * fsverity metadata which must be cleaned up with
3793 		 * btrfs_drop_verity_items and deleting the orphan item.
3794 
3795 		 * 2. Old kernels (before v3.12) used to create an
3796 		 * orphan item for truncate indicating that there were possibly
3797 		 * extent items past i_size that needed to be deleted. In v3.12,
3798 		 * truncate was changed to update i_size in sync with the extent
3799 		 * items, but the (useless) orphan item was still created. Since
3800 		 * v4.18, we don't create the orphan item for truncate at all.
3801 		 *
3802 		 * So, this item could mean that we need to do a truncate, but
3803 		 * only if this filesystem was last used on a pre-v3.12 kernel
3804 		 * and was not cleanly unmounted. The odds of that are quite
3805 		 * slim, and it's a pain to do the truncate now, so just delete
3806 		 * the orphan item.
3807 		 *
3808 		 * It's also possible that this orphan item was supposed to be
3809 		 * deleted but wasn't. The inode number may have been reused,
3810 		 * but either way, we can delete the orphan item.
3811 		 */
3812 		if (ret == -ENOENT || inode->i_nlink) {
3813 			if (!ret) {
3814 				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3815 				iput(inode);
3816 				if (ret)
3817 					goto out;
3818 			}
3819 			trans = btrfs_start_transaction(root, 1);
3820 			if (IS_ERR(trans)) {
3821 				ret = PTR_ERR(trans);
3822 				goto out;
3823 			}
3824 			btrfs_debug(fs_info, "auto deleting %Lu",
3825 				    found_key.objectid);
3826 			ret = btrfs_del_orphan_item(trans, root,
3827 						    found_key.objectid);
3828 			btrfs_end_transaction(trans);
3829 			if (ret)
3830 				goto out;
3831 			continue;
3832 		}
3833 
3834 		nr_unlink++;
3835 
3836 		/* this will do delete_inode and everything for us */
3837 		iput(inode);
3838 	}
3839 	/* release the path since we're done with it */
3840 	btrfs_release_path(path);
3841 
3842 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3843 		trans = btrfs_join_transaction(root);
3844 		if (!IS_ERR(trans))
3845 			btrfs_end_transaction(trans);
3846 	}
3847 
3848 	if (nr_unlink)
3849 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3850 
3851 out:
3852 	if (ret)
3853 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3854 	btrfs_free_path(path);
3855 	return ret;
3856 }
3857 
3858 /*
3859  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3860  * don't find any xattrs, we know there can't be any acls.
3861  *
3862  * slot is the slot the inode is in, objectid is the objectid of the inode
3863  */
3864 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3865 					  int slot, u64 objectid,
3866 					  int *first_xattr_slot)
3867 {
3868 	u32 nritems = btrfs_header_nritems(leaf);
3869 	struct btrfs_key found_key;
3870 	static u64 xattr_access = 0;
3871 	static u64 xattr_default = 0;
3872 	int scanned = 0;
3873 
3874 	if (!xattr_access) {
3875 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3876 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3877 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3878 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3879 	}
3880 
3881 	slot++;
3882 	*first_xattr_slot = -1;
3883 	while (slot < nritems) {
3884 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3885 
3886 		/* we found a different objectid, there must not be acls */
3887 		if (found_key.objectid != objectid)
3888 			return 0;
3889 
3890 		/* we found an xattr, assume we've got an acl */
3891 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3892 			if (*first_xattr_slot == -1)
3893 				*first_xattr_slot = slot;
3894 			if (found_key.offset == xattr_access ||
3895 			    found_key.offset == xattr_default)
3896 				return 1;
3897 		}
3898 
3899 		/*
3900 		 * we found a key greater than an xattr key, there can't
3901 		 * be any acls later on
3902 		 */
3903 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3904 			return 0;
3905 
3906 		slot++;
3907 		scanned++;
3908 
3909 		/*
3910 		 * it goes inode, inode backrefs, xattrs, extents,
3911 		 * so if there are a ton of hard links to an inode there can
3912 		 * be a lot of backrefs.  Don't waste time searching too hard,
3913 		 * this is just an optimization
3914 		 */
3915 		if (scanned >= 8)
3916 			break;
3917 	}
3918 	/* we hit the end of the leaf before we found an xattr or
3919 	 * something larger than an xattr.  We have to assume the inode
3920 	 * has acls
3921 	 */
3922 	if (*first_xattr_slot == -1)
3923 		*first_xattr_slot = slot;
3924 	return 1;
3925 }
3926 
3927 /*
3928  * read an inode from the btree into the in-memory inode
3929  */
3930 static int btrfs_read_locked_inode(struct inode *inode,
3931 				   struct btrfs_path *in_path)
3932 {
3933 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3934 	struct btrfs_path *path = in_path;
3935 	struct extent_buffer *leaf;
3936 	struct btrfs_inode_item *inode_item;
3937 	struct btrfs_root *root = BTRFS_I(inode)->root;
3938 	struct btrfs_key location;
3939 	unsigned long ptr;
3940 	int maybe_acls;
3941 	u32 rdev;
3942 	int ret;
3943 	bool filled = false;
3944 	int first_xattr_slot;
3945 
3946 	ret = btrfs_fill_inode(inode, &rdev);
3947 	if (!ret)
3948 		filled = true;
3949 
3950 	if (!path) {
3951 		path = btrfs_alloc_path();
3952 		if (!path)
3953 			return -ENOMEM;
3954 	}
3955 
3956 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3957 
3958 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3959 	if (ret) {
3960 		if (path != in_path)
3961 			btrfs_free_path(path);
3962 		return ret;
3963 	}
3964 
3965 	leaf = path->nodes[0];
3966 
3967 	if (filled)
3968 		goto cache_index;
3969 
3970 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3971 				    struct btrfs_inode_item);
3972 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3973 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3974 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3975 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3976 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3977 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3978 			round_up(i_size_read(inode), fs_info->sectorsize));
3979 
3980 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3981 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3982 
3983 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3984 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3985 
3986 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3987 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3988 
3989 	BTRFS_I(inode)->i_otime.tv_sec =
3990 		btrfs_timespec_sec(leaf, &inode_item->otime);
3991 	BTRFS_I(inode)->i_otime.tv_nsec =
3992 		btrfs_timespec_nsec(leaf, &inode_item->otime);
3993 
3994 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3995 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3996 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3997 
3998 	inode_set_iversion_queried(inode,
3999 				   btrfs_inode_sequence(leaf, inode_item));
4000 	inode->i_generation = BTRFS_I(inode)->generation;
4001 	inode->i_rdev = 0;
4002 	rdev = btrfs_inode_rdev(leaf, inode_item);
4003 
4004 	BTRFS_I(inode)->index_cnt = (u64)-1;
4005 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
4006 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
4007 
4008 cache_index:
4009 	/*
4010 	 * If we were modified in the current generation and evicted from memory
4011 	 * and then re-read we need to do a full sync since we don't have any
4012 	 * idea about which extents were modified before we were evicted from
4013 	 * cache.
4014 	 *
4015 	 * This is required for both inode re-read from disk and delayed inode
4016 	 * in delayed_nodes_tree.
4017 	 */
4018 	if (BTRFS_I(inode)->last_trans == fs_info->generation)
4019 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4020 			&BTRFS_I(inode)->runtime_flags);
4021 
4022 	/*
4023 	 * We don't persist the id of the transaction where an unlink operation
4024 	 * against the inode was last made. So here we assume the inode might
4025 	 * have been evicted, and therefore the exact value of last_unlink_trans
4026 	 * lost, and set it to last_trans to avoid metadata inconsistencies
4027 	 * between the inode and its parent if the inode is fsync'ed and the log
4028 	 * replayed. For example, in the scenario:
4029 	 *
4030 	 * touch mydir/foo
4031 	 * ln mydir/foo mydir/bar
4032 	 * sync
4033 	 * unlink mydir/bar
4034 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
4035 	 * xfs_io -c fsync mydir/foo
4036 	 * <power failure>
4037 	 * mount fs, triggers fsync log replay
4038 	 *
4039 	 * We must make sure that when we fsync our inode foo we also log its
4040 	 * parent inode, otherwise after log replay the parent still has the
4041 	 * dentry with the "bar" name but our inode foo has a link count of 1
4042 	 * and doesn't have an inode ref with the name "bar" anymore.
4043 	 *
4044 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
4045 	 * but it guarantees correctness at the expense of occasional full
4046 	 * transaction commits on fsync if our inode is a directory, or if our
4047 	 * inode is not a directory, logging its parent unnecessarily.
4048 	 */
4049 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
4050 
4051 	/*
4052 	 * Same logic as for last_unlink_trans. We don't persist the generation
4053 	 * of the last transaction where this inode was used for a reflink
4054 	 * operation, so after eviction and reloading the inode we must be
4055 	 * pessimistic and assume the last transaction that modified the inode.
4056 	 */
4057 	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
4058 
4059 	path->slots[0]++;
4060 	if (inode->i_nlink != 1 ||
4061 	    path->slots[0] >= btrfs_header_nritems(leaf))
4062 		goto cache_acl;
4063 
4064 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
4065 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
4066 		goto cache_acl;
4067 
4068 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4069 	if (location.type == BTRFS_INODE_REF_KEY) {
4070 		struct btrfs_inode_ref *ref;
4071 
4072 		ref = (struct btrfs_inode_ref *)ptr;
4073 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
4074 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
4075 		struct btrfs_inode_extref *extref;
4076 
4077 		extref = (struct btrfs_inode_extref *)ptr;
4078 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
4079 								     extref);
4080 	}
4081 cache_acl:
4082 	/*
4083 	 * try to precache a NULL acl entry for files that don't have
4084 	 * any xattrs or acls
4085 	 */
4086 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
4087 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
4088 	if (first_xattr_slot != -1) {
4089 		path->slots[0] = first_xattr_slot;
4090 		ret = btrfs_load_inode_props(inode, path);
4091 		if (ret)
4092 			btrfs_err(fs_info,
4093 				  "error loading props for ino %llu (root %llu): %d",
4094 				  btrfs_ino(BTRFS_I(inode)),
4095 				  root->root_key.objectid, ret);
4096 	}
4097 	if (path != in_path)
4098 		btrfs_free_path(path);
4099 
4100 	if (!maybe_acls)
4101 		cache_no_acl(inode);
4102 
4103 	switch (inode->i_mode & S_IFMT) {
4104 	case S_IFREG:
4105 		inode->i_mapping->a_ops = &btrfs_aops;
4106 		inode->i_fop = &btrfs_file_operations;
4107 		inode->i_op = &btrfs_file_inode_operations;
4108 		break;
4109 	case S_IFDIR:
4110 		inode->i_fop = &btrfs_dir_file_operations;
4111 		inode->i_op = &btrfs_dir_inode_operations;
4112 		break;
4113 	case S_IFLNK:
4114 		inode->i_op = &btrfs_symlink_inode_operations;
4115 		inode_nohighmem(inode);
4116 		inode->i_mapping->a_ops = &btrfs_aops;
4117 		break;
4118 	default:
4119 		inode->i_op = &btrfs_special_inode_operations;
4120 		init_special_inode(inode, inode->i_mode, rdev);
4121 		break;
4122 	}
4123 
4124 	btrfs_sync_inode_flags_to_i_flags(inode);
4125 	return 0;
4126 }
4127 
4128 /*
4129  * given a leaf and an inode, copy the inode fields into the leaf
4130  */
4131 static void fill_inode_item(struct btrfs_trans_handle *trans,
4132 			    struct extent_buffer *leaf,
4133 			    struct btrfs_inode_item *item,
4134 			    struct inode *inode)
4135 {
4136 	struct btrfs_map_token token;
4137 	u64 flags;
4138 
4139 	btrfs_init_map_token(&token, leaf);
4140 
4141 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4142 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4143 	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
4144 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4145 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4146 
4147 	btrfs_set_token_timespec_sec(&token, &item->atime,
4148 				     inode->i_atime.tv_sec);
4149 	btrfs_set_token_timespec_nsec(&token, &item->atime,
4150 				      inode->i_atime.tv_nsec);
4151 
4152 	btrfs_set_token_timespec_sec(&token, &item->mtime,
4153 				     inode->i_mtime.tv_sec);
4154 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
4155 				      inode->i_mtime.tv_nsec);
4156 
4157 	btrfs_set_token_timespec_sec(&token, &item->ctime,
4158 				     inode->i_ctime.tv_sec);
4159 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
4160 				      inode->i_ctime.tv_nsec);
4161 
4162 	btrfs_set_token_timespec_sec(&token, &item->otime,
4163 				     BTRFS_I(inode)->i_otime.tv_sec);
4164 	btrfs_set_token_timespec_nsec(&token, &item->otime,
4165 				      BTRFS_I(inode)->i_otime.tv_nsec);
4166 
4167 	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
4168 	btrfs_set_token_inode_generation(&token, item,
4169 					 BTRFS_I(inode)->generation);
4170 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4171 	btrfs_set_token_inode_transid(&token, item, trans->transid);
4172 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4173 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4174 					  BTRFS_I(inode)->ro_flags);
4175 	btrfs_set_token_inode_flags(&token, item, flags);
4176 	btrfs_set_token_inode_block_group(&token, item, 0);
4177 }
4178 
4179 /*
4180  * copy everything in the in-memory inode into the btree.
4181  */
4182 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4183 				struct btrfs_root *root,
4184 				struct btrfs_inode *inode)
4185 {
4186 	struct btrfs_inode_item *inode_item;
4187 	struct btrfs_path *path;
4188 	struct extent_buffer *leaf;
4189 	int ret;
4190 
4191 	path = btrfs_alloc_path();
4192 	if (!path)
4193 		return -ENOMEM;
4194 
4195 	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
4196 	if (ret) {
4197 		if (ret > 0)
4198 			ret = -ENOENT;
4199 		goto failed;
4200 	}
4201 
4202 	leaf = path->nodes[0];
4203 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4204 				    struct btrfs_inode_item);
4205 
4206 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4207 	btrfs_mark_buffer_dirty(leaf);
4208 	btrfs_set_inode_last_trans(trans, inode);
4209 	ret = 0;
4210 failed:
4211 	btrfs_free_path(path);
4212 	return ret;
4213 }
4214 
4215 /*
4216  * copy everything in the in-memory inode into the btree.
4217  */
4218 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4219 				struct btrfs_root *root,
4220 				struct btrfs_inode *inode)
4221 {
4222 	struct btrfs_fs_info *fs_info = root->fs_info;
4223 	int ret;
4224 
4225 	/*
4226 	 * If the inode is a free space inode, we can deadlock during commit
4227 	 * if we put it into the delayed code.
4228 	 *
4229 	 * The data relocation inode should also be directly updated
4230 	 * without delay
4231 	 */
4232 	if (!btrfs_is_free_space_inode(inode)
4233 	    && !btrfs_is_data_reloc_root(root)
4234 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4235 		btrfs_update_root_times(trans, root);
4236 
4237 		ret = btrfs_delayed_update_inode(trans, root, inode);
4238 		if (!ret)
4239 			btrfs_set_inode_last_trans(trans, inode);
4240 		return ret;
4241 	}
4242 
4243 	return btrfs_update_inode_item(trans, root, inode);
4244 }
4245 
4246 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4247 				struct btrfs_root *root, struct btrfs_inode *inode)
4248 {
4249 	int ret;
4250 
4251 	ret = btrfs_update_inode(trans, root, inode);
4252 	if (ret == -ENOSPC)
4253 		return btrfs_update_inode_item(trans, root, inode);
4254 	return ret;
4255 }
4256 
4257 /*
4258  * unlink helper that gets used here in inode.c and in the tree logging
4259  * recovery code.  It remove a link in a directory with a given name, and
4260  * also drops the back refs in the inode to the directory
4261  */
4262 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4263 				struct btrfs_inode *dir,
4264 				struct btrfs_inode *inode,
4265 				const char *name, int name_len,
4266 				struct btrfs_rename_ctx *rename_ctx)
4267 {
4268 	struct btrfs_root *root = dir->root;
4269 	struct btrfs_fs_info *fs_info = root->fs_info;
4270 	struct btrfs_path *path;
4271 	int ret = 0;
4272 	struct btrfs_dir_item *di;
4273 	u64 index;
4274 	u64 ino = btrfs_ino(inode);
4275 	u64 dir_ino = btrfs_ino(dir);
4276 
4277 	path = btrfs_alloc_path();
4278 	if (!path) {
4279 		ret = -ENOMEM;
4280 		goto out;
4281 	}
4282 
4283 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4284 				    name, name_len, -1);
4285 	if (IS_ERR_OR_NULL(di)) {
4286 		ret = di ? PTR_ERR(di) : -ENOENT;
4287 		goto err;
4288 	}
4289 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4290 	if (ret)
4291 		goto err;
4292 	btrfs_release_path(path);
4293 
4294 	/*
4295 	 * If we don't have dir index, we have to get it by looking up
4296 	 * the inode ref, since we get the inode ref, remove it directly,
4297 	 * it is unnecessary to do delayed deletion.
4298 	 *
4299 	 * But if we have dir index, needn't search inode ref to get it.
4300 	 * Since the inode ref is close to the inode item, it is better
4301 	 * that we delay to delete it, and just do this deletion when
4302 	 * we update the inode item.
4303 	 */
4304 	if (inode->dir_index) {
4305 		ret = btrfs_delayed_delete_inode_ref(inode);
4306 		if (!ret) {
4307 			index = inode->dir_index;
4308 			goto skip_backref;
4309 		}
4310 	}
4311 
4312 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4313 				  dir_ino, &index);
4314 	if (ret) {
4315 		btrfs_info(fs_info,
4316 			"failed to delete reference to %.*s, inode %llu parent %llu",
4317 			name_len, name, ino, dir_ino);
4318 		btrfs_abort_transaction(trans, ret);
4319 		goto err;
4320 	}
4321 skip_backref:
4322 	if (rename_ctx)
4323 		rename_ctx->index = index;
4324 
4325 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4326 	if (ret) {
4327 		btrfs_abort_transaction(trans, ret);
4328 		goto err;
4329 	}
4330 
4331 	/*
4332 	 * If we are in a rename context, we don't need to update anything in the
4333 	 * log. That will be done later during the rename by btrfs_log_new_name().
4334 	 * Besides that, doing it here would only cause extra unnecessary btree
4335 	 * operations on the log tree, increasing latency for applications.
4336 	 */
4337 	if (!rename_ctx) {
4338 		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4339 					   dir_ino);
4340 		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4341 					     index);
4342 	}
4343 
4344 	/*
4345 	 * If we have a pending delayed iput we could end up with the final iput
4346 	 * being run in btrfs-cleaner context.  If we have enough of these built
4347 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4348 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4349 	 * the inode we can run the delayed iput here without any issues as the
4350 	 * final iput won't be done until after we drop the ref we're currently
4351 	 * holding.
4352 	 */
4353 	btrfs_run_delayed_iput(fs_info, inode);
4354 err:
4355 	btrfs_free_path(path);
4356 	if (ret)
4357 		goto out;
4358 
4359 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4360 	inode_inc_iversion(&inode->vfs_inode);
4361 	inode_inc_iversion(&dir->vfs_inode);
4362 	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4363 	dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime;
4364 	dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime;
4365 	ret = btrfs_update_inode(trans, root, dir);
4366 out:
4367 	return ret;
4368 }
4369 
4370 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4371 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4372 		       const char *name, int name_len)
4373 {
4374 	int ret;
4375 	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
4376 	if (!ret) {
4377 		drop_nlink(&inode->vfs_inode);
4378 		ret = btrfs_update_inode(trans, inode->root, inode);
4379 	}
4380 	return ret;
4381 }
4382 
4383 /*
4384  * helper to start transaction for unlink and rmdir.
4385  *
4386  * unlink and rmdir are special in btrfs, they do not always free space, so
4387  * if we cannot make our reservations the normal way try and see if there is
4388  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4389  * allow the unlink to occur.
4390  */
4391 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4392 {
4393 	struct btrfs_root *root = BTRFS_I(dir)->root;
4394 
4395 	/*
4396 	 * 1 for the possible orphan item
4397 	 * 1 for the dir item
4398 	 * 1 for the dir index
4399 	 * 1 for the inode ref
4400 	 * 1 for the inode
4401 	 * 1 for the parent inode
4402 	 */
4403 	return btrfs_start_transaction_fallback_global_rsv(root, 6);
4404 }
4405 
4406 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4407 {
4408 	struct btrfs_trans_handle *trans;
4409 	struct inode *inode = d_inode(dentry);
4410 	int ret;
4411 
4412 	trans = __unlink_start_trans(dir);
4413 	if (IS_ERR(trans))
4414 		return PTR_ERR(trans);
4415 
4416 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4417 			0);
4418 
4419 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
4420 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4421 			dentry->d_name.len);
4422 	if (ret)
4423 		goto out;
4424 
4425 	if (inode->i_nlink == 0) {
4426 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4427 		if (ret)
4428 			goto out;
4429 	}
4430 
4431 out:
4432 	btrfs_end_transaction(trans);
4433 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4434 	return ret;
4435 }
4436 
4437 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4438 			       struct inode *dir, struct dentry *dentry)
4439 {
4440 	struct btrfs_root *root = BTRFS_I(dir)->root;
4441 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4442 	struct btrfs_path *path;
4443 	struct extent_buffer *leaf;
4444 	struct btrfs_dir_item *di;
4445 	struct btrfs_key key;
4446 	const char *name = dentry->d_name.name;
4447 	int name_len = dentry->d_name.len;
4448 	u64 index;
4449 	int ret;
4450 	u64 objectid;
4451 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4452 
4453 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4454 		objectid = inode->root->root_key.objectid;
4455 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4456 		objectid = inode->location.objectid;
4457 	} else {
4458 		WARN_ON(1);
4459 		return -EINVAL;
4460 	}
4461 
4462 	path = btrfs_alloc_path();
4463 	if (!path)
4464 		return -ENOMEM;
4465 
4466 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4467 				   name, name_len, -1);
4468 	if (IS_ERR_OR_NULL(di)) {
4469 		ret = di ? PTR_ERR(di) : -ENOENT;
4470 		goto out;
4471 	}
4472 
4473 	leaf = path->nodes[0];
4474 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4475 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4476 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4477 	if (ret) {
4478 		btrfs_abort_transaction(trans, ret);
4479 		goto out;
4480 	}
4481 	btrfs_release_path(path);
4482 
4483 	/*
4484 	 * This is a placeholder inode for a subvolume we didn't have a
4485 	 * reference to at the time of the snapshot creation.  In the meantime
4486 	 * we could have renamed the real subvol link into our snapshot, so
4487 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4488 	 * Instead simply lookup the dir_index_item for this entry so we can
4489 	 * remove it.  Otherwise we know we have a ref to the root and we can
4490 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4491 	 */
4492 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4493 		di = btrfs_search_dir_index_item(root, path, dir_ino,
4494 						 name, name_len);
4495 		if (IS_ERR_OR_NULL(di)) {
4496 			if (!di)
4497 				ret = -ENOENT;
4498 			else
4499 				ret = PTR_ERR(di);
4500 			btrfs_abort_transaction(trans, ret);
4501 			goto out;
4502 		}
4503 
4504 		leaf = path->nodes[0];
4505 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4506 		index = key.offset;
4507 		btrfs_release_path(path);
4508 	} else {
4509 		ret = btrfs_del_root_ref(trans, objectid,
4510 					 root->root_key.objectid, dir_ino,
4511 					 &index, name, name_len);
4512 		if (ret) {
4513 			btrfs_abort_transaction(trans, ret);
4514 			goto out;
4515 		}
4516 	}
4517 
4518 	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4519 	if (ret) {
4520 		btrfs_abort_transaction(trans, ret);
4521 		goto out;
4522 	}
4523 
4524 	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4525 	inode_inc_iversion(dir);
4526 	dir->i_mtime = current_time(dir);
4527 	dir->i_ctime = dir->i_mtime;
4528 	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
4529 	if (ret)
4530 		btrfs_abort_transaction(trans, ret);
4531 out:
4532 	btrfs_free_path(path);
4533 	return ret;
4534 }
4535 
4536 /*
4537  * Helper to check if the subvolume references other subvolumes or if it's
4538  * default.
4539  */
4540 static noinline int may_destroy_subvol(struct btrfs_root *root)
4541 {
4542 	struct btrfs_fs_info *fs_info = root->fs_info;
4543 	struct btrfs_path *path;
4544 	struct btrfs_dir_item *di;
4545 	struct btrfs_key key;
4546 	u64 dir_id;
4547 	int ret;
4548 
4549 	path = btrfs_alloc_path();
4550 	if (!path)
4551 		return -ENOMEM;
4552 
4553 	/* Make sure this root isn't set as the default subvol */
4554 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4555 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4556 				   dir_id, "default", 7, 0);
4557 	if (di && !IS_ERR(di)) {
4558 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4559 		if (key.objectid == root->root_key.objectid) {
4560 			ret = -EPERM;
4561 			btrfs_err(fs_info,
4562 				  "deleting default subvolume %llu is not allowed",
4563 				  key.objectid);
4564 			goto out;
4565 		}
4566 		btrfs_release_path(path);
4567 	}
4568 
4569 	key.objectid = root->root_key.objectid;
4570 	key.type = BTRFS_ROOT_REF_KEY;
4571 	key.offset = (u64)-1;
4572 
4573 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4574 	if (ret < 0)
4575 		goto out;
4576 	BUG_ON(ret == 0);
4577 
4578 	ret = 0;
4579 	if (path->slots[0] > 0) {
4580 		path->slots[0]--;
4581 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4582 		if (key.objectid == root->root_key.objectid &&
4583 		    key.type == BTRFS_ROOT_REF_KEY)
4584 			ret = -ENOTEMPTY;
4585 	}
4586 out:
4587 	btrfs_free_path(path);
4588 	return ret;
4589 }
4590 
4591 /* Delete all dentries for inodes belonging to the root */
4592 static void btrfs_prune_dentries(struct btrfs_root *root)
4593 {
4594 	struct btrfs_fs_info *fs_info = root->fs_info;
4595 	struct rb_node *node;
4596 	struct rb_node *prev;
4597 	struct btrfs_inode *entry;
4598 	struct inode *inode;
4599 	u64 objectid = 0;
4600 
4601 	if (!BTRFS_FS_ERROR(fs_info))
4602 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4603 
4604 	spin_lock(&root->inode_lock);
4605 again:
4606 	node = root->inode_tree.rb_node;
4607 	prev = NULL;
4608 	while (node) {
4609 		prev = node;
4610 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4611 
4612 		if (objectid < btrfs_ino(entry))
4613 			node = node->rb_left;
4614 		else if (objectid > btrfs_ino(entry))
4615 			node = node->rb_right;
4616 		else
4617 			break;
4618 	}
4619 	if (!node) {
4620 		while (prev) {
4621 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4622 			if (objectid <= btrfs_ino(entry)) {
4623 				node = prev;
4624 				break;
4625 			}
4626 			prev = rb_next(prev);
4627 		}
4628 	}
4629 	while (node) {
4630 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4631 		objectid = btrfs_ino(entry) + 1;
4632 		inode = igrab(&entry->vfs_inode);
4633 		if (inode) {
4634 			spin_unlock(&root->inode_lock);
4635 			if (atomic_read(&inode->i_count) > 1)
4636 				d_prune_aliases(inode);
4637 			/*
4638 			 * btrfs_drop_inode will have it removed from the inode
4639 			 * cache when its usage count hits zero.
4640 			 */
4641 			iput(inode);
4642 			cond_resched();
4643 			spin_lock(&root->inode_lock);
4644 			goto again;
4645 		}
4646 
4647 		if (cond_resched_lock(&root->inode_lock))
4648 			goto again;
4649 
4650 		node = rb_next(node);
4651 	}
4652 	spin_unlock(&root->inode_lock);
4653 }
4654 
4655 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4656 {
4657 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4658 	struct btrfs_root *root = BTRFS_I(dir)->root;
4659 	struct inode *inode = d_inode(dentry);
4660 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4661 	struct btrfs_trans_handle *trans;
4662 	struct btrfs_block_rsv block_rsv;
4663 	u64 root_flags;
4664 	int ret;
4665 
4666 	/*
4667 	 * Don't allow to delete a subvolume with send in progress. This is
4668 	 * inside the inode lock so the error handling that has to drop the bit
4669 	 * again is not run concurrently.
4670 	 */
4671 	spin_lock(&dest->root_item_lock);
4672 	if (dest->send_in_progress) {
4673 		spin_unlock(&dest->root_item_lock);
4674 		btrfs_warn(fs_info,
4675 			   "attempt to delete subvolume %llu during send",
4676 			   dest->root_key.objectid);
4677 		return -EPERM;
4678 	}
4679 	if (atomic_read(&dest->nr_swapfiles)) {
4680 		spin_unlock(&dest->root_item_lock);
4681 		btrfs_warn(fs_info,
4682 			   "attempt to delete subvolume %llu with active swapfile",
4683 			   root->root_key.objectid);
4684 		return -EPERM;
4685 	}
4686 	root_flags = btrfs_root_flags(&dest->root_item);
4687 	btrfs_set_root_flags(&dest->root_item,
4688 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4689 	spin_unlock(&dest->root_item_lock);
4690 
4691 	down_write(&fs_info->subvol_sem);
4692 
4693 	ret = may_destroy_subvol(dest);
4694 	if (ret)
4695 		goto out_up_write;
4696 
4697 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4698 	/*
4699 	 * One for dir inode,
4700 	 * two for dir entries,
4701 	 * two for root ref/backref.
4702 	 */
4703 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4704 	if (ret)
4705 		goto out_up_write;
4706 
4707 	trans = btrfs_start_transaction(root, 0);
4708 	if (IS_ERR(trans)) {
4709 		ret = PTR_ERR(trans);
4710 		goto out_release;
4711 	}
4712 	trans->block_rsv = &block_rsv;
4713 	trans->bytes_reserved = block_rsv.size;
4714 
4715 	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4716 
4717 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4718 	if (ret) {
4719 		btrfs_abort_transaction(trans, ret);
4720 		goto out_end_trans;
4721 	}
4722 
4723 	ret = btrfs_record_root_in_trans(trans, dest);
4724 	if (ret) {
4725 		btrfs_abort_transaction(trans, ret);
4726 		goto out_end_trans;
4727 	}
4728 
4729 	memset(&dest->root_item.drop_progress, 0,
4730 		sizeof(dest->root_item.drop_progress));
4731 	btrfs_set_root_drop_level(&dest->root_item, 0);
4732 	btrfs_set_root_refs(&dest->root_item, 0);
4733 
4734 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4735 		ret = btrfs_insert_orphan_item(trans,
4736 					fs_info->tree_root,
4737 					dest->root_key.objectid);
4738 		if (ret) {
4739 			btrfs_abort_transaction(trans, ret);
4740 			goto out_end_trans;
4741 		}
4742 	}
4743 
4744 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4745 				  BTRFS_UUID_KEY_SUBVOL,
4746 				  dest->root_key.objectid);
4747 	if (ret && ret != -ENOENT) {
4748 		btrfs_abort_transaction(trans, ret);
4749 		goto out_end_trans;
4750 	}
4751 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4752 		ret = btrfs_uuid_tree_remove(trans,
4753 					  dest->root_item.received_uuid,
4754 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4755 					  dest->root_key.objectid);
4756 		if (ret && ret != -ENOENT) {
4757 			btrfs_abort_transaction(trans, ret);
4758 			goto out_end_trans;
4759 		}
4760 	}
4761 
4762 	free_anon_bdev(dest->anon_dev);
4763 	dest->anon_dev = 0;
4764 out_end_trans:
4765 	trans->block_rsv = NULL;
4766 	trans->bytes_reserved = 0;
4767 	ret = btrfs_end_transaction(trans);
4768 	inode->i_flags |= S_DEAD;
4769 out_release:
4770 	btrfs_subvolume_release_metadata(root, &block_rsv);
4771 out_up_write:
4772 	up_write(&fs_info->subvol_sem);
4773 	if (ret) {
4774 		spin_lock(&dest->root_item_lock);
4775 		root_flags = btrfs_root_flags(&dest->root_item);
4776 		btrfs_set_root_flags(&dest->root_item,
4777 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4778 		spin_unlock(&dest->root_item_lock);
4779 	} else {
4780 		d_invalidate(dentry);
4781 		btrfs_prune_dentries(dest);
4782 		ASSERT(dest->send_in_progress == 0);
4783 	}
4784 
4785 	return ret;
4786 }
4787 
4788 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4789 {
4790 	struct inode *inode = d_inode(dentry);
4791 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4792 	int err = 0;
4793 	struct btrfs_trans_handle *trans;
4794 	u64 last_unlink_trans;
4795 
4796 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4797 		return -ENOTEMPTY;
4798 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4799 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4800 			btrfs_err(fs_info,
4801 			"extent tree v2 doesn't support snapshot deletion yet");
4802 			return -EOPNOTSUPP;
4803 		}
4804 		return btrfs_delete_subvolume(dir, dentry);
4805 	}
4806 
4807 	trans = __unlink_start_trans(dir);
4808 	if (IS_ERR(trans))
4809 		return PTR_ERR(trans);
4810 
4811 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4812 		err = btrfs_unlink_subvol(trans, dir, dentry);
4813 		goto out;
4814 	}
4815 
4816 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4817 	if (err)
4818 		goto out;
4819 
4820 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4821 
4822 	/* now the directory is empty */
4823 	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
4824 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4825 			dentry->d_name.len);
4826 	if (!err) {
4827 		btrfs_i_size_write(BTRFS_I(inode), 0);
4828 		/*
4829 		 * Propagate the last_unlink_trans value of the deleted dir to
4830 		 * its parent directory. This is to prevent an unrecoverable
4831 		 * log tree in the case we do something like this:
4832 		 * 1) create dir foo
4833 		 * 2) create snapshot under dir foo
4834 		 * 3) delete the snapshot
4835 		 * 4) rmdir foo
4836 		 * 5) mkdir foo
4837 		 * 6) fsync foo or some file inside foo
4838 		 */
4839 		if (last_unlink_trans >= trans->transid)
4840 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4841 	}
4842 out:
4843 	btrfs_end_transaction(trans);
4844 	btrfs_btree_balance_dirty(fs_info);
4845 
4846 	return err;
4847 }
4848 
4849 /*
4850  * btrfs_truncate_block - read, zero a chunk and write a block
4851  * @inode - inode that we're zeroing
4852  * @from - the offset to start zeroing
4853  * @len - the length to zero, 0 to zero the entire range respective to the
4854  *	offset
4855  * @front - zero up to the offset instead of from the offset on
4856  *
4857  * This will find the block for the "from" offset and cow the block and zero the
4858  * part we want to zero.  This is used with truncate and hole punching.
4859  */
4860 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4861 			 int front)
4862 {
4863 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4864 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4865 	struct extent_io_tree *io_tree = &inode->io_tree;
4866 	struct btrfs_ordered_extent *ordered;
4867 	struct extent_state *cached_state = NULL;
4868 	struct extent_changeset *data_reserved = NULL;
4869 	bool only_release_metadata = false;
4870 	u32 blocksize = fs_info->sectorsize;
4871 	pgoff_t index = from >> PAGE_SHIFT;
4872 	unsigned offset = from & (blocksize - 1);
4873 	struct page *page;
4874 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4875 	size_t write_bytes = blocksize;
4876 	int ret = 0;
4877 	u64 block_start;
4878 	u64 block_end;
4879 
4880 	if (IS_ALIGNED(offset, blocksize) &&
4881 	    (!len || IS_ALIGNED(len, blocksize)))
4882 		goto out;
4883 
4884 	block_start = round_down(from, blocksize);
4885 	block_end = block_start + blocksize - 1;
4886 
4887 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4888 					  blocksize, false);
4889 	if (ret < 0) {
4890 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4891 			/* For nocow case, no need to reserve data space */
4892 			only_release_metadata = true;
4893 		} else {
4894 			goto out;
4895 		}
4896 	}
4897 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4898 	if (ret < 0) {
4899 		if (!only_release_metadata)
4900 			btrfs_free_reserved_data_space(inode, data_reserved,
4901 						       block_start, blocksize);
4902 		goto out;
4903 	}
4904 again:
4905 	page = find_or_create_page(mapping, index, mask);
4906 	if (!page) {
4907 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4908 					     blocksize, true);
4909 		btrfs_delalloc_release_extents(inode, blocksize);
4910 		ret = -ENOMEM;
4911 		goto out;
4912 	}
4913 	ret = set_page_extent_mapped(page);
4914 	if (ret < 0)
4915 		goto out_unlock;
4916 
4917 	if (!PageUptodate(page)) {
4918 		ret = btrfs_read_folio(NULL, page_folio(page));
4919 		lock_page(page);
4920 		if (page->mapping != mapping) {
4921 			unlock_page(page);
4922 			put_page(page);
4923 			goto again;
4924 		}
4925 		if (!PageUptodate(page)) {
4926 			ret = -EIO;
4927 			goto out_unlock;
4928 		}
4929 	}
4930 	wait_on_page_writeback(page);
4931 
4932 	lock_extent(io_tree, block_start, block_end, &cached_state);
4933 
4934 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4935 	if (ordered) {
4936 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4937 		unlock_page(page);
4938 		put_page(page);
4939 		btrfs_start_ordered_extent(ordered, 1);
4940 		btrfs_put_ordered_extent(ordered);
4941 		goto again;
4942 	}
4943 
4944 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4945 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4946 			 &cached_state);
4947 
4948 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4949 					&cached_state);
4950 	if (ret) {
4951 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4952 		goto out_unlock;
4953 	}
4954 
4955 	if (offset != blocksize) {
4956 		if (!len)
4957 			len = blocksize - offset;
4958 		if (front)
4959 			memzero_page(page, (block_start - page_offset(page)),
4960 				     offset);
4961 		else
4962 			memzero_page(page, (block_start - page_offset(page)) + offset,
4963 				     len);
4964 	}
4965 	btrfs_page_clear_checked(fs_info, page, block_start,
4966 				 block_end + 1 - block_start);
4967 	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
4968 	unlock_extent(io_tree, block_start, block_end, &cached_state);
4969 
4970 	if (only_release_metadata)
4971 		set_extent_bit(&inode->io_tree, block_start, block_end,
4972 			       EXTENT_NORESERVE, NULL, GFP_NOFS);
4973 
4974 out_unlock:
4975 	if (ret) {
4976 		if (only_release_metadata)
4977 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4978 		else
4979 			btrfs_delalloc_release_space(inode, data_reserved,
4980 					block_start, blocksize, true);
4981 	}
4982 	btrfs_delalloc_release_extents(inode, blocksize);
4983 	unlock_page(page);
4984 	put_page(page);
4985 out:
4986 	if (only_release_metadata)
4987 		btrfs_check_nocow_unlock(inode);
4988 	extent_changeset_free(data_reserved);
4989 	return ret;
4990 }
4991 
4992 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
4993 			     u64 offset, u64 len)
4994 {
4995 	struct btrfs_fs_info *fs_info = root->fs_info;
4996 	struct btrfs_trans_handle *trans;
4997 	struct btrfs_drop_extents_args drop_args = { 0 };
4998 	int ret;
4999 
5000 	/*
5001 	 * If NO_HOLES is enabled, we don't need to do anything.
5002 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
5003 	 * or btrfs_update_inode() will be called, which guarantee that the next
5004 	 * fsync will know this inode was changed and needs to be logged.
5005 	 */
5006 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
5007 		return 0;
5008 
5009 	/*
5010 	 * 1 - for the one we're dropping
5011 	 * 1 - for the one we're adding
5012 	 * 1 - for updating the inode.
5013 	 */
5014 	trans = btrfs_start_transaction(root, 3);
5015 	if (IS_ERR(trans))
5016 		return PTR_ERR(trans);
5017 
5018 	drop_args.start = offset;
5019 	drop_args.end = offset + len;
5020 	drop_args.drop_cache = true;
5021 
5022 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5023 	if (ret) {
5024 		btrfs_abort_transaction(trans, ret);
5025 		btrfs_end_transaction(trans);
5026 		return ret;
5027 	}
5028 
5029 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
5030 	if (ret) {
5031 		btrfs_abort_transaction(trans, ret);
5032 	} else {
5033 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5034 		btrfs_update_inode(trans, root, inode);
5035 	}
5036 	btrfs_end_transaction(trans);
5037 	return ret;
5038 }
5039 
5040 /*
5041  * This function puts in dummy file extents for the area we're creating a hole
5042  * for.  So if we are truncating this file to a larger size we need to insert
5043  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
5044  * the range between oldsize and size
5045  */
5046 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5047 {
5048 	struct btrfs_root *root = inode->root;
5049 	struct btrfs_fs_info *fs_info = root->fs_info;
5050 	struct extent_io_tree *io_tree = &inode->io_tree;
5051 	struct extent_map *em = NULL;
5052 	struct extent_state *cached_state = NULL;
5053 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5054 	u64 block_end = ALIGN(size, fs_info->sectorsize);
5055 	u64 last_byte;
5056 	u64 cur_offset;
5057 	u64 hole_size;
5058 	int err = 0;
5059 
5060 	/*
5061 	 * If our size started in the middle of a block we need to zero out the
5062 	 * rest of the block before we expand the i_size, otherwise we could
5063 	 * expose stale data.
5064 	 */
5065 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
5066 	if (err)
5067 		return err;
5068 
5069 	if (size <= hole_start)
5070 		return 0;
5071 
5072 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5073 					   &cached_state);
5074 	cur_offset = hole_start;
5075 	while (1) {
5076 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5077 				      block_end - cur_offset);
5078 		if (IS_ERR(em)) {
5079 			err = PTR_ERR(em);
5080 			em = NULL;
5081 			break;
5082 		}
5083 		last_byte = min(extent_map_end(em), block_end);
5084 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
5085 		hole_size = last_byte - cur_offset;
5086 
5087 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5088 			struct extent_map *hole_em;
5089 
5090 			err = maybe_insert_hole(root, inode, cur_offset,
5091 						hole_size);
5092 			if (err)
5093 				break;
5094 
5095 			err = btrfs_inode_set_file_extent_range(inode,
5096 							cur_offset, hole_size);
5097 			if (err)
5098 				break;
5099 
5100 			hole_em = alloc_extent_map();
5101 			if (!hole_em) {
5102 				btrfs_drop_extent_map_range(inode, cur_offset,
5103 						    cur_offset + hole_size - 1,
5104 						    false);
5105 				btrfs_set_inode_full_sync(inode);
5106 				goto next;
5107 			}
5108 			hole_em->start = cur_offset;
5109 			hole_em->len = hole_size;
5110 			hole_em->orig_start = cur_offset;
5111 
5112 			hole_em->block_start = EXTENT_MAP_HOLE;
5113 			hole_em->block_len = 0;
5114 			hole_em->orig_block_len = 0;
5115 			hole_em->ram_bytes = hole_size;
5116 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
5117 			hole_em->generation = fs_info->generation;
5118 
5119 			err = btrfs_replace_extent_map_range(inode, hole_em, true);
5120 			free_extent_map(hole_em);
5121 		} else {
5122 			err = btrfs_inode_set_file_extent_range(inode,
5123 							cur_offset, hole_size);
5124 			if (err)
5125 				break;
5126 		}
5127 next:
5128 		free_extent_map(em);
5129 		em = NULL;
5130 		cur_offset = last_byte;
5131 		if (cur_offset >= block_end)
5132 			break;
5133 	}
5134 	free_extent_map(em);
5135 	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5136 	return err;
5137 }
5138 
5139 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5140 {
5141 	struct btrfs_root *root = BTRFS_I(inode)->root;
5142 	struct btrfs_trans_handle *trans;
5143 	loff_t oldsize = i_size_read(inode);
5144 	loff_t newsize = attr->ia_size;
5145 	int mask = attr->ia_valid;
5146 	int ret;
5147 
5148 	/*
5149 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5150 	 * special case where we need to update the times despite not having
5151 	 * these flags set.  For all other operations the VFS set these flags
5152 	 * explicitly if it wants a timestamp update.
5153 	 */
5154 	if (newsize != oldsize) {
5155 		inode_inc_iversion(inode);
5156 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5157 			inode->i_mtime = current_time(inode);
5158 			inode->i_ctime = inode->i_mtime;
5159 		}
5160 	}
5161 
5162 	if (newsize > oldsize) {
5163 		/*
5164 		 * Don't do an expanding truncate while snapshotting is ongoing.
5165 		 * This is to ensure the snapshot captures a fully consistent
5166 		 * state of this file - if the snapshot captures this expanding
5167 		 * truncation, it must capture all writes that happened before
5168 		 * this truncation.
5169 		 */
5170 		btrfs_drew_write_lock(&root->snapshot_lock);
5171 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5172 		if (ret) {
5173 			btrfs_drew_write_unlock(&root->snapshot_lock);
5174 			return ret;
5175 		}
5176 
5177 		trans = btrfs_start_transaction(root, 1);
5178 		if (IS_ERR(trans)) {
5179 			btrfs_drew_write_unlock(&root->snapshot_lock);
5180 			return PTR_ERR(trans);
5181 		}
5182 
5183 		i_size_write(inode, newsize);
5184 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5185 		pagecache_isize_extended(inode, oldsize, newsize);
5186 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
5187 		btrfs_drew_write_unlock(&root->snapshot_lock);
5188 		btrfs_end_transaction(trans);
5189 	} else {
5190 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5191 
5192 		if (btrfs_is_zoned(fs_info)) {
5193 			ret = btrfs_wait_ordered_range(inode,
5194 					ALIGN(newsize, fs_info->sectorsize),
5195 					(u64)-1);
5196 			if (ret)
5197 				return ret;
5198 		}
5199 
5200 		/*
5201 		 * We're truncating a file that used to have good data down to
5202 		 * zero. Make sure any new writes to the file get on disk
5203 		 * on close.
5204 		 */
5205 		if (newsize == 0)
5206 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5207 				&BTRFS_I(inode)->runtime_flags);
5208 
5209 		truncate_setsize(inode, newsize);
5210 
5211 		inode_dio_wait(inode);
5212 
5213 		ret = btrfs_truncate(inode, newsize == oldsize);
5214 		if (ret && inode->i_nlink) {
5215 			int err;
5216 
5217 			/*
5218 			 * Truncate failed, so fix up the in-memory size. We
5219 			 * adjusted disk_i_size down as we removed extents, so
5220 			 * wait for disk_i_size to be stable and then update the
5221 			 * in-memory size to match.
5222 			 */
5223 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5224 			if (err)
5225 				return err;
5226 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5227 		}
5228 	}
5229 
5230 	return ret;
5231 }
5232 
5233 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
5234 			 struct iattr *attr)
5235 {
5236 	struct inode *inode = d_inode(dentry);
5237 	struct btrfs_root *root = BTRFS_I(inode)->root;
5238 	int err;
5239 
5240 	if (btrfs_root_readonly(root))
5241 		return -EROFS;
5242 
5243 	err = setattr_prepare(mnt_userns, dentry, attr);
5244 	if (err)
5245 		return err;
5246 
5247 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5248 		err = btrfs_setsize(inode, attr);
5249 		if (err)
5250 			return err;
5251 	}
5252 
5253 	if (attr->ia_valid) {
5254 		setattr_copy(mnt_userns, inode, attr);
5255 		inode_inc_iversion(inode);
5256 		err = btrfs_dirty_inode(inode);
5257 
5258 		if (!err && attr->ia_valid & ATTR_MODE)
5259 			err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
5260 	}
5261 
5262 	return err;
5263 }
5264 
5265 /*
5266  * While truncating the inode pages during eviction, we get the VFS
5267  * calling btrfs_invalidate_folio() against each folio of the inode. This
5268  * is slow because the calls to btrfs_invalidate_folio() result in a
5269  * huge amount of calls to lock_extent() and clear_extent_bit(),
5270  * which keep merging and splitting extent_state structures over and over,
5271  * wasting lots of time.
5272  *
5273  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5274  * skip all those expensive operations on a per folio basis and do only
5275  * the ordered io finishing, while we release here the extent_map and
5276  * extent_state structures, without the excessive merging and splitting.
5277  */
5278 static void evict_inode_truncate_pages(struct inode *inode)
5279 {
5280 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5281 	struct rb_node *node;
5282 
5283 	ASSERT(inode->i_state & I_FREEING);
5284 	truncate_inode_pages_final(&inode->i_data);
5285 
5286 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5287 
5288 	/*
5289 	 * Keep looping until we have no more ranges in the io tree.
5290 	 * We can have ongoing bios started by readahead that have
5291 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5292 	 * still in progress (unlocked the pages in the bio but did not yet
5293 	 * unlocked the ranges in the io tree). Therefore this means some
5294 	 * ranges can still be locked and eviction started because before
5295 	 * submitting those bios, which are executed by a separate task (work
5296 	 * queue kthread), inode references (inode->i_count) were not taken
5297 	 * (which would be dropped in the end io callback of each bio).
5298 	 * Therefore here we effectively end up waiting for those bios and
5299 	 * anyone else holding locked ranges without having bumped the inode's
5300 	 * reference count - if we don't do it, when they access the inode's
5301 	 * io_tree to unlock a range it may be too late, leading to an
5302 	 * use-after-free issue.
5303 	 */
5304 	spin_lock(&io_tree->lock);
5305 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5306 		struct extent_state *state;
5307 		struct extent_state *cached_state = NULL;
5308 		u64 start;
5309 		u64 end;
5310 		unsigned state_flags;
5311 
5312 		node = rb_first(&io_tree->state);
5313 		state = rb_entry(node, struct extent_state, rb_node);
5314 		start = state->start;
5315 		end = state->end;
5316 		state_flags = state->state;
5317 		spin_unlock(&io_tree->lock);
5318 
5319 		lock_extent(io_tree, start, end, &cached_state);
5320 
5321 		/*
5322 		 * If still has DELALLOC flag, the extent didn't reach disk,
5323 		 * and its reserved space won't be freed by delayed_ref.
5324 		 * So we need to free its reserved space here.
5325 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5326 		 *
5327 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5328 		 */
5329 		if (state_flags & EXTENT_DELALLOC)
5330 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5331 					       end - start + 1);
5332 
5333 		clear_extent_bit(io_tree, start, end,
5334 				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5335 				 &cached_state);
5336 
5337 		cond_resched();
5338 		spin_lock(&io_tree->lock);
5339 	}
5340 	spin_unlock(&io_tree->lock);
5341 }
5342 
5343 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5344 							struct btrfs_block_rsv *rsv)
5345 {
5346 	struct btrfs_fs_info *fs_info = root->fs_info;
5347 	struct btrfs_trans_handle *trans;
5348 	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
5349 	int ret;
5350 
5351 	/*
5352 	 * Eviction should be taking place at some place safe because of our
5353 	 * delayed iputs.  However the normal flushing code will run delayed
5354 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5355 	 *
5356 	 * We reserve the delayed_refs_extra here again because we can't use
5357 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5358 	 * above.  We reserve our extra bit here because we generate a ton of
5359 	 * delayed refs activity by truncating.
5360 	 *
5361 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5362 	 * if we fail to make this reservation we can re-try without the
5363 	 * delayed_refs_extra so we can make some forward progress.
5364 	 */
5365 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5366 				     BTRFS_RESERVE_FLUSH_EVICT);
5367 	if (ret) {
5368 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5369 					     BTRFS_RESERVE_FLUSH_EVICT);
5370 		if (ret) {
5371 			btrfs_warn(fs_info,
5372 				   "could not allocate space for delete; will truncate on mount");
5373 			return ERR_PTR(-ENOSPC);
5374 		}
5375 		delayed_refs_extra = 0;
5376 	}
5377 
5378 	trans = btrfs_join_transaction(root);
5379 	if (IS_ERR(trans))
5380 		return trans;
5381 
5382 	if (delayed_refs_extra) {
5383 		trans->block_rsv = &fs_info->trans_block_rsv;
5384 		trans->bytes_reserved = delayed_refs_extra;
5385 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5386 					delayed_refs_extra, 1);
5387 	}
5388 	return trans;
5389 }
5390 
5391 void btrfs_evict_inode(struct inode *inode)
5392 {
5393 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5394 	struct btrfs_trans_handle *trans;
5395 	struct btrfs_root *root = BTRFS_I(inode)->root;
5396 	struct btrfs_block_rsv *rsv;
5397 	int ret;
5398 
5399 	trace_btrfs_inode_evict(inode);
5400 
5401 	if (!root) {
5402 		fsverity_cleanup_inode(inode);
5403 		clear_inode(inode);
5404 		return;
5405 	}
5406 
5407 	evict_inode_truncate_pages(inode);
5408 
5409 	if (inode->i_nlink &&
5410 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5411 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5412 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5413 		goto no_delete;
5414 
5415 	if (is_bad_inode(inode))
5416 		goto no_delete;
5417 
5418 	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5419 
5420 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5421 		goto no_delete;
5422 
5423 	if (inode->i_nlink > 0) {
5424 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5425 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5426 		goto no_delete;
5427 	}
5428 
5429 	/*
5430 	 * This makes sure the inode item in tree is uptodate and the space for
5431 	 * the inode update is released.
5432 	 */
5433 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5434 	if (ret)
5435 		goto no_delete;
5436 
5437 	/*
5438 	 * This drops any pending insert or delete operations we have for this
5439 	 * inode.  We could have a delayed dir index deletion queued up, but
5440 	 * we're removing the inode completely so that'll be taken care of in
5441 	 * the truncate.
5442 	 */
5443 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5444 
5445 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5446 	if (!rsv)
5447 		goto no_delete;
5448 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5449 	rsv->failfast = true;
5450 
5451 	btrfs_i_size_write(BTRFS_I(inode), 0);
5452 
5453 	while (1) {
5454 		struct btrfs_truncate_control control = {
5455 			.inode = BTRFS_I(inode),
5456 			.ino = btrfs_ino(BTRFS_I(inode)),
5457 			.new_size = 0,
5458 			.min_type = 0,
5459 		};
5460 
5461 		trans = evict_refill_and_join(root, rsv);
5462 		if (IS_ERR(trans))
5463 			goto free_rsv;
5464 
5465 		trans->block_rsv = rsv;
5466 
5467 		ret = btrfs_truncate_inode_items(trans, root, &control);
5468 		trans->block_rsv = &fs_info->trans_block_rsv;
5469 		btrfs_end_transaction(trans);
5470 		btrfs_btree_balance_dirty(fs_info);
5471 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5472 			goto free_rsv;
5473 		else if (!ret)
5474 			break;
5475 	}
5476 
5477 	/*
5478 	 * Errors here aren't a big deal, it just means we leave orphan items in
5479 	 * the tree. They will be cleaned up on the next mount. If the inode
5480 	 * number gets reused, cleanup deletes the orphan item without doing
5481 	 * anything, and unlink reuses the existing orphan item.
5482 	 *
5483 	 * If it turns out that we are dropping too many of these, we might want
5484 	 * to add a mechanism for retrying these after a commit.
5485 	 */
5486 	trans = evict_refill_and_join(root, rsv);
5487 	if (!IS_ERR(trans)) {
5488 		trans->block_rsv = rsv;
5489 		btrfs_orphan_del(trans, BTRFS_I(inode));
5490 		trans->block_rsv = &fs_info->trans_block_rsv;
5491 		btrfs_end_transaction(trans);
5492 	}
5493 
5494 free_rsv:
5495 	btrfs_free_block_rsv(fs_info, rsv);
5496 no_delete:
5497 	/*
5498 	 * If we didn't successfully delete, the orphan item will still be in
5499 	 * the tree and we'll retry on the next mount. Again, we might also want
5500 	 * to retry these periodically in the future.
5501 	 */
5502 	btrfs_remove_delayed_node(BTRFS_I(inode));
5503 	fsverity_cleanup_inode(inode);
5504 	clear_inode(inode);
5505 }
5506 
5507 /*
5508  * Return the key found in the dir entry in the location pointer, fill @type
5509  * with BTRFS_FT_*, and return 0.
5510  *
5511  * If no dir entries were found, returns -ENOENT.
5512  * If found a corrupted location in dir entry, returns -EUCLEAN.
5513  */
5514 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5515 			       struct btrfs_key *location, u8 *type)
5516 {
5517 	const char *name = dentry->d_name.name;
5518 	int namelen = dentry->d_name.len;
5519 	struct btrfs_dir_item *di;
5520 	struct btrfs_path *path;
5521 	struct btrfs_root *root = BTRFS_I(dir)->root;
5522 	int ret = 0;
5523 
5524 	path = btrfs_alloc_path();
5525 	if (!path)
5526 		return -ENOMEM;
5527 
5528 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5529 			name, namelen, 0);
5530 	if (IS_ERR_OR_NULL(di)) {
5531 		ret = di ? PTR_ERR(di) : -ENOENT;
5532 		goto out;
5533 	}
5534 
5535 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5536 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5537 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5538 		ret = -EUCLEAN;
5539 		btrfs_warn(root->fs_info,
5540 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5541 			   __func__, name, btrfs_ino(BTRFS_I(dir)),
5542 			   location->objectid, location->type, location->offset);
5543 	}
5544 	if (!ret)
5545 		*type = btrfs_dir_type(path->nodes[0], di);
5546 out:
5547 	btrfs_free_path(path);
5548 	return ret;
5549 }
5550 
5551 /*
5552  * when we hit a tree root in a directory, the btrfs part of the inode
5553  * needs to be changed to reflect the root directory of the tree root.  This
5554  * is kind of like crossing a mount point.
5555  */
5556 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5557 				    struct inode *dir,
5558 				    struct dentry *dentry,
5559 				    struct btrfs_key *location,
5560 				    struct btrfs_root **sub_root)
5561 {
5562 	struct btrfs_path *path;
5563 	struct btrfs_root *new_root;
5564 	struct btrfs_root_ref *ref;
5565 	struct extent_buffer *leaf;
5566 	struct btrfs_key key;
5567 	int ret;
5568 	int err = 0;
5569 
5570 	path = btrfs_alloc_path();
5571 	if (!path) {
5572 		err = -ENOMEM;
5573 		goto out;
5574 	}
5575 
5576 	err = -ENOENT;
5577 	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5578 	key.type = BTRFS_ROOT_REF_KEY;
5579 	key.offset = location->objectid;
5580 
5581 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5582 	if (ret) {
5583 		if (ret < 0)
5584 			err = ret;
5585 		goto out;
5586 	}
5587 
5588 	leaf = path->nodes[0];
5589 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5590 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5591 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5592 		goto out;
5593 
5594 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5595 				   (unsigned long)(ref + 1),
5596 				   dentry->d_name.len);
5597 	if (ret)
5598 		goto out;
5599 
5600 	btrfs_release_path(path);
5601 
5602 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5603 	if (IS_ERR(new_root)) {
5604 		err = PTR_ERR(new_root);
5605 		goto out;
5606 	}
5607 
5608 	*sub_root = new_root;
5609 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5610 	location->type = BTRFS_INODE_ITEM_KEY;
5611 	location->offset = 0;
5612 	err = 0;
5613 out:
5614 	btrfs_free_path(path);
5615 	return err;
5616 }
5617 
5618 static void inode_tree_add(struct inode *inode)
5619 {
5620 	struct btrfs_root *root = BTRFS_I(inode)->root;
5621 	struct btrfs_inode *entry;
5622 	struct rb_node **p;
5623 	struct rb_node *parent;
5624 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
5625 	u64 ino = btrfs_ino(BTRFS_I(inode));
5626 
5627 	if (inode_unhashed(inode))
5628 		return;
5629 	parent = NULL;
5630 	spin_lock(&root->inode_lock);
5631 	p = &root->inode_tree.rb_node;
5632 	while (*p) {
5633 		parent = *p;
5634 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5635 
5636 		if (ino < btrfs_ino(entry))
5637 			p = &parent->rb_left;
5638 		else if (ino > btrfs_ino(entry))
5639 			p = &parent->rb_right;
5640 		else {
5641 			WARN_ON(!(entry->vfs_inode.i_state &
5642 				  (I_WILL_FREE | I_FREEING)));
5643 			rb_replace_node(parent, new, &root->inode_tree);
5644 			RB_CLEAR_NODE(parent);
5645 			spin_unlock(&root->inode_lock);
5646 			return;
5647 		}
5648 	}
5649 	rb_link_node(new, parent, p);
5650 	rb_insert_color(new, &root->inode_tree);
5651 	spin_unlock(&root->inode_lock);
5652 }
5653 
5654 static void inode_tree_del(struct btrfs_inode *inode)
5655 {
5656 	struct btrfs_root *root = inode->root;
5657 	int empty = 0;
5658 
5659 	spin_lock(&root->inode_lock);
5660 	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5661 		rb_erase(&inode->rb_node, &root->inode_tree);
5662 		RB_CLEAR_NODE(&inode->rb_node);
5663 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5664 	}
5665 	spin_unlock(&root->inode_lock);
5666 
5667 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5668 		spin_lock(&root->inode_lock);
5669 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5670 		spin_unlock(&root->inode_lock);
5671 		if (empty)
5672 			btrfs_add_dead_root(root);
5673 	}
5674 }
5675 
5676 
5677 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5678 {
5679 	struct btrfs_iget_args *args = p;
5680 
5681 	inode->i_ino = args->ino;
5682 	BTRFS_I(inode)->location.objectid = args->ino;
5683 	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5684 	BTRFS_I(inode)->location.offset = 0;
5685 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5686 	BUG_ON(args->root && !BTRFS_I(inode)->root);
5687 
5688 	if (args->root && args->root == args->root->fs_info->tree_root &&
5689 	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5690 		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5691 			&BTRFS_I(inode)->runtime_flags);
5692 	return 0;
5693 }
5694 
5695 static int btrfs_find_actor(struct inode *inode, void *opaque)
5696 {
5697 	struct btrfs_iget_args *args = opaque;
5698 
5699 	return args->ino == BTRFS_I(inode)->location.objectid &&
5700 		args->root == BTRFS_I(inode)->root;
5701 }
5702 
5703 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5704 				       struct btrfs_root *root)
5705 {
5706 	struct inode *inode;
5707 	struct btrfs_iget_args args;
5708 	unsigned long hashval = btrfs_inode_hash(ino, root);
5709 
5710 	args.ino = ino;
5711 	args.root = root;
5712 
5713 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5714 			     btrfs_init_locked_inode,
5715 			     (void *)&args);
5716 	return inode;
5717 }
5718 
5719 /*
5720  * Get an inode object given its inode number and corresponding root.
5721  * Path can be preallocated to prevent recursing back to iget through
5722  * allocator. NULL is also valid but may require an additional allocation
5723  * later.
5724  */
5725 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5726 			      struct btrfs_root *root, struct btrfs_path *path)
5727 {
5728 	struct inode *inode;
5729 
5730 	inode = btrfs_iget_locked(s, ino, root);
5731 	if (!inode)
5732 		return ERR_PTR(-ENOMEM);
5733 
5734 	if (inode->i_state & I_NEW) {
5735 		int ret;
5736 
5737 		ret = btrfs_read_locked_inode(inode, path);
5738 		if (!ret) {
5739 			inode_tree_add(inode);
5740 			unlock_new_inode(inode);
5741 		} else {
5742 			iget_failed(inode);
5743 			/*
5744 			 * ret > 0 can come from btrfs_search_slot called by
5745 			 * btrfs_read_locked_inode, this means the inode item
5746 			 * was not found.
5747 			 */
5748 			if (ret > 0)
5749 				ret = -ENOENT;
5750 			inode = ERR_PTR(ret);
5751 		}
5752 	}
5753 
5754 	return inode;
5755 }
5756 
5757 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5758 {
5759 	return btrfs_iget_path(s, ino, root, NULL);
5760 }
5761 
5762 static struct inode *new_simple_dir(struct super_block *s,
5763 				    struct btrfs_key *key,
5764 				    struct btrfs_root *root)
5765 {
5766 	struct inode *inode = new_inode(s);
5767 
5768 	if (!inode)
5769 		return ERR_PTR(-ENOMEM);
5770 
5771 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5772 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5773 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5774 
5775 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5776 	/*
5777 	 * We only need lookup, the rest is read-only and there's no inode
5778 	 * associated with the dentry
5779 	 */
5780 	inode->i_op = &simple_dir_inode_operations;
5781 	inode->i_opflags &= ~IOP_XATTR;
5782 	inode->i_fop = &simple_dir_operations;
5783 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5784 	inode->i_mtime = current_time(inode);
5785 	inode->i_atime = inode->i_mtime;
5786 	inode->i_ctime = inode->i_mtime;
5787 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5788 
5789 	return inode;
5790 }
5791 
5792 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5793 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5794 static_assert(BTRFS_FT_DIR == FT_DIR);
5795 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5796 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5797 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5798 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5799 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5800 
5801 static inline u8 btrfs_inode_type(struct inode *inode)
5802 {
5803 	return fs_umode_to_ftype(inode->i_mode);
5804 }
5805 
5806 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5807 {
5808 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5809 	struct inode *inode;
5810 	struct btrfs_root *root = BTRFS_I(dir)->root;
5811 	struct btrfs_root *sub_root = root;
5812 	struct btrfs_key location;
5813 	u8 di_type = 0;
5814 	int ret = 0;
5815 
5816 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5817 		return ERR_PTR(-ENAMETOOLONG);
5818 
5819 	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5820 	if (ret < 0)
5821 		return ERR_PTR(ret);
5822 
5823 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5824 		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5825 		if (IS_ERR(inode))
5826 			return inode;
5827 
5828 		/* Do extra check against inode mode with di_type */
5829 		if (btrfs_inode_type(inode) != di_type) {
5830 			btrfs_crit(fs_info,
5831 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5832 				  inode->i_mode, btrfs_inode_type(inode),
5833 				  di_type);
5834 			iput(inode);
5835 			return ERR_PTR(-EUCLEAN);
5836 		}
5837 		return inode;
5838 	}
5839 
5840 	ret = fixup_tree_root_location(fs_info, dir, dentry,
5841 				       &location, &sub_root);
5842 	if (ret < 0) {
5843 		if (ret != -ENOENT)
5844 			inode = ERR_PTR(ret);
5845 		else
5846 			inode = new_simple_dir(dir->i_sb, &location, root);
5847 	} else {
5848 		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5849 		btrfs_put_root(sub_root);
5850 
5851 		if (IS_ERR(inode))
5852 			return inode;
5853 
5854 		down_read(&fs_info->cleanup_work_sem);
5855 		if (!sb_rdonly(inode->i_sb))
5856 			ret = btrfs_orphan_cleanup(sub_root);
5857 		up_read(&fs_info->cleanup_work_sem);
5858 		if (ret) {
5859 			iput(inode);
5860 			inode = ERR_PTR(ret);
5861 		}
5862 	}
5863 
5864 	return inode;
5865 }
5866 
5867 static int btrfs_dentry_delete(const struct dentry *dentry)
5868 {
5869 	struct btrfs_root *root;
5870 	struct inode *inode = d_inode(dentry);
5871 
5872 	if (!inode && !IS_ROOT(dentry))
5873 		inode = d_inode(dentry->d_parent);
5874 
5875 	if (inode) {
5876 		root = BTRFS_I(inode)->root;
5877 		if (btrfs_root_refs(&root->root_item) == 0)
5878 			return 1;
5879 
5880 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5881 			return 1;
5882 	}
5883 	return 0;
5884 }
5885 
5886 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5887 				   unsigned int flags)
5888 {
5889 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5890 
5891 	if (inode == ERR_PTR(-ENOENT))
5892 		inode = NULL;
5893 	return d_splice_alias(inode, dentry);
5894 }
5895 
5896 /*
5897  * All this infrastructure exists because dir_emit can fault, and we are holding
5898  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5899  * our information into that, and then dir_emit from the buffer.  This is
5900  * similar to what NFS does, only we don't keep the buffer around in pagecache
5901  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5902  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5903  * tree lock.
5904  */
5905 static int btrfs_opendir(struct inode *inode, struct file *file)
5906 {
5907 	struct btrfs_file_private *private;
5908 
5909 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5910 	if (!private)
5911 		return -ENOMEM;
5912 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5913 	if (!private->filldir_buf) {
5914 		kfree(private);
5915 		return -ENOMEM;
5916 	}
5917 	file->private_data = private;
5918 	return 0;
5919 }
5920 
5921 struct dir_entry {
5922 	u64 ino;
5923 	u64 offset;
5924 	unsigned type;
5925 	int name_len;
5926 };
5927 
5928 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5929 {
5930 	while (entries--) {
5931 		struct dir_entry *entry = addr;
5932 		char *name = (char *)(entry + 1);
5933 
5934 		ctx->pos = get_unaligned(&entry->offset);
5935 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5936 					 get_unaligned(&entry->ino),
5937 					 get_unaligned(&entry->type)))
5938 			return 1;
5939 		addr += sizeof(struct dir_entry) +
5940 			get_unaligned(&entry->name_len);
5941 		ctx->pos++;
5942 	}
5943 	return 0;
5944 }
5945 
5946 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5947 {
5948 	struct inode *inode = file_inode(file);
5949 	struct btrfs_root *root = BTRFS_I(inode)->root;
5950 	struct btrfs_file_private *private = file->private_data;
5951 	struct btrfs_dir_item *di;
5952 	struct btrfs_key key;
5953 	struct btrfs_key found_key;
5954 	struct btrfs_path *path;
5955 	void *addr;
5956 	struct list_head ins_list;
5957 	struct list_head del_list;
5958 	int ret;
5959 	char *name_ptr;
5960 	int name_len;
5961 	int entries = 0;
5962 	int total_len = 0;
5963 	bool put = false;
5964 	struct btrfs_key location;
5965 
5966 	if (!dir_emit_dots(file, ctx))
5967 		return 0;
5968 
5969 	path = btrfs_alloc_path();
5970 	if (!path)
5971 		return -ENOMEM;
5972 
5973 	addr = private->filldir_buf;
5974 	path->reada = READA_FORWARD;
5975 
5976 	INIT_LIST_HEAD(&ins_list);
5977 	INIT_LIST_HEAD(&del_list);
5978 	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5979 
5980 again:
5981 	key.type = BTRFS_DIR_INDEX_KEY;
5982 	key.offset = ctx->pos;
5983 	key.objectid = btrfs_ino(BTRFS_I(inode));
5984 
5985 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5986 		struct dir_entry *entry;
5987 		struct extent_buffer *leaf = path->nodes[0];
5988 
5989 		if (found_key.objectid != key.objectid)
5990 			break;
5991 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5992 			break;
5993 		if (found_key.offset < ctx->pos)
5994 			continue;
5995 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5996 			continue;
5997 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5998 		name_len = btrfs_dir_name_len(leaf, di);
5999 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
6000 		    PAGE_SIZE) {
6001 			btrfs_release_path(path);
6002 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6003 			if (ret)
6004 				goto nopos;
6005 			addr = private->filldir_buf;
6006 			entries = 0;
6007 			total_len = 0;
6008 			goto again;
6009 		}
6010 
6011 		entry = addr;
6012 		put_unaligned(name_len, &entry->name_len);
6013 		name_ptr = (char *)(entry + 1);
6014 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6015 				   name_len);
6016 		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
6017 				&entry->type);
6018 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6019 		put_unaligned(location.objectid, &entry->ino);
6020 		put_unaligned(found_key.offset, &entry->offset);
6021 		entries++;
6022 		addr += sizeof(struct dir_entry) + name_len;
6023 		total_len += sizeof(struct dir_entry) + name_len;
6024 	}
6025 	/* Catch error encountered during iteration */
6026 	if (ret < 0)
6027 		goto err;
6028 
6029 	btrfs_release_path(path);
6030 
6031 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6032 	if (ret)
6033 		goto nopos;
6034 
6035 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6036 	if (ret)
6037 		goto nopos;
6038 
6039 	/*
6040 	 * Stop new entries from being returned after we return the last
6041 	 * entry.
6042 	 *
6043 	 * New directory entries are assigned a strictly increasing
6044 	 * offset.  This means that new entries created during readdir
6045 	 * are *guaranteed* to be seen in the future by that readdir.
6046 	 * This has broken buggy programs which operate on names as
6047 	 * they're returned by readdir.  Until we re-use freed offsets
6048 	 * we have this hack to stop new entries from being returned
6049 	 * under the assumption that they'll never reach this huge
6050 	 * offset.
6051 	 *
6052 	 * This is being careful not to overflow 32bit loff_t unless the
6053 	 * last entry requires it because doing so has broken 32bit apps
6054 	 * in the past.
6055 	 */
6056 	if (ctx->pos >= INT_MAX)
6057 		ctx->pos = LLONG_MAX;
6058 	else
6059 		ctx->pos = INT_MAX;
6060 nopos:
6061 	ret = 0;
6062 err:
6063 	if (put)
6064 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6065 	btrfs_free_path(path);
6066 	return ret;
6067 }
6068 
6069 /*
6070  * This is somewhat expensive, updating the tree every time the
6071  * inode changes.  But, it is most likely to find the inode in cache.
6072  * FIXME, needs more benchmarking...there are no reasons other than performance
6073  * to keep or drop this code.
6074  */
6075 static int btrfs_dirty_inode(struct inode *inode)
6076 {
6077 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6078 	struct btrfs_root *root = BTRFS_I(inode)->root;
6079 	struct btrfs_trans_handle *trans;
6080 	int ret;
6081 
6082 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6083 		return 0;
6084 
6085 	trans = btrfs_join_transaction(root);
6086 	if (IS_ERR(trans))
6087 		return PTR_ERR(trans);
6088 
6089 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6090 	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
6091 		/* whoops, lets try again with the full transaction */
6092 		btrfs_end_transaction(trans);
6093 		trans = btrfs_start_transaction(root, 1);
6094 		if (IS_ERR(trans))
6095 			return PTR_ERR(trans);
6096 
6097 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6098 	}
6099 	btrfs_end_transaction(trans);
6100 	if (BTRFS_I(inode)->delayed_node)
6101 		btrfs_balance_delayed_items(fs_info);
6102 
6103 	return ret;
6104 }
6105 
6106 /*
6107  * This is a copy of file_update_time.  We need this so we can return error on
6108  * ENOSPC for updating the inode in the case of file write and mmap writes.
6109  */
6110 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6111 			     int flags)
6112 {
6113 	struct btrfs_root *root = BTRFS_I(inode)->root;
6114 	bool dirty = flags & ~S_VERSION;
6115 
6116 	if (btrfs_root_readonly(root))
6117 		return -EROFS;
6118 
6119 	if (flags & S_VERSION)
6120 		dirty |= inode_maybe_inc_iversion(inode, dirty);
6121 	if (flags & S_CTIME)
6122 		inode->i_ctime = *now;
6123 	if (flags & S_MTIME)
6124 		inode->i_mtime = *now;
6125 	if (flags & S_ATIME)
6126 		inode->i_atime = *now;
6127 	return dirty ? btrfs_dirty_inode(inode) : 0;
6128 }
6129 
6130 /*
6131  * find the highest existing sequence number in a directory
6132  * and then set the in-memory index_cnt variable to reflect
6133  * free sequence numbers
6134  */
6135 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6136 {
6137 	struct btrfs_root *root = inode->root;
6138 	struct btrfs_key key, found_key;
6139 	struct btrfs_path *path;
6140 	struct extent_buffer *leaf;
6141 	int ret;
6142 
6143 	key.objectid = btrfs_ino(inode);
6144 	key.type = BTRFS_DIR_INDEX_KEY;
6145 	key.offset = (u64)-1;
6146 
6147 	path = btrfs_alloc_path();
6148 	if (!path)
6149 		return -ENOMEM;
6150 
6151 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6152 	if (ret < 0)
6153 		goto out;
6154 	/* FIXME: we should be able to handle this */
6155 	if (ret == 0)
6156 		goto out;
6157 	ret = 0;
6158 
6159 	if (path->slots[0] == 0) {
6160 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6161 		goto out;
6162 	}
6163 
6164 	path->slots[0]--;
6165 
6166 	leaf = path->nodes[0];
6167 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6168 
6169 	if (found_key.objectid != btrfs_ino(inode) ||
6170 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6171 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6172 		goto out;
6173 	}
6174 
6175 	inode->index_cnt = found_key.offset + 1;
6176 out:
6177 	btrfs_free_path(path);
6178 	return ret;
6179 }
6180 
6181 /*
6182  * helper to find a free sequence number in a given directory.  This current
6183  * code is very simple, later versions will do smarter things in the btree
6184  */
6185 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6186 {
6187 	int ret = 0;
6188 
6189 	if (dir->index_cnt == (u64)-1) {
6190 		ret = btrfs_inode_delayed_dir_index_count(dir);
6191 		if (ret) {
6192 			ret = btrfs_set_inode_index_count(dir);
6193 			if (ret)
6194 				return ret;
6195 		}
6196 	}
6197 
6198 	*index = dir->index_cnt;
6199 	dir->index_cnt++;
6200 
6201 	return ret;
6202 }
6203 
6204 static int btrfs_insert_inode_locked(struct inode *inode)
6205 {
6206 	struct btrfs_iget_args args;
6207 
6208 	args.ino = BTRFS_I(inode)->location.objectid;
6209 	args.root = BTRFS_I(inode)->root;
6210 
6211 	return insert_inode_locked4(inode,
6212 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6213 		   btrfs_find_actor, &args);
6214 }
6215 
6216 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6217 			    unsigned int *trans_num_items)
6218 {
6219 	struct inode *dir = args->dir;
6220 	struct inode *inode = args->inode;
6221 	int ret;
6222 
6223 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6224 	if (ret)
6225 		return ret;
6226 
6227 	/* 1 to add inode item */
6228 	*trans_num_items = 1;
6229 	/* 1 to add compression property */
6230 	if (BTRFS_I(dir)->prop_compress)
6231 		(*trans_num_items)++;
6232 	/* 1 to add default ACL xattr */
6233 	if (args->default_acl)
6234 		(*trans_num_items)++;
6235 	/* 1 to add access ACL xattr */
6236 	if (args->acl)
6237 		(*trans_num_items)++;
6238 #ifdef CONFIG_SECURITY
6239 	/* 1 to add LSM xattr */
6240 	if (dir->i_security)
6241 		(*trans_num_items)++;
6242 #endif
6243 	if (args->orphan) {
6244 		/* 1 to add orphan item */
6245 		(*trans_num_items)++;
6246 	} else {
6247 		/*
6248 		 * 1 to add dir item
6249 		 * 1 to add dir index
6250 		 * 1 to update parent inode item
6251 		 *
6252 		 * No need for 1 unit for the inode ref item because it is
6253 		 * inserted in a batch together with the inode item at
6254 		 * btrfs_create_new_inode().
6255 		 */
6256 		*trans_num_items += 3;
6257 	}
6258 	return 0;
6259 }
6260 
6261 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6262 {
6263 	posix_acl_release(args->acl);
6264 	posix_acl_release(args->default_acl);
6265 }
6266 
6267 /*
6268  * Inherit flags from the parent inode.
6269  *
6270  * Currently only the compression flags and the cow flags are inherited.
6271  */
6272 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6273 {
6274 	unsigned int flags;
6275 
6276 	flags = BTRFS_I(dir)->flags;
6277 
6278 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6279 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6280 		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6281 	} else if (flags & BTRFS_INODE_COMPRESS) {
6282 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6283 		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6284 	}
6285 
6286 	if (flags & BTRFS_INODE_NODATACOW) {
6287 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6288 		if (S_ISREG(inode->i_mode))
6289 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6290 	}
6291 
6292 	btrfs_sync_inode_flags_to_i_flags(inode);
6293 }
6294 
6295 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6296 			   struct btrfs_new_inode_args *args)
6297 {
6298 	struct inode *dir = args->dir;
6299 	struct inode *inode = args->inode;
6300 	const char *name = args->orphan ? NULL : args->dentry->d_name.name;
6301 	int name_len = args->orphan ? 0 : args->dentry->d_name.len;
6302 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6303 	struct btrfs_root *root;
6304 	struct btrfs_inode_item *inode_item;
6305 	struct btrfs_key *location;
6306 	struct btrfs_path *path;
6307 	u64 objectid;
6308 	struct btrfs_inode_ref *ref;
6309 	struct btrfs_key key[2];
6310 	u32 sizes[2];
6311 	struct btrfs_item_batch batch;
6312 	unsigned long ptr;
6313 	int ret;
6314 
6315 	path = btrfs_alloc_path();
6316 	if (!path)
6317 		return -ENOMEM;
6318 
6319 	if (!args->subvol)
6320 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6321 	root = BTRFS_I(inode)->root;
6322 
6323 	ret = btrfs_get_free_objectid(root, &objectid);
6324 	if (ret)
6325 		goto out;
6326 	inode->i_ino = objectid;
6327 
6328 	if (args->orphan) {
6329 		/*
6330 		 * O_TMPFILE, set link count to 0, so that after this point, we
6331 		 * fill in an inode item with the correct link count.
6332 		 */
6333 		set_nlink(inode, 0);
6334 	} else {
6335 		trace_btrfs_inode_request(dir);
6336 
6337 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6338 		if (ret)
6339 			goto out;
6340 	}
6341 	/* index_cnt is ignored for everything but a dir. */
6342 	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6343 	BTRFS_I(inode)->generation = trans->transid;
6344 	inode->i_generation = BTRFS_I(inode)->generation;
6345 
6346 	/*
6347 	 * Subvolumes don't inherit flags from their parent directory.
6348 	 * Originally this was probably by accident, but we probably can't
6349 	 * change it now without compatibility issues.
6350 	 */
6351 	if (!args->subvol)
6352 		btrfs_inherit_iflags(inode, dir);
6353 
6354 	if (S_ISREG(inode->i_mode)) {
6355 		if (btrfs_test_opt(fs_info, NODATASUM))
6356 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6357 		if (btrfs_test_opt(fs_info, NODATACOW))
6358 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6359 				BTRFS_INODE_NODATASUM;
6360 	}
6361 
6362 	location = &BTRFS_I(inode)->location;
6363 	location->objectid = objectid;
6364 	location->offset = 0;
6365 	location->type = BTRFS_INODE_ITEM_KEY;
6366 
6367 	ret = btrfs_insert_inode_locked(inode);
6368 	if (ret < 0) {
6369 		if (!args->orphan)
6370 			BTRFS_I(dir)->index_cnt--;
6371 		goto out;
6372 	}
6373 
6374 	/*
6375 	 * We could have gotten an inode number from somebody who was fsynced
6376 	 * and then removed in this same transaction, so let's just set full
6377 	 * sync since it will be a full sync anyway and this will blow away the
6378 	 * old info in the log.
6379 	 */
6380 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6381 
6382 	key[0].objectid = objectid;
6383 	key[0].type = BTRFS_INODE_ITEM_KEY;
6384 	key[0].offset = 0;
6385 
6386 	sizes[0] = sizeof(struct btrfs_inode_item);
6387 
6388 	if (!args->orphan) {
6389 		/*
6390 		 * Start new inodes with an inode_ref. This is slightly more
6391 		 * efficient for small numbers of hard links since they will
6392 		 * be packed into one item. Extended refs will kick in if we
6393 		 * add more hard links than can fit in the ref item.
6394 		 */
6395 		key[1].objectid = objectid;
6396 		key[1].type = BTRFS_INODE_REF_KEY;
6397 		if (args->subvol) {
6398 			key[1].offset = objectid;
6399 			sizes[1] = 2 + sizeof(*ref);
6400 		} else {
6401 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6402 			sizes[1] = name_len + sizeof(*ref);
6403 		}
6404 	}
6405 
6406 	batch.keys = &key[0];
6407 	batch.data_sizes = &sizes[0];
6408 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6409 	batch.nr = args->orphan ? 1 : 2;
6410 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6411 	if (ret != 0) {
6412 		btrfs_abort_transaction(trans, ret);
6413 		goto discard;
6414 	}
6415 
6416 	inode->i_mtime = current_time(inode);
6417 	inode->i_atime = inode->i_mtime;
6418 	inode->i_ctime = inode->i_mtime;
6419 	BTRFS_I(inode)->i_otime = inode->i_mtime;
6420 
6421 	/*
6422 	 * We're going to fill the inode item now, so at this point the inode
6423 	 * must be fully initialized.
6424 	 */
6425 
6426 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6427 				  struct btrfs_inode_item);
6428 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6429 			     sizeof(*inode_item));
6430 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6431 
6432 	if (!args->orphan) {
6433 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6434 				     struct btrfs_inode_ref);
6435 		ptr = (unsigned long)(ref + 1);
6436 		if (args->subvol) {
6437 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6438 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6439 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6440 		} else {
6441 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6442 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6443 						  BTRFS_I(inode)->dir_index);
6444 			write_extent_buffer(path->nodes[0], name, ptr, name_len);
6445 		}
6446 	}
6447 
6448 	btrfs_mark_buffer_dirty(path->nodes[0]);
6449 	/*
6450 	 * We don't need the path anymore, plus inheriting properties, adding
6451 	 * ACLs, security xattrs, orphan item or adding the link, will result in
6452 	 * allocating yet another path. So just free our path.
6453 	 */
6454 	btrfs_free_path(path);
6455 	path = NULL;
6456 
6457 	if (args->subvol) {
6458 		struct inode *parent;
6459 
6460 		/*
6461 		 * Subvolumes inherit properties from their parent subvolume,
6462 		 * not the directory they were created in.
6463 		 */
6464 		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6465 				    BTRFS_I(dir)->root);
6466 		if (IS_ERR(parent)) {
6467 			ret = PTR_ERR(parent);
6468 		} else {
6469 			ret = btrfs_inode_inherit_props(trans, inode, parent);
6470 			iput(parent);
6471 		}
6472 	} else {
6473 		ret = btrfs_inode_inherit_props(trans, inode, dir);
6474 	}
6475 	if (ret) {
6476 		btrfs_err(fs_info,
6477 			  "error inheriting props for ino %llu (root %llu): %d",
6478 			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6479 			  ret);
6480 	}
6481 
6482 	/*
6483 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6484 	 * probably a bug.
6485 	 */
6486 	if (!args->subvol) {
6487 		ret = btrfs_init_inode_security(trans, args);
6488 		if (ret) {
6489 			btrfs_abort_transaction(trans, ret);
6490 			goto discard;
6491 		}
6492 	}
6493 
6494 	inode_tree_add(inode);
6495 
6496 	trace_btrfs_inode_new(inode);
6497 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6498 
6499 	btrfs_update_root_times(trans, root);
6500 
6501 	if (args->orphan) {
6502 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6503 	} else {
6504 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6505 				     name_len, 0, BTRFS_I(inode)->dir_index);
6506 	}
6507 	if (ret) {
6508 		btrfs_abort_transaction(trans, ret);
6509 		goto discard;
6510 	}
6511 
6512 	return 0;
6513 
6514 discard:
6515 	/*
6516 	 * discard_new_inode() calls iput(), but the caller owns the reference
6517 	 * to the inode.
6518 	 */
6519 	ihold(inode);
6520 	discard_new_inode(inode);
6521 out:
6522 	btrfs_free_path(path);
6523 	return ret;
6524 }
6525 
6526 /*
6527  * utility function to add 'inode' into 'parent_inode' with
6528  * a give name and a given sequence number.
6529  * if 'add_backref' is true, also insert a backref from the
6530  * inode to the parent directory.
6531  */
6532 int btrfs_add_link(struct btrfs_trans_handle *trans,
6533 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6534 		   const char *name, int name_len, int add_backref, u64 index)
6535 {
6536 	int ret = 0;
6537 	struct btrfs_key key;
6538 	struct btrfs_root *root = parent_inode->root;
6539 	u64 ino = btrfs_ino(inode);
6540 	u64 parent_ino = btrfs_ino(parent_inode);
6541 
6542 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6543 		memcpy(&key, &inode->root->root_key, sizeof(key));
6544 	} else {
6545 		key.objectid = ino;
6546 		key.type = BTRFS_INODE_ITEM_KEY;
6547 		key.offset = 0;
6548 	}
6549 
6550 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6551 		ret = btrfs_add_root_ref(trans, key.objectid,
6552 					 root->root_key.objectid, parent_ino,
6553 					 index, name, name_len);
6554 	} else if (add_backref) {
6555 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6556 					     parent_ino, index);
6557 	}
6558 
6559 	/* Nothing to clean up yet */
6560 	if (ret)
6561 		return ret;
6562 
6563 	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
6564 				    btrfs_inode_type(&inode->vfs_inode), index);
6565 	if (ret == -EEXIST || ret == -EOVERFLOW)
6566 		goto fail_dir_item;
6567 	else if (ret) {
6568 		btrfs_abort_transaction(trans, ret);
6569 		return ret;
6570 	}
6571 
6572 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6573 			   name_len * 2);
6574 	inode_inc_iversion(&parent_inode->vfs_inode);
6575 	/*
6576 	 * If we are replaying a log tree, we do not want to update the mtime
6577 	 * and ctime of the parent directory with the current time, since the
6578 	 * log replay procedure is responsible for setting them to their correct
6579 	 * values (the ones it had when the fsync was done).
6580 	 */
6581 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
6582 		struct timespec64 now = current_time(&parent_inode->vfs_inode);
6583 
6584 		parent_inode->vfs_inode.i_mtime = now;
6585 		parent_inode->vfs_inode.i_ctime = now;
6586 	}
6587 	ret = btrfs_update_inode(trans, root, parent_inode);
6588 	if (ret)
6589 		btrfs_abort_transaction(trans, ret);
6590 	return ret;
6591 
6592 fail_dir_item:
6593 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6594 		u64 local_index;
6595 		int err;
6596 		err = btrfs_del_root_ref(trans, key.objectid,
6597 					 root->root_key.objectid, parent_ino,
6598 					 &local_index, name, name_len);
6599 		if (err)
6600 			btrfs_abort_transaction(trans, err);
6601 	} else if (add_backref) {
6602 		u64 local_index;
6603 		int err;
6604 
6605 		err = btrfs_del_inode_ref(trans, root, name, name_len,
6606 					  ino, parent_ino, &local_index);
6607 		if (err)
6608 			btrfs_abort_transaction(trans, err);
6609 	}
6610 
6611 	/* Return the original error code */
6612 	return ret;
6613 }
6614 
6615 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6616 			       struct inode *inode)
6617 {
6618 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6619 	struct btrfs_root *root = BTRFS_I(dir)->root;
6620 	struct btrfs_new_inode_args new_inode_args = {
6621 		.dir = dir,
6622 		.dentry = dentry,
6623 		.inode = inode,
6624 	};
6625 	unsigned int trans_num_items;
6626 	struct btrfs_trans_handle *trans;
6627 	int err;
6628 
6629 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6630 	if (err)
6631 		goto out_inode;
6632 
6633 	trans = btrfs_start_transaction(root, trans_num_items);
6634 	if (IS_ERR(trans)) {
6635 		err = PTR_ERR(trans);
6636 		goto out_new_inode_args;
6637 	}
6638 
6639 	err = btrfs_create_new_inode(trans, &new_inode_args);
6640 	if (!err)
6641 		d_instantiate_new(dentry, inode);
6642 
6643 	btrfs_end_transaction(trans);
6644 	btrfs_btree_balance_dirty(fs_info);
6645 out_new_inode_args:
6646 	btrfs_new_inode_args_destroy(&new_inode_args);
6647 out_inode:
6648 	if (err)
6649 		iput(inode);
6650 	return err;
6651 }
6652 
6653 static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
6654 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6655 {
6656 	struct inode *inode;
6657 
6658 	inode = new_inode(dir->i_sb);
6659 	if (!inode)
6660 		return -ENOMEM;
6661 	inode_init_owner(mnt_userns, inode, dir, mode);
6662 	inode->i_op = &btrfs_special_inode_operations;
6663 	init_special_inode(inode, inode->i_mode, rdev);
6664 	return btrfs_create_common(dir, dentry, inode);
6665 }
6666 
6667 static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
6668 			struct dentry *dentry, umode_t mode, bool excl)
6669 {
6670 	struct inode *inode;
6671 
6672 	inode = new_inode(dir->i_sb);
6673 	if (!inode)
6674 		return -ENOMEM;
6675 	inode_init_owner(mnt_userns, inode, dir, mode);
6676 	inode->i_fop = &btrfs_file_operations;
6677 	inode->i_op = &btrfs_file_inode_operations;
6678 	inode->i_mapping->a_ops = &btrfs_aops;
6679 	return btrfs_create_common(dir, dentry, inode);
6680 }
6681 
6682 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6683 		      struct dentry *dentry)
6684 {
6685 	struct btrfs_trans_handle *trans = NULL;
6686 	struct btrfs_root *root = BTRFS_I(dir)->root;
6687 	struct inode *inode = d_inode(old_dentry);
6688 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6689 	u64 index;
6690 	int err;
6691 	int drop_inode = 0;
6692 
6693 	/* do not allow sys_link's with other subvols of the same device */
6694 	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6695 		return -EXDEV;
6696 
6697 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6698 		return -EMLINK;
6699 
6700 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6701 	if (err)
6702 		goto fail;
6703 
6704 	/*
6705 	 * 2 items for inode and inode ref
6706 	 * 2 items for dir items
6707 	 * 1 item for parent inode
6708 	 * 1 item for orphan item deletion if O_TMPFILE
6709 	 */
6710 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6711 	if (IS_ERR(trans)) {
6712 		err = PTR_ERR(trans);
6713 		trans = NULL;
6714 		goto fail;
6715 	}
6716 
6717 	/* There are several dir indexes for this inode, clear the cache. */
6718 	BTRFS_I(inode)->dir_index = 0ULL;
6719 	inc_nlink(inode);
6720 	inode_inc_iversion(inode);
6721 	inode->i_ctime = current_time(inode);
6722 	ihold(inode);
6723 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6724 
6725 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6726 			     dentry->d_name.name, dentry->d_name.len, 1, index);
6727 
6728 	if (err) {
6729 		drop_inode = 1;
6730 	} else {
6731 		struct dentry *parent = dentry->d_parent;
6732 
6733 		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
6734 		if (err)
6735 			goto fail;
6736 		if (inode->i_nlink == 1) {
6737 			/*
6738 			 * If new hard link count is 1, it's a file created
6739 			 * with open(2) O_TMPFILE flag.
6740 			 */
6741 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6742 			if (err)
6743 				goto fail;
6744 		}
6745 		d_instantiate(dentry, inode);
6746 		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6747 	}
6748 
6749 fail:
6750 	if (trans)
6751 		btrfs_end_transaction(trans);
6752 	if (drop_inode) {
6753 		inode_dec_link_count(inode);
6754 		iput(inode);
6755 	}
6756 	btrfs_btree_balance_dirty(fs_info);
6757 	return err;
6758 }
6759 
6760 static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
6761 		       struct dentry *dentry, umode_t mode)
6762 {
6763 	struct inode *inode;
6764 
6765 	inode = new_inode(dir->i_sb);
6766 	if (!inode)
6767 		return -ENOMEM;
6768 	inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode);
6769 	inode->i_op = &btrfs_dir_inode_operations;
6770 	inode->i_fop = &btrfs_dir_file_operations;
6771 	return btrfs_create_common(dir, dentry, inode);
6772 }
6773 
6774 static noinline int uncompress_inline(struct btrfs_path *path,
6775 				      struct page *page,
6776 				      size_t pg_offset, u64 extent_offset,
6777 				      struct btrfs_file_extent_item *item)
6778 {
6779 	int ret;
6780 	struct extent_buffer *leaf = path->nodes[0];
6781 	char *tmp;
6782 	size_t max_size;
6783 	unsigned long inline_size;
6784 	unsigned long ptr;
6785 	int compress_type;
6786 
6787 	WARN_ON(pg_offset != 0);
6788 	compress_type = btrfs_file_extent_compression(leaf, item);
6789 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6790 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6791 	tmp = kmalloc(inline_size, GFP_NOFS);
6792 	if (!tmp)
6793 		return -ENOMEM;
6794 	ptr = btrfs_file_extent_inline_start(item);
6795 
6796 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6797 
6798 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6799 	ret = btrfs_decompress(compress_type, tmp, page,
6800 			       extent_offset, inline_size, max_size);
6801 
6802 	/*
6803 	 * decompression code contains a memset to fill in any space between the end
6804 	 * of the uncompressed data and the end of max_size in case the decompressed
6805 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6806 	 * the end of an inline extent and the beginning of the next block, so we
6807 	 * cover that region here.
6808 	 */
6809 
6810 	if (max_size + pg_offset < PAGE_SIZE)
6811 		memzero_page(page,  pg_offset + max_size,
6812 			     PAGE_SIZE - max_size - pg_offset);
6813 	kfree(tmp);
6814 	return ret;
6815 }
6816 
6817 /**
6818  * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6819  * @inode:	file to search in
6820  * @page:	page to read extent data into if the extent is inline
6821  * @pg_offset:	offset into @page to copy to
6822  * @start:	file offset
6823  * @len:	length of range starting at @start
6824  *
6825  * This returns the first &struct extent_map which overlaps with the given
6826  * range, reading it from the B-tree and caching it if necessary. Note that
6827  * there may be more extents which overlap the given range after the returned
6828  * extent_map.
6829  *
6830  * If @page is not NULL and the extent is inline, this also reads the extent
6831  * data directly into the page and marks the extent up to date in the io_tree.
6832  *
6833  * Return: ERR_PTR on error, non-NULL extent_map on success.
6834  */
6835 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6836 				    struct page *page, size_t pg_offset,
6837 				    u64 start, u64 len)
6838 {
6839 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6840 	int ret = 0;
6841 	u64 extent_start = 0;
6842 	u64 extent_end = 0;
6843 	u64 objectid = btrfs_ino(inode);
6844 	int extent_type = -1;
6845 	struct btrfs_path *path = NULL;
6846 	struct btrfs_root *root = inode->root;
6847 	struct btrfs_file_extent_item *item;
6848 	struct extent_buffer *leaf;
6849 	struct btrfs_key found_key;
6850 	struct extent_map *em = NULL;
6851 	struct extent_map_tree *em_tree = &inode->extent_tree;
6852 
6853 	read_lock(&em_tree->lock);
6854 	em = lookup_extent_mapping(em_tree, start, len);
6855 	read_unlock(&em_tree->lock);
6856 
6857 	if (em) {
6858 		if (em->start > start || em->start + em->len <= start)
6859 			free_extent_map(em);
6860 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6861 			free_extent_map(em);
6862 		else
6863 			goto out;
6864 	}
6865 	em = alloc_extent_map();
6866 	if (!em) {
6867 		ret = -ENOMEM;
6868 		goto out;
6869 	}
6870 	em->start = EXTENT_MAP_HOLE;
6871 	em->orig_start = EXTENT_MAP_HOLE;
6872 	em->len = (u64)-1;
6873 	em->block_len = (u64)-1;
6874 
6875 	path = btrfs_alloc_path();
6876 	if (!path) {
6877 		ret = -ENOMEM;
6878 		goto out;
6879 	}
6880 
6881 	/* Chances are we'll be called again, so go ahead and do readahead */
6882 	path->reada = READA_FORWARD;
6883 
6884 	/*
6885 	 * The same explanation in load_free_space_cache applies here as well,
6886 	 * we only read when we're loading the free space cache, and at that
6887 	 * point the commit_root has everything we need.
6888 	 */
6889 	if (btrfs_is_free_space_inode(inode)) {
6890 		path->search_commit_root = 1;
6891 		path->skip_locking = 1;
6892 	}
6893 
6894 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6895 	if (ret < 0) {
6896 		goto out;
6897 	} else if (ret > 0) {
6898 		if (path->slots[0] == 0)
6899 			goto not_found;
6900 		path->slots[0]--;
6901 		ret = 0;
6902 	}
6903 
6904 	leaf = path->nodes[0];
6905 	item = btrfs_item_ptr(leaf, path->slots[0],
6906 			      struct btrfs_file_extent_item);
6907 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6908 	if (found_key.objectid != objectid ||
6909 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6910 		/*
6911 		 * If we backup past the first extent we want to move forward
6912 		 * and see if there is an extent in front of us, otherwise we'll
6913 		 * say there is a hole for our whole search range which can
6914 		 * cause problems.
6915 		 */
6916 		extent_end = start;
6917 		goto next;
6918 	}
6919 
6920 	extent_type = btrfs_file_extent_type(leaf, item);
6921 	extent_start = found_key.offset;
6922 	extent_end = btrfs_file_extent_end(path);
6923 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6924 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6925 		/* Only regular file could have regular/prealloc extent */
6926 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6927 			ret = -EUCLEAN;
6928 			btrfs_crit(fs_info,
6929 		"regular/prealloc extent found for non-regular inode %llu",
6930 				   btrfs_ino(inode));
6931 			goto out;
6932 		}
6933 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6934 						       extent_start);
6935 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6936 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6937 						      path->slots[0],
6938 						      extent_start);
6939 	}
6940 next:
6941 	if (start >= extent_end) {
6942 		path->slots[0]++;
6943 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6944 			ret = btrfs_next_leaf(root, path);
6945 			if (ret < 0)
6946 				goto out;
6947 			else if (ret > 0)
6948 				goto not_found;
6949 
6950 			leaf = path->nodes[0];
6951 		}
6952 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6953 		if (found_key.objectid != objectid ||
6954 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6955 			goto not_found;
6956 		if (start + len <= found_key.offset)
6957 			goto not_found;
6958 		if (start > found_key.offset)
6959 			goto next;
6960 
6961 		/* New extent overlaps with existing one */
6962 		em->start = start;
6963 		em->orig_start = start;
6964 		em->len = found_key.offset - start;
6965 		em->block_start = EXTENT_MAP_HOLE;
6966 		goto insert;
6967 	}
6968 
6969 	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
6970 
6971 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6972 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6973 		goto insert;
6974 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6975 		unsigned long ptr;
6976 		char *map;
6977 		size_t size;
6978 		size_t extent_offset;
6979 		size_t copy_size;
6980 
6981 		if (!page)
6982 			goto out;
6983 
6984 		size = btrfs_file_extent_ram_bytes(leaf, item);
6985 		extent_offset = page_offset(page) + pg_offset - extent_start;
6986 		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6987 				  size - extent_offset);
6988 		em->start = extent_start + extent_offset;
6989 		em->len = ALIGN(copy_size, fs_info->sectorsize);
6990 		em->orig_block_len = em->len;
6991 		em->orig_start = em->start;
6992 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6993 
6994 		if (!PageUptodate(page)) {
6995 			if (btrfs_file_extent_compression(leaf, item) !=
6996 			    BTRFS_COMPRESS_NONE) {
6997 				ret = uncompress_inline(path, page, pg_offset,
6998 							extent_offset, item);
6999 				if (ret)
7000 					goto out;
7001 			} else {
7002 				map = kmap_local_page(page);
7003 				read_extent_buffer(leaf, map + pg_offset, ptr,
7004 						   copy_size);
7005 				if (pg_offset + copy_size < PAGE_SIZE) {
7006 					memset(map + pg_offset + copy_size, 0,
7007 					       PAGE_SIZE - pg_offset -
7008 					       copy_size);
7009 				}
7010 				kunmap_local(map);
7011 			}
7012 			flush_dcache_page(page);
7013 		}
7014 		goto insert;
7015 	}
7016 not_found:
7017 	em->start = start;
7018 	em->orig_start = start;
7019 	em->len = len;
7020 	em->block_start = EXTENT_MAP_HOLE;
7021 insert:
7022 	ret = 0;
7023 	btrfs_release_path(path);
7024 	if (em->start > start || extent_map_end(em) <= start) {
7025 		btrfs_err(fs_info,
7026 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
7027 			  em->start, em->len, start, len);
7028 		ret = -EIO;
7029 		goto out;
7030 	}
7031 
7032 	write_lock(&em_tree->lock);
7033 	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
7034 	write_unlock(&em_tree->lock);
7035 out:
7036 	btrfs_free_path(path);
7037 
7038 	trace_btrfs_get_extent(root, inode, em);
7039 
7040 	if (ret) {
7041 		free_extent_map(em);
7042 		return ERR_PTR(ret);
7043 	}
7044 	return em;
7045 }
7046 
7047 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
7048 						  const u64 start,
7049 						  const u64 len,
7050 						  const u64 orig_start,
7051 						  const u64 block_start,
7052 						  const u64 block_len,
7053 						  const u64 orig_block_len,
7054 						  const u64 ram_bytes,
7055 						  const int type)
7056 {
7057 	struct extent_map *em = NULL;
7058 	int ret;
7059 
7060 	if (type != BTRFS_ORDERED_NOCOW) {
7061 		em = create_io_em(inode, start, len, orig_start, block_start,
7062 				  block_len, orig_block_len, ram_bytes,
7063 				  BTRFS_COMPRESS_NONE, /* compress_type */
7064 				  type);
7065 		if (IS_ERR(em))
7066 			goto out;
7067 	}
7068 	ret = btrfs_add_ordered_extent(inode, start, len, len, block_start,
7069 				       block_len, 0,
7070 				       (1 << type) |
7071 				       (1 << BTRFS_ORDERED_DIRECT),
7072 				       BTRFS_COMPRESS_NONE);
7073 	if (ret) {
7074 		if (em) {
7075 			free_extent_map(em);
7076 			btrfs_drop_extent_map_range(inode, start,
7077 						    start + len - 1, false);
7078 		}
7079 		em = ERR_PTR(ret);
7080 	}
7081  out:
7082 
7083 	return em;
7084 }
7085 
7086 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
7087 						  u64 start, u64 len)
7088 {
7089 	struct btrfs_root *root = inode->root;
7090 	struct btrfs_fs_info *fs_info = root->fs_info;
7091 	struct extent_map *em;
7092 	struct btrfs_key ins;
7093 	u64 alloc_hint;
7094 	int ret;
7095 
7096 	alloc_hint = get_extent_allocation_hint(inode, start, len);
7097 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7098 				   0, alloc_hint, &ins, 1, 1);
7099 	if (ret)
7100 		return ERR_PTR(ret);
7101 
7102 	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7103 				     ins.objectid, ins.offset, ins.offset,
7104 				     ins.offset, BTRFS_ORDERED_REGULAR);
7105 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7106 	if (IS_ERR(em))
7107 		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7108 					   1);
7109 
7110 	return em;
7111 }
7112 
7113 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7114 {
7115 	struct btrfs_block_group *block_group;
7116 	bool readonly = false;
7117 
7118 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7119 	if (!block_group || block_group->ro)
7120 		readonly = true;
7121 	if (block_group)
7122 		btrfs_put_block_group(block_group);
7123 	return readonly;
7124 }
7125 
7126 /*
7127  * Check if we can do nocow write into the range [@offset, @offset + @len)
7128  *
7129  * @offset:	File offset
7130  * @len:	The length to write, will be updated to the nocow writeable
7131  *		range
7132  * @orig_start:	(optional) Return the original file offset of the file extent
7133  * @orig_len:	(optional) Return the original on-disk length of the file extent
7134  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7135  * @strict:	if true, omit optimizations that might force us into unnecessary
7136  *		cow. e.g., don't trust generation number.
7137  *
7138  * Return:
7139  * >0	and update @len if we can do nocow write
7140  *  0	if we can't do nocow write
7141  * <0	if error happened
7142  *
7143  * NOTE: This only checks the file extents, caller is responsible to wait for
7144  *	 any ordered extents.
7145  */
7146 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7147 			      u64 *orig_start, u64 *orig_block_len,
7148 			      u64 *ram_bytes, bool nowait, bool strict)
7149 {
7150 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7151 	struct can_nocow_file_extent_args nocow_args = { 0 };
7152 	struct btrfs_path *path;
7153 	int ret;
7154 	struct extent_buffer *leaf;
7155 	struct btrfs_root *root = BTRFS_I(inode)->root;
7156 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7157 	struct btrfs_file_extent_item *fi;
7158 	struct btrfs_key key;
7159 	int found_type;
7160 
7161 	path = btrfs_alloc_path();
7162 	if (!path)
7163 		return -ENOMEM;
7164 	path->nowait = nowait;
7165 
7166 	ret = btrfs_lookup_file_extent(NULL, root, path,
7167 			btrfs_ino(BTRFS_I(inode)), offset, 0);
7168 	if (ret < 0)
7169 		goto out;
7170 
7171 	if (ret == 1) {
7172 		if (path->slots[0] == 0) {
7173 			/* can't find the item, must cow */
7174 			ret = 0;
7175 			goto out;
7176 		}
7177 		path->slots[0]--;
7178 	}
7179 	ret = 0;
7180 	leaf = path->nodes[0];
7181 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7182 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7183 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7184 		/* not our file or wrong item type, must cow */
7185 		goto out;
7186 	}
7187 
7188 	if (key.offset > offset) {
7189 		/* Wrong offset, must cow */
7190 		goto out;
7191 	}
7192 
7193 	if (btrfs_file_extent_end(path) <= offset)
7194 		goto out;
7195 
7196 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7197 	found_type = btrfs_file_extent_type(leaf, fi);
7198 	if (ram_bytes)
7199 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7200 
7201 	nocow_args.start = offset;
7202 	nocow_args.end = offset + *len - 1;
7203 	nocow_args.strict = strict;
7204 	nocow_args.free_path = true;
7205 
7206 	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7207 	/* can_nocow_file_extent() has freed the path. */
7208 	path = NULL;
7209 
7210 	if (ret != 1) {
7211 		/* Treat errors as not being able to NOCOW. */
7212 		ret = 0;
7213 		goto out;
7214 	}
7215 
7216 	ret = 0;
7217 	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7218 		goto out;
7219 
7220 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7221 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7222 		u64 range_end;
7223 
7224 		range_end = round_up(offset + nocow_args.num_bytes,
7225 				     root->fs_info->sectorsize) - 1;
7226 		ret = test_range_bit(io_tree, offset, range_end,
7227 				     EXTENT_DELALLOC, 0, NULL);
7228 		if (ret) {
7229 			ret = -EAGAIN;
7230 			goto out;
7231 		}
7232 	}
7233 
7234 	if (orig_start)
7235 		*orig_start = key.offset - nocow_args.extent_offset;
7236 	if (orig_block_len)
7237 		*orig_block_len = nocow_args.disk_num_bytes;
7238 
7239 	*len = nocow_args.num_bytes;
7240 	ret = 1;
7241 out:
7242 	btrfs_free_path(path);
7243 	return ret;
7244 }
7245 
7246 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7247 			      struct extent_state **cached_state,
7248 			      unsigned int iomap_flags)
7249 {
7250 	const bool writing = (iomap_flags & IOMAP_WRITE);
7251 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7252 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7253 	struct btrfs_ordered_extent *ordered;
7254 	int ret = 0;
7255 
7256 	while (1) {
7257 		if (nowait) {
7258 			if (!try_lock_extent(io_tree, lockstart, lockend))
7259 				return -EAGAIN;
7260 		} else {
7261 			lock_extent(io_tree, lockstart, lockend, cached_state);
7262 		}
7263 		/*
7264 		 * We're concerned with the entire range that we're going to be
7265 		 * doing DIO to, so we need to make sure there's no ordered
7266 		 * extents in this range.
7267 		 */
7268 		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7269 						     lockend - lockstart + 1);
7270 
7271 		/*
7272 		 * We need to make sure there are no buffered pages in this
7273 		 * range either, we could have raced between the invalidate in
7274 		 * generic_file_direct_write and locking the extent.  The
7275 		 * invalidate needs to happen so that reads after a write do not
7276 		 * get stale data.
7277 		 */
7278 		if (!ordered &&
7279 		    (!writing || !filemap_range_has_page(inode->i_mapping,
7280 							 lockstart, lockend)))
7281 			break;
7282 
7283 		unlock_extent(io_tree, lockstart, lockend, cached_state);
7284 
7285 		if (ordered) {
7286 			if (nowait) {
7287 				btrfs_put_ordered_extent(ordered);
7288 				ret = -EAGAIN;
7289 				break;
7290 			}
7291 			/*
7292 			 * If we are doing a DIO read and the ordered extent we
7293 			 * found is for a buffered write, we can not wait for it
7294 			 * to complete and retry, because if we do so we can
7295 			 * deadlock with concurrent buffered writes on page
7296 			 * locks. This happens only if our DIO read covers more
7297 			 * than one extent map, if at this point has already
7298 			 * created an ordered extent for a previous extent map
7299 			 * and locked its range in the inode's io tree, and a
7300 			 * concurrent write against that previous extent map's
7301 			 * range and this range started (we unlock the ranges
7302 			 * in the io tree only when the bios complete and
7303 			 * buffered writes always lock pages before attempting
7304 			 * to lock range in the io tree).
7305 			 */
7306 			if (writing ||
7307 			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7308 				btrfs_start_ordered_extent(ordered, 1);
7309 			else
7310 				ret = nowait ? -EAGAIN : -ENOTBLK;
7311 			btrfs_put_ordered_extent(ordered);
7312 		} else {
7313 			/*
7314 			 * We could trigger writeback for this range (and wait
7315 			 * for it to complete) and then invalidate the pages for
7316 			 * this range (through invalidate_inode_pages2_range()),
7317 			 * but that can lead us to a deadlock with a concurrent
7318 			 * call to readahead (a buffered read or a defrag call
7319 			 * triggered a readahead) on a page lock due to an
7320 			 * ordered dio extent we created before but did not have
7321 			 * yet a corresponding bio submitted (whence it can not
7322 			 * complete), which makes readahead wait for that
7323 			 * ordered extent to complete while holding a lock on
7324 			 * that page.
7325 			 */
7326 			ret = nowait ? -EAGAIN : -ENOTBLK;
7327 		}
7328 
7329 		if (ret)
7330 			break;
7331 
7332 		cond_resched();
7333 	}
7334 
7335 	return ret;
7336 }
7337 
7338 /* The callers of this must take lock_extent() */
7339 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7340 				       u64 len, u64 orig_start, u64 block_start,
7341 				       u64 block_len, u64 orig_block_len,
7342 				       u64 ram_bytes, int compress_type,
7343 				       int type)
7344 {
7345 	struct extent_map *em;
7346 	int ret;
7347 
7348 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7349 	       type == BTRFS_ORDERED_COMPRESSED ||
7350 	       type == BTRFS_ORDERED_NOCOW ||
7351 	       type == BTRFS_ORDERED_REGULAR);
7352 
7353 	em = alloc_extent_map();
7354 	if (!em)
7355 		return ERR_PTR(-ENOMEM);
7356 
7357 	em->start = start;
7358 	em->orig_start = orig_start;
7359 	em->len = len;
7360 	em->block_len = block_len;
7361 	em->block_start = block_start;
7362 	em->orig_block_len = orig_block_len;
7363 	em->ram_bytes = ram_bytes;
7364 	em->generation = -1;
7365 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
7366 	if (type == BTRFS_ORDERED_PREALLOC) {
7367 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
7368 	} else if (type == BTRFS_ORDERED_COMPRESSED) {
7369 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7370 		em->compress_type = compress_type;
7371 	}
7372 
7373 	ret = btrfs_replace_extent_map_range(inode, em, true);
7374 	if (ret) {
7375 		free_extent_map(em);
7376 		return ERR_PTR(ret);
7377 	}
7378 
7379 	/* em got 2 refs now, callers needs to do free_extent_map once. */
7380 	return em;
7381 }
7382 
7383 
7384 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7385 					 struct inode *inode,
7386 					 struct btrfs_dio_data *dio_data,
7387 					 u64 start, u64 len,
7388 					 unsigned int iomap_flags)
7389 {
7390 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7391 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7392 	struct extent_map *em = *map;
7393 	int type;
7394 	u64 block_start, orig_start, orig_block_len, ram_bytes;
7395 	struct btrfs_block_group *bg;
7396 	bool can_nocow = false;
7397 	bool space_reserved = false;
7398 	u64 prev_len;
7399 	int ret = 0;
7400 
7401 	/*
7402 	 * We don't allocate a new extent in the following cases
7403 	 *
7404 	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7405 	 * existing extent.
7406 	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7407 	 * just use the extent.
7408 	 *
7409 	 */
7410 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7411 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7412 	     em->block_start != EXTENT_MAP_HOLE)) {
7413 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7414 			type = BTRFS_ORDERED_PREALLOC;
7415 		else
7416 			type = BTRFS_ORDERED_NOCOW;
7417 		len = min(len, em->len - (start - em->start));
7418 		block_start = em->block_start + (start - em->start);
7419 
7420 		if (can_nocow_extent(inode, start, &len, &orig_start,
7421 				     &orig_block_len, &ram_bytes, false, false) == 1) {
7422 			bg = btrfs_inc_nocow_writers(fs_info, block_start);
7423 			if (bg)
7424 				can_nocow = true;
7425 		}
7426 	}
7427 
7428 	prev_len = len;
7429 	if (can_nocow) {
7430 		struct extent_map *em2;
7431 
7432 		/* We can NOCOW, so only need to reserve metadata space. */
7433 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7434 						      nowait);
7435 		if (ret < 0) {
7436 			/* Our caller expects us to free the input extent map. */
7437 			free_extent_map(em);
7438 			*map = NULL;
7439 			btrfs_dec_nocow_writers(bg);
7440 			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
7441 				ret = -EAGAIN;
7442 			goto out;
7443 		}
7444 		space_reserved = true;
7445 
7446 		em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
7447 					      orig_start, block_start,
7448 					      len, orig_block_len,
7449 					      ram_bytes, type);
7450 		btrfs_dec_nocow_writers(bg);
7451 		if (type == BTRFS_ORDERED_PREALLOC) {
7452 			free_extent_map(em);
7453 			*map = em2;
7454 			em = em2;
7455 		}
7456 
7457 		if (IS_ERR(em2)) {
7458 			ret = PTR_ERR(em2);
7459 			goto out;
7460 		}
7461 
7462 		dio_data->nocow_done = true;
7463 	} else {
7464 		/* Our caller expects us to free the input extent map. */
7465 		free_extent_map(em);
7466 		*map = NULL;
7467 
7468 		if (nowait)
7469 			return -EAGAIN;
7470 
7471 		/*
7472 		 * If we could not allocate data space before locking the file
7473 		 * range and we can't do a NOCOW write, then we have to fail.
7474 		 */
7475 		if (!dio_data->data_space_reserved)
7476 			return -ENOSPC;
7477 
7478 		/*
7479 		 * We have to COW and we have already reserved data space before,
7480 		 * so now we reserve only metadata.
7481 		 */
7482 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7483 						      false);
7484 		if (ret < 0)
7485 			goto out;
7486 		space_reserved = true;
7487 
7488 		em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
7489 		if (IS_ERR(em)) {
7490 			ret = PTR_ERR(em);
7491 			goto out;
7492 		}
7493 		*map = em;
7494 		len = min(len, em->len - (start - em->start));
7495 		if (len < prev_len)
7496 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
7497 							prev_len - len, true);
7498 	}
7499 
7500 	/*
7501 	 * We have created our ordered extent, so we can now release our reservation
7502 	 * for an outstanding extent.
7503 	 */
7504 	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
7505 
7506 	/*
7507 	 * Need to update the i_size under the extent lock so buffered
7508 	 * readers will get the updated i_size when we unlock.
7509 	 */
7510 	if (start + len > i_size_read(inode))
7511 		i_size_write(inode, start + len);
7512 out:
7513 	if (ret && space_reserved) {
7514 		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7515 		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7516 	}
7517 	return ret;
7518 }
7519 
7520 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7521 		loff_t length, unsigned int flags, struct iomap *iomap,
7522 		struct iomap *srcmap)
7523 {
7524 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7525 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7526 	struct extent_map *em;
7527 	struct extent_state *cached_state = NULL;
7528 	struct btrfs_dio_data *dio_data = iter->private;
7529 	u64 lockstart, lockend;
7530 	const bool write = !!(flags & IOMAP_WRITE);
7531 	int ret = 0;
7532 	u64 len = length;
7533 	const u64 data_alloc_len = length;
7534 	bool unlock_extents = false;
7535 
7536 	/*
7537 	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7538 	 * we're NOWAIT we may submit a bio for a partial range and return
7539 	 * EIOCBQUEUED, which would result in an errant short read.
7540 	 *
7541 	 * The best way to handle this would be to allow for partial completions
7542 	 * of iocb's, so we could submit the partial bio, return and fault in
7543 	 * the rest of the pages, and then submit the io for the rest of the
7544 	 * range.  However we don't have that currently, so simply return
7545 	 * -EAGAIN at this point so that the normal path is used.
7546 	 */
7547 	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7548 		return -EAGAIN;
7549 
7550 	/*
7551 	 * Cap the size of reads to that usually seen in buffered I/O as we need
7552 	 * to allocate a contiguous array for the checksums.
7553 	 */
7554 	if (!write)
7555 		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7556 
7557 	lockstart = start;
7558 	lockend = start + len - 1;
7559 
7560 	/*
7561 	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7562 	 * enough if we've written compressed pages to this area, so we need to
7563 	 * flush the dirty pages again to make absolutely sure that any
7564 	 * outstanding dirty pages are on disk - the first flush only starts
7565 	 * compression on the data, while keeping the pages locked, so by the
7566 	 * time the second flush returns we know bios for the compressed pages
7567 	 * were submitted and finished, and the pages no longer under writeback.
7568 	 *
7569 	 * If we have a NOWAIT request and we have any pages in the range that
7570 	 * are locked, likely due to compression still in progress, we don't want
7571 	 * to block on page locks. We also don't want to block on pages marked as
7572 	 * dirty or under writeback (same as for the non-compression case).
7573 	 * iomap_dio_rw() did the same check, but after that and before we got
7574 	 * here, mmap'ed writes may have happened or buffered reads started
7575 	 * (readpage() and readahead(), which lock pages), as we haven't locked
7576 	 * the file range yet.
7577 	 */
7578 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7579 		     &BTRFS_I(inode)->runtime_flags)) {
7580 		if (flags & IOMAP_NOWAIT) {
7581 			if (filemap_range_needs_writeback(inode->i_mapping,
7582 							  lockstart, lockend))
7583 				return -EAGAIN;
7584 		} else {
7585 			ret = filemap_fdatawrite_range(inode->i_mapping, start,
7586 						       start + length - 1);
7587 			if (ret)
7588 				return ret;
7589 		}
7590 	}
7591 
7592 	memset(dio_data, 0, sizeof(*dio_data));
7593 
7594 	/*
7595 	 * We always try to allocate data space and must do it before locking
7596 	 * the file range, to avoid deadlocks with concurrent writes to the same
7597 	 * range if the range has several extents and the writes don't expand the
7598 	 * current i_size (the inode lock is taken in shared mode). If we fail to
7599 	 * allocate data space here we continue and later, after locking the
7600 	 * file range, we fail with ENOSPC only if we figure out we can not do a
7601 	 * NOCOW write.
7602 	 */
7603 	if (write && !(flags & IOMAP_NOWAIT)) {
7604 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
7605 						  &dio_data->data_reserved,
7606 						  start, data_alloc_len, false);
7607 		if (!ret)
7608 			dio_data->data_space_reserved = true;
7609 		else if (ret && !(BTRFS_I(inode)->flags &
7610 				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
7611 			goto err;
7612 	}
7613 
7614 	/*
7615 	 * If this errors out it's because we couldn't invalidate pagecache for
7616 	 * this range and we need to fallback to buffered IO, or we are doing a
7617 	 * NOWAIT read/write and we need to block.
7618 	 */
7619 	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
7620 	if (ret < 0)
7621 		goto err;
7622 
7623 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
7624 	if (IS_ERR(em)) {
7625 		ret = PTR_ERR(em);
7626 		goto unlock_err;
7627 	}
7628 
7629 	/*
7630 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7631 	 * io.  INLINE is special, and we could probably kludge it in here, but
7632 	 * it's still buffered so for safety lets just fall back to the generic
7633 	 * buffered path.
7634 	 *
7635 	 * For COMPRESSED we _have_ to read the entire extent in so we can
7636 	 * decompress it, so there will be buffering required no matter what we
7637 	 * do, so go ahead and fallback to buffered.
7638 	 *
7639 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7640 	 * to buffered IO.  Don't blame me, this is the price we pay for using
7641 	 * the generic code.
7642 	 */
7643 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7644 	    em->block_start == EXTENT_MAP_INLINE) {
7645 		free_extent_map(em);
7646 		/*
7647 		 * If we are in a NOWAIT context, return -EAGAIN in order to
7648 		 * fallback to buffered IO. This is not only because we can
7649 		 * block with buffered IO (no support for NOWAIT semantics at
7650 		 * the moment) but also to avoid returning short reads to user
7651 		 * space - this happens if we were able to read some data from
7652 		 * previous non-compressed extents and then when we fallback to
7653 		 * buffered IO, at btrfs_file_read_iter() by calling
7654 		 * filemap_read(), we fail to fault in pages for the read buffer,
7655 		 * in which case filemap_read() returns a short read (the number
7656 		 * of bytes previously read is > 0, so it does not return -EFAULT).
7657 		 */
7658 		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
7659 		goto unlock_err;
7660 	}
7661 
7662 	len = min(len, em->len - (start - em->start));
7663 
7664 	/*
7665 	 * If we have a NOWAIT request and the range contains multiple extents
7666 	 * (or a mix of extents and holes), then we return -EAGAIN to make the
7667 	 * caller fallback to a context where it can do a blocking (without
7668 	 * NOWAIT) request. This way we avoid doing partial IO and returning
7669 	 * success to the caller, which is not optimal for writes and for reads
7670 	 * it can result in unexpected behaviour for an application.
7671 	 *
7672 	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7673 	 * iomap_dio_rw(), we can end up returning less data then what the caller
7674 	 * asked for, resulting in an unexpected, and incorrect, short read.
7675 	 * That is, the caller asked to read N bytes and we return less than that,
7676 	 * which is wrong unless we are crossing EOF. This happens if we get a
7677 	 * page fault error when trying to fault in pages for the buffer that is
7678 	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7679 	 * have previously submitted bios for other extents in the range, in
7680 	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7681 	 * those bios have completed by the time we get the page fault error,
7682 	 * which we return back to our caller - we should only return EIOCBQUEUED
7683 	 * after we have submitted bios for all the extents in the range.
7684 	 */
7685 	if ((flags & IOMAP_NOWAIT) && len < length) {
7686 		free_extent_map(em);
7687 		ret = -EAGAIN;
7688 		goto unlock_err;
7689 	}
7690 
7691 	if (write) {
7692 		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7693 						    start, len, flags);
7694 		if (ret < 0)
7695 			goto unlock_err;
7696 		unlock_extents = true;
7697 		/* Recalc len in case the new em is smaller than requested */
7698 		len = min(len, em->len - (start - em->start));
7699 		if (dio_data->data_space_reserved) {
7700 			u64 release_offset;
7701 			u64 release_len = 0;
7702 
7703 			if (dio_data->nocow_done) {
7704 				release_offset = start;
7705 				release_len = data_alloc_len;
7706 			} else if (len < data_alloc_len) {
7707 				release_offset = start + len;
7708 				release_len = data_alloc_len - len;
7709 			}
7710 
7711 			if (release_len > 0)
7712 				btrfs_free_reserved_data_space(BTRFS_I(inode),
7713 							       dio_data->data_reserved,
7714 							       release_offset,
7715 							       release_len);
7716 		}
7717 	} else {
7718 		/*
7719 		 * We need to unlock only the end area that we aren't using.
7720 		 * The rest is going to be unlocked by the endio routine.
7721 		 */
7722 		lockstart = start + len;
7723 		if (lockstart < lockend)
7724 			unlock_extents = true;
7725 	}
7726 
7727 	if (unlock_extents)
7728 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7729 			      &cached_state);
7730 	else
7731 		free_extent_state(cached_state);
7732 
7733 	/*
7734 	 * Translate extent map information to iomap.
7735 	 * We trim the extents (and move the addr) even though iomap code does
7736 	 * that, since we have locked only the parts we are performing I/O in.
7737 	 */
7738 	if ((em->block_start == EXTENT_MAP_HOLE) ||
7739 	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
7740 		iomap->addr = IOMAP_NULL_ADDR;
7741 		iomap->type = IOMAP_HOLE;
7742 	} else {
7743 		iomap->addr = em->block_start + (start - em->start);
7744 		iomap->type = IOMAP_MAPPED;
7745 	}
7746 	iomap->offset = start;
7747 	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
7748 	iomap->length = len;
7749 
7750 	if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
7751 		iomap->flags |= IOMAP_F_ZONE_APPEND;
7752 
7753 	free_extent_map(em);
7754 
7755 	return 0;
7756 
7757 unlock_err:
7758 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7759 		      &cached_state);
7760 err:
7761 	if (dio_data->data_space_reserved) {
7762 		btrfs_free_reserved_data_space(BTRFS_I(inode),
7763 					       dio_data->data_reserved,
7764 					       start, data_alloc_len);
7765 		extent_changeset_free(dio_data->data_reserved);
7766 	}
7767 
7768 	return ret;
7769 }
7770 
7771 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7772 		ssize_t written, unsigned int flags, struct iomap *iomap)
7773 {
7774 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7775 	struct btrfs_dio_data *dio_data = iter->private;
7776 	size_t submitted = dio_data->submitted;
7777 	const bool write = !!(flags & IOMAP_WRITE);
7778 	int ret = 0;
7779 
7780 	if (!write && (iomap->type == IOMAP_HOLE)) {
7781 		/* If reading from a hole, unlock and return */
7782 		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
7783 			      NULL);
7784 		return 0;
7785 	}
7786 
7787 	if (submitted < length) {
7788 		pos += submitted;
7789 		length -= submitted;
7790 		if (write)
7791 			btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
7792 						       pos, length, false);
7793 		else
7794 			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7795 				      pos + length - 1, NULL);
7796 		ret = -ENOTBLK;
7797 	}
7798 
7799 	if (write)
7800 		extent_changeset_free(dio_data->data_reserved);
7801 	return ret;
7802 }
7803 
7804 static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
7805 {
7806 	/*
7807 	 * This implies a barrier so that stores to dio_bio->bi_status before
7808 	 * this and loads of dio_bio->bi_status after this are fully ordered.
7809 	 */
7810 	if (!refcount_dec_and_test(&dip->refs))
7811 		return;
7812 
7813 	if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) {
7814 		btrfs_mark_ordered_io_finished(BTRFS_I(dip->inode), NULL,
7815 					       dip->file_offset, dip->bytes,
7816 					       !dip->bio.bi_status);
7817 	} else {
7818 		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
7819 			      dip->file_offset,
7820 			      dip->file_offset + dip->bytes - 1, NULL);
7821 	}
7822 
7823 	kfree(dip->csums);
7824 	bio_endio(&dip->bio);
7825 }
7826 
7827 static void submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7828 				  int mirror_num,
7829 				  enum btrfs_compression_type compress_type)
7830 {
7831 	struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
7832 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7833 
7834 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7835 
7836 	refcount_inc(&dip->refs);
7837 	btrfs_submit_bio(fs_info, bio, mirror_num);
7838 }
7839 
7840 static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
7841 					     struct btrfs_bio *bbio,
7842 					     const bool uptodate)
7843 {
7844 	struct inode *inode = dip->inode;
7845 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
7846 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
7847 	blk_status_t err = BLK_STS_OK;
7848 	struct bvec_iter iter;
7849 	struct bio_vec bv;
7850 	u32 offset;
7851 
7852 	btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
7853 		u64 start = bbio->file_offset + offset;
7854 
7855 		if (uptodate &&
7856 		    (!csum || !btrfs_check_data_csum(inode, bbio, offset, bv.bv_page,
7857 					       bv.bv_offset))) {
7858 			btrfs_clean_io_failure(BTRFS_I(inode), start,
7859 					       bv.bv_page, bv.bv_offset);
7860 		} else {
7861 			int ret;
7862 
7863 			ret = btrfs_repair_one_sector(inode, bbio, offset,
7864 					bv.bv_page, bv.bv_offset,
7865 					submit_dio_repair_bio);
7866 			if (ret)
7867 				err = errno_to_blk_status(ret);
7868 		}
7869 	}
7870 
7871 	return err;
7872 }
7873 
7874 static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
7875 						     struct bio *bio,
7876 						     u64 dio_file_offset)
7877 {
7878 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
7879 }
7880 
7881 static void btrfs_end_dio_bio(struct btrfs_bio *bbio)
7882 {
7883 	struct btrfs_dio_private *dip = bbio->private;
7884 	struct bio *bio = &bbio->bio;
7885 	blk_status_t err = bio->bi_status;
7886 
7887 	if (err)
7888 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
7889 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
7890 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
7891 			   bio->bi_opf, bio->bi_iter.bi_sector,
7892 			   bio->bi_iter.bi_size, err);
7893 
7894 	if (bio_op(bio) == REQ_OP_READ)
7895 		err = btrfs_check_read_dio_bio(dip, bbio, !err);
7896 
7897 	if (err)
7898 		dip->bio.bi_status = err;
7899 
7900 	btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
7901 
7902 	bio_put(bio);
7903 	btrfs_dio_private_put(dip);
7904 }
7905 
7906 static void btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
7907 				 u64 file_offset, int async_submit)
7908 {
7909 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7910 	struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
7911 	blk_status_t ret;
7912 
7913 	/* Save the original iter for read repair */
7914 	if (btrfs_op(bio) == BTRFS_MAP_READ)
7915 		btrfs_bio(bio)->iter = bio->bi_iter;
7916 
7917 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
7918 		goto map;
7919 
7920 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
7921 		/* Check btrfs_submit_data_write_bio() for async submit rules */
7922 		if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers) &&
7923 		    btrfs_wq_submit_bio(inode, bio, 0, file_offset,
7924 					btrfs_submit_bio_start_direct_io))
7925 			return;
7926 
7927 		/*
7928 		 * If we aren't doing async submit, calculate the csum of the
7929 		 * bio now.
7930 		 */
7931 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
7932 		if (ret) {
7933 			btrfs_bio_end_io(btrfs_bio(bio), ret);
7934 			return;
7935 		}
7936 	} else {
7937 		btrfs_bio(bio)->csum = btrfs_csum_ptr(fs_info, dip->csums,
7938 						      file_offset - dip->file_offset);
7939 	}
7940 map:
7941 	btrfs_submit_bio(fs_info, bio, 0);
7942 }
7943 
7944 static void btrfs_submit_direct(const struct iomap_iter *iter,
7945 		struct bio *dio_bio, loff_t file_offset)
7946 {
7947 	struct btrfs_dio_private *dip =
7948 		container_of(dio_bio, struct btrfs_dio_private, bio);
7949 	struct inode *inode = iter->inode;
7950 	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
7951 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7952 	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
7953 			     BTRFS_BLOCK_GROUP_RAID56_MASK);
7954 	struct bio *bio;
7955 	u64 start_sector;
7956 	int async_submit = 0;
7957 	u64 submit_len;
7958 	u64 clone_offset = 0;
7959 	u64 clone_len;
7960 	u64 logical;
7961 	int ret;
7962 	blk_status_t status;
7963 	struct btrfs_io_geometry geom;
7964 	struct btrfs_dio_data *dio_data = iter->private;
7965 	struct extent_map *em = NULL;
7966 
7967 	dip->inode = inode;
7968 	dip->file_offset = file_offset;
7969 	dip->bytes = dio_bio->bi_iter.bi_size;
7970 	refcount_set(&dip->refs, 1);
7971 	dip->csums = NULL;
7972 
7973 	if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
7974 		unsigned int nr_sectors =
7975 			(dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
7976 
7977 		/*
7978 		 * Load the csums up front to reduce csum tree searches and
7979 		 * contention when submitting bios.
7980 		 */
7981 		status = BLK_STS_RESOURCE;
7982 		dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
7983 		if (!dip->csums)
7984 			goto out_err;
7985 
7986 		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
7987 		if (status != BLK_STS_OK)
7988 			goto out_err;
7989 	}
7990 
7991 	start_sector = dio_bio->bi_iter.bi_sector;
7992 	submit_len = dio_bio->bi_iter.bi_size;
7993 
7994 	do {
7995 		logical = start_sector << 9;
7996 		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
7997 		if (IS_ERR(em)) {
7998 			status = errno_to_blk_status(PTR_ERR(em));
7999 			em = NULL;
8000 			goto out_err_em;
8001 		}
8002 		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
8003 					    logical, &geom);
8004 		if (ret) {
8005 			status = errno_to_blk_status(ret);
8006 			goto out_err_em;
8007 		}
8008 
8009 		clone_len = min(submit_len, geom.len);
8010 		ASSERT(clone_len <= UINT_MAX);
8011 
8012 		/*
8013 		 * This will never fail as it's passing GPF_NOFS and
8014 		 * the allocation is backed by btrfs_bioset.
8015 		 */
8016 		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len,
8017 					      btrfs_end_dio_bio, dip);
8018 		btrfs_bio(bio)->file_offset = file_offset;
8019 
8020 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
8021 			status = extract_ordered_extent(BTRFS_I(inode), bio,
8022 							file_offset);
8023 			if (status) {
8024 				bio_put(bio);
8025 				goto out_err;
8026 			}
8027 		}
8028 
8029 		ASSERT(submit_len >= clone_len);
8030 		submit_len -= clone_len;
8031 
8032 		/*
8033 		 * Increase the count before we submit the bio so we know
8034 		 * the end IO handler won't happen before we increase the
8035 		 * count. Otherwise, the dip might get freed before we're
8036 		 * done setting it up.
8037 		 *
8038 		 * We transfer the initial reference to the last bio, so we
8039 		 * don't need to increment the reference count for the last one.
8040 		 */
8041 		if (submit_len > 0) {
8042 			refcount_inc(&dip->refs);
8043 			/*
8044 			 * If we are submitting more than one bio, submit them
8045 			 * all asynchronously. The exception is RAID 5 or 6, as
8046 			 * asynchronous checksums make it difficult to collect
8047 			 * full stripe writes.
8048 			 */
8049 			if (!raid56)
8050 				async_submit = 1;
8051 		}
8052 
8053 		btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
8054 
8055 		dio_data->submitted += clone_len;
8056 		clone_offset += clone_len;
8057 		start_sector += clone_len >> 9;
8058 		file_offset += clone_len;
8059 
8060 		free_extent_map(em);
8061 	} while (submit_len > 0);
8062 	return;
8063 
8064 out_err_em:
8065 	free_extent_map(em);
8066 out_err:
8067 	dio_bio->bi_status = status;
8068 	btrfs_dio_private_put(dip);
8069 }
8070 
8071 static const struct iomap_ops btrfs_dio_iomap_ops = {
8072 	.iomap_begin            = btrfs_dio_iomap_begin,
8073 	.iomap_end              = btrfs_dio_iomap_end,
8074 };
8075 
8076 static const struct iomap_dio_ops btrfs_dio_ops = {
8077 	.submit_io		= btrfs_submit_direct,
8078 	.bio_set		= &btrfs_dio_bioset,
8079 };
8080 
8081 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
8082 {
8083 	struct btrfs_dio_data data;
8084 
8085 	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8086 			    IOMAP_DIO_PARTIAL, &data, done_before);
8087 }
8088 
8089 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
8090 				  size_t done_before)
8091 {
8092 	struct btrfs_dio_data data;
8093 
8094 	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8095 			    IOMAP_DIO_PARTIAL, &data, done_before);
8096 }
8097 
8098 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8099 			u64 start, u64 len)
8100 {
8101 	int	ret;
8102 
8103 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
8104 	if (ret)
8105 		return ret;
8106 
8107 	/*
8108 	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
8109 	 * file range (0 to LLONG_MAX), but that is not enough if we have
8110 	 * compression enabled. The first filemap_fdatawrite_range() only kicks
8111 	 * in the compression of data (in an async thread) and will return
8112 	 * before the compression is done and writeback is started. A second
8113 	 * filemap_fdatawrite_range() is needed to wait for the compression to
8114 	 * complete and writeback to start. We also need to wait for ordered
8115 	 * extents to complete, because our fiemap implementation uses mainly
8116 	 * file extent items to list the extents, searching for extent maps
8117 	 * only for file ranges with holes or prealloc extents to figure out
8118 	 * if we have delalloc in those ranges.
8119 	 */
8120 	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
8121 		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
8122 		if (ret)
8123 			return ret;
8124 	}
8125 
8126 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
8127 }
8128 
8129 static int btrfs_writepages(struct address_space *mapping,
8130 			    struct writeback_control *wbc)
8131 {
8132 	return extent_writepages(mapping, wbc);
8133 }
8134 
8135 static void btrfs_readahead(struct readahead_control *rac)
8136 {
8137 	extent_readahead(rac);
8138 }
8139 
8140 /*
8141  * For release_folio() and invalidate_folio() we have a race window where
8142  * folio_end_writeback() is called but the subpage spinlock is not yet released.
8143  * If we continue to release/invalidate the page, we could cause use-after-free
8144  * for subpage spinlock.  So this function is to spin and wait for subpage
8145  * spinlock.
8146  */
8147 static void wait_subpage_spinlock(struct page *page)
8148 {
8149 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
8150 	struct btrfs_subpage *subpage;
8151 
8152 	if (!btrfs_is_subpage(fs_info, page))
8153 		return;
8154 
8155 	ASSERT(PagePrivate(page) && page->private);
8156 	subpage = (struct btrfs_subpage *)page->private;
8157 
8158 	/*
8159 	 * This may look insane as we just acquire the spinlock and release it,
8160 	 * without doing anything.  But we just want to make sure no one is
8161 	 * still holding the subpage spinlock.
8162 	 * And since the page is not dirty nor writeback, and we have page
8163 	 * locked, the only possible way to hold a spinlock is from the endio
8164 	 * function to clear page writeback.
8165 	 *
8166 	 * Here we just acquire the spinlock so that all existing callers
8167 	 * should exit and we're safe to release/invalidate the page.
8168 	 */
8169 	spin_lock_irq(&subpage->lock);
8170 	spin_unlock_irq(&subpage->lock);
8171 }
8172 
8173 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8174 {
8175 	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
8176 
8177 	if (ret == 1) {
8178 		wait_subpage_spinlock(&folio->page);
8179 		clear_page_extent_mapped(&folio->page);
8180 	}
8181 	return ret;
8182 }
8183 
8184 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8185 {
8186 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
8187 		return false;
8188 	return __btrfs_release_folio(folio, gfp_flags);
8189 }
8190 
8191 #ifdef CONFIG_MIGRATION
8192 static int btrfs_migrate_folio(struct address_space *mapping,
8193 			     struct folio *dst, struct folio *src,
8194 			     enum migrate_mode mode)
8195 {
8196 	int ret = filemap_migrate_folio(mapping, dst, src, mode);
8197 
8198 	if (ret != MIGRATEPAGE_SUCCESS)
8199 		return ret;
8200 
8201 	if (folio_test_ordered(src)) {
8202 		folio_clear_ordered(src);
8203 		folio_set_ordered(dst);
8204 	}
8205 
8206 	return MIGRATEPAGE_SUCCESS;
8207 }
8208 #else
8209 #define btrfs_migrate_folio NULL
8210 #endif
8211 
8212 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8213 				 size_t length)
8214 {
8215 	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
8216 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8217 	struct extent_io_tree *tree = &inode->io_tree;
8218 	struct extent_state *cached_state = NULL;
8219 	u64 page_start = folio_pos(folio);
8220 	u64 page_end = page_start + folio_size(folio) - 1;
8221 	u64 cur;
8222 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8223 
8224 	/*
8225 	 * We have folio locked so no new ordered extent can be created on this
8226 	 * page, nor bio can be submitted for this folio.
8227 	 *
8228 	 * But already submitted bio can still be finished on this folio.
8229 	 * Furthermore, endio function won't skip folio which has Ordered
8230 	 * (Private2) already cleared, so it's possible for endio and
8231 	 * invalidate_folio to do the same ordered extent accounting twice
8232 	 * on one folio.
8233 	 *
8234 	 * So here we wait for any submitted bios to finish, so that we won't
8235 	 * do double ordered extent accounting on the same folio.
8236 	 */
8237 	folio_wait_writeback(folio);
8238 	wait_subpage_spinlock(&folio->page);
8239 
8240 	/*
8241 	 * For subpage case, we have call sites like
8242 	 * btrfs_punch_hole_lock_range() which passes range not aligned to
8243 	 * sectorsize.
8244 	 * If the range doesn't cover the full folio, we don't need to and
8245 	 * shouldn't clear page extent mapped, as folio->private can still
8246 	 * record subpage dirty bits for other part of the range.
8247 	 *
8248 	 * For cases that invalidate the full folio even the range doesn't
8249 	 * cover the full folio, like invalidating the last folio, we're
8250 	 * still safe to wait for ordered extent to finish.
8251 	 */
8252 	if (!(offset == 0 && length == folio_size(folio))) {
8253 		btrfs_release_folio(folio, GFP_NOFS);
8254 		return;
8255 	}
8256 
8257 	if (!inode_evicting)
8258 		lock_extent(tree, page_start, page_end, &cached_state);
8259 
8260 	cur = page_start;
8261 	while (cur < page_end) {
8262 		struct btrfs_ordered_extent *ordered;
8263 		u64 range_end;
8264 		u32 range_len;
8265 		u32 extra_flags = 0;
8266 
8267 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
8268 							   page_end + 1 - cur);
8269 		if (!ordered) {
8270 			range_end = page_end;
8271 			/*
8272 			 * No ordered extent covering this range, we are safe
8273 			 * to delete all extent states in the range.
8274 			 */
8275 			extra_flags = EXTENT_CLEAR_ALL_BITS;
8276 			goto next;
8277 		}
8278 		if (ordered->file_offset > cur) {
8279 			/*
8280 			 * There is a range between [cur, oe->file_offset) not
8281 			 * covered by any ordered extent.
8282 			 * We are safe to delete all extent states, and handle
8283 			 * the ordered extent in the next iteration.
8284 			 */
8285 			range_end = ordered->file_offset - 1;
8286 			extra_flags = EXTENT_CLEAR_ALL_BITS;
8287 			goto next;
8288 		}
8289 
8290 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8291 				page_end);
8292 		ASSERT(range_end + 1 - cur < U32_MAX);
8293 		range_len = range_end + 1 - cur;
8294 		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
8295 			/*
8296 			 * If Ordered (Private2) is cleared, it means endio has
8297 			 * already been executed for the range.
8298 			 * We can't delete the extent states as
8299 			 * btrfs_finish_ordered_io() may still use some of them.
8300 			 */
8301 			goto next;
8302 		}
8303 		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
8304 
8305 		/*
8306 		 * IO on this page will never be started, so we need to account
8307 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8308 		 * here, must leave that up for the ordered extent completion.
8309 		 *
8310 		 * This will also unlock the range for incoming
8311 		 * btrfs_finish_ordered_io().
8312 		 */
8313 		if (!inode_evicting)
8314 			clear_extent_bit(tree, cur, range_end,
8315 					 EXTENT_DELALLOC |
8316 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8317 					 EXTENT_DEFRAG, &cached_state);
8318 
8319 		spin_lock_irq(&inode->ordered_tree.lock);
8320 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8321 		ordered->truncated_len = min(ordered->truncated_len,
8322 					     cur - ordered->file_offset);
8323 		spin_unlock_irq(&inode->ordered_tree.lock);
8324 
8325 		/*
8326 		 * If the ordered extent has finished, we're safe to delete all
8327 		 * the extent states of the range, otherwise
8328 		 * btrfs_finish_ordered_io() will get executed by endio for
8329 		 * other pages, so we can't delete extent states.
8330 		 */
8331 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8332 						   cur, range_end + 1 - cur)) {
8333 			btrfs_finish_ordered_io(ordered);
8334 			/*
8335 			 * The ordered extent has finished, now we're again
8336 			 * safe to delete all extent states of the range.
8337 			 */
8338 			extra_flags = EXTENT_CLEAR_ALL_BITS;
8339 		}
8340 next:
8341 		if (ordered)
8342 			btrfs_put_ordered_extent(ordered);
8343 		/*
8344 		 * Qgroup reserved space handler
8345 		 * Sector(s) here will be either:
8346 		 *
8347 		 * 1) Already written to disk or bio already finished
8348 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
8349 		 *    Qgroup will be handled by its qgroup_record then.
8350 		 *    btrfs_qgroup_free_data() call will do nothing here.
8351 		 *
8352 		 * 2) Not written to disk yet
8353 		 *    Then btrfs_qgroup_free_data() call will clear the
8354 		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
8355 		 *    reserved data space.
8356 		 *    Since the IO will never happen for this page.
8357 		 */
8358 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
8359 		if (!inode_evicting) {
8360 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8361 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8362 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
8363 				 extra_flags, &cached_state);
8364 		}
8365 		cur = range_end + 1;
8366 	}
8367 	/*
8368 	 * We have iterated through all ordered extents of the page, the page
8369 	 * should not have Ordered (Private2) anymore, or the above iteration
8370 	 * did something wrong.
8371 	 */
8372 	ASSERT(!folio_test_ordered(folio));
8373 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
8374 	if (!inode_evicting)
8375 		__btrfs_release_folio(folio, GFP_NOFS);
8376 	clear_page_extent_mapped(&folio->page);
8377 }
8378 
8379 /*
8380  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8381  * called from a page fault handler when a page is first dirtied. Hence we must
8382  * be careful to check for EOF conditions here. We set the page up correctly
8383  * for a written page which means we get ENOSPC checking when writing into
8384  * holes and correct delalloc and unwritten extent mapping on filesystems that
8385  * support these features.
8386  *
8387  * We are not allowed to take the i_mutex here so we have to play games to
8388  * protect against truncate races as the page could now be beyond EOF.  Because
8389  * truncate_setsize() writes the inode size before removing pages, once we have
8390  * the page lock we can determine safely if the page is beyond EOF. If it is not
8391  * beyond EOF, then the page is guaranteed safe against truncation until we
8392  * unlock the page.
8393  */
8394 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8395 {
8396 	struct page *page = vmf->page;
8397 	struct inode *inode = file_inode(vmf->vma->vm_file);
8398 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8399 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8400 	struct btrfs_ordered_extent *ordered;
8401 	struct extent_state *cached_state = NULL;
8402 	struct extent_changeset *data_reserved = NULL;
8403 	unsigned long zero_start;
8404 	loff_t size;
8405 	vm_fault_t ret;
8406 	int ret2;
8407 	int reserved = 0;
8408 	u64 reserved_space;
8409 	u64 page_start;
8410 	u64 page_end;
8411 	u64 end;
8412 
8413 	reserved_space = PAGE_SIZE;
8414 
8415 	sb_start_pagefault(inode->i_sb);
8416 	page_start = page_offset(page);
8417 	page_end = page_start + PAGE_SIZE - 1;
8418 	end = page_end;
8419 
8420 	/*
8421 	 * Reserving delalloc space after obtaining the page lock can lead to
8422 	 * deadlock. For example, if a dirty page is locked by this function
8423 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8424 	 * dirty page write out, then the btrfs_writepages() function could
8425 	 * end up waiting indefinitely to get a lock on the page currently
8426 	 * being processed by btrfs_page_mkwrite() function.
8427 	 */
8428 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8429 					    page_start, reserved_space);
8430 	if (!ret2) {
8431 		ret2 = file_update_time(vmf->vma->vm_file);
8432 		reserved = 1;
8433 	}
8434 	if (ret2) {
8435 		ret = vmf_error(ret2);
8436 		if (reserved)
8437 			goto out;
8438 		goto out_noreserve;
8439 	}
8440 
8441 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8442 again:
8443 	down_read(&BTRFS_I(inode)->i_mmap_lock);
8444 	lock_page(page);
8445 	size = i_size_read(inode);
8446 
8447 	if ((page->mapping != inode->i_mapping) ||
8448 	    (page_start >= size)) {
8449 		/* page got truncated out from underneath us */
8450 		goto out_unlock;
8451 	}
8452 	wait_on_page_writeback(page);
8453 
8454 	lock_extent(io_tree, page_start, page_end, &cached_state);
8455 	ret2 = set_page_extent_mapped(page);
8456 	if (ret2 < 0) {
8457 		ret = vmf_error(ret2);
8458 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8459 		goto out_unlock;
8460 	}
8461 
8462 	/*
8463 	 * we can't set the delalloc bits if there are pending ordered
8464 	 * extents.  Drop our locks and wait for them to finish
8465 	 */
8466 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8467 			PAGE_SIZE);
8468 	if (ordered) {
8469 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8470 		unlock_page(page);
8471 		up_read(&BTRFS_I(inode)->i_mmap_lock);
8472 		btrfs_start_ordered_extent(ordered, 1);
8473 		btrfs_put_ordered_extent(ordered);
8474 		goto again;
8475 	}
8476 
8477 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8478 		reserved_space = round_up(size - page_start,
8479 					  fs_info->sectorsize);
8480 		if (reserved_space < PAGE_SIZE) {
8481 			end = page_start + reserved_space - 1;
8482 			btrfs_delalloc_release_space(BTRFS_I(inode),
8483 					data_reserved, page_start,
8484 					PAGE_SIZE - reserved_space, true);
8485 		}
8486 	}
8487 
8488 	/*
8489 	 * page_mkwrite gets called when the page is firstly dirtied after it's
8490 	 * faulted in, but write(2) could also dirty a page and set delalloc
8491 	 * bits, thus in this case for space account reason, we still need to
8492 	 * clear any delalloc bits within this page range since we have to
8493 	 * reserve data&meta space before lock_page() (see above comments).
8494 	 */
8495 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8496 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8497 			  EXTENT_DEFRAG, &cached_state);
8498 
8499 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8500 					&cached_state);
8501 	if (ret2) {
8502 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8503 		ret = VM_FAULT_SIGBUS;
8504 		goto out_unlock;
8505 	}
8506 
8507 	/* page is wholly or partially inside EOF */
8508 	if (page_start + PAGE_SIZE > size)
8509 		zero_start = offset_in_page(size);
8510 	else
8511 		zero_start = PAGE_SIZE;
8512 
8513 	if (zero_start != PAGE_SIZE)
8514 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8515 
8516 	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8517 	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8518 	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8519 
8520 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8521 
8522 	unlock_extent(io_tree, page_start, page_end, &cached_state);
8523 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8524 
8525 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8526 	sb_end_pagefault(inode->i_sb);
8527 	extent_changeset_free(data_reserved);
8528 	return VM_FAULT_LOCKED;
8529 
8530 out_unlock:
8531 	unlock_page(page);
8532 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8533 out:
8534 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8535 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8536 				     reserved_space, (ret != 0));
8537 out_noreserve:
8538 	sb_end_pagefault(inode->i_sb);
8539 	extent_changeset_free(data_reserved);
8540 	return ret;
8541 }
8542 
8543 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
8544 {
8545 	struct btrfs_truncate_control control = {
8546 		.inode = BTRFS_I(inode),
8547 		.ino = btrfs_ino(BTRFS_I(inode)),
8548 		.min_type = BTRFS_EXTENT_DATA_KEY,
8549 		.clear_extent_range = true,
8550 	};
8551 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8552 	struct btrfs_root *root = BTRFS_I(inode)->root;
8553 	struct btrfs_block_rsv *rsv;
8554 	int ret;
8555 	struct btrfs_trans_handle *trans;
8556 	u64 mask = fs_info->sectorsize - 1;
8557 	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8558 
8559 	if (!skip_writeback) {
8560 		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8561 					       (u64)-1);
8562 		if (ret)
8563 			return ret;
8564 	}
8565 
8566 	/*
8567 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8568 	 * things going on here:
8569 	 *
8570 	 * 1) We need to reserve space to update our inode.
8571 	 *
8572 	 * 2) We need to have something to cache all the space that is going to
8573 	 * be free'd up by the truncate operation, but also have some slack
8574 	 * space reserved in case it uses space during the truncate (thank you
8575 	 * very much snapshotting).
8576 	 *
8577 	 * And we need these to be separate.  The fact is we can use a lot of
8578 	 * space doing the truncate, and we have no earthly idea how much space
8579 	 * we will use, so we need the truncate reservation to be separate so it
8580 	 * doesn't end up using space reserved for updating the inode.  We also
8581 	 * need to be able to stop the transaction and start a new one, which
8582 	 * means we need to be able to update the inode several times, and we
8583 	 * have no idea of knowing how many times that will be, so we can't just
8584 	 * reserve 1 item for the entirety of the operation, so that has to be
8585 	 * done separately as well.
8586 	 *
8587 	 * So that leaves us with
8588 	 *
8589 	 * 1) rsv - for the truncate reservation, which we will steal from the
8590 	 * transaction reservation.
8591 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8592 	 * updating the inode.
8593 	 */
8594 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8595 	if (!rsv)
8596 		return -ENOMEM;
8597 	rsv->size = min_size;
8598 	rsv->failfast = true;
8599 
8600 	/*
8601 	 * 1 for the truncate slack space
8602 	 * 1 for updating the inode.
8603 	 */
8604 	trans = btrfs_start_transaction(root, 2);
8605 	if (IS_ERR(trans)) {
8606 		ret = PTR_ERR(trans);
8607 		goto out;
8608 	}
8609 
8610 	/* Migrate the slack space for the truncate to our reserve */
8611 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8612 				      min_size, false);
8613 	BUG_ON(ret);
8614 
8615 	trans->block_rsv = rsv;
8616 
8617 	while (1) {
8618 		struct extent_state *cached_state = NULL;
8619 		const u64 new_size = inode->i_size;
8620 		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8621 
8622 		control.new_size = new_size;
8623 		lock_extent(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
8624 				 &cached_state);
8625 		/*
8626 		 * We want to drop from the next block forward in case this new
8627 		 * size is not block aligned since we will be keeping the last
8628 		 * block of the extent just the way it is.
8629 		 */
8630 		btrfs_drop_extent_map_range(BTRFS_I(inode),
8631 					    ALIGN(new_size, fs_info->sectorsize),
8632 					    (u64)-1, false);
8633 
8634 		ret = btrfs_truncate_inode_items(trans, root, &control);
8635 
8636 		inode_sub_bytes(inode, control.sub_bytes);
8637 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), control.last_size);
8638 
8639 		unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
8640 			      &cached_state);
8641 
8642 		trans->block_rsv = &fs_info->trans_block_rsv;
8643 		if (ret != -ENOSPC && ret != -EAGAIN)
8644 			break;
8645 
8646 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
8647 		if (ret)
8648 			break;
8649 
8650 		btrfs_end_transaction(trans);
8651 		btrfs_btree_balance_dirty(fs_info);
8652 
8653 		trans = btrfs_start_transaction(root, 2);
8654 		if (IS_ERR(trans)) {
8655 			ret = PTR_ERR(trans);
8656 			trans = NULL;
8657 			break;
8658 		}
8659 
8660 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8661 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8662 					      rsv, min_size, false);
8663 		BUG_ON(ret);	/* shouldn't happen */
8664 		trans->block_rsv = rsv;
8665 	}
8666 
8667 	/*
8668 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8669 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
8670 	 * know we've truncated everything except the last little bit, and can
8671 	 * do btrfs_truncate_block and then update the disk_i_size.
8672 	 */
8673 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8674 		btrfs_end_transaction(trans);
8675 		btrfs_btree_balance_dirty(fs_info);
8676 
8677 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
8678 		if (ret)
8679 			goto out;
8680 		trans = btrfs_start_transaction(root, 1);
8681 		if (IS_ERR(trans)) {
8682 			ret = PTR_ERR(trans);
8683 			goto out;
8684 		}
8685 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8686 	}
8687 
8688 	if (trans) {
8689 		int ret2;
8690 
8691 		trans->block_rsv = &fs_info->trans_block_rsv;
8692 		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
8693 		if (ret2 && !ret)
8694 			ret = ret2;
8695 
8696 		ret2 = btrfs_end_transaction(trans);
8697 		if (ret2 && !ret)
8698 			ret = ret2;
8699 		btrfs_btree_balance_dirty(fs_info);
8700 	}
8701 out:
8702 	btrfs_free_block_rsv(fs_info, rsv);
8703 	/*
8704 	 * So if we truncate and then write and fsync we normally would just
8705 	 * write the extents that changed, which is a problem if we need to
8706 	 * first truncate that entire inode.  So set this flag so we write out
8707 	 * all of the extents in the inode to the sync log so we're completely
8708 	 * safe.
8709 	 *
8710 	 * If no extents were dropped or trimmed we don't need to force the next
8711 	 * fsync to truncate all the inode's items from the log and re-log them
8712 	 * all. This means the truncate operation did not change the file size,
8713 	 * or changed it to a smaller size but there was only an implicit hole
8714 	 * between the old i_size and the new i_size, and there were no prealloc
8715 	 * extents beyond i_size to drop.
8716 	 */
8717 	if (control.extents_found > 0)
8718 		btrfs_set_inode_full_sync(BTRFS_I(inode));
8719 
8720 	return ret;
8721 }
8722 
8723 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
8724 				     struct inode *dir)
8725 {
8726 	struct inode *inode;
8727 
8728 	inode = new_inode(dir->i_sb);
8729 	if (inode) {
8730 		/*
8731 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8732 		 * the parent's sgid bit is set. This is probably a bug.
8733 		 */
8734 		inode_init_owner(mnt_userns, inode, NULL,
8735 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8736 		inode->i_op = &btrfs_dir_inode_operations;
8737 		inode->i_fop = &btrfs_dir_file_operations;
8738 	}
8739 	return inode;
8740 }
8741 
8742 struct inode *btrfs_alloc_inode(struct super_block *sb)
8743 {
8744 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8745 	struct btrfs_inode *ei;
8746 	struct inode *inode;
8747 
8748 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8749 	if (!ei)
8750 		return NULL;
8751 
8752 	ei->root = NULL;
8753 	ei->generation = 0;
8754 	ei->last_trans = 0;
8755 	ei->last_sub_trans = 0;
8756 	ei->logged_trans = 0;
8757 	ei->delalloc_bytes = 0;
8758 	ei->new_delalloc_bytes = 0;
8759 	ei->defrag_bytes = 0;
8760 	ei->disk_i_size = 0;
8761 	ei->flags = 0;
8762 	ei->ro_flags = 0;
8763 	ei->csum_bytes = 0;
8764 	ei->index_cnt = (u64)-1;
8765 	ei->dir_index = 0;
8766 	ei->last_unlink_trans = 0;
8767 	ei->last_reflink_trans = 0;
8768 	ei->last_log_commit = 0;
8769 
8770 	spin_lock_init(&ei->lock);
8771 	spin_lock_init(&ei->io_failure_lock);
8772 	ei->outstanding_extents = 0;
8773 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8774 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8775 					      BTRFS_BLOCK_RSV_DELALLOC);
8776 	ei->runtime_flags = 0;
8777 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8778 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8779 
8780 	ei->delayed_node = NULL;
8781 
8782 	ei->i_otime.tv_sec = 0;
8783 	ei->i_otime.tv_nsec = 0;
8784 
8785 	inode = &ei->vfs_inode;
8786 	extent_map_tree_init(&ei->extent_tree);
8787 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
8788 	extent_io_tree_init(fs_info, &ei->file_extent_tree,
8789 			    IO_TREE_INODE_FILE_EXTENT, NULL);
8790 	ei->io_failure_tree = RB_ROOT;
8791 	atomic_set(&ei->sync_writers, 0);
8792 	mutex_init(&ei->log_mutex);
8793 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8794 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8795 	INIT_LIST_HEAD(&ei->delayed_iput);
8796 	RB_CLEAR_NODE(&ei->rb_node);
8797 	init_rwsem(&ei->i_mmap_lock);
8798 
8799 	return inode;
8800 }
8801 
8802 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8803 void btrfs_test_destroy_inode(struct inode *inode)
8804 {
8805 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8806 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8807 }
8808 #endif
8809 
8810 void btrfs_free_inode(struct inode *inode)
8811 {
8812 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8813 }
8814 
8815 void btrfs_destroy_inode(struct inode *vfs_inode)
8816 {
8817 	struct btrfs_ordered_extent *ordered;
8818 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8819 	struct btrfs_root *root = inode->root;
8820 	bool freespace_inode;
8821 
8822 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8823 	WARN_ON(vfs_inode->i_data.nrpages);
8824 	WARN_ON(inode->block_rsv.reserved);
8825 	WARN_ON(inode->block_rsv.size);
8826 	WARN_ON(inode->outstanding_extents);
8827 	if (!S_ISDIR(vfs_inode->i_mode)) {
8828 		WARN_ON(inode->delalloc_bytes);
8829 		WARN_ON(inode->new_delalloc_bytes);
8830 	}
8831 	WARN_ON(inode->csum_bytes);
8832 	WARN_ON(inode->defrag_bytes);
8833 
8834 	/*
8835 	 * This can happen where we create an inode, but somebody else also
8836 	 * created the same inode and we need to destroy the one we already
8837 	 * created.
8838 	 */
8839 	if (!root)
8840 		return;
8841 
8842 	/*
8843 	 * If this is a free space inode do not take the ordered extents lockdep
8844 	 * map.
8845 	 */
8846 	freespace_inode = btrfs_is_free_space_inode(inode);
8847 
8848 	while (1) {
8849 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8850 		if (!ordered)
8851 			break;
8852 		else {
8853 			btrfs_err(root->fs_info,
8854 				  "found ordered extent %llu %llu on inode cleanup",
8855 				  ordered->file_offset, ordered->num_bytes);
8856 
8857 			if (!freespace_inode)
8858 				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8859 
8860 			btrfs_remove_ordered_extent(inode, ordered);
8861 			btrfs_put_ordered_extent(ordered);
8862 			btrfs_put_ordered_extent(ordered);
8863 		}
8864 	}
8865 	btrfs_qgroup_check_reserved_leak(inode);
8866 	inode_tree_del(inode);
8867 	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8868 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8869 	btrfs_put_root(inode->root);
8870 }
8871 
8872 int btrfs_drop_inode(struct inode *inode)
8873 {
8874 	struct btrfs_root *root = BTRFS_I(inode)->root;
8875 
8876 	if (root == NULL)
8877 		return 1;
8878 
8879 	/* the snap/subvol tree is on deleting */
8880 	if (btrfs_root_refs(&root->root_item) == 0)
8881 		return 1;
8882 	else
8883 		return generic_drop_inode(inode);
8884 }
8885 
8886 static void init_once(void *foo)
8887 {
8888 	struct btrfs_inode *ei = foo;
8889 
8890 	inode_init_once(&ei->vfs_inode);
8891 }
8892 
8893 void __cold btrfs_destroy_cachep(void)
8894 {
8895 	/*
8896 	 * Make sure all delayed rcu free inodes are flushed before we
8897 	 * destroy cache.
8898 	 */
8899 	rcu_barrier();
8900 	bioset_exit(&btrfs_dio_bioset);
8901 	kmem_cache_destroy(btrfs_inode_cachep);
8902 	kmem_cache_destroy(btrfs_trans_handle_cachep);
8903 	kmem_cache_destroy(btrfs_path_cachep);
8904 	kmem_cache_destroy(btrfs_free_space_cachep);
8905 	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
8906 }
8907 
8908 int __init btrfs_init_cachep(void)
8909 {
8910 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8911 			sizeof(struct btrfs_inode), 0,
8912 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8913 			init_once);
8914 	if (!btrfs_inode_cachep)
8915 		goto fail;
8916 
8917 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
8918 			sizeof(struct btrfs_trans_handle), 0,
8919 			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
8920 	if (!btrfs_trans_handle_cachep)
8921 		goto fail;
8922 
8923 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
8924 			sizeof(struct btrfs_path), 0,
8925 			SLAB_MEM_SPREAD, NULL);
8926 	if (!btrfs_path_cachep)
8927 		goto fail;
8928 
8929 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
8930 			sizeof(struct btrfs_free_space), 0,
8931 			SLAB_MEM_SPREAD, NULL);
8932 	if (!btrfs_free_space_cachep)
8933 		goto fail;
8934 
8935 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
8936 							PAGE_SIZE, PAGE_SIZE,
8937 							SLAB_MEM_SPREAD, NULL);
8938 	if (!btrfs_free_space_bitmap_cachep)
8939 		goto fail;
8940 
8941 	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8942 			offsetof(struct btrfs_dio_private, bio),
8943 			BIOSET_NEED_BVECS))
8944 		goto fail;
8945 
8946 	return 0;
8947 fail:
8948 	btrfs_destroy_cachep();
8949 	return -ENOMEM;
8950 }
8951 
8952 static int btrfs_getattr(struct user_namespace *mnt_userns,
8953 			 const struct path *path, struct kstat *stat,
8954 			 u32 request_mask, unsigned int flags)
8955 {
8956 	u64 delalloc_bytes;
8957 	u64 inode_bytes;
8958 	struct inode *inode = d_inode(path->dentry);
8959 	u32 blocksize = inode->i_sb->s_blocksize;
8960 	u32 bi_flags = BTRFS_I(inode)->flags;
8961 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8962 
8963 	stat->result_mask |= STATX_BTIME;
8964 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
8965 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
8966 	if (bi_flags & BTRFS_INODE_APPEND)
8967 		stat->attributes |= STATX_ATTR_APPEND;
8968 	if (bi_flags & BTRFS_INODE_COMPRESS)
8969 		stat->attributes |= STATX_ATTR_COMPRESSED;
8970 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8971 		stat->attributes |= STATX_ATTR_IMMUTABLE;
8972 	if (bi_flags & BTRFS_INODE_NODUMP)
8973 		stat->attributes |= STATX_ATTR_NODUMP;
8974 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8975 		stat->attributes |= STATX_ATTR_VERITY;
8976 
8977 	stat->attributes_mask |= (STATX_ATTR_APPEND |
8978 				  STATX_ATTR_COMPRESSED |
8979 				  STATX_ATTR_IMMUTABLE |
8980 				  STATX_ATTR_NODUMP);
8981 
8982 	generic_fillattr(mnt_userns, inode, stat);
8983 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8984 
8985 	spin_lock(&BTRFS_I(inode)->lock);
8986 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8987 	inode_bytes = inode_get_bytes(inode);
8988 	spin_unlock(&BTRFS_I(inode)->lock);
8989 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8990 			ALIGN(delalloc_bytes, blocksize)) >> 9;
8991 	return 0;
8992 }
8993 
8994 static int btrfs_rename_exchange(struct inode *old_dir,
8995 			      struct dentry *old_dentry,
8996 			      struct inode *new_dir,
8997 			      struct dentry *new_dentry)
8998 {
8999 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9000 	struct btrfs_trans_handle *trans;
9001 	unsigned int trans_num_items;
9002 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9003 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9004 	struct inode *new_inode = new_dentry->d_inode;
9005 	struct inode *old_inode = old_dentry->d_inode;
9006 	struct timespec64 ctime = current_time(old_inode);
9007 	struct btrfs_rename_ctx old_rename_ctx;
9008 	struct btrfs_rename_ctx new_rename_ctx;
9009 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9010 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9011 	u64 old_idx = 0;
9012 	u64 new_idx = 0;
9013 	int ret;
9014 	int ret2;
9015 	bool need_abort = false;
9016 
9017 	/*
9018 	 * For non-subvolumes allow exchange only within one subvolume, in the
9019 	 * same inode namespace. Two subvolumes (represented as directory) can
9020 	 * be exchanged as they're a logical link and have a fixed inode number.
9021 	 */
9022 	if (root != dest &&
9023 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
9024 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
9025 		return -EXDEV;
9026 
9027 	/* close the race window with snapshot create/destroy ioctl */
9028 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9029 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
9030 		down_read(&fs_info->subvol_sem);
9031 
9032 	/*
9033 	 * For each inode:
9034 	 * 1 to remove old dir item
9035 	 * 1 to remove old dir index
9036 	 * 1 to add new dir item
9037 	 * 1 to add new dir index
9038 	 * 1 to update parent inode
9039 	 *
9040 	 * If the parents are the same, we only need to account for one
9041 	 */
9042 	trans_num_items = (old_dir == new_dir ? 9 : 10);
9043 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9044 		/*
9045 		 * 1 to remove old root ref
9046 		 * 1 to remove old root backref
9047 		 * 1 to add new root ref
9048 		 * 1 to add new root backref
9049 		 */
9050 		trans_num_items += 4;
9051 	} else {
9052 		/*
9053 		 * 1 to update inode item
9054 		 * 1 to remove old inode ref
9055 		 * 1 to add new inode ref
9056 		 */
9057 		trans_num_items += 3;
9058 	}
9059 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9060 		trans_num_items += 4;
9061 	else
9062 		trans_num_items += 3;
9063 	trans = btrfs_start_transaction(root, trans_num_items);
9064 	if (IS_ERR(trans)) {
9065 		ret = PTR_ERR(trans);
9066 		goto out_notrans;
9067 	}
9068 
9069 	if (dest != root) {
9070 		ret = btrfs_record_root_in_trans(trans, dest);
9071 		if (ret)
9072 			goto out_fail;
9073 	}
9074 
9075 	/*
9076 	 * We need to find a free sequence number both in the source and
9077 	 * in the destination directory for the exchange.
9078 	 */
9079 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9080 	if (ret)
9081 		goto out_fail;
9082 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9083 	if (ret)
9084 		goto out_fail;
9085 
9086 	BTRFS_I(old_inode)->dir_index = 0ULL;
9087 	BTRFS_I(new_inode)->dir_index = 0ULL;
9088 
9089 	/* Reference for the source. */
9090 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9091 		/* force full log commit if subvolume involved. */
9092 		btrfs_set_log_full_commit(trans);
9093 	} else {
9094 		ret = btrfs_insert_inode_ref(trans, dest,
9095 					     new_dentry->d_name.name,
9096 					     new_dentry->d_name.len,
9097 					     old_ino,
9098 					     btrfs_ino(BTRFS_I(new_dir)),
9099 					     old_idx);
9100 		if (ret)
9101 			goto out_fail;
9102 		need_abort = true;
9103 	}
9104 
9105 	/* And now for the dest. */
9106 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9107 		/* force full log commit if subvolume involved. */
9108 		btrfs_set_log_full_commit(trans);
9109 	} else {
9110 		ret = btrfs_insert_inode_ref(trans, root,
9111 					     old_dentry->d_name.name,
9112 					     old_dentry->d_name.len,
9113 					     new_ino,
9114 					     btrfs_ino(BTRFS_I(old_dir)),
9115 					     new_idx);
9116 		if (ret) {
9117 			if (need_abort)
9118 				btrfs_abort_transaction(trans, ret);
9119 			goto out_fail;
9120 		}
9121 	}
9122 
9123 	/* Update inode version and ctime/mtime. */
9124 	inode_inc_iversion(old_dir);
9125 	inode_inc_iversion(new_dir);
9126 	inode_inc_iversion(old_inode);
9127 	inode_inc_iversion(new_inode);
9128 	old_dir->i_mtime = ctime;
9129 	old_dir->i_ctime = ctime;
9130 	new_dir->i_mtime = ctime;
9131 	new_dir->i_ctime = ctime;
9132 	old_inode->i_ctime = ctime;
9133 	new_inode->i_ctime = ctime;
9134 
9135 	if (old_dentry->d_parent != new_dentry->d_parent) {
9136 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9137 				BTRFS_I(old_inode), 1);
9138 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9139 				BTRFS_I(new_inode), 1);
9140 	}
9141 
9142 	/* src is a subvolume */
9143 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9144 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9145 	} else { /* src is an inode */
9146 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9147 					   BTRFS_I(old_dentry->d_inode),
9148 					   old_dentry->d_name.name,
9149 					   old_dentry->d_name.len,
9150 					   &old_rename_ctx);
9151 		if (!ret)
9152 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9153 	}
9154 	if (ret) {
9155 		btrfs_abort_transaction(trans, ret);
9156 		goto out_fail;
9157 	}
9158 
9159 	/* dest is a subvolume */
9160 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9161 		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9162 	} else { /* dest is an inode */
9163 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9164 					   BTRFS_I(new_dentry->d_inode),
9165 					   new_dentry->d_name.name,
9166 					   new_dentry->d_name.len,
9167 					   &new_rename_ctx);
9168 		if (!ret)
9169 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
9170 	}
9171 	if (ret) {
9172 		btrfs_abort_transaction(trans, ret);
9173 		goto out_fail;
9174 	}
9175 
9176 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9177 			     new_dentry->d_name.name,
9178 			     new_dentry->d_name.len, 0, old_idx);
9179 	if (ret) {
9180 		btrfs_abort_transaction(trans, ret);
9181 		goto out_fail;
9182 	}
9183 
9184 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9185 			     old_dentry->d_name.name,
9186 			     old_dentry->d_name.len, 0, new_idx);
9187 	if (ret) {
9188 		btrfs_abort_transaction(trans, ret);
9189 		goto out_fail;
9190 	}
9191 
9192 	if (old_inode->i_nlink == 1)
9193 		BTRFS_I(old_inode)->dir_index = old_idx;
9194 	if (new_inode->i_nlink == 1)
9195 		BTRFS_I(new_inode)->dir_index = new_idx;
9196 
9197 	/*
9198 	 * Now pin the logs of the roots. We do it to ensure that no other task
9199 	 * can sync the logs while we are in progress with the rename, because
9200 	 * that could result in an inconsistency in case any of the inodes that
9201 	 * are part of this rename operation were logged before.
9202 	 */
9203 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9204 		btrfs_pin_log_trans(root);
9205 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9206 		btrfs_pin_log_trans(dest);
9207 
9208 	/* Do the log updates for all inodes. */
9209 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9210 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9211 				   old_rename_ctx.index, new_dentry->d_parent);
9212 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9213 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
9214 				   new_rename_ctx.index, old_dentry->d_parent);
9215 
9216 	/* Now unpin the logs. */
9217 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9218 		btrfs_end_log_trans(root);
9219 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9220 		btrfs_end_log_trans(dest);
9221 out_fail:
9222 	ret2 = btrfs_end_transaction(trans);
9223 	ret = ret ? ret : ret2;
9224 out_notrans:
9225 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9226 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9227 		up_read(&fs_info->subvol_sem);
9228 
9229 	return ret;
9230 }
9231 
9232 static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns,
9233 					struct inode *dir)
9234 {
9235 	struct inode *inode;
9236 
9237 	inode = new_inode(dir->i_sb);
9238 	if (inode) {
9239 		inode_init_owner(mnt_userns, inode, dir,
9240 				 S_IFCHR | WHITEOUT_MODE);
9241 		inode->i_op = &btrfs_special_inode_operations;
9242 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
9243 	}
9244 	return inode;
9245 }
9246 
9247 static int btrfs_rename(struct user_namespace *mnt_userns,
9248 			struct inode *old_dir, struct dentry *old_dentry,
9249 			struct inode *new_dir, struct dentry *new_dentry,
9250 			unsigned int flags)
9251 {
9252 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9253 	struct btrfs_new_inode_args whiteout_args = {
9254 		.dir = old_dir,
9255 		.dentry = old_dentry,
9256 	};
9257 	struct btrfs_trans_handle *trans;
9258 	unsigned int trans_num_items;
9259 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9260 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9261 	struct inode *new_inode = d_inode(new_dentry);
9262 	struct inode *old_inode = d_inode(old_dentry);
9263 	struct btrfs_rename_ctx rename_ctx;
9264 	u64 index = 0;
9265 	int ret;
9266 	int ret2;
9267 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9268 
9269 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9270 		return -EPERM;
9271 
9272 	/* we only allow rename subvolume link between subvolumes */
9273 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9274 		return -EXDEV;
9275 
9276 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9277 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9278 		return -ENOTEMPTY;
9279 
9280 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9281 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9282 		return -ENOTEMPTY;
9283 
9284 
9285 	/* check for collisions, even if the  name isn't there */
9286 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9287 			     new_dentry->d_name.name,
9288 			     new_dentry->d_name.len);
9289 
9290 	if (ret) {
9291 		if (ret == -EEXIST) {
9292 			/* we shouldn't get
9293 			 * eexist without a new_inode */
9294 			if (WARN_ON(!new_inode)) {
9295 				return ret;
9296 			}
9297 		} else {
9298 			/* maybe -EOVERFLOW */
9299 			return ret;
9300 		}
9301 	}
9302 	ret = 0;
9303 
9304 	/*
9305 	 * we're using rename to replace one file with another.  Start IO on it
9306 	 * now so  we don't add too much work to the end of the transaction
9307 	 */
9308 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9309 		filemap_flush(old_inode->i_mapping);
9310 
9311 	if (flags & RENAME_WHITEOUT) {
9312 		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
9313 		if (!whiteout_args.inode)
9314 			return -ENOMEM;
9315 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9316 		if (ret)
9317 			goto out_whiteout_inode;
9318 	} else {
9319 		/* 1 to update the old parent inode. */
9320 		trans_num_items = 1;
9321 	}
9322 
9323 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9324 		/* Close the race window with snapshot create/destroy ioctl */
9325 		down_read(&fs_info->subvol_sem);
9326 		/*
9327 		 * 1 to remove old root ref
9328 		 * 1 to remove old root backref
9329 		 * 1 to add new root ref
9330 		 * 1 to add new root backref
9331 		 */
9332 		trans_num_items += 4;
9333 	} else {
9334 		/*
9335 		 * 1 to update inode
9336 		 * 1 to remove old inode ref
9337 		 * 1 to add new inode ref
9338 		 */
9339 		trans_num_items += 3;
9340 	}
9341 	/*
9342 	 * 1 to remove old dir item
9343 	 * 1 to remove old dir index
9344 	 * 1 to add new dir item
9345 	 * 1 to add new dir index
9346 	 */
9347 	trans_num_items += 4;
9348 	/* 1 to update new parent inode if it's not the same as the old parent */
9349 	if (new_dir != old_dir)
9350 		trans_num_items++;
9351 	if (new_inode) {
9352 		/*
9353 		 * 1 to update inode
9354 		 * 1 to remove inode ref
9355 		 * 1 to remove dir item
9356 		 * 1 to remove dir index
9357 		 * 1 to possibly add orphan item
9358 		 */
9359 		trans_num_items += 5;
9360 	}
9361 	trans = btrfs_start_transaction(root, trans_num_items);
9362 	if (IS_ERR(trans)) {
9363 		ret = PTR_ERR(trans);
9364 		goto out_notrans;
9365 	}
9366 
9367 	if (dest != root) {
9368 		ret = btrfs_record_root_in_trans(trans, dest);
9369 		if (ret)
9370 			goto out_fail;
9371 	}
9372 
9373 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9374 	if (ret)
9375 		goto out_fail;
9376 
9377 	BTRFS_I(old_inode)->dir_index = 0ULL;
9378 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9379 		/* force full log commit if subvolume involved. */
9380 		btrfs_set_log_full_commit(trans);
9381 	} else {
9382 		ret = btrfs_insert_inode_ref(trans, dest,
9383 					     new_dentry->d_name.name,
9384 					     new_dentry->d_name.len,
9385 					     old_ino,
9386 					     btrfs_ino(BTRFS_I(new_dir)), index);
9387 		if (ret)
9388 			goto out_fail;
9389 	}
9390 
9391 	inode_inc_iversion(old_dir);
9392 	inode_inc_iversion(new_dir);
9393 	inode_inc_iversion(old_inode);
9394 	old_dir->i_mtime = current_time(old_dir);
9395 	old_dir->i_ctime = old_dir->i_mtime;
9396 	new_dir->i_mtime = old_dir->i_mtime;
9397 	new_dir->i_ctime = old_dir->i_mtime;
9398 	old_inode->i_ctime = old_dir->i_mtime;
9399 
9400 	if (old_dentry->d_parent != new_dentry->d_parent)
9401 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9402 				BTRFS_I(old_inode), 1);
9403 
9404 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9405 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9406 	} else {
9407 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9408 					BTRFS_I(d_inode(old_dentry)),
9409 					old_dentry->d_name.name,
9410 					old_dentry->d_name.len,
9411 					&rename_ctx);
9412 		if (!ret)
9413 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9414 	}
9415 	if (ret) {
9416 		btrfs_abort_transaction(trans, ret);
9417 		goto out_fail;
9418 	}
9419 
9420 	if (new_inode) {
9421 		inode_inc_iversion(new_inode);
9422 		new_inode->i_ctime = current_time(new_inode);
9423 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9424 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9425 			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9426 			BUG_ON(new_inode->i_nlink == 0);
9427 		} else {
9428 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9429 						 BTRFS_I(d_inode(new_dentry)),
9430 						 new_dentry->d_name.name,
9431 						 new_dentry->d_name.len);
9432 		}
9433 		if (!ret && new_inode->i_nlink == 0)
9434 			ret = btrfs_orphan_add(trans,
9435 					BTRFS_I(d_inode(new_dentry)));
9436 		if (ret) {
9437 			btrfs_abort_transaction(trans, ret);
9438 			goto out_fail;
9439 		}
9440 	}
9441 
9442 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9443 			     new_dentry->d_name.name,
9444 			     new_dentry->d_name.len, 0, index);
9445 	if (ret) {
9446 		btrfs_abort_transaction(trans, ret);
9447 		goto out_fail;
9448 	}
9449 
9450 	if (old_inode->i_nlink == 1)
9451 		BTRFS_I(old_inode)->dir_index = index;
9452 
9453 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9454 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9455 				   rename_ctx.index, new_dentry->d_parent);
9456 
9457 	if (flags & RENAME_WHITEOUT) {
9458 		ret = btrfs_create_new_inode(trans, &whiteout_args);
9459 		if (ret) {
9460 			btrfs_abort_transaction(trans, ret);
9461 			goto out_fail;
9462 		} else {
9463 			unlock_new_inode(whiteout_args.inode);
9464 			iput(whiteout_args.inode);
9465 			whiteout_args.inode = NULL;
9466 		}
9467 	}
9468 out_fail:
9469 	ret2 = btrfs_end_transaction(trans);
9470 	ret = ret ? ret : ret2;
9471 out_notrans:
9472 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9473 		up_read(&fs_info->subvol_sem);
9474 	if (flags & RENAME_WHITEOUT)
9475 		btrfs_new_inode_args_destroy(&whiteout_args);
9476 out_whiteout_inode:
9477 	if (flags & RENAME_WHITEOUT)
9478 		iput(whiteout_args.inode);
9479 	return ret;
9480 }
9481 
9482 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
9483 			 struct dentry *old_dentry, struct inode *new_dir,
9484 			 struct dentry *new_dentry, unsigned int flags)
9485 {
9486 	int ret;
9487 
9488 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9489 		return -EINVAL;
9490 
9491 	if (flags & RENAME_EXCHANGE)
9492 		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9493 					    new_dentry);
9494 	else
9495 		ret = btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
9496 				   new_dentry, flags);
9497 
9498 	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
9499 
9500 	return ret;
9501 }
9502 
9503 struct btrfs_delalloc_work {
9504 	struct inode *inode;
9505 	struct completion completion;
9506 	struct list_head list;
9507 	struct btrfs_work work;
9508 };
9509 
9510 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9511 {
9512 	struct btrfs_delalloc_work *delalloc_work;
9513 	struct inode *inode;
9514 
9515 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9516 				     work);
9517 	inode = delalloc_work->inode;
9518 	filemap_flush(inode->i_mapping);
9519 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9520 				&BTRFS_I(inode)->runtime_flags))
9521 		filemap_flush(inode->i_mapping);
9522 
9523 	iput(inode);
9524 	complete(&delalloc_work->completion);
9525 }
9526 
9527 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9528 {
9529 	struct btrfs_delalloc_work *work;
9530 
9531 	work = kmalloc(sizeof(*work), GFP_NOFS);
9532 	if (!work)
9533 		return NULL;
9534 
9535 	init_completion(&work->completion);
9536 	INIT_LIST_HEAD(&work->list);
9537 	work->inode = inode;
9538 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9539 
9540 	return work;
9541 }
9542 
9543 /*
9544  * some fairly slow code that needs optimization. This walks the list
9545  * of all the inodes with pending delalloc and forces them to disk.
9546  */
9547 static int start_delalloc_inodes(struct btrfs_root *root,
9548 				 struct writeback_control *wbc, bool snapshot,
9549 				 bool in_reclaim_context)
9550 {
9551 	struct btrfs_inode *binode;
9552 	struct inode *inode;
9553 	struct btrfs_delalloc_work *work, *next;
9554 	struct list_head works;
9555 	struct list_head splice;
9556 	int ret = 0;
9557 	bool full_flush = wbc->nr_to_write == LONG_MAX;
9558 
9559 	INIT_LIST_HEAD(&works);
9560 	INIT_LIST_HEAD(&splice);
9561 
9562 	mutex_lock(&root->delalloc_mutex);
9563 	spin_lock(&root->delalloc_lock);
9564 	list_splice_init(&root->delalloc_inodes, &splice);
9565 	while (!list_empty(&splice)) {
9566 		binode = list_entry(splice.next, struct btrfs_inode,
9567 				    delalloc_inodes);
9568 
9569 		list_move_tail(&binode->delalloc_inodes,
9570 			       &root->delalloc_inodes);
9571 
9572 		if (in_reclaim_context &&
9573 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9574 			continue;
9575 
9576 		inode = igrab(&binode->vfs_inode);
9577 		if (!inode) {
9578 			cond_resched_lock(&root->delalloc_lock);
9579 			continue;
9580 		}
9581 		spin_unlock(&root->delalloc_lock);
9582 
9583 		if (snapshot)
9584 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9585 				&binode->runtime_flags);
9586 		if (full_flush) {
9587 			work = btrfs_alloc_delalloc_work(inode);
9588 			if (!work) {
9589 				iput(inode);
9590 				ret = -ENOMEM;
9591 				goto out;
9592 			}
9593 			list_add_tail(&work->list, &works);
9594 			btrfs_queue_work(root->fs_info->flush_workers,
9595 					 &work->work);
9596 		} else {
9597 			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9598 			btrfs_add_delayed_iput(inode);
9599 			if (ret || wbc->nr_to_write <= 0)
9600 				goto out;
9601 		}
9602 		cond_resched();
9603 		spin_lock(&root->delalloc_lock);
9604 	}
9605 	spin_unlock(&root->delalloc_lock);
9606 
9607 out:
9608 	list_for_each_entry_safe(work, next, &works, list) {
9609 		list_del_init(&work->list);
9610 		wait_for_completion(&work->completion);
9611 		kfree(work);
9612 	}
9613 
9614 	if (!list_empty(&splice)) {
9615 		spin_lock(&root->delalloc_lock);
9616 		list_splice_tail(&splice, &root->delalloc_inodes);
9617 		spin_unlock(&root->delalloc_lock);
9618 	}
9619 	mutex_unlock(&root->delalloc_mutex);
9620 	return ret;
9621 }
9622 
9623 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9624 {
9625 	struct writeback_control wbc = {
9626 		.nr_to_write = LONG_MAX,
9627 		.sync_mode = WB_SYNC_NONE,
9628 		.range_start = 0,
9629 		.range_end = LLONG_MAX,
9630 	};
9631 	struct btrfs_fs_info *fs_info = root->fs_info;
9632 
9633 	if (BTRFS_FS_ERROR(fs_info))
9634 		return -EROFS;
9635 
9636 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9637 }
9638 
9639 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9640 			       bool in_reclaim_context)
9641 {
9642 	struct writeback_control wbc = {
9643 		.nr_to_write = nr,
9644 		.sync_mode = WB_SYNC_NONE,
9645 		.range_start = 0,
9646 		.range_end = LLONG_MAX,
9647 	};
9648 	struct btrfs_root *root;
9649 	struct list_head splice;
9650 	int ret;
9651 
9652 	if (BTRFS_FS_ERROR(fs_info))
9653 		return -EROFS;
9654 
9655 	INIT_LIST_HEAD(&splice);
9656 
9657 	mutex_lock(&fs_info->delalloc_root_mutex);
9658 	spin_lock(&fs_info->delalloc_root_lock);
9659 	list_splice_init(&fs_info->delalloc_roots, &splice);
9660 	while (!list_empty(&splice)) {
9661 		/*
9662 		 * Reset nr_to_write here so we know that we're doing a full
9663 		 * flush.
9664 		 */
9665 		if (nr == LONG_MAX)
9666 			wbc.nr_to_write = LONG_MAX;
9667 
9668 		root = list_first_entry(&splice, struct btrfs_root,
9669 					delalloc_root);
9670 		root = btrfs_grab_root(root);
9671 		BUG_ON(!root);
9672 		list_move_tail(&root->delalloc_root,
9673 			       &fs_info->delalloc_roots);
9674 		spin_unlock(&fs_info->delalloc_root_lock);
9675 
9676 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9677 		btrfs_put_root(root);
9678 		if (ret < 0 || wbc.nr_to_write <= 0)
9679 			goto out;
9680 		spin_lock(&fs_info->delalloc_root_lock);
9681 	}
9682 	spin_unlock(&fs_info->delalloc_root_lock);
9683 
9684 	ret = 0;
9685 out:
9686 	if (!list_empty(&splice)) {
9687 		spin_lock(&fs_info->delalloc_root_lock);
9688 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9689 		spin_unlock(&fs_info->delalloc_root_lock);
9690 	}
9691 	mutex_unlock(&fs_info->delalloc_root_mutex);
9692 	return ret;
9693 }
9694 
9695 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
9696 			 struct dentry *dentry, const char *symname)
9697 {
9698 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9699 	struct btrfs_trans_handle *trans;
9700 	struct btrfs_root *root = BTRFS_I(dir)->root;
9701 	struct btrfs_path *path;
9702 	struct btrfs_key key;
9703 	struct inode *inode;
9704 	struct btrfs_new_inode_args new_inode_args = {
9705 		.dir = dir,
9706 		.dentry = dentry,
9707 	};
9708 	unsigned int trans_num_items;
9709 	int err;
9710 	int name_len;
9711 	int datasize;
9712 	unsigned long ptr;
9713 	struct btrfs_file_extent_item *ei;
9714 	struct extent_buffer *leaf;
9715 
9716 	name_len = strlen(symname);
9717 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9718 		return -ENAMETOOLONG;
9719 
9720 	inode = new_inode(dir->i_sb);
9721 	if (!inode)
9722 		return -ENOMEM;
9723 	inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO);
9724 	inode->i_op = &btrfs_symlink_inode_operations;
9725 	inode_nohighmem(inode);
9726 	inode->i_mapping->a_ops = &btrfs_aops;
9727 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9728 	inode_set_bytes(inode, name_len);
9729 
9730 	new_inode_args.inode = inode;
9731 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9732 	if (err)
9733 		goto out_inode;
9734 	/* 1 additional item for the inline extent */
9735 	trans_num_items++;
9736 
9737 	trans = btrfs_start_transaction(root, trans_num_items);
9738 	if (IS_ERR(trans)) {
9739 		err = PTR_ERR(trans);
9740 		goto out_new_inode_args;
9741 	}
9742 
9743 	err = btrfs_create_new_inode(trans, &new_inode_args);
9744 	if (err)
9745 		goto out;
9746 
9747 	path = btrfs_alloc_path();
9748 	if (!path) {
9749 		err = -ENOMEM;
9750 		btrfs_abort_transaction(trans, err);
9751 		discard_new_inode(inode);
9752 		inode = NULL;
9753 		goto out;
9754 	}
9755 	key.objectid = btrfs_ino(BTRFS_I(inode));
9756 	key.offset = 0;
9757 	key.type = BTRFS_EXTENT_DATA_KEY;
9758 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9759 	err = btrfs_insert_empty_item(trans, root, path, &key,
9760 				      datasize);
9761 	if (err) {
9762 		btrfs_abort_transaction(trans, err);
9763 		btrfs_free_path(path);
9764 		discard_new_inode(inode);
9765 		inode = NULL;
9766 		goto out;
9767 	}
9768 	leaf = path->nodes[0];
9769 	ei = btrfs_item_ptr(leaf, path->slots[0],
9770 			    struct btrfs_file_extent_item);
9771 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9772 	btrfs_set_file_extent_type(leaf, ei,
9773 				   BTRFS_FILE_EXTENT_INLINE);
9774 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9775 	btrfs_set_file_extent_compression(leaf, ei, 0);
9776 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9777 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9778 
9779 	ptr = btrfs_file_extent_inline_start(ei);
9780 	write_extent_buffer(leaf, symname, ptr, name_len);
9781 	btrfs_mark_buffer_dirty(leaf);
9782 	btrfs_free_path(path);
9783 
9784 	d_instantiate_new(dentry, inode);
9785 	err = 0;
9786 out:
9787 	btrfs_end_transaction(trans);
9788 	btrfs_btree_balance_dirty(fs_info);
9789 out_new_inode_args:
9790 	btrfs_new_inode_args_destroy(&new_inode_args);
9791 out_inode:
9792 	if (err)
9793 		iput(inode);
9794 	return err;
9795 }
9796 
9797 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9798 				       struct btrfs_trans_handle *trans_in,
9799 				       struct btrfs_inode *inode,
9800 				       struct btrfs_key *ins,
9801 				       u64 file_offset)
9802 {
9803 	struct btrfs_file_extent_item stack_fi;
9804 	struct btrfs_replace_extent_info extent_info;
9805 	struct btrfs_trans_handle *trans = trans_in;
9806 	struct btrfs_path *path;
9807 	u64 start = ins->objectid;
9808 	u64 len = ins->offset;
9809 	int qgroup_released;
9810 	int ret;
9811 
9812 	memset(&stack_fi, 0, sizeof(stack_fi));
9813 
9814 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9815 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9816 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9817 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9818 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9819 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9820 	/* Encryption and other encoding is reserved and all 0 */
9821 
9822 	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
9823 	if (qgroup_released < 0)
9824 		return ERR_PTR(qgroup_released);
9825 
9826 	if (trans) {
9827 		ret = insert_reserved_file_extent(trans, inode,
9828 						  file_offset, &stack_fi,
9829 						  true, qgroup_released);
9830 		if (ret)
9831 			goto free_qgroup;
9832 		return trans;
9833 	}
9834 
9835 	extent_info.disk_offset = start;
9836 	extent_info.disk_len = len;
9837 	extent_info.data_offset = 0;
9838 	extent_info.data_len = len;
9839 	extent_info.file_offset = file_offset;
9840 	extent_info.extent_buf = (char *)&stack_fi;
9841 	extent_info.is_new_extent = true;
9842 	extent_info.update_times = true;
9843 	extent_info.qgroup_reserved = qgroup_released;
9844 	extent_info.insertions = 0;
9845 
9846 	path = btrfs_alloc_path();
9847 	if (!path) {
9848 		ret = -ENOMEM;
9849 		goto free_qgroup;
9850 	}
9851 
9852 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9853 				     file_offset + len - 1, &extent_info,
9854 				     &trans);
9855 	btrfs_free_path(path);
9856 	if (ret)
9857 		goto free_qgroup;
9858 	return trans;
9859 
9860 free_qgroup:
9861 	/*
9862 	 * We have released qgroup data range at the beginning of the function,
9863 	 * and normally qgroup_released bytes will be freed when committing
9864 	 * transaction.
9865 	 * But if we error out early, we have to free what we have released
9866 	 * or we leak qgroup data reservation.
9867 	 */
9868 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9869 			inode->root->root_key.objectid, qgroup_released,
9870 			BTRFS_QGROUP_RSV_DATA);
9871 	return ERR_PTR(ret);
9872 }
9873 
9874 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9875 				       u64 start, u64 num_bytes, u64 min_size,
9876 				       loff_t actual_len, u64 *alloc_hint,
9877 				       struct btrfs_trans_handle *trans)
9878 {
9879 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9880 	struct extent_map *em;
9881 	struct btrfs_root *root = BTRFS_I(inode)->root;
9882 	struct btrfs_key ins;
9883 	u64 cur_offset = start;
9884 	u64 clear_offset = start;
9885 	u64 i_size;
9886 	u64 cur_bytes;
9887 	u64 last_alloc = (u64)-1;
9888 	int ret = 0;
9889 	bool own_trans = true;
9890 	u64 end = start + num_bytes - 1;
9891 
9892 	if (trans)
9893 		own_trans = false;
9894 	while (num_bytes > 0) {
9895 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9896 		cur_bytes = max(cur_bytes, min_size);
9897 		/*
9898 		 * If we are severely fragmented we could end up with really
9899 		 * small allocations, so if the allocator is returning small
9900 		 * chunks lets make its job easier by only searching for those
9901 		 * sized chunks.
9902 		 */
9903 		cur_bytes = min(cur_bytes, last_alloc);
9904 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9905 				min_size, 0, *alloc_hint, &ins, 1, 0);
9906 		if (ret)
9907 			break;
9908 
9909 		/*
9910 		 * We've reserved this space, and thus converted it from
9911 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9912 		 * from here on out we will only need to clear our reservation
9913 		 * for the remaining unreserved area, so advance our
9914 		 * clear_offset by our extent size.
9915 		 */
9916 		clear_offset += ins.offset;
9917 
9918 		last_alloc = ins.offset;
9919 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9920 						    &ins, cur_offset);
9921 		/*
9922 		 * Now that we inserted the prealloc extent we can finally
9923 		 * decrement the number of reservations in the block group.
9924 		 * If we did it before, we could race with relocation and have
9925 		 * relocation miss the reserved extent, making it fail later.
9926 		 */
9927 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9928 		if (IS_ERR(trans)) {
9929 			ret = PTR_ERR(trans);
9930 			btrfs_free_reserved_extent(fs_info, ins.objectid,
9931 						   ins.offset, 0);
9932 			break;
9933 		}
9934 
9935 		em = alloc_extent_map();
9936 		if (!em) {
9937 			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9938 					    cur_offset + ins.offset - 1, false);
9939 			btrfs_set_inode_full_sync(BTRFS_I(inode));
9940 			goto next;
9941 		}
9942 
9943 		em->start = cur_offset;
9944 		em->orig_start = cur_offset;
9945 		em->len = ins.offset;
9946 		em->block_start = ins.objectid;
9947 		em->block_len = ins.offset;
9948 		em->orig_block_len = ins.offset;
9949 		em->ram_bytes = ins.offset;
9950 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9951 		em->generation = trans->transid;
9952 
9953 		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9954 		free_extent_map(em);
9955 next:
9956 		num_bytes -= ins.offset;
9957 		cur_offset += ins.offset;
9958 		*alloc_hint = ins.objectid + ins.offset;
9959 
9960 		inode_inc_iversion(inode);
9961 		inode->i_ctime = current_time(inode);
9962 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9963 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9964 		    (actual_len > inode->i_size) &&
9965 		    (cur_offset > inode->i_size)) {
9966 			if (cur_offset > actual_len)
9967 				i_size = actual_len;
9968 			else
9969 				i_size = cur_offset;
9970 			i_size_write(inode, i_size);
9971 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9972 		}
9973 
9974 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
9975 
9976 		if (ret) {
9977 			btrfs_abort_transaction(trans, ret);
9978 			if (own_trans)
9979 				btrfs_end_transaction(trans);
9980 			break;
9981 		}
9982 
9983 		if (own_trans) {
9984 			btrfs_end_transaction(trans);
9985 			trans = NULL;
9986 		}
9987 	}
9988 	if (clear_offset < end)
9989 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9990 			end - clear_offset + 1);
9991 	return ret;
9992 }
9993 
9994 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9995 			      u64 start, u64 num_bytes, u64 min_size,
9996 			      loff_t actual_len, u64 *alloc_hint)
9997 {
9998 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9999 					   min_size, actual_len, alloc_hint,
10000 					   NULL);
10001 }
10002 
10003 int btrfs_prealloc_file_range_trans(struct inode *inode,
10004 				    struct btrfs_trans_handle *trans, int mode,
10005 				    u64 start, u64 num_bytes, u64 min_size,
10006 				    loff_t actual_len, u64 *alloc_hint)
10007 {
10008 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10009 					   min_size, actual_len, alloc_hint, trans);
10010 }
10011 
10012 static int btrfs_permission(struct user_namespace *mnt_userns,
10013 			    struct inode *inode, int mask)
10014 {
10015 	struct btrfs_root *root = BTRFS_I(inode)->root;
10016 	umode_t mode = inode->i_mode;
10017 
10018 	if (mask & MAY_WRITE &&
10019 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10020 		if (btrfs_root_readonly(root))
10021 			return -EROFS;
10022 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10023 			return -EACCES;
10024 	}
10025 	return generic_permission(mnt_userns, inode, mask);
10026 }
10027 
10028 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
10029 			 struct file *file, umode_t mode)
10030 {
10031 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10032 	struct btrfs_trans_handle *trans;
10033 	struct btrfs_root *root = BTRFS_I(dir)->root;
10034 	struct inode *inode;
10035 	struct btrfs_new_inode_args new_inode_args = {
10036 		.dir = dir,
10037 		.dentry = file->f_path.dentry,
10038 		.orphan = true,
10039 	};
10040 	unsigned int trans_num_items;
10041 	int ret;
10042 
10043 	inode = new_inode(dir->i_sb);
10044 	if (!inode)
10045 		return -ENOMEM;
10046 	inode_init_owner(mnt_userns, inode, dir, mode);
10047 	inode->i_fop = &btrfs_file_operations;
10048 	inode->i_op = &btrfs_file_inode_operations;
10049 	inode->i_mapping->a_ops = &btrfs_aops;
10050 
10051 	new_inode_args.inode = inode;
10052 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
10053 	if (ret)
10054 		goto out_inode;
10055 
10056 	trans = btrfs_start_transaction(root, trans_num_items);
10057 	if (IS_ERR(trans)) {
10058 		ret = PTR_ERR(trans);
10059 		goto out_new_inode_args;
10060 	}
10061 
10062 	ret = btrfs_create_new_inode(trans, &new_inode_args);
10063 
10064 	/*
10065 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
10066 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
10067 	 * 0, through:
10068 	 *
10069 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10070 	 */
10071 	set_nlink(inode, 1);
10072 
10073 	if (!ret) {
10074 		d_tmpfile(file, inode);
10075 		unlock_new_inode(inode);
10076 		mark_inode_dirty(inode);
10077 	}
10078 
10079 	btrfs_end_transaction(trans);
10080 	btrfs_btree_balance_dirty(fs_info);
10081 out_new_inode_args:
10082 	btrfs_new_inode_args_destroy(&new_inode_args);
10083 out_inode:
10084 	if (ret)
10085 		iput(inode);
10086 	return finish_open_simple(file, ret);
10087 }
10088 
10089 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
10090 {
10091 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10092 	unsigned long index = start >> PAGE_SHIFT;
10093 	unsigned long end_index = end >> PAGE_SHIFT;
10094 	struct page *page;
10095 	u32 len;
10096 
10097 	ASSERT(end + 1 - start <= U32_MAX);
10098 	len = end + 1 - start;
10099 	while (index <= end_index) {
10100 		page = find_get_page(inode->vfs_inode.i_mapping, index);
10101 		ASSERT(page); /* Pages should be in the extent_io_tree */
10102 
10103 		btrfs_page_set_writeback(fs_info, page, start, len);
10104 		put_page(page);
10105 		index++;
10106 	}
10107 }
10108 
10109 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
10110 					     int compress_type)
10111 {
10112 	switch (compress_type) {
10113 	case BTRFS_COMPRESS_NONE:
10114 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
10115 	case BTRFS_COMPRESS_ZLIB:
10116 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
10117 	case BTRFS_COMPRESS_LZO:
10118 		/*
10119 		 * The LZO format depends on the sector size. 64K is the maximum
10120 		 * sector size that we support.
10121 		 */
10122 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
10123 			return -EINVAL;
10124 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
10125 		       (fs_info->sectorsize_bits - 12);
10126 	case BTRFS_COMPRESS_ZSTD:
10127 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
10128 	default:
10129 		return -EUCLEAN;
10130 	}
10131 }
10132 
10133 static ssize_t btrfs_encoded_read_inline(
10134 				struct kiocb *iocb,
10135 				struct iov_iter *iter, u64 start,
10136 				u64 lockend,
10137 				struct extent_state **cached_state,
10138 				u64 extent_start, size_t count,
10139 				struct btrfs_ioctl_encoded_io_args *encoded,
10140 				bool *unlocked)
10141 {
10142 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10143 	struct btrfs_root *root = inode->root;
10144 	struct btrfs_fs_info *fs_info = root->fs_info;
10145 	struct extent_io_tree *io_tree = &inode->io_tree;
10146 	struct btrfs_path *path;
10147 	struct extent_buffer *leaf;
10148 	struct btrfs_file_extent_item *item;
10149 	u64 ram_bytes;
10150 	unsigned long ptr;
10151 	void *tmp;
10152 	ssize_t ret;
10153 
10154 	path = btrfs_alloc_path();
10155 	if (!path) {
10156 		ret = -ENOMEM;
10157 		goto out;
10158 	}
10159 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
10160 				       extent_start, 0);
10161 	if (ret) {
10162 		if (ret > 0) {
10163 			/* The extent item disappeared? */
10164 			ret = -EIO;
10165 		}
10166 		goto out;
10167 	}
10168 	leaf = path->nodes[0];
10169 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10170 
10171 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
10172 	ptr = btrfs_file_extent_inline_start(item);
10173 
10174 	encoded->len = min_t(u64, extent_start + ram_bytes,
10175 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10176 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
10177 				 btrfs_file_extent_compression(leaf, item));
10178 	if (ret < 0)
10179 		goto out;
10180 	encoded->compression = ret;
10181 	if (encoded->compression) {
10182 		size_t inline_size;
10183 
10184 		inline_size = btrfs_file_extent_inline_item_len(leaf,
10185 								path->slots[0]);
10186 		if (inline_size > count) {
10187 			ret = -ENOBUFS;
10188 			goto out;
10189 		}
10190 		count = inline_size;
10191 		encoded->unencoded_len = ram_bytes;
10192 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
10193 	} else {
10194 		count = min_t(u64, count, encoded->len);
10195 		encoded->len = count;
10196 		encoded->unencoded_len = count;
10197 		ptr += iocb->ki_pos - extent_start;
10198 	}
10199 
10200 	tmp = kmalloc(count, GFP_NOFS);
10201 	if (!tmp) {
10202 		ret = -ENOMEM;
10203 		goto out;
10204 	}
10205 	read_extent_buffer(leaf, tmp, ptr, count);
10206 	btrfs_release_path(path);
10207 	unlock_extent(io_tree, start, lockend, cached_state);
10208 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10209 	*unlocked = true;
10210 
10211 	ret = copy_to_iter(tmp, count, iter);
10212 	if (ret != count)
10213 		ret = -EFAULT;
10214 	kfree(tmp);
10215 out:
10216 	btrfs_free_path(path);
10217 	return ret;
10218 }
10219 
10220 struct btrfs_encoded_read_private {
10221 	struct btrfs_inode *inode;
10222 	u64 file_offset;
10223 	wait_queue_head_t wait;
10224 	atomic_t pending;
10225 	blk_status_t status;
10226 	bool skip_csum;
10227 };
10228 
10229 static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
10230 					    struct bio *bio, int mirror_num)
10231 {
10232 	struct btrfs_encoded_read_private *priv = btrfs_bio(bio)->private;
10233 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10234 	blk_status_t ret;
10235 
10236 	if (!priv->skip_csum) {
10237 		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
10238 		if (ret)
10239 			return ret;
10240 	}
10241 
10242 	atomic_inc(&priv->pending);
10243 	btrfs_submit_bio(fs_info, bio, mirror_num);
10244 	return BLK_STS_OK;
10245 }
10246 
10247 static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
10248 {
10249 	const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK);
10250 	struct btrfs_encoded_read_private *priv = bbio->private;
10251 	struct btrfs_inode *inode = priv->inode;
10252 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10253 	u32 sectorsize = fs_info->sectorsize;
10254 	struct bio_vec *bvec;
10255 	struct bvec_iter_all iter_all;
10256 	u32 bio_offset = 0;
10257 
10258 	if (priv->skip_csum || !uptodate)
10259 		return bbio->bio.bi_status;
10260 
10261 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
10262 		unsigned int i, nr_sectors, pgoff;
10263 
10264 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
10265 		pgoff = bvec->bv_offset;
10266 		for (i = 0; i < nr_sectors; i++) {
10267 			ASSERT(pgoff < PAGE_SIZE);
10268 			if (btrfs_check_data_csum(&inode->vfs_inode, bbio, bio_offset,
10269 					    bvec->bv_page, pgoff))
10270 				return BLK_STS_IOERR;
10271 			bio_offset += sectorsize;
10272 			pgoff += sectorsize;
10273 		}
10274 	}
10275 	return BLK_STS_OK;
10276 }
10277 
10278 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
10279 {
10280 	struct btrfs_encoded_read_private *priv = bbio->private;
10281 	blk_status_t status;
10282 
10283 	status = btrfs_encoded_read_verify_csum(bbio);
10284 	if (status) {
10285 		/*
10286 		 * The memory barrier implied by the atomic_dec_return() here
10287 		 * pairs with the memory barrier implied by the
10288 		 * atomic_dec_return() or io_wait_event() in
10289 		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
10290 		 * write is observed before the load of status in
10291 		 * btrfs_encoded_read_regular_fill_pages().
10292 		 */
10293 		WRITE_ONCE(priv->status, status);
10294 	}
10295 	if (!atomic_dec_return(&priv->pending))
10296 		wake_up(&priv->wait);
10297 	btrfs_bio_free_csum(bbio);
10298 	bio_put(&bbio->bio);
10299 }
10300 
10301 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
10302 					  u64 file_offset, u64 disk_bytenr,
10303 					  u64 disk_io_size, struct page **pages)
10304 {
10305 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10306 	struct btrfs_encoded_read_private priv = {
10307 		.inode = inode,
10308 		.file_offset = file_offset,
10309 		.pending = ATOMIC_INIT(1),
10310 		.skip_csum = (inode->flags & BTRFS_INODE_NODATASUM),
10311 	};
10312 	unsigned long i = 0;
10313 	u64 cur = 0;
10314 	int ret;
10315 
10316 	init_waitqueue_head(&priv.wait);
10317 	/*
10318 	 * Submit bios for the extent, splitting due to bio or stripe limits as
10319 	 * necessary.
10320 	 */
10321 	while (cur < disk_io_size) {
10322 		struct extent_map *em;
10323 		struct btrfs_io_geometry geom;
10324 		struct bio *bio = NULL;
10325 		u64 remaining;
10326 
10327 		em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur,
10328 					 disk_io_size - cur);
10329 		if (IS_ERR(em)) {
10330 			ret = PTR_ERR(em);
10331 		} else {
10332 			ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ,
10333 						    disk_bytenr + cur, &geom);
10334 			free_extent_map(em);
10335 		}
10336 		if (ret) {
10337 			WRITE_ONCE(priv.status, errno_to_blk_status(ret));
10338 			break;
10339 		}
10340 		remaining = min(geom.len, disk_io_size - cur);
10341 		while (bio || remaining) {
10342 			size_t bytes = min_t(u64, remaining, PAGE_SIZE);
10343 
10344 			if (!bio) {
10345 				bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ,
10346 						      btrfs_encoded_read_endio,
10347 						      &priv);
10348 				bio->bi_iter.bi_sector =
10349 					(disk_bytenr + cur) >> SECTOR_SHIFT;
10350 			}
10351 
10352 			if (!bytes ||
10353 			    bio_add_page(bio, pages[i], bytes, 0) < bytes) {
10354 				blk_status_t status;
10355 
10356 				status = submit_encoded_read_bio(inode, bio, 0);
10357 				if (status) {
10358 					WRITE_ONCE(priv.status, status);
10359 					bio_put(bio);
10360 					goto out;
10361 				}
10362 				bio = NULL;
10363 				continue;
10364 			}
10365 
10366 			i++;
10367 			cur += bytes;
10368 			remaining -= bytes;
10369 		}
10370 	}
10371 
10372 out:
10373 	if (atomic_dec_return(&priv.pending))
10374 		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10375 	/* See btrfs_encoded_read_endio() for ordering. */
10376 	return blk_status_to_errno(READ_ONCE(priv.status));
10377 }
10378 
10379 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10380 					  struct iov_iter *iter,
10381 					  u64 start, u64 lockend,
10382 					  struct extent_state **cached_state,
10383 					  u64 disk_bytenr, u64 disk_io_size,
10384 					  size_t count, bool compressed,
10385 					  bool *unlocked)
10386 {
10387 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10388 	struct extent_io_tree *io_tree = &inode->io_tree;
10389 	struct page **pages;
10390 	unsigned long nr_pages, i;
10391 	u64 cur;
10392 	size_t page_offset;
10393 	ssize_t ret;
10394 
10395 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10396 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10397 	if (!pages)
10398 		return -ENOMEM;
10399 	ret = btrfs_alloc_page_array(nr_pages, pages);
10400 	if (ret) {
10401 		ret = -ENOMEM;
10402 		goto out;
10403 		}
10404 
10405 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10406 						    disk_io_size, pages);
10407 	if (ret)
10408 		goto out;
10409 
10410 	unlock_extent(io_tree, start, lockend, cached_state);
10411 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10412 	*unlocked = true;
10413 
10414 	if (compressed) {
10415 		i = 0;
10416 		page_offset = 0;
10417 	} else {
10418 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10419 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10420 	}
10421 	cur = 0;
10422 	while (cur < count) {
10423 		size_t bytes = min_t(size_t, count - cur,
10424 				     PAGE_SIZE - page_offset);
10425 
10426 		if (copy_page_to_iter(pages[i], page_offset, bytes,
10427 				      iter) != bytes) {
10428 			ret = -EFAULT;
10429 			goto out;
10430 		}
10431 		i++;
10432 		cur += bytes;
10433 		page_offset = 0;
10434 	}
10435 	ret = count;
10436 out:
10437 	for (i = 0; i < nr_pages; i++) {
10438 		if (pages[i])
10439 			__free_page(pages[i]);
10440 	}
10441 	kfree(pages);
10442 	return ret;
10443 }
10444 
10445 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10446 			   struct btrfs_ioctl_encoded_io_args *encoded)
10447 {
10448 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10449 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10450 	struct extent_io_tree *io_tree = &inode->io_tree;
10451 	ssize_t ret;
10452 	size_t count = iov_iter_count(iter);
10453 	u64 start, lockend, disk_bytenr, disk_io_size;
10454 	struct extent_state *cached_state = NULL;
10455 	struct extent_map *em;
10456 	bool unlocked = false;
10457 
10458 	file_accessed(iocb->ki_filp);
10459 
10460 	btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10461 
10462 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10463 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10464 		return 0;
10465 	}
10466 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10467 	/*
10468 	 * We don't know how long the extent containing iocb->ki_pos is, but if
10469 	 * it's compressed we know that it won't be longer than this.
10470 	 */
10471 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10472 
10473 	for (;;) {
10474 		struct btrfs_ordered_extent *ordered;
10475 
10476 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10477 					       lockend - start + 1);
10478 		if (ret)
10479 			goto out_unlock_inode;
10480 		lock_extent(io_tree, start, lockend, &cached_state);
10481 		ordered = btrfs_lookup_ordered_range(inode, start,
10482 						     lockend - start + 1);
10483 		if (!ordered)
10484 			break;
10485 		btrfs_put_ordered_extent(ordered);
10486 		unlock_extent(io_tree, start, lockend, &cached_state);
10487 		cond_resched();
10488 	}
10489 
10490 	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
10491 	if (IS_ERR(em)) {
10492 		ret = PTR_ERR(em);
10493 		goto out_unlock_extent;
10494 	}
10495 
10496 	if (em->block_start == EXTENT_MAP_INLINE) {
10497 		u64 extent_start = em->start;
10498 
10499 		/*
10500 		 * For inline extents we get everything we need out of the
10501 		 * extent item.
10502 		 */
10503 		free_extent_map(em);
10504 		em = NULL;
10505 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10506 						&cached_state, extent_start,
10507 						count, encoded, &unlocked);
10508 		goto out;
10509 	}
10510 
10511 	/*
10512 	 * We only want to return up to EOF even if the extent extends beyond
10513 	 * that.
10514 	 */
10515 	encoded->len = min_t(u64, extent_map_end(em),
10516 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10517 	if (em->block_start == EXTENT_MAP_HOLE ||
10518 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
10519 		disk_bytenr = EXTENT_MAP_HOLE;
10520 		count = min_t(u64, count, encoded->len);
10521 		encoded->len = count;
10522 		encoded->unencoded_len = count;
10523 	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10524 		disk_bytenr = em->block_start;
10525 		/*
10526 		 * Bail if the buffer isn't large enough to return the whole
10527 		 * compressed extent.
10528 		 */
10529 		if (em->block_len > count) {
10530 			ret = -ENOBUFS;
10531 			goto out_em;
10532 		}
10533 		disk_io_size = em->block_len;
10534 		count = em->block_len;
10535 		encoded->unencoded_len = em->ram_bytes;
10536 		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10537 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10538 							     em->compress_type);
10539 		if (ret < 0)
10540 			goto out_em;
10541 		encoded->compression = ret;
10542 	} else {
10543 		disk_bytenr = em->block_start + (start - em->start);
10544 		if (encoded->len > count)
10545 			encoded->len = count;
10546 		/*
10547 		 * Don't read beyond what we locked. This also limits the page
10548 		 * allocations that we'll do.
10549 		 */
10550 		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10551 		count = start + disk_io_size - iocb->ki_pos;
10552 		encoded->len = count;
10553 		encoded->unencoded_len = count;
10554 		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10555 	}
10556 	free_extent_map(em);
10557 	em = NULL;
10558 
10559 	if (disk_bytenr == EXTENT_MAP_HOLE) {
10560 		unlock_extent(io_tree, start, lockend, &cached_state);
10561 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10562 		unlocked = true;
10563 		ret = iov_iter_zero(count, iter);
10564 		if (ret != count)
10565 			ret = -EFAULT;
10566 	} else {
10567 		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10568 						 &cached_state, disk_bytenr,
10569 						 disk_io_size, count,
10570 						 encoded->compression,
10571 						 &unlocked);
10572 	}
10573 
10574 out:
10575 	if (ret >= 0)
10576 		iocb->ki_pos += encoded->len;
10577 out_em:
10578 	free_extent_map(em);
10579 out_unlock_extent:
10580 	if (!unlocked)
10581 		unlock_extent(io_tree, start, lockend, &cached_state);
10582 out_unlock_inode:
10583 	if (!unlocked)
10584 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10585 	return ret;
10586 }
10587 
10588 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10589 			       const struct btrfs_ioctl_encoded_io_args *encoded)
10590 {
10591 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10592 	struct btrfs_root *root = inode->root;
10593 	struct btrfs_fs_info *fs_info = root->fs_info;
10594 	struct extent_io_tree *io_tree = &inode->io_tree;
10595 	struct extent_changeset *data_reserved = NULL;
10596 	struct extent_state *cached_state = NULL;
10597 	int compression;
10598 	size_t orig_count;
10599 	u64 start, end;
10600 	u64 num_bytes, ram_bytes, disk_num_bytes;
10601 	unsigned long nr_pages, i;
10602 	struct page **pages;
10603 	struct btrfs_key ins;
10604 	bool extent_reserved = false;
10605 	struct extent_map *em;
10606 	ssize_t ret;
10607 
10608 	switch (encoded->compression) {
10609 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10610 		compression = BTRFS_COMPRESS_ZLIB;
10611 		break;
10612 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10613 		compression = BTRFS_COMPRESS_ZSTD;
10614 		break;
10615 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10616 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10617 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10618 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10619 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10620 		/* The sector size must match for LZO. */
10621 		if (encoded->compression -
10622 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10623 		    fs_info->sectorsize_bits)
10624 			return -EINVAL;
10625 		compression = BTRFS_COMPRESS_LZO;
10626 		break;
10627 	default:
10628 		return -EINVAL;
10629 	}
10630 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10631 		return -EINVAL;
10632 
10633 	orig_count = iov_iter_count(from);
10634 
10635 	/* The extent size must be sane. */
10636 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10637 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10638 		return -EINVAL;
10639 
10640 	/*
10641 	 * The compressed data must be smaller than the decompressed data.
10642 	 *
10643 	 * It's of course possible for data to compress to larger or the same
10644 	 * size, but the buffered I/O path falls back to no compression for such
10645 	 * data, and we don't want to break any assumptions by creating these
10646 	 * extents.
10647 	 *
10648 	 * Note that this is less strict than the current check we have that the
10649 	 * compressed data must be at least one sector smaller than the
10650 	 * decompressed data. We only want to enforce the weaker requirement
10651 	 * from old kernels that it is at least one byte smaller.
10652 	 */
10653 	if (orig_count >= encoded->unencoded_len)
10654 		return -EINVAL;
10655 
10656 	/* The extent must start on a sector boundary. */
10657 	start = iocb->ki_pos;
10658 	if (!IS_ALIGNED(start, fs_info->sectorsize))
10659 		return -EINVAL;
10660 
10661 	/*
10662 	 * The extent must end on a sector boundary. However, we allow a write
10663 	 * which ends at or extends i_size to have an unaligned length; we round
10664 	 * up the extent size and set i_size to the unaligned end.
10665 	 */
10666 	if (start + encoded->len < inode->vfs_inode.i_size &&
10667 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10668 		return -EINVAL;
10669 
10670 	/* Finally, the offset in the unencoded data must be sector-aligned. */
10671 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10672 		return -EINVAL;
10673 
10674 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10675 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10676 	end = start + num_bytes - 1;
10677 
10678 	/*
10679 	 * If the extent cannot be inline, the compressed data on disk must be
10680 	 * sector-aligned. For convenience, we extend it with zeroes if it
10681 	 * isn't.
10682 	 */
10683 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10684 	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10685 	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10686 	if (!pages)
10687 		return -ENOMEM;
10688 	for (i = 0; i < nr_pages; i++) {
10689 		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10690 		char *kaddr;
10691 
10692 		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10693 		if (!pages[i]) {
10694 			ret = -ENOMEM;
10695 			goto out_pages;
10696 		}
10697 		kaddr = kmap_local_page(pages[i]);
10698 		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10699 			kunmap_local(kaddr);
10700 			ret = -EFAULT;
10701 			goto out_pages;
10702 		}
10703 		if (bytes < PAGE_SIZE)
10704 			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10705 		kunmap_local(kaddr);
10706 	}
10707 
10708 	for (;;) {
10709 		struct btrfs_ordered_extent *ordered;
10710 
10711 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10712 		if (ret)
10713 			goto out_pages;
10714 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10715 						    start >> PAGE_SHIFT,
10716 						    end >> PAGE_SHIFT);
10717 		if (ret)
10718 			goto out_pages;
10719 		lock_extent(io_tree, start, end, &cached_state);
10720 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10721 		if (!ordered &&
10722 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10723 			break;
10724 		if (ordered)
10725 			btrfs_put_ordered_extent(ordered);
10726 		unlock_extent(io_tree, start, end, &cached_state);
10727 		cond_resched();
10728 	}
10729 
10730 	/*
10731 	 * We don't use the higher-level delalloc space functions because our
10732 	 * num_bytes and disk_num_bytes are different.
10733 	 */
10734 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10735 	if (ret)
10736 		goto out_unlock;
10737 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10738 	if (ret)
10739 		goto out_free_data_space;
10740 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10741 					      false);
10742 	if (ret)
10743 		goto out_qgroup_free_data;
10744 
10745 	/* Try an inline extent first. */
10746 	if (start == 0 && encoded->unencoded_len == encoded->len &&
10747 	    encoded->unencoded_offset == 0) {
10748 		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10749 					    compression, pages, true);
10750 		if (ret <= 0) {
10751 			if (ret == 0)
10752 				ret = orig_count;
10753 			goto out_delalloc_release;
10754 		}
10755 	}
10756 
10757 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10758 				   disk_num_bytes, 0, 0, &ins, 1, 1);
10759 	if (ret)
10760 		goto out_delalloc_release;
10761 	extent_reserved = true;
10762 
10763 	em = create_io_em(inode, start, num_bytes,
10764 			  start - encoded->unencoded_offset, ins.objectid,
10765 			  ins.offset, ins.offset, ram_bytes, compression,
10766 			  BTRFS_ORDERED_COMPRESSED);
10767 	if (IS_ERR(em)) {
10768 		ret = PTR_ERR(em);
10769 		goto out_free_reserved;
10770 	}
10771 	free_extent_map(em);
10772 
10773 	ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes,
10774 				       ins.objectid, ins.offset,
10775 				       encoded->unencoded_offset,
10776 				       (1 << BTRFS_ORDERED_ENCODED) |
10777 				       (1 << BTRFS_ORDERED_COMPRESSED),
10778 				       compression);
10779 	if (ret) {
10780 		btrfs_drop_extent_map_range(inode, start, end, false);
10781 		goto out_free_reserved;
10782 	}
10783 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10784 
10785 	if (start + encoded->len > inode->vfs_inode.i_size)
10786 		i_size_write(&inode->vfs_inode, start + encoded->len);
10787 
10788 	unlock_extent(io_tree, start, end, &cached_state);
10789 
10790 	btrfs_delalloc_release_extents(inode, num_bytes);
10791 
10792 	if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid,
10793 					  ins.offset, pages, nr_pages, 0, NULL,
10794 					  false)) {
10795 		btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0);
10796 		ret = -EIO;
10797 		goto out_pages;
10798 	}
10799 	ret = orig_count;
10800 	goto out;
10801 
10802 out_free_reserved:
10803 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10804 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10805 out_delalloc_release:
10806 	btrfs_delalloc_release_extents(inode, num_bytes);
10807 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10808 out_qgroup_free_data:
10809 	if (ret < 0)
10810 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
10811 out_free_data_space:
10812 	/*
10813 	 * If btrfs_reserve_extent() succeeded, then we already decremented
10814 	 * bytes_may_use.
10815 	 */
10816 	if (!extent_reserved)
10817 		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10818 out_unlock:
10819 	unlock_extent(io_tree, start, end, &cached_state);
10820 out_pages:
10821 	for (i = 0; i < nr_pages; i++) {
10822 		if (pages[i])
10823 			__free_page(pages[i]);
10824 	}
10825 	kvfree(pages);
10826 out:
10827 	if (ret >= 0)
10828 		iocb->ki_pos += encoded->len;
10829 	return ret;
10830 }
10831 
10832 #ifdef CONFIG_SWAP
10833 /*
10834  * Add an entry indicating a block group or device which is pinned by a
10835  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10836  * negative errno on failure.
10837  */
10838 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10839 				  bool is_block_group)
10840 {
10841 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10842 	struct btrfs_swapfile_pin *sp, *entry;
10843 	struct rb_node **p;
10844 	struct rb_node *parent = NULL;
10845 
10846 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10847 	if (!sp)
10848 		return -ENOMEM;
10849 	sp->ptr = ptr;
10850 	sp->inode = inode;
10851 	sp->is_block_group = is_block_group;
10852 	sp->bg_extent_count = 1;
10853 
10854 	spin_lock(&fs_info->swapfile_pins_lock);
10855 	p = &fs_info->swapfile_pins.rb_node;
10856 	while (*p) {
10857 		parent = *p;
10858 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10859 		if (sp->ptr < entry->ptr ||
10860 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10861 			p = &(*p)->rb_left;
10862 		} else if (sp->ptr > entry->ptr ||
10863 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10864 			p = &(*p)->rb_right;
10865 		} else {
10866 			if (is_block_group)
10867 				entry->bg_extent_count++;
10868 			spin_unlock(&fs_info->swapfile_pins_lock);
10869 			kfree(sp);
10870 			return 1;
10871 		}
10872 	}
10873 	rb_link_node(&sp->node, parent, p);
10874 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10875 	spin_unlock(&fs_info->swapfile_pins_lock);
10876 	return 0;
10877 }
10878 
10879 /* Free all of the entries pinned by this swapfile. */
10880 static void btrfs_free_swapfile_pins(struct inode *inode)
10881 {
10882 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10883 	struct btrfs_swapfile_pin *sp;
10884 	struct rb_node *node, *next;
10885 
10886 	spin_lock(&fs_info->swapfile_pins_lock);
10887 	node = rb_first(&fs_info->swapfile_pins);
10888 	while (node) {
10889 		next = rb_next(node);
10890 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10891 		if (sp->inode == inode) {
10892 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10893 			if (sp->is_block_group) {
10894 				btrfs_dec_block_group_swap_extents(sp->ptr,
10895 							   sp->bg_extent_count);
10896 				btrfs_put_block_group(sp->ptr);
10897 			}
10898 			kfree(sp);
10899 		}
10900 		node = next;
10901 	}
10902 	spin_unlock(&fs_info->swapfile_pins_lock);
10903 }
10904 
10905 struct btrfs_swap_info {
10906 	u64 start;
10907 	u64 block_start;
10908 	u64 block_len;
10909 	u64 lowest_ppage;
10910 	u64 highest_ppage;
10911 	unsigned long nr_pages;
10912 	int nr_extents;
10913 };
10914 
10915 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10916 				 struct btrfs_swap_info *bsi)
10917 {
10918 	unsigned long nr_pages;
10919 	unsigned long max_pages;
10920 	u64 first_ppage, first_ppage_reported, next_ppage;
10921 	int ret;
10922 
10923 	/*
10924 	 * Our swapfile may have had its size extended after the swap header was
10925 	 * written. In that case activating the swapfile should not go beyond
10926 	 * the max size set in the swap header.
10927 	 */
10928 	if (bsi->nr_pages >= sis->max)
10929 		return 0;
10930 
10931 	max_pages = sis->max - bsi->nr_pages;
10932 	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
10933 	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
10934 				PAGE_SIZE) >> PAGE_SHIFT;
10935 
10936 	if (first_ppage >= next_ppage)
10937 		return 0;
10938 	nr_pages = next_ppage - first_ppage;
10939 	nr_pages = min(nr_pages, max_pages);
10940 
10941 	first_ppage_reported = first_ppage;
10942 	if (bsi->start == 0)
10943 		first_ppage_reported++;
10944 	if (bsi->lowest_ppage > first_ppage_reported)
10945 		bsi->lowest_ppage = first_ppage_reported;
10946 	if (bsi->highest_ppage < (next_ppage - 1))
10947 		bsi->highest_ppage = next_ppage - 1;
10948 
10949 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10950 	if (ret < 0)
10951 		return ret;
10952 	bsi->nr_extents += ret;
10953 	bsi->nr_pages += nr_pages;
10954 	return 0;
10955 }
10956 
10957 static void btrfs_swap_deactivate(struct file *file)
10958 {
10959 	struct inode *inode = file_inode(file);
10960 
10961 	btrfs_free_swapfile_pins(inode);
10962 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10963 }
10964 
10965 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10966 			       sector_t *span)
10967 {
10968 	struct inode *inode = file_inode(file);
10969 	struct btrfs_root *root = BTRFS_I(inode)->root;
10970 	struct btrfs_fs_info *fs_info = root->fs_info;
10971 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10972 	struct extent_state *cached_state = NULL;
10973 	struct extent_map *em = NULL;
10974 	struct btrfs_device *device = NULL;
10975 	struct btrfs_swap_info bsi = {
10976 		.lowest_ppage = (sector_t)-1ULL,
10977 	};
10978 	int ret = 0;
10979 	u64 isize;
10980 	u64 start;
10981 
10982 	/*
10983 	 * If the swap file was just created, make sure delalloc is done. If the
10984 	 * file changes again after this, the user is doing something stupid and
10985 	 * we don't really care.
10986 	 */
10987 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10988 	if (ret)
10989 		return ret;
10990 
10991 	/*
10992 	 * The inode is locked, so these flags won't change after we check them.
10993 	 */
10994 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10995 		btrfs_warn(fs_info, "swapfile must not be compressed");
10996 		return -EINVAL;
10997 	}
10998 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10999 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
11000 		return -EINVAL;
11001 	}
11002 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
11003 		btrfs_warn(fs_info, "swapfile must not be checksummed");
11004 		return -EINVAL;
11005 	}
11006 
11007 	/*
11008 	 * Balance or device remove/replace/resize can move stuff around from
11009 	 * under us. The exclop protection makes sure they aren't running/won't
11010 	 * run concurrently while we are mapping the swap extents, and
11011 	 * fs_info->swapfile_pins prevents them from running while the swap
11012 	 * file is active and moving the extents. Note that this also prevents
11013 	 * a concurrent device add which isn't actually necessary, but it's not
11014 	 * really worth the trouble to allow it.
11015 	 */
11016 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
11017 		btrfs_warn(fs_info,
11018 	   "cannot activate swapfile while exclusive operation is running");
11019 		return -EBUSY;
11020 	}
11021 
11022 	/*
11023 	 * Prevent snapshot creation while we are activating the swap file.
11024 	 * We do not want to race with snapshot creation. If snapshot creation
11025 	 * already started before we bumped nr_swapfiles from 0 to 1 and
11026 	 * completes before the first write into the swap file after it is
11027 	 * activated, than that write would fallback to COW.
11028 	 */
11029 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
11030 		btrfs_exclop_finish(fs_info);
11031 		btrfs_warn(fs_info,
11032 	   "cannot activate swapfile because snapshot creation is in progress");
11033 		return -EINVAL;
11034 	}
11035 	/*
11036 	 * Snapshots can create extents which require COW even if NODATACOW is
11037 	 * set. We use this counter to prevent snapshots. We must increment it
11038 	 * before walking the extents because we don't want a concurrent
11039 	 * snapshot to run after we've already checked the extents.
11040 	 *
11041 	 * It is possible that subvolume is marked for deletion but still not
11042 	 * removed yet. To prevent this race, we check the root status before
11043 	 * activating the swapfile.
11044 	 */
11045 	spin_lock(&root->root_item_lock);
11046 	if (btrfs_root_dead(root)) {
11047 		spin_unlock(&root->root_item_lock);
11048 
11049 		btrfs_exclop_finish(fs_info);
11050 		btrfs_warn(fs_info,
11051 		"cannot activate swapfile because subvolume %llu is being deleted",
11052 			root->root_key.objectid);
11053 		return -EPERM;
11054 	}
11055 	atomic_inc(&root->nr_swapfiles);
11056 	spin_unlock(&root->root_item_lock);
11057 
11058 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
11059 
11060 	lock_extent(io_tree, 0, isize - 1, &cached_state);
11061 	start = 0;
11062 	while (start < isize) {
11063 		u64 logical_block_start, physical_block_start;
11064 		struct btrfs_block_group *bg;
11065 		u64 len = isize - start;
11066 
11067 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
11068 		if (IS_ERR(em)) {
11069 			ret = PTR_ERR(em);
11070 			goto out;
11071 		}
11072 
11073 		if (em->block_start == EXTENT_MAP_HOLE) {
11074 			btrfs_warn(fs_info, "swapfile must not have holes");
11075 			ret = -EINVAL;
11076 			goto out;
11077 		}
11078 		if (em->block_start == EXTENT_MAP_INLINE) {
11079 			/*
11080 			 * It's unlikely we'll ever actually find ourselves
11081 			 * here, as a file small enough to fit inline won't be
11082 			 * big enough to store more than the swap header, but in
11083 			 * case something changes in the future, let's catch it
11084 			 * here rather than later.
11085 			 */
11086 			btrfs_warn(fs_info, "swapfile must not be inline");
11087 			ret = -EINVAL;
11088 			goto out;
11089 		}
11090 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
11091 			btrfs_warn(fs_info, "swapfile must not be compressed");
11092 			ret = -EINVAL;
11093 			goto out;
11094 		}
11095 
11096 		logical_block_start = em->block_start + (start - em->start);
11097 		len = min(len, em->len - (start - em->start));
11098 		free_extent_map(em);
11099 		em = NULL;
11100 
11101 		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
11102 		if (ret < 0) {
11103 			goto out;
11104 		} else if (ret) {
11105 			ret = 0;
11106 		} else {
11107 			btrfs_warn(fs_info,
11108 				   "swapfile must not be copy-on-write");
11109 			ret = -EINVAL;
11110 			goto out;
11111 		}
11112 
11113 		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
11114 		if (IS_ERR(em)) {
11115 			ret = PTR_ERR(em);
11116 			goto out;
11117 		}
11118 
11119 		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
11120 			btrfs_warn(fs_info,
11121 				   "swapfile must have single data profile");
11122 			ret = -EINVAL;
11123 			goto out;
11124 		}
11125 
11126 		if (device == NULL) {
11127 			device = em->map_lookup->stripes[0].dev;
11128 			ret = btrfs_add_swapfile_pin(inode, device, false);
11129 			if (ret == 1)
11130 				ret = 0;
11131 			else if (ret)
11132 				goto out;
11133 		} else if (device != em->map_lookup->stripes[0].dev) {
11134 			btrfs_warn(fs_info, "swapfile must be on one device");
11135 			ret = -EINVAL;
11136 			goto out;
11137 		}
11138 
11139 		physical_block_start = (em->map_lookup->stripes[0].physical +
11140 					(logical_block_start - em->start));
11141 		len = min(len, em->len - (logical_block_start - em->start));
11142 		free_extent_map(em);
11143 		em = NULL;
11144 
11145 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
11146 		if (!bg) {
11147 			btrfs_warn(fs_info,
11148 			   "could not find block group containing swapfile");
11149 			ret = -EINVAL;
11150 			goto out;
11151 		}
11152 
11153 		if (!btrfs_inc_block_group_swap_extents(bg)) {
11154 			btrfs_warn(fs_info,
11155 			   "block group for swapfile at %llu is read-only%s",
11156 			   bg->start,
11157 			   atomic_read(&fs_info->scrubs_running) ?
11158 				       " (scrub running)" : "");
11159 			btrfs_put_block_group(bg);
11160 			ret = -EINVAL;
11161 			goto out;
11162 		}
11163 
11164 		ret = btrfs_add_swapfile_pin(inode, bg, true);
11165 		if (ret) {
11166 			btrfs_put_block_group(bg);
11167 			if (ret == 1)
11168 				ret = 0;
11169 			else
11170 				goto out;
11171 		}
11172 
11173 		if (bsi.block_len &&
11174 		    bsi.block_start + bsi.block_len == physical_block_start) {
11175 			bsi.block_len += len;
11176 		} else {
11177 			if (bsi.block_len) {
11178 				ret = btrfs_add_swap_extent(sis, &bsi);
11179 				if (ret)
11180 					goto out;
11181 			}
11182 			bsi.start = start;
11183 			bsi.block_start = physical_block_start;
11184 			bsi.block_len = len;
11185 		}
11186 
11187 		start += len;
11188 	}
11189 
11190 	if (bsi.block_len)
11191 		ret = btrfs_add_swap_extent(sis, &bsi);
11192 
11193 out:
11194 	if (!IS_ERR_OR_NULL(em))
11195 		free_extent_map(em);
11196 
11197 	unlock_extent(io_tree, 0, isize - 1, &cached_state);
11198 
11199 	if (ret)
11200 		btrfs_swap_deactivate(file);
11201 
11202 	btrfs_drew_write_unlock(&root->snapshot_lock);
11203 
11204 	btrfs_exclop_finish(fs_info);
11205 
11206 	if (ret)
11207 		return ret;
11208 
11209 	if (device)
11210 		sis->bdev = device->bdev;
11211 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
11212 	sis->max = bsi.nr_pages;
11213 	sis->pages = bsi.nr_pages - 1;
11214 	sis->highest_bit = bsi.nr_pages - 1;
11215 	return bsi.nr_extents;
11216 }
11217 #else
11218 static void btrfs_swap_deactivate(struct file *file)
11219 {
11220 }
11221 
11222 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
11223 			       sector_t *span)
11224 {
11225 	return -EOPNOTSUPP;
11226 }
11227 #endif
11228 
11229 /*
11230  * Update the number of bytes used in the VFS' inode. When we replace extents in
11231  * a range (clone, dedupe, fallocate's zero range), we must update the number of
11232  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
11233  * always get a correct value.
11234  */
11235 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
11236 			      const u64 add_bytes,
11237 			      const u64 del_bytes)
11238 {
11239 	if (add_bytes == del_bytes)
11240 		return;
11241 
11242 	spin_lock(&inode->lock);
11243 	if (del_bytes > 0)
11244 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
11245 	if (add_bytes > 0)
11246 		inode_add_bytes(&inode->vfs_inode, add_bytes);
11247 	spin_unlock(&inode->lock);
11248 }
11249 
11250 /**
11251  * Verify that there are no ordered extents for a given file range.
11252  *
11253  * @inode:   The target inode.
11254  * @start:   Start offset of the file range, should be sector size aligned.
11255  * @end:     End offset (inclusive) of the file range, its value +1 should be
11256  *           sector size aligned.
11257  *
11258  * This should typically be used for cases where we locked an inode's VFS lock in
11259  * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
11260  * we have flushed all delalloc in the range, we have waited for all ordered
11261  * extents in the range to complete and finally we have locked the file range in
11262  * the inode's io_tree.
11263  */
11264 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
11265 {
11266 	struct btrfs_root *root = inode->root;
11267 	struct btrfs_ordered_extent *ordered;
11268 
11269 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
11270 		return;
11271 
11272 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
11273 	if (ordered) {
11274 		btrfs_err(root->fs_info,
11275 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
11276 			  start, end, btrfs_ino(inode), root->root_key.objectid,
11277 			  ordered->file_offset,
11278 			  ordered->file_offset + ordered->num_bytes - 1);
11279 		btrfs_put_ordered_extent(ordered);
11280 	}
11281 
11282 	ASSERT(ordered == NULL);
11283 }
11284 
11285 static const struct inode_operations btrfs_dir_inode_operations = {
11286 	.getattr	= btrfs_getattr,
11287 	.lookup		= btrfs_lookup,
11288 	.create		= btrfs_create,
11289 	.unlink		= btrfs_unlink,
11290 	.link		= btrfs_link,
11291 	.mkdir		= btrfs_mkdir,
11292 	.rmdir		= btrfs_rmdir,
11293 	.rename		= btrfs_rename2,
11294 	.symlink	= btrfs_symlink,
11295 	.setattr	= btrfs_setattr,
11296 	.mknod		= btrfs_mknod,
11297 	.listxattr	= btrfs_listxattr,
11298 	.permission	= btrfs_permission,
11299 	.get_acl	= btrfs_get_acl,
11300 	.set_acl	= btrfs_set_acl,
11301 	.update_time	= btrfs_update_time,
11302 	.tmpfile        = btrfs_tmpfile,
11303 	.fileattr_get	= btrfs_fileattr_get,
11304 	.fileattr_set	= btrfs_fileattr_set,
11305 };
11306 
11307 static const struct file_operations btrfs_dir_file_operations = {
11308 	.llseek		= generic_file_llseek,
11309 	.read		= generic_read_dir,
11310 	.iterate_shared	= btrfs_real_readdir,
11311 	.open		= btrfs_opendir,
11312 	.unlocked_ioctl	= btrfs_ioctl,
11313 #ifdef CONFIG_COMPAT
11314 	.compat_ioctl	= btrfs_compat_ioctl,
11315 #endif
11316 	.release        = btrfs_release_file,
11317 	.fsync		= btrfs_sync_file,
11318 };
11319 
11320 /*
11321  * btrfs doesn't support the bmap operation because swapfiles
11322  * use bmap to make a mapping of extents in the file.  They assume
11323  * these extents won't change over the life of the file and they
11324  * use the bmap result to do IO directly to the drive.
11325  *
11326  * the btrfs bmap call would return logical addresses that aren't
11327  * suitable for IO and they also will change frequently as COW
11328  * operations happen.  So, swapfile + btrfs == corruption.
11329  *
11330  * For now we're avoiding this by dropping bmap.
11331  */
11332 static const struct address_space_operations btrfs_aops = {
11333 	.read_folio	= btrfs_read_folio,
11334 	.writepages	= btrfs_writepages,
11335 	.readahead	= btrfs_readahead,
11336 	.direct_IO	= noop_direct_IO,
11337 	.invalidate_folio = btrfs_invalidate_folio,
11338 	.release_folio	= btrfs_release_folio,
11339 	.migrate_folio	= btrfs_migrate_folio,
11340 	.dirty_folio	= filemap_dirty_folio,
11341 	.error_remove_page = generic_error_remove_page,
11342 	.swap_activate	= btrfs_swap_activate,
11343 	.swap_deactivate = btrfs_swap_deactivate,
11344 };
11345 
11346 static const struct inode_operations btrfs_file_inode_operations = {
11347 	.getattr	= btrfs_getattr,
11348 	.setattr	= btrfs_setattr,
11349 	.listxattr      = btrfs_listxattr,
11350 	.permission	= btrfs_permission,
11351 	.fiemap		= btrfs_fiemap,
11352 	.get_acl	= btrfs_get_acl,
11353 	.set_acl	= btrfs_set_acl,
11354 	.update_time	= btrfs_update_time,
11355 	.fileattr_get	= btrfs_fileattr_get,
11356 	.fileattr_set	= btrfs_fileattr_set,
11357 };
11358 static const struct inode_operations btrfs_special_inode_operations = {
11359 	.getattr	= btrfs_getattr,
11360 	.setattr	= btrfs_setattr,
11361 	.permission	= btrfs_permission,
11362 	.listxattr	= btrfs_listxattr,
11363 	.get_acl	= btrfs_get_acl,
11364 	.set_acl	= btrfs_set_acl,
11365 	.update_time	= btrfs_update_time,
11366 };
11367 static const struct inode_operations btrfs_symlink_inode_operations = {
11368 	.get_link	= page_get_link,
11369 	.getattr	= btrfs_getattr,
11370 	.setattr	= btrfs_setattr,
11371 	.permission	= btrfs_permission,
11372 	.listxattr	= btrfs_listxattr,
11373 	.update_time	= btrfs_update_time,
11374 };
11375 
11376 const struct dentry_operations btrfs_dentry_operations = {
11377 	.d_delete	= btrfs_dentry_delete,
11378 };
11379