// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <asm/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	ssize_t submitted;
	struct extent_changeset *data_reserved;
	bool data_space_reserved;
	bool nocow_done;
};

struct btrfs_dio_private {
	struct inode *inode;

	/*
	 * Since DIO can use anonymous pages, we cannot use page_offset() to
	 * grab the file offset, thus we need a dedicated member for the file
	 * offset.
	 */
	u64 file_offset;
	/* Used for bio::bi_size */
	u32 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* Array of checksums */
	u8 *csums;

	/* This must be last */
	struct bio bio;
};

static struct bio_set btrfs_dio_bioset;

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
				       u64 len, u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static void __endio_write_update_ordered(struct btrfs_inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);

/*
 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first
 *		     attempt, return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&BTRFS_I(inode)->i_mmap_lock);
	return 0;
}

/*
 * btrfs_inode_unlock - unlock inode i_rwsem
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&BTRFS_I(inode)->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
}

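/*
 * Illustrative usage sketch (an assumed caller, not code from this file):
 * a non-blocking path passes BTRFS_ILOCK_TRY and bails out with -EAGAIN
 * instead of sleeping on a contended i_rwsem, and the unlock must use the
 * same shared/exclusive flag so the lock mode matches:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	(ret == -EAGAIN, the caller may retry later)
 *	... read-only work under the shared lock ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */
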
/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: the caller must ensure that when an error happens, it does not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct page *locked_page,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = page_offset(locked_page);
	u64 page_end = page_start + PAGE_SIZE - 1;

	struct page *page;

	while (index <= end_index) {
		/*
		 * For the locked page, we will call end_extent_writepage() on
		 * it in run_delalloc_range() for the error handling.  That
		 * end_extent_writepage() function will call
		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
		 * run the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the page range, and the ordered extent will never finish.
		 */
		if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		page = find_get_page(inode->vfs_inode.i_mapping, index);
		index++;
		if (!page)
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then __endio_write_update_ordered() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
					       offset, bytes);
		put_page(page);
	}

	/* The locked page covers the full range, nothing needs to be done */
	if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
		return;
	/*
	 * In case this page belongs to the delalloc range being instantiated
	 * then skip it, since the first page of a range is going to be
	 * properly cleaned up by the caller of run_delalloc_range
	 */
	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
		bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
		offset = page_offset(locked_page) + PAGE_SIZE;
	}

	return __endio_write_update_ordered(inode, offset, bytes, false);
}

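/*
 * Worked example for the cleanup above (illustrative, assuming 4K pages):
 * for offset = 0, bytes = 64K and locked_page at file offset 0, the loop
 * clears the Ordered bit on pages 1..15, the locked page (page 0) is left
 * for run_delalloc_range() to finish, and the final call passes the
 * remaining [4K, 64K) part of the range to __endio_write_update_ordered().
 */
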
static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into
 * the btree.  The caller should have called btrfs_drop_extents() so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	ASSERT((compressed_size > 0 && compressed_pages) ||
	       (compressed_size == 0 && !compressed_pages));

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->vfs_inode.i_mapping, 0);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}


/*
 * Conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
					  size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages,
					  bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	/*
	 * We can create an inline extent if it ends at or beyond the current
	 * i_size, is no larger than a sector (decompressed), and the (possibly
	 * compressed) data fits in a leaf and the configured maximum inline
	 * size.
	 */
	if (size < i_size_read(&inode->vfs_inode) ||
	    size > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    data_len > fs_info->max_inline)
		return 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_pages, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent won't
	 * count as a data extent, so free the space directly here.
	 * At reserve time it's always aligned to page size, so just free one
	 * page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

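/*
 * Worked example for the eligibility check above (illustrative, assuming a
 * 4K sectorsize and the default max_inline of 2048 bytes): a 1000 byte
 * write at offset 0 of an empty file has size = data_len = 1000, which ends
 * beyond i_size (0) and is below both the sectorsize and max_inline, so it
 * may be inlined; a 3000 byte write fails the max_inline check and returns
 * 1 so the caller falls back to a regular extent.
 */
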
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct inode *inode;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

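/*
 * Lifetime sketch for the structures above (mirrors cow_file_range_async()
 * and async_cow_free() later in this file): one async_cow is kvmalloc'ed
 * with struct_size(ctx, chunks, num_chunks), every chunk pins it through
 * ctx->num_chunks, and the whole allocation is kvfree'd only when the last
 * chunk's work item completes:
 *
 *	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
 *	atomic_set(&ctx->num_chunks, num_chunks);
 *	...
 *	if (atomic_dec_and_test(&async_cow->num_chunks))
 *		kvfree(async_cow);
 */
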
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Special check for subpage.
	 *
	 * We lock the full page then run each delalloc range in the page, thus
	 * for the following case, we will hit some subpage specific corner case:
	 *
	 * 0		32K		64K
	 * |	|///////|	|///////|
	 *		\- A		\- B
	 *
	 * In the above case, both range A and range B will try to unlock the
	 * full page [0, 64K), causing the one finished later to find the page
	 * unlocked already, triggering various page lock requirement BUG_ON()s.
	 *
	 * So here we add an artificial limit: subpage compression can only be
	 * triggered if the range is fully page aligned.
	 *
	 * In theory we only need to ensure the first page is fully covered, but
	 * the trailing partial page will be locked until the full compression
	 * finishes, delaying the write of other ranges.
	 *
	 * TODO: Make btrfs_run_delalloc_range() lock all delalloc ranges first,
	 * to prevent any submitted async extent from unlocking the full page.
	 * By this, we can ensure for the subpage case that only the last
	 * async_cow will unlock the full page.
	 */
	if (fs_info->sectorsize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(end + 1, PAGE_SIZE))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode, small_write);
}

/*
 * We create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many CPUs.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
	struct inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int compressed_extents = 0;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/*
	 * For the subpage case, we require full page alignment for the sector
	 * aligned range.
	 * Thus we must also check against @actual_end, not just @end.
	 */
	if (blocksize < PAGE_SIZE) {
		if (!IS_ALIGNED(start, PAGE_SIZE) ||
		    !IS_ALIGNED(round_up(actual_end, blocksize), PAGE_SIZE))
			goto cleanup_and_bail_uncompressed;
	}

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(BTRFS_I(inode), start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			nr_pages = 0;
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * We need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

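		/*
		 * The low 4 bits of the value passed below select the
		 * compression type and the bits above them the level, e.g.
		 * BTRFS_COMPRESS_ZSTD (3) at level 7 is encoded as
		 * (3 | (7 << 4)) == 0x73 (illustrative values).
		 */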
		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = offset_in_page(total_compressed);
			struct page *page = pages[nr_pages - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset)
				memzero_page(page, offset, PAGE_SIZE - offset);
			will_compress = 1;
		}
	}
cont:
	/*
	 * Check cow_file_range() for why we don't even try to create an inline
	 * extent for the subpage case.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		/* let's try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    0, BTRFS_COMPRESS_NONE,
						    NULL, false);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(BTRFS_I(inode), actual_end,
						    total_compressed,
						    compress_type, pages,
						    false);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * Inline extent creation worked or returned an error,
			 * so we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
						     NULL,
						     clear_flags,
						     PAGE_UNLOCK |
						     PAGE_START_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);

			/*
			 * Ensure we only free the compressed pages if we have
			 * them allocated, as we can still reach here with
			 * inode_need_compress() == false.
			 */
			if (pages) {
				for (i = 0; i < nr_pages; i++) {
					WARN_ON(pages[i]->mapping);
					put_page(pages[i]);
				}
				kfree(pages);
			}
			return 0;
		}
	}

	if (will_compress) {
		/*
		 * We aren't doing an inline extent.  Round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * One last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size.
		 */
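		/*
		 * Worked example (illustrative, 4K sectorsize): a 64K chunk
		 * whose compressed output rounds up to 60K passes the check
		 * below (60K + 4K <= 64K) and is written compressed; output
		 * that rounds up to 64K frees no sector, fails the check and
		 * is written uncompressed.
		 */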
		total_in = round_up(total_in, fs_info->sectorsize);
		if (total_compressed + blocksize <= total_in) {
			compressed_extents++;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_chunk, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return compressed_extents;
		}
	}
	if (pages) {
		/*
		 * The compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array.
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  Redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (async_chunk->locked_page &&
	    (page_offset(async_chunk->locked_page) >= start &&
	     page_offset(async_chunk->locked_page) <= end)) {
		__set_page_dirty_nobuffers(async_chunk->locked_page);
		/* unlocked later on in the async handlers */
	}

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	compressed_extents++;

	return compressed_extents;
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

static int submit_uncompressed_range(struct btrfs_inode *inode,
				     struct async_extent *async_extent,
				     struct page *locked_page)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	unsigned long nr_written = 0;
	int page_started = 0;
	int ret;

	/*
	 * Call cow_file_range() to run the delalloc range directly, since we
	 * won't go to the NOCOW or async path again.
	 *
	 * Also we call cow_file_range() with @unlock == 0, so that we
	 * can directly submit them without interruption.
	 */
	ret = cow_file_range(inode, locked_page, start, end, &page_started,
			     &nr_written, 0);
	/* Inline extent inserted, page gets unlocked and everything is done */
	if (page_started) {
		ret = 0;
		goto out;
	}
	if (ret < 0) {
		if (locked_page)
			unlock_page(locked_page);
		goto out;
	}

	ret = extent_write_locked_range(&inode->vfs_inode, start, end);
	/* All pages will be unlocked, including @locked_page */
out:
	kfree(async_extent);
	return ret;
}

static int submit_one_async_extent(struct btrfs_inode *inode,
				   struct async_chunk *async_chunk,
				   struct async_extent *async_extent,
				   u64 *alloc_hint)
{
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct page *locked_page = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	/*
	 * If async_chunk->locked_page is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_page) {
		u64 locked_page_start = page_offset(async_chunk->locked_page);
		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;

		if (!(start >= locked_page_end || end <= locked_page_start))
			locked_page = async_chunk->locked_page;
	}
	lock_extent(io_tree, start, end);

	/* We have fallen back to an uncompressed write */
	if (!async_extent->pages)
		return submit_uncompressed_range(inode, async_extent, locked_page);

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		free_async_extent_pages(async_extent);
		/*
		 * Here we used to try again by going back to the
		 * non-compressed path for ENOSPC.  But if we can't reserve
		 * space even for the compressed size, how could it work for
		 * the uncompressed size, which requires more space?  So here
		 * we directly take the error path.
		 */
		goto out_free;
	}

	/* Here we're doing allocation and writeback of the compressed pages */
	em = create_io_em(inode, start,
			  async_extent->ram_size,	/* len */
			  start,			/* orig_start */
			  ins.objectid,			/* block_start */
			  ins.offset,			/* block_len */
			  ins.offset,			/* orig_block_len */
			  async_extent->ram_size,	/* ram_bytes */
			  async_extent->compress_type,
			  BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ret = btrfs_add_ordered_extent(inode, start,		/* file_offset */
				       async_extent->ram_size,	/* num_bytes */
				       async_extent->ram_size,	/* ram_bytes */
				       ins.objectid,		/* disk_bytenr */
				       ins.offset,		/* disk_num_bytes */
				       0,			/* offset */
				       1 << BTRFS_ORDERED_COMPRESSED,
				       async_extent->compress_type);
	if (ret) {
		btrfs_drop_extent_cache(inode, start, end, 0);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
			    async_extent->ram_size,	/* num_bytes */
			    ins.objectid,		/* disk_bytenr */
			    ins.offset,			/* compressed_len */
			    async_extent->pages,	/* compressed_pages */
			    async_extent->nr_pages,
			    async_chunk->write_flags,
			    async_chunk->blkcg_css, true)) {
		const u64 start = async_extent->start;
		const u64 end = start + async_extent->ram_size - 1;

		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);

		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
		free_async_extent_pages(async_extent);
	}
	*alloc_hint = ins.objectid + ins.offset;
	kfree(async_extent);
	return ret;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	return ret;
}

/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	int ret = 0;

	while (!list_empty(&async_chunk->extents)) {
		u64 extent_start;
		u64 ram_size;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		extent_start = async_extent->start;
		ram_size = async_extent->ram_size;

		ret = submit_one_async_extent(inode, async_chunk, async_extent,
					      &alloc_hint);
		btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
			    inode->root->root_key.objectid,
			    btrfs_ino(inode), extent_start, ram_size, ret);
	}
}

static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in RAM to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize,  num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of a page, which means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger an inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
				       end + 1);

		/* let's try to make an inline extent */
		ret = cow_file_range_inline(inode, actual_end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			/*
			 * locked_page is locked by the caller of
			 * writepage_delalloc(), not locked by
			 * __process_pages_contig().
			 *
			 * We can't let __process_pages_contig() unlock it,
			 * as it doesn't have any subpage::writers recorded.
			 *
			 * Here we manually unlock the page, since the caller
			 * can't use page_started to determine if it's an
			 * inline extent or a compressed extent.
			 */
			unlock_page(locked_page);
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ram_size, ram_size,
					       ins.objectid, cur_alloc_size, 0,
					       1 << BTRFS_ORDERED_REGULAR,
					       BTRFS_COMPRESS_NONE);
		if (ret)
			goto out_drop_extent_cache;

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop the cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the metadata of this
			 * ordered extent, as its metadata should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits.
		 *
		 * Do set the Ordered (Private2) bit so we know this page was
		 * properly set up for writepage.
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * On a btrfs_reloc_clone_csums() error: since @start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount. We must make
	 * sure extent_clear_unlock_delalloc() does not try to decrement again
	 * the data space_info's bytes_may_use counter, therefore we do not pass
	 * it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}

/*
 * Work queue callback to start compression on a file's pages.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	int compressed_extents;

	async_chunk = container_of(work, struct async_chunk, work);

	compressed_extents = compress_file_range(async_chunk);
	if (compressed_extents == 0) {
		btrfs_add_delayed_iput(async_chunk->inode);
		async_chunk->inode = NULL;
	}
}

/*
 * Work queue callback to submit previously compressed pages.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						     work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	unsigned long nr_pages;

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * ->inode could be NULL if async_cow_start() has failed to compress,
	 * in which case we don't have anything to submit, yet we need to
	 * always adjust ->async_delalloc_pages as it's paired with the init
	 * happening in cow_file_range_async().
	 */
	if (async_chunk->inode)
		submit_compressed_extents(async_chunk);

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_chunk *async_chunk;
	struct async_cow *async_cow;

	async_chunk = container_of(work, struct async_chunk, work);
	if (async_chunk->inode)
		btrfs_add_delayed_iput(async_chunk->inode);
	if (async_chunk->blkcg_css)
		css_put(async_chunk->blkcg_css);

	async_cow = async_chunk->async_cow;
	if (atomic_dec_and_test(&async_cow->num_chunks))
		kvfree(async_cow);
}

1421 
1422 static int cow_file_range_async(struct btrfs_inode *inode,
1423 				struct writeback_control *wbc,
1424 				struct page *locked_page,
1425 				u64 start, u64 end, int *page_started,
1426 				unsigned long *nr_written)
1427 {
1428 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1429 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1430 	struct async_cow *ctx;
1431 	struct async_chunk *async_chunk;
1432 	unsigned long nr_pages;
1433 	u64 cur_end;
1434 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1435 	int i;
1436 	bool should_compress;
1437 	unsigned nofs_flag;
1438 	const unsigned int write_flags = wbc_to_write_flags(wbc);
1439 
1440 	unlock_extent(&inode->io_tree, start, end);
1441 
1442 	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
1443 	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
1444 		num_chunks = 1;
1445 		should_compress = false;
1446 	} else {
1447 		should_compress = true;
1448 	}
1449 
1450 	nofs_flag = memalloc_nofs_save();
1451 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1452 	memalloc_nofs_restore(nofs_flag);
1453 
1454 	if (!ctx) {
1455 		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
1456 			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1457 			EXTENT_DO_ACCOUNTING;
1458 		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
1459 					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;
1460 
1461 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1462 					     clear_bits, page_ops);
1463 		return -ENOMEM;
1464 	}
1465 
1466 	async_chunk = ctx->chunks;
1467 	atomic_set(&ctx->num_chunks, num_chunks);
1468 
1469 	for (i = 0; i < num_chunks; i++) {
1470 		if (should_compress)
1471 			cur_end = min(end, start + SZ_512K - 1);
1472 		else
1473 			cur_end = end;
1474 
1475 		/*
1476 		 * igrab is called higher up in the call chain, take only the
1477 		 * lightweight reference for the callback lifetime
1478 		 */
1479 		ihold(&inode->vfs_inode);
1480 		async_chunk[i].async_cow = ctx;
1481 		async_chunk[i].inode = &inode->vfs_inode;
1482 		async_chunk[i].start = start;
1483 		async_chunk[i].end = cur_end;
1484 		async_chunk[i].write_flags = write_flags;
1485 		INIT_LIST_HEAD(&async_chunk[i].extents);
1486 
1487 		/*
1488 		 * The locked_page comes all the way from writepage and its
1489 		 * the original page we were actually given.  As we spread
1490 		 * this large delalloc region across multiple async_chunk
1491 		 * structs, only the first struct needs a pointer to locked_page
1492 		 *
1493 		 * This way we don't need racey decisions about who is supposed
1494 		 * to unlock it.
1495 		 */
1496 		if (locked_page) {
1497 			/*
1498 			 * Depending on the compressibility, the pages might or
1499 			 * might not go through async.  We want all of them to
1500 			 * be accounted against wbc once.  Let's do it here
1501 			 * before the paths diverge.  wbc accounting is used
1502 			 * only for foreign writeback detection and doesn't
1503 			 * need full accuracy.  Just account the whole thing
1504 			 * against the first page.
1505 			 */
1506 			wbc_account_cgroup_owner(wbc, locked_page,
1507 						 cur_end - start);
1508 			async_chunk[i].locked_page = locked_page;
1509 			locked_page = NULL;
1510 		} else {
1511 			async_chunk[i].locked_page = NULL;
1512 		}
1513 
1514 		if (blkcg_css != blkcg_root_css) {
1515 			css_get(blkcg_css);
1516 			async_chunk[i].blkcg_css = blkcg_css;
1517 		} else {
1518 			async_chunk[i].blkcg_css = NULL;
1519 		}
1520 
1521 		btrfs_init_work(&async_chunk[i].work, async_cow_start,
1522 				async_cow_submit, async_cow_free);
1523 
1524 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1525 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1526 
1527 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1528 
1529 		*nr_written += nr_pages;
1530 		start = cur_end + 1;
1531 	}
1532 	*page_started = 1;
1533 	return 0;
1534 }
1535 
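/*
 * Worked example for the chunking above (illustrative numbers): a 1.5 MiB
 * delalloc range that may compress is split into
 * DIV_ROUND_UP(1.5M - 1, 512K) = 3 async_chunks of at most 512K each, every
 * chunk is queued as its own work item, and only the first chunk carries
 * the locked_page pointer.
 */
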
static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
				       struct page *locked_page, u64 start,
				       u64 end, int *page_started,
				       unsigned long *nr_written)
{
	int ret;

	ret = cow_file_range(inode, locked_page, start, end, page_started,
			     nr_written, 0);
	if (ret)
		return ret;

	if (*page_started)
		return 0;

	__set_page_dirty_nobuffers(locked_page);
	account_page_redirty(locked_page);
	extent_write_locked_range(&inode->vfs_inode, start, end);
	*page_started = 1;

	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
	struct btrfs_ordered_sum *sums;
	int ret;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	if (ret < 0)
		return ret;
	return 1;
}

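/*
 * Illustrative sketch of how a NOCOW check might consume the tri-state
 * return value above (an assumed caller, not code from this section):
 *
 *	ret = csum_exist_in_range(fs_info, disk_bytenr, num_bytes);
 *	if (ret < 0)
 *		goto error;	(failed to look up the csum tree)
 *	if (ret > 0)
 *		goto must_cow;	(csums exist, overwriting the extent in
 *				 place would leave them stale)
 *	(ret == 0: no csums, NOCOW is safe for this range)
 */
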
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
			   const u64 start, const u64 end,
			   int *page_started, unsigned long *nr_written)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	u64 range_start = start;
	u64 count;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we
	 * did not reserve data space for it, since we thought we could do
	 * NOCOW for the respective file range (either there is a prealloc
	 * extent or the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
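	/*
	 * Worked example (illustrative numbers): for a 1 MiB range of which
	 * 256 KiB carries EXTENT_NORESERVE, count below is 256 KiB, so
	 * bytes_may_use is bumped by 256 KiB before the COW reservation
	 * re-deducts it; for a free space or relocation inode the full
	 * range_bytes (1 MiB) is used instead.
	 */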
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 0, 0, NULL);
	}

	return cow_file_range(inode, locked_page, start, end, page_started,
			      nr_written, 1);
}

struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	bool strict;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/* Output fields. Only set when can_nocow_file_extent() returns 1. */

	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 extent_offset;
	/* Number of bytes that can be written to in NOCOW mode. */
	u64 num_bytes;
};

1672 /*
1673  * Check if we can NOCOW the file extent that the path points to.
1674  * This function may return with the path released, so if the caller needs to
1675  * use the path afterwards it must check whether path->nodes[0] is NULL.
1676  *
1677  * Returns: < 0 on error
1678  *            0 if we can not NOCOW
1679  *            1 if we can NOCOW
1680  */
1681 static int can_nocow_file_extent(struct btrfs_path *path,
1682 				 struct btrfs_key *key,
1683 				 struct btrfs_inode *inode,
1684 				 struct can_nocow_file_extent_args *args)
1685 {
1686 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1687 	struct extent_buffer *leaf = path->nodes[0];
1688 	struct btrfs_root *root = inode->root;
1689 	struct btrfs_file_extent_item *fi;
1690 	u64 extent_end;
1691 	u8 extent_type;
1692 	int can_nocow = 0;
1693 	int ret = 0;
1694 
1695 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1696 	extent_type = btrfs_file_extent_type(leaf, fi);
1697 
1698 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1699 		goto out;
1700 
1701 	/* Can't access these fields unless we know it's not an inline extent. */
1702 	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1703 	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1704 	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1705 
1706 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1707 	    extent_type == BTRFS_FILE_EXTENT_REG)
1708 		goto out;
1709 
1710 	/*
1711 	 * If the extent was created before the generation where the last snapshot
1712 	 * for its subvolume was created, then this implies the extent is shared,
1713 	 * hence we must COW.
1714 	 */
1715 	if (!args->strict &&
1716 	    btrfs_file_extent_generation(leaf, fi) <=
1717 	    btrfs_root_last_snapshot(&root->root_item))
1718 		goto out;
1719 
1720 	/* An explicit hole, must COW. */
1721 	if (args->disk_bytenr == 0)
1722 		goto out;
1723 
1724 	/* Compressed/encrypted/encoded extents must be COWed. */
1725 	if (btrfs_file_extent_compression(leaf, fi) ||
1726 	    btrfs_file_extent_encryption(leaf, fi) ||
1727 	    btrfs_file_extent_other_encoding(leaf, fi))
1728 		goto out;
1729 
1730 	extent_end = btrfs_file_extent_end(path);
1731 
1732 	/*
1733 	 * The following checks can be expensive, as they need to take other
1734 	 * locks and do btree or rbtree searches, so release the path to avoid
1735 	 * blocking other tasks for too long.
1736 	 */
1737 	btrfs_release_path(path);
1738 
1739 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1740 				    key->offset - args->extent_offset,
1741 				    args->disk_bytenr, false, path);
1742 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1743 	if (ret != 0)
1744 		goto out;
1745 
1746 	if (args->free_path) {
1747 		/*
1748 		 * We don't need the path anymore, plus through the
1749 		 * csum_exist_in_range() call below we will end up allocating
1750 		 * another path. So free the path to avoid unnecessary extra
1751 		 * memory usage.
1752 		 */
1753 		btrfs_free_path(path);
1754 		path = NULL;
1755 	}
1756 
1757 	/* If there are pending snapshots for this root, we must COW. */
1758 	if (args->writeback_path && !is_freespace_inode &&
1759 	    atomic_read(&root->snapshot_force_cow))
1760 		goto out;
1761 
1762 	args->disk_bytenr += args->extent_offset;
1763 	args->disk_bytenr += args->start - key->offset;
1764 	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1765 
1766 	/*
1767 	 * Force COW if csums exist in the range. This ensures that csums for a
1768 	 * given extent are either valid or do not exist.
1769 	 */
1770 	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes);
1771 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1772 	if (ret != 0)
1773 		goto out;
1774 
1775 	can_nocow = 1;
1776  out:
1777 	if (args->free_path && path)
1778 		btrfs_free_path(path);
1779 
1780 	return ret < 0 ? ret : can_nocow;
1781 }
1782 
1783 /*
1784  * NOCOW writeback callback. This checks for snapshots or COW copies of the
1785  * extents that exist in the file, and COWs the file as required.
1786  *
1787  * If no COW copies or snapshots exist, we write directly to the existing
1788  * blocks on disk.
1789  */
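/*
 * The loop below walks the file extent items covering [start, end]: subranges
 * for which can_nocow_file_extent() returns 1 get a NOCOW (or PREALLOC)
 * ordered extent created against the existing disk bytes, while subranges
 * that must be COW'ed are accumulated starting at cow_start and flushed
 * through fallback_to_cow().
 */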
1790 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1791 				       struct page *locked_page,
1792 				       const u64 start, const u64 end,
1793 				       int *page_started,
1794 				       unsigned long *nr_written)
1795 {
1796 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1797 	struct btrfs_root *root = inode->root;
1798 	struct btrfs_path *path;
1799 	u64 cow_start = (u64)-1;
1800 	u64 cur_offset = start;
1801 	int ret;
1802 	bool check_prev = true;
1803 	u64 ino = btrfs_ino(inode);
1804 	struct btrfs_block_group *bg;
1805 	bool nocow = false;
1806 	struct can_nocow_file_extent_args nocow_args = { 0 };
1807 
1808 	path = btrfs_alloc_path();
1809 	if (!path) {
1810 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1811 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1812 					     EXTENT_DO_ACCOUNTING |
1813 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1814 					     PAGE_START_WRITEBACK |
1815 					     PAGE_END_WRITEBACK);
1816 		return -ENOMEM;
1817 	}
1818 
1819 	nocow_args.end = end;
1820 	nocow_args.writeback_path = true;
1821 
1822 	while (1) {
1823 		struct btrfs_key found_key;
1824 		struct btrfs_file_extent_item *fi;
1825 		struct extent_buffer *leaf;
1826 		u64 extent_end;
1827 		u64 ram_bytes;
1828 		u64 nocow_end;
1829 		int extent_type;
1830 
1831 		nocow = false;
1832 
1833 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1834 					       cur_offset, 0);
1835 		if (ret < 0)
1836 			goto error;
1837 
1838 		/*
1839 		 * If there is no extent for our range when doing the initial
1840 		 * search, then go back to the previous slot as it will be the
1841 		 * one containing the search offset
1842 		 */
1843 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1844 			leaf = path->nodes[0];
1845 			btrfs_item_key_to_cpu(leaf, &found_key,
1846 					      path->slots[0] - 1);
1847 			if (found_key.objectid == ino &&
1848 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1849 				path->slots[0]--;
1850 		}
1851 		check_prev = false;
1852 next_slot:
1853 		/* Go to next leaf if we have exhausted the current one */
1854 		leaf = path->nodes[0];
1855 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1856 			ret = btrfs_next_leaf(root, path);
1857 			if (ret < 0) {
1858 				if (cow_start != (u64)-1)
1859 					cur_offset = cow_start;
1860 				goto error;
1861 			}
1862 			if (ret > 0)
1863 				break;
1864 			leaf = path->nodes[0];
1865 		}
1866 
1867 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1868 
1869 		/* Didn't find anything for our INO */
1870 		if (found_key.objectid > ino)
1871 			break;
1872 		/*
1873 		 * Keep searching until we find an EXTENT_DATA key or there are
1874 		 * no more extents for this inode.
1875 		 */
1876 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
1877 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
1878 			path->slots[0]++;
1879 			goto next_slot;
1880 		}
1881 
1882 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
1883 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1884 		    found_key.offset > end)
1885 			break;
1886 
1887 		/*
1888 		 * If the found extent starts after requested offset, then
1889 		 * adjust extent_end to be right before this extent begins
1890 		 */
1891 		if (found_key.offset > cur_offset) {
1892 			extent_end = found_key.offset;
1893 			extent_type = 0;
1894 			goto out_check;
1895 		}
1896 
1897 		/*
1898 		 * Found an extent which begins before our range and potentially
1899 		 * intersects it.
1900 		 */
1901 		fi = btrfs_item_ptr(leaf, path->slots[0],
1902 				    struct btrfs_file_extent_item);
1903 		extent_type = btrfs_file_extent_type(leaf, fi);
1904 		/* If this is triggered then we have a memory corruption. */
1905 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
1906 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
1907 			ret = -EUCLEAN;
1908 			goto error;
1909 		}
1910 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1911 		extent_end = btrfs_file_extent_end(path);
1912 
1913 		/*
1914 		 * If the extent we got ends before our current offset, skip to
1915 		 * the next extent.
1916 		 */
1917 		if (extent_end <= cur_offset) {
1918 			path->slots[0]++;
1919 			goto next_slot;
1920 		}
1921 
1922 		nocow_args.start = cur_offset;
1923 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
1924 		if (ret < 0) {
1925 			if (cow_start != (u64)-1)
1926 				cur_offset = cow_start;
1927 			goto error;
1928 		} else if (ret == 0) {
1929 			goto out_check;
1930 		}
1931 
1932 		ret = 0;
1933 		bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
1934 		if (bg)
1935 			nocow = true;
1936 out_check:
1937 		/*
1938 		 * If nocow is false then record the beginning of the range
1939 		 * that needs to be COWed
1940 		 */
1941 		if (!nocow) {
1942 			if (cow_start == (u64)-1)
1943 				cow_start = cur_offset;
1944 			cur_offset = extent_end;
1945 			if (cur_offset > end)
1946 				break;
1947 			if (!path->nodes[0])
1948 				continue;
1949 			path->slots[0]++;
1950 			goto next_slot;
1951 		}
1952 
1953 		/*
1954 		 * COW the range from cow_start to found_key.offset - 1, as the
1955 		 * key contains the beginning of the first extent that can be
1956 		 * NOCOW'ed, which follows a range that needs to be COW'ed.
1957 		 */
1958 		if (cow_start != (u64)-1) {
1959 			ret = fallback_to_cow(inode, locked_page,
1960 					      cow_start, found_key.offset - 1,
1961 					      page_started, nr_written);
1962 			if (ret)
1963 				goto error;
1964 			cow_start = (u64)-1;
1965 		}
1966 
1967 		nocow_end = cur_offset + nocow_args.num_bytes - 1;
1968 
1969 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1970 			u64 orig_start = found_key.offset - nocow_args.extent_offset;
1971 			struct extent_map *em;
1972 
1973 			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
1974 					  orig_start,
1975 					  nocow_args.disk_bytenr, /* block_start */
1976 					  nocow_args.num_bytes, /* block_len */
1977 					  nocow_args.disk_num_bytes, /* orig_block_len */
1978 					  ram_bytes, BTRFS_COMPRESS_NONE,
1979 					  BTRFS_ORDERED_PREALLOC);
1980 			if (IS_ERR(em)) {
1981 				ret = PTR_ERR(em);
1982 				goto error;
1983 			}
1984 			free_extent_map(em);
1985 			ret = btrfs_add_ordered_extent(inode,
1986 					cur_offset, nocow_args.num_bytes,
1987 					nocow_args.num_bytes,
1988 					nocow_args.disk_bytenr,
1989 					nocow_args.num_bytes, 0,
1990 					1 << BTRFS_ORDERED_PREALLOC,
1991 					BTRFS_COMPRESS_NONE);
1992 			if (ret) {
1993 				btrfs_drop_extent_cache(inode, cur_offset,
1994 							nocow_end, 0);
1995 				goto error;
1996 			}
1997 		} else {
1998 			ret = btrfs_add_ordered_extent(inode, cur_offset,
1999 						       nocow_args.num_bytes,
2000 						       nocow_args.num_bytes,
2001 						       nocow_args.disk_bytenr,
2002 						       nocow_args.num_bytes,
2003 						       0,
2004 						       1 << BTRFS_ORDERED_NOCOW,
2005 						       BTRFS_COMPRESS_NONE);
2006 			if (ret)
2007 				goto error;
2008 		}
2009 
2010 		if (nocow) {
2011 			btrfs_dec_nocow_writers(bg);
2012 			nocow = false;
2013 		}
2014 
2015 		if (btrfs_is_data_reloc_root(root))
2016 			/*
2017 			 * The error is handled later, as we must prevent
2018 			 * extent_clear_unlock_delalloc() in the error handler
2019 			 * from freeing metadata of the created ordered extent.
2020 			 */
2021 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
2022 						      nocow_args.num_bytes);
2023 
2024 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2025 					     locked_page, EXTENT_LOCKED |
2026 					     EXTENT_DELALLOC |
2027 					     EXTENT_CLEAR_DATA_RESV,
2028 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2029 
2030 		cur_offset = extent_end;
2031 
2032 		/*
2033 		 * If btrfs_reloc_clone_csums() failed, it is now safe to call
2034 		 * the error handler, as metadata for the created ordered extent
2035 		 * will only be freed by btrfs_finish_ordered_io().
2036 		 */
2037 		if (ret)
2038 			goto error;
2039 		if (cur_offset > end)
2040 			break;
2041 	}
2042 	btrfs_release_path(path);
2043 
2044 	if (cur_offset <= end && cow_start == (u64)-1)
2045 		cow_start = cur_offset;
2046 
2047 	if (cow_start != (u64)-1) {
2048 		cur_offset = end;
2049 		ret = fallback_to_cow(inode, locked_page, cow_start, end,
2050 				      page_started, nr_written);
2051 		if (ret)
2052 			goto error;
2053 	}
2054 
2055 error:
2056 	if (nocow)
2057 		btrfs_dec_nocow_writers(bg);
2058 
2059 	if (ret && cur_offset < end)
2060 		extent_clear_unlock_delalloc(inode, cur_offset, end,
2061 					     locked_page, EXTENT_LOCKED |
2062 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2063 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2064 					     PAGE_START_WRITEBACK |
2065 					     PAGE_END_WRITEBACK);
2066 	btrfs_free_path(path);
2067 	return ret;
2068 }
2069 
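/*
 * NOCOW is only attempted on inodes with the NODATACOW or PREALLOC flag set,
 * and even then not for ranges that were marked for defrag.
 */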
2070 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2071 {
2072 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2073 		if (inode->defrag_bytes &&
2074 		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
2075 				   0, NULL))
2076 			return false;
2077 		return true;
2078 	}
2079 	return false;
2080 }
2081 
2082 /*
2083  * Function to process delayed allocation (create CoW) for ranges which are
2084  * being touched for the first time.
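 *
 * Depending on the inode flags and whether the filesystem is zoned, the range
 * is handed to run_delalloc_nocow(), run_delalloc_zoned(), cow_file_range() or
 * the async compression path in cow_file_range_async().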
2085  */
2086 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2087 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
2088 		struct writeback_control *wbc)
2089 {
2090 	int ret;
2091 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2092 
2093 	/*
2094 	 * The range must cover part of the @locked_page, or the returned
2095 	 * @page_started can confuse the caller.
2096 	 */
2097 	ASSERT(!(end <= page_offset(locked_page) ||
2098 		 start >= page_offset(locked_page) + PAGE_SIZE));
2099 
2100 	if (should_nocow(inode, start, end)) {
2101 		/*
2102 		 * Normally on a zoned device we're only doing COW writes, but
2103 		 * in case of relocation on a zoned filesystem we have taken the
2104 		 * precaution that we're only writing sequentially. It's safe to
2105 		 * use run_delalloc_nocow() here, like for regular preallocated
2106 		 * inodes.
2107 		 */
2108 		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
2109 		ret = run_delalloc_nocow(inode, locked_page, start, end,
2110 					 page_started, nr_written);
2111 	} else if (!btrfs_inode_can_compress(inode) ||
2112 		   !inode_need_compress(inode, start, end)) {
2113 		if (zoned)
2114 			ret = run_delalloc_zoned(inode, locked_page, start, end,
2115 						 page_started, nr_written);
2116 		else
2117 			ret = cow_file_range(inode, locked_page, start, end,
2118 					     page_started, nr_written, 1);
2119 	} else {
2120 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
2121 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
2122 					   page_started, nr_written);
2123 	}
2124 	ASSERT(ret <= 0);
2125 	if (ret)
2126 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2127 					      end - start + 1);
2128 	return ret;
2129 }
2130 
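/*
 * Called when a delalloc extent state is split at @split. Unless the two
 * halves together still need the same number of BTRFS_MAX_EXTENT_SIZE sized
 * pieces as the original range did, one more outstanding extent has to be
 * accounted for.
 */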
2131 void btrfs_split_delalloc_extent(struct inode *inode,
2132 				 struct extent_state *orig, u64 split)
2133 {
2134 	u64 size;
2135 
2136 	/* not delalloc, ignore it */
2137 	if (!(orig->state & EXTENT_DELALLOC))
2138 		return;
2139 
2140 	size = orig->end - orig->start + 1;
2141 	if (size > BTRFS_MAX_EXTENT_SIZE) {
2142 		u32 num_extents;
2143 		u64 new_size;
2144 
2145 		/*
2146 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2147 		 * applies here, just in reverse.
2148 		 */
2149 		new_size = orig->end - split + 1;
2150 		num_extents = count_max_extents(new_size);
2151 		new_size = split - orig->start;
2152 		num_extents += count_max_extents(new_size);
2153 		if (count_max_extents(size) >= num_extents)
2154 			return;
2155 	}
2156 
2157 	spin_lock(&BTRFS_I(inode)->lock);
2158 	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
2159 	spin_unlock(&BTRFS_I(inode)->lock);
2160 }
2161 
2162 /*
2163  * Handle merged delayed allocation extents, keeping track of new extents that
2164  * are just merged onto old extents (such as when we are doing sequential
2165  * writes) so we can properly account for the metadata space we'll need.
2166  */
2167 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
2168 				 struct extent_state *other)
2169 {
2170 	u64 new_size, old_size;
2171 	u32 num_extents;
2172 
2173 	/* not delalloc, ignore it */
2174 	if (!(other->state & EXTENT_DELALLOC))
2175 		return;
2176 
2177 	if (new->start > other->start)
2178 		new_size = new->end - other->start + 1;
2179 	else
2180 		new_size = other->end - new->start + 1;
2181 
2182 	/* we're not bigger than the max, unreserve the space and go */
2183 	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
2184 		spin_lock(&BTRFS_I(inode)->lock);
2185 		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2186 		spin_unlock(&BTRFS_I(inode)->lock);
2187 		return;
2188 	}
2189 
2190 	/*
2191 	 * We have to add up either side to figure out how many extents were
2192 	 * accounted for before we merged into one big extent.  If the number of
2193 	 * extents we accounted for is <= the amount we need for the new range
2194 	 * then we can return, otherwise drop.  Think of it like this
2195 	 *
2196 	 * [ 4k][MAX_SIZE]
2197 	 *
2198 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2199 	 * need 2 outstanding extents, on one side we have 1 and the other side
2200 	 * we have 1 so they are == and we can return.  But in this case
2201 	 *
2202 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2203 	 *
2204 	 * Each range on their own accounts for 2 extents, but merged together
2205 	 * they are only 3 extents worth of accounting, so we need to drop in
2206 	 * this case.
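	 *
	 * With concrete numbers for the second case: each side accounts for
	 * count_max_extents(MAX_SIZE + 4k) == 2 extents, 4 in total, while
	 * the merged range of size 2 * MAX_SIZE + 8k only needs 3, so one
	 * outstanding extent is dropped below.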
2207 	 */
2208 	old_size = other->end - other->start + 1;
2209 	num_extents = count_max_extents(old_size);
2210 	old_size = new->end - new->start + 1;
2211 	num_extents += count_max_extents(old_size);
2212 	if (count_max_extents(new_size) >= num_extents)
2213 		return;
2214 
2215 	spin_lock(&BTRFS_I(inode)->lock);
2216 	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
2217 	spin_unlock(&BTRFS_I(inode)->lock);
2218 }
2219 
2220 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2221 				      struct inode *inode)
2222 {
2223 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2224 
2225 	spin_lock(&root->delalloc_lock);
2226 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
2227 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
2228 			      &root->delalloc_inodes);
2229 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2230 			&BTRFS_I(inode)->runtime_flags);
2231 		root->nr_delalloc_inodes++;
2232 		if (root->nr_delalloc_inodes == 1) {
2233 			spin_lock(&fs_info->delalloc_root_lock);
2234 			BUG_ON(!list_empty(&root->delalloc_root));
2235 			list_add_tail(&root->delalloc_root,
2236 				      &fs_info->delalloc_roots);
2237 			spin_unlock(&fs_info->delalloc_root_lock);
2238 		}
2239 	}
2240 	spin_unlock(&root->delalloc_lock);
2241 }
2242 
2243 
2244 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2245 				struct btrfs_inode *inode)
2246 {
2247 	struct btrfs_fs_info *fs_info = root->fs_info;
2248 
2249 	if (!list_empty(&inode->delalloc_inodes)) {
2250 		list_del_init(&inode->delalloc_inodes);
2251 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2252 			  &inode->runtime_flags);
2253 		root->nr_delalloc_inodes--;
2254 		if (!root->nr_delalloc_inodes) {
2255 			ASSERT(list_empty(&root->delalloc_inodes));
2256 			spin_lock(&fs_info->delalloc_root_lock);
2257 			BUG_ON(list_empty(&root->delalloc_root));
2258 			list_del_init(&root->delalloc_root);
2259 			spin_unlock(&fs_info->delalloc_root_lock);
2260 		}
2261 	}
2262 }
2263 
2264 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2265 				     struct btrfs_inode *inode)
2266 {
2267 	spin_lock(&root->delalloc_lock);
2268 	__btrfs_del_delalloc_inode(root, inode);
2269 	spin_unlock(&root->delalloc_lock);
2270 }
2271 
2272 /*
2273  * Properly track delayed allocation bytes in the inode and maintain the
2274  * list of inodes that have pending delalloc work to be done.
2275  */
2276 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
2277 			       unsigned *bits)
2278 {
2279 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2280 
2281 	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
2282 		WARN_ON(1);
2283 	/*
2284 	 * set_bit and clear_bit hooks normally require _irqsave/restore
2285 	 * but in this case, we are only testing for the DELALLOC
2286 	 * bit, which is only set or cleared with irqs on.
2287 	 */
2288 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2289 		struct btrfs_root *root = BTRFS_I(inode)->root;
2290 		u64 len = state->end + 1 - state->start;
2291 		u32 num_extents = count_max_extents(len);
2292 		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
2293 
2294 		spin_lock(&BTRFS_I(inode)->lock);
2295 		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
2296 		spin_unlock(&BTRFS_I(inode)->lock);
2297 
2298 		/* For sanity tests */
2299 		if (btrfs_is_testing(fs_info))
2300 			return;
2301 
2302 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2303 					 fs_info->delalloc_batch);
2304 		spin_lock(&BTRFS_I(inode)->lock);
2305 		BTRFS_I(inode)->delalloc_bytes += len;
2306 		if (*bits & EXTENT_DEFRAG)
2307 			BTRFS_I(inode)->defrag_bytes += len;
2308 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2309 					 &BTRFS_I(inode)->runtime_flags))
2310 			btrfs_add_delalloc_inodes(root, inode);
2311 		spin_unlock(&BTRFS_I(inode)->lock);
2312 	}
2313 
2314 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2315 	    (*bits & EXTENT_DELALLOC_NEW)) {
2316 		spin_lock(&BTRFS_I(inode)->lock);
2317 		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
2318 			state->start;
2319 		spin_unlock(&BTRFS_I(inode)->lock);
2320 	}
2321 }
2322 
2323 /*
2324  * Once a range is no longer delalloc this function ensures that proper
2325  * accounting happens.
2326  */
2327 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
2328 				 struct extent_state *state, unsigned *bits)
2329 {
2330 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
2331 	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
2332 	u64 len = state->end + 1 - state->start;
2333 	u32 num_extents = count_max_extents(len);
2334 
2335 	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
2336 		spin_lock(&inode->lock);
2337 		inode->defrag_bytes -= len;
2338 		spin_unlock(&inode->lock);
2339 	}
2340 
2341 	/*
2342 	 * set_bit and clear_bit hooks normally require _irqsave/restore
2343 	 * but in this case, we are only testing for the DELALLOC
2344 	 * bit, which is only set or cleared with irqs on.
2345 	 */
2346 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2347 		struct btrfs_root *root = inode->root;
2348 		bool do_list = !btrfs_is_free_space_inode(inode);
2349 
2350 		spin_lock(&inode->lock);
2351 		btrfs_mod_outstanding_extents(inode, -num_extents);
2352 		spin_unlock(&inode->lock);
2353 
2354 		/*
2355 		 * We don't reserve metadata space for space cache inodes so we
2356 		 * don't need to call delalloc_release_metadata if there is an
2357 		 * error.
2358 		 */
2359 		if (*bits & EXTENT_CLEAR_META_RESV &&
2360 		    root != fs_info->tree_root)
2361 			btrfs_delalloc_release_metadata(inode, len, false);
2362 
2363 		/* For sanity tests. */
2364 		if (btrfs_is_testing(fs_info))
2365 			return;
2366 
2367 		if (!btrfs_is_data_reloc_root(root) &&
2368 		    do_list && !(state->state & EXTENT_NORESERVE) &&
2369 		    (*bits & EXTENT_CLEAR_DATA_RESV))
2370 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2371 
2372 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2373 					 fs_info->delalloc_batch);
2374 		spin_lock(&inode->lock);
2375 		inode->delalloc_bytes -= len;
2376 		if (do_list && inode->delalloc_bytes == 0 &&
2377 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2378 					&inode->runtime_flags))
2379 			btrfs_del_delalloc_inode(root, inode);
2380 		spin_unlock(&inode->lock);
2381 	}
2382 
2383 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2384 	    (*bits & EXTENT_DELALLOC_NEW)) {
2385 		spin_lock(&inode->lock);
2386 		ASSERT(inode->new_delalloc_bytes >= len);
2387 		inode->new_delalloc_bytes -= len;
2388 		if (*bits & EXTENT_ADD_INODE_BYTES)
2389 			inode_add_bytes(&inode->vfs_inode, len);
2390 		spin_unlock(&inode->lock);
2391 	}
2392 }
2393 
2394 /*
2395  * In order to insert checksums into the metadata in large chunks,
2396  * we wait until bio submission time.  All the pages in the bio are
2397  * checksummed and the sums are attached to the ordered extent record.
2398  *
2399  * At IO completion time the csums attached to the ordered extent record
2400  * are inserted into the btree.
2401  */
2402 static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
2403 					   u64 dio_file_offset)
2404 {
2405 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
2406 }
2407 
2408 /*
2409  * Split an extent_map at [start, start + len]
2410  *
2411  * This function is intended to be used only for extract_ordered_extent().
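 *
 * The pinned extent_map covering the range is replaced by up to three maps
 * with the same on-disk geometry: the first @pre bytes, the middle part and
 * the last @post bytes, where only the non-empty pieces are inserted.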
2412  */
2413 static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
2414 			  u64 pre, u64 post)
2415 {
2416 	struct extent_map_tree *em_tree = &inode->extent_tree;
2417 	struct extent_map *em;
2418 	struct extent_map *split_pre = NULL;
2419 	struct extent_map *split_mid = NULL;
2420 	struct extent_map *split_post = NULL;
2421 	int ret = 0;
2422 	unsigned long flags;
2423 
2424 	/* Sanity check */
2425 	if (pre == 0 && post == 0)
2426 		return 0;
2427 
2428 	split_pre = alloc_extent_map();
2429 	if (pre)
2430 		split_mid = alloc_extent_map();
2431 	if (post)
2432 		split_post = alloc_extent_map();
2433 	if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
2434 		ret = -ENOMEM;
2435 		goto out;
2436 	}
2437 
2438 	ASSERT(pre + post < len);
2439 
2440 	lock_extent(&inode->io_tree, start, start + len - 1);
2441 	write_lock(&em_tree->lock);
2442 	em = lookup_extent_mapping(em_tree, start, len);
2443 	if (!em) {
2444 		ret = -EIO;
2445 		goto out_unlock;
2446 	}
2447 
2448 	ASSERT(em->len == len);
2449 	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
2450 	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
2451 	ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
2452 	ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
2453 	ASSERT(!list_empty(&em->list));
2454 
2455 	flags = em->flags;
2456 	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
2457 
2458 	/* First, replace the em with a new extent_map starting from em->start */
2459 	split_pre->start = em->start;
2460 	split_pre->len = (pre ? pre : em->len - post);
2461 	split_pre->orig_start = split_pre->start;
2462 	split_pre->block_start = em->block_start;
2463 	split_pre->block_len = split_pre->len;
2464 	split_pre->orig_block_len = split_pre->block_len;
2465 	split_pre->ram_bytes = split_pre->len;
2466 	split_pre->flags = flags;
2467 	split_pre->compress_type = em->compress_type;
2468 	split_pre->generation = em->generation;
2469 
2470 	replace_extent_mapping(em_tree, em, split_pre, 1);
2471 
2472 	/*
2473 	 * Now we only have an extent_map at:
2474 	 *     [em->start, em->start + pre] if pre != 0
2475 	 *     [em->start, em->start + em->len - post] if pre == 0
2476 	 */
2477 
2478 	if (pre) {
2479 		/* Insert the middle extent_map */
2480 		split_mid->start = em->start + pre;
2481 		split_mid->len = em->len - pre - post;
2482 		split_mid->orig_start = split_mid->start;
2483 		split_mid->block_start = em->block_start + pre;
2484 		split_mid->block_len = split_mid->len;
2485 		split_mid->orig_block_len = split_mid->block_len;
2486 		split_mid->ram_bytes = split_mid->len;
2487 		split_mid->flags = flags;
2488 		split_mid->compress_type = em->compress_type;
2489 		split_mid->generation = em->generation;
2490 		add_extent_mapping(em_tree, split_mid, 1);
2491 	}
2492 
2493 	if (post) {
2494 		split_post->start = em->start + em->len - post;
2495 		split_post->len = post;
2496 		split_post->orig_start = split_post->start;
2497 		split_post->block_start = em->block_start + em->len - post;
2498 		split_post->block_len = split_post->len;
2499 		split_post->orig_block_len = split_post->block_len;
2500 		split_post->ram_bytes = split_post->len;
2501 		split_post->flags = flags;
2502 		split_post->compress_type = em->compress_type;
2503 		split_post->generation = em->generation;
2504 		add_extent_mapping(em_tree, split_post, 1);
2505 	}
2506 
2507 	/* Once for us */
2508 	free_extent_map(em);
2509 	/* Once for the tree */
2510 	free_extent_map(em);
2511 
2512 out_unlock:
2513 	write_unlock(&em_tree->lock);
2514 	unlock_extent(&inode->io_tree, start, start + len - 1);
2515 out:
2516 	free_extent_map(split_pre);
2517 	free_extent_map(split_mid);
2518 	free_extent_map(split_post);
2519 
2520 	return ret;
2521 }
2522 
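/*
 * A zone append bio may cover only part of the ordered extent it was issued
 * for. In that case split the ordered extent, and the pinned extent_map
 * backing it, so that the part covered by this bio can be completed on its
 * own.
 */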
2523 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
2524 					   struct bio *bio, loff_t file_offset)
2525 {
2526 	struct btrfs_ordered_extent *ordered;
2527 	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
2528 	u64 file_len;
2529 	u64 len = bio->bi_iter.bi_size;
2530 	u64 end = start + len;
2531 	u64 ordered_end;
2532 	u64 pre, post;
2533 	int ret = 0;
2534 
2535 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
2536 	if (WARN_ON_ONCE(!ordered))
2537 		return BLK_STS_IOERR;
2538 
2539 	/* No need to split */
2540 	if (ordered->disk_num_bytes == len)
2541 		goto out;
2542 
2543 	/* We cannot split an ordered extent that has already been end_bio'd */
2544 	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
2545 		ret = -EINVAL;
2546 		goto out;
2547 	}
2548 
2549 	/* We cannot split a compressed ordered extent */
2550 	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
2551 		ret = -EINVAL;
2552 		goto out;
2553 	}
2554 
2555 	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
2556 	/* bio must be in one ordered extent */
2557 	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
2558 		ret = -EINVAL;
2559 		goto out;
2560 	}
2561 
2562 	/* Checksum list should be empty */
2563 	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
2564 		ret = -EINVAL;
2565 		goto out;
2566 	}
2567 
2568 	file_len = ordered->num_bytes;
2569 	pre = start - ordered->disk_bytenr;
2570 	post = ordered_end - end;
2571 
2572 	ret = btrfs_split_ordered_extent(ordered, pre, post);
2573 	if (ret)
2574 		goto out;
2575 	ret = split_zoned_em(inode, file_offset, file_len, pre, post);
2576 
2577 out:
2578 	btrfs_put_ordered_extent(ordered);
2579 
2580 	return errno_to_blk_status(ret);
2581 }
2582 
2583 /*
2584  * extent_io.c submission hook. This does the right thing for csum calculation
2585  * on write, or reading the csums from the tree before a read.
2586  *
2587  * Rules about async/sync submit,
2588  * a) read:				sync submit
2589  *
2590  * b) write without checksum:		sync submit
2591  *
2592  * c) write with checksum:
2593  *    c-1) if bio is issued by fsync:	sync submit
2594  *         (sync_writers != 0)
2595  *
2596  *    c-2) if root is reloc root:	sync submit
2597  *         (only in case of buffered IO)
2598  *
2599  *    c-3) otherwise:			async submit
2600  */
2601 void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
2602 			   int mirror_num, enum btrfs_compression_type compress_type)
2603 {
2604 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2605 	struct btrfs_root *root = BTRFS_I(inode)->root;
2606 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2607 	blk_status_t ret = 0;
2608 	int skip_sum;
2609 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2610 
2611 	skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
2612 		test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2613 
2614 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2615 		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2616 
2617 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
2618 		struct page *page = bio_first_bvec_all(bio)->bv_page;
2619 		loff_t file_offset = page_offset(page);
2620 
2621 		ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
2622 		if (ret)
2623 			goto out;
2624 	}
2625 
2626 	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
2627 		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2628 		if (ret)
2629 			goto out;
2630 
2631 		if (compress_type != BTRFS_COMPRESS_NONE) {
2632 			/*
2633 			 * btrfs_submit_compressed_read will handle completing
2634 			 * the bio if there were any errors, so just return
2635 			 * here.
2636 			 */
2637 			btrfs_submit_compressed_read(inode, bio, mirror_num);
2638 			return;
2639 		} else {
2640 			/*
2641 			 * Lookup bio sums does extra checks around whether we
2642 			 * need to csum or not, which is why we ignore skip_sum
2643 			 * here.
2644 			 */
2645 			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2646 			if (ret)
2647 				goto out;
2648 		}
2649 		goto mapit;
2650 	} else if (async && !skip_sum) {
2651 		/* csum items have already been cloned */
2652 		if (btrfs_is_data_reloc_root(root))
2653 			goto mapit;
2654 		/* we're doing a write, do the async checksumming */
2655 		ret = btrfs_wq_submit_bio(inode, bio, mirror_num,
2656 					  0, btrfs_submit_bio_start);
2657 		goto out;
2658 	} else if (!skip_sum) {
2659 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
2660 		if (ret)
2661 			goto out;
2662 	}
2663 
2664 mapit:
2665 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
2666 
2667 out:
2668 	if (ret) {
2669 		bio->bi_status = ret;
2670 		bio_endio(bio);
2671 	}
2672 }
2673 
2674 /*
2675  * Given a list of ordered sums, record them in the inode. This happens
2676  * at IO completion time based on sums calculated at bio submission time.
2677  */
2678 static int add_pending_csums(struct btrfs_trans_handle *trans,
2679 			     struct list_head *list)
2680 {
2681 	struct btrfs_ordered_sum *sum;
2682 	struct btrfs_root *csum_root = NULL;
2683 	int ret;
2684 
2685 	list_for_each_entry(sum, list, list) {
2686 		trans->adding_csums = true;
2687 		if (!csum_root)
2688 			csum_root = btrfs_csum_root(trans->fs_info,
2689 						    sum->bytenr);
2690 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2691 		trans->adding_csums = false;
2692 		if (ret)
2693 			return ret;
2694 	}
2695 	return 0;
2696 }
2697 
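/*
 * Walk the extent maps in the file range [start, start + len) and set the
 * EXTENT_DELALLOC_NEW bit on every part of the range that maps to a hole.
 */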
2698 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2699 					 const u64 start,
2700 					 const u64 len,
2701 					 struct extent_state **cached_state)
2702 {
2703 	u64 search_start = start;
2704 	const u64 end = start + len - 1;
2705 
2706 	while (search_start < end) {
2707 		const u64 search_len = end - search_start + 1;
2708 		struct extent_map *em;
2709 		u64 em_len;
2710 		int ret = 0;
2711 
2712 		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2713 		if (IS_ERR(em))
2714 			return PTR_ERR(em);
2715 
2716 		if (em->block_start != EXTENT_MAP_HOLE)
2717 			goto next;
2718 
2719 		em_len = em->len;
2720 		if (em->start < search_start)
2721 			em_len -= search_start - em->start;
2722 		if (em_len > search_len)
2723 			em_len = search_len;
2724 
2725 		ret = set_extent_bit(&inode->io_tree, search_start,
2726 				     search_start + em_len - 1,
2727 				     EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
2728 				     GFP_NOFS, NULL);
2729 next:
2730 		search_start = extent_map_end(em);
2731 		free_extent_map(em);
2732 		if (ret)
2733 			return ret;
2734 	}
2735 	return 0;
2736 }
2737 
2738 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2739 			      unsigned int extra_bits,
2740 			      struct extent_state **cached_state)
2741 {
2742 	WARN_ON(PAGE_ALIGNED(end));
2743 
2744 	if (start >= i_size_read(&inode->vfs_inode) &&
2745 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2746 		/*
2747 		 * There can't be any extents following eof in this case so just
2748 		 * set the delalloc new bit for the range directly.
2749 		 */
2750 		extra_bits |= EXTENT_DELALLOC_NEW;
2751 	} else {
2752 		int ret;
2753 
2754 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2755 						    end + 1 - start,
2756 						    cached_state);
2757 		if (ret)
2758 			return ret;
2759 	}
2760 
2761 	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
2762 				   cached_state);
2763 }
2764 
2765 /* See btrfs_writepage_cow_fixup() for details on why this is required. */
2766 struct btrfs_writepage_fixup {
2767 	struct page *page;
2768 	struct inode *inode;
2769 	struct btrfs_work work;
2770 };
2771 
2772 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2773 {
2774 	struct btrfs_writepage_fixup *fixup;
2775 	struct btrfs_ordered_extent *ordered;
2776 	struct extent_state *cached_state = NULL;
2777 	struct extent_changeset *data_reserved = NULL;
2778 	struct page *page;
2779 	struct btrfs_inode *inode;
2780 	u64 page_start;
2781 	u64 page_end;
2782 	int ret = 0;
2783 	bool free_delalloc_space = true;
2784 
2785 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
2786 	page = fixup->page;
2787 	inode = BTRFS_I(fixup->inode);
2788 	page_start = page_offset(page);
2789 	page_end = page_offset(page) + PAGE_SIZE - 1;
2790 
2791 	/*
2792 	 * This is similar to page_mkwrite: we need to reserve the space before
2793 	 * we take the page lock.
2794 	 */
2795 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2796 					   PAGE_SIZE);
2797 again:
2798 	lock_page(page);
2799 
2800 	/*
2801 	 * Before we queued this fixup, we took a reference on the page.
2802 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2803 	 * address space.
2804 	 */
2805 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2806 		/*
2807 		 * Unfortunately this is a little tricky, either
2808 		 *
2809 		 * 1) We got here and our page had already been dealt with and
2810 		 *    we reserved our space, thus ret == 0, so we need to just
2811 		 *    drop our space reservation and bail.  This can happen the
2812 		 *    first time we come into the fixup worker, or could happen
2813 		 *    while waiting for the ordered extent.
2814 		 * 2) Our page was already dealt with, but we happened to get an
2815 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2816 		 *    this case we obviously don't have anything to release, but
2817 		 *    because the page was already dealt with we don't want to
2818 		 *    mark the page with an error, so make sure we're resetting
2819 		 *    ret to 0.  This is why we have this check _before_ the ret
2820 		 *    check, because we do not want to have a surprise ENOSPC
2821 		 *    when the page was already properly dealt with.
2822 		 */
2823 		if (!ret) {
2824 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2825 			btrfs_delalloc_release_space(inode, data_reserved,
2826 						     page_start, PAGE_SIZE,
2827 						     true);
2828 		}
2829 		ret = 0;
2830 		goto out_page;
2831 	}
2832 
2833 	/*
2834 	 * We can't mess with the page state unless it is locked, so now that
2835 	 * it is locked bail if we failed to make our space reservation.
2836 	 */
2837 	if (ret)
2838 		goto out_page;
2839 
2840 	lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
2841 
2842 	/* already ordered? We're done */
2843 	if (PageOrdered(page))
2844 		goto out_reserved;
2845 
2846 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2847 	if (ordered) {
2848 		unlock_extent_cached(&inode->io_tree, page_start, page_end,
2849 				     &cached_state);
2850 		unlock_page(page);
2851 		btrfs_start_ordered_extent(ordered, 1);
2852 		btrfs_put_ordered_extent(ordered);
2853 		goto again;
2854 	}
2855 
2856 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2857 					&cached_state);
2858 	if (ret)
2859 		goto out_reserved;
2860 
2861 	/*
2862 	 * Everything went as planned, we're now the owner of a dirty page with
2863 	 * delayed allocation bits set and space reserved for our COW
2864 	 * destination.
2865 	 *
2866 	 * The page was dirty when we started, nothing should have cleaned it.
2867 	 */
2868 	BUG_ON(!PageDirty(page));
2869 	free_delalloc_space = false;
2870 out_reserved:
2871 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2872 	if (free_delalloc_space)
2873 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2874 					     PAGE_SIZE, true);
2875 	unlock_extent_cached(&inode->io_tree, page_start, page_end,
2876 			     &cached_state);
2877 out_page:
2878 	if (ret) {
2879 		/*
2880 		 * We hit ENOSPC or other errors.  Update the mapping and page
2881 		 * to reflect the errors and clean the page.
2882 		 */
2883 		mapping_set_error(page->mapping, ret);
2884 		end_extent_writepage(page, ret, page_start, page_end);
2885 		clear_page_dirty_for_io(page);
2886 		SetPageError(page);
2887 	}
2888 	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
2889 	unlock_page(page);
2890 	put_page(page);
2891 	kfree(fixup);
2892 	extent_changeset_free(data_reserved);
2893 	/*
2894 	 * As a precaution, do a delayed iput in case it would be the last iput
2895 	 * that could need flushing space. Recursing back to fixup worker would
2896 	 * deadlock.
2897 	 */
2898 	btrfs_add_delayed_iput(&inode->vfs_inode);
2899 }
2900 
2901 /*
2902  * There are a few paths in the higher layers of the kernel that directly
2903  * set the page dirty bit without asking the filesystem if it is a
2904  * good idea.  This causes problems because we want to make sure COW
2905  * properly happens and the data=ordered rules are followed.
2906  *
2907  * In our case any range that doesn't have the ORDERED bit set
2908  * hasn't been properly setup for IO.  We kick off an async process
2909  * to fix it up.  The async helper will wait for ordered extents, set
2910  * the delalloc bit and make it safe to write the page.
2911  */
2912 int btrfs_writepage_cow_fixup(struct page *page)
2913 {
2914 	struct inode *inode = page->mapping->host;
2915 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2916 	struct btrfs_writepage_fixup *fixup;
2917 
2918 	/* This page has ordered extent covering it already */
2919 	if (PageOrdered(page))
2920 		return 0;
2921 
2922 	/*
2923 	 * PageChecked is set below when we create a fixup worker for this page;
2924 	 * don't try to create another one if we're already PageChecked().
2925 	 *
2926 	 * The extent_io writepage code will redirty the page if we send back
2927 	 * EAGAIN.
2928 	 */
2929 	if (PageChecked(page))
2930 		return -EAGAIN;
2931 
2932 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2933 	if (!fixup)
2934 		return -EAGAIN;
2935 
2936 	/*
2937 	 * We are already holding a reference to this inode from
2938 	 * write_cache_pages.  We need to hold it because the space reservation
2939 	 * takes place outside of the page lock, and we can't trust
2940 	 * page->mapping outside of the page lock.
2941 	 */
2942 	ihold(inode);
2943 	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
2944 	get_page(page);
2945 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
2946 	fixup->page = page;
2947 	fixup->inode = inode;
2948 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2949 
2950 	return -EAGAIN;
2951 }
2952 
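/*
 * Insert a file extent item for a freshly written extent, after dropping any
 * extents that currently exist in [file_pos, file_pos + num_bytes), and
 * account for the new extent via btrfs_alloc_reserved_file_extent().
 */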
2953 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2954 				       struct btrfs_inode *inode, u64 file_pos,
2955 				       struct btrfs_file_extent_item *stack_fi,
2956 				       const bool update_inode_bytes,
2957 				       u64 qgroup_reserved)
2958 {
2959 	struct btrfs_root *root = inode->root;
2960 	const u64 sectorsize = root->fs_info->sectorsize;
2961 	struct btrfs_path *path;
2962 	struct extent_buffer *leaf;
2963 	struct btrfs_key ins;
2964 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2965 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2966 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2967 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2968 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2969 	struct btrfs_drop_extents_args drop_args = { 0 };
2970 	int ret;
2971 
2972 	path = btrfs_alloc_path();
2973 	if (!path)
2974 		return -ENOMEM;
2975 
2976 	/*
2977 	 * we may be replacing one extent in the tree with another.
2978 	 * The new extent is pinned in the extent map, and we don't want
2979 	 * to drop it from the cache until it is completely in the btree.
2980 	 *
2981 	 * So, tell btrfs_drop_extents to leave this extent in the cache;
2982 	 * the caller is expected to unpin it and allow it to be merged
2983 	 * with the others.
2984 	 */
2985 	drop_args.path = path;
2986 	drop_args.start = file_pos;
2987 	drop_args.end = file_pos + num_bytes;
2988 	drop_args.replace_extent = true;
2989 	drop_args.extent_item_size = sizeof(*stack_fi);
2990 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2991 	if (ret)
2992 		goto out;
2993 
2994 	if (!drop_args.extent_inserted) {
2995 		ins.objectid = btrfs_ino(inode);
2996 		ins.offset = file_pos;
2997 		ins.type = BTRFS_EXTENT_DATA_KEY;
2998 
2999 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
3000 					      sizeof(*stack_fi));
3001 		if (ret)
3002 			goto out;
3003 	}
3004 	leaf = path->nodes[0];
3005 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3006 	write_extent_buffer(leaf, stack_fi,
3007 			btrfs_item_ptr_offset(leaf, path->slots[0]),
3008 			sizeof(struct btrfs_file_extent_item));
3009 
3010 	btrfs_mark_buffer_dirty(leaf);
3011 	btrfs_release_path(path);
3012 
3013 	/*
3014 	 * If we dropped an inline extent here, we know the range where it is
3015 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3016 	 * number of bytes only for that range containing the inline extent.
3017 	 * The remainder of the range will be processed when clearing the
3018 	 * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
3019 	 */
3020 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3021 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3022 
3023 		inline_size = drop_args.bytes_found - inline_size;
3024 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3025 		drop_args.bytes_found -= inline_size;
3026 		num_bytes -= sectorsize;
3027 	}
3028 
3029 	if (update_inode_bytes)
3030 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3031 
3032 	ins.objectid = disk_bytenr;
3033 	ins.offset = disk_num_bytes;
3034 	ins.type = BTRFS_EXTENT_ITEM_KEY;
3035 
3036 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3037 	if (ret)
3038 		goto out;
3039 
3040 	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3041 					       file_pos - offset,
3042 					       qgroup_reserved, &ins);
3043 out:
3044 	btrfs_free_path(path);
3045 
3046 	return ret;
3047 }
3048 
3049 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3050 					 u64 start, u64 len)
3051 {
3052 	struct btrfs_block_group *cache;
3053 
3054 	cache = btrfs_lookup_block_group(fs_info, start);
3055 	ASSERT(cache);
3056 
3057 	spin_lock(&cache->lock);
3058 	cache->delalloc_bytes -= len;
3059 	spin_unlock(&cache->lock);
3060 
3061 	btrfs_put_block_group(cache);
3062 }
3063 
3064 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3065 					     struct btrfs_ordered_extent *oe)
3066 {
3067 	struct btrfs_file_extent_item stack_fi;
3068 	bool update_inode_bytes;
3069 	u64 num_bytes = oe->num_bytes;
3070 	u64 ram_bytes = oe->ram_bytes;
3071 
3072 	memset(&stack_fi, 0, sizeof(stack_fi));
3073 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3074 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3075 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3076 						   oe->disk_num_bytes);
3077 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3078 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3079 		num_bytes = ram_bytes = oe->truncated_len;
3080 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3081 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3082 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3083 	/* Encryption and other encoding are reserved and all 0 */
3084 
3085 	/*
3086 	 * For delalloc, when completing an ordered extent we update the inode's
3087 	 * bytes when clearing the range in the inode's io tree, so pass false
3088 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3089 	 * except if the ordered extent was truncated.
3090 	 */
3091 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3092 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3093 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3094 
3095 	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3096 					   oe->file_offset, &stack_fi,
3097 					   update_inode_bytes, oe->qgroup_rsv);
3098 }
3099 
3100 /*
3101  * As ordered data IO finishes, this gets called so we can finish
3102  * an ordered extent once the range of bytes in the file it covers
3103  * is fully written.
3104  */
3105 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3106 {
3107 	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3108 	struct btrfs_root *root = inode->root;
3109 	struct btrfs_fs_info *fs_info = root->fs_info;
3110 	struct btrfs_trans_handle *trans = NULL;
3111 	struct extent_io_tree *io_tree = &inode->io_tree;
3112 	struct extent_state *cached_state = NULL;
3113 	u64 start, end;
3114 	int compress_type = 0;
3115 	int ret = 0;
3116 	u64 logical_len = ordered_extent->num_bytes;
3117 	bool freespace_inode;
3118 	bool truncated = false;
3119 	bool clear_reserved_extent = true;
3120 	unsigned int clear_bits = EXTENT_DEFRAG;
3121 
3122 	start = ordered_extent->file_offset;
3123 	end = start + ordered_extent->num_bytes - 1;
3124 
3125 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3126 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3127 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3128 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3129 		clear_bits |= EXTENT_DELALLOC_NEW;
3130 
3131 	freespace_inode = btrfs_is_free_space_inode(inode);
3132 
3133 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3134 		ret = -EIO;
3135 		goto out;
3136 	}
3137 
3138 	/* A valid bdev implies a write on a sequential zone */
3139 	if (ordered_extent->bdev) {
3140 		btrfs_rewrite_logical_zoned(ordered_extent);
3141 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3142 					ordered_extent->disk_num_bytes);
3143 	}
3144 
3145 	btrfs_free_io_failure_record(inode, start, end);
3146 
3147 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3148 		truncated = true;
3149 		logical_len = ordered_extent->truncated_len;
3150 		/* Truncated the entire extent, don't bother adding */
3151 		if (!logical_len)
3152 			goto out;
3153 	}
3154 
3155 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3156 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3157 
3158 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3159 		if (freespace_inode)
3160 			trans = btrfs_join_transaction_spacecache(root);
3161 		else
3162 			trans = btrfs_join_transaction(root);
3163 		if (IS_ERR(trans)) {
3164 			ret = PTR_ERR(trans);
3165 			trans = NULL;
3166 			goto out;
3167 		}
3168 		trans->block_rsv = &inode->block_rsv;
3169 		ret = btrfs_update_inode_fallback(trans, root, inode);
3170 		if (ret) /* -ENOMEM or corruption */
3171 			btrfs_abort_transaction(trans, ret);
3172 		goto out;
3173 	}
3174 
3175 	clear_bits |= EXTENT_LOCKED;
3176 	lock_extent_bits(io_tree, start, end, &cached_state);
3177 
3178 	if (freespace_inode)
3179 		trans = btrfs_join_transaction_spacecache(root);
3180 	else
3181 		trans = btrfs_join_transaction(root);
3182 	if (IS_ERR(trans)) {
3183 		ret = PTR_ERR(trans);
3184 		trans = NULL;
3185 		goto out;
3186 	}
3187 
3188 	trans->block_rsv = &inode->block_rsv;
3189 
3190 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3191 		compress_type = ordered_extent->compress_type;
3192 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3193 		BUG_ON(compress_type);
3194 		ret = btrfs_mark_extent_written(trans, inode,
3195 						ordered_extent->file_offset,
3196 						ordered_extent->file_offset +
3197 						logical_len);
3198 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3199 						  ordered_extent->disk_num_bytes);
3200 	} else {
3201 		BUG_ON(root == fs_info->tree_root);
3202 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3203 		if (!ret) {
3204 			clear_reserved_extent = false;
3205 			btrfs_release_delalloc_bytes(fs_info,
3206 						ordered_extent->disk_bytenr,
3207 						ordered_extent->disk_num_bytes);
3208 		}
3209 	}
3210 	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
3211 			   ordered_extent->num_bytes, trans->transid);
3212 	if (ret < 0) {
3213 		btrfs_abort_transaction(trans, ret);
3214 		goto out;
3215 	}
3216 
3217 	ret = add_pending_csums(trans, &ordered_extent->list);
3218 	if (ret) {
3219 		btrfs_abort_transaction(trans, ret);
3220 		goto out;
3221 	}
3222 
3223 	/*
3224 	 * If this is a new delalloc range, clear its new delalloc flag to
3225 	 * update the inode's number of bytes. This needs to be done before
3226 	 * updating the inode item.
3227 	 */
3228 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3229 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3230 		clear_extent_bit(&inode->io_tree, start, end,
3231 				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3232 				 0, 0, &cached_state);
3233 
3234 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3235 	ret = btrfs_update_inode_fallback(trans, root, inode);
3236 	if (ret) { /* -ENOMEM or corruption */
3237 		btrfs_abort_transaction(trans, ret);
3238 		goto out;
3239 	}
3240 	ret = 0;
3241 out:
3242 	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3243 			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
3244 			 &cached_state);
3245 
3246 	if (trans)
3247 		btrfs_end_transaction(trans);
3248 
3249 	if (ret || truncated) {
3250 		u64 unwritten_start = start;
3251 
3252 		/*
3253 		 * If we failed to finish this ordered extent for any reason we
3254 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3255 		 * extent, and mark the inode with the error if it wasn't
3256 		 * already set.  Any error during writeback would have already
3257 		 * set the mapping error, so we need to set it if we're the ones
3258 		 * marking this ordered extent as failed.
3259 		 */
3260 		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3261 					     &ordered_extent->flags))
3262 			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3263 
3264 		if (truncated)
3265 			unwritten_start += logical_len;
3266 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3267 
3268 		/* Drop the cache for the part of the extent we didn't write. */
3269 		btrfs_drop_extent_cache(inode, unwritten_start, end, 0);
3270 
3271 		/*
3272 		 * If the ordered extent had an IOERR or something else went
3273 		 * wrong we need to return the space for this ordered extent
3274 		 * back to the allocator.  We only free the extent in the
3275 		 * truncated case if we didn't write out the extent at all.
3276 		 *
3277 		 * If we made it past insert_reserved_file_extent before we
3278 		 * errored out then we don't need to do this as the accounting
3279 		 * has already been done.
3280 		 */
3281 		if ((ret || !logical_len) &&
3282 		    clear_reserved_extent &&
3283 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3284 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3285 			/*
3286 			 * Discard the range before returning it back to the
3287 			 * free space pool
3288 			 */
3289 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3290 				btrfs_discard_extent(fs_info,
3291 						ordered_extent->disk_bytenr,
3292 						ordered_extent->disk_num_bytes,
3293 						NULL);
3294 			btrfs_free_reserved_extent(fs_info,
3295 					ordered_extent->disk_bytenr,
3296 					ordered_extent->disk_num_bytes, 1);
3297 		}
3298 	}
3299 
3300 	/*
3301 	 * This needs to be done to make sure anybody waiting knows we are done
3302 	 * updating everything for this ordered extent.
3303 	 */
3304 	btrfs_remove_ordered_extent(inode, ordered_extent);
3305 
3306 	/* once for us */
3307 	btrfs_put_ordered_extent(ordered_extent);
3308 	/* once for the tree */
3309 	btrfs_put_ordered_extent(ordered_extent);
3310 
3311 	return ret;
3312 }
3313 
3314 static void finish_ordered_fn(struct btrfs_work *work)
3315 {
3316 	struct btrfs_ordered_extent *ordered_extent;
3317 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3318 	btrfs_finish_ordered_io(ordered_extent);
3319 }
3320 
3321 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
3322 					  struct page *page, u64 start,
3323 					  u64 end, bool uptodate)
3324 {
3325 	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
3326 
3327 	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
3328 				       finish_ordered_fn, uptodate);
3329 }
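
/*
 * A rough sketch of the completion flow implemented above (an illustrative
 * summary, not extra behaviour):
 *
 *	writeback bio completes
 *	  -> btrfs_writepage_endio_finish_ordered()
 *	       -> btrfs_mark_ordered_io_finished() accounts the finished
 *	          range and, once the whole ordered extent is done, queues
 *	          finish_ordered_fn() on a workqueue
 *	            -> btrfs_finish_ordered_io() inserts the file extent
 *	               item, adds the pending csums and drops the refs
 */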
3330 
3331 /*
3332  * check_data_csum - verify checksum of one sector of uncompressed data
3333  * @inode:	inode
3334  * @bbio:	btrfs_bio which contains the csum
3335  * @bio_offset:	offset to the beginning of the bio (in bytes)
3336  * @page:	page where the data to be verified lives
3337  * @pgoff:	offset inside the page
3338  * @start:	logical offset in the file
3339  *
3340  * The length of such a check is always one sector size.
3341  */
3342 static int check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
3343 			   u32 bio_offset, struct page *page, u32 pgoff,
3344 			   u64 start)
3345 {
3346 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3347 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3348 	char *kaddr;
3349 	u32 len = fs_info->sectorsize;
3350 	const u32 csum_size = fs_info->csum_size;
3351 	unsigned int offset_sectors;
3352 	u8 *csum_expected;
3353 	u8 csum[BTRFS_CSUM_SIZE];
3354 
3355 	ASSERT(pgoff + len <= PAGE_SIZE);
3356 
3357 	offset_sectors = bio_offset >> fs_info->sectorsize_bits;
3358 	csum_expected = ((u8 *)bbio->csum) + offset_sectors * csum_size;
3359 
3360 	kaddr = kmap_atomic(page);
3361 	shash->tfm = fs_info->csum_shash;
3362 
3363 	crypto_shash_digest(shash, kaddr + pgoff, len, csum);
3364 	kunmap_atomic(kaddr);
3365 
3366 	if (memcmp(csum, csum_expected, csum_size))
3367 		goto zeroit;
3368 
3369 	return 0;
3370 zeroit:
3371 	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3372 				    bbio->mirror_num);
3373 	if (bbio->device)
3374 		btrfs_dev_stat_inc_and_print(bbio->device,
3375 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
3376 	memzero_page(page, pgoff, len);
3377 	return -EIO;
3378 }
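
/*
 * Example of the csum indexing done above, assuming a 4K sectorsize and
 * CRC32C checksums (csum_size == 4): the sector at bio_offset 8192 is the
 * third sector of the bio, so
 *
 *	offset_sectors = 8192 >> 12 = 2;
 *	csum_expected = bbio->csum + 2 * 4;	(bytes 8..11 of the array)
 */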
3379 
3380 /*
3381  * When reads are done, we need to check csums to verify the data is correct.
3382  * If there's a match, we allow the bio to finish.  If not, the code in
3383  * extent_io.c will try to find good copies for us.
3384  *
3385  * @bio_offset:	offset to the beginning of the bio (in bytes)
3386  * @start:	file offset of the range start
3387  * @end:	file offset of the range end (inclusive)
3388  *
3389  * Return a bitmap where bit set means a csum mismatch, and bit not set means
3390  * csum match.
3391  */
3392 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
3393 				    u32 bio_offset, struct page *page,
3394 				    u64 start, u64 end)
3395 {
3396 	struct inode *inode = page->mapping->host;
3397 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3398 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3399 	struct btrfs_root *root = BTRFS_I(inode)->root;
3400 	const u32 sectorsize = root->fs_info->sectorsize;
3401 	u32 pg_off;
3402 	unsigned int result = 0;
3403 
3404 	if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
3405 		btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
3406 		return 0;
3407 	}
3408 
3409 	/*
3410 	 * This only happens for NODATASUM or compressed read.
3411 	 * Normally this should be covered by the above check for compressed read
3412 	 * or the next check for NODATASUM.  Just do a quicker exit here.
3413 	 */
3414 	if (bbio->csum == NULL)
3415 		return 0;
3416 
3417 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3418 		return 0;
3419 
3420 	if (unlikely(test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)))
3421 		return 0;
3422 
3423 	ASSERT(page_offset(page) <= start &&
3424 	       end <= page_offset(page) + PAGE_SIZE - 1);
3425 	for (pg_off = offset_in_page(start);
3426 	     pg_off < offset_in_page(end);
3427 	     pg_off += sectorsize, bio_offset += sectorsize) {
3428 		u64 file_offset = pg_off + page_offset(page);
3429 		int ret;
3430 
3431 		if (btrfs_is_data_reloc_root(root) &&
3432 		    test_range_bit(io_tree, file_offset,
3433 				   file_offset + sectorsize - 1,
3434 				   EXTENT_NODATASUM, 1, NULL)) {
3435 			/* Skip the range without csum for data reloc inode */
3436 			clear_extent_bits(io_tree, file_offset,
3437 					  file_offset + sectorsize - 1,
3438 					  EXTENT_NODATASUM);
3439 			continue;
3440 		}
3441 		ret = check_data_csum(inode, bbio, bio_offset, page, pg_off,
3442 				      page_offset(page) + pg_off);
3443 		if (ret < 0) {
3444 			const int nr_bit = (pg_off - offset_in_page(start)) >>
3445 				     root->fs_info->sectorsize_bits;
3446 
3447 			result |= (1U << nr_bit);
3448 		}
3449 	}
3450 	return result;
3451 }
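
/*
 * Example of the returned bitmap: for a 16K range starting at page offset
 * 0 with a 4K sectorsize, where only the second sector fails verification,
 *
 *	nr_bit = (0x1000 - 0x0) >> 12 = 1;
 *	result = 1U << 1;	(binary 0010)
 */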
3452 
3453 /*
3454  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3455  *
3456  * @inode: The inode we want to perform iput on
3457  *
3458  * This function uses the generic vfs_inode::i_count to track whether we should
3459  * just decrement it (in case it's > 1) or if this is the last iput then link
3460  * the inode to the delayed iput machinery. Delayed iputs are processed at
3461  * transaction commit time, on superblock commit, or by the cleaner kthread.
3462  */
3463 void btrfs_add_delayed_iput(struct inode *inode)
3464 {
3465 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3466 	struct btrfs_inode *binode = BTRFS_I(inode);
3467 
3468 	if (atomic_add_unless(&inode->i_count, -1, 1))
3469 		return;
3470 
3471 	atomic_inc(&fs_info->nr_delayed_iputs);
3472 	spin_lock(&fs_info->delayed_iput_lock);
3473 	ASSERT(list_empty(&binode->delayed_iput));
3474 	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3475 	spin_unlock(&fs_info->delayed_iput_lock);
3476 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3477 		wake_up_process(fs_info->cleaner_kthread);
3478 }
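
/*
 * The atomic_add_unless() above is what keeps the common case cheap:
 *
 *	atomic_add_unless(&inode->i_count, -1, 1)
 *		!= 0: i_count was > 1, plain decrement, nothing else to do
 *		== 0: we would drop the last reference, so instead the
 *		      inode is linked into the delayed iput list
 */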
3479 
3480 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3481 				    struct btrfs_inode *inode)
3482 {
3483 	list_del_init(&inode->delayed_iput);
3484 	spin_unlock(&fs_info->delayed_iput_lock);
3485 	iput(&inode->vfs_inode);
3486 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3487 		wake_up(&fs_info->delayed_iputs_wait);
3488 	spin_lock(&fs_info->delayed_iput_lock);
3489 }
3490 
3491 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3492 				   struct btrfs_inode *inode)
3493 {
3494 	if (!list_empty(&inode->delayed_iput)) {
3495 		spin_lock(&fs_info->delayed_iput_lock);
3496 		if (!list_empty(&inode->delayed_iput))
3497 			run_delayed_iput_locked(fs_info, inode);
3498 		spin_unlock(&fs_info->delayed_iput_lock);
3499 	}
3500 }
3501 
3502 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3503 {
3504 
3505 	spin_lock(&fs_info->delayed_iput_lock);
3506 	while (!list_empty(&fs_info->delayed_iputs)) {
3507 		struct btrfs_inode *inode;
3508 
3509 		inode = list_first_entry(&fs_info->delayed_iputs,
3510 				struct btrfs_inode, delayed_iput);
3511 		run_delayed_iput_locked(fs_info, inode);
3512 		cond_resched_lock(&fs_info->delayed_iput_lock);
3513 	}
3514 	spin_unlock(&fs_info->delayed_iput_lock);
3515 }
3516 
3517 /**
3518  * Wait for flushing all delayed iputs
3519  *
3520  * @fs_info:  the filesystem
3521  *
3522  * This will wait, killably, on any delayed iputs that are currently
3523  * running.  Once they are all done running we will return, unless we are
3524  * killed, in which case we return -EINTR. This helps in user operations
3525  * like fallocate etc. that might get blocked on the iputs.
3526  *
3527  * Return -EINTR if we were killed, 0 if nothing's pending.
3528  */
3529 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3530 {
3531 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3532 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3533 	if (ret)
3534 		return -EINTR;
3535 	return 0;
3536 }
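
/*
 * Typical caller pattern (a sketch, not a literal quote of any caller):
 * flush the queued iputs first so their space is returned, then wait:
 *
 *	btrfs_run_delayed_iputs(fs_info);
 *	ret = btrfs_wait_on_delayed_iputs(fs_info);
 *	if (ret)
 *		return ret;	(-EINTR, we were killed)
 */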
3537 
3538 /*
3539  * This creates an orphan entry for the given inode in case something goes wrong
3540  * in the middle of an unlink.
3541  */
3542 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3543 		     struct btrfs_inode *inode)
3544 {
3545 	int ret;
3546 
3547 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3548 	if (ret && ret != -EEXIST) {
3549 		btrfs_abort_transaction(trans, ret);
3550 		return ret;
3551 	}
3552 
3553 	return 0;
3554 }
3555 
3556 /*
3557  * We have done the delete so we can go ahead and remove the orphan item for
3558  * this particular inode.
3559  */
3560 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3561 			    struct btrfs_inode *inode)
3562 {
3563 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3564 }
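
/*
 * The orphan items added/deleted above live in the inode's own root, with
 * the inode number stored in the key offset.  For example, for inode 257:
 *
 *	key.objectid = BTRFS_ORPHAN_OBJECTID;
 *	key.type = BTRFS_ORPHAN_ITEM_KEY;
 *	key.offset = 257;
 */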
3565 
3566 /*
3567  * this cleans up any orphans that may be left on the list from the last use
3568  * of this root.
3569  */
3570 int btrfs_orphan_cleanup(struct btrfs_root *root)
3571 {
3572 	struct btrfs_fs_info *fs_info = root->fs_info;
3573 	struct btrfs_path *path;
3574 	struct extent_buffer *leaf;
3575 	struct btrfs_key key, found_key;
3576 	struct btrfs_trans_handle *trans;
3577 	struct inode *inode;
3578 	u64 last_objectid = 0;
3579 	int ret = 0, nr_unlink = 0;
3580 
3581 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3582 		return 0;
3583 
3584 	path = btrfs_alloc_path();
3585 	if (!path) {
3586 		ret = -ENOMEM;
3587 		goto out;
3588 	}
3589 	path->reada = READA_BACK;
3590 
3591 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3592 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3593 	key.offset = (u64)-1;
3594 
3595 	while (1) {
3596 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3597 		if (ret < 0)
3598 			goto out;
3599 
3600 		/*
3601 		 * ret == 0 means we found what we were searching for, which
3602 		 * is weird, but possible.  So only adjust the path if we
3603 		 * didn't find the key, and check if what's there matches.
3604 		 */
3605 		if (ret > 0) {
3606 			ret = 0;
3607 			if (path->slots[0] == 0)
3608 				break;
3609 			path->slots[0]--;
3610 		}
3611 
3612 		/* pull out the item */
3613 		leaf = path->nodes[0];
3614 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3615 
3616 		/* make sure the item matches what we want */
3617 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3618 			break;
3619 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3620 			break;
3621 
3622 		/* release the path since we're done with it */
3623 		btrfs_release_path(path);
3624 
3625 		/*
3626 		 * This is basically a btrfs_lookup(), without the root
3627 		 * crossing.  The inode number is stored in the offset of
3628 		 * the orphan item.
3629 		 */
3630 
3631 		if (found_key.offset == last_objectid) {
3632 			btrfs_err(fs_info,
3633 				  "Error removing orphan entry, stopping orphan cleanup");
3634 			ret = -EINVAL;
3635 			goto out;
3636 		}
3637 
3638 		last_objectid = found_key.offset;
3639 
3640 		found_key.objectid = found_key.offset;
3641 		found_key.type = BTRFS_INODE_ITEM_KEY;
3642 		found_key.offset = 0;
3643 		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3644 		ret = PTR_ERR_OR_ZERO(inode);
3645 		if (ret && ret != -ENOENT)
3646 			goto out;
3647 
3648 		if (ret == -ENOENT && root == fs_info->tree_root) {
3649 			struct btrfs_root *dead_root;
3650 			int is_dead_root = 0;
3651 
3652 			/*
3653 			 * This is an orphan in the tree root. Currently these
3654 			 * could come from 2 sources:
3655 			 *  a) a root (snapshot/subvolume) deletion in progress
3656 			 *  b) a free space cache inode
3657 			 * We need to distinguish those two, as the orphan item
3658 			 * for a root must not get deleted before the deletion
3659 			 * of the snapshot/subvolume's tree completes.
3660 			 *
3661 			 * btrfs_find_orphan_roots() ran before us, which has
3662 			 * found all deleted roots and loaded them into
3663 			 * fs_info->fs_roots_radix. So here we can find if an
3664 			 * orphan item corresponds to a deleted root by looking
3665 			 * up the root from that radix tree.
3666 			 */
3667 
3668 			spin_lock(&fs_info->fs_roots_radix_lock);
3669 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3670 							 (unsigned long)found_key.objectid);
3671 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3672 				is_dead_root = 1;
3673 			spin_unlock(&fs_info->fs_roots_radix_lock);
3674 
3675 			if (is_dead_root) {
3676 				/* prevent this orphan from being found again */
3677 				key.offset = found_key.objectid - 1;
3678 				continue;
3679 			}
3680 
3681 		}
3682 
3683 		/*
3684 		 * If we have an inode with links, there are a couple of
3685 		 * possibilities:
3686 		 *
3687 		 * 1. We were halfway through creating fsverity metadata for the
3688 		 * file. In that case, the orphan item represents incomplete
3689 		 * fsverity metadata which must be cleaned up with
3690 		 * btrfs_drop_verity_items and deleting the orphan item.
3691 
3692 		 * 2. Old kernels (before v3.12) used to create an
3693 		 * orphan item for truncate indicating that there were possibly
3694 		 * extent items past i_size that needed to be deleted. In v3.12,
3695 		 * truncate was changed to update i_size in sync with the extent
3696 		 * items, but the (useless) orphan item was still created. Since
3697 		 * v4.18, we don't create the orphan item for truncate at all.
3698 		 *
3699 		 * So, this item could mean that we need to do a truncate, but
3700 		 * only if this filesystem was last used on a pre-v3.12 kernel
3701 		 * and was not cleanly unmounted. The odds of that are quite
3702 		 * slim, and it's a pain to do the truncate now, so just delete
3703 		 * the orphan item.
3704 		 *
3705 		 * It's also possible that this orphan item was supposed to be
3706 		 * deleted but wasn't. The inode number may have been reused,
3707 		 * but either way, we can delete the orphan item.
3708 		 */
3709 		if (ret == -ENOENT || inode->i_nlink) {
3710 			if (!ret) {
3711 				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3712 				iput(inode);
3713 				if (ret)
3714 					goto out;
3715 			}
3716 			trans = btrfs_start_transaction(root, 1);
3717 			if (IS_ERR(trans)) {
3718 				ret = PTR_ERR(trans);
3719 				goto out;
3720 			}
3721 			btrfs_debug(fs_info, "auto deleting %Lu",
3722 				    found_key.objectid);
3723 			ret = btrfs_del_orphan_item(trans, root,
3724 						    found_key.objectid);
3725 			btrfs_end_transaction(trans);
3726 			if (ret)
3727 				goto out;
3728 			continue;
3729 		}
3730 
3731 		nr_unlink++;
3732 
3733 		/* this will do delete_inode and everything for us */
3734 		iput(inode);
3735 	}
3736 	/* release the path since we're done with it */
3737 	btrfs_release_path(path);
3738 
3739 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3740 		trans = btrfs_join_transaction(root);
3741 		if (!IS_ERR(trans))
3742 			btrfs_end_transaction(trans);
3743 	}
3744 
3745 	if (nr_unlink)
3746 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3747 
3748 out:
3749 	if (ret)
3750 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3751 	btrfs_free_path(path);
3752 	return ret;
3753 }
3754 
3755 /*
3756  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3757  * don't find any xattrs, we know there can't be any acls.
3758  *
3759  * slot is the slot the inode is in, objectid is the objectid of the inode
3760  */
3761 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3762 					  int slot, u64 objectid,
3763 					  int *first_xattr_slot)
3764 {
3765 	u32 nritems = btrfs_header_nritems(leaf);
3766 	struct btrfs_key found_key;
3767 	static u64 xattr_access = 0;
3768 	static u64 xattr_default = 0;
3769 	int scanned = 0;
3770 
3771 	if (!xattr_access) {
3772 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3773 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3774 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3775 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3776 	}
3777 
3778 	slot++;
3779 	*first_xattr_slot = -1;
3780 	while (slot < nritems) {
3781 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3782 
3783 		/* we found a different objectid, there must not be acls */
3784 		if (found_key.objectid != objectid)
3785 			return 0;
3786 
3787 		/* we found an xattr, assume we've got an acl */
3788 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3789 			if (*first_xattr_slot == -1)
3790 				*first_xattr_slot = slot;
3791 			if (found_key.offset == xattr_access ||
3792 			    found_key.offset == xattr_default)
3793 				return 1;
3794 		}
3795 
3796 		/*
3797 		 * we found a key greater than an xattr key, there can't
3798 		 * be any acls later on
3799 		 */
3800 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3801 			return 0;
3802 
3803 		slot++;
3804 		scanned++;
3805 
3806 		/*
3807 		 * it goes inode, inode backrefs, xattrs, extents,
3808 		 * so if there are a ton of hard links to an inode there can
3809 		 * be a lot of backrefs.  Don't waste time searching too hard,
3810 		 * this is just an optimization
3811 		 */
3812 		if (scanned >= 8)
3813 			break;
3814 	}
3815 	/* We hit the end of the leaf before we found an xattr or
3816 	 * something larger than an xattr.  We have to assume the inode
3817 	 * has acls.
3818 	 */
3819 	if (*first_xattr_slot == -1)
3820 		*first_xattr_slot = slot;
3821 	return 1;
3822 }
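
/*
 * Example of the leaf layout the scan above relies on, for inode 257 with
 * one hard link and one xattr (items sorted by objectid, then type):
 *
 *	(257 INODE_ITEM 0)
 *	(257 INODE_REF 256)
 *	(257 XATTR_ITEM <name hash>)
 *	(257 EXTENT_DATA 0)
 */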
3823 
3824 /*
3825  * read an inode from the btree into the in-memory inode
3826  */
3827 static int btrfs_read_locked_inode(struct inode *inode,
3828 				   struct btrfs_path *in_path)
3829 {
3830 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3831 	struct btrfs_path *path = in_path;
3832 	struct extent_buffer *leaf;
3833 	struct btrfs_inode_item *inode_item;
3834 	struct btrfs_root *root = BTRFS_I(inode)->root;
3835 	struct btrfs_key location;
3836 	unsigned long ptr;
3837 	int maybe_acls;
3838 	u32 rdev;
3839 	int ret;
3840 	bool filled = false;
3841 	int first_xattr_slot;
3842 
3843 	ret = btrfs_fill_inode(inode, &rdev);
3844 	if (!ret)
3845 		filled = true;
3846 
3847 	if (!path) {
3848 		path = btrfs_alloc_path();
3849 		if (!path)
3850 			return -ENOMEM;
3851 	}
3852 
3853 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3854 
3855 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3856 	if (ret) {
3857 		if (path != in_path)
3858 			btrfs_free_path(path);
3859 		return ret;
3860 	}
3861 
3862 	leaf = path->nodes[0];
3863 
3864 	if (filled)
3865 		goto cache_index;
3866 
3867 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3868 				    struct btrfs_inode_item);
3869 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3870 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3871 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3872 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3873 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3874 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3875 			round_up(i_size_read(inode), fs_info->sectorsize));
3876 
3877 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3878 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3879 
3880 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3881 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3882 
3883 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3884 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3885 
3886 	BTRFS_I(inode)->i_otime.tv_sec =
3887 		btrfs_timespec_sec(leaf, &inode_item->otime);
3888 	BTRFS_I(inode)->i_otime.tv_nsec =
3889 		btrfs_timespec_nsec(leaf, &inode_item->otime);
3890 
3891 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3892 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3893 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3894 
3895 	inode_set_iversion_queried(inode,
3896 				   btrfs_inode_sequence(leaf, inode_item));
3897 	inode->i_generation = BTRFS_I(inode)->generation;
3898 	inode->i_rdev = 0;
3899 	rdev = btrfs_inode_rdev(leaf, inode_item);
3900 
3901 	BTRFS_I(inode)->index_cnt = (u64)-1;
3902 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3903 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3904 
3905 cache_index:
3906 	/*
3907 	 * If we were modified in the current generation and evicted from memory
3908 	 * and then re-read we need to do a full sync since we don't have any
3909 	 * idea about which extents were modified before we were evicted from
3910 	 * cache.
3911 	 *
3912 	 * This is required for both inode re-read from disk and delayed inode
3913 	 * in delayed_nodes_tree.
3914 	 */
3915 	if (BTRFS_I(inode)->last_trans == fs_info->generation)
3916 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3917 			&BTRFS_I(inode)->runtime_flags);
3918 
3919 	/*
3920 	 * We don't persist the id of the transaction where an unlink operation
3921 	 * against the inode was last made. So here we assume the inode might
3922 	 * have been evicted, and therefore the exact value of last_unlink_trans
3923 	 * lost, and set it to last_trans to avoid metadata inconsistencies
3924 	 * between the inode and its parent if the inode is fsync'ed and the log
3925 	 * replayed. For example, in the scenario:
3926 	 *
3927 	 * touch mydir/foo
3928 	 * ln mydir/foo mydir/bar
3929 	 * sync
3930 	 * unlink mydir/bar
3931 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3932 	 * xfs_io -c fsync mydir/foo
3933 	 * <power failure>
3934 	 * mount fs, triggers fsync log replay
3935 	 *
3936 	 * We must make sure that when we fsync our inode foo we also log its
3937 	 * parent inode, otherwise after log replay the parent still has the
3938 	 * dentry with the "bar" name but our inode foo has a link count of 1
3939 	 * and doesn't have an inode ref with the name "bar" anymore.
3940 	 *
3941 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3942 	 * but it guarantees correctness at the expense of occasional full
3943 	 * transaction commits on fsync if our inode is a directory, or if our
3944 	 * inode is not a directory, logging its parent unnecessarily.
3945 	 */
3946 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3947 
3948 	/*
3949 	 * Same logic as for last_unlink_trans. We don't persist the generation
3950 	 * of the last transaction where this inode was used for a reflink
3951 	 * operation, so after eviction and reloading the inode we must be
3952 	 * pessimistic and assume the last transaction that modified the inode.
3953 	 */
3954 	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3955 
3956 	path->slots[0]++;
3957 	if (inode->i_nlink != 1 ||
3958 	    path->slots[0] >= btrfs_header_nritems(leaf))
3959 		goto cache_acl;
3960 
3961 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3962 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3963 		goto cache_acl;
3964 
3965 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3966 	if (location.type == BTRFS_INODE_REF_KEY) {
3967 		struct btrfs_inode_ref *ref;
3968 
3969 		ref = (struct btrfs_inode_ref *)ptr;
3970 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3971 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3972 		struct btrfs_inode_extref *extref;
3973 
3974 		extref = (struct btrfs_inode_extref *)ptr;
3975 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3976 								     extref);
3977 	}
3978 cache_acl:
3979 	/*
3980 	 * try to precache a NULL acl entry for files that don't have
3981 	 * any xattrs or acls
3982 	 */
3983 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3984 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3985 	if (first_xattr_slot != -1) {
3986 		path->slots[0] = first_xattr_slot;
3987 		ret = btrfs_load_inode_props(inode, path);
3988 		if (ret)
3989 			btrfs_err(fs_info,
3990 				  "error loading props for ino %llu (root %llu): %d",
3991 				  btrfs_ino(BTRFS_I(inode)),
3992 				  root->root_key.objectid, ret);
3993 	}
3994 	if (path != in_path)
3995 		btrfs_free_path(path);
3996 
3997 	if (!maybe_acls)
3998 		cache_no_acl(inode);
3999 
4000 	switch (inode->i_mode & S_IFMT) {
4001 	case S_IFREG:
4002 		inode->i_mapping->a_ops = &btrfs_aops;
4003 		inode->i_fop = &btrfs_file_operations;
4004 		inode->i_op = &btrfs_file_inode_operations;
4005 		break;
4006 	case S_IFDIR:
4007 		inode->i_fop = &btrfs_dir_file_operations;
4008 		inode->i_op = &btrfs_dir_inode_operations;
4009 		break;
4010 	case S_IFLNK:
4011 		inode->i_op = &btrfs_symlink_inode_operations;
4012 		inode_nohighmem(inode);
4013 		inode->i_mapping->a_ops = &btrfs_aops;
4014 		break;
4015 	default:
4016 		inode->i_op = &btrfs_special_inode_operations;
4017 		init_special_inode(inode, inode->i_mode, rdev);
4018 		break;
4019 	}
4020 
4021 	btrfs_sync_inode_flags_to_i_flags(inode);
4022 	return 0;
4023 }
4024 
4025 /*
4026  * given a leaf and an inode, copy the inode fields into the leaf
4027  */
4028 static void fill_inode_item(struct btrfs_trans_handle *trans,
4029 			    struct extent_buffer *leaf,
4030 			    struct btrfs_inode_item *item,
4031 			    struct inode *inode)
4032 {
4033 	struct btrfs_map_token token;
4034 	u64 flags;
4035 
4036 	btrfs_init_map_token(&token, leaf);
4037 
4038 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4039 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4040 	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
4041 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4042 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4043 
4044 	btrfs_set_token_timespec_sec(&token, &item->atime,
4045 				     inode->i_atime.tv_sec);
4046 	btrfs_set_token_timespec_nsec(&token, &item->atime,
4047 				      inode->i_atime.tv_nsec);
4048 
4049 	btrfs_set_token_timespec_sec(&token, &item->mtime,
4050 				     inode->i_mtime.tv_sec);
4051 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
4052 				      inode->i_mtime.tv_nsec);
4053 
4054 	btrfs_set_token_timespec_sec(&token, &item->ctime,
4055 				     inode->i_ctime.tv_sec);
4056 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
4057 				      inode->i_ctime.tv_nsec);
4058 
4059 	btrfs_set_token_timespec_sec(&token, &item->otime,
4060 				     BTRFS_I(inode)->i_otime.tv_sec);
4061 	btrfs_set_token_timespec_nsec(&token, &item->otime,
4062 				      BTRFS_I(inode)->i_otime.tv_nsec);
4063 
4064 	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
4065 	btrfs_set_token_inode_generation(&token, item,
4066 					 BTRFS_I(inode)->generation);
4067 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4068 	btrfs_set_token_inode_transid(&token, item, trans->transid);
4069 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4070 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4071 					  BTRFS_I(inode)->ro_flags);
4072 	btrfs_set_token_inode_flags(&token, item, flags);
4073 	btrfs_set_token_inode_block_group(&token, item, 0);
4074 }
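
/*
 * The btrfs_map_token used above caches the extent buffer mapping between
 * consecutive btrfs_set_token_*() calls, so filling the many fields of the
 * inode item doesn't redo the page lookup for every single field.
 */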
4075 
4076 /*
4077  * copy everything in the in-memory inode into the btree.
4078  */
4079 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4080 				struct btrfs_root *root,
4081 				struct btrfs_inode *inode)
4082 {
4083 	struct btrfs_inode_item *inode_item;
4084 	struct btrfs_path *path;
4085 	struct extent_buffer *leaf;
4086 	int ret;
4087 
4088 	path = btrfs_alloc_path();
4089 	if (!path)
4090 		return -ENOMEM;
4091 
4092 	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
4093 	if (ret) {
4094 		if (ret > 0)
4095 			ret = -ENOENT;
4096 		goto failed;
4097 	}
4098 
4099 	leaf = path->nodes[0];
4100 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4101 				    struct btrfs_inode_item);
4102 
4103 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4104 	btrfs_mark_buffer_dirty(leaf);
4105 	btrfs_set_inode_last_trans(trans, inode);
4106 	ret = 0;
4107 failed:
4108 	btrfs_free_path(path);
4109 	return ret;
4110 }
4111 
4112 /*
4113  * copy everything in the in-memory inode into the btree.
4114  */
4115 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4116 				struct btrfs_root *root,
4117 				struct btrfs_inode *inode)
4118 {
4119 	struct btrfs_fs_info *fs_info = root->fs_info;
4120 	int ret;
4121 
4122 	/*
4123 	 * If the inode is a free space inode, we can deadlock during commit
4124 	 * if we put it into the delayed code.
4125 	 *
4126 	 * The data relocation inode should also be directly updated
4127 	 * without delay
4128 	 */
4129 	if (!btrfs_is_free_space_inode(inode)
4130 	    && !btrfs_is_data_reloc_root(root)
4131 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4132 		btrfs_update_root_times(trans, root);
4133 
4134 		ret = btrfs_delayed_update_inode(trans, root, inode);
4135 		if (!ret)
4136 			btrfs_set_inode_last_trans(trans, inode);
4137 		return ret;
4138 	}
4139 
4140 	return btrfs_update_inode_item(trans, root, inode);
4141 }
4142 
4143 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4144 				struct btrfs_root *root, struct btrfs_inode *inode)
4145 {
4146 	int ret;
4147 
4148 	ret = btrfs_update_inode(trans, root, inode);
4149 	if (ret == -ENOSPC)
4150 		return btrfs_update_inode_item(trans, root, inode);
4151 	return ret;
4152 }
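
/*
 * Sketch of the fallback above: the delayed inode path inside
 * btrfs_update_inode() can fail its metadata reservation with -ENOSPC, in
 * which case we retry with a direct btrfs_update_inode_item(), which only
 * needs the units already reserved by the transaction handle.
 */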
4153 
4154 /*
4155  * unlink helper that gets used here in inode.c and in the tree logging
4156  * recovery code.  It removes a link in a directory with a given name, and
4157  * also drops the back refs in the inode to the directory.
4158  */
4159 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4160 				struct btrfs_inode *dir,
4161 				struct btrfs_inode *inode,
4162 				const char *name, int name_len,
4163 				struct btrfs_rename_ctx *rename_ctx)
4164 {
4165 	struct btrfs_root *root = dir->root;
4166 	struct btrfs_fs_info *fs_info = root->fs_info;
4167 	struct btrfs_path *path;
4168 	int ret = 0;
4169 	struct btrfs_dir_item *di;
4170 	u64 index;
4171 	u64 ino = btrfs_ino(inode);
4172 	u64 dir_ino = btrfs_ino(dir);
4173 
4174 	path = btrfs_alloc_path();
4175 	if (!path) {
4176 		ret = -ENOMEM;
4177 		goto out;
4178 	}
4179 
4180 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4181 				    name, name_len, -1);
4182 	if (IS_ERR_OR_NULL(di)) {
4183 		ret = di ? PTR_ERR(di) : -ENOENT;
4184 		goto err;
4185 	}
4186 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4187 	if (ret)
4188 		goto err;
4189 	btrfs_release_path(path);
4190 
4191 	/*
4192 	 * If we don't have the dir index cached, we have to get it by
4193 	 * looking up the inode ref, and since that hands us the inode ref
4194 	 * we remove it directly; delayed deletion would be pointless.
4195 	 *
4196 	 * But if we do have the dir index, there is no need to search the
4197 	 * inode ref to get it.  Since the inode ref is close to the inode
4198 	 * item, it is better to delay its deletion and do it when we
4199 	 * update the inode item.
4200 	 */
4201 	if (inode->dir_index) {
4202 		ret = btrfs_delayed_delete_inode_ref(inode);
4203 		if (!ret) {
4204 			index = inode->dir_index;
4205 			goto skip_backref;
4206 		}
4207 	}
4208 
4209 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4210 				  dir_ino, &index);
4211 	if (ret) {
4212 		btrfs_info(fs_info,
4213 			"failed to delete reference to %.*s, inode %llu parent %llu",
4214 			name_len, name, ino, dir_ino);
4215 		btrfs_abort_transaction(trans, ret);
4216 		goto err;
4217 	}
4218 skip_backref:
4219 	if (rename_ctx)
4220 		rename_ctx->index = index;
4221 
4222 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4223 	if (ret) {
4224 		btrfs_abort_transaction(trans, ret);
4225 		goto err;
4226 	}
4227 
4228 	/*
4229 	 * If we are in a rename context, we don't need to update anything in the
4230 	 * log. That will be done later during the rename by btrfs_log_new_name().
4231 	 * Besides that, doing it here would only cause extra unnecessary btree
4232 	 * operations on the log tree, increasing latency for applications.
4233 	 */
4234 	if (!rename_ctx) {
4235 		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4236 					   dir_ino);
4237 		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4238 					     index);
4239 	}
4240 
4241 	/*
4242 	 * If we have a pending delayed iput we could end up with the final iput
4243 	 * being run in btrfs-cleaner context.  If we have enough of these built
4244 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4245 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4246 	 * the inode we can run the delayed iput here without any issues as the
4247 	 * final iput won't be done until after we drop the ref we're currently
4248 	 * holding.
4249 	 */
4250 	btrfs_run_delayed_iput(fs_info, inode);
4251 err:
4252 	btrfs_free_path(path);
4253 	if (ret)
4254 		goto out;
4255 
4256 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4257 	inode_inc_iversion(&inode->vfs_inode);
4258 	inode_inc_iversion(&dir->vfs_inode);
4259 	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4260 		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4261 	ret = btrfs_update_inode(trans, root, dir);
4262 out:
4263 	return ret;
4264 }
4265 
4266 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4267 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4268 		       const char *name, int name_len)
4269 {
4270 	int ret;
4271 	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
4272 	if (!ret) {
4273 		drop_nlink(&inode->vfs_inode);
4274 		ret = btrfs_update_inode(trans, inode->root, inode);
4275 	}
4276 	return ret;
4277 }
4278 
4279 /*
4280  * helper to start transaction for unlink and rmdir.
4281  *
4282  * unlink and rmdir are special in btrfs, they do not always free space, so
4283  * if we cannot make our reservations the normal way try and see if there is
4284  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4285  * allow the unlink to occur.
4286  */
4287 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4288 {
4289 	struct btrfs_root *root = BTRFS_I(dir)->root;
4290 
4291 	/*
4292 	 * 1 for the possible orphan item
4293 	 * 1 for the dir item
4294 	 * 1 for the dir index
4295 	 * 1 for the inode ref
4296 	 * 1 for the inode
4297 	 * 1 for the parent inode
4298 	 */
4299 	return btrfs_start_transaction_fallback_global_rsv(root, 6);
4300 }
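
/*
 * So a single unlink reserves 6 metadata units, and if that reservation
 * cannot be made the normal way, the fallback variant used above tries to
 * take from the global reserve so the unlink can still go ahead and
 * eventually free space.
 */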
4301 
4302 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4303 {
4304 	struct btrfs_trans_handle *trans;
4305 	struct inode *inode = d_inode(dentry);
4306 	int ret;
4307 
4308 	trans = __unlink_start_trans(dir);
4309 	if (IS_ERR(trans))
4310 		return PTR_ERR(trans);
4311 
4312 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4313 			0);
4314 
4315 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
4316 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4317 			dentry->d_name.len);
4318 	if (ret)
4319 		goto out;
4320 
4321 	if (inode->i_nlink == 0) {
4322 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4323 		if (ret)
4324 			goto out;
4325 	}
4326 
4327 out:
4328 	btrfs_end_transaction(trans);
4329 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4330 	return ret;
4331 }
4332 
4333 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4334 			       struct inode *dir, struct dentry *dentry)
4335 {
4336 	struct btrfs_root *root = BTRFS_I(dir)->root;
4337 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4338 	struct btrfs_path *path;
4339 	struct extent_buffer *leaf;
4340 	struct btrfs_dir_item *di;
4341 	struct btrfs_key key;
4342 	const char *name = dentry->d_name.name;
4343 	int name_len = dentry->d_name.len;
4344 	u64 index;
4345 	int ret;
4346 	u64 objectid;
4347 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4348 
4349 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4350 		objectid = inode->root->root_key.objectid;
4351 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4352 		objectid = inode->location.objectid;
4353 	} else {
4354 		WARN_ON(1);
4355 		return -EINVAL;
4356 	}
4357 
4358 	path = btrfs_alloc_path();
4359 	if (!path)
4360 		return -ENOMEM;
4361 
4362 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4363 				   name, name_len, -1);
4364 	if (IS_ERR_OR_NULL(di)) {
4365 		ret = di ? PTR_ERR(di) : -ENOENT;
4366 		goto out;
4367 	}
4368 
4369 	leaf = path->nodes[0];
4370 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4371 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4372 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4373 	if (ret) {
4374 		btrfs_abort_transaction(trans, ret);
4375 		goto out;
4376 	}
4377 	btrfs_release_path(path);
4378 
4379 	/*
4380 	 * This is a placeholder inode for a subvolume we didn't have a
4381 	 * reference to at the time of the snapshot creation.  In the meantime
4382 	 * we could have renamed the real subvol link into our snapshot, so
4383 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4384 	 * Instead simply lookup the dir_index_item for this entry so we can
4385 	 * remove it.  Otherwise we know we have a ref to the root and we can
4386 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4387 	 */
4388 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4389 		di = btrfs_search_dir_index_item(root, path, dir_ino,
4390 						 name, name_len);
4391 		if (IS_ERR_OR_NULL(di)) {
4392 			if (!di)
4393 				ret = -ENOENT;
4394 			else
4395 				ret = PTR_ERR(di);
4396 			btrfs_abort_transaction(trans, ret);
4397 			goto out;
4398 		}
4399 
4400 		leaf = path->nodes[0];
4401 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4402 		index = key.offset;
4403 		btrfs_release_path(path);
4404 	} else {
4405 		ret = btrfs_del_root_ref(trans, objectid,
4406 					 root->root_key.objectid, dir_ino,
4407 					 &index, name, name_len);
4408 		if (ret) {
4409 			btrfs_abort_transaction(trans, ret);
4410 			goto out;
4411 		}
4412 	}
4413 
4414 	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4415 	if (ret) {
4416 		btrfs_abort_transaction(trans, ret);
4417 		goto out;
4418 	}
4419 
4420 	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4421 	inode_inc_iversion(dir);
4422 	dir->i_mtime = dir->i_ctime = current_time(dir);
4423 	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
4424 	if (ret)
4425 		btrfs_abort_transaction(trans, ret);
4426 out:
4427 	btrfs_free_path(path);
4428 	return ret;
4429 }
4430 
4431 /*
4432  * Helper to check if the subvolume references other subvolumes or if it's
4433  * the default subvolume.
4434  */
4435 static noinline int may_destroy_subvol(struct btrfs_root *root)
4436 {
4437 	struct btrfs_fs_info *fs_info = root->fs_info;
4438 	struct btrfs_path *path;
4439 	struct btrfs_dir_item *di;
4440 	struct btrfs_key key;
4441 	u64 dir_id;
4442 	int ret;
4443 
4444 	path = btrfs_alloc_path();
4445 	if (!path)
4446 		return -ENOMEM;
4447 
4448 	/* Make sure this root isn't set as the default subvol */
4449 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4450 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4451 				   dir_id, "default", 7, 0);
4452 	if (di && !IS_ERR(di)) {
4453 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4454 		if (key.objectid == root->root_key.objectid) {
4455 			ret = -EPERM;
4456 			btrfs_err(fs_info,
4457 				  "deleting default subvolume %llu is not allowed",
4458 				  key.objectid);
4459 			goto out;
4460 		}
4461 		btrfs_release_path(path);
4462 	}
4463 
4464 	key.objectid = root->root_key.objectid;
4465 	key.type = BTRFS_ROOT_REF_KEY;
4466 	key.offset = (u64)-1;
4467 
4468 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4469 	if (ret < 0)
4470 		goto out;
4471 	BUG_ON(ret == 0);
4472 
4473 	ret = 0;
4474 	if (path->slots[0] > 0) {
4475 		path->slots[0]--;
4476 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4477 		if (key.objectid == root->root_key.objectid &&
4478 		    key.type == BTRFS_ROOT_REF_KEY)
4479 			ret = -ENOTEMPTY;
4480 	}
4481 out:
4482 	btrfs_free_path(path);
4483 	return ret;
4484 }
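
/*
 * Example of the (u64)-1 search above: if subvolume 257 still contains
 * subvolume 258, the root tree holds an item (257 ROOT_REF 258) right
 * before the slot where the search lands, so stepping back one slot is
 * enough to detect it and return -ENOTEMPTY.
 */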
4485 
4486 /* Delete all dentries for inodes belonging to the root */
4487 static void btrfs_prune_dentries(struct btrfs_root *root)
4488 {
4489 	struct btrfs_fs_info *fs_info = root->fs_info;
4490 	struct rb_node *node;
4491 	struct rb_node *prev;
4492 	struct btrfs_inode *entry;
4493 	struct inode *inode;
4494 	u64 objectid = 0;
4495 
4496 	if (!BTRFS_FS_ERROR(fs_info))
4497 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4498 
4499 	spin_lock(&root->inode_lock);
4500 again:
4501 	node = root->inode_tree.rb_node;
4502 	prev = NULL;
4503 	while (node) {
4504 		prev = node;
4505 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4506 
4507 		if (objectid < btrfs_ino(entry))
4508 			node = node->rb_left;
4509 		else if (objectid > btrfs_ino(entry))
4510 			node = node->rb_right;
4511 		else
4512 			break;
4513 	}
4514 	if (!node) {
4515 		while (prev) {
4516 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4517 			if (objectid <= btrfs_ino(entry)) {
4518 				node = prev;
4519 				break;
4520 			}
4521 			prev = rb_next(prev);
4522 		}
4523 	}
4524 	while (node) {
4525 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4526 		objectid = btrfs_ino(entry) + 1;
4527 		inode = igrab(&entry->vfs_inode);
4528 		if (inode) {
4529 			spin_unlock(&root->inode_lock);
4530 			if (atomic_read(&inode->i_count) > 1)
4531 				d_prune_aliases(inode);
4532 			/*
4533 			 * btrfs_drop_inode will have it removed from the inode
4534 			 * cache when its usage count hits zero.
4535 			 */
4536 			iput(inode);
4537 			cond_resched();
4538 			spin_lock(&root->inode_lock);
4539 			goto again;
4540 		}
4541 
4542 		if (cond_resched_lock(&root->inode_lock))
4543 			goto again;
4544 
4545 		node = rb_next(node);
4546 	}
4547 	spin_unlock(&root->inode_lock);
4548 }
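
/*
 * Note the restart pattern above: once we igrab() an inode we must drop
 * root->inode_lock to call iput(), and the rb-tree may change while it's
 * unlocked, so the walk re-searches from the last objectid + 1 instead of
 * trusting a cached rb_node.
 */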
4549 
4550 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4551 {
4552 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4553 	struct btrfs_root *root = BTRFS_I(dir)->root;
4554 	struct inode *inode = d_inode(dentry);
4555 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4556 	struct btrfs_trans_handle *trans;
4557 	struct btrfs_block_rsv block_rsv;
4558 	u64 root_flags;
4559 	int ret;
4560 
4561 	/*
4562 	 * Don't allow deleting a subvolume with a send in progress. This is
4563 	 * inside the inode lock so the error handling that has to drop the bit
4564 	 * again is not run concurrently.
4565 	 */
4566 	spin_lock(&dest->root_item_lock);
4567 	if (dest->send_in_progress) {
4568 		spin_unlock(&dest->root_item_lock);
4569 		btrfs_warn(fs_info,
4570 			   "attempt to delete subvolume %llu during send",
4571 			   dest->root_key.objectid);
4572 		return -EPERM;
4573 	}
4574 	if (atomic_read(&dest->nr_swapfiles)) {
4575 		spin_unlock(&dest->root_item_lock);
4576 		btrfs_warn(fs_info,
4577 			   "attempt to delete subvolume %llu with active swapfile",
4578 			   root->root_key.objectid);
4579 		return -EPERM;
4580 	}
4581 	root_flags = btrfs_root_flags(&dest->root_item);
4582 	btrfs_set_root_flags(&dest->root_item,
4583 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4584 	spin_unlock(&dest->root_item_lock);
4585 
4586 	down_write(&fs_info->subvol_sem);
4587 
4588 	ret = may_destroy_subvol(dest);
4589 	if (ret)
4590 		goto out_up_write;
4591 
4592 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4593 	/*
4594 	 * One for dir inode,
4595 	 * two for dir entries,
4596 	 * two for root ref/backref.
4597 	 */
4598 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4599 	if (ret)
4600 		goto out_up_write;
4601 
4602 	trans = btrfs_start_transaction(root, 0);
4603 	if (IS_ERR(trans)) {
4604 		ret = PTR_ERR(trans);
4605 		goto out_release;
4606 	}
4607 	trans->block_rsv = &block_rsv;
4608 	trans->bytes_reserved = block_rsv.size;
4609 
4610 	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4611 
4612 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4613 	if (ret) {
4614 		btrfs_abort_transaction(trans, ret);
4615 		goto out_end_trans;
4616 	}
4617 
4618 	ret = btrfs_record_root_in_trans(trans, dest);
4619 	if (ret) {
4620 		btrfs_abort_transaction(trans, ret);
4621 		goto out_end_trans;
4622 	}
4623 
4624 	memset(&dest->root_item.drop_progress, 0,
4625 		sizeof(dest->root_item.drop_progress));
4626 	btrfs_set_root_drop_level(&dest->root_item, 0);
4627 	btrfs_set_root_refs(&dest->root_item, 0);
4628 
4629 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4630 		ret = btrfs_insert_orphan_item(trans,
4631 					fs_info->tree_root,
4632 					dest->root_key.objectid);
4633 		if (ret) {
4634 			btrfs_abort_transaction(trans, ret);
4635 			goto out_end_trans;
4636 		}
4637 	}
4638 
4639 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4640 				  BTRFS_UUID_KEY_SUBVOL,
4641 				  dest->root_key.objectid);
4642 	if (ret && ret != -ENOENT) {
4643 		btrfs_abort_transaction(trans, ret);
4644 		goto out_end_trans;
4645 	}
4646 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4647 		ret = btrfs_uuid_tree_remove(trans,
4648 					  dest->root_item.received_uuid,
4649 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4650 					  dest->root_key.objectid);
4651 		if (ret && ret != -ENOENT) {
4652 			btrfs_abort_transaction(trans, ret);
4653 			goto out_end_trans;
4654 		}
4655 	}
4656 
4657 	free_anon_bdev(dest->anon_dev);
4658 	dest->anon_dev = 0;
4659 out_end_trans:
4660 	trans->block_rsv = NULL;
4661 	trans->bytes_reserved = 0;
4662 	ret = btrfs_end_transaction(trans);
4663 	inode->i_flags |= S_DEAD;
4664 out_release:
4665 	btrfs_subvolume_release_metadata(root, &block_rsv);
4666 out_up_write:
4667 	up_write(&fs_info->subvol_sem);
4668 	if (ret) {
4669 		spin_lock(&dest->root_item_lock);
4670 		root_flags = btrfs_root_flags(&dest->root_item);
4671 		btrfs_set_root_flags(&dest->root_item,
4672 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4673 		spin_unlock(&dest->root_item_lock);
4674 	} else {
4675 		d_invalidate(dentry);
4676 		btrfs_prune_dentries(dest);
4677 		ASSERT(dest->send_in_progress == 0);
4678 	}
4679 
4680 	return ret;
4681 }
4682 
4683 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4684 {
4685 	struct inode *inode = d_inode(dentry);
4686 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4687 	int err = 0;
4688 	struct btrfs_trans_handle *trans;
4689 	u64 last_unlink_trans;
4690 
4691 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4692 		return -ENOTEMPTY;
4693 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4694 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4695 			btrfs_err(fs_info,
4696 			"extent tree v2 doesn't support snapshot deletion yet");
4697 			return -EOPNOTSUPP;
4698 		}
4699 		return btrfs_delete_subvolume(dir, dentry);
4700 	}
4701 
4702 	trans = __unlink_start_trans(dir);
4703 	if (IS_ERR(trans))
4704 		return PTR_ERR(trans);
4705 
4706 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4707 		err = btrfs_unlink_subvol(trans, dir, dentry);
4708 		goto out;
4709 	}
4710 
4711 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4712 	if (err)
4713 		goto out;
4714 
4715 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4716 
4717 	/* now the directory is empty */
4718 	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
4719 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4720 			dentry->d_name.len);
4721 	if (!err) {
4722 		btrfs_i_size_write(BTRFS_I(inode), 0);
4723 		/*
4724 		 * Propagate the last_unlink_trans value of the deleted dir to
4725 		 * its parent directory. This is to prevent an unrecoverable
4726 		 * log tree in the case we do something like this:
4727 		 * 1) create dir foo
4728 		 * 2) create snapshot under dir foo
4729 		 * 3) delete the snapshot
4730 		 * 4) rmdir foo
4731 		 * 5) mkdir foo
4732 		 * 6) fsync foo or some file inside foo
4733 		 */
4734 		if (last_unlink_trans >= trans->transid)
4735 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4736 	}
4737 out:
4738 	btrfs_end_transaction(trans);
4739 	btrfs_btree_balance_dirty(fs_info);
4740 
4741 	return err;
4742 }
4743 
4744 /*
4745  * btrfs_truncate_block - read, zero a chunk and write a block
4746  * @inode - inode that we're zeroing
4747  * @from - the offset to start zeroing
4748  * @len - the length to zero, 0 to zero the entire range relative to the
4749  *	offset
4750  * @front - zero up to the offset instead of from the offset on
4751  *
4752  * This will find the block for the "from" offset, COW it and zero the
4753  * part we want zeroed.  This is used with truncate and hole punching.
4754  */
4755 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4756 			 int front)
4757 {
4758 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4759 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4760 	struct extent_io_tree *io_tree = &inode->io_tree;
4761 	struct btrfs_ordered_extent *ordered;
4762 	struct extent_state *cached_state = NULL;
4763 	struct extent_changeset *data_reserved = NULL;
4764 	bool only_release_metadata = false;
4765 	u32 blocksize = fs_info->sectorsize;
4766 	pgoff_t index = from >> PAGE_SHIFT;
4767 	unsigned offset = from & (blocksize - 1);
4768 	struct page *page;
4769 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4770 	size_t write_bytes = blocksize;
4771 	int ret = 0;
4772 	u64 block_start;
4773 	u64 block_end;
4774 
4775 	if (IS_ALIGNED(offset, blocksize) &&
4776 	    (!len || IS_ALIGNED(len, blocksize)))
4777 		goto out;
4778 
4779 	block_start = round_down(from, blocksize);
4780 	block_end = block_start + blocksize - 1;
4781 
4782 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4783 					  blocksize);
4784 	if (ret < 0) {
4785 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
4786 			/* For nocow case, no need to reserve data space */
4787 			only_release_metadata = true;
4788 		} else {
4789 			goto out;
4790 		}
4791 	}
4792 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4793 	if (ret < 0) {
4794 		if (!only_release_metadata)
4795 			btrfs_free_reserved_data_space(inode, data_reserved,
4796 						       block_start, blocksize);
4797 		goto out;
4798 	}
4799 again:
4800 	page = find_or_create_page(mapping, index, mask);
4801 	if (!page) {
4802 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4803 					     blocksize, true);
4804 		btrfs_delalloc_release_extents(inode, blocksize);
4805 		ret = -ENOMEM;
4806 		goto out;
4807 	}
4808 	ret = set_page_extent_mapped(page);
4809 	if (ret < 0)
4810 		goto out_unlock;
4811 
4812 	if (!PageUptodate(page)) {
4813 		ret = btrfs_read_folio(NULL, page_folio(page));
4814 		lock_page(page);
4815 		if (page->mapping != mapping) {
4816 			unlock_page(page);
4817 			put_page(page);
4818 			goto again;
4819 		}
4820 		if (!PageUptodate(page)) {
4821 			ret = -EIO;
4822 			goto out_unlock;
4823 		}
4824 	}
4825 	wait_on_page_writeback(page);
4826 
4827 	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4828 
4829 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4830 	if (ordered) {
4831 		unlock_extent_cached(io_tree, block_start, block_end,
4832 				     &cached_state);
4833 		unlock_page(page);
4834 		put_page(page);
4835 		btrfs_start_ordered_extent(ordered, 1);
4836 		btrfs_put_ordered_extent(ordered);
4837 		goto again;
4838 	}
4839 
4840 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4841 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4842 			 0, 0, &cached_state);
4843 
4844 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4845 					&cached_state);
4846 	if (ret) {
4847 		unlock_extent_cached(io_tree, block_start, block_end,
4848 				     &cached_state);
4849 		goto out_unlock;
4850 	}
4851 
4852 	if (offset != blocksize) {
4853 		if (!len)
4854 			len = blocksize - offset;
4855 		if (front)
4856 			memzero_page(page, (block_start - page_offset(page)),
4857 				     offset);
4858 		else
4859 			memzero_page(page, (block_start - page_offset(page)) + offset,
4860 				     len);
4861 		flush_dcache_page(page);
4862 	}
4863 	btrfs_page_clear_checked(fs_info, page, block_start,
4864 				 block_end + 1 - block_start);
4865 	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
4866 	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4867 
4868 	if (only_release_metadata)
4869 		set_extent_bit(&inode->io_tree, block_start, block_end,
4870 			       EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);
4871 
4872 out_unlock:
4873 	if (ret) {
4874 		if (only_release_metadata)
4875 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4876 		else
4877 			btrfs_delalloc_release_space(inode, data_reserved,
4878 					block_start, blocksize, true);
4879 	}
4880 	btrfs_delalloc_release_extents(inode, blocksize);
4881 	unlock_page(page);
4882 	put_page(page);
4883 out:
4884 	if (only_release_metadata)
4885 		btrfs_check_nocow_unlock(inode);
4886 	extent_changeset_free(data_reserved);
4887 	return ret;
4888 }
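
/*
 * Worked example, assuming a 4K blocksize: truncating a file down to size
 * 0x1234 ends up here with from == 0x1234, len == 0 and front == 0, so we
 * COW the block at 0x1000 and zero its bytes 0x234..0xfff.  Hole punching
 * passes front == 1 to zero the head of a partially punched block instead.
 */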
4889 
4890 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
4891 			     u64 offset, u64 len)
4892 {
4893 	struct btrfs_fs_info *fs_info = root->fs_info;
4894 	struct btrfs_trans_handle *trans;
4895 	struct btrfs_drop_extents_args drop_args = { 0 };
4896 	int ret;
4897 
4898 	/*
4899 	 * If NO_HOLES is enabled, we don't need to do anything.
4900 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4901 	 * or btrfs_update_inode() will be called, which guarantees that the next
4902 	 * fsync will know this inode was changed and needs to be logged.
4903 	 */
4904 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
4905 		return 0;
4906 
4907 	/*
4908 	 * 1 - for the one we're dropping
4909 	 * 1 - for the one we're adding
4910 	 * 1 - for updating the inode.
4911 	 */
4912 	trans = btrfs_start_transaction(root, 3);
4913 	if (IS_ERR(trans))
4914 		return PTR_ERR(trans);
4915 
4916 	drop_args.start = offset;
4917 	drop_args.end = offset + len;
4918 	drop_args.drop_cache = true;
4919 
4920 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4921 	if (ret) {
4922 		btrfs_abort_transaction(trans, ret);
4923 		btrfs_end_transaction(trans);
4924 		return ret;
4925 	}
4926 
4927 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
4928 			offset, 0, 0, len, 0, len, 0, 0, 0);
4929 	if (ret) {
4930 		btrfs_abort_transaction(trans, ret);
4931 	} else {
4932 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4933 		btrfs_update_inode(trans, root, inode);
4934 	}
4935 	btrfs_end_transaction(trans);
4936 	return ret;
4937 }
4938 
4939 /*
4940  * This function puts in dummy file extents for the area we're creating a hole
4941  * for.  So if we are truncating this file to a larger size we need to insert
4942  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4943  * for the range between oldsize and size.
4944  */
4945 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4946 {
4947 	struct btrfs_root *root = inode->root;
4948 	struct btrfs_fs_info *fs_info = root->fs_info;
4949 	struct extent_io_tree *io_tree = &inode->io_tree;
4950 	struct extent_map *em = NULL;
4951 	struct extent_state *cached_state = NULL;
4952 	struct extent_map_tree *em_tree = &inode->extent_tree;
4953 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4954 	u64 block_end = ALIGN(size, fs_info->sectorsize);
4955 	u64 last_byte;
4956 	u64 cur_offset;
4957 	u64 hole_size;
4958 	int err = 0;
4959 
4960 	/*
4961 	 * If our size started in the middle of a block we need to zero out the
4962 	 * rest of the block before we expand the i_size, otherwise we could
4963 	 * expose stale data.
4964 	 */
4965 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
4966 	if (err)
4967 		return err;
4968 
4969 	if (size <= hole_start)
4970 		return 0;
4971 
4972 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4973 					   &cached_state);
4974 	cur_offset = hole_start;
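	/*
	 * Walk the range [hole_start, block_end), filling each gap that is
	 * not already covered by a preallocated extent with an explicit hole
	 * extent and a matching EXTENT_MAP_HOLE mapping.
	 */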
4975 	while (1) {
4976 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4977 				      block_end - cur_offset);
4978 		if (IS_ERR(em)) {
4979 			err = PTR_ERR(em);
4980 			em = NULL;
4981 			break;
4982 		}
4983 		last_byte = min(extent_map_end(em), block_end);
4984 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4985 		hole_size = last_byte - cur_offset;
4986 
4987 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4988 			struct extent_map *hole_em;
4989 
4990 			err = maybe_insert_hole(root, inode, cur_offset,
4991 						hole_size);
4992 			if (err)
4993 				break;
4994 
4995 			err = btrfs_inode_set_file_extent_range(inode,
4996 							cur_offset, hole_size);
4997 			if (err)
4998 				break;
4999 
5000 			btrfs_drop_extent_cache(inode, cur_offset,
5001 						cur_offset + hole_size - 1, 0);
5002 			hole_em = alloc_extent_map();
5003 			if (!hole_em) {
5004 				btrfs_set_inode_full_sync(inode);
5005 				goto next;
5006 			}
5007 			hole_em->start = cur_offset;
5008 			hole_em->len = hole_size;
5009 			hole_em->orig_start = cur_offset;
5010 
5011 			hole_em->block_start = EXTENT_MAP_HOLE;
5012 			hole_em->block_len = 0;
5013 			hole_em->orig_block_len = 0;
5014 			hole_em->ram_bytes = hole_size;
5015 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
5016 			hole_em->generation = fs_info->generation;
5017 
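			/*
			 * Insert the hole mapping. If an overlapping mapping
			 * raced in, add_extent_mapping() returns -EEXIST, so
			 * drop the cached range and retry until it succeeds.
			 */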
5018 			while (1) {
5019 				write_lock(&em_tree->lock);
5020 				err = add_extent_mapping(em_tree, hole_em, 1);
5021 				write_unlock(&em_tree->lock);
5022 				if (err != -EEXIST)
5023 					break;
5024 				btrfs_drop_extent_cache(inode, cur_offset,
5025 							cur_offset +
5026 							hole_size - 1, 0);
5027 			}
5028 			free_extent_map(hole_em);
5029 		} else {
5030 			err = btrfs_inode_set_file_extent_range(inode,
5031 							cur_offset, hole_size);
5032 			if (err)
5033 				break;
5034 		}
5035 next:
5036 		free_extent_map(em);
5037 		em = NULL;
5038 		cur_offset = last_byte;
5039 		if (cur_offset >= block_end)
5040 			break;
5041 	}
5042 	free_extent_map(em);
5043 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5044 	return err;
5045 }
5046 
5047 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5048 {
5049 	struct btrfs_root *root = BTRFS_I(inode)->root;
5050 	struct btrfs_trans_handle *trans;
5051 	loff_t oldsize = i_size_read(inode);
5052 	loff_t newsize = attr->ia_size;
5053 	int mask = attr->ia_valid;
5054 	int ret;
5055 
5056 	/*
5057 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5058 	 * special case where we need to update the times despite not having
5059 	 * these flags set.  For all other operations the VFS sets these flags
5060 	 * explicitly if it wants a timestamp update.
5061 	 */
5062 	if (newsize != oldsize) {
5063 		inode_inc_iversion(inode);
5064 		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5065 			inode->i_ctime = inode->i_mtime =
5066 				current_time(inode);
5067 	}
5068 
5069 	if (newsize > oldsize) {
5070 		/*
5071 		 * Don't do an expanding truncate while snapshotting is ongoing.
5072 		 * This is to ensure the snapshot captures a fully consistent
5073 		 * state of this file - if the snapshot captures this expanding
5074 		 * truncation, it must capture all writes that happened before
5075 		 * this truncation.
5076 		 */
5077 		btrfs_drew_write_lock(&root->snapshot_lock);
5078 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5079 		if (ret) {
5080 			btrfs_drew_write_unlock(&root->snapshot_lock);
5081 			return ret;
5082 		}
5083 
5084 		trans = btrfs_start_transaction(root, 1);
5085 		if (IS_ERR(trans)) {
5086 			btrfs_drew_write_unlock(&root->snapshot_lock);
5087 			return PTR_ERR(trans);
5088 		}
5089 
5090 		i_size_write(inode, newsize);
5091 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5092 		pagecache_isize_extended(inode, oldsize, newsize);
5093 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
5094 		btrfs_drew_write_unlock(&root->snapshot_lock);
5095 		btrfs_end_transaction(trans);
5096 	} else {
5097 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5098 
5099 		if (btrfs_is_zoned(fs_info)) {
5100 			ret = btrfs_wait_ordered_range(inode,
5101 					ALIGN(newsize, fs_info->sectorsize),
5102 					(u64)-1);
5103 			if (ret)
5104 				return ret;
5105 		}
5106 
5107 		/*
5108 		 * We're truncating a file that used to have good data down to
5109 		 * zero. Make sure any new writes to the file get on disk
5110 		 * on close.
5111 		 */
5112 		if (newsize == 0)
5113 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5114 				&BTRFS_I(inode)->runtime_flags);
5115 
5116 		truncate_setsize(inode, newsize);
5117 
5118 		inode_dio_wait(inode);
5119 
5120 		ret = btrfs_truncate(inode, newsize == oldsize);
5121 		if (ret && inode->i_nlink) {
5122 			int err;
5123 
5124 			/*
5125 			 * Truncate failed, so fix up the in-memory size. We
5126 			 * adjusted disk_i_size down as we removed extents, so
5127 			 * wait for disk_i_size to be stable and then update the
5128 			 * in-memory size to match.
5129 			 */
5130 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5131 			if (err)
5132 				return err;
5133 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5134 		}
5135 	}
5136 
5137 	return ret;
5138 }
5139 
5140 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
5141 			 struct iattr *attr)
5142 {
5143 	struct inode *inode = d_inode(dentry);
5144 	struct btrfs_root *root = BTRFS_I(inode)->root;
5145 	int err;
5146 
5147 	if (btrfs_root_readonly(root))
5148 		return -EROFS;
5149 
5150 	err = setattr_prepare(mnt_userns, dentry, attr);
5151 	if (err)
5152 		return err;
5153 
5154 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5155 		err = btrfs_setsize(inode, attr);
5156 		if (err)
5157 			return err;
5158 	}
5159 
5160 	if (attr->ia_valid) {
5161 		setattr_copy(mnt_userns, inode, attr);
5162 		inode_inc_iversion(inode);
5163 		err = btrfs_dirty_inode(inode);
5164 
5165 		if (!err && attr->ia_valid & ATTR_MODE)
5166 			err = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
5167 	}
5168 
5169 	return err;
5170 }
5171 
5172 /*
5173  * While truncating the inode pages during eviction, we get the VFS
5174  * calling btrfs_invalidate_folio() against each folio of the inode. This
5175  * is slow because the calls to btrfs_invalidate_folio() result in a
5176  * huge number of calls to lock_extent_bits() and clear_extent_bit(),
5177  * which keep merging and splitting extent_state structures over and over,
5178  * wasting lots of time.
5179  *
5180  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5181  * skip all those expensive operations on a per folio basis and do only
5182  * the ordered io finishing, while we release here the extent_map and
5183  * extent_state structures, without the excessive merging and splitting.
5184  */
5185 static void evict_inode_truncate_pages(struct inode *inode)
5186 {
5187 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5188 	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5189 	struct rb_node *node;
5190 
5191 	ASSERT(inode->i_state & I_FREEING);
5192 	truncate_inode_pages_final(&inode->i_data);
5193 
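	/* Drop all cached extent maps for the inode, yielding as needed. */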
5194 	write_lock(&map_tree->lock);
5195 	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
5196 		struct extent_map *em;
5197 
5198 		node = rb_first_cached(&map_tree->map);
5199 		em = rb_entry(node, struct extent_map, rb_node);
5200 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5201 		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5202 		remove_extent_mapping(map_tree, em);
5203 		free_extent_map(em);
5204 		if (need_resched()) {
5205 			write_unlock(&map_tree->lock);
5206 			cond_resched();
5207 			write_lock(&map_tree->lock);
5208 		}
5209 	}
5210 	write_unlock(&map_tree->lock);
5211 
5212 	/*
5213 	 * Keep looping until we have no more ranges in the io tree.
5214 	 * We can have ongoing bios started by readahead whose endio
5215 	 * callback (extent_io.c:end_bio_extent_readpage) is still in
5216 	 * progress (it has unlocked the pages in the bio but has not yet
5217 	 * unlocked the ranges in the io tree). This means some ranges can
5218 	 * still be locked while eviction has started, because before
5219 	 * submitting those bios, which are executed by a separate task (a
5220 	 * work queue kthread), no inode references (inode->i_count) were
5221 	 * taken (they would be dropped in the end io callback of each bio).
5222 	 * Here we effectively end up waiting for those bios and for anyone
5223 	 * else holding locked ranges without having bumped the inode's
5224 	 * reference count - if we don't wait, when they access the inode's
5225 	 * io_tree to unlock a range it may be too late, leading to a
5226 	 * use-after-free issue.
5227 	 */
5228 	spin_lock(&io_tree->lock);
5229 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5230 		struct extent_state *state;
5231 		struct extent_state *cached_state = NULL;
5232 		u64 start;
5233 		u64 end;
5234 		unsigned state_flags;
5235 
5236 		node = rb_first(&io_tree->state);
5237 		state = rb_entry(node, struct extent_state, rb_node);
5238 		start = state->start;
5239 		end = state->end;
5240 		state_flags = state->state;
5241 		spin_unlock(&io_tree->lock);
5242 
5243 		lock_extent_bits(io_tree, start, end, &cached_state);
5244 
5245 		/*
5246 		 * If the range still has the DELALLOC flag, the extent never
5247 		 * reached disk and its reserved space won't be freed by a
5248 		 * delayed ref, so we need to free that reserved space here.
5249 		 * (Refer to the comment in btrfs_invalidate_folio, case 2.)
5250 		 *
5251 		 * Note that end is the offset of the last byte, hence the + 1.
5252 		 */
5253 		if (state_flags & EXTENT_DELALLOC)
5254 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5255 					       end - start + 1);
5256 
5257 		clear_extent_bit(io_tree, start, end,
5258 				 EXTENT_LOCKED | EXTENT_DELALLOC |
5259 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
5260 				 &cached_state);
5261 
5262 		cond_resched();
5263 		spin_lock(&io_tree->lock);
5264 	}
5265 	spin_unlock(&io_tree->lock);
5266 }
5267 
5268 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5269 							struct btrfs_block_rsv *rsv)
5270 {
5271 	struct btrfs_fs_info *fs_info = root->fs_info;
5272 	struct btrfs_trans_handle *trans;
5273 	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
5274 	int ret;
5275 
5276 	/*
5277 	 * Eviction should be taking place somewhere safe with respect to our
5278 	 * delayed iputs.  However the normal flushing code will run delayed
5279 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5280 	 *
5281 	 * We reserve the delayed_refs_extra here again because we can't use
5282 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5283 	 * above.  We reserve our extra bit here because we generate a ton of
5284 	 * delayed refs activity by truncating.
5285 	 *
5286 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5287 	 * if we fail to make this reservation we can retry without the
5288 	 * delayed_refs_extra so we can make some forward progress.
5289 	 */
5290 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5291 				     BTRFS_RESERVE_FLUSH_EVICT);
5292 	if (ret) {
5293 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5294 					     BTRFS_RESERVE_FLUSH_EVICT);
5295 		if (ret) {
5296 			btrfs_warn(fs_info,
5297 				   "could not allocate space for delete; will truncate on mount");
5298 			return ERR_PTR(-ENOSPC);
5299 		}
5300 		delayed_refs_extra = 0;
5301 	}
5302 
5303 	trans = btrfs_join_transaction(root);
5304 	if (IS_ERR(trans))
5305 		return trans;
5306 
5307 	if (delayed_refs_extra) {
5308 		trans->block_rsv = &fs_info->trans_block_rsv;
5309 		trans->bytes_reserved = delayed_refs_extra;
5310 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5311 					delayed_refs_extra, 1);
5312 	}
5313 	return trans;
5314 }
5315 
5316 void btrfs_evict_inode(struct inode *inode)
5317 {
5318 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5319 	struct btrfs_trans_handle *trans;
5320 	struct btrfs_root *root = BTRFS_I(inode)->root;
5321 	struct btrfs_block_rsv *rsv;
5322 	int ret;
5323 
5324 	trace_btrfs_inode_evict(inode);
5325 
5326 	if (!root) {
5327 		fsverity_cleanup_inode(inode);
5328 		clear_inode(inode);
5329 		return;
5330 	}
5331 
5332 	evict_inode_truncate_pages(inode);
5333 
5334 	if (inode->i_nlink &&
5335 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5336 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5337 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5338 		goto no_delete;
5339 
5340 	if (is_bad_inode(inode))
5341 		goto no_delete;
5342 
5343 	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5344 
5345 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5346 		goto no_delete;
5347 
5348 	if (inode->i_nlink > 0) {
5349 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5350 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5351 		goto no_delete;
5352 	}
5353 
5354 	/*
5355 	 * This makes sure the inode item in the tree is up to date and the
5356 	 * space for the inode update is released.
5357 	 */
5358 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5359 	if (ret)
5360 		goto no_delete;
5361 
5362 	/*
5363 	 * This drops any pending insert or delete operations we have for this
5364 	 * inode.  We could have a delayed dir index deletion queued up, but
5365 	 * we're removing the inode completely so that'll be taken care of in
5366 	 * the truncate.
5367 	 */
5368 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5369 
5370 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5371 	if (!rsv)
5372 		goto no_delete;
5373 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5374 	rsv->failfast = 1;
5375 
5376 	btrfs_i_size_write(BTRFS_I(inode), 0);
5377 
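	/*
	 * Truncate away the inode's items a chunk at a time, refilling the
	 * block reservation before each step. -ENOSPC and -EAGAIN simply
	 * mean we must retry; any other error aborts the eviction cleanup.
	 */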
5378 	while (1) {
5379 		struct btrfs_truncate_control control = {
5380 			.inode = BTRFS_I(inode),
5381 			.ino = btrfs_ino(BTRFS_I(inode)),
5382 			.new_size = 0,
5383 			.min_type = 0,
5384 		};
5385 
5386 		trans = evict_refill_and_join(root, rsv);
5387 		if (IS_ERR(trans))
5388 			goto free_rsv;
5389 
5390 		trans->block_rsv = rsv;
5391 
5392 		ret = btrfs_truncate_inode_items(trans, root, &control);
5393 		trans->block_rsv = &fs_info->trans_block_rsv;
5394 		btrfs_end_transaction(trans);
5395 		btrfs_btree_balance_dirty(fs_info);
5396 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5397 			goto free_rsv;
5398 		else if (!ret)
5399 			break;
5400 	}
5401 
5402 	/*
5403 	 * Errors here aren't a big deal; they just mean we leave orphan items in
5404 	 * the tree. They will be cleaned up on the next mount. If the inode
5405 	 * number gets reused, cleanup deletes the orphan item without doing
5406 	 * anything, and unlink reuses the existing orphan item.
5407 	 *
5408 	 * If it turns out that we are dropping too many of these, we might want
5409 	 * to add a mechanism for retrying these after a commit.
5410 	 */
5411 	trans = evict_refill_and_join(root, rsv);
5412 	if (!IS_ERR(trans)) {
5413 		trans->block_rsv = rsv;
5414 		btrfs_orphan_del(trans, BTRFS_I(inode));
5415 		trans->block_rsv = &fs_info->trans_block_rsv;
5416 		btrfs_end_transaction(trans);
5417 	}
5418 
5419 free_rsv:
5420 	btrfs_free_block_rsv(fs_info, rsv);
5421 no_delete:
5422 	/*
5423 	 * If we didn't successfully delete, the orphan item will still be in
5424 	 * the tree and we'll retry on the next mount. Again, we might also want
5425 	 * to retry these periodically in the future.
5426 	 */
5427 	btrfs_remove_delayed_node(BTRFS_I(inode));
5428 	fsverity_cleanup_inode(inode);
5429 	clear_inode(inode);
5430 }
5431 
5432 /*
5433  * Store the key found in the dir entry in @location, fill @type
5434  * with BTRFS_FT_*, and return 0.
5435  *
5436  * If no dir entry was found, returns -ENOENT.
5437  * If a corrupted location is found in the dir entry, returns -EUCLEAN.
5438  */
5439 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5440 			       struct btrfs_key *location, u8 *type)
5441 {
5442 	const char *name = dentry->d_name.name;
5443 	int namelen = dentry->d_name.len;
5444 	struct btrfs_dir_item *di;
5445 	struct btrfs_path *path;
5446 	struct btrfs_root *root = BTRFS_I(dir)->root;
5447 	int ret = 0;
5448 
5449 	path = btrfs_alloc_path();
5450 	if (!path)
5451 		return -ENOMEM;
5452 
5453 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5454 			name, namelen, 0);
5455 	if (IS_ERR_OR_NULL(di)) {
5456 		ret = di ? PTR_ERR(di) : -ENOENT;
5457 		goto out;
5458 	}
5459 
5460 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5461 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5462 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5463 		ret = -EUCLEAN;
5464 		btrfs_warn(root->fs_info,
5465 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5466 			   __func__, name, btrfs_ino(BTRFS_I(dir)),
5467 			   location->objectid, location->type, location->offset);
5468 	}
5469 	if (!ret)
5470 		*type = btrfs_dir_type(path->nodes[0], di);
5471 out:
5472 	btrfs_free_path(path);
5473 	return ret;
5474 }
5475 
5476 /*
5477  * When we hit a tree root in a directory, the btrfs part of the inode
5478  * needs to be changed to reflect the root directory of the tree root.  This
5479  * is kind of like crossing a mount point.
5480  */
5481 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5482 				    struct inode *dir,
5483 				    struct dentry *dentry,
5484 				    struct btrfs_key *location,
5485 				    struct btrfs_root **sub_root)
5486 {
5487 	struct btrfs_path *path;
5488 	struct btrfs_root *new_root;
5489 	struct btrfs_root_ref *ref;
5490 	struct extent_buffer *leaf;
5491 	struct btrfs_key key;
5492 	int ret;
5493 	int err = 0;
5494 
5495 	path = btrfs_alloc_path();
5496 	if (!path) {
5497 		err = -ENOMEM;
5498 		goto out;
5499 	}
5500 
5501 	err = -ENOENT;
5502 	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5503 	key.type = BTRFS_ROOT_REF_KEY;
5504 	key.offset = location->objectid;
5505 
5506 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5507 	if (ret) {
5508 		if (ret < 0)
5509 			err = ret;
5510 		goto out;
5511 	}
5512 
5513 	leaf = path->nodes[0];
5514 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5515 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5516 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5517 		goto out;
5518 
5519 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5520 				   (unsigned long)(ref + 1),
5521 				   dentry->d_name.len);
5522 	if (ret)
5523 		goto out;
5524 
5525 	btrfs_release_path(path);
5526 
5527 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5528 	if (IS_ERR(new_root)) {
5529 		err = PTR_ERR(new_root);
5530 		goto out;
5531 	}
5532 
5533 	*sub_root = new_root;
5534 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5535 	location->type = BTRFS_INODE_ITEM_KEY;
5536 	location->offset = 0;
5537 	err = 0;
5538 out:
5539 	btrfs_free_path(path);
5540 	return err;
5541 }
5542 
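/*
 * Insert the inode into the root's red-black tree of in-memory inodes,
 * keyed by inode number. If an entry for this ino already exists it must
 * be on its way to being freed (I_WILL_FREE or I_FREEING), so replace it
 * in place.
 */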
5543 static void inode_tree_add(struct inode *inode)
5544 {
5545 	struct btrfs_root *root = BTRFS_I(inode)->root;
5546 	struct btrfs_inode *entry;
5547 	struct rb_node **p;
5548 	struct rb_node *parent;
5549 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
5550 	u64 ino = btrfs_ino(BTRFS_I(inode));
5551 
5552 	if (inode_unhashed(inode))
5553 		return;
5554 	parent = NULL;
5555 	spin_lock(&root->inode_lock);
5556 	p = &root->inode_tree.rb_node;
5557 	while (*p) {
5558 		parent = *p;
5559 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5560 
5561 		if (ino < btrfs_ino(entry))
5562 			p = &parent->rb_left;
5563 		else if (ino > btrfs_ino(entry))
5564 			p = &parent->rb_right;
5565 		else {
5566 			WARN_ON(!(entry->vfs_inode.i_state &
5567 				  (I_WILL_FREE | I_FREEING)));
5568 			rb_replace_node(parent, new, &root->inode_tree);
5569 			RB_CLEAR_NODE(parent);
5570 			spin_unlock(&root->inode_lock);
5571 			return;
5572 		}
5573 	}
5574 	rb_link_node(new, parent, p);
5575 	rb_insert_color(new, &root->inode_tree);
5576 	spin_unlock(&root->inode_lock);
5577 }
5578 
5579 static void inode_tree_del(struct btrfs_inode *inode)
5580 {
5581 	struct btrfs_root *root = inode->root;
5582 	int empty = 0;
5583 
5584 	spin_lock(&root->inode_lock);
5585 	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5586 		rb_erase(&inode->rb_node, &root->inode_tree);
5587 		RB_CLEAR_NODE(&inode->rb_node);
5588 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5589 	}
5590 	spin_unlock(&root->inode_lock);
5591 
5592 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5593 		spin_lock(&root->inode_lock);
5594 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5595 		spin_unlock(&root->inode_lock);
5596 		if (empty)
5597 			btrfs_add_dead_root(root);
5598 	}
5599 }
5600 
5601 
5602 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5603 {
5604 	struct btrfs_iget_args *args = p;
5605 
5606 	inode->i_ino = args->ino;
5607 	BTRFS_I(inode)->location.objectid = args->ino;
5608 	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5609 	BTRFS_I(inode)->location.offset = 0;
5610 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5611 	BUG_ON(args->root && !BTRFS_I(inode)->root);
5612 	return 0;
5613 }
5614 
5615 static int btrfs_find_actor(struct inode *inode, void *opaque)
5616 {
5617 	struct btrfs_iget_args *args = opaque;
5618 
5619 	return args->ino == BTRFS_I(inode)->location.objectid &&
5620 		args->root == BTRFS_I(inode)->root;
5621 }
5622 
5623 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5624 				       struct btrfs_root *root)
5625 {
5626 	struct inode *inode;
5627 	struct btrfs_iget_args args;
5628 	unsigned long hashval = btrfs_inode_hash(ino, root);
5629 
5630 	args.ino = ino;
5631 	args.root = root;
5632 
5633 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5634 			     btrfs_init_locked_inode,
5635 			     (void *)&args);
5636 	return inode;
5637 }
5638 
5639 /*
5640  * Get an inode object given its inode number and corresponding root.
5641  * The path can be preallocated to prevent recursing back to iget through the
5642  * allocator. A NULL path is also valid but may require an additional
5643  * allocation later.
5644  */
5645 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5646 			      struct btrfs_root *root, struct btrfs_path *path)
5647 {
5648 	struct inode *inode;
5649 
5650 	inode = btrfs_iget_locked(s, ino, root);
5651 	if (!inode)
5652 		return ERR_PTR(-ENOMEM);
5653 
5654 	if (inode->i_state & I_NEW) {
5655 		int ret;
5656 
5657 		ret = btrfs_read_locked_inode(inode, path);
5658 		if (!ret) {
5659 			inode_tree_add(inode);
5660 			unlock_new_inode(inode);
5661 		} else {
5662 			iget_failed(inode);
5663 			/*
5664 			 * ret > 0 can come from btrfs_search_slot called by
5665 			 * btrfs_read_locked_inode, this means the inode item
5666 			 * was not found.
5667 			 */
5668 			if (ret > 0)
5669 				ret = -ENOENT;
5670 			inode = ERR_PTR(ret);
5671 		}
5672 	}
5673 
5674 	return inode;
5675 }
5676 
5677 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5678 {
5679 	return btrfs_iget_path(s, ino, root, NULL);
5680 }
5681 
5682 static struct inode *new_simple_dir(struct super_block *s,
5683 				    struct btrfs_key *key,
5684 				    struct btrfs_root *root)
5685 {
5686 	struct inode *inode = new_inode(s);
5687 
5688 	if (!inode)
5689 		return ERR_PTR(-ENOMEM);
5690 
5691 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5692 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5693 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5694 
5695 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5696 	/*
5697 	 * We only need lookup; the rest is read-only and there's no inode
5698 	 * associated with the dentry.
5699 	 */
5700 	inode->i_op = &simple_dir_inode_operations;
5701 	inode->i_opflags &= ~IOP_XATTR;
5702 	inode->i_fop = &simple_dir_operations;
5703 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5704 	inode->i_mtime = current_time(inode);
5705 	inode->i_atime = inode->i_mtime;
5706 	inode->i_ctime = inode->i_mtime;
5707 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5708 
5709 	return inode;
5710 }
5711 
5712 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5713 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5714 static_assert(BTRFS_FT_DIR == FT_DIR);
5715 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5716 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5717 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5718 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5719 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5720 
5721 static inline u8 btrfs_inode_type(struct inode *inode)
5722 {
5723 	return fs_umode_to_ftype(inode->i_mode);
5724 }
5725 
5726 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5727 {
5728 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5729 	struct inode *inode;
5730 	struct btrfs_root *root = BTRFS_I(dir)->root;
5731 	struct btrfs_root *sub_root = root;
5732 	struct btrfs_key location;
5733 	u8 di_type = 0;
5734 	int ret = 0;
5735 
5736 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5737 		return ERR_PTR(-ENAMETOOLONG);
5738 
5739 	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5740 	if (ret < 0)
5741 		return ERR_PTR(ret);
5742 
5743 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5744 		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5745 		if (IS_ERR(inode))
5746 			return inode;
5747 
5748 		/* Do extra check against inode mode with di_type */
5749 		if (btrfs_inode_type(inode) != di_type) {
5750 			btrfs_crit(fs_info,
5751 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5752 				  inode->i_mode, btrfs_inode_type(inode),
5753 				  di_type);
5754 			iput(inode);
5755 			return ERR_PTR(-EUCLEAN);
5756 		}
5757 		return inode;
5758 	}
5759 
5760 	ret = fixup_tree_root_location(fs_info, dir, dentry,
5761 				       &location, &sub_root);
5762 	if (ret < 0) {
5763 		if (ret != -ENOENT)
5764 			inode = ERR_PTR(ret);
5765 		else
5766 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5767 	} else {
5768 		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5769 	}
5770 	if (root != sub_root)
5771 		btrfs_put_root(sub_root);
5772 
5773 	if (!IS_ERR(inode) && root != sub_root) {
5774 		down_read(&fs_info->cleanup_work_sem);
5775 		if (!sb_rdonly(inode->i_sb))
5776 			ret = btrfs_orphan_cleanup(sub_root);
5777 		up_read(&fs_info->cleanup_work_sem);
5778 		if (ret) {
5779 			iput(inode);
5780 			inode = ERR_PTR(ret);
5781 		}
5782 	}
5783 
5784 	return inode;
5785 }
5786 
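/*
 * Tell the VFS to drop the dentry (return 1) when its root has no more
 * references or when it refers to the dummy empty-subvolume directory.
 */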
5787 static int btrfs_dentry_delete(const struct dentry *dentry)
5788 {
5789 	struct btrfs_root *root;
5790 	struct inode *inode = d_inode(dentry);
5791 
5792 	if (!inode && !IS_ROOT(dentry))
5793 		inode = d_inode(dentry->d_parent);
5794 
5795 	if (inode) {
5796 		root = BTRFS_I(inode)->root;
5797 		if (btrfs_root_refs(&root->root_item) == 0)
5798 			return 1;
5799 
5800 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5801 			return 1;
5802 	}
5803 	return 0;
5804 }
5805 
5806 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5807 				   unsigned int flags)
5808 {
5809 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5810 
5811 	if (inode == ERR_PTR(-ENOENT))
5812 		inode = NULL;
5813 	return d_splice_alias(inode, dentry);
5814 }
5815 
5816 /*
5817  * All this infrastructure exists because dir_emit can fault, and we are holding
5818  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5819  * our information into that, and then dir_emit from the buffer.  This is
5820  * similar to what NFS does, only we don't keep the buffer around in pagecache
5821  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5822  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5823  * tree lock.
5824  */
5825 static int btrfs_opendir(struct inode *inode, struct file *file)
5826 {
5827 	struct btrfs_file_private *private;
5828 
5829 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5830 	if (!private)
5831 		return -ENOMEM;
5832 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5833 	if (!private->filldir_buf) {
5834 		kfree(private);
5835 		return -ENOMEM;
5836 	}
5837 	file->private_data = private;
5838 	return 0;
5839 }
5840 
5841 struct dir_entry {
5842 	u64 ino;
5843 	u64 offset;
5844 	unsigned type;
5845 	int name_len;
5846 };
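/*
 * The filldir buffer is a packed sequence of variable-length records:
 * each struct dir_entry is immediately followed by name_len bytes of
 * file name, with no padding in between.
 */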
5847 
5848 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5849 {
5850 	while (entries--) {
5851 		struct dir_entry *entry = addr;
5852 		char *name = (char *)(entry + 1);
5853 
5854 		ctx->pos = get_unaligned(&entry->offset);
5855 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5856 					 get_unaligned(&entry->ino),
5857 					 get_unaligned(&entry->type)))
5858 			return 1;
5859 		addr += sizeof(struct dir_entry) +
5860 			get_unaligned(&entry->name_len);
5861 		ctx->pos++;
5862 	}
5863 	return 0;
5864 }
5865 
5866 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5867 {
5868 	struct inode *inode = file_inode(file);
5869 	struct btrfs_root *root = BTRFS_I(inode)->root;
5870 	struct btrfs_file_private *private = file->private_data;
5871 	struct btrfs_dir_item *di;
5872 	struct btrfs_key key;
5873 	struct btrfs_key found_key;
5874 	struct btrfs_path *path;
5875 	void *addr;
5876 	struct list_head ins_list;
5877 	struct list_head del_list;
5878 	int ret;
5879 	char *name_ptr;
5880 	int name_len;
5881 	int entries = 0;
5882 	int total_len = 0;
5883 	bool put = false;
5884 	struct btrfs_key location;
5885 
5886 	if (!dir_emit_dots(file, ctx))
5887 		return 0;
5888 
5889 	path = btrfs_alloc_path();
5890 	if (!path)
5891 		return -ENOMEM;
5892 
5893 	addr = private->filldir_buf;
5894 	path->reada = READA_FORWARD;
5895 
5896 	INIT_LIST_HEAD(&ins_list);
5897 	INIT_LIST_HEAD(&del_list);
5898 	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5899 
5900 again:
5901 	key.type = BTRFS_DIR_INDEX_KEY;
5902 	key.offset = ctx->pos;
5903 	key.objectid = btrfs_ino(BTRFS_I(inode));
5904 
5905 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5906 		struct dir_entry *entry;
5907 		struct extent_buffer *leaf = path->nodes[0];
5908 
5909 		if (found_key.objectid != key.objectid)
5910 			break;
5911 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5912 			break;
5913 		if (found_key.offset < ctx->pos)
5914 			continue;
5915 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5916 			continue;
5917 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5918 		name_len = btrfs_dir_name_len(leaf, di);
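		/*
		 * Flush the buffered entries to userspace when the next entry
		 * would overflow the buffer, then restart the search at the
		 * current position.
		 */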
5919 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5920 		    PAGE_SIZE) {
5921 			btrfs_release_path(path);
5922 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5923 			if (ret)
5924 				goto nopos;
5925 			addr = private->filldir_buf;
5926 			entries = 0;
5927 			total_len = 0;
5928 			goto again;
5929 		}
5930 
5931 		entry = addr;
5932 		put_unaligned(name_len, &entry->name_len);
5933 		name_ptr = (char *)(entry + 1);
5934 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
5935 				   name_len);
5936 		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
5937 				&entry->type);
5938 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
5939 		put_unaligned(location.objectid, &entry->ino);
5940 		put_unaligned(found_key.offset, &entry->offset);
5941 		entries++;
5942 		addr += sizeof(struct dir_entry) + name_len;
5943 		total_len += sizeof(struct dir_entry) + name_len;
5944 	}
5945 	/* Catch any error encountered during the iteration */
5946 	if (ret < 0)
5947 		goto err;
5948 
5949 	btrfs_release_path(path);
5950 
5951 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5952 	if (ret)
5953 		goto nopos;
5954 
5955 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5956 	if (ret)
5957 		goto nopos;
5958 
5959 	/*
5960 	 * Stop new entries from being returned after we return the last
5961 	 * entry.
5962 	 *
5963 	 * New directory entries are assigned a strictly increasing
5964 	 * offset.  This means that new entries created during readdir
5965 	 * are *guaranteed* to be seen in the future by that readdir.
5966 	 * This has broken buggy programs which operate on names as
5967 	 * they're returned by readdir.  Until we re-use freed offsets
5968 	 * we have this hack to stop new entries from being returned
5969 	 * under the assumption that they'll never reach this huge
5970 	 * offset.
5971 	 *
5972 	 * This is being careful not to overflow 32bit loff_t unless the
5973 	 * last entry requires it because doing so has broken 32bit apps
5974 	 * in the past.
5975 	 */
5976 	if (ctx->pos >= INT_MAX)
5977 		ctx->pos = LLONG_MAX;
5978 	else
5979 		ctx->pos = INT_MAX;
5980 nopos:
5981 	ret = 0;
5982 err:
5983 	if (put)
5984 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5985 	btrfs_free_path(path);
5986 	return ret;
5987 }
5988 
5989 /*
5990  * This is somewhat expensive, updating the tree every time the
5991  * inode changes.  But it is most likely to find the inode in cache.
5992  * FIXME: needs more benchmarking... there is no reason other than performance
5993  * to keep or drop this code.
5994  */
5995 static int btrfs_dirty_inode(struct inode *inode)
5996 {
5997 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5998 	struct btrfs_root *root = BTRFS_I(inode)->root;
5999 	struct btrfs_trans_handle *trans;
6000 	int ret;
6001 
6002 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6003 		return 0;
6004 
6005 	trans = btrfs_join_transaction(root);
6006 	if (IS_ERR(trans))
6007 		return PTR_ERR(trans);
6008 
6009 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6010 	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
6011 		/* Whoops, let's try again with a full transaction. */
6012 		btrfs_end_transaction(trans);
6013 		trans = btrfs_start_transaction(root, 1);
6014 		if (IS_ERR(trans))
6015 			return PTR_ERR(trans);
6016 
6017 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6018 	}
6019 	btrfs_end_transaction(trans);
6020 	if (BTRFS_I(inode)->delayed_node)
6021 		btrfs_balance_delayed_items(fs_info);
6022 
6023 	return ret;
6024 }
6025 
6026 /*
6027  * This is a copy of file_update_time.  We need this so we can return error on
6028  * ENOSPC for updating the inode in the case of file write and mmap writes.
6029  */
6030 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6031 			     int flags)
6032 {
6033 	struct btrfs_root *root = BTRFS_I(inode)->root;
6034 	bool dirty = flags & ~S_VERSION;
6035 
6036 	if (btrfs_root_readonly(root))
6037 		return -EROFS;
6038 
6039 	if (flags & S_VERSION)
6040 		dirty |= inode_maybe_inc_iversion(inode, dirty);
6041 	if (flags & S_CTIME)
6042 		inode->i_ctime = *now;
6043 	if (flags & S_MTIME)
6044 		inode->i_mtime = *now;
6045 	if (flags & S_ATIME)
6046 		inode->i_atime = *now;
6047 	return dirty ? btrfs_dirty_inode(inode) : 0;
6048 }
6049 
6050 /*
6051  * Find the highest existing sequence number in a directory
6052  * and then set the in-memory index_cnt variable to point at
6053  * the first free sequence number.
6054  */
6055 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6056 {
6057 	struct btrfs_root *root = inode->root;
6058 	struct btrfs_key key, found_key;
6059 	struct btrfs_path *path;
6060 	struct extent_buffer *leaf;
6061 	int ret;
6062 
6063 	key.objectid = btrfs_ino(inode);
6064 	key.type = BTRFS_DIR_INDEX_KEY;
6065 	key.offset = (u64)-1;
6066 
6067 	path = btrfs_alloc_path();
6068 	if (!path)
6069 		return -ENOMEM;
6070 
6071 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6072 	if (ret < 0)
6073 		goto out;
6074 	/* FIXME: we should be able to handle this */
6075 	if (ret == 0)
6076 		goto out;
6077 	ret = 0;
6078 
6079 	if (path->slots[0] == 0) {
6080 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6081 		goto out;
6082 	}
6083 
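	/*
	 * btrfs_search_slot() left us just past where the search key would
	 * be, so the previous slot holds the directory's last dir index
	 * item, if any.
	 */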
6084 	path->slots[0]--;
6085 
6086 	leaf = path->nodes[0];
6087 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6088 
6089 	if (found_key.objectid != btrfs_ino(inode) ||
6090 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6091 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6092 		goto out;
6093 	}
6094 
6095 	inode->index_cnt = found_key.offset + 1;
6096 out:
6097 	btrfs_free_path(path);
6098 	return ret;
6099 }
6100 
6101 /*
6102  * Helper to find a free sequence number in a given directory.  The current
6103  * code is very simple; later versions will do smarter things in the btree.
6104  */
6105 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6106 {
6107 	int ret = 0;
6108 
6109 	if (dir->index_cnt == (u64)-1) {
6110 		ret = btrfs_inode_delayed_dir_index_count(dir);
6111 		if (ret) {
6112 			ret = btrfs_set_inode_index_count(dir);
6113 			if (ret)
6114 				return ret;
6115 		}
6116 	}
6117 
6118 	*index = dir->index_cnt;
6119 	dir->index_cnt++;
6120 
6121 	return ret;
6122 }
6123 
6124 static int btrfs_insert_inode_locked(struct inode *inode)
6125 {
6126 	struct btrfs_iget_args args;
6127 
6128 	args.ino = BTRFS_I(inode)->location.objectid;
6129 	args.root = BTRFS_I(inode)->root;
6130 
6131 	return insert_inode_locked4(inode,
6132 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6133 		   btrfs_find_actor, &args);
6134 }
6135 
6136 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6137 			    unsigned int *trans_num_items)
6138 {
6139 	struct inode *dir = args->dir;
6140 	struct inode *inode = args->inode;
6141 	int ret;
6142 
6143 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6144 	if (ret)
6145 		return ret;
6146 
6147 	/* 1 to add inode item */
6148 	*trans_num_items = 1;
6149 	/* 1 to add compression property */
6150 	if (BTRFS_I(dir)->prop_compress)
6151 		(*trans_num_items)++;
6152 	/* 1 to add default ACL xattr */
6153 	if (args->default_acl)
6154 		(*trans_num_items)++;
6155 	/* 1 to add access ACL xattr */
6156 	if (args->acl)
6157 		(*trans_num_items)++;
6158 #ifdef CONFIG_SECURITY
6159 	/* 1 to add LSM xattr */
6160 	if (dir->i_security)
6161 		(*trans_num_items)++;
6162 #endif
6163 	if (args->orphan) {
6164 		/* 1 to add orphan item */
6165 		(*trans_num_items)++;
6166 	} else {
6167 		/*
6168 		 * 1 to add dir item
6169 		 * 1 to add dir index
6170 		 * 1 to update parent inode item
6171 		 *
6172 		 * No need for 1 unit for the inode ref item because it is
6173 		 * inserted in a batch together with the inode item at
6174 		 * btrfs_create_new_inode().
6175 		 */
6176 		*trans_num_items += 3;
6177 	}
6178 	return 0;
6179 }
6180 
6181 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6182 {
6183 	posix_acl_release(args->acl);
6184 	posix_acl_release(args->default_acl);
6185 }
6186 
6187 /*
6188  * Inherit flags from the parent inode.
6189  *
6190  * Currently only the compression flags and the cow flags are inherited.
6191  */
6192 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6193 {
6194 	unsigned int flags;
6195 
6196 	flags = BTRFS_I(dir)->flags;
6197 
6198 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6199 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6200 		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6201 	} else if (flags & BTRFS_INODE_COMPRESS) {
6202 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6203 		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6204 	}
6205 
6206 	if (flags & BTRFS_INODE_NODATACOW) {
6207 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6208 		if (S_ISREG(inode->i_mode))
6209 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6210 	}
6211 
6212 	btrfs_sync_inode_flags_to_i_flags(inode);
6213 }
6214 
6215 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6216 			   struct btrfs_new_inode_args *args)
6217 {
6218 	struct inode *dir = args->dir;
6219 	struct inode *inode = args->inode;
6220 	const char *name = args->orphan ? NULL : args->dentry->d_name.name;
6221 	int name_len = args->orphan ? 0 : args->dentry->d_name.len;
6222 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6223 	struct btrfs_root *root;
6224 	struct btrfs_inode_item *inode_item;
6225 	struct btrfs_key *location;
6226 	struct btrfs_path *path;
6227 	u64 objectid;
6228 	struct btrfs_inode_ref *ref;
6229 	struct btrfs_key key[2];
6230 	u32 sizes[2];
6231 	struct btrfs_item_batch batch;
6232 	unsigned long ptr;
6233 	int ret;
6234 
6235 	path = btrfs_alloc_path();
6236 	if (!path)
6237 		return -ENOMEM;
6238 
6239 	if (!args->subvol)
6240 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6241 	root = BTRFS_I(inode)->root;
6242 
6243 	ret = btrfs_get_free_objectid(root, &objectid);
6244 	if (ret)
6245 		goto out;
6246 	inode->i_ino = objectid;
6247 
6248 	if (args->orphan) {
6249 		/*
6250 		 * O_TMPFILE: set the link count to 0 so that from this point
6251 		 * on we fill in an inode item with the correct link count.
6252 		 */
6253 		set_nlink(inode, 0);
6254 	} else {
6255 		trace_btrfs_inode_request(dir);
6256 
6257 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6258 		if (ret)
6259 			goto out;
6260 	}
6261 	/* index_cnt is ignored for everything but a dir. */
6262 	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6263 	BTRFS_I(inode)->generation = trans->transid;
6264 	inode->i_generation = BTRFS_I(inode)->generation;
6265 
6266 	/*
6267 	 * Subvolumes don't inherit flags from their parent directory.
6268 	 * Originally this was probably by accident, but we probably can't
6269 	 * change it now without compatibility issues.
6270 	 */
6271 	if (!args->subvol)
6272 		btrfs_inherit_iflags(inode, dir);
6273 
6274 	if (S_ISREG(inode->i_mode)) {
6275 		if (btrfs_test_opt(fs_info, NODATASUM))
6276 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6277 		if (btrfs_test_opt(fs_info, NODATACOW))
6278 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6279 				BTRFS_INODE_NODATASUM;
6280 	}
6281 
6282 	location = &BTRFS_I(inode)->location;
6283 	location->objectid = objectid;
6284 	location->offset = 0;
6285 	location->type = BTRFS_INODE_ITEM_KEY;
6286 
6287 	ret = btrfs_insert_inode_locked(inode);
6288 	if (ret < 0) {
6289 		if (!args->orphan)
6290 			BTRFS_I(dir)->index_cnt--;
6291 		goto out;
6292 	}
6293 
6294 	/*
6295 	 * We could have gotten an inode number from somebody who was fsynced
6296 	 * and then removed in this same transaction, so let's just set full
6297 	 * sync since it will be a full sync anyway and this will blow away the
6298 	 * old info in the log.
6299 	 */
6300 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6301 
6302 	key[0].objectid = objectid;
6303 	key[0].type = BTRFS_INODE_ITEM_KEY;
6304 	key[0].offset = 0;
6305 
6306 	sizes[0] = sizeof(struct btrfs_inode_item);
6307 
6308 	if (!args->orphan) {
6309 		/*
6310 		 * Start new inodes with an inode_ref. This is slightly more
6311 		 * efficient for small numbers of hard links since they will
6312 		 * be packed into one item. Extended refs will kick in if we
6313 		 * add more hard links than can fit in the ref item.
6314 		 */
6315 		key[1].objectid = objectid;
6316 		key[1].type = BTRFS_INODE_REF_KEY;
6317 		if (args->subvol) {
6318 			key[1].offset = objectid;
6319 			sizes[1] = 2 + sizeof(*ref);
6320 		} else {
6321 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6322 			sizes[1] = name_len + sizeof(*ref);
6323 		}
6324 	}
6325 
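	/*
	 * Reserve leaf space for the inode item and, unless this is an
	 * orphan, the inode ref in a single batch so both are inserted in
	 * one tree operation.
	 */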
6326 	batch.keys = &key[0];
6327 	batch.data_sizes = &sizes[0];
6328 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6329 	batch.nr = args->orphan ? 1 : 2;
6330 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6331 	if (ret != 0) {
6332 		btrfs_abort_transaction(trans, ret);
6333 		goto discard;
6334 	}
6335 
6336 	inode->i_mtime = current_time(inode);
6337 	inode->i_atime = inode->i_mtime;
6338 	inode->i_ctime = inode->i_mtime;
6339 	BTRFS_I(inode)->i_otime = inode->i_mtime;
6340 
6341 	/*
6342 	 * We're going to fill the inode item now, so at this point the inode
6343 	 * must be fully initialized.
6344 	 */
6345 
6346 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6347 				  struct btrfs_inode_item);
6348 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6349 			     sizeof(*inode_item));
6350 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6351 
6352 	if (!args->orphan) {
6353 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6354 				     struct btrfs_inode_ref);
6355 		ptr = (unsigned long)(ref + 1);
6356 		if (args->subvol) {
6357 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6358 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6359 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6360 		} else {
6361 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6362 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6363 						  BTRFS_I(inode)->dir_index);
6364 			write_extent_buffer(path->nodes[0], name, ptr, name_len);
6365 		}
6366 	}
6367 
6368 	btrfs_mark_buffer_dirty(path->nodes[0]);
6369 	btrfs_release_path(path);
6370 
6371 	if (args->subvol) {
6372 		struct inode *parent;
6373 
6374 		/*
6375 		 * Subvolumes inherit properties from their parent subvolume,
6376 		 * not the directory they were created in.
6377 		 */
6378 		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6379 				    BTRFS_I(dir)->root);
6380 		if (IS_ERR(parent)) {
6381 			ret = PTR_ERR(parent);
6382 		} else {
6383 			ret = btrfs_inode_inherit_props(trans, inode, parent);
6384 			iput(parent);
6385 		}
6386 	} else {
6387 		ret = btrfs_inode_inherit_props(trans, inode, dir);
6388 	}
6389 	if (ret) {
6390 		btrfs_err(fs_info,
6391 			  "error inheriting props for ino %llu (root %llu): %d",
6392 			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6393 			  ret);
6394 	}
6395 
6396 	/*
6397 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6398 	 * probably a bug.
6399 	 */
6400 	if (!args->subvol) {
6401 		ret = btrfs_init_inode_security(trans, args);
6402 		if (ret) {
6403 			btrfs_abort_transaction(trans, ret);
6404 			goto discard;
6405 		}
6406 	}
6407 
6408 	inode_tree_add(inode);
6409 
6410 	trace_btrfs_inode_new(inode);
6411 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6412 
6413 	btrfs_update_root_times(trans, root);
6414 
6415 	if (args->orphan) {
6416 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6417 	} else {
6418 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6419 				     name_len, 0, BTRFS_I(inode)->dir_index);
6420 	}
6421 	if (ret) {
6422 		btrfs_abort_transaction(trans, ret);
6423 		goto discard;
6424 	}
6425 
6426 	ret = 0;
6427 	goto out;
6428 
6429 discard:
6430 	/*
6431 	 * discard_new_inode() calls iput(), but the caller owns the reference
6432 	 * to the inode.
6433 	 */
6434 	ihold(inode);
6435 	discard_new_inode(inode);
6436 out:
6437 	btrfs_free_path(path);
6438 	return ret;
6439 }
6440 
6441 /*
6442  * Utility function to add 'inode' into 'parent_inode' with
6443  * a given name and a given sequence number.
6444  * If 'add_backref' is true, also insert a backref from the
6445  * inode to the parent directory.
6446  */
6447 int btrfs_add_link(struct btrfs_trans_handle *trans,
6448 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6449 		   const char *name, int name_len, int add_backref, u64 index)
6450 {
6451 	int ret = 0;
6452 	struct btrfs_key key;
6453 	struct btrfs_root *root = parent_inode->root;
6454 	u64 ino = btrfs_ino(inode);
6455 	u64 parent_ino = btrfs_ino(parent_inode);
6456 
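	/*
	 * When linking a subvolume root (ino == BTRFS_FIRST_FREE_OBJECTID),
	 * the dir item must point at the subvolume's root key instead of an
	 * inode item, and the backref lives in the root tree as a root ref.
	 */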
6457 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6458 		memcpy(&key, &inode->root->root_key, sizeof(key));
6459 	} else {
6460 		key.objectid = ino;
6461 		key.type = BTRFS_INODE_ITEM_KEY;
6462 		key.offset = 0;
6463 	}
6464 
6465 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6466 		ret = btrfs_add_root_ref(trans, key.objectid,
6467 					 root->root_key.objectid, parent_ino,
6468 					 index, name, name_len);
6469 	} else if (add_backref) {
6470 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6471 					     parent_ino, index);
6472 	}
6473 
6474 	/* Nothing to clean up yet */
6475 	if (ret)
6476 		return ret;
6477 
6478 	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
6479 				    btrfs_inode_type(&inode->vfs_inode), index);
6480 	if (ret == -EEXIST || ret == -EOVERFLOW)
6481 		goto fail_dir_item;
6482 	else if (ret) {
6483 		btrfs_abort_transaction(trans, ret);
6484 		return ret;
6485 	}
6486 
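	/*
	 * Directory i_size counts name_len twice per entry: once for the
	 * dir item and once for the dir index item.
	 */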
6487 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6488 			   name_len * 2);
6489 	inode_inc_iversion(&parent_inode->vfs_inode);
6490 	/*
6491 	 * If we are replaying a log tree, we do not want to update the mtime
6492 	 * and ctime of the parent directory with the current time, since the
6493 	 * log replay procedure is responsible for setting them to their correct
6494 	 * values (the ones it had when the fsync was done).
6495 	 */
6496 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
6497 		struct timespec64 now = current_time(&parent_inode->vfs_inode);
6498 
6499 		parent_inode->vfs_inode.i_mtime = now;
6500 		parent_inode->vfs_inode.i_ctime = now;
6501 	}
6502 	ret = btrfs_update_inode(trans, root, parent_inode);
6503 	if (ret)
6504 		btrfs_abort_transaction(trans, ret);
6505 	return ret;
6506 
6507 fail_dir_item:
6508 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6509 		u64 local_index;
6510 		int err;
6511 		err = btrfs_del_root_ref(trans, key.objectid,
6512 					 root->root_key.objectid, parent_ino,
6513 					 &local_index, name, name_len);
6514 		if (err)
6515 			btrfs_abort_transaction(trans, err);
6516 	} else if (add_backref) {
6517 		u64 local_index;
6518 		int err;
6519 
6520 		err = btrfs_del_inode_ref(trans, root, name, name_len,
6521 					  ino, parent_ino, &local_index);
6522 		if (err)
6523 			btrfs_abort_transaction(trans, err);
6524 	}
6525 
6526 	/* Return the original error code */
6527 	return ret;
6528 }
6529 
6530 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6531 			       struct inode *inode)
6532 {
6533 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6534 	struct btrfs_root *root = BTRFS_I(dir)->root;
6535 	struct btrfs_new_inode_args new_inode_args = {
6536 		.dir = dir,
6537 		.dentry = dentry,
6538 		.inode = inode,
6539 	};
6540 	unsigned int trans_num_items;
6541 	struct btrfs_trans_handle *trans;
6542 	int err;
6543 
6544 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6545 	if (err)
6546 		goto out_inode;
6547 
6548 	trans = btrfs_start_transaction(root, trans_num_items);
6549 	if (IS_ERR(trans)) {
6550 		err = PTR_ERR(trans);
6551 		goto out_new_inode_args;
6552 	}
6553 
6554 	err = btrfs_create_new_inode(trans, &new_inode_args);
6555 	if (!err)
6556 		d_instantiate_new(dentry, inode);
6557 
6558 	btrfs_end_transaction(trans);
6559 	btrfs_btree_balance_dirty(fs_info);
6560 out_new_inode_args:
6561 	btrfs_new_inode_args_destroy(&new_inode_args);
6562 out_inode:
6563 	if (err)
6564 		iput(inode);
6565 	return err;
6566 }
6567 
6568 static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
6569 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6570 {
6571 	struct inode *inode;
6572 
6573 	inode = new_inode(dir->i_sb);
6574 	if (!inode)
6575 		return -ENOMEM;
6576 	inode_init_owner(mnt_userns, inode, dir, mode);
6577 	inode->i_op = &btrfs_special_inode_operations;
6578 	init_special_inode(inode, inode->i_mode, rdev);
6579 	return btrfs_create_common(dir, dentry, inode);
6580 }
6581 
6582 static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
6583 			struct dentry *dentry, umode_t mode, bool excl)
6584 {
6585 	struct inode *inode;
6586 
6587 	inode = new_inode(dir->i_sb);
6588 	if (!inode)
6589 		return -ENOMEM;
6590 	inode_init_owner(mnt_userns, inode, dir, mode);
6591 	inode->i_fop = &btrfs_file_operations;
6592 	inode->i_op = &btrfs_file_inode_operations;
6593 	inode->i_mapping->a_ops = &btrfs_aops;
6594 	return btrfs_create_common(dir, dentry, inode);
6595 }
6596 
6597 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6598 		      struct dentry *dentry)
6599 {
6600 	struct btrfs_trans_handle *trans = NULL;
6601 	struct btrfs_root *root = BTRFS_I(dir)->root;
6602 	struct inode *inode = d_inode(old_dentry);
6603 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6604 	u64 index;
6605 	int err;
6606 	int drop_inode = 0;
6607 
6608 	/* Do not allow hard links across subvolumes of the same device. */
6609 	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6610 		return -EXDEV;
6611 
6612 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6613 		return -EMLINK;
6614 
6615 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6616 	if (err)
6617 		goto fail;
6618 
6619 	/*
6620 	 * 2 items for inode and inode ref
6621 	 * 2 items for dir items
6622 	 * 1 item for parent inode
6623 	 * 1 item for orphan item deletion if O_TMPFILE
6624 	 */
6625 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6626 	if (IS_ERR(trans)) {
6627 		err = PTR_ERR(trans);
6628 		trans = NULL;
6629 		goto fail;
6630 	}
6631 
6632 	/* There are several dir indexes for this inode, clear the cache. */
6633 	BTRFS_I(inode)->dir_index = 0ULL;
6634 	inc_nlink(inode);
6635 	inode_inc_iversion(inode);
6636 	inode->i_ctime = current_time(inode);
6637 	ihold(inode);
6638 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6639 
6640 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6641 			     dentry->d_name.name, dentry->d_name.len, 1, index);
6642 
6643 	if (err) {
6644 		drop_inode = 1;
6645 	} else {
6646 		struct dentry *parent = dentry->d_parent;
6647 
6648 		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
6649 		if (err)
6650 			goto fail;
6651 		if (inode->i_nlink == 1) {
6652 			/*
6653 			 * If the new hard link count is 1, it's a file created
6654 			 * with the open(2) O_TMPFILE flag.
6655 			 */
6656 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6657 			if (err)
6658 				goto fail;
6659 		}
6660 		d_instantiate(dentry, inode);
6661 		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6662 	}
6663 
6664 fail:
6665 	if (trans)
6666 		btrfs_end_transaction(trans);
6667 	if (drop_inode) {
6668 		inode_dec_link_count(inode);
6669 		iput(inode);
6670 	}
6671 	btrfs_btree_balance_dirty(fs_info);
6672 	return err;
6673 }
6674 
6675 static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
6676 		       struct dentry *dentry, umode_t mode)
6677 {
6678 	struct inode *inode;
6679 
6680 	inode = new_inode(dir->i_sb);
6681 	if (!inode)
6682 		return -ENOMEM;
6683 	inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode);
6684 	inode->i_op = &btrfs_dir_inode_operations;
6685 	inode->i_fop = &btrfs_dir_file_operations;
6686 	return btrfs_create_common(dir, dentry, inode);
6687 }
6688 
6689 static noinline int uncompress_inline(struct btrfs_path *path,
6690 				      struct page *page,
6691 				      size_t pg_offset, u64 extent_offset,
6692 				      struct btrfs_file_extent_item *item)
6693 {
6694 	int ret;
6695 	struct extent_buffer *leaf = path->nodes[0];
6696 	char *tmp;
6697 	size_t max_size;
6698 	unsigned long inline_size;
6699 	unsigned long ptr;
6700 	int compress_type;
6701 
6702 	WARN_ON(pg_offset != 0);
6703 	compress_type = btrfs_file_extent_compression(leaf, item);
6704 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6705 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6706 	tmp = kmalloc(inline_size, GFP_NOFS);
6707 	if (!tmp)
6708 		return -ENOMEM;
6709 	ptr = btrfs_file_extent_inline_start(item);
6710 
6711 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6712 
6713 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6714 	ret = btrfs_decompress(compress_type, tmp, page,
6715 			       extent_offset, inline_size, max_size);
6716 
6717 	/*
6718 	 * The decompression code contains a memset to fill in any space between the
6719 	 * end of the uncompressed data and the end of max_size, in case the
6720 	 * decompressed data ends up shorter than ram_bytes.  That doesn't cover the
6721 	 * hole between the end of an inline extent and the beginning of the next
6722 	 * block, so we zero that region here.
6723 	 */
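	/*
	 * For illustration (hypothetical numbers): on a 4K page with
	 * pg_offset == 0, ram_bytes == 3000 and only 2500 bytes of actual
	 * decompressed output, btrfs_decompress() zeroes bytes [2500, 3000)
	 * and the memzero_page() below zeroes bytes [3000, 4096).
	 */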
6724 
6725 	if (max_size + pg_offset < PAGE_SIZE)
6726 		memzero_page(page, pg_offset + max_size,
6727 			     PAGE_SIZE - max_size - pg_offset);
6728 	kfree(tmp);
6729 	return ret;
6730 }
6731 
6732 /**
6733  * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6734  * @inode:	file to search in
6735  * @page:	page to read extent data into if the extent is inline
6736  * @pg_offset:	offset into @page to copy to
6737  * @start:	file offset
6738  * @len:	length of range starting at @start
6739  *
6740  * This returns the first &struct extent_map which overlaps with the given
6741  * range, reading it from the B-tree and caching it if necessary. Note that
6742  * there may be more extents which overlap the given range after the returned
6743  * extent_map.
6744  *
6745  * If @page is not NULL and the extent is inline, this also reads the extent
6746  * data directly into the page and marks the extent up to date in the io_tree.
6747  *
6748  * Return: ERR_PTR on error, non-NULL extent_map on success.
6749  */
6750 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6751 				    struct page *page, size_t pg_offset,
6752 				    u64 start, u64 len)
6753 {
6754 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6755 	int ret = 0;
6756 	u64 extent_start = 0;
6757 	u64 extent_end = 0;
6758 	u64 objectid = btrfs_ino(inode);
6759 	int extent_type = -1;
6760 	struct btrfs_path *path = NULL;
6761 	struct btrfs_root *root = inode->root;
6762 	struct btrfs_file_extent_item *item;
6763 	struct extent_buffer *leaf;
6764 	struct btrfs_key found_key;
6765 	struct extent_map *em = NULL;
6766 	struct extent_map_tree *em_tree = &inode->extent_tree;
6767 	struct extent_io_tree *io_tree = &inode->io_tree;
6768 
6769 	read_lock(&em_tree->lock);
6770 	em = lookup_extent_mapping(em_tree, start, len);
6771 	read_unlock(&em_tree->lock);
6772 
6773 	if (em) {
6774 		if (em->start > start || em->start + em->len <= start)
6775 			free_extent_map(em);
6776 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6777 			free_extent_map(em);
6778 		else
6779 			goto out;
6780 	}
6781 	em = alloc_extent_map();
6782 	if (!em) {
6783 		ret = -ENOMEM;
6784 		goto out;
6785 	}
6786 	em->start = EXTENT_MAP_HOLE;
6787 	em->orig_start = EXTENT_MAP_HOLE;
6788 	em->len = (u64)-1;
6789 	em->block_len = (u64)-1;
6790 
6791 	path = btrfs_alloc_path();
6792 	if (!path) {
6793 		ret = -ENOMEM;
6794 		goto out;
6795 	}
6796 
6797 	/* Chances are we'll be called again, so go ahead and do readahead */
6798 	path->reada = READA_FORWARD;
6799 
6800 	/*
6801 	 * The same explanation as in load_free_space_cache() applies here as well:
6802 	 * we only read when we're loading the free space cache, and at that
6803 	 * point the commit_root has everything we need.
6804 	 */
6805 	if (btrfs_is_free_space_inode(inode)) {
6806 		path->search_commit_root = 1;
6807 		path->skip_locking = 1;
6808 	}
6809 
6810 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6811 	if (ret < 0) {
6812 		goto out;
6813 	} else if (ret > 0) {
6814 		if (path->slots[0] == 0)
6815 			goto not_found;
6816 		path->slots[0]--;
6817 		ret = 0;
6818 	}
6819 
6820 	leaf = path->nodes[0];
6821 	item = btrfs_item_ptr(leaf, path->slots[0],
6822 			      struct btrfs_file_extent_item);
6823 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6824 	if (found_key.objectid != objectid ||
6825 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6826 		/*
6827 		 * If we back up past the first extent we want to move forward
6828 		 * and see if there is an extent in front of us; otherwise we'd
6829 		 * report a hole for our whole search range, which can cause
6830 		 * problems.
6831 		 */
6832 		extent_end = start;
6833 		goto next;
6834 	}
6835 
6836 	extent_type = btrfs_file_extent_type(leaf, item);
6837 	extent_start = found_key.offset;
6838 	extent_end = btrfs_file_extent_end(path);
6839 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6840 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6841 		/* Only a regular file can have regular/prealloc extents */
6842 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6843 			ret = -EUCLEAN;
6844 			btrfs_crit(fs_info,
6845 		"regular/prealloc extent found for non-regular inode %llu",
6846 				   btrfs_ino(inode));
6847 			goto out;
6848 		}
6849 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6850 						       extent_start);
6851 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6852 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6853 						      path->slots[0],
6854 						      extent_start);
6855 	}
6856 next:
6857 	if (start >= extent_end) {
6858 		path->slots[0]++;
6859 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6860 			ret = btrfs_next_leaf(root, path);
6861 			if (ret < 0)
6862 				goto out;
6863 			else if (ret > 0)
6864 				goto not_found;
6865 
6866 			leaf = path->nodes[0];
6867 		}
6868 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6869 		if (found_key.objectid != objectid ||
6870 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6871 			goto not_found;
6872 		if (start + len <= found_key.offset)
6873 			goto not_found;
6874 		if (start > found_key.offset)
6875 			goto next;
6876 
6877 		/* The next extent starts inside our range; the gap before it is a hole */
6878 		em->start = start;
6879 		em->orig_start = start;
6880 		em->len = found_key.offset - start;
6881 		em->block_start = EXTENT_MAP_HOLE;
6882 		goto insert;
6883 	}
6884 
6885 	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
6886 
6887 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6888 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6889 		goto insert;
6890 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6891 		unsigned long ptr;
6892 		char *map;
6893 		size_t size;
6894 		size_t extent_offset;
6895 		size_t copy_size;
6896 
6897 		if (!page)
6898 			goto out;
6899 
6900 		size = btrfs_file_extent_ram_bytes(leaf, item);
6901 		extent_offset = page_offset(page) + pg_offset - extent_start;
6902 		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6903 				  size - extent_offset);
6904 		em->start = extent_start + extent_offset;
6905 		em->len = ALIGN(copy_size, fs_info->sectorsize);
6906 		em->orig_block_len = em->len;
6907 		em->orig_start = em->start;
6908 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6909 
6910 		if (!PageUptodate(page)) {
6911 			if (btrfs_file_extent_compression(leaf, item) !=
6912 			    BTRFS_COMPRESS_NONE) {
6913 				ret = uncompress_inline(path, page, pg_offset,
6914 							extent_offset, item);
6915 				if (ret)
6916 					goto out;
6917 			} else {
6918 				map = kmap_local_page(page);
6919 				read_extent_buffer(leaf, map + pg_offset, ptr,
6920 						   copy_size);
6921 				if (pg_offset + copy_size < PAGE_SIZE) {
6922 					memset(map + pg_offset + copy_size, 0,
6923 					       PAGE_SIZE - pg_offset -
6924 					       copy_size);
6925 				}
6926 				kunmap_local(map);
6927 			}
6928 			flush_dcache_page(page);
6929 		}
6930 		set_extent_uptodate(io_tree, em->start,
6931 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6932 		goto insert;
6933 	}
6934 not_found:
6935 	em->start = start;
6936 	em->orig_start = start;
6937 	em->len = len;
6938 	em->block_start = EXTENT_MAP_HOLE;
6939 insert:
6940 	ret = 0;
6941 	btrfs_release_path(path);
6942 	if (em->start > start || extent_map_end(em) <= start) {
6943 		btrfs_err(fs_info,
6944 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6945 			  em->start, em->len, start, len);
6946 		ret = -EIO;
6947 		goto out;
6948 	}
6949 
6950 	write_lock(&em_tree->lock);
6951 	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6952 	write_unlock(&em_tree->lock);
6953 out:
6954 	btrfs_free_path(path);
6955 
6956 	trace_btrfs_get_extent(root, inode, em);
6957 
6958 	if (ret) {
6959 		free_extent_map(em);
6960 		return ERR_PTR(ret);
6961 	}
6962 	return em;
6963 }
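
/*
 * Sketch of a typical lookup (illustrative only, not from any specific
 * caller); the returned map may describe a hole and may cover only part
 * of [start, start + len):
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	if (em->block_start == EXTENT_MAP_HOLE)
 *		... the range begins with a hole ...
 *	free_extent_map(em);
 */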
6964 
6965 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
6966 					   u64 start, u64 len)
6967 {
6968 	struct extent_map *em;
6969 	struct extent_map *hole_em = NULL;
6970 	u64 delalloc_start = start;
6971 	u64 end;
6972 	u64 delalloc_len;
6973 	u64 delalloc_end;
6974 	int err = 0;
6975 
6976 	em = btrfs_get_extent(inode, NULL, 0, start, len);
6977 	if (IS_ERR(em))
6978 		return em;
6979 	/*
6980 	 * If our em maps to:
6981 	 * - a hole or
6982 	 * - a pre-alloc extent,
6983 	 * there might actually be delalloc bytes behind it.
6984 	 */
6985 	if (em->block_start != EXTENT_MAP_HOLE &&
6986 	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6987 		return em;
6988 	else
6989 		hole_em = em;
6990 
6991 	/* check to see if we've wrapped (len == -1 or similar) */
6992 	end = start + len;
6993 	if (end < start)
6994 		end = (u64)-1;
6995 	else
6996 		end -= 1;
6997 
6998 	em = NULL;
6999 
7000 	/* OK, we didn't find anything, let's look for delalloc */
7001 	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
7002 				 end, len, EXTENT_DELALLOC, 1);
7003 	delalloc_end = delalloc_start + delalloc_len;
7004 	if (delalloc_end < delalloc_start)
7005 		delalloc_end = (u64)-1;
7006 
7007 	/*
7008 	 * We didn't find anything useful, return the original results from
7009 	 * get_extent()
7010 	 */
7011 	if (delalloc_start > end || delalloc_end <= start) {
7012 		em = hole_em;
7013 		hole_em = NULL;
7014 		goto out;
7015 	}
7016 
7017 	/*
7018 	 * Adjust delalloc_start to make sure it doesn't go backwards from
7019 	 * the start the caller passed in
7020 	 */
7021 	delalloc_start = max(start, delalloc_start);
7022 	delalloc_len = delalloc_end - delalloc_start;
7023 
7024 	if (delalloc_len > 0) {
7025 		u64 hole_start;
7026 		u64 hole_len;
7027 		const u64 hole_end = extent_map_end(hole_em);
7028 
7029 		em = alloc_extent_map();
7030 		if (!em) {
7031 			err = -ENOMEM;
7032 			goto out;
7033 		}
7034 
7035 		ASSERT(hole_em);
7036 		/*
7037 		 * When btrfs_get_extent() can't find anything it returns one
7038 		 * huge hole.
7039 		 *
7040 		 * Make sure what it found really fits our range, and adjust it
7041 		 * so it is based on the start from the caller.
7042 		 */
7043 		if (hole_end <= start || hole_em->start > end) {
7044 			free_extent_map(hole_em);
7045 			hole_em = NULL;
7046 		} else {
7047 			hole_start = max(hole_em->start, start);
7048 			hole_len = hole_end - hole_start;
7049 		}
7050 
7051 		if (hole_em && delalloc_start > hole_start) {
7052 			/*
7053 			 * Our hole starts before our delalloc, so we have to
7054 			 * return just the parts of the hole that go until the
7055 			 * delalloc starts
7056 			 */
7057 			em->len = min(hole_len, delalloc_start - hole_start);
7058 			em->start = hole_start;
7059 			em->orig_start = hole_start;
7060 			/*
7061 			 * Don't adjust block start at all, it is fixed at
7062 			 * EXTENT_MAP_HOLE
7063 			 */
7064 			em->block_start = hole_em->block_start;
7065 			em->block_len = hole_len;
7066 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7067 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7068 		} else {
7069 			/*
7070 			 * Hole is out of passed range or it starts after
7071 			 * delalloc range
7072 			 */
7073 			em->start = delalloc_start;
7074 			em->len = delalloc_len;
7075 			em->orig_start = delalloc_start;
7076 			em->block_start = EXTENT_MAP_DELALLOC;
7077 			em->block_len = delalloc_len;
7078 		}
7079 	} else {
7080 		return hole_em;
7081 	}
7082 out:
7083 
7084 	free_extent_map(hole_em);
7085 	if (err) {
7086 		free_extent_map(em);
7087 		return ERR_PTR(err);
7088 	}
7089 	return em;
7090 }
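
/*
 * Illustration with hypothetical offsets: if [0, 128K) maps to a hole but
 * [64K, 96K) carries delalloc, a lookup at offset 0 returns the hole
 * trimmed to [0, 64K), and a follow-up lookup at 64K returns a delalloc
 * mapping (block_start == EXTENT_MAP_DELALLOC) for [64K, 96K).
 */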
7091 
7092 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
7093 						  const u64 start,
7094 						  const u64 len,
7095 						  const u64 orig_start,
7096 						  const u64 block_start,
7097 						  const u64 block_len,
7098 						  const u64 orig_block_len,
7099 						  const u64 ram_bytes,
7100 						  const int type)
7101 {
7102 	struct extent_map *em = NULL;
7103 	int ret;
7104 
7105 	if (type != BTRFS_ORDERED_NOCOW) {
7106 		em = create_io_em(inode, start, len, orig_start, block_start,
7107 				  block_len, orig_block_len, ram_bytes,
7108 				  BTRFS_COMPRESS_NONE, /* compress_type */
7109 				  type);
7110 		if (IS_ERR(em))
7111 			goto out;
7112 	}
7113 	ret = btrfs_add_ordered_extent(inode, start, len, len, block_start,
7114 				       block_len, 0,
7115 				       (1 << type) |
7116 				       (1 << BTRFS_ORDERED_DIRECT),
7117 				       BTRFS_COMPRESS_NONE);
7118 	if (ret) {
7119 		if (em) {
7120 			free_extent_map(em);
7121 			btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
7122 		}
7123 		em = ERR_PTR(ret);
7124 	}
7125 out:
7127 	return em;
7128 }
7129 
7130 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
7131 						  u64 start, u64 len)
7132 {
7133 	struct btrfs_root *root = inode->root;
7134 	struct btrfs_fs_info *fs_info = root->fs_info;
7135 	struct extent_map *em;
7136 	struct btrfs_key ins;
7137 	u64 alloc_hint;
7138 	int ret;
7139 
7140 	alloc_hint = get_extent_allocation_hint(inode, start, len);
7141 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7142 				   0, alloc_hint, &ins, 1, 1);
7143 	if (ret)
7144 		return ERR_PTR(ret);
7145 
7146 	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7147 				     ins.objectid, ins.offset, ins.offset,
7148 				     ins.offset, BTRFS_ORDERED_REGULAR);
7149 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7150 	if (IS_ERR(em))
7151 		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7152 					   1);
7153 
7154 	return em;
7155 }
7156 
7157 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7158 {
7159 	struct btrfs_block_group *block_group;
7160 	bool readonly = false;
7161 
7162 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7163 	if (!block_group || block_group->ro)
7164 		readonly = true;
7165 	if (block_group)
7166 		btrfs_put_block_group(block_group);
7167 	return readonly;
7168 }
7169 
7170 /*
7171  * Check if we can do nocow write into the range [@offset, @offset + @len)
7172  *
7173  * @offset:	File offset
7174  * @len:	The length to write, will be updated to the nocow writeable
7175  *		range
7176  * @orig_start:	(optional) Return the original file offset of the file extent
7177  * @orig_block_len: (optional) Return the original on-disk length of the file extent
7178  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7179  * @strict:	if true, omit optimizations that might force us into unnecessary
7180  *		COW, e.g. don't trust the generation number.
7181  *
7182  * Return:
7183  * >0	and update @len if we can do nocow write
7184  *  0	if we can't do nocow write
7185  * <0	if error happened
7186  *
7187  * NOTE: This only checks the file extents; the caller is responsible for
7188  *	 waiting for any ordered extents.
7189  */
7190 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7191 			      u64 *orig_start, u64 *orig_block_len,
7192 			      u64 *ram_bytes, bool strict)
7193 {
7194 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7195 	struct can_nocow_file_extent_args nocow_args = { 0 };
7196 	struct btrfs_path *path;
7197 	int ret;
7198 	struct extent_buffer *leaf;
7199 	struct btrfs_root *root = BTRFS_I(inode)->root;
7200 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7201 	struct btrfs_file_extent_item *fi;
7202 	struct btrfs_key key;
7203 	int found_type;
7204 
7205 	path = btrfs_alloc_path();
7206 	if (!path)
7207 		return -ENOMEM;
7208 
7209 	ret = btrfs_lookup_file_extent(NULL, root, path,
7210 			btrfs_ino(BTRFS_I(inode)), offset, 0);
7211 	if (ret < 0)
7212 		goto out;
7213 
7214 	if (ret == 1) {
7215 		if (path->slots[0] == 0) {
7216 			/* can't find the item, must cow */
7217 			ret = 0;
7218 			goto out;
7219 		}
7220 		path->slots[0]--;
7221 	}
7222 	ret = 0;
7223 	leaf = path->nodes[0];
7224 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7225 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7226 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7227 		/* not our file or wrong item type, must cow */
7228 		goto out;
7229 	}
7230 
7231 	if (key.offset > offset) {
7232 		/* Wrong offset, must cow */
7233 		goto out;
7234 	}
7235 
7236 	if (btrfs_file_extent_end(path) <= offset)
7237 		goto out;
7238 
7239 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7240 	found_type = btrfs_file_extent_type(leaf, fi);
7241 	if (ram_bytes)
7242 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7243 
7244 	nocow_args.start = offset;
7245 	nocow_args.end = offset + *len - 1;
7246 	nocow_args.strict = strict;
7247 	nocow_args.free_path = true;
7248 
7249 	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7250 	/* can_nocow_file_extent() has freed the path. */
7251 	path = NULL;
7252 
7253 	if (ret != 1) {
7254 		/* Treat errors as not being able to NOCOW. */
7255 		ret = 0;
7256 		goto out;
7257 	}
7258 
7259 	ret = 0;
7260 	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7261 		goto out;
7262 
7263 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7264 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7265 		u64 range_end;
7266 
7267 		range_end = round_up(offset + nocow_args.num_bytes,
7268 				     root->fs_info->sectorsize) - 1;
7269 		ret = test_range_bit(io_tree, offset, range_end,
7270 				     EXTENT_DELALLOC, 0, NULL);
7271 		if (ret) {
7272 			ret = -EAGAIN;
7273 			goto out;
7274 		}
7275 	}
7276 
7277 	if (orig_start)
7278 		*orig_start = key.offset - nocow_args.extent_offset;
7279 	if (orig_block_len)
7280 		*orig_block_len = nocow_args.disk_num_bytes;
7281 
7282 	*len = nocow_args.num_bytes;
7283 	ret = 1;
7284 out:
7285 	btrfs_free_path(path);
7286 	return ret;
7287 }
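
/*
 * Simplified sketch of a caller (hypothetical 'pos' and 'write_bytes';
 * real callers also wait for ordered extents first, per the NOTE above):
 *
 *	u64 len = write_bytes;
 *
 *	ret = can_nocow_extent(inode, pos, &len, NULL, NULL, NULL, false);
 *	if (ret > 0)
 *		... NOCOW write of the first 'len' bytes is possible ...
 *	else if (ret == 0)
 *		... must COW ...
 *	else
 *		... error ...
 */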
7288 
7289 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7290 			      struct extent_state **cached_state,
7291 			      unsigned int iomap_flags)
7292 {
7293 	const bool writing = (iomap_flags & IOMAP_WRITE);
7294 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7295 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7296 	struct btrfs_ordered_extent *ordered;
7297 	int ret = 0;
7298 
7299 	while (1) {
7300 		if (nowait) {
7301 			if (!try_lock_extent(io_tree, lockstart, lockend))
7302 				return -EAGAIN;
7303 		} else {
7304 			lock_extent_bits(io_tree, lockstart, lockend, cached_state);
7305 		}
7306 		/*
7307 		 * We're concerned with the entire range that we're going to be
7308 		 * doing DIO to, so we need to make sure there are no ordered
7309 		 * extents in this range.
7310 		 */
7311 		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7312 						     lockend - lockstart + 1);
7313 
7314 		/*
7315 		 * We need to make sure there are no buffered pages in this
7316 		 * range either, we could have raced between the invalidate in
7317 		 * generic_file_direct_write and locking the extent.  The
7318 		 * invalidate needs to happen so that reads after a write do not
7319 		 * get stale data.
7320 		 */
7321 		if (!ordered &&
7322 		    (!writing || !filemap_range_has_page(inode->i_mapping,
7323 							 lockstart, lockend)))
7324 			break;
7325 
7326 		unlock_extent_cached(io_tree, lockstart, lockend, cached_state);
7327 
7328 		if (ordered) {
7329 			if (nowait) {
7330 				btrfs_put_ordered_extent(ordered);
7331 				ret = -EAGAIN;
7332 				break;
7333 			}
7334 			/*
7335 			 * If we are doing a DIO read and the ordered extent we
7336 			 * found is for a buffered write, we can not wait for it
7337 			 * to complete and retry, because if we do so we can
7338 			 * deadlock with concurrent buffered writes on page
7339 			 * locks. This happens only if our DIO read covers more
7340 			 * than one extent map, if at this point has already
7341 			 * created an ordered extent for a previous extent map
7342 			 * and locked its range in the inode's io tree, and a
7343 			 * concurrent write against that previous extent map's
7344 			 * range and this range started (we unlock the ranges
7345 			 * in the io tree only when the bios complete and
7346 			 * buffered writes always lock pages before attempting
7347 			 * to lock range in the io tree).
7348 			 */
7349 			if (writing ||
7350 			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7351 				btrfs_start_ordered_extent(ordered, 1);
7352 			else
7353 				ret = nowait ? -EAGAIN : -ENOTBLK;
7354 			btrfs_put_ordered_extent(ordered);
7355 		} else {
7356 			/*
7357 			 * We could trigger writeback for this range (and wait
7358 			 * for it to complete) and then invalidate the pages for
7359 			 * this range (through invalidate_inode_pages2_range()),
7360 			 * but that can lead us to a deadlock with a concurrent
7361 			 * call to readahead (a buffered read or a defrag call
7362 			 * triggered a readahead) on a page lock due to an
7363 			 * ordered dio extent we created before but for which we
7364 			 * have not yet submitted a bio (hence it cannot
7365 			 * complete), which makes readahead wait for that
7366 			 * ordered extent to complete while holding a lock on
7367 			 * that page.
7368 			 */
7369 			ret = nowait ? -EAGAIN : -ENOTBLK;
7370 		}
7371 
7372 		if (ret)
7373 			break;
7374 
7375 		cond_resched();
7376 	}
7377 
7378 	return ret;
7379 }
7380 
7381 /* The callers of this must take lock_extent() */
7382 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7383 				       u64 len, u64 orig_start, u64 block_start,
7384 				       u64 block_len, u64 orig_block_len,
7385 				       u64 ram_bytes, int compress_type,
7386 				       int type)
7387 {
7388 	struct extent_map_tree *em_tree;
7389 	struct extent_map *em;
7390 	int ret;
7391 
7392 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7393 	       type == BTRFS_ORDERED_COMPRESSED ||
7394 	       type == BTRFS_ORDERED_NOCOW ||
7395 	       type == BTRFS_ORDERED_REGULAR);
7396 
7397 	em_tree = &inode->extent_tree;
7398 	em = alloc_extent_map();
7399 	if (!em)
7400 		return ERR_PTR(-ENOMEM);
7401 
7402 	em->start = start;
7403 	em->orig_start = orig_start;
7404 	em->len = len;
7405 	em->block_len = block_len;
7406 	em->block_start = block_start;
7407 	em->orig_block_len = orig_block_len;
7408 	em->ram_bytes = ram_bytes;
7409 	em->generation = -1;
7410 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
7411 	if (type == BTRFS_ORDERED_PREALLOC) {
7412 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
7413 	} else if (type == BTRFS_ORDERED_COMPRESSED) {
7414 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7415 		em->compress_type = compress_type;
7416 	}
7417 
7418 	do {
7419 		btrfs_drop_extent_cache(inode, em->start,
7420 					em->start + em->len - 1, 0);
7421 		write_lock(&em_tree->lock);
7422 		ret = add_extent_mapping(em_tree, em, 1);
7423 		write_unlock(&em_tree->lock);
7424 		/*
7425 		 * The caller has taken lock_extent(), so nothing should race us;
7426 		 * -EEXIST means a stale cached extent remained, so drop and retry.
7427 		 */
7428 	} while (ret == -EEXIST);
7429 
7430 	if (ret) {
7431 		free_extent_map(em);
7432 		return ERR_PTR(ret);
7433 	}
7434 
7435 	/* em got 2 refs now, the caller needs to do free_extent_map once. */
7436 	return em;
7437 }
7438 
7440 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7441 					 struct inode *inode,
7442 					 struct btrfs_dio_data *dio_data,
7443 					 u64 start, u64 len,
7444 					 unsigned int iomap_flags)
7445 {
7446 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7447 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7448 	struct extent_map *em = *map;
7449 	int type;
7450 	u64 block_start, orig_start, orig_block_len, ram_bytes;
7451 	struct btrfs_block_group *bg;
7452 	bool can_nocow = false;
7453 	bool space_reserved = false;
7454 	u64 prev_len;
7455 	int ret = 0;
7456 
7457 	/*
7458 	 * We don't allocate a new extent in the following cases
7459 	 *
7460 	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7461 	 * existing extent.
7462 	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7463 	 * just use the extent.
7464 	 *
7465 	 */
7466 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7467 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7468 	     em->block_start != EXTENT_MAP_HOLE)) {
7469 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7470 			type = BTRFS_ORDERED_PREALLOC;
7471 		else
7472 			type = BTRFS_ORDERED_NOCOW;
7473 		len = min(len, em->len - (start - em->start));
7474 		block_start = em->block_start + (start - em->start);
7475 
7476 		if (can_nocow_extent(inode, start, &len, &orig_start,
7477 				     &orig_block_len, &ram_bytes, false) == 1) {
7478 			bg = btrfs_inc_nocow_writers(fs_info, block_start);
7479 			if (bg)
7480 				can_nocow = true;
7481 		}
7482 	}
7483 
7484 	prev_len = len;
7485 	if (can_nocow) {
7486 		struct extent_map *em2;
7487 
7488 		/* We can NOCOW, so only need to reserve metadata space. */
7489 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7490 						      nowait);
7491 		if (ret < 0) {
7492 			/* Our caller expects us to free the input extent map. */
7493 			free_extent_map(em);
7494 			*map = NULL;
7495 			btrfs_dec_nocow_writers(bg);
7496 			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
7497 				ret = -EAGAIN;
7498 			goto out;
7499 		}
7500 		space_reserved = true;
7501 
7502 		em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
7503 					      orig_start, block_start,
7504 					      len, orig_block_len,
7505 					      ram_bytes, type);
7506 		btrfs_dec_nocow_writers(bg);
7507 		if (type == BTRFS_ORDERED_PREALLOC) {
7508 			free_extent_map(em);
7509 			*map = em = em2;
7510 		}
7511 
7512 		if (IS_ERR(em2)) {
7513 			ret = PTR_ERR(em2);
7514 			goto out;
7515 		}
7516 
7517 		dio_data->nocow_done = true;
7518 	} else {
7519 		/* Our caller expects us to free the input extent map. */
7520 		free_extent_map(em);
7521 		*map = NULL;
7522 
7523 		if (nowait)
7524 			return -EAGAIN;
7525 
7526 		/*
7527 		 * If we could not allocate data space before locking the file
7528 		 * range and we can't do a NOCOW write, then we have to fail.
7529 		 */
7530 		if (!dio_data->data_space_reserved)
7531 			return -ENOSPC;
7532 
7533 		/*
7534 		 * We have to COW and we have already reserved data space before,
7535 		 * so now we reserve only metadata.
7536 		 */
7537 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7538 						      false);
7539 		if (ret < 0)
7540 			goto out;
7541 		space_reserved = true;
7542 
7543 		em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
7544 		if (IS_ERR(em)) {
7545 			ret = PTR_ERR(em);
7546 			goto out;
7547 		}
7548 		*map = em;
7549 		len = min(len, em->len - (start - em->start));
7550 		if (len < prev_len)
7551 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
7552 							prev_len - len, true);
7553 	}
7554 
7555 	/*
7556 	 * We have created our ordered extent, so we can now release our reservation
7557 	 * for an outstanding extent.
7558 	 */
7559 	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
7560 
7561 	/*
7562 	 * Need to update the i_size under the extent lock so buffered
7563 	 * readers will get the updated i_size when we unlock.
7564 	 */
7565 	if (start + len > i_size_read(inode))
7566 		i_size_write(inode, start + len);
7567 out:
7568 	if (ret && space_reserved) {
7569 		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7570 		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7571 	}
7572 	return ret;
7573 }
7574 
7575 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7576 		loff_t length, unsigned int flags, struct iomap *iomap,
7577 		struct iomap *srcmap)
7578 {
7579 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7580 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7581 	struct extent_map *em;
7582 	struct extent_state *cached_state = NULL;
7583 	struct btrfs_dio_data *dio_data = iter->private;
7584 	u64 lockstart, lockend;
7585 	const bool write = !!(flags & IOMAP_WRITE);
7586 	int ret = 0;
7587 	u64 len = length;
7588 	const u64 data_alloc_len = length;
7589 	bool unlock_extents = false;
7590 
7591 	if (!write)
7592 		len = min_t(u64, len, fs_info->sectorsize);
7593 
7594 	lockstart = start;
7595 	lockend = start + len - 1;
7596 
7597 	/*
7598 	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7599 	 * enough if we've written compressed pages to this area, so we need to
7600 	 * flush the dirty pages again to make absolutely sure that any
7601 	 * outstanding dirty pages are on disk - the first flush only starts
7602 	 * compression on the data, while keeping the pages locked, so by the
7603 	 * time the second flush returns we know bios for the compressed pages
7604 	 * were submitted and finished, and the pages are no longer under writeback.
7605 	 *
7606 	 * If we have a NOWAIT request and we have any pages in the range that
7607 	 * are locked, likely due to compression still in progress, we don't want
7608 	 * to block on page locks. We also don't want to block on pages marked as
7609 	 * dirty or under writeback (same as for the non-compression case).
7610 	 * iomap_dio_rw() did the same check, but after that and before we got
7611 	 * here, mmap'ed writes may have happened or buffered reads started
7612 	 * (readpage() and readahead(), which lock pages), as we haven't locked
7613 	 * the file range yet.
7614 	 */
7615 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7616 		     &BTRFS_I(inode)->runtime_flags)) {
7617 		if (flags & IOMAP_NOWAIT) {
7618 			if (filemap_range_needs_writeback(inode->i_mapping,
7619 							  lockstart, lockend))
7620 				return -EAGAIN;
7621 		} else {
7622 			ret = filemap_fdatawrite_range(inode->i_mapping, start,
7623 						       start + length - 1);
7624 			if (ret)
7625 				return ret;
7626 		}
7627 	}
7628 
7629 	memset(dio_data, 0, sizeof(*dio_data));
7630 
7631 	/*
7632 	 * We always try to allocate data space and must do it before locking
7633 	 * the file range, to avoid deadlocks with concurrent writes to the same
7634 	 * range if the range has several extents and the writes don't expand the
7635 	 * current i_size (the inode lock is taken in shared mode). If we fail to
7636 	 * allocate data space here we continue and later, after locking the
7637 	 * file range, we fail with ENOSPC only if we figure out we cannot do a
7638 	 * NOCOW write.
7639 	 */
7640 	if (write && !(flags & IOMAP_NOWAIT)) {
7641 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
7642 						  &dio_data->data_reserved,
7643 						  start, data_alloc_len);
7644 		if (!ret)
7645 			dio_data->data_space_reserved = true;
7646 		else if (ret && !(BTRFS_I(inode)->flags &
7647 				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
7648 			goto err;
7649 	}
7650 
7651 	/*
7652 	 * If this errors out it's because we couldn't invalidate pagecache for
7653 	 * this range and we need to fall back to buffered IO, or we are doing a
7654 	 * NOWAIT read/write and we need to block.
7655 	 */
7656 	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
7657 	if (ret < 0)
7658 		goto err;
7659 
7660 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
7661 	if (IS_ERR(em)) {
7662 		ret = PTR_ERR(em);
7663 		goto unlock_err;
7664 	}
7665 
7666 	/*
7667 	 * OK, for INLINE and COMPRESSED extents we need to fall back to buffered
7668 	 * IO.  INLINE is special, and we could probably kludge it in here, but
7669 	 * it's still buffered so for safety let's just fall back to the generic
7670 	 * buffered path.
7671 	 *
7672 	 * For COMPRESSED we _have_ to read the entire extent in so we can
7673 	 * decompress it, so there will be buffering required no matter what we
7674 	 * do, so go ahead and fall back to buffered.
7675 	 *
7676 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7677 	 * to buffered IO.  Don't blame me, this is the price we pay for using
7678 	 * the generic code.
7679 	 */
7680 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7681 	    em->block_start == EXTENT_MAP_INLINE) {
7682 		free_extent_map(em);
7683 		/*
7684 		 * If we are in a NOWAIT context, return -EAGAIN in order to
7685 		 * fall back to buffered IO. This is not only because we can
7686 		 * block with buffered IO (no support for NOWAIT semantics at
7687 		 * the moment) but also to avoid returning short reads to user
7688 		 * space - this happens if we were able to read some data from
7689 		 * previous non-compressed extents and then when we fall back to
7690 		 * buffered IO, at btrfs_file_read_iter() by calling
7691 		 * filemap_read(), we fail to fault in pages for the read buffer,
7692 		 * in which case filemap_read() returns a short read (the number
7693 		 * of bytes previously read is > 0, so it does not return -EFAULT).
7694 		 */
7695 		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
7696 		goto unlock_err;
7697 	}
7698 
7699 	len = min(len, em->len - (start - em->start));
7700 
7701 	/*
7702 	 * If we have a NOWAIT request and the range contains multiple extents
7703 	 * (or a mix of extents and holes), then we return -EAGAIN to make the
7704 	 * caller fallback to a context where it can do a blocking (without
7705 	 * NOWAIT) request. This way we avoid doing partial IO and returning
7706 	 * success to the caller, which is not optimal for writes and for reads
7707 	 * it can result in unexpected behaviour for an application.
7708 	 *
7709 	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7710 	 * iomap_dio_rw(), we can end up returning less data than what the caller
7711 	 * asked for, resulting in an unexpected, and incorrect, short read.
7712 	 * That is, the caller asked to read N bytes and we return less than that,
7713 	 * which is wrong unless we are crossing EOF. This happens if we get a
7714 	 * page fault error when trying to fault in pages for the buffer that is
7715 	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7716 	 * have previously submitted bios for other extents in the range, in
7717 	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7718 	 * those bios have completed by the time we get the page fault error,
7719 	 * which we return back to our caller - we should only return EIOCBQUEUED
7720 	 * after we have submitted bios for all the extents in the range.
7721 	 */
7722 	if ((flags & IOMAP_NOWAIT) && len < length) {
7723 		free_extent_map(em);
7724 		ret = -EAGAIN;
7725 		goto unlock_err;
7726 	}
7727 
7728 	if (write) {
7729 		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7730 						    start, len, flags);
7731 		if (ret < 0)
7732 			goto unlock_err;
7733 		unlock_extents = true;
7734 		/* Recalc len in case the new em is smaller than requested */
7735 		len = min(len, em->len - (start - em->start));
7736 		if (dio_data->data_space_reserved) {
7737 			u64 release_offset;
7738 			u64 release_len = 0;
7739 
7740 			if (dio_data->nocow_done) {
7741 				release_offset = start;
7742 				release_len = data_alloc_len;
7743 			} else if (len < data_alloc_len) {
7744 				release_offset = start + len;
7745 				release_len = data_alloc_len - len;
7746 			}
7747 
7748 			if (release_len > 0)
7749 				btrfs_free_reserved_data_space(BTRFS_I(inode),
7750 							       dio_data->data_reserved,
7751 							       release_offset,
7752 							       release_len);
7753 		}
7754 	} else {
7755 		/*
7756 		 * We need to unlock only the end area that we aren't using.
7757 		 * The rest is going to be unlocked by the endio routine.
7758 		 */
7759 		lockstart = start + len;
7760 		if (lockstart < lockend)
7761 			unlock_extents = true;
7762 	}
7763 
7764 	if (unlock_extents)
7765 		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
7766 				     lockstart, lockend, &cached_state);
7767 	else
7768 		free_extent_state(cached_state);
7769 
7770 	/*
7771 	 * Translate extent map information to iomap.
7772 	 * We trim the extents (and move the addr) even though iomap code does
7773 	 * that, since we have locked only the parts we are performing I/O in.
7774 	 */
7775 	if ((em->block_start == EXTENT_MAP_HOLE) ||
7776 	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
7777 		iomap->addr = IOMAP_NULL_ADDR;
7778 		iomap->type = IOMAP_HOLE;
7779 	} else {
7780 		iomap->addr = em->block_start + (start - em->start);
7781 		iomap->type = IOMAP_MAPPED;
7782 	}
7783 	iomap->offset = start;
7784 	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
7785 	iomap->length = len;
7786 
7787 	if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
7788 		iomap->flags |= IOMAP_F_ZONE_APPEND;
7789 
7790 	free_extent_map(em);
7791 
7792 	return 0;
7793 
7794 unlock_err:
7795 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7796 			     &cached_state);
7797 err:
7798 	if (dio_data->data_space_reserved) {
7799 		btrfs_free_reserved_data_space(BTRFS_I(inode),
7800 					       dio_data->data_reserved,
7801 					       start, data_alloc_len);
7802 		extent_changeset_free(dio_data->data_reserved);
7803 	}
7804 
7805 	return ret;
7806 }
7807 
7808 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7809 		ssize_t written, unsigned int flags, struct iomap *iomap)
7810 {
7811 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7812 	struct btrfs_dio_data *dio_data = iter->private;
7813 	size_t submitted = dio_data->submitted;
7814 	const bool write = !!(flags & IOMAP_WRITE);
7815 	int ret = 0;
7816 
7817 	if (!write && (iomap->type == IOMAP_HOLE)) {
7818 		/* If reading from a hole, unlock and return */
7819 		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
7820 		return 0;
7821 	}
7822 
7823 	if (submitted < length) {
7824 		pos += submitted;
7825 		length -= submitted;
7826 		if (write)
7827 			__endio_write_update_ordered(BTRFS_I(inode), pos,
7828 					length, false);
7829 		else
7830 			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7831 				      pos + length - 1);
7832 		ret = -ENOTBLK;
7833 	}
7834 
7835 	if (write)
7836 		extent_changeset_free(dio_data->data_reserved);
7837 	return ret;
7838 }
7839 
7840 static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
7841 {
7842 	/*
7843 	 * This implies a barrier so that stores to dip->bio.bi_status before
7844 	 * this and loads of dip->bio.bi_status after this are fully ordered.
7845 	 */
7846 	if (!refcount_dec_and_test(&dip->refs))
7847 		return;
7848 
7849 	if (btrfs_op(&dip->bio) == BTRFS_MAP_WRITE) {
7850 		__endio_write_update_ordered(BTRFS_I(dip->inode),
7851 					     dip->file_offset,
7852 					     dip->bytes,
7853 					     !dip->bio.bi_status);
7854 	} else {
7855 		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
7856 			      dip->file_offset,
7857 			      dip->file_offset + dip->bytes - 1);
7858 	}
7859 
7860 	kfree(dip->csums);
7861 	bio_endio(&dip->bio);
7862 }
7863 
7864 static void submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7865 				  int mirror_num,
7866 				  enum btrfs_compression_type compress_type)
7867 {
7868 	struct btrfs_dio_private *dip = bio->bi_private;
7869 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7870 
7871 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7872 
7873 	if (btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA))
7874 		return;
7875 
7876 	refcount_inc(&dip->refs);
7877 	if (btrfs_map_bio(fs_info, bio, mirror_num))
7878 		refcount_dec(&dip->refs);
7879 }
7880 
7881 static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
7882 					     struct btrfs_bio *bbio,
7883 					     const bool uptodate)
7884 {
7885 	struct inode *inode = dip->inode;
7886 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
7887 	const u32 sectorsize = fs_info->sectorsize;
7888 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
7889 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7890 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
7891 	struct bio_vec bvec;
7892 	struct bvec_iter iter;
7893 	u32 bio_offset = 0;
7894 	blk_status_t err = BLK_STS_OK;
7895 
7896 	__bio_for_each_segment(bvec, &bbio->bio, iter, bbio->iter) {
7897 		unsigned int i, nr_sectors, pgoff;
7898 
7899 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
7900 		pgoff = bvec.bv_offset;
7901 		for (i = 0; i < nr_sectors; i++) {
7902 			u64 start = bbio->file_offset + bio_offset;
7903 
7904 			ASSERT(pgoff < PAGE_SIZE);
7905 			if (uptodate &&
7906 			    (!csum || !check_data_csum(inode, bbio,
7907 						       bio_offset, bvec.bv_page,
7908 						       pgoff, start))) {
7909 				clean_io_failure(fs_info, failure_tree, io_tree,
7910 						 start, bvec.bv_page,
7911 						 btrfs_ino(BTRFS_I(inode)),
7912 						 pgoff);
7913 			} else {
7914 				int ret;
7915 
7916 				ret = btrfs_repair_one_sector(inode, &bbio->bio,
7917 						bio_offset, bvec.bv_page, pgoff,
7918 						start, bbio->mirror_num,
7919 						submit_dio_repair_bio);
7920 				if (ret)
7921 					err = errno_to_blk_status(ret);
7922 			}
7923 			ASSERT(bio_offset + sectorsize > bio_offset);
7924 			bio_offset += sectorsize;
7925 			pgoff += sectorsize;
7926 		}
7927 	}
7928 	return err;
7929 }
7930 
7931 static void __endio_write_update_ordered(struct btrfs_inode *inode,
7932 					 const u64 offset, const u64 bytes,
7933 					 const bool uptodate)
7934 {
7935 	btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes,
7936 				       finish_ordered_fn, uptodate);
7937 }
7938 
7939 static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
7940 						     struct bio *bio,
7941 						     u64 dio_file_offset)
7942 {
7943 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
7944 }
7945 
7946 static void btrfs_end_dio_bio(struct bio *bio)
7947 {
7948 	struct btrfs_dio_private *dip = bio->bi_private;
7949 	struct btrfs_bio *bbio = btrfs_bio(bio);
7950 	blk_status_t err = bio->bi_status;
7951 
7952 	if (err)
7953 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
7954 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
7955 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
7956 			   bio->bi_opf, bio->bi_iter.bi_sector,
7957 			   bio->bi_iter.bi_size, err);
7958 
7959 	if (bio_op(bio) == REQ_OP_READ)
7960 		err = btrfs_check_read_dio_bio(dip, bbio, !err);
7961 
7962 	if (err)
7963 		dip->bio.bi_status = err;
7964 
7965 	btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
7966 
7967 	bio_put(bio);
7968 	btrfs_dio_private_put(dip);
7969 }
7970 
7971 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
7972 		struct inode *inode, u64 file_offset, int async_submit)
7973 {
7974 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7975 	struct btrfs_dio_private *dip = bio->bi_private;
7976 	bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
7977 	blk_status_t ret;
7978 
7979 	/* Check btrfs_submit_bio_hook() for rules about async submit. */
7980 	if (async_submit)
7981 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7982 
7983 	if (!write) {
7984 		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
7985 		if (ret)
7986 			goto err;
7987 	}
7988 
7989 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
7990 		goto map;
7991 
7992 	if (write && async_submit) {
7993 		ret = btrfs_wq_submit_bio(inode, bio, 0, file_offset,
7994 					  btrfs_submit_bio_start_direct_io);
7995 		goto err;
7996 	} else if (write) {
7997 		/*
7998 		 * If we aren't doing async submit, calculate the csum of the
7999 		 * bio now.
8000 		 */
8001 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
8002 		if (ret)
8003 			goto err;
8004 	} else {
8005 		u64 csum_offset;
8006 
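		/*
		 * One csum of csum_size bytes per sector: e.g. with 4K
		 * sectors (sectorsize_bits == 12) and crc32c checksums
		 * (csum_size == 4), a bio starting 64K into the dip gets
		 * csum_offset = (65536 >> 12) * 4 = 64 bytes.
		 */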
8007 		csum_offset = file_offset - dip->file_offset;
8008 		csum_offset >>= fs_info->sectorsize_bits;
8009 		csum_offset *= fs_info->csum_size;
8010 		btrfs_bio(bio)->csum = dip->csums + csum_offset;
8011 	}
8012 map:
8013 	ret = btrfs_map_bio(fs_info, bio, 0);
8014 err:
8015 	return ret;
8016 }
8017 
8018 static void btrfs_submit_direct(const struct iomap_iter *iter,
8019 		struct bio *dio_bio, loff_t file_offset)
8020 {
8021 	struct btrfs_dio_private *dip =
8022 		container_of(dio_bio, struct btrfs_dio_private, bio);
8023 	struct inode *inode = iter->inode;
8024 	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
8025 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8026 	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
8027 			     BTRFS_BLOCK_GROUP_RAID56_MASK);
8028 	struct bio *bio;
8029 	u64 start_sector;
8030 	int async_submit = 0;
8031 	u64 submit_len;
8032 	u64 clone_offset = 0;
8033 	u64 clone_len;
8034 	u64 logical;
8035 	int ret;
8036 	blk_status_t status;
8037 	struct btrfs_io_geometry geom;
8038 	struct btrfs_dio_data *dio_data = iter->private;
8039 	struct extent_map *em = NULL;
8040 
8041 	dip->inode = inode;
8042 	dip->file_offset = file_offset;
8043 	dip->bytes = dio_bio->bi_iter.bi_size;
8044 	refcount_set(&dip->refs, 1);
8045 	dip->csums = NULL;
8046 
8047 	if (!write && !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
8048 		unsigned int nr_sectors =
8049 			(dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
8050 
8051 		/*
8052 		 * Load the csums up front to reduce csum tree searches and
8053 		 * contention when submitting bios.
8054 		 */
8055 		status = BLK_STS_RESOURCE;
8056 		dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
8057 		if (!dip->csums)
8058 			goto out_err;
8059 
8060 		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
8061 		if (status != BLK_STS_OK)
8062 			goto out_err;
8063 	}
8064 
8065 	start_sector = dio_bio->bi_iter.bi_sector;
8066 	submit_len = dio_bio->bi_iter.bi_size;
8067 
8068 	do {
8069 		logical = start_sector << 9;
8070 		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
8071 		if (IS_ERR(em)) {
8072 			status = errno_to_blk_status(PTR_ERR(em));
8073 			em = NULL;
8074 			goto out_err_em;
8075 		}
8076 		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
8077 					    logical, &geom);
8078 		if (ret) {
8079 			status = errno_to_blk_status(ret);
8080 			goto out_err_em;
8081 		}
8082 
8083 		clone_len = min(submit_len, geom.len);
8084 		ASSERT(clone_len <= UINT_MAX);
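		/*
		 * E.g. (hypothetical layout): a 1M direct write starting 256K
		 * before a stripe/chunk boundary is split here into a 256K
		 * clone now and the remaining 768K on the next iteration,
		 * since geom.len caps each clone at that boundary.
		 */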
8085 
8086 		/*
8087 		 * This will never fail as it's passing GFP_NOFS and
8088 		 * the allocation is backed by btrfs_bioset.
8089 		 */
8090 		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
8091 		bio->bi_private = dip;
8092 		bio->bi_end_io = btrfs_end_dio_bio;
8093 		btrfs_bio(bio)->file_offset = file_offset;
8094 
8095 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
8096 			status = extract_ordered_extent(BTRFS_I(inode), bio,
8097 							file_offset);
8098 			if (status) {
8099 				bio_put(bio);
8100 				goto out_err;
8101 			}
8102 		}
8103 
8104 		ASSERT(submit_len >= clone_len);
8105 		submit_len -= clone_len;
8106 
8107 		/*
8108 		 * Increase the count before we submit the bio so we know
8109 		 * the end IO handler won't happen before we increase the
8110 		 * count. Otherwise, the dip might get freed before we're
8111 		 * done setting it up.
8112 		 *
8113 		 * We transfer the initial reference to the last bio, so we
8114 		 * don't need to increment the reference count for the last one.
8115 		 */
8116 		if (submit_len > 0) {
8117 			refcount_inc(&dip->refs);
8118 			/*
8119 			 * If we are submitting more than one bio, submit them
8120 			 * all asynchronously. The exception is RAID 5 or 6, as
8121 			 * asynchronous checksums make it difficult to collect
8122 			 * full stripe writes.
8123 			 */
8124 			if (!raid56)
8125 				async_submit = 1;
8126 		}
8127 
8128 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
8129 						async_submit);
8130 		if (status) {
8131 			bio_put(bio);
8132 			if (submit_len > 0)
8133 				refcount_dec(&dip->refs);
8134 			goto out_err_em;
8135 		}
8136 
8137 		dio_data->submitted += clone_len;
8138 		clone_offset += clone_len;
8139 		start_sector += clone_len >> 9;
8140 		file_offset += clone_len;
8141 
8142 		free_extent_map(em);
8143 	} while (submit_len > 0);
8144 	return;
8145 
8146 out_err_em:
8147 	free_extent_map(em);
8148 out_err:
8149 	dio_bio->bi_status = status;
8150 	btrfs_dio_private_put(dip);
8151 }
8152 
8153 static const struct iomap_ops btrfs_dio_iomap_ops = {
8154 	.iomap_begin            = btrfs_dio_iomap_begin,
8155 	.iomap_end              = btrfs_dio_iomap_end,
8156 };
8157 
8158 static const struct iomap_dio_ops btrfs_dio_ops = {
8159 	.submit_io		= btrfs_submit_direct,
8160 	.bio_set		= &btrfs_dio_bioset,
8161 };
8162 
8163 ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
8164 {
8165 	struct btrfs_dio_data data;
8166 
8167 	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
8168 			    IOMAP_DIO_PARTIAL, &data, done_before);
8169 }
8170 
8171 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8172 			u64 start, u64 len)
8173 {
8174 	int	ret;
8175 
8176 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
8177 	if (ret)
8178 		return ret;
8179 
8180 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
8181 }
8182 
8183 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8184 {
8185 	struct inode *inode = page->mapping->host;
8186 	int ret;
8187 
8188 	if (current->flags & PF_MEMALLOC) {
8189 		redirty_page_for_writepage(wbc, page);
8190 		unlock_page(page);
8191 		return 0;
8192 	}
8193 
8194 	/*
8195 	 * If we are under memory pressure we will call this directly from the
8196 	 * VM, we need to make sure we have the inode referenced for the ordered
8197 	 * extent.  If not just return like we didn't do anything.
8198 	 */
8199 	if (!igrab(inode)) {
8200 		redirty_page_for_writepage(wbc, page);
8201 		return AOP_WRITEPAGE_ACTIVATE;
8202 	}
8203 	ret = extent_write_full_page(page, wbc);
8204 	btrfs_add_delayed_iput(inode);
8205 	return ret;
8206 }
8207 
8208 static int btrfs_writepages(struct address_space *mapping,
8209 			    struct writeback_control *wbc)
8210 {
8211 	return extent_writepages(mapping, wbc);
8212 }
8213 
8214 static void btrfs_readahead(struct readahead_control *rac)
8215 {
8216 	extent_readahead(rac);
8217 }
8218 
8219 /*
8220  * For release_folio() and invalidate_folio() we have a race window where
8221  * folio_end_writeback() is called but the subpage spinlock is not yet released.
8222  * If we continue to release/invalidate the page, we could cause use-after-free
8223  * for subpage spinlock.  So this function is to spin and wait for subpage
8224  * spinlock.
8225  */
8226 static void wait_subpage_spinlock(struct page *page)
8227 {
8228 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
8229 	struct btrfs_subpage *subpage;
8230 
8231 	if (!btrfs_is_subpage(fs_info, page))
8232 		return;
8233 
8234 	ASSERT(PagePrivate(page) && page->private);
8235 	subpage = (struct btrfs_subpage *)page->private;
8236 
8237 	/*
8238 	 * This may look insane as we just acquire the spinlock and release it,
8239 	 * without doing anything.  But we just want to make sure no one is
8240 	 * still holding the subpage spinlock.
8241 	 * And since the page is neither dirty nor under writeback, and we have
8242 	 * the page locked, the only possible way to hold the spinlock is from
8243 	 * the endio function clearing page writeback.
8244 	 *
8245 	 * Here we just acquire the spinlock so that all existing callers
8246 	 * should exit and we're safe to release/invalidate the page.
8247 	 */
8248 	spin_lock_irq(&subpage->lock);
8249 	spin_unlock_irq(&subpage->lock);
8250 }
8251 
8252 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8253 {
8254 	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
8255 
8256 	if (ret == 1) {
8257 		wait_subpage_spinlock(&folio->page);
8258 		clear_page_extent_mapped(&folio->page);
8259 	}
8260 	return ret;
8261 }
8262 
8263 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
8264 {
8265 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
8266 		return false;
8267 	return __btrfs_release_folio(folio, gfp_flags);
8268 }
8269 
8270 #ifdef CONFIG_MIGRATION
8271 static int btrfs_migratepage(struct address_space *mapping,
8272 			     struct page *newpage, struct page *page,
8273 			     enum migrate_mode mode)
8274 {
8275 	int ret;
8276 
8277 	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
8278 	if (ret != MIGRATEPAGE_SUCCESS)
8279 		return ret;
8280 
8281 	if (page_has_private(page))
8282 		attach_page_private(newpage, detach_page_private(page));
8283 
8284 	if (PageOrdered(page)) {
8285 		ClearPageOrdered(page);
8286 		SetPageOrdered(newpage);
8287 	}
8288 
8289 	if (mode != MIGRATE_SYNC_NO_COPY)
8290 		migrate_page_copy(newpage, page);
8291 	else
8292 		migrate_page_states(newpage, page);
8293 	return MIGRATEPAGE_SUCCESS;
8294 }
8295 #endif
8296 
8297 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
8298 				 size_t length)
8299 {
8300 	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
8301 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8302 	struct extent_io_tree *tree = &inode->io_tree;
8303 	struct extent_state *cached_state = NULL;
8304 	u64 page_start = folio_pos(folio);
8305 	u64 page_end = page_start + folio_size(folio) - 1;
8306 	u64 cur;
8307 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8308 
8309 	/*
8310 	 * We have the folio locked, so no new ordered extent can be created on
8311 	 * it, nor can any bio be submitted for it.
8312 	 *
8313 	 * But already-submitted bios can still be finished on this folio.
8314 	 * Furthermore, the endio function won't skip a folio that has Ordered
8315 	 * (Private2) already cleared, so it's possible for endio and
8316 	 * invalidate_folio to do the same ordered extent accounting twice
8317 	 * on one folio.
8318 	 *
8319 	 * So here we wait for any submitted bios to finish, so that we won't
8320 	 * do double ordered extent accounting on the same folio.
8321 	 */
8322 	folio_wait_writeback(folio);
8323 	wait_subpage_spinlock(&folio->page);
8324 
8325 	/*
8326 	 * For the subpage case, we have call sites like
8327 	 * btrfs_punch_hole_lock_range() which pass ranges not aligned to the
8328 	 * sectorsize.
8329 	 * If the range doesn't cover the full folio, we don't need to and
8330 	 * shouldn't clear the page-extent-mapped state, as folio->private can
8331 	 * still record subpage dirty bits for other parts of the range.
8332 	 *
8333 	 * For cases that invalidate the full folio even though the range
8334 	 * doesn't cover it, like invalidating the last folio, we're still
8335 	 * safe to wait for the ordered extent to finish.
8336 	 */
8337 	if (!(offset == 0 && length == folio_size(folio))) {
8338 		btrfs_release_folio(folio, GFP_NOFS);
8339 		return;
8340 	}
8341 
8342 	if (!inode_evicting)
8343 		lock_extent_bits(tree, page_start, page_end, &cached_state);
8344 
8345 	cur = page_start;
8346 	while (cur < page_end) {
8347 		struct btrfs_ordered_extent *ordered;
8348 		bool delete_states;
8349 		u64 range_end;
8350 		u32 range_len;
8351 
8352 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
8353 							   page_end + 1 - cur);
8354 		if (!ordered) {
8355 			range_end = page_end;
8356 			/*
8357 			 * No ordered extent covering this range, we are safe
8358 			 * to delete all extent states in the range.
8359 			 */
8360 			delete_states = true;
8361 			goto next;
8362 		}
8363 		if (ordered->file_offset > cur) {
8364 			/*
8365 			 * There is a range between [cur, oe->file_offset) not
8366 			 * covered by any ordered extent.
8367 			 * We are safe to delete all extent states, and handle
8368 			 * the ordered extent in the next iteration.
8369 			 */
8370 			range_end = ordered->file_offset - 1;
8371 			delete_states = true;
8372 			goto next;
8373 		}
8374 
8375 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8376 				page_end);
8377 		ASSERT(range_end + 1 - cur < U32_MAX);
8378 		range_len = range_end + 1 - cur;
8379 		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
8380 			/*
8381 			 * If Ordered (Private2) is cleared, it means endio has
8382 			 * already been executed for the range.
8383 			 * We can't delete the extent states as
8384 			 * btrfs_finish_ordered_io() may still use some of them.
8385 			 */
8386 			delete_states = false;
8387 			goto next;
8388 		}
8389 		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
8390 
8391 		/*
8392 		 * IO on this page will never be started, so we need to account
8393 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8394 		 * here; we must leave that to the ordered extent completion.
8395 		 *
8396 		 * This will also unlock the range for incoming
8397 		 * btrfs_finish_ordered_io().
8398 		 */
8399 		if (!inode_evicting)
8400 			clear_extent_bit(tree, cur, range_end,
8401 					 EXTENT_DELALLOC |
8402 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8403 					 EXTENT_DEFRAG, 1, 0, &cached_state);
8404 
8405 		spin_lock_irq(&inode->ordered_tree.lock);
8406 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8407 		ordered->truncated_len = min(ordered->truncated_len,
8408 					     cur - ordered->file_offset);
8409 		spin_unlock_irq(&inode->ordered_tree.lock);
8410 
8411 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8412 						   cur, range_end + 1 - cur)) {
8413 			btrfs_finish_ordered_io(ordered);
8414 			/*
8415 			 * The ordered extent has finished, so now we're again
8416 			 * safe to delete all extent states of the range.
8417 			 */
8418 			delete_states = true;
8419 		} else {
8420 			/*
8421 			 * btrfs_finish_ordered_io() will get executed by endio
8422 			 * of other pages, thus we can't delete extent states
8423 			 * anymore.
8424 			 */
8425 			delete_states = false;
8426 		}
8427 next:
8428 		if (ordered)
8429 			btrfs_put_ordered_extent(ordered);
8430 		/*
8431 		 * Qgroup reserved space handling.
8432 		 * The sector(s) here will be in one of two states:
8433 		 *
8434 		 * 1) Already written to disk, or the bio already finished.
8435 		 *    Then its QGROUP_RESERVED bit in the io_tree is already
8436 		 *    cleared and the qgroup will be handled by its
8437 		 *    qgroup_record, so btrfs_qgroup_free_data() does nothing here.
8438 		 *
8439 		 * 2) Not written to disk yet.
8440 		 *    Then the btrfs_qgroup_free_data() call will clear the
8441 		 *    QGROUP_RESERVED bit of its io_tree and free the qgroup
8442 		 *    reserved data space, since the IO will never happen for
8443 		 *    this folio.
8444 		 */
8445 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
8446 		if (!inode_evicting) {
8447 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8448 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8449 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
8450 				 delete_states, &cached_state);
8451 		}
8452 		cur = range_end + 1;
8453 	}
8454 	/*
8455 	 * We have iterated through all ordered extents of the folio, so the
8456 	 * folio should not have Ordered (Private2) set anymore, or the above
8457 	 * iteration did something wrong.
8458 	 */
8459 	ASSERT(!folio_test_ordered(folio));
8460 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
8461 	if (!inode_evicting)
8462 		__btrfs_release_folio(folio, GFP_NOFS);
8463 	clear_page_extent_mapped(&folio->page);
8464 }
8465 
8466 /*
8467  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8468  * called from a page fault handler when a page is first dirtied. Hence we must
8469  * be careful to check for EOF conditions here. We set the page up correctly
8470  * for a written page which means we get ENOSPC checking when writing into
8471  * holes and correct delalloc and unwritten extent mapping on filesystems that
8472  * support these features.
8473  *
8474  * We are not allowed to take the i_mutex here so we have to play games to
8475  * protect against truncate races as the page could now be beyond EOF.  Because
8476  * truncate_setsize() writes the inode size before removing pages, once we have
8477  * the page lock we can determine safely if the page is beyond EOF. If it is not
8478  * beyond EOF, then the page is guaranteed safe against truncation until we
8479  * unlock the page.
8480  */
8481 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8482 {
8483 	struct page *page = vmf->page;
8484 	struct inode *inode = file_inode(vmf->vma->vm_file);
8485 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8486 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8487 	struct btrfs_ordered_extent *ordered;
8488 	struct extent_state *cached_state = NULL;
8489 	struct extent_changeset *data_reserved = NULL;
8490 	unsigned long zero_start;
8491 	loff_t size;
8492 	vm_fault_t ret;
8493 	int ret2;
8494 	int reserved = 0;
8495 	u64 reserved_space;
8496 	u64 page_start;
8497 	u64 page_end;
8498 	u64 end;
8499 
8500 	reserved_space = PAGE_SIZE;
8501 
8502 	sb_start_pagefault(inode->i_sb);
8503 	page_start = page_offset(page);
8504 	page_end = page_start + PAGE_SIZE - 1;
8505 	end = page_end;
8506 
8507 	/*
8508 	 * Reserving delalloc space after obtaining the page lock can lead to
8509 	 * deadlock. For example, if a dirty page is locked by this function
8510 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8511 	 * dirty page write out, then the btrfs_writepage() function could
8512 	 * end up waiting indefinitely to get a lock on the page currently
8513 	 * being processed by btrfs_page_mkwrite() function.
8514 	 */
8515 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8516 					    page_start, reserved_space);
8517 	if (!ret2) {
8518 		ret2 = file_update_time(vmf->vma->vm_file);
8519 		reserved = 1;
8520 	}
8521 	if (ret2) {
8522 		ret = vmf_error(ret2);
8523 		if (reserved)
8524 			goto out;
8525 		goto out_noreserve;
8526 	}
8527 
8528 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
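	/*
	 * Retry point: if an ordered extent races with us below, we drop all
	 * locks, wait for it to complete and come back here.
	 */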
8529 again:
8530 	down_read(&BTRFS_I(inode)->i_mmap_lock);
8531 	lock_page(page);
8532 	size = i_size_read(inode);
8533 
8534 	if ((page->mapping != inode->i_mapping) ||
8535 	    (page_start >= size)) {
8536 		/* page got truncated out from underneath us */
8537 		goto out_unlock;
8538 	}
8539 	wait_on_page_writeback(page);
8540 
8541 	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8542 	ret2 = set_page_extent_mapped(page);
8543 	if (ret2 < 0) {
8544 		ret = vmf_error(ret2);
8545 		unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8546 		goto out_unlock;
8547 	}
8548 
8549 	/*
8550 	 * We can't set the delalloc bits if there are pending ordered
8551 	 * extents. Drop our locks and wait for them to finish.
8552 	 */
8553 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8554 			PAGE_SIZE);
8555 	if (ordered) {
8556 		unlock_extent_cached(io_tree, page_start, page_end,
8557 				     &cached_state);
8558 		unlock_page(page);
8559 		up_read(&BTRFS_I(inode)->i_mmap_lock);
8560 		btrfs_start_ordered_extent(ordered, 1);
8561 		btrfs_put_ordered_extent(ordered);
8562 		goto again;
8563 	}
8564 
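	/*
	 * If this is the last page of the file and it is only partially
	 * inside EOF, only the part up to EOF (rounded up to the sectorsize)
	 * needs a delalloc reservation; give the rest back.
	 */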
8565 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8566 		reserved_space = round_up(size - page_start,
8567 					  fs_info->sectorsize);
8568 		if (reserved_space < PAGE_SIZE) {
8569 			end = page_start + reserved_space - 1;
8570 			btrfs_delalloc_release_space(BTRFS_I(inode),
8571 					data_reserved, page_start,
8572 					PAGE_SIZE - reserved_space, true);
8573 		}
8574 	}
8575 
8576 	/*
8577 	 * page_mkwrite gets called when the page is first dirtied after it's
8578 	 * faulted in, but write(2) could also dirty a page and set delalloc
8579 	 * bits. Thus, for space accounting reasons, we still need to clear
8580 	 * any delalloc bits within this page range, since we had to reserve
8581 	 * the data and metadata space before lock_page() (see above comments).
8582 	 */
8583 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8584 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8585 			  EXTENT_DEFRAG, 0, 0, &cached_state);
8586 
8587 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8588 					&cached_state);
8589 	if (ret2) {
8590 		unlock_extent_cached(io_tree, page_start, page_end,
8591 				     &cached_state);
8592 		ret = VM_FAULT_SIGBUS;
8593 		goto out_unlock;
8594 	}
8595 
8596 	/* page is wholly or partially inside EOF */
8597 	if (page_start + PAGE_SIZE > size)
8598 		zero_start = offset_in_page(size);
8599 	else
8600 		zero_start = PAGE_SIZE;
8601 
8602 	if (zero_start != PAGE_SIZE) {
8603 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8604 		flush_dcache_page(page);
8605 	}
8606 	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8607 	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8608 	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8609 
8610 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8611 
8612 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8613 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8614 
8615 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8616 	sb_end_pagefault(inode->i_sb);
8617 	extent_changeset_free(data_reserved);
8618 	return VM_FAULT_LOCKED;
8619 
8620 out_unlock:
8621 	unlock_page(page);
8622 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8623 out:
8624 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8625 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8626 				     reserved_space, (ret != 0));
8627 out_noreserve:
8628 	sb_end_pagefault(inode->i_sb);
8629 	extent_changeset_free(data_reserved);
8630 	return ret;
8631 }
8632 
8633 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
8634 {
8635 	struct btrfs_truncate_control control = {
8636 		.inode = BTRFS_I(inode),
8637 		.ino = btrfs_ino(BTRFS_I(inode)),
8638 		.min_type = BTRFS_EXTENT_DATA_KEY,
8639 		.clear_extent_range = true,
8640 	};
8641 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8642 	struct btrfs_root *root = BTRFS_I(inode)->root;
8643 	struct btrfs_block_rsv *rsv;
8644 	int ret;
8645 	struct btrfs_trans_handle *trans;
8646 	u64 mask = fs_info->sectorsize - 1;
8647 	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8648 
8649 	if (!skip_writeback) {
8650 		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8651 					       (u64)-1);
8652 		if (ret)
8653 			return ret;
8654 	}
8655 
8656 	/*
8657 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8658 	 * things going on here:
8659 	 *
8660 	 * 1) We need to reserve space to update our inode.
8661 	 *
8662 	 * 2) We need to have something to cache all the space that is going to
8663 	 * be freed up by the truncate operation, but also have some slack
8664 	 * space reserved in case it uses space during the truncate (thank you
8665 	 * very much snapshotting).
8666 	 *
8667 	 * And we need these to be separate.  The fact is we can use a lot of
8668 	 * space doing the truncate, and we have no earthly idea how much space
8669 	 * we will use, so we need the truncate reservation to be separate so it
8670 	 * doesn't end up using space reserved for updating the inode.  We also
8671 	 * need to be able to stop the transaction and start a new one, which
8672 	 * means we need to be able to update the inode several times, and we
8673 	 * have no way of knowing how many times that will be, so we can't just
8674 	 * reserve 1 item for the entirety of the operation, so that has to be
8675 	 * done separately as well.
8676 	 *
8677 	 * So that leaves us with
8678 	 *
8679 	 * 1) rsv - for the truncate reservation, which we will steal from the
8680 	 * transaction reservation.
8681 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8682 	 * updating the inode.
8683 	 */
8684 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8685 	if (!rsv)
8686 		return -ENOMEM;
8687 	rsv->size = min_size;
8688 	rsv->failfast = 1;
8689 
8690 	/*
8691 	 * 1 for the truncate slack space
8692 	 * 1 for updating the inode.
8693 	 */
8694 	trans = btrfs_start_transaction(root, 2);
8695 	if (IS_ERR(trans)) {
8696 		ret = PTR_ERR(trans);
8697 		goto out;
8698 	}
8699 
8700 	/* Migrate the slack space for the truncate to our reserve */
8701 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8702 				      min_size, false);
8703 	BUG_ON(ret);
8704 
8705 	trans->block_rsv = rsv;
8706 
8707 	while (1) {
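	/*
	 * Drop the extent items in batches: whenever the truncate runs out of
	 * reserved space (-ENOSPC) or must be restarted (-EAGAIN), update the
	 * inode, end the transaction, flush dirty btree pages and start over
	 * with a refilled reservation.
	 */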
8708 		struct extent_state *cached_state = NULL;
8709 		const u64 new_size = inode->i_size;
8710 		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8711 
8712 		control.new_size = new_size;
8713 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
8714 				 &cached_state);
8715 		/*
8716 		 * We want to drop from the next block forward in case this new
8717 		 * size is not block aligned since we will be keeping the last
8718 		 * block of the extent just the way it is.
8719 		 */
8720 		btrfs_drop_extent_cache(BTRFS_I(inode),
8721 					ALIGN(new_size, fs_info->sectorsize),
8722 					(u64)-1, 0);
8723 
8724 		ret = btrfs_truncate_inode_items(trans, root, &control);
8725 
8726 		inode_sub_bytes(inode, control.sub_bytes);
8727 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), control.last_size);
8728 
8729 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
8730 				     (u64)-1, &cached_state);
8731 
8732 		trans->block_rsv = &fs_info->trans_block_rsv;
8733 		if (ret != -ENOSPC && ret != -EAGAIN)
8734 			break;
8735 
8736 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
8737 		if (ret)
8738 			break;
8739 
8740 		btrfs_end_transaction(trans);
8741 		btrfs_btree_balance_dirty(fs_info);
8742 
8743 		trans = btrfs_start_transaction(root, 2);
8744 		if (IS_ERR(trans)) {
8745 			ret = PTR_ERR(trans);
8746 			trans = NULL;
8747 			break;
8748 		}
8749 
8750 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8751 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8752 					      rsv, min_size, false);
8753 		BUG_ON(ret);	/* shouldn't happen */
8754 		trans->block_rsv = rsv;
8755 	}
8756 
8757 	/*
8758 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8759 	 * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK, then we
8760 	 * know we've truncated everything except the last little bit, and can
8761 	 * do btrfs_truncate_block and then update the disk_i_size.
8762 	 */
8763 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8764 		btrfs_end_transaction(trans);
8765 		btrfs_btree_balance_dirty(fs_info);
8766 
8767 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
8768 		if (ret)
8769 			goto out;
8770 		trans = btrfs_start_transaction(root, 1);
8771 		if (IS_ERR(trans)) {
8772 			ret = PTR_ERR(trans);
8773 			goto out;
8774 		}
8775 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8776 	}
8777 
8778 	if (trans) {
8779 		int ret2;
8780 
8781 		trans->block_rsv = &fs_info->trans_block_rsv;
8782 		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
8783 		if (ret2 && !ret)
8784 			ret = ret2;
8785 
8786 		ret2 = btrfs_end_transaction(trans);
8787 		if (ret2 && !ret)
8788 			ret = ret2;
8789 		btrfs_btree_balance_dirty(fs_info);
8790 	}
8791 out:
8792 	btrfs_free_block_rsv(fs_info, rsv);
8793 	/*
8794 	 * So if we truncate and then write and fsync we normally would just
8795 	 * write the extents that changed, which is a problem if we need to
8796 	 * first truncate that entire inode.  So set this flag so we write out
8797 	 * all of the extents in the inode to the sync log so we're completely
8798 	 * safe.
8799 	 *
8800 	 * If no extents were dropped or trimmed we don't need to force the next
8801 	 * fsync to truncate all the inode's items from the log and re-log them
8802 	 * all. This means the truncate operation did not change the file size,
8803 	 * or changed it to a smaller size but there was only an implicit hole
8804 	 * between the old i_size and the new i_size, and there were no prealloc
8805 	 * extents beyond i_size to drop.
8806 	 */
8807 	if (control.extents_found > 0)
8808 		btrfs_set_inode_full_sync(BTRFS_I(inode));
8809 
8810 	return ret;
8811 }
8812 
8813 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
8814 				     struct inode *dir)
8815 {
8816 	struct inode *inode;
8817 
8818 	inode = new_inode(dir->i_sb);
8819 	if (inode) {
8820 		/*
8821 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8822 		 * the parent's sgid bit is set. This is probably a bug.
8823 		 */
8824 		inode_init_owner(mnt_userns, inode, NULL,
8825 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8826 		inode->i_op = &btrfs_dir_inode_operations;
8827 		inode->i_fop = &btrfs_dir_file_operations;
8828 	}
8829 	return inode;
8830 }
8831 
8832 struct inode *btrfs_alloc_inode(struct super_block *sb)
8833 {
8834 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8835 	struct btrfs_inode *ei;
8836 	struct inode *inode;
8837 
8838 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8839 	if (!ei)
8840 		return NULL;
8841 
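	/*
	 * The slab constructor (init_once) only initializes the embedded VFS
	 * inode, so every btrfs-specific field must be initialized here on
	 * each allocation.
	 */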
8842 	ei->root = NULL;
8843 	ei->generation = 0;
8844 	ei->last_trans = 0;
8845 	ei->last_sub_trans = 0;
8846 	ei->logged_trans = 0;
8847 	ei->delalloc_bytes = 0;
8848 	ei->new_delalloc_bytes = 0;
8849 	ei->defrag_bytes = 0;
8850 	ei->disk_i_size = 0;
8851 	ei->flags = 0;
8852 	ei->ro_flags = 0;
8853 	ei->csum_bytes = 0;
8854 	ei->index_cnt = (u64)-1;
8855 	ei->dir_index = 0;
8856 	ei->last_unlink_trans = 0;
8857 	ei->last_reflink_trans = 0;
8858 	ei->last_log_commit = 0;
8859 
8860 	spin_lock_init(&ei->lock);
8861 	ei->outstanding_extents = 0;
8862 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8863 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8864 					      BTRFS_BLOCK_RSV_DELALLOC);
8865 	ei->runtime_flags = 0;
8866 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8867 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8868 
8869 	ei->delayed_node = NULL;
8870 
8871 	ei->i_otime.tv_sec = 0;
8872 	ei->i_otime.tv_nsec = 0;
8873 
8874 	inode = &ei->vfs_inode;
8875 	extent_map_tree_init(&ei->extent_tree);
8876 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
8877 	extent_io_tree_init(fs_info, &ei->io_failure_tree,
8878 			    IO_TREE_INODE_IO_FAILURE, inode);
8879 	extent_io_tree_init(fs_info, &ei->file_extent_tree,
8880 			    IO_TREE_INODE_FILE_EXTENT, inode);
8881 	ei->io_tree.track_uptodate = true;
8882 	ei->io_failure_tree.track_uptodate = true;
8883 	atomic_set(&ei->sync_writers, 0);
8884 	mutex_init(&ei->log_mutex);
8885 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8886 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8887 	INIT_LIST_HEAD(&ei->delayed_iput);
8888 	RB_CLEAR_NODE(&ei->rb_node);
8889 	init_rwsem(&ei->i_mmap_lock);
8890 
8891 	return inode;
8892 }
8893 
8894 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8895 void btrfs_test_destroy_inode(struct inode *inode)
8896 {
8897 	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
8898 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8899 }
8900 #endif
8901 
8902 void btrfs_free_inode(struct inode *inode)
8903 {
8904 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8905 }
8906 
8907 void btrfs_destroy_inode(struct inode *vfs_inode)
8908 {
8909 	struct btrfs_ordered_extent *ordered;
8910 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8911 	struct btrfs_root *root = inode->root;
8912 
8913 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8914 	WARN_ON(vfs_inode->i_data.nrpages);
8915 	WARN_ON(inode->block_rsv.reserved);
8916 	WARN_ON(inode->block_rsv.size);
8917 	WARN_ON(inode->outstanding_extents);
8918 	if (!S_ISDIR(vfs_inode->i_mode)) {
8919 		WARN_ON(inode->delalloc_bytes);
8920 		WARN_ON(inode->new_delalloc_bytes);
8921 	}
8922 	WARN_ON(inode->csum_bytes);
8923 	WARN_ON(inode->defrag_bytes);
8924 
8925 	/*
8926 	 * This can happen when we create an inode, but somebody else also
8927 	 * created the same inode and we need to destroy the one we already
8928 	 * created.
8929 	 */
8930 	if (!root)
8931 		return;
8932 
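	/*
	 * Ordered extents should never survive until inode destruction; if
	 * any are found, complain and clean them up so they are not leaked.
	 */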
8933 	while (1) {
8934 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8935 		if (!ordered)
8936 			break;
8937 		btrfs_err(root->fs_info,
8938 			  "found ordered extent %llu %llu on inode cleanup",
8939 			  ordered->file_offset, ordered->num_bytes);
8940 		btrfs_remove_ordered_extent(inode, ordered);
8941 		/* Put twice: once for the lookup ref, once for the tree's ref. */
8942 		btrfs_put_ordered_extent(ordered);
8943 		btrfs_put_ordered_extent(ordered);
8944 	}
8946 	btrfs_qgroup_check_reserved_leak(inode);
8947 	inode_tree_del(inode);
8948 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
8949 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8950 	btrfs_put_root(inode->root);
8951 }
8952 
8953 int btrfs_drop_inode(struct inode *inode)
8954 {
8955 	struct btrfs_root *root = BTRFS_I(inode)->root;
8956 
8957 	if (root == NULL)
8958 		return 1;
8959 
8960 	/* The snap/subvol tree is being deleted. */
8961 	if (btrfs_root_refs(&root->root_item) == 0)
8962 		return 1;
8963 	else
8964 		return generic_drop_inode(inode);
8965 }
8966 
8967 static void init_once(void *foo)
8968 {
8969 	struct btrfs_inode *ei = foo;
8970 
8971 	inode_init_once(&ei->vfs_inode);
8972 }
8973 
8974 void __cold btrfs_destroy_cachep(void)
8975 {
8976 	/*
8977 	 * Make sure all delayed RCU-freed inodes are flushed before we
8978 	 * destroy the cache.
8979 	 */
8980 	rcu_barrier();
8981 	bioset_exit(&btrfs_dio_bioset);
8982 	kmem_cache_destroy(btrfs_inode_cachep);
8983 	kmem_cache_destroy(btrfs_trans_handle_cachep);
8984 	kmem_cache_destroy(btrfs_path_cachep);
8985 	kmem_cache_destroy(btrfs_free_space_cachep);
8986 	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
8987 }
8988 
8989 int __init btrfs_init_cachep(void)
8990 {
8991 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8992 			sizeof(struct btrfs_inode), 0,
8993 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8994 			init_once);
8995 	if (!btrfs_inode_cachep)
8996 		goto fail;
8997 
8998 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
8999 			sizeof(struct btrfs_trans_handle), 0,
9000 			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9001 	if (!btrfs_trans_handle_cachep)
9002 		goto fail;
9003 
9004 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
9005 			sizeof(struct btrfs_path), 0,
9006 			SLAB_MEM_SPREAD, NULL);
9007 	if (!btrfs_path_cachep)
9008 		goto fail;
9009 
9010 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9011 			sizeof(struct btrfs_free_space), 0,
9012 			SLAB_MEM_SPREAD, NULL);
9013 	if (!btrfs_free_space_cachep)
9014 		goto fail;
9015 
9016 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9017 							PAGE_SIZE, PAGE_SIZE,
9018 							SLAB_MEM_SPREAD, NULL);
9019 	if (!btrfs_free_space_bitmap_cachep)
9020 		goto fail;
9021 
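	/*
	 * struct btrfs_dio_private embeds its bio as the last member, so the
	 * bioset front-pads each allocation with the private data and a
	 * single allocation covers both.
	 */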
9022 	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
9023 			offsetof(struct btrfs_dio_private, bio),
9024 			BIOSET_NEED_BVECS))
9025 		goto fail;
9026 
9027 	return 0;
9028 fail:
9029 	btrfs_destroy_cachep();
9030 	return -ENOMEM;
9031 }
9032 
9033 static int btrfs_getattr(struct user_namespace *mnt_userns,
9034 			 const struct path *path, struct kstat *stat,
9035 			 u32 request_mask, unsigned int flags)
9036 {
9037 	u64 delalloc_bytes;
9038 	u64 inode_bytes;
9039 	struct inode *inode = d_inode(path->dentry);
9040 	u32 blocksize = inode->i_sb->s_blocksize;
9041 	u32 bi_flags = BTRFS_I(inode)->flags;
9042 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
9043 
9044 	stat->result_mask |= STATX_BTIME;
9045 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9046 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9047 	if (bi_flags & BTRFS_INODE_APPEND)
9048 		stat->attributes |= STATX_ATTR_APPEND;
9049 	if (bi_flags & BTRFS_INODE_COMPRESS)
9050 		stat->attributes |= STATX_ATTR_COMPRESSED;
9051 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
9052 		stat->attributes |= STATX_ATTR_IMMUTABLE;
9053 	if (bi_flags & BTRFS_INODE_NODUMP)
9054 		stat->attributes |= STATX_ATTR_NODUMP;
9055 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
9056 		stat->attributes |= STATX_ATTR_VERITY;
9057 
9058 	stat->attributes_mask |= (STATX_ATTR_APPEND |
9059 				  STATX_ATTR_COMPRESSED |
9060 				  STATX_ATTR_IMMUTABLE |
9061 				  STATX_ATTR_NODUMP);
9062 
9063 	generic_fillattr(mnt_userns, inode, stat);
9064 	stat->dev = BTRFS_I(inode)->root->anon_dev;
9065 
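	/*
	 * Include not-yet-allocated delalloc bytes in st_blocks, so stat()
	 * reflects the space that writeback is about to allocate.
	 */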
9066 	spin_lock(&BTRFS_I(inode)->lock);
9067 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9068 	inode_bytes = inode_get_bytes(inode);
9069 	spin_unlock(&BTRFS_I(inode)->lock);
9070 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
9071 			ALIGN(delalloc_bytes, blocksize)) >> 9;
9072 	return 0;
9073 }
9074 
9075 static int btrfs_rename_exchange(struct inode *old_dir,
9076 			      struct dentry *old_dentry,
9077 			      struct inode *new_dir,
9078 			      struct dentry *new_dentry)
9079 {
9080 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9081 	struct btrfs_trans_handle *trans;
9082 	unsigned int trans_num_items;
9083 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9084 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9085 	struct inode *new_inode = new_dentry->d_inode;
9086 	struct inode *old_inode = old_dentry->d_inode;
9087 	struct timespec64 ctime = current_time(old_inode);
9088 	struct btrfs_rename_ctx old_rename_ctx;
9089 	struct btrfs_rename_ctx new_rename_ctx;
9090 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9091 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9092 	u64 old_idx = 0;
9093 	u64 new_idx = 0;
9094 	int ret;
9095 	int ret2;
9096 	bool need_abort = false;
9097 
9098 	/*
9099 	 * For non-subvolumes allow exchange only within one subvolume, in the
9100 	 * same inode namespace. Two subvolumes (represented as directories) can
9101 	 * be exchanged as they're logical links and have fixed inode numbers.
9102 	 */
9103 	if (root != dest &&
9104 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
9105 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
9106 		return -EXDEV;
9107 
9108 	/* close the race window with snapshot create/destroy ioctl */
9109 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9110 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
9111 		down_read(&fs_info->subvol_sem);
9112 
9113 	/*
9114 	 * For each inode:
9115 	 * 1 to remove old dir item
9116 	 * 1 to remove old dir index
9117 	 * 1 to add new dir item
9118 	 * 1 to add new dir index
9119 	 * 1 to update parent inode
9120 	 *
9121 	 * If the parents are the same, we only need to account for one of them.
9122 	 */
9123 	trans_num_items = (old_dir == new_dir ? 9 : 10);
9124 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9125 		/*
9126 		 * 1 to remove old root ref
9127 		 * 1 to remove old root backref
9128 		 * 1 to add new root ref
9129 		 * 1 to add new root backref
9130 		 */
9131 		trans_num_items += 4;
9132 	} else {
9133 		/*
9134 		 * 1 to update inode item
9135 		 * 1 to remove old inode ref
9136 		 * 1 to add new inode ref
9137 		 */
9138 		trans_num_items += 3;
9139 	}
9140 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9141 		trans_num_items += 4;
9142 	else
9143 		trans_num_items += 3;
9144 	trans = btrfs_start_transaction(root, trans_num_items);
9145 	if (IS_ERR(trans)) {
9146 		ret = PTR_ERR(trans);
9147 		goto out_notrans;
9148 	}
9149 
9150 	if (dest != root) {
9151 		ret = btrfs_record_root_in_trans(trans, dest);
9152 		if (ret)
9153 			goto out_fail;
9154 	}
9155 
9156 	/*
9157 	 * We need to find a free sequence number both in the source and
9158 	 * in the destination directory for the exchange.
9159 	 */
9160 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9161 	if (ret)
9162 		goto out_fail;
9163 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9164 	if (ret)
9165 		goto out_fail;
9166 
9167 	BTRFS_I(old_inode)->dir_index = 0ULL;
9168 	BTRFS_I(new_inode)->dir_index = 0ULL;
9169 
9170 	/* Reference for the source. */
9171 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9172 		/* force full log commit if subvolume involved. */
9173 		btrfs_set_log_full_commit(trans);
9174 	} else {
9175 		ret = btrfs_insert_inode_ref(trans, dest,
9176 					     new_dentry->d_name.name,
9177 					     new_dentry->d_name.len,
9178 					     old_ino,
9179 					     btrfs_ino(BTRFS_I(new_dir)),
9180 					     old_idx);
9181 		if (ret)
9182 			goto out_fail;
9183 		need_abort = true;
9184 	}
9185 
9186 	/* And now for the dest. */
9187 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9188 		/* force full log commit if subvolume involved. */
9189 		btrfs_set_log_full_commit(trans);
9190 	} else {
9191 		ret = btrfs_insert_inode_ref(trans, root,
9192 					     old_dentry->d_name.name,
9193 					     old_dentry->d_name.len,
9194 					     new_ino,
9195 					     btrfs_ino(BTRFS_I(old_dir)),
9196 					     new_idx);
9197 		if (ret) {
9198 			if (need_abort)
9199 				btrfs_abort_transaction(trans, ret);
9200 			goto out_fail;
9201 		}
9202 	}
9203 
9204 	/* Update inode version and ctime/mtime. */
9205 	inode_inc_iversion(old_dir);
9206 	inode_inc_iversion(new_dir);
9207 	inode_inc_iversion(old_inode);
9208 	inode_inc_iversion(new_inode);
9209 	old_dir->i_ctime = old_dir->i_mtime = ctime;
9210 	new_dir->i_ctime = new_dir->i_mtime = ctime;
9211 	old_inode->i_ctime = ctime;
9212 	new_inode->i_ctime = ctime;
9213 
9214 	if (old_dentry->d_parent != new_dentry->d_parent) {
9215 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9216 				BTRFS_I(old_inode), 1);
9217 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9218 				BTRFS_I(new_inode), 1);
9219 	}
9220 
9221 	/* src is a subvolume */
9222 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9223 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9224 	} else { /* src is an inode */
9225 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9226 					   BTRFS_I(old_dentry->d_inode),
9227 					   old_dentry->d_name.name,
9228 					   old_dentry->d_name.len,
9229 					   &old_rename_ctx);
9230 		if (!ret)
9231 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9232 	}
9233 	if (ret) {
9234 		btrfs_abort_transaction(trans, ret);
9235 		goto out_fail;
9236 	}
9237 
9238 	/* dest is a subvolume */
9239 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9240 		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9241 	} else { /* dest is an inode */
9242 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9243 					   BTRFS_I(new_dentry->d_inode),
9244 					   new_dentry->d_name.name,
9245 					   new_dentry->d_name.len,
9246 					   &new_rename_ctx);
9247 		if (!ret)
9248 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
9249 	}
9250 	if (ret) {
9251 		btrfs_abort_transaction(trans, ret);
9252 		goto out_fail;
9253 	}
9254 
9255 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9256 			     new_dentry->d_name.name,
9257 			     new_dentry->d_name.len, 0, old_idx);
9258 	if (ret) {
9259 		btrfs_abort_transaction(trans, ret);
9260 		goto out_fail;
9261 	}
9262 
9263 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9264 			     old_dentry->d_name.name,
9265 			     old_dentry->d_name.len, 0, new_idx);
9266 	if (ret) {
9267 		btrfs_abort_transaction(trans, ret);
9268 		goto out_fail;
9269 	}
9270 
9271 	if (old_inode->i_nlink == 1)
9272 		BTRFS_I(old_inode)->dir_index = old_idx;
9273 	if (new_inode->i_nlink == 1)
9274 		BTRFS_I(new_inode)->dir_index = new_idx;
9275 
9276 	/*
9277 	 * Now pin the logs of the roots. We do it to ensure that no other task
9278 	 * can sync the logs while we are in progress with the rename, because
9279 	 * that could result in an inconsistency in case any of the inodes that
9280 	 * are part of this rename operation were logged before.
9281 	 */
9282 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9283 		btrfs_pin_log_trans(root);
9284 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9285 		btrfs_pin_log_trans(dest);
9286 
9287 	/* Do the log updates for all inodes. */
9288 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9289 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9290 				   old_rename_ctx.index, new_dentry->d_parent);
9291 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9292 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
9293 				   new_rename_ctx.index, old_dentry->d_parent);
9294 
9295 	/* Now unpin the logs. */
9296 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9297 		btrfs_end_log_trans(root);
9298 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
9299 		btrfs_end_log_trans(dest);
9300 out_fail:
9301 	ret2 = btrfs_end_transaction(trans);
9302 	ret = ret ? ret : ret2;
9303 out_notrans:
9304 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9305 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9306 		up_read(&fs_info->subvol_sem);
9307 
9308 	return ret;
9309 }
9310 
9311 static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns,
9312 					struct inode *dir)
9313 {
9314 	struct inode *inode;
9315 
9316 	inode = new_inode(dir->i_sb);
9317 	if (inode) {
9318 		inode_init_owner(mnt_userns, inode, dir,
9319 				 S_IFCHR | WHITEOUT_MODE);
9320 		inode->i_op = &btrfs_special_inode_operations;
9321 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
9322 	}
9323 	return inode;
9324 }
9325 
9326 static int btrfs_rename(struct user_namespace *mnt_userns,
9327 			struct inode *old_dir, struct dentry *old_dentry,
9328 			struct inode *new_dir, struct dentry *new_dentry,
9329 			unsigned int flags)
9330 {
9331 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9332 	struct btrfs_new_inode_args whiteout_args = {
9333 		.dir = old_dir,
9334 		.dentry = old_dentry,
9335 	};
9336 	struct btrfs_trans_handle *trans;
9337 	unsigned int trans_num_items;
9338 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9339 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9340 	struct inode *new_inode = d_inode(new_dentry);
9341 	struct inode *old_inode = d_inode(old_dentry);
9342 	struct btrfs_rename_ctx rename_ctx;
9343 	u64 index = 0;
9344 	int ret;
9345 	int ret2;
9346 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9347 
9348 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9349 		return -EPERM;
9350 
9351 	/* We only allow renaming subvolume links between subvolumes. */
9352 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9353 		return -EXDEV;
9354 
9355 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9356 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9357 		return -ENOTEMPTY;
9358 
9359 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9360 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9361 		return -ENOTEMPTY;
9363 
9364 	/* Check for collisions, even if the name isn't there. */
9365 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9366 			     new_dentry->d_name.name,
9367 			     new_dentry->d_name.len);
9368 
9369 	if (ret) {
9370 		if (ret == -EEXIST) {
9371 			/* We shouldn't get -EEXIST without a new_inode. */
9373 			if (WARN_ON(!new_inode)) {
9374 				return ret;
9375 			}
9376 		} else {
9377 			/* maybe -EOVERFLOW */
9378 			return ret;
9379 		}
9380 	}
9381 	ret = 0;
9382 
9383 	/*
9384 	 * We're using rename to replace one file with another. Start IO on it
9385 	 * now so we don't add too much work to the end of the transaction.
9386 	 */
9387 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9388 		filemap_flush(old_inode->i_mapping);
9389 
9390 	if (flags & RENAME_WHITEOUT) {
9391 		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
9392 		if (!whiteout_args.inode)
9393 			return -ENOMEM;
9394 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9395 		if (ret)
9396 			goto out_whiteout_inode;
9397 	} else {
9398 		/* 1 to update the old parent inode. */
9399 		trans_num_items = 1;
9400 	}
9401 
9402 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9403 		/* Close the race window with snapshot create/destroy ioctl */
9404 		down_read(&fs_info->subvol_sem);
9405 		/*
9406 		 * 1 to remove old root ref
9407 		 * 1 to remove old root backref
9408 		 * 1 to add new root ref
9409 		 * 1 to add new root backref
9410 		 */
9411 		trans_num_items += 4;
9412 	} else {
9413 		/*
9414 		 * 1 to update inode
9415 		 * 1 to remove old inode ref
9416 		 * 1 to add new inode ref
9417 		 */
9418 		trans_num_items += 3;
9419 	}
9420 	/*
9421 	 * 1 to remove old dir item
9422 	 * 1 to remove old dir index
9423 	 * 1 to add new dir item
9424 	 * 1 to add new dir index
9425 	 */
9426 	trans_num_items += 4;
9427 	/* 1 to update new parent inode if it's not the same as the old parent */
9428 	if (new_dir != old_dir)
9429 		trans_num_items++;
9430 	if (new_inode) {
9431 		/*
9432 		 * 1 to update inode
9433 		 * 1 to remove inode ref
9434 		 * 1 to remove dir item
9435 		 * 1 to remove dir index
9436 		 * 1 to possibly add orphan item
9437 		 */
9438 		trans_num_items += 5;
9439 	}
9440 	trans = btrfs_start_transaction(root, trans_num_items);
9441 	if (IS_ERR(trans)) {
9442 		ret = PTR_ERR(trans);
9443 		goto out_notrans;
9444 	}
9445 
9446 	if (dest != root) {
9447 		ret = btrfs_record_root_in_trans(trans, dest);
9448 		if (ret)
9449 			goto out_fail;
9450 	}
9451 
9452 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9453 	if (ret)
9454 		goto out_fail;
9455 
9456 	BTRFS_I(old_inode)->dir_index = 0ULL;
9457 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9458 		/* force full log commit if subvolume involved. */
9459 		btrfs_set_log_full_commit(trans);
9460 	} else {
9461 		ret = btrfs_insert_inode_ref(trans, dest,
9462 					     new_dentry->d_name.name,
9463 					     new_dentry->d_name.len,
9464 					     old_ino,
9465 					     btrfs_ino(BTRFS_I(new_dir)), index);
9466 		if (ret)
9467 			goto out_fail;
9468 	}
9469 
9470 	inode_inc_iversion(old_dir);
9471 	inode_inc_iversion(new_dir);
9472 	inode_inc_iversion(old_inode);
9473 	old_dir->i_ctime = old_dir->i_mtime =
9474 	new_dir->i_ctime = new_dir->i_mtime =
9475 	old_inode->i_ctime = current_time(old_dir);
9476 
9477 	if (old_dentry->d_parent != new_dentry->d_parent)
9478 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9479 				BTRFS_I(old_inode), 1);
9480 
9481 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9482 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9483 	} else {
9484 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9485 					BTRFS_I(d_inode(old_dentry)),
9486 					old_dentry->d_name.name,
9487 					old_dentry->d_name.len,
9488 					&rename_ctx);
9489 		if (!ret)
9490 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9491 	}
9492 	if (ret) {
9493 		btrfs_abort_transaction(trans, ret);
9494 		goto out_fail;
9495 	}
9496 
9497 	if (new_inode) {
9498 		inode_inc_iversion(new_inode);
9499 		new_inode->i_ctime = current_time(new_inode);
9500 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9501 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9502 			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9503 			BUG_ON(new_inode->i_nlink == 0);
9504 		} else {
9505 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9506 						 BTRFS_I(d_inode(new_dentry)),
9507 						 new_dentry->d_name.name,
9508 						 new_dentry->d_name.len);
9509 		}
9510 		if (!ret && new_inode->i_nlink == 0)
9511 			ret = btrfs_orphan_add(trans,
9512 					BTRFS_I(d_inode(new_dentry)));
9513 		if (ret) {
9514 			btrfs_abort_transaction(trans, ret);
9515 			goto out_fail;
9516 		}
9517 	}
9518 
9519 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9520 			     new_dentry->d_name.name,
9521 			     new_dentry->d_name.len, 0, index);
9522 	if (ret) {
9523 		btrfs_abort_transaction(trans, ret);
9524 		goto out_fail;
9525 	}
9526 
9527 	if (old_inode->i_nlink == 1)
9528 		BTRFS_I(old_inode)->dir_index = index;
9529 
9530 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9531 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9532 				   rename_ctx.index, new_dentry->d_parent);
9533 
9534 	if (flags & RENAME_WHITEOUT) {
9535 		ret = btrfs_create_new_inode(trans, &whiteout_args);
9536 		if (ret) {
9537 			btrfs_abort_transaction(trans, ret);
9538 			goto out_fail;
9539 		} else {
9540 			unlock_new_inode(whiteout_args.inode);
9541 			iput(whiteout_args.inode);
9542 			whiteout_args.inode = NULL;
9543 		}
9544 	}
9545 out_fail:
9546 	ret2 = btrfs_end_transaction(trans);
9547 	ret = ret ? ret : ret2;
9548 out_notrans:
9549 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9550 		up_read(&fs_info->subvol_sem);
9551 	if (flags & RENAME_WHITEOUT)
9552 		btrfs_new_inode_args_destroy(&whiteout_args);
9553 out_whiteout_inode:
9554 	if (flags & RENAME_WHITEOUT)
9555 		iput(whiteout_args.inode);
9556 	return ret;
9557 }
9558 
9559 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
9560 			 struct dentry *old_dentry, struct inode *new_dir,
9561 			 struct dentry *new_dentry, unsigned int flags)
9562 {
9563 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9564 		return -EINVAL;
9565 
9566 	if (flags & RENAME_EXCHANGE)
9567 		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9568 					  new_dentry);
9569 
9570 	return btrfs_rename(mnt_userns, old_dir, old_dentry, new_dir,
9571 			    new_dentry, flags);
9572 }
9573 
9574 struct btrfs_delalloc_work {
9575 	struct inode *inode;
9576 	struct completion completion;
9577 	struct list_head list;
9578 	struct btrfs_work work;
9579 };
9580 
9581 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9582 {
9583 	struct btrfs_delalloc_work *delalloc_work;
9584 	struct inode *inode;
9585 
9586 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9587 				     work);
9588 	inode = delalloc_work->inode;
9589 	filemap_flush(inode->i_mapping);
9590 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9591 				&BTRFS_I(inode)->runtime_flags))
9592 		filemap_flush(inode->i_mapping);
9593 
9594 	iput(inode);
9595 	complete(&delalloc_work->completion);
9596 }
9597 
9598 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9599 {
9600 	struct btrfs_delalloc_work *work;
9601 
9602 	work = kmalloc(sizeof(*work), GFP_NOFS);
9603 	if (!work)
9604 		return NULL;
9605 
9606 	init_completion(&work->completion);
9607 	INIT_LIST_HEAD(&work->list);
9608 	work->inode = inode;
9609 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9610 
9611 	return work;
9612 }
9613 
9614 /*
9615  * Some fairly slow code that needs optimization. This walks the list
9616  * of all the inodes with pending delalloc and forces them to disk.
9617  */
9618 static int start_delalloc_inodes(struct btrfs_root *root,
9619 				 struct writeback_control *wbc, bool snapshot,
9620 				 bool in_reclaim_context)
9621 {
9622 	struct btrfs_inode *binode;
9623 	struct inode *inode;
9624 	struct btrfs_delalloc_work *work, *next;
9625 	struct list_head works;
9626 	struct list_head splice;
9627 	int ret = 0;
9628 	bool full_flush = wbc->nr_to_write == LONG_MAX;
9629 
9630 	INIT_LIST_HEAD(&works);
9631 	INIT_LIST_HEAD(&splice);
9632 
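	/*
	 * Splice the delalloc list onto a local list so we can walk it
	 * without holding the spinlock across the flush of each inode;
	 * entries are moved back to the root's list as they are processed.
	 */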
9633 	mutex_lock(&root->delalloc_mutex);
9634 	spin_lock(&root->delalloc_lock);
9635 	list_splice_init(&root->delalloc_inodes, &splice);
9636 	while (!list_empty(&splice)) {
9637 		binode = list_entry(splice.next, struct btrfs_inode,
9638 				    delalloc_inodes);
9639 
9640 		list_move_tail(&binode->delalloc_inodes,
9641 			       &root->delalloc_inodes);
9642 
9643 		if (in_reclaim_context &&
9644 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9645 			continue;
9646 
9647 		inode = igrab(&binode->vfs_inode);
9648 		if (!inode) {
9649 			cond_resched_lock(&root->delalloc_lock);
9650 			continue;
9651 		}
9652 		spin_unlock(&root->delalloc_lock);
9653 
9654 		if (snapshot)
9655 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9656 				&binode->runtime_flags);
9657 		if (full_flush) {
9658 			work = btrfs_alloc_delalloc_work(inode);
9659 			if (!work) {
9660 				iput(inode);
9661 				ret = -ENOMEM;
9662 				goto out;
9663 			}
9664 			list_add_tail(&work->list, &works);
9665 			btrfs_queue_work(root->fs_info->flush_workers,
9666 					 &work->work);
9667 		} else {
9668 			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9669 			btrfs_add_delayed_iput(inode);
9670 			if (ret || wbc->nr_to_write <= 0)
9671 				goto out;
9672 		}
9673 		cond_resched();
9674 		spin_lock(&root->delalloc_lock);
9675 	}
9676 	spin_unlock(&root->delalloc_lock);
9677 
9678 out:
9679 	list_for_each_entry_safe(work, next, &works, list) {
9680 		list_del_init(&work->list);
9681 		wait_for_completion(&work->completion);
9682 		kfree(work);
9683 	}
9684 
9685 	if (!list_empty(&splice)) {
9686 		spin_lock(&root->delalloc_lock);
9687 		list_splice_tail(&splice, &root->delalloc_inodes);
9688 		spin_unlock(&root->delalloc_lock);
9689 	}
9690 	mutex_unlock(&root->delalloc_mutex);
9691 	return ret;
9692 }
9693 
9694 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9695 {
9696 	struct writeback_control wbc = {
9697 		.nr_to_write = LONG_MAX,
9698 		.sync_mode = WB_SYNC_NONE,
9699 		.range_start = 0,
9700 		.range_end = LLONG_MAX,
9701 	};
9702 	struct btrfs_fs_info *fs_info = root->fs_info;
9703 
9704 	if (BTRFS_FS_ERROR(fs_info))
9705 		return -EROFS;
9706 
9707 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9708 }
9709 
9710 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9711 			       bool in_reclaim_context)
9712 {
9713 	struct writeback_control wbc = {
9714 		.nr_to_write = nr,
9715 		.sync_mode = WB_SYNC_NONE,
9716 		.range_start = 0,
9717 		.range_end = LLONG_MAX,
9718 	};
9719 	struct btrfs_root *root;
9720 	struct list_head splice;
9721 	int ret;
9722 
9723 	if (BTRFS_FS_ERROR(fs_info))
9724 		return -EROFS;
9725 
9726 	INIT_LIST_HEAD(&splice);
9727 
9728 	mutex_lock(&fs_info->delalloc_root_mutex);
9729 	spin_lock(&fs_info->delalloc_root_lock);
9730 	list_splice_init(&fs_info->delalloc_roots, &splice);
9731 	while (!list_empty(&splice)) {
9732 		/*
9733 		 * Reset nr_to_write here so we know that we're doing a full
9734 		 * flush.
9735 		 */
9736 		if (nr == LONG_MAX)
9737 			wbc.nr_to_write = LONG_MAX;
9738 
9739 		root = list_first_entry(&splice, struct btrfs_root,
9740 					delalloc_root);
9741 		root = btrfs_grab_root(root);
9742 		BUG_ON(!root);
9743 		list_move_tail(&root->delalloc_root,
9744 			       &fs_info->delalloc_roots);
9745 		spin_unlock(&fs_info->delalloc_root_lock);
9746 
9747 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9748 		btrfs_put_root(root);
9749 		if (ret < 0 || wbc.nr_to_write <= 0)
9750 			goto out;
9751 		spin_lock(&fs_info->delalloc_root_lock);
9752 	}
9753 	spin_unlock(&fs_info->delalloc_root_lock);
9754 
9755 	ret = 0;
9756 out:
9757 	if (!list_empty(&splice)) {
9758 		spin_lock(&fs_info->delalloc_root_lock);
9759 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9760 		spin_unlock(&fs_info->delalloc_root_lock);
9761 	}
9762 	mutex_unlock(&fs_info->delalloc_root_mutex);
9763 	return ret;
9764 }
9765 
9766 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
9767 			 struct dentry *dentry, const char *symname)
9768 {
9769 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9770 	struct btrfs_trans_handle *trans;
9771 	struct btrfs_root *root = BTRFS_I(dir)->root;
9772 	struct btrfs_path *path;
9773 	struct btrfs_key key;
9774 	struct inode *inode;
9775 	struct btrfs_new_inode_args new_inode_args = {
9776 		.dir = dir,
9777 		.dentry = dentry,
9778 	};
9779 	unsigned int trans_num_items;
9780 	int err;
9781 	int name_len;
9782 	int datasize;
9783 	unsigned long ptr;
9784 	struct btrfs_file_extent_item *ei;
9785 	struct extent_buffer *leaf;
9786 
9787 	name_len = strlen(symname);
9788 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9789 		return -ENAMETOOLONG;
9790 
9791 	inode = new_inode(dir->i_sb);
9792 	if (!inode)
9793 		return -ENOMEM;
9794 	inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO);
9795 	inode->i_op = &btrfs_symlink_inode_operations;
9796 	inode_nohighmem(inode);
9797 	inode->i_mapping->a_ops = &btrfs_aops;
9798 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9799 	inode_set_bytes(inode, name_len);
9800 
9801 	new_inode_args.inode = inode;
9802 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9803 	if (err)
9804 		goto out_inode;
9805 	/* 1 additional item for the inline extent */
9806 	trans_num_items++;
9807 
9808 	trans = btrfs_start_transaction(root, trans_num_items);
9809 	if (IS_ERR(trans)) {
9810 		err = PTR_ERR(trans);
9811 		goto out_new_inode_args;
9812 	}
9813 
9814 	err = btrfs_create_new_inode(trans, &new_inode_args);
9815 	if (err)
9816 		goto out;
9817 
9818 	path = btrfs_alloc_path();
9819 	if (!path) {
9820 		err = -ENOMEM;
9821 		btrfs_abort_transaction(trans, err);
9822 		discard_new_inode(inode);
9823 		inode = NULL;
9824 		goto out;
9825 	}
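	/*
	 * The symlink target fits inline: store it as an inline file extent
	 * item at offset 0 of the new inode.
	 */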
9826 	key.objectid = btrfs_ino(BTRFS_I(inode));
9827 	key.offset = 0;
9828 	key.type = BTRFS_EXTENT_DATA_KEY;
9829 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9830 	err = btrfs_insert_empty_item(trans, root, path, &key,
9831 				      datasize);
9832 	if (err) {
9833 		btrfs_abort_transaction(trans, err);
9834 		btrfs_free_path(path);
9835 		discard_new_inode(inode);
9836 		inode = NULL;
9837 		goto out;
9838 	}
9839 	leaf = path->nodes[0];
9840 	ei = btrfs_item_ptr(leaf, path->slots[0],
9841 			    struct btrfs_file_extent_item);
9842 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9843 	btrfs_set_file_extent_type(leaf, ei,
9844 				   BTRFS_FILE_EXTENT_INLINE);
9845 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9846 	btrfs_set_file_extent_compression(leaf, ei, 0);
9847 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9848 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9849 
9850 	ptr = btrfs_file_extent_inline_start(ei);
9851 	write_extent_buffer(leaf, symname, ptr, name_len);
9852 	btrfs_mark_buffer_dirty(leaf);
9853 	btrfs_free_path(path);
9854 
9855 	d_instantiate_new(dentry, inode);
9856 	err = 0;
9857 out:
9858 	btrfs_end_transaction(trans);
9859 	btrfs_btree_balance_dirty(fs_info);
9860 out_new_inode_args:
9861 	btrfs_new_inode_args_destroy(&new_inode_args);
9862 out_inode:
9863 	if (err)
9864 		iput(inode);
9865 	return err;
9866 }
9867 
9868 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9869 				       struct btrfs_trans_handle *trans_in,
9870 				       struct btrfs_inode *inode,
9871 				       struct btrfs_key *ins,
9872 				       u64 file_offset)
9873 {
9874 	struct btrfs_file_extent_item stack_fi;
9875 	struct btrfs_replace_extent_info extent_info;
9876 	struct btrfs_trans_handle *trans = trans_in;
9877 	struct btrfs_path *path;
9878 	u64 start = ins->objectid;
9879 	u64 len = ins->offset;
9880 	int qgroup_released;
9881 	int ret;
9882 
9883 	memset(&stack_fi, 0, sizeof(stack_fi));
9884 
9885 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9886 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9887 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9888 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9889 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9890 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9891 	/* Encryption and other encoding are reserved and all 0. */
9892 
9893 	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
9894 	if (qgroup_released < 0)
9895 		return ERR_PTR(qgroup_released);
9896 
9897 	if (trans) {
9898 		ret = insert_reserved_file_extent(trans, inode,
9899 						  file_offset, &stack_fi,
9900 						  true, qgroup_released);
9901 		if (ret)
9902 			goto free_qgroup;
9903 		return trans;
9904 	}
9905 
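	/*
	 * No transaction was passed in, so go through
	 * btrfs_replace_file_extents(), which manages its own transaction
	 * and hands it back via @trans.
	 */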
9906 	extent_info.disk_offset = start;
9907 	extent_info.disk_len = len;
9908 	extent_info.data_offset = 0;
9909 	extent_info.data_len = len;
9910 	extent_info.file_offset = file_offset;
9911 	extent_info.extent_buf = (char *)&stack_fi;
9912 	extent_info.is_new_extent = true;
9913 	extent_info.update_times = true;
9914 	extent_info.qgroup_reserved = qgroup_released;
9915 	extent_info.insertions = 0;
9916 
9917 	path = btrfs_alloc_path();
9918 	if (!path) {
9919 		ret = -ENOMEM;
9920 		goto free_qgroup;
9921 	}
9922 
9923 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9924 				     file_offset + len - 1, &extent_info,
9925 				     &trans);
9926 	btrfs_free_path(path);
9927 	if (ret)
9928 		goto free_qgroup;
9929 	return trans;
9930 
9931 free_qgroup:
9932 	/*
9933 	 * We released the qgroup data range at the beginning of the function,
9934 	 * and normally the qgroup_released bytes will be freed when the
9935 	 * transaction commits.
9936 	 * But if we error out early, we have to free what we have released,
9937 	 * or we leak the qgroup data reservation.
9938 	 */
9939 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9940 			inode->root->root_key.objectid, qgroup_released,
9941 			BTRFS_QGROUP_RSV_DATA);
9942 	return ERR_PTR(ret);
9943 }
9944 
9945 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9946 				       u64 start, u64 num_bytes, u64 min_size,
9947 				       loff_t actual_len, u64 *alloc_hint,
9948 				       struct btrfs_trans_handle *trans)
9949 {
9950 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9951 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9952 	struct extent_map *em;
9953 	struct btrfs_root *root = BTRFS_I(inode)->root;
9954 	struct btrfs_key ins;
9955 	u64 cur_offset = start;
9956 	u64 clear_offset = start;
9957 	u64 i_size;
9958 	u64 cur_bytes;
9959 	u64 last_alloc = (u64)-1;
9960 	int ret = 0;
9961 	bool own_trans = true;
9962 	u64 end = start + num_bytes - 1;
9963 
9964 	if (trans)
9965 		own_trans = false;
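	/*
	 * Reserve and insert preallocated extents chunk by chunk (at most
	 * SZ_256M each) until the whole range is covered.
	 */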
9966 	while (num_bytes > 0) {
9967 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9968 		cur_bytes = max(cur_bytes, min_size);
9969 		/*
9970 		 * If we are severely fragmented we could end up with really
9971 		 * small allocations, so if the allocator is returning small
9972 		 * chunks, let's make its job easier by only searching for those
9973 		 * sized chunks.
9974 		 */
9975 		cur_bytes = min(cur_bytes, last_alloc);
9976 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9977 				min_size, 0, *alloc_hint, &ins, 1, 0);
9978 		if (ret)
9979 			break;
9980 
9981 		/*
9982 		 * We've reserved this space, and thus converted it from
9983 		 * ->bytes_may_use to ->bytes_reserved. For any error that happens
9984 		 * from here on out, we will only need to clear our reservation
9985 		 * for the remaining unreserved area, so advance our
9986 		 * clear_offset by our extent size.
9987 		 */
9988 		clear_offset += ins.offset;
9989 
9990 		last_alloc = ins.offset;
9991 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9992 						    &ins, cur_offset);
9993 		/*
9994 		 * Now that we inserted the prealloc extent we can finally
9995 		 * decrement the number of reservations in the block group.
9996 		 * If we did it before, we could race with relocation and have
9997 		 * relocation miss the reserved extent, making it fail later.
9998 		 */
9999 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10000 		if (IS_ERR(trans)) {
10001 			ret = PTR_ERR(trans);
10002 			btrfs_free_reserved_extent(fs_info, ins.objectid,
10003 						   ins.offset, 0);
10004 			break;
10005 		}
10006 
10007 		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10008 					cur_offset + ins.offset - 1, 0);
10009 
10010 		em = alloc_extent_map();
10011 		if (!em) {
10012 			btrfs_set_inode_full_sync(BTRFS_I(inode));
10013 			goto next;
10014 		}
10015 
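		/*
		 * Cache an extent map for the new preallocated extent so that
		 * subsequent lookups don't have to search the btree; on the
		 * allocation failure above we instead force a full fsync.
		 */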
10016 		em->start = cur_offset;
10017 		em->orig_start = cur_offset;
10018 		em->len = ins.offset;
10019 		em->block_start = ins.objectid;
10020 		em->block_len = ins.offset;
10021 		em->orig_block_len = ins.offset;
10022 		em->ram_bytes = ins.offset;
10023 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10024 		em->generation = trans->transid;
10025 
10026 		while (1) {
10027 			write_lock(&em_tree->lock);
10028 			ret = add_extent_mapping(em_tree, em, 1);
10029 			write_unlock(&em_tree->lock);
10030 			if (ret != -EEXIST)
10031 				break;
10032 			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10033 						cur_offset + ins.offset - 1,
10034 						0);
10035 		}
10036 		free_extent_map(em);
10037 next:
10038 		num_bytes -= ins.offset;
10039 		cur_offset += ins.offset;
10040 		*alloc_hint = ins.objectid + ins.offset;
10041 
10042 		inode_inc_iversion(inode);
10043 		inode->i_ctime = current_time(inode);
10044 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10045 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10046 		    (actual_len > inode->i_size) &&
10047 		    (cur_offset > inode->i_size)) {
10048 			if (cur_offset > actual_len)
10049 				i_size = actual_len;
10050 			else
10051 				i_size = cur_offset;
10052 			i_size_write(inode, i_size);
10053 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
10054 		}
10055 
10056 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
10057 
10058 		if (ret) {
10059 			btrfs_abort_transaction(trans, ret);
10060 			if (own_trans)
10061 				btrfs_end_transaction(trans);
10062 			break;
10063 		}
10064 
10065 		if (own_trans) {
10066 			btrfs_end_transaction(trans);
10067 			trans = NULL;
10068 		}
10069 	}
10070 	if (clear_offset < end)
10071 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
10072 			end - clear_offset + 1);
10073 	return ret;
10074 }
10075 
10076 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10077 			      u64 start, u64 num_bytes, u64 min_size,
10078 			      loff_t actual_len, u64 *alloc_hint)
10079 {
10080 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10081 					   min_size, actual_len, alloc_hint,
10082 					   NULL);
10083 }
10084 
10085 int btrfs_prealloc_file_range_trans(struct inode *inode,
10086 				    struct btrfs_trans_handle *trans, int mode,
10087 				    u64 start, u64 num_bytes, u64 min_size,
10088 				    loff_t actual_len, u64 *alloc_hint)
10089 {
10090 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10091 					   min_size, actual_len, alloc_hint, trans);
10092 }
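
/*
 * Illustrative usage sketch (not a real call site in this file): a caller
 * that wants to preallocate the first 1MiB of a file without changing
 * i_size, falling back to sector-sized chunks under fragmentation, might
 * do something like:
 *
 *	u64 alloc_hint = 0;
 *	int ret;
 *
 *	ret = btrfs_prealloc_file_range(inode, FALLOC_FL_KEEP_SIZE, 0, SZ_1M,
 *					fs_info->sectorsize, SZ_1M,
 *					&alloc_hint);
 *
 * Callers that already hold a transaction handle use
 * btrfs_prealloc_file_range_trans() instead, in which case the loop above
 * does not start or end transactions on its own (own_trans stays false).
 */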
10093 
10094 static int btrfs_permission(struct user_namespace *mnt_userns,
10095 			    struct inode *inode, int mask)
10096 {
10097 	struct btrfs_root *root = BTRFS_I(inode)->root;
10098 	umode_t mode = inode->i_mode;
10099 
10100 	if (mask & MAY_WRITE &&
10101 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10102 		if (btrfs_root_readonly(root))
10103 			return -EROFS;
10104 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10105 			return -EACCES;
10106 	}
10107 	return generic_permission(mnt_userns, inode, mask);
10108 }
10109 
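/*
 * Note (added for clarity): new_inode_args.orphan == true below asks
 * btrfs_create_new_inode() to insert an orphan item instead of a directory
 * entry, so an O_TMPFILE inode that is never linked in is reclaimed on the
 * final iput() (or by orphan cleanup after a crash).
 */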
10110 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
10111 			 struct dentry *dentry, umode_t mode)
10112 {
10113 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10114 	struct btrfs_trans_handle *trans;
10115 	struct btrfs_root *root = BTRFS_I(dir)->root;
10116 	struct inode *inode;
10117 	struct btrfs_new_inode_args new_inode_args = {
10118 		.dir = dir,
10119 		.dentry = dentry,
10120 		.orphan = true,
10121 	};
10122 	unsigned int trans_num_items;
10123 	int ret;
10124 
10125 	inode = new_inode(dir->i_sb);
10126 	if (!inode)
10127 		return -ENOMEM;
10128 	inode_init_owner(mnt_userns, inode, dir, mode);
10129 	inode->i_fop = &btrfs_file_operations;
10130 	inode->i_op = &btrfs_file_inode_operations;
10131 	inode->i_mapping->a_ops = &btrfs_aops;
10132 
10133 	new_inode_args.inode = inode;
10134 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
10135 	if (ret)
10136 		goto out_inode;
10137 
10138 	trans = btrfs_start_transaction(root, trans_num_items);
10139 	if (IS_ERR(trans)) {
10140 		ret = PTR_ERR(trans);
10141 		goto out_new_inode_args;
10142 	}
10143 
10144 	ret = btrfs_create_new_inode(trans, &new_inode_args);
10145 
10146 	/*
10147 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
10148 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
10149 	 * 0, through:
10150 	 *
10151 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10152 	 */
10153 	set_nlink(inode, 1);
10154 
10155 	if (!ret) {
10156 		d_tmpfile(dentry, inode);
10157 		unlock_new_inode(inode);
10158 		mark_inode_dirty(inode);
10159 	}
10160 
10161 	btrfs_end_transaction(trans);
10162 	btrfs_btree_balance_dirty(fs_info);
10163 out_new_inode_args:
10164 	btrfs_new_inode_args_destroy(&new_inode_args);
10165 out_inode:
10166 	if (ret)
10167 		iput(inode);
10168 	return ret;
10169 }
10170 
10171 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
10172 {
10173 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10174 	unsigned long index = start >> PAGE_SHIFT;
10175 	unsigned long end_index = end >> PAGE_SHIFT;
10176 	struct page *page;
10177 	u32 len;
10178 
10179 	ASSERT(end + 1 - start <= U32_MAX);
10180 	len = end + 1 - start;
10181 	while (index <= end_index) {
10182 		page = find_get_page(inode->vfs_inode.i_mapping, index);
10183 		ASSERT(page); /* Pages should be in the extent_io_tree */
10184 
10185 		btrfs_page_set_writeback(fs_info, page, start, len);
10186 		put_page(page);
10187 		index++;
10188 	}
10189 }
10190 
10191 static int btrfs_encoded_io_compression_from_extent(
10192 				struct btrfs_fs_info *fs_info,
10193 				int compress_type)
10194 {
10195 	switch (compress_type) {
10196 	case BTRFS_COMPRESS_NONE:
10197 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
10198 	case BTRFS_COMPRESS_ZLIB:
10199 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
10200 	case BTRFS_COMPRESS_LZO:
10201 		/*
10202 		 * The LZO format depends on the sector size. 64K is the maximum
10203 		 * sector size that we support.
10204 		 */
10205 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
10206 			return -EINVAL;
10207 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
10208 		       (fs_info->sectorsize_bits - 12);
10209 	case BTRFS_COMPRESS_ZSTD:
10210 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
10211 	default:
10212 		return -EUCLEAN;
10213 	}
10214 }
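
/*
 * A worked example of the LZO mapping above (note added for clarity): with
 * a 4K sector size, sectorsize_bits is 12 and the result is
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 0; with a 64K sector size,
 * sectorsize_bits is 16, giving BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 4,
 * which is BTRFS_ENCODED_IO_COMPRESSION_LZO_64K. The write-side switch in
 * btrfs_do_encoded_write() inverts this same mapping.
 */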
10215 
10216 static ssize_t btrfs_encoded_read_inline(
10217 				struct kiocb *iocb,
10218 				struct iov_iter *iter, u64 start,
10219 				u64 lockend,
10220 				struct extent_state **cached_state,
10221 				u64 extent_start, size_t count,
10222 				struct btrfs_ioctl_encoded_io_args *encoded,
10223 				bool *unlocked)
10224 {
10225 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10226 	struct btrfs_root *root = inode->root;
10227 	struct btrfs_fs_info *fs_info = root->fs_info;
10228 	struct extent_io_tree *io_tree = &inode->io_tree;
10229 	struct btrfs_path *path;
10230 	struct extent_buffer *leaf;
10231 	struct btrfs_file_extent_item *item;
10232 	u64 ram_bytes;
10233 	unsigned long ptr;
10234 	void *tmp;
10235 	ssize_t ret;
10236 
10237 	path = btrfs_alloc_path();
10238 	if (!path) {
10239 		ret = -ENOMEM;
10240 		goto out;
10241 	}
10242 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
10243 				       extent_start, 0);
10244 	if (ret) {
10245 		if (ret > 0) {
10246 			/* The extent item disappeared? */
10247 			ret = -EIO;
10248 		}
10249 		goto out;
10250 	}
10251 	leaf = path->nodes[0];
10252 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10253 
10254 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
10255 	ptr = btrfs_file_extent_inline_start(item);
10256 
10257 	encoded->len = min_t(u64, extent_start + ram_bytes,
10258 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10259 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
10260 				 btrfs_file_extent_compression(leaf, item));
10261 	if (ret < 0)
10262 		goto out;
10263 	encoded->compression = ret;
10264 	if (encoded->compression) {
10265 		size_t inline_size;
10266 
10267 		inline_size = btrfs_file_extent_inline_item_len(leaf,
10268 								path->slots[0]);
10269 		if (inline_size > count) {
10270 			ret = -ENOBUFS;
10271 			goto out;
10272 		}
10273 		count = inline_size;
10274 		encoded->unencoded_len = ram_bytes;
10275 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
10276 	} else {
10277 		count = min_t(u64, count, encoded->len);
10278 		encoded->len = count;
10279 		encoded->unencoded_len = count;
10280 		ptr += iocb->ki_pos - extent_start;
10281 	}
10282 
10283 	tmp = kmalloc(count, GFP_NOFS);
10284 	if (!tmp) {
10285 		ret = -ENOMEM;
10286 		goto out;
10287 	}
10288 	read_extent_buffer(leaf, tmp, ptr, count);
10289 	btrfs_release_path(path);
10290 	unlock_extent_cached(io_tree, start, lockend, cached_state);
10291 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10292 	*unlocked = true;
10293 
10294 	ret = copy_to_iter(tmp, count, iter);
10295 	if (ret != count)
10296 		ret = -EFAULT;
10297 	kfree(tmp);
10298 out:
10299 	btrfs_free_path(path);
10300 	return ret;
10301 }
10302 
10303 struct btrfs_encoded_read_private {
10304 	struct btrfs_inode *inode;
10305 	u64 file_offset;
10306 	wait_queue_head_t wait;
10307 	atomic_t pending;
10308 	blk_status_t status;
10309 	bool skip_csum;
10310 };
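
/*
 * Note on the lifetime protocol (added for clarity): @pending counts the
 * submitting task plus every in-flight bio. It starts at 1 in
 * btrfs_encoded_read_regular_fill_pages(), submit_encoded_read_bio()
 * increments it per submitted bio, and each btrfs_encoded_read_endio()
 * decrements it. The submitter's final atomic_dec_return() either observes
 * zero (all bios already completed) or waits on @wait for the last endio
 * to wake it up.
 */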
10311 
10312 static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
10313 					    struct bio *bio, int mirror_num)
10314 {
10315 	struct btrfs_encoded_read_private *priv = bio->bi_private;
10316 	struct btrfs_bio *bbio = btrfs_bio(bio);
10317 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10318 	blk_status_t ret;
10319 
10320 	if (!priv->skip_csum) {
10321 		ret = btrfs_lookup_bio_sums(&inode->vfs_inode, bio, NULL);
10322 		if (ret)
10323 			return ret;
10324 	}
10325 
10326 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
10327 	if (ret) {
10328 		btrfs_bio_free_csum(bbio);
10329 		return ret;
10330 	}
10331 
10332 	atomic_inc(&priv->pending);
10333 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
10334 	if (ret) {
10335 		atomic_dec(&priv->pending);
10336 		btrfs_bio_free_csum(bbio);
10337 	}
10338 	return ret;
10339 }
10340 
10341 static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
10342 {
10343 	const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK);
10344 	struct btrfs_encoded_read_private *priv = bbio->bio.bi_private;
10345 	struct btrfs_inode *inode = priv->inode;
10346 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10347 	u32 sectorsize = fs_info->sectorsize;
10348 	struct bio_vec *bvec;
10349 	struct bvec_iter_all iter_all;
10350 	u64 start = priv->file_offset;
10351 	u32 bio_offset = 0;
10352 
10353 	if (priv->skip_csum || !uptodate)
10354 		return bbio->bio.bi_status;
10355 
10356 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
10357 		unsigned int i, nr_sectors, pgoff;
10358 
10359 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
10360 		pgoff = bvec->bv_offset;
10361 		for (i = 0; i < nr_sectors; i++) {
10362 			ASSERT(pgoff < PAGE_SIZE);
10363 			if (check_data_csum(&inode->vfs_inode, bbio, bio_offset,
10364 					    bvec->bv_page, pgoff, start))
10365 				return BLK_STS_IOERR;
10366 			start += sectorsize;
10367 			bio_offset += sectorsize;
10368 			pgoff += sectorsize;
10369 		}
10370 	}
10371 	return BLK_STS_OK;
10372 }
10373 
10374 static void btrfs_encoded_read_endio(struct bio *bio)
10375 {
10376 	struct btrfs_encoded_read_private *priv = bio->bi_private;
10377 	struct btrfs_bio *bbio = btrfs_bio(bio);
10378 	blk_status_t status;
10379 
10380 	status = btrfs_encoded_read_verify_csum(bbio);
10381 	if (status) {
10382 		/*
10383 		 * The memory barrier implied by the atomic_dec_return() here
10384 		 * pairs with the memory barrier implied by the
10385 		 * atomic_dec_return() or io_wait_event() in
10386 		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
10387 		 * write is observed before the load of status in
10388 		 * btrfs_encoded_read_regular_fill_pages().
10389 		 */
10390 		WRITE_ONCE(priv->status, status);
10391 	}
10392 	if (!atomic_dec_return(&priv->pending))
10393 		wake_up(&priv->wait);
10394 	btrfs_bio_free_csum(bbio);
10395 	bio_put(bio);
10396 }
10397 
10398 static int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
10399 						 u64 file_offset,
10400 						 u64 disk_bytenr,
10401 						 u64 disk_io_size,
10402 						 struct page **pages)
10403 {
10404 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10405 	struct btrfs_encoded_read_private priv = {
10406 		.inode = inode,
10407 		.file_offset = file_offset,
10408 		.pending = ATOMIC_INIT(1),
10409 		.skip_csum = (inode->flags & BTRFS_INODE_NODATASUM),
10410 	};
10411 	unsigned long i = 0;
10412 	u64 cur = 0;
10413 	int ret;
10414 
10415 	init_waitqueue_head(&priv.wait);
10416 	/*
10417 	 * Submit bios for the extent, splitting due to bio or stripe limits as
10418 	 * necessary.
10419 	 */
10420 	while (cur < disk_io_size) {
10421 		struct extent_map *em;
10422 		struct btrfs_io_geometry geom;
10423 		struct bio *bio = NULL;
10424 		u64 remaining;
10425 
10426 		em = btrfs_get_chunk_map(fs_info, disk_bytenr + cur,
10427 					 disk_io_size - cur);
10428 		if (IS_ERR(em)) {
10429 			ret = PTR_ERR(em);
10430 		} else {
10431 			ret = btrfs_get_io_geometry(fs_info, em, BTRFS_MAP_READ,
10432 						    disk_bytenr + cur, &geom);
10433 			free_extent_map(em);
10434 		}
10435 		if (ret) {
10436 			WRITE_ONCE(priv.status, errno_to_blk_status(ret));
10437 			break;
10438 		}
10439 		remaining = min(geom.len, disk_io_size - cur);
10440 		while (bio || remaining) {
10441 			size_t bytes = min_t(u64, remaining, PAGE_SIZE);
10442 
10443 			if (!bio) {
10444 				bio = btrfs_bio_alloc(BIO_MAX_VECS);
10445 				bio->bi_iter.bi_sector =
10446 					(disk_bytenr + cur) >> SECTOR_SHIFT;
10447 				bio->bi_end_io = btrfs_encoded_read_endio;
10448 				bio->bi_private = &priv;
10449 				bio->bi_opf = REQ_OP_READ;
10450 			}
10451 
10452 			if (!bytes ||
10453 			    bio_add_page(bio, pages[i], bytes, 0) < bytes) {
10454 				blk_status_t status;
10455 
10456 				status = submit_encoded_read_bio(inode, bio, 0);
10457 				if (status) {
10458 					WRITE_ONCE(priv.status, status);
10459 					bio_put(bio);
10460 					goto out;
10461 				}
10462 				bio = NULL;
10463 				continue;
10464 			}
10465 
10466 			i++;
10467 			cur += bytes;
10468 			remaining -= bytes;
10469 		}
10470 	}
10471 
10472 out:
10473 	if (atomic_dec_return(&priv.pending))
10474 		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10475 	/* See btrfs_encoded_read_endio() for ordering. */
10476 	return blk_status_to_errno(READ_ONCE(priv.status));
10477 }
10478 
10479 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10480 					  struct iov_iter *iter,
10481 					  u64 start, u64 lockend,
10482 					  struct extent_state **cached_state,
10483 					  u64 disk_bytenr, u64 disk_io_size,
10484 					  size_t count, bool compressed,
10485 					  bool *unlocked)
10486 {
10487 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10488 	struct extent_io_tree *io_tree = &inode->io_tree;
10489 	struct page **pages;
10490 	unsigned long nr_pages, i;
10491 	u64 cur;
10492 	size_t page_offset;
10493 	ssize_t ret;
10494 
10495 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10496 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10497 	if (!pages)
10498 		return -ENOMEM;
10499 	ret = btrfs_alloc_page_array(nr_pages, pages);
10500 	if (ret) {
10501 		ret = -ENOMEM;
10502 		goto out;
10503 	}
10504 
10505 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10506 						    disk_io_size, pages);
10507 	if (ret)
10508 		goto out;
10509 
10510 	unlock_extent_cached(io_tree, start, lockend, cached_state);
10511 	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10512 	*unlocked = true;
10513 
10514 	if (compressed) {
10515 		i = 0;
10516 		page_offset = 0;
10517 	} else {
10518 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10519 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10520 	}
10521 	cur = 0;
10522 	while (cur < count) {
10523 		size_t bytes = min_t(size_t, count - cur,
10524 				     PAGE_SIZE - page_offset);
10525 
10526 		if (copy_page_to_iter(pages[i], page_offset, bytes,
10527 				      iter) != bytes) {
10528 			ret = -EFAULT;
10529 			goto out;
10530 		}
10531 		i++;
10532 		cur += bytes;
10533 		page_offset = 0;
10534 	}
10535 	ret = count;
10536 out:
10537 	for (i = 0; i < nr_pages; i++) {
10538 		if (pages[i])
10539 			__free_page(pages[i]);
10540 	}
10541 	kfree(pages);
10542 	return ret;
10543 }
10544 
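/*
 * Note (added for clarity): as of this revision this is reached from the
 * BTRFS_IOC_ENCODED_READ ioctl path. On success, @encoded describes the
 * encoding of the data copied to @iter: length, compression type and, for
 * compressed extents, the unencoded length and offset.
 */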
10545 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10546 			   struct btrfs_ioctl_encoded_io_args *encoded)
10547 {
10548 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10549 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10550 	struct extent_io_tree *io_tree = &inode->io_tree;
10551 	ssize_t ret;
10552 	size_t count = iov_iter_count(iter);
10553 	u64 start, lockend, disk_bytenr, disk_io_size;
10554 	struct extent_state *cached_state = NULL;
10555 	struct extent_map *em;
10556 	bool unlocked = false;
10557 
10558 	file_accessed(iocb->ki_filp);
10559 
10560 	btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10561 
10562 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10563 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10564 		return 0;
10565 	}
10566 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10567 	/*
10568 	 * We don't know how long the extent containing iocb->ki_pos is, but if
10569 	 * it's compressed we know that it won't be longer than this.
10570 	 */
10571 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10572 
10573 	for (;;) {
10574 		struct btrfs_ordered_extent *ordered;
10575 
10576 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10577 					       lockend - start + 1);
10578 		if (ret)
10579 			goto out_unlock_inode;
10580 		lock_extent_bits(io_tree, start, lockend, &cached_state);
10581 		ordered = btrfs_lookup_ordered_range(inode, start,
10582 						     lockend - start + 1);
10583 		if (!ordered)
10584 			break;
10585 		btrfs_put_ordered_extent(ordered);
10586 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10587 		cond_resched();
10588 	}
10589 
10590 	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
10591 	if (IS_ERR(em)) {
10592 		ret = PTR_ERR(em);
10593 		goto out_unlock_extent;
10594 	}
10595 
10596 	if (em->block_start == EXTENT_MAP_INLINE) {
10597 		u64 extent_start = em->start;
10598 
10599 		/*
10600 		 * For inline extents we get everything we need out of the
10601 		 * extent item.
10602 		 */
10603 		free_extent_map(em);
10604 		em = NULL;
10605 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10606 						&cached_state, extent_start,
10607 						count, encoded, &unlocked);
10608 		goto out;
10609 	}
10610 
10611 	/*
10612 	 * We only want to return up to EOF even if the extent extends beyond
10613 	 * that.
10614 	 */
10615 	encoded->len = min_t(u64, extent_map_end(em),
10616 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10617 	if (em->block_start == EXTENT_MAP_HOLE ||
10618 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
10619 		disk_bytenr = EXTENT_MAP_HOLE;
10620 		count = min_t(u64, count, encoded->len);
10621 		encoded->len = count;
10622 		encoded->unencoded_len = count;
10623 	} else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10624 		disk_bytenr = em->block_start;
10625 		/*
10626 		 * Bail if the buffer isn't large enough to return the whole
10627 		 * compressed extent.
10628 		 */
10629 		if (em->block_len > count) {
10630 			ret = -ENOBUFS;
10631 			goto out_em;
10632 		}
10633 		disk_io_size = count = em->block_len;
10634 		encoded->unencoded_len = em->ram_bytes;
10635 		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10636 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10637 							     em->compress_type);
10638 		if (ret < 0)
10639 			goto out_em;
10640 		encoded->compression = ret;
10641 	} else {
10642 		disk_bytenr = em->block_start + (start - em->start);
10643 		if (encoded->len > count)
10644 			encoded->len = count;
10645 		/*
10646 		 * Don't read beyond what we locked. This also limits the page
10647 		 * allocations that we'll do.
10648 		 */
10649 		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10650 		count = start + disk_io_size - iocb->ki_pos;
10651 		encoded->len = count;
10652 		encoded->unencoded_len = count;
10653 		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10654 	}
10655 	free_extent_map(em);
10656 	em = NULL;
10657 
10658 	if (disk_bytenr == EXTENT_MAP_HOLE) {
10659 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10660 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10661 		unlocked = true;
10662 		ret = iov_iter_zero(count, iter);
10663 		if (ret != count)
10664 			ret = -EFAULT;
10665 	} else {
10666 		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10667 						 &cached_state, disk_bytenr,
10668 						 disk_io_size, count,
10669 						 encoded->compression,
10670 						 &unlocked);
10671 	}
10672 
10673 out:
10674 	if (ret >= 0)
10675 		iocb->ki_pos += encoded->len;
10676 out_em:
10677 	free_extent_map(em);
10678 out_unlock_extent:
10679 	if (!unlocked)
10680 		unlock_extent_cached(io_tree, start, lockend, &cached_state);
10681 out_unlock_inode:
10682 	if (!unlocked)
10683 		btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
10684 	return ret;
10685 }
10686 
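/*
 * Note (added for clarity): as of this revision this is reached from the
 * BTRFS_IOC_ENCODED_WRITE ioctl path, with @from already pointing at the
 * encoded (e.g. compressed) bytes to be written out as a single extent.
 */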
10687 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10688 			       const struct btrfs_ioctl_encoded_io_args *encoded)
10689 {
10690 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10691 	struct btrfs_root *root = inode->root;
10692 	struct btrfs_fs_info *fs_info = root->fs_info;
10693 	struct extent_io_tree *io_tree = &inode->io_tree;
10694 	struct extent_changeset *data_reserved = NULL;
10695 	struct extent_state *cached_state = NULL;
10696 	int compression;
10697 	size_t orig_count;
10698 	u64 start, end;
10699 	u64 num_bytes, ram_bytes, disk_num_bytes;
10700 	unsigned long nr_pages, i;
10701 	struct page **pages;
10702 	struct btrfs_key ins;
10703 	bool extent_reserved = false;
10704 	struct extent_map *em;
10705 	ssize_t ret;
10706 
10707 	switch (encoded->compression) {
10708 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10709 		compression = BTRFS_COMPRESS_ZLIB;
10710 		break;
10711 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10712 		compression = BTRFS_COMPRESS_ZSTD;
10713 		break;
10714 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10715 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10716 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10717 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10718 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10719 		/* The sector size must match for LZO. */
10720 		if (encoded->compression -
10721 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10722 		    fs_info->sectorsize_bits)
10723 			return -EINVAL;
10724 		compression = BTRFS_COMPRESS_LZO;
10725 		break;
10726 	default:
10727 		return -EINVAL;
10728 	}
10729 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10730 		return -EINVAL;
10731 
10732 	orig_count = iov_iter_count(from);
10733 
10734 	/* The extent size must be sane. */
10735 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10736 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10737 		return -EINVAL;
10738 
10739 	/*
10740 	 * The compressed data must be smaller than the decompressed data.
10741 	 *
10742 	 * It's of course possible for data to compress to a larger or the same
10743 	 * size, but the buffered I/O path falls back to no compression for such
10744 	 * data, and we don't want to break any assumptions by creating these
10745 	 * extents.
10746 	 *
10747 	 * Note that this is less strict than the current check we have that the
10748 	 * compressed data must be at least one sector smaller than the
10749 	 * decompressed data. We only want to enforce the weaker requirement
10750 	 * from old kernels that it is at least one byte smaller.
10751 	 */
10752 	if (orig_count >= encoded->unencoded_len)
10753 		return -EINVAL;
10754 
10755 	/* The extent must start on a sector boundary. */
10756 	start = iocb->ki_pos;
10757 	if (!IS_ALIGNED(start, fs_info->sectorsize))
10758 		return -EINVAL;
10759 
10760 	/*
10761 	 * The extent must end on a sector boundary. However, we allow a write
10762 	 * which ends at or extends i_size to have an unaligned length; we round
10763 	 * up the extent size and set i_size to the unaligned end.
10764 	 */
10765 	if (start + encoded->len < inode->vfs_inode.i_size &&
10766 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10767 		return -EINVAL;
10768 
10769 	/* Finally, the offset in the unencoded data must be sector-aligned. */
10770 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10771 		return -EINVAL;
10772 
10773 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10774 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10775 	end = start + num_bytes - 1;
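
	/*
	 * Worked example (illustrative): with a 4K sector size, a write
	 * extending i_size with encoded->len == encoded->unencoded_len ==
	 * 10000 gives num_bytes == ram_bytes == 12288 and end == start +
	 * 12287; the unaligned tail is allowed only because the write ends
	 * at or beyond i_size, per the check above.
	 */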
10776 
10777 	/*
10778 	 * If the extent cannot be inline, the compressed data on disk must be
10779 	 * sector-aligned. For convenience, we extend it with zeroes if it
10780 	 * isn't.
10781 	 */
10782 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10783 	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10784 	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10785 	if (!pages)
10786 		return -ENOMEM;
10787 	for (i = 0; i < nr_pages; i++) {
10788 		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10789 		char *kaddr;
10790 
10791 		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10792 		if (!pages[i]) {
10793 			ret = -ENOMEM;
10794 			goto out_pages;
10795 		}
10796 		kaddr = kmap(pages[i]);
10797 		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10798 			kunmap(pages[i]);
10799 			ret = -EFAULT;
10800 			goto out_pages;
10801 		}
10802 		if (bytes < PAGE_SIZE)
10803 			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10804 		kunmap(pages[i]);
10805 	}
10806 
10807 	for (;;) {
10808 		struct btrfs_ordered_extent *ordered;
10809 
10810 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10811 		if (ret)
10812 			goto out_pages;
10813 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10814 						    start >> PAGE_SHIFT,
10815 						    end >> PAGE_SHIFT);
10816 		if (ret)
10817 			goto out_pages;
10818 		lock_extent_bits(io_tree, start, end, &cached_state);
10819 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10820 		if (!ordered &&
10821 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10822 			break;
10823 		if (ordered)
10824 			btrfs_put_ordered_extent(ordered);
10825 		unlock_extent_cached(io_tree, start, end, &cached_state);
10826 		cond_resched();
10827 	}
10828 
10829 	/*
10830 	 * We don't use the higher-level delalloc space functions because our
10831 	 * num_bytes and disk_num_bytes are different.
10832 	 */
10833 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10834 	if (ret)
10835 		goto out_unlock;
10836 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10837 	if (ret)
10838 		goto out_free_data_space;
10839 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10840 					      false);
10841 	if (ret)
10842 		goto out_qgroup_free_data;
10843 
10844 	/* Try an inline extent first. */
10845 	if (start == 0 && encoded->unencoded_len == encoded->len &&
10846 	    encoded->unencoded_offset == 0) {
10847 		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10848 					    compression, pages, true);
10849 		if (ret <= 0) {
10850 			if (ret == 0)
10851 				ret = orig_count;
10852 			goto out_delalloc_release;
10853 		}
10854 	}
10855 
10856 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10857 				   disk_num_bytes, 0, 0, &ins, 1, 1);
10858 	if (ret)
10859 		goto out_delalloc_release;
10860 	extent_reserved = true;
10861 
10862 	em = create_io_em(inode, start, num_bytes,
10863 			  start - encoded->unencoded_offset, ins.objectid,
10864 			  ins.offset, ins.offset, ram_bytes, compression,
10865 			  BTRFS_ORDERED_COMPRESSED);
10866 	if (IS_ERR(em)) {
10867 		ret = PTR_ERR(em);
10868 		goto out_free_reserved;
10869 	}
10870 	free_extent_map(em);
10871 
10872 	ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes,
10873 				       ins.objectid, ins.offset,
10874 				       encoded->unencoded_offset,
10875 				       (1 << BTRFS_ORDERED_ENCODED) |
10876 				       (1 << BTRFS_ORDERED_COMPRESSED),
10877 				       compression);
10878 	if (ret) {
10879 		btrfs_drop_extent_cache(inode, start, end, 0);
10880 		goto out_free_reserved;
10881 	}
10882 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10883 
10884 	if (start + encoded->len > inode->vfs_inode.i_size)
10885 		i_size_write(&inode->vfs_inode, start + encoded->len);
10886 
10887 	unlock_extent_cached(io_tree, start, end, &cached_state);
10888 
10889 	btrfs_delalloc_release_extents(inode, num_bytes);
10890 
10891 	if (btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid,
10892 					  ins.offset, pages, nr_pages, 0, NULL,
10893 					  false)) {
10894 		btrfs_writepage_endio_finish_ordered(inode, pages[0], start, end, 0);
10895 		ret = -EIO;
10896 		goto out_pages;
10897 	}
10898 	ret = orig_count;
10899 	goto out;
10900 
10901 out_free_reserved:
10902 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10903 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10904 out_delalloc_release:
10905 	btrfs_delalloc_release_extents(inode, num_bytes);
10906 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10907 out_qgroup_free_data:
10908 	if (ret < 0)
10909 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
10910 out_free_data_space:
10911 	/*
10912 	 * If btrfs_reserve_extent() succeeded, then we already decremented
10913 	 * bytes_may_use.
10914 	 */
10915 	if (!extent_reserved)
10916 		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10917 out_unlock:
10918 	unlock_extent_cached(io_tree, start, end, &cached_state);
10919 out_pages:
10920 	for (i = 0; i < nr_pages; i++) {
10921 		if (pages[i])
10922 			__free_page(pages[i]);
10923 	}
10924 	kvfree(pages);
10925 out:
10926 	if (ret >= 0)
10927 		iocb->ki_pos += encoded->len;
10928 	return ret;
10929 }
10930 
10931 #ifdef CONFIG_SWAP
10932 /*
10933  * Add an entry indicating a block group or device which is pinned by a
10934  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10935  * negative errno on failure.
10936  */
10937 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10938 				  bool is_block_group)
10939 {
10940 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10941 	struct btrfs_swapfile_pin *sp, *entry;
10942 	struct rb_node **p;
10943 	struct rb_node *parent = NULL;
10944 
10945 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10946 	if (!sp)
10947 		return -ENOMEM;
10948 	sp->ptr = ptr;
10949 	sp->inode = inode;
10950 	sp->is_block_group = is_block_group;
10951 	sp->bg_extent_count = 1;
10952 
10953 	spin_lock(&fs_info->swapfile_pins_lock);
10954 	p = &fs_info->swapfile_pins.rb_node;
10955 	while (*p) {
10956 		parent = *p;
10957 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10958 		if (sp->ptr < entry->ptr ||
10959 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10960 			p = &(*p)->rb_left;
10961 		} else if (sp->ptr > entry->ptr ||
10962 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10963 			p = &(*p)->rb_right;
10964 		} else {
10965 			if (is_block_group)
10966 				entry->bg_extent_count++;
10967 			spin_unlock(&fs_info->swapfile_pins_lock);
10968 			kfree(sp);
10969 			return 1;
10970 		}
10971 	}
10972 	rb_link_node(&sp->node, parent, p);
10973 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10974 	spin_unlock(&fs_info->swapfile_pins_lock);
10975 	return 0;
10976 }
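
/*
 * Note (added for clarity): the tree above is keyed by (ptr, inode), so a
 * given block group or device is pinned at most once per swapfile. Repeated
 * pins of a block group only bump bg_extent_count, which
 * btrfs_free_swapfile_pins() later passes to
 * btrfs_dec_block_group_swap_extents().
 */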
10977 
10978 /* Free all of the entries pinned by this swapfile. */
10979 static void btrfs_free_swapfile_pins(struct inode *inode)
10980 {
10981 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10982 	struct btrfs_swapfile_pin *sp;
10983 	struct rb_node *node, *next;
10984 
10985 	spin_lock(&fs_info->swapfile_pins_lock);
10986 	node = rb_first(&fs_info->swapfile_pins);
10987 	while (node) {
10988 		next = rb_next(node);
10989 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10990 		if (sp->inode == inode) {
10991 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10992 			if (sp->is_block_group) {
10993 				btrfs_dec_block_group_swap_extents(sp->ptr,
10994 							   sp->bg_extent_count);
10995 				btrfs_put_block_group(sp->ptr);
10996 			}
10997 			kfree(sp);
10998 		}
10999 		node = next;
11000 	}
11001 	spin_unlock(&fs_info->swapfile_pins_lock);
11002 }
11003 
11004 struct btrfs_swap_info {
11005 	u64 start;
11006 	u64 block_start;
11007 	u64 block_len;
11008 	u64 lowest_ppage;
11009 	u64 highest_ppage;
11010 	unsigned long nr_pages;
11011 	int nr_extents;
11012 };
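
/*
 * Note (added for clarity): an instance of this struct (bsi in
 * btrfs_swap_activate()) accumulates physically contiguous runs while the
 * file is walked. block_start/block_len describe the run currently being
 * built, lowest_ppage/highest_ppage track the physical page span reported
 * back via *span, and nr_pages/nr_extents mirror what add_swap_extent()
 * has accepted so far.
 */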
11013 
11014 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
11015 				 struct btrfs_swap_info *bsi)
11016 {
11017 	unsigned long nr_pages;
11018 	unsigned long max_pages;
11019 	u64 first_ppage, first_ppage_reported, next_ppage;
11020 	int ret;
11021 
11022 	/*
11023 	 * Our swapfile may have had its size extended after the swap header was
11024 	 * written. In that case activating the swapfile should not go beyond
11025 	 * the max size set in the swap header.
11026 	 */
11027 	if (bsi->nr_pages >= sis->max)
11028 		return 0;
11029 
11030 	max_pages = sis->max - bsi->nr_pages;
11031 	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
11032 	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
11033 				PAGE_SIZE) >> PAGE_SHIFT;
11034 
11035 	if (first_ppage >= next_ppage)
11036 		return 0;
11037 	nr_pages = next_ppage - first_ppage;
11038 	nr_pages = min(nr_pages, max_pages);
11039 
11040 	first_ppage_reported = first_ppage;
11041 	if (bsi->start == 0)
11042 		first_ppage_reported++;
11043 	if (bsi->lowest_ppage > first_ppage_reported)
11044 		bsi->lowest_ppage = first_ppage_reported;
11045 	if (bsi->highest_ppage < (next_ppage - 1))
11046 		bsi->highest_ppage = next_ppage - 1;
11047 
11048 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
11049 	if (ret < 0)
11050 		return ret;
11051 	bsi->nr_extents += ret;
11052 	bsi->nr_pages += nr_pages;
11053 	return 0;
11054 }
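
/*
 * Illustrative example of the rounding above: a physical run that starts 1K
 * into a page and is 8K long contributes only the whole pages inside it;
 * first_ppage rounds the start up and next_ppage rounds the end down, so a
 * partial page at either edge is never handed to the swap layer. With 4K
 * pages such a run yields a single swap page.
 */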
11055 
11056 static void btrfs_swap_deactivate(struct file *file)
11057 {
11058 	struct inode *inode = file_inode(file);
11059 
11060 	btrfs_free_swapfile_pins(inode);
11061 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
11062 }
11063 
11064 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
11065 			       sector_t *span)
11066 {
11067 	struct inode *inode = file_inode(file);
11068 	struct btrfs_root *root = BTRFS_I(inode)->root;
11069 	struct btrfs_fs_info *fs_info = root->fs_info;
11070 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
11071 	struct extent_state *cached_state = NULL;
11072 	struct extent_map *em = NULL;
11073 	struct btrfs_device *device = NULL;
11074 	struct btrfs_swap_info bsi = {
11075 		.lowest_ppage = (sector_t)-1ULL,
11076 	};
11077 	int ret = 0;
11078 	u64 isize;
11079 	u64 start;
11080 
11081 	/*
11082 	 * If the swap file was just created, make sure delalloc is done. If the
11083 	 * file changes again after this, the user is doing something stupid and
11084 	 * we don't really care.
11085 	 */
11086 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
11087 	if (ret)
11088 		return ret;
11089 
11090 	/*
11091 	 * The inode is locked, so these flags won't change after we check them.
11092 	 */
11093 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
11094 		btrfs_warn(fs_info, "swapfile must not be compressed");
11095 		return -EINVAL;
11096 	}
11097 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
11098 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
11099 		return -EINVAL;
11100 	}
11101 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
11102 		btrfs_warn(fs_info, "swapfile must not be checksummed");
11103 		return -EINVAL;
11104 	}
11105 
11106 	/*
11107 	 * Balance or device remove/replace/resize can move stuff around from
11108 	 * under us. The exclop protection makes sure they aren't running/won't
11109 	 * run concurrently while we are mapping the swap extents, and
11110 	 * fs_info->swapfile_pins prevents them from running while the swap
11111 	 * file is active and moving the extents. Note that this also prevents
11112 	 * a concurrent device add, which isn't strictly necessary to forbid,
11113 	 * but it's not really worth the trouble to allow it.
11114 	 */
11115 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
11116 		btrfs_warn(fs_info,
11117 	   "cannot activate swapfile while exclusive operation is running");
11118 		return -EBUSY;
11119 	}
11120 
11121 	/*
11122 	 * Prevent snapshot creation while we are activating the swap file.
11123 	 * We do not want to race with snapshot creation. If snapshot creation
11124 	 * already started before we bumped nr_swapfiles from 0 to 1 and
11125 	 * completes before the first write into the swap file after it is
11126 	 * activated, then that write would fall back to COW.
11127 	 */
11128 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
11129 		btrfs_exclop_finish(fs_info);
11130 		btrfs_warn(fs_info,
11131 	   "cannot activate swapfile because snapshot creation is in progress");
11132 		return -EINVAL;
11133 	}
11134 	/*
11135 	 * Snapshots can create extents which require COW even if NODATACOW is
11136 	 * set. We use this counter to prevent snapshots. We must increment it
11137 	 * before walking the extents because we don't want a concurrent
11138 	 * snapshot to run after we've already checked the extents.
11139 	 *
11140 	 * It is possible that the subvolume is marked for deletion but not yet
11141 	 * removed. To prevent this race, we check the root status before
11142 	 * activating the swapfile.
11143 	 */
11144 	spin_lock(&root->root_item_lock);
11145 	if (btrfs_root_dead(root)) {
11146 		spin_unlock(&root->root_item_lock);
11147 
11148 		btrfs_exclop_finish(fs_info);
11149 		btrfs_warn(fs_info,
11150 		"cannot activate swapfile because subvolume %llu is being deleted",
11151 			root->root_key.objectid);
11152 		return -EPERM;
11153 	}
11154 	atomic_inc(&root->nr_swapfiles);
11155 	spin_unlock(&root->root_item_lock);
11156 
11157 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
11158 
11159 	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
11160 	start = 0;
11161 	while (start < isize) {
11162 		u64 logical_block_start, physical_block_start;
11163 		struct btrfs_block_group *bg;
11164 		u64 len = isize - start;
11165 
11166 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
11167 		if (IS_ERR(em)) {
11168 			ret = PTR_ERR(em);
11169 			goto out;
11170 		}
11171 
11172 		if (em->block_start == EXTENT_MAP_HOLE) {
11173 			btrfs_warn(fs_info, "swapfile must not have holes");
11174 			ret = -EINVAL;
11175 			goto out;
11176 		}
11177 		if (em->block_start == EXTENT_MAP_INLINE) {
11178 			/*
11179 			 * It's unlikely we'll ever actually find ourselves
11180 			 * here, as a file small enough to fit inline won't be
11181 			 * big enough to store more than the swap header, but in
11182 			 * case something changes in the future, let's catch it
11183 			 * here rather than later.
11184 			 */
11185 			btrfs_warn(fs_info, "swapfile must not be inline");
11186 			ret = -EINVAL;
11187 			goto out;
11188 		}
11189 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
11190 			btrfs_warn(fs_info, "swapfile must not be compressed");
11191 			ret = -EINVAL;
11192 			goto out;
11193 		}
11194 
11195 		logical_block_start = em->block_start + (start - em->start);
11196 		len = min(len, em->len - (start - em->start));
11197 		free_extent_map(em);
11198 		em = NULL;
11199 
11200 		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
11201 		if (ret < 0) {
11202 			goto out;
11203 		} else if (ret) {
11204 			ret = 0;
11205 		} else {
11206 			btrfs_warn(fs_info,
11207 				   "swapfile must not be copy-on-write");
11208 			ret = -EINVAL;
11209 			goto out;
11210 		}
11211 
11212 		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
11213 		if (IS_ERR(em)) {
11214 			ret = PTR_ERR(em);
11215 			goto out;
11216 		}
11217 
11218 		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
11219 			btrfs_warn(fs_info,
11220 				   "swapfile must have single data profile");
11221 			ret = -EINVAL;
11222 			goto out;
11223 		}
11224 
11225 		if (device == NULL) {
11226 			device = em->map_lookup->stripes[0].dev;
11227 			ret = btrfs_add_swapfile_pin(inode, device, false);
11228 			if (ret == 1)
11229 				ret = 0;
11230 			else if (ret)
11231 				goto out;
11232 		} else if (device != em->map_lookup->stripes[0].dev) {
11233 			btrfs_warn(fs_info, "swapfile must be on one device");
11234 			ret = -EINVAL;
11235 			goto out;
11236 		}
11237 
11238 		physical_block_start = (em->map_lookup->stripes[0].physical +
11239 					(logical_block_start - em->start));
11240 		len = min(len, em->len - (logical_block_start - em->start));
11241 		free_extent_map(em);
11242 		em = NULL;
11243 
11244 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
11245 		if (!bg) {
11246 			btrfs_warn(fs_info,
11247 			   "could not find block group containing swapfile");
11248 			ret = -EINVAL;
11249 			goto out;
11250 		}
11251 
11252 		if (!btrfs_inc_block_group_swap_extents(bg)) {
11253 			btrfs_warn(fs_info,
11254 			   "block group for swapfile at %llu is read-only%s",
11255 			   bg->start,
11256 			   atomic_read(&fs_info->scrubs_running) ?
11257 				       " (scrub running)" : "");
11258 			btrfs_put_block_group(bg);
11259 			ret = -EINVAL;
11260 			goto out;
11261 		}
11262 
11263 		ret = btrfs_add_swapfile_pin(inode, bg, true);
11264 		if (ret) {
11265 			btrfs_put_block_group(bg);
11266 			if (ret == 1)
11267 				ret = 0;
11268 			else
11269 				goto out;
11270 		}
11271 
11272 		if (bsi.block_len &&
11273 		    bsi.block_start + bsi.block_len == physical_block_start) {
11274 			bsi.block_len += len;
11275 		} else {
11276 			if (bsi.block_len) {
11277 				ret = btrfs_add_swap_extent(sis, &bsi);
11278 				if (ret)
11279 					goto out;
11280 			}
11281 			bsi.start = start;
11282 			bsi.block_start = physical_block_start;
11283 			bsi.block_len = len;
11284 		}
11285 
11286 		start += len;
11287 	}
11288 
11289 	if (bsi.block_len)
11290 		ret = btrfs_add_swap_extent(sis, &bsi);
11291 
11292 out:
11293 	if (!IS_ERR_OR_NULL(em))
11294 		free_extent_map(em);
11295 
11296 	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
11297 
11298 	if (ret)
11299 		btrfs_swap_deactivate(file);
11300 
11301 	btrfs_drew_write_unlock(&root->snapshot_lock);
11302 
11303 	btrfs_exclop_finish(fs_info);
11304 
11305 	if (ret)
11306 		return ret;
11307 
11308 	if (device)
11309 		sis->bdev = device->bdev;
11310 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
11311 	sis->max = bsi.nr_pages;
11312 	sis->pages = bsi.nr_pages - 1;
11313 	sis->highest_bit = bsi.nr_pages - 1;
11314 	return bsi.nr_extents;
11315 }
11316 #else
11317 static void btrfs_swap_deactivate(struct file *file)
11318 {
11319 }
11320 
11321 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
11322 			       sector_t *span)
11323 {
11324 	return -EOPNOTSUPP;
11325 }
11326 #endif
11327 
11328 /*
11329  * Update the number of bytes used in the VFS' inode. When we replace extents in
11330  * Update the number of bytes used in the VFS inode. When we replace extents in
11331  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
11332  * always get a correct value.
11333  */
11334 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
11335 			      const u64 add_bytes,
11336 			      const u64 del_bytes)
11337 {
11338 	if (add_bytes == del_bytes)
11339 		return;
11340 
11341 	spin_lock(&inode->lock);
11342 	if (del_bytes > 0)
11343 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
11344 	if (add_bytes > 0)
11345 		inode_add_bytes(&inode->vfs_inode, add_bytes);
11346 	spin_unlock(&inode->lock);
11347 }
11348 
11349 /**
11350  * Verify that there are no ordered extents for a given file range.
11351  *
11352  * @inode:   The target inode.
11353  * @start:   Start offset of the file range, should be sector size aligned.
11354  * @end:     End offset (inclusive) of the file range, its value +1 should be
11355  *           sector size aligned.
11356  *
11357  * This should typically be used for cases where we have locked an inode's
11358  * VFS lock in exclusive mode, have also locked the inode's i_mmap_lock in
11359  * exclusive mode, have flushed all delalloc in the range, have waited for
11360  * all ordered extents in the range to complete, and finally have locked
11361  * the file range in the inode's io_tree.
11362  */
11363 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
11364 {
11365 	struct btrfs_root *root = inode->root;
11366 	struct btrfs_ordered_extent *ordered;
11367 
11368 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
11369 		return;
11370 
11371 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
11372 	if (ordered) {
11373 		btrfs_err(root->fs_info,
11374 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
11375 			  start, end, btrfs_ino(inode), root->root_key.objectid,
11376 			  ordered->file_offset,
11377 			  ordered->file_offset + ordered->num_bytes - 1);
11378 		btrfs_put_ordered_extent(ordered);
11379 	}
11380 
11381 	ASSERT(ordered == NULL);
11382 }
11383 
11384 static const struct inode_operations btrfs_dir_inode_operations = {
11385 	.getattr	= btrfs_getattr,
11386 	.lookup		= btrfs_lookup,
11387 	.create		= btrfs_create,
11388 	.unlink		= btrfs_unlink,
11389 	.link		= btrfs_link,
11390 	.mkdir		= btrfs_mkdir,
11391 	.rmdir		= btrfs_rmdir,
11392 	.rename		= btrfs_rename2,
11393 	.symlink	= btrfs_symlink,
11394 	.setattr	= btrfs_setattr,
11395 	.mknod		= btrfs_mknod,
11396 	.listxattr	= btrfs_listxattr,
11397 	.permission	= btrfs_permission,
11398 	.get_acl	= btrfs_get_acl,
11399 	.set_acl	= btrfs_set_acl,
11400 	.update_time	= btrfs_update_time,
11401 	.tmpfile        = btrfs_tmpfile,
11402 	.fileattr_get	= btrfs_fileattr_get,
11403 	.fileattr_set	= btrfs_fileattr_set,
11404 };
11405 
11406 static const struct file_operations btrfs_dir_file_operations = {
11407 	.llseek		= generic_file_llseek,
11408 	.read		= generic_read_dir,
11409 	.iterate_shared	= btrfs_real_readdir,
11410 	.open		= btrfs_opendir,
11411 	.unlocked_ioctl	= btrfs_ioctl,
11412 #ifdef CONFIG_COMPAT
11413 	.compat_ioctl	= btrfs_compat_ioctl,
11414 #endif
11415 	.release        = btrfs_release_file,
11416 	.fsync		= btrfs_sync_file,
11417 };
11418 
11419 /*
11420  * btrfs doesn't support the bmap operation because swapfiles
11421  * use bmap to make a mapping of extents in the file.  They assume
11422  * these extents won't change over the life of the file and they
11423  * use the bmap result to do IO directly to the drive.
11424  *
11425  * The btrfs bmap call would return logical addresses that aren't
11426  * suitable for IO and they also will change frequently as COW
11427  * operations happen.  So, swapfile + btrfs == corruption.
11428  *
11429  * For now we're avoiding this by dropping bmap.
11430  */
11431 static const struct address_space_operations btrfs_aops = {
11432 	.read_folio	= btrfs_read_folio,
11433 	.writepage	= btrfs_writepage,
11434 	.writepages	= btrfs_writepages,
11435 	.readahead	= btrfs_readahead,
11436 	.direct_IO	= noop_direct_IO,
11437 	.invalidate_folio = btrfs_invalidate_folio,
11438 	.release_folio	= btrfs_release_folio,
11439 #ifdef CONFIG_MIGRATION
11440 	.migratepage	= btrfs_migratepage,
11441 #endif
11442 	.dirty_folio	= filemap_dirty_folio,
11443 	.error_remove_page = generic_error_remove_page,
11444 	.swap_activate	= btrfs_swap_activate,
11445 	.swap_deactivate = btrfs_swap_deactivate,
11446 };
11447 
11448 static const struct inode_operations btrfs_file_inode_operations = {
11449 	.getattr	= btrfs_getattr,
11450 	.setattr	= btrfs_setattr,
11451 	.listxattr      = btrfs_listxattr,
11452 	.permission	= btrfs_permission,
11453 	.fiemap		= btrfs_fiemap,
11454 	.get_acl	= btrfs_get_acl,
11455 	.set_acl	= btrfs_set_acl,
11456 	.update_time	= btrfs_update_time,
11457 	.fileattr_get	= btrfs_fileattr_get,
11458 	.fileattr_set	= btrfs_fileattr_set,
11459 };
11460 static const struct inode_operations btrfs_special_inode_operations = {
11461 	.getattr	= btrfs_getattr,
11462 	.setattr	= btrfs_setattr,
11463 	.permission	= btrfs_permission,
11464 	.listxattr	= btrfs_listxattr,
11465 	.get_acl	= btrfs_get_acl,
11466 	.set_acl	= btrfs_set_acl,
11467 	.update_time	= btrfs_update_time,
11468 };
11469 static const struct inode_operations btrfs_symlink_inode_operations = {
11470 	.get_link	= page_get_link,
11471 	.getattr	= btrfs_getattr,
11472 	.setattr	= btrfs_setattr,
11473 	.permission	= btrfs_permission,
11474 	.listxattr	= btrfs_listxattr,
11475 	.update_time	= btrfs_update_time,
11476 };
11477 
11478 const struct dentry_operations btrfs_dentry_operations = {
11479 	.d_delete	= btrfs_dentry_delete,
11480 };
11481