xref: /openbmc/linux/fs/btrfs/inode.c (revision f39650de)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <crypto/hash.h>
7 #include <linux/kernel.h>
8 #include <linux/bio.h>
9 #include <linux/file.h>
10 #include <linux/fs.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/init.h>
15 #include <linux/string.h>
16 #include <linux/backing-dev.h>
17 #include <linux/writeback.h>
18 #include <linux/compat.h>
19 #include <linux/xattr.h>
20 #include <linux/posix_acl.h>
21 #include <linux/falloc.h>
22 #include <linux/slab.h>
23 #include <linux/ratelimit.h>
24 #include <linux/btrfs.h>
25 #include <linux/blkdev.h>
26 #include <linux/posix_acl_xattr.h>
27 #include <linux/uio.h>
28 #include <linux/magic.h>
29 #include <linux/iversion.h>
30 #include <linux/swap.h>
31 #include <linux/migrate.h>
32 #include <linux/sched/mm.h>
33 #include <linux/iomap.h>
34 #include <asm/unaligned.h>
35 #include "misc.h"
36 #include "ctree.h"
37 #include "disk-io.h"
38 #include "transaction.h"
39 #include "btrfs_inode.h"
40 #include "print-tree.h"
41 #include "ordered-data.h"
42 #include "xattr.h"
43 #include "tree-log.h"
44 #include "volumes.h"
45 #include "compression.h"
46 #include "locking.h"
47 #include "free-space-cache.h"
48 #include "props.h"
49 #include "qgroup.h"
50 #include "delalloc-space.h"
51 #include "block-group.h"
52 #include "space-info.h"
53 #include "zoned.h"
54 
55 struct btrfs_iget_args {
56 	u64 ino;
57 	struct btrfs_root *root;
58 };
59 
60 struct btrfs_dio_data {
61 	u64 reserve;
62 	loff_t length;
63 	ssize_t submitted;
64 	struct extent_changeset *data_reserved;
65 };
66 
67 static const struct inode_operations btrfs_dir_inode_operations;
68 static const struct inode_operations btrfs_symlink_inode_operations;
69 static const struct inode_operations btrfs_special_inode_operations;
70 static const struct inode_operations btrfs_file_inode_operations;
71 static const struct address_space_operations btrfs_aops;
72 static const struct file_operations btrfs_dir_file_operations;
73 
74 static struct kmem_cache *btrfs_inode_cachep;
75 struct kmem_cache *btrfs_trans_handle_cachep;
76 struct kmem_cache *btrfs_path_cachep;
77 struct kmem_cache *btrfs_free_space_cachep;
78 struct kmem_cache *btrfs_free_space_bitmap_cachep;
79 
80 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
81 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
82 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
83 static noinline int cow_file_range(struct btrfs_inode *inode,
84 				   struct page *locked_page,
85 				   u64 start, u64 end, int *page_started,
86 				   unsigned long *nr_written, int unlock);
87 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
88 				       u64 len, u64 orig_start, u64 block_start,
89 				       u64 block_len, u64 orig_block_len,
90 				       u64 ram_bytes, int compress_type,
91 				       int type);
92 
93 static void __endio_write_update_ordered(struct btrfs_inode *inode,
94 					 const u64 offset, const u64 bytes,
95 					 const bool uptodate);
96 
97 /*
98  * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
99  *
100  * ilock_flags can have the following bit set:
101  *
102  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
103  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
104  *		     return -EAGAIN
105  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
106  */
107 int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
108 {
109 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
110 		if (ilock_flags & BTRFS_ILOCK_TRY) {
111 			if (!inode_trylock_shared(inode))
112 				return -EAGAIN;
113 			else
114 				return 0;
115 		}
116 		inode_lock_shared(inode);
117 	} else {
118 		if (ilock_flags & BTRFS_ILOCK_TRY) {
119 			if (!inode_trylock(inode))
120 				return -EAGAIN;
121 			else
122 				return 0;
123 		}
124 		inode_lock(inode);
125 	}
126 	if (ilock_flags & BTRFS_ILOCK_MMAP)
127 		down_write(&BTRFS_I(inode)->i_mmap_lock);
128 	return 0;
129 }
130 
131 /*
132  * btrfs_inode_unlock - unock inode i_rwsem
133  *
134  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
135  * to decide whether the lock acquired is shared or exclusive.
136  */
137 void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
138 {
139 	if (ilock_flags & BTRFS_ILOCK_MMAP)
140 		up_write(&BTRFS_I(inode)->i_mmap_lock);
141 	if (ilock_flags & BTRFS_ILOCK_SHARED)
142 		inode_unlock_shared(inode);
143 	else
144 		inode_unlock(inode);
145 }
146 
147 /*
148  * Cleanup all submitted ordered extents in specified range to handle errors
149  * from the btrfs_run_delalloc_range() callback.
150  *
151  * NOTE: caller must ensure that when an error happens, it can not call
152  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
153  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
154  * to be released, which we want to happen only when finishing the ordered
155  * extent (btrfs_finish_ordered_io()).
156  */
157 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
158 						 struct page *locked_page,
159 						 u64 offset, u64 bytes)
160 {
161 	unsigned long index = offset >> PAGE_SHIFT;
162 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
163 	u64 page_start = page_offset(locked_page);
164 	u64 page_end = page_start + PAGE_SIZE - 1;
165 
166 	struct page *page;
167 
168 	while (index <= end_index) {
169 		page = find_get_page(inode->vfs_inode.i_mapping, index);
170 		index++;
171 		if (!page)
172 			continue;
173 		ClearPagePrivate2(page);
174 		put_page(page);
175 	}
176 
177 	/*
178 	 * In case this page belongs to the delalloc range being instantiated
179 	 * then skip it, since the first page of a range is going to be
180 	 * properly cleaned up by the caller of run_delalloc_range
181 	 */
182 	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
183 		offset += PAGE_SIZE;
184 		bytes -= PAGE_SIZE;
185 	}
186 
187 	return __endio_write_update_ordered(inode, offset, bytes, false);
188 }
189 
190 static int btrfs_dirty_inode(struct inode *inode);
191 
192 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
193 				     struct inode *inode,  struct inode *dir,
194 				     const struct qstr *qstr)
195 {
196 	int err;
197 
198 	err = btrfs_init_acl(trans, inode, dir);
199 	if (!err)
200 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
201 	return err;
202 }
203 
204 /*
205  * this does all the hard work for inserting an inline extent into
206  * the btree.  The caller should have done a btrfs_drop_extents so that
207  * no overlapping inline items exist in the btree
208  */
209 static int insert_inline_extent(struct btrfs_trans_handle *trans,
210 				struct btrfs_path *path, bool extent_inserted,
211 				struct btrfs_root *root, struct inode *inode,
212 				u64 start, size_t size, size_t compressed_size,
213 				int compress_type,
214 				struct page **compressed_pages)
215 {
216 	struct extent_buffer *leaf;
217 	struct page *page = NULL;
218 	char *kaddr;
219 	unsigned long ptr;
220 	struct btrfs_file_extent_item *ei;
221 	int ret;
222 	size_t cur_size = size;
223 	unsigned long offset;
224 
225 	ASSERT((compressed_size > 0 && compressed_pages) ||
226 	       (compressed_size == 0 && !compressed_pages));
227 
228 	if (compressed_size && compressed_pages)
229 		cur_size = compressed_size;
230 
231 	if (!extent_inserted) {
232 		struct btrfs_key key;
233 		size_t datasize;
234 
235 		key.objectid = btrfs_ino(BTRFS_I(inode));
236 		key.offset = start;
237 		key.type = BTRFS_EXTENT_DATA_KEY;
238 
239 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
240 		ret = btrfs_insert_empty_item(trans, root, path, &key,
241 					      datasize);
242 		if (ret)
243 			goto fail;
244 	}
245 	leaf = path->nodes[0];
246 	ei = btrfs_item_ptr(leaf, path->slots[0],
247 			    struct btrfs_file_extent_item);
248 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
249 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
250 	btrfs_set_file_extent_encryption(leaf, ei, 0);
251 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
252 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
253 	ptr = btrfs_file_extent_inline_start(ei);
254 
255 	if (compress_type != BTRFS_COMPRESS_NONE) {
256 		struct page *cpage;
257 		int i = 0;
258 		while (compressed_size > 0) {
259 			cpage = compressed_pages[i];
260 			cur_size = min_t(unsigned long, compressed_size,
261 				       PAGE_SIZE);
262 
263 			kaddr = kmap_atomic(cpage);
264 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
265 			kunmap_atomic(kaddr);
266 
267 			i++;
268 			ptr += cur_size;
269 			compressed_size -= cur_size;
270 		}
271 		btrfs_set_file_extent_compression(leaf, ei,
272 						  compress_type);
273 	} else {
274 		page = find_get_page(inode->i_mapping,
275 				     start >> PAGE_SHIFT);
276 		btrfs_set_file_extent_compression(leaf, ei, 0);
277 		kaddr = kmap_atomic(page);
278 		offset = offset_in_page(start);
279 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
280 		kunmap_atomic(kaddr);
281 		put_page(page);
282 	}
283 	btrfs_mark_buffer_dirty(leaf);
284 	btrfs_release_path(path);
285 
286 	/*
287 	 * We align size to sectorsize for inline extents just for simplicity
288 	 * sake.
289 	 */
290 	size = ALIGN(size, root->fs_info->sectorsize);
291 	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, size);
292 	if (ret)
293 		goto fail;
294 
295 	/*
296 	 * we're an inline extent, so nobody can
297 	 * extend the file past i_size without locking
298 	 * a page we already have locked.
299 	 *
300 	 * We must do any isize and inode updates
301 	 * before we unlock the pages.  Otherwise we
302 	 * could end up racing with unlink.
303 	 */
304 	BTRFS_I(inode)->disk_i_size = inode->i_size;
305 fail:
306 	return ret;
307 }
308 
309 
310 /*
311  * conditionally insert an inline extent into the file.  This
312  * does the checks required to make sure the data is small enough
313  * to fit as an inline extent.
314  */
315 static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
316 					  u64 end, size_t compressed_size,
317 					  int compress_type,
318 					  struct page **compressed_pages)
319 {
320 	struct btrfs_drop_extents_args drop_args = { 0 };
321 	struct btrfs_root *root = inode->root;
322 	struct btrfs_fs_info *fs_info = root->fs_info;
323 	struct btrfs_trans_handle *trans;
324 	u64 isize = i_size_read(&inode->vfs_inode);
325 	u64 actual_end = min(end + 1, isize);
326 	u64 inline_len = actual_end - start;
327 	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
328 	u64 data_len = inline_len;
329 	int ret;
330 	struct btrfs_path *path;
331 
332 	if (compressed_size)
333 		data_len = compressed_size;
334 
335 	if (start > 0 ||
336 	    actual_end > fs_info->sectorsize ||
337 	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
338 	    (!compressed_size &&
339 	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
340 	    end + 1 < isize ||
341 	    data_len > fs_info->max_inline) {
342 		return 1;
343 	}
344 
345 	path = btrfs_alloc_path();
346 	if (!path)
347 		return -ENOMEM;
348 
349 	trans = btrfs_join_transaction(root);
350 	if (IS_ERR(trans)) {
351 		btrfs_free_path(path);
352 		return PTR_ERR(trans);
353 	}
354 	trans->block_rsv = &inode->block_rsv;
355 
356 	drop_args.path = path;
357 	drop_args.start = start;
358 	drop_args.end = aligned_end;
359 	drop_args.drop_cache = true;
360 	drop_args.replace_extent = true;
361 
362 	if (compressed_size && compressed_pages)
363 		drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
364 		   compressed_size);
365 	else
366 		drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
367 		    inline_len);
368 
369 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
370 	if (ret) {
371 		btrfs_abort_transaction(trans, ret);
372 		goto out;
373 	}
374 
375 	if (isize > actual_end)
376 		inline_len = min_t(u64, isize, actual_end);
377 	ret = insert_inline_extent(trans, path, drop_args.extent_inserted,
378 				   root, &inode->vfs_inode, start,
379 				   inline_len, compressed_size,
380 				   compress_type, compressed_pages);
381 	if (ret && ret != -ENOSPC) {
382 		btrfs_abort_transaction(trans, ret);
383 		goto out;
384 	} else if (ret == -ENOSPC) {
385 		ret = 1;
386 		goto out;
387 	}
388 
389 	btrfs_update_inode_bytes(inode, inline_len, drop_args.bytes_found);
390 	ret = btrfs_update_inode(trans, root, inode);
391 	if (ret && ret != -ENOSPC) {
392 		btrfs_abort_transaction(trans, ret);
393 		goto out;
394 	} else if (ret == -ENOSPC) {
395 		ret = 1;
396 		goto out;
397 	}
398 
399 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
400 out:
401 	/*
402 	 * Don't forget to free the reserved space, as for inlined extent
403 	 * it won't count as data extent, free them directly here.
404 	 * And at reserve time, it's always aligned to page size, so
405 	 * just free one page here.
406 	 */
407 	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
408 	btrfs_free_path(path);
409 	btrfs_end_transaction(trans);
410 	return ret;
411 }
412 
413 struct async_extent {
414 	u64 start;
415 	u64 ram_size;
416 	u64 compressed_size;
417 	struct page **pages;
418 	unsigned long nr_pages;
419 	int compress_type;
420 	struct list_head list;
421 };
422 
423 struct async_chunk {
424 	struct inode *inode;
425 	struct page *locked_page;
426 	u64 start;
427 	u64 end;
428 	unsigned int write_flags;
429 	struct list_head extents;
430 	struct cgroup_subsys_state *blkcg_css;
431 	struct btrfs_work work;
432 	atomic_t *pending;
433 };
434 
435 struct async_cow {
436 	/* Number of chunks in flight; must be first in the structure */
437 	atomic_t num_chunks;
438 	struct async_chunk chunks[];
439 };
440 
441 static noinline int add_async_extent(struct async_chunk *cow,
442 				     u64 start, u64 ram_size,
443 				     u64 compressed_size,
444 				     struct page **pages,
445 				     unsigned long nr_pages,
446 				     int compress_type)
447 {
448 	struct async_extent *async_extent;
449 
450 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
451 	BUG_ON(!async_extent); /* -ENOMEM */
452 	async_extent->start = start;
453 	async_extent->ram_size = ram_size;
454 	async_extent->compressed_size = compressed_size;
455 	async_extent->pages = pages;
456 	async_extent->nr_pages = nr_pages;
457 	async_extent->compress_type = compress_type;
458 	list_add_tail(&async_extent->list, &cow->extents);
459 	return 0;
460 }
461 
462 /*
463  * Check if the inode has flags compatible with compression
464  */
465 static inline bool inode_can_compress(struct btrfs_inode *inode)
466 {
467 	if (inode->flags & BTRFS_INODE_NODATACOW ||
468 	    inode->flags & BTRFS_INODE_NODATASUM)
469 		return false;
470 	return true;
471 }
472 
473 /*
474  * Check if the inode needs to be submitted to compression, based on mount
475  * options, defragmentation, properties or heuristics.
476  */
477 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
478 				      u64 end)
479 {
480 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
481 
482 	if (!inode_can_compress(inode)) {
483 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
484 			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
485 			btrfs_ino(inode));
486 		return 0;
487 	}
488 	/* force compress */
489 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
490 		return 1;
491 	/* defrag ioctl */
492 	if (inode->defrag_compress)
493 		return 1;
494 	/* bad compression ratios */
495 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
496 		return 0;
497 	if (btrfs_test_opt(fs_info, COMPRESS) ||
498 	    inode->flags & BTRFS_INODE_COMPRESS ||
499 	    inode->prop_compress)
500 		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
501 	return 0;
502 }
503 
504 static inline void inode_should_defrag(struct btrfs_inode *inode,
505 		u64 start, u64 end, u64 num_bytes, u64 small_write)
506 {
507 	/* If this is a small write inside eof, kick off a defrag */
508 	if (num_bytes < small_write &&
509 	    (start > 0 || end + 1 < inode->disk_i_size))
510 		btrfs_add_inode_defrag(NULL, inode);
511 }
512 
513 /*
514  * we create compressed extents in two phases.  The first
515  * phase compresses a range of pages that have already been
516  * locked (both pages and state bits are locked).
517  *
518  * This is done inside an ordered work queue, and the compression
519  * is spread across many cpus.  The actual IO submission is step
520  * two, and the ordered work queue takes care of making sure that
521  * happens in the same order things were put onto the queue by
522  * writepages and friends.
523  *
524  * If this code finds it can't get good compression, it puts an
525  * entry onto the work queue to write the uncompressed bytes.  This
526  * makes sure that both compressed inodes and uncompressed inodes
527  * are written in the same order that the flusher thread sent them
528  * down.
529  */
530 static noinline int compress_file_range(struct async_chunk *async_chunk)
531 {
532 	struct inode *inode = async_chunk->inode;
533 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
534 	u64 blocksize = fs_info->sectorsize;
535 	u64 start = async_chunk->start;
536 	u64 end = async_chunk->end;
537 	u64 actual_end;
538 	u64 i_size;
539 	int ret = 0;
540 	struct page **pages = NULL;
541 	unsigned long nr_pages;
542 	unsigned long total_compressed = 0;
543 	unsigned long total_in = 0;
544 	int i;
545 	int will_compress;
546 	int compress_type = fs_info->compress_type;
547 	int compressed_extents = 0;
548 	int redirty = 0;
549 
550 	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
551 			SZ_16K);
552 
553 	/*
554 	 * We need to save i_size before now because it could change in between
555 	 * us evaluating the size and assigning it.  This is because we lock and
556 	 * unlock the page in truncate and fallocate, and then modify the i_size
557 	 * later on.
558 	 *
559 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
560 	 * does that for us.
561 	 */
562 	barrier();
563 	i_size = i_size_read(inode);
564 	barrier();
565 	actual_end = min_t(u64, i_size, end + 1);
566 again:
567 	will_compress = 0;
568 	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
569 	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
570 	nr_pages = min_t(unsigned long, nr_pages,
571 			BTRFS_MAX_COMPRESSED / PAGE_SIZE);
572 
573 	/*
574 	 * we don't want to send crud past the end of i_size through
575 	 * compression, that's just a waste of CPU time.  So, if the
576 	 * end of the file is before the start of our current
577 	 * requested range of bytes, we bail out to the uncompressed
578 	 * cleanup code that can deal with all of this.
579 	 *
580 	 * It isn't really the fastest way to fix things, but this is a
581 	 * very uncommon corner.
582 	 */
583 	if (actual_end <= start)
584 		goto cleanup_and_bail_uncompressed;
585 
586 	total_compressed = actual_end - start;
587 
588 	/*
589 	 * skip compression for a small file range(<=blocksize) that
590 	 * isn't an inline extent, since it doesn't save disk space at all.
591 	 */
592 	if (total_compressed <= blocksize &&
593 	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
594 		goto cleanup_and_bail_uncompressed;
595 
596 	total_compressed = min_t(unsigned long, total_compressed,
597 			BTRFS_MAX_UNCOMPRESSED);
598 	total_in = 0;
599 	ret = 0;
600 
601 	/*
602 	 * we do compression for mount -o compress and when the
603 	 * inode has not been flagged as nocompress.  This flag can
604 	 * change at any time if we discover bad compression ratios.
605 	 */
606 	if (inode_need_compress(BTRFS_I(inode), start, end)) {
607 		WARN_ON(pages);
608 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
609 		if (!pages) {
610 			/* just bail out to the uncompressed code */
611 			nr_pages = 0;
612 			goto cont;
613 		}
614 
615 		if (BTRFS_I(inode)->defrag_compress)
616 			compress_type = BTRFS_I(inode)->defrag_compress;
617 		else if (BTRFS_I(inode)->prop_compress)
618 			compress_type = BTRFS_I(inode)->prop_compress;
619 
620 		/*
621 		 * we need to call clear_page_dirty_for_io on each
622 		 * page in the range.  Otherwise applications with the file
623 		 * mmap'd can wander in and change the page contents while
624 		 * we are compressing them.
625 		 *
626 		 * If the compression fails for any reason, we set the pages
627 		 * dirty again later on.
628 		 *
629 		 * Note that the remaining part is redirtied, the start pointer
630 		 * has moved, the end is the original one.
631 		 */
632 		if (!redirty) {
633 			extent_range_clear_dirty_for_io(inode, start, end);
634 			redirty = 1;
635 		}
636 
637 		/* Compression level is applied here and only here */
638 		ret = btrfs_compress_pages(
639 			compress_type | (fs_info->compress_level << 4),
640 					   inode->i_mapping, start,
641 					   pages,
642 					   &nr_pages,
643 					   &total_in,
644 					   &total_compressed);
645 
646 		if (!ret) {
647 			unsigned long offset = offset_in_page(total_compressed);
648 			struct page *page = pages[nr_pages - 1];
649 
650 			/* zero the tail end of the last page, we might be
651 			 * sending it down to disk
652 			 */
653 			if (offset)
654 				memzero_page(page, offset, PAGE_SIZE - offset);
655 			will_compress = 1;
656 		}
657 	}
658 cont:
659 	if (start == 0) {
660 		/* lets try to make an inline extent */
661 		if (ret || total_in < actual_end) {
662 			/* we didn't compress the entire range, try
663 			 * to make an uncompressed inline extent.
664 			 */
665 			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
666 						    0, BTRFS_COMPRESS_NONE,
667 						    NULL);
668 		} else {
669 			/* try making a compressed inline extent */
670 			ret = cow_file_range_inline(BTRFS_I(inode), start, end,
671 						    total_compressed,
672 						    compress_type, pages);
673 		}
674 		if (ret <= 0) {
675 			unsigned long clear_flags = EXTENT_DELALLOC |
676 				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
677 				EXTENT_DO_ACCOUNTING;
678 			unsigned long page_error_op;
679 
680 			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
681 
682 			/*
683 			 * inline extent creation worked or returned error,
684 			 * we don't need to create any more async work items.
685 			 * Unlock and free up our temp pages.
686 			 *
687 			 * We use DO_ACCOUNTING here because we need the
688 			 * delalloc_release_metadata to be done _after_ we drop
689 			 * our outstanding extent for clearing delalloc for this
690 			 * range.
691 			 */
692 			extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
693 						     NULL,
694 						     clear_flags,
695 						     PAGE_UNLOCK |
696 						     PAGE_START_WRITEBACK |
697 						     page_error_op |
698 						     PAGE_END_WRITEBACK);
699 
700 			/*
701 			 * Ensure we only free the compressed pages if we have
702 			 * them allocated, as we can still reach here with
703 			 * inode_need_compress() == false.
704 			 */
705 			if (pages) {
706 				for (i = 0; i < nr_pages; i++) {
707 					WARN_ON(pages[i]->mapping);
708 					put_page(pages[i]);
709 				}
710 				kfree(pages);
711 			}
712 			return 0;
713 		}
714 	}
715 
716 	if (will_compress) {
717 		/*
718 		 * we aren't doing an inline extent round the compressed size
719 		 * up to a block size boundary so the allocator does sane
720 		 * things
721 		 */
722 		total_compressed = ALIGN(total_compressed, blocksize);
723 
724 		/*
725 		 * one last check to make sure the compression is really a
726 		 * win, compare the page count read with the blocks on disk,
727 		 * compression must free at least one sector size
728 		 */
729 		total_in = ALIGN(total_in, PAGE_SIZE);
730 		if (total_compressed + blocksize <= total_in) {
731 			compressed_extents++;
732 
733 			/*
734 			 * The async work queues will take care of doing actual
735 			 * allocation on disk for these compressed pages, and
736 			 * will submit them to the elevator.
737 			 */
738 			add_async_extent(async_chunk, start, total_in,
739 					total_compressed, pages, nr_pages,
740 					compress_type);
741 
742 			if (start + total_in < end) {
743 				start += total_in;
744 				pages = NULL;
745 				cond_resched();
746 				goto again;
747 			}
748 			return compressed_extents;
749 		}
750 	}
751 	if (pages) {
752 		/*
753 		 * the compression code ran but failed to make things smaller,
754 		 * free any pages it allocated and our page pointer array
755 		 */
756 		for (i = 0; i < nr_pages; i++) {
757 			WARN_ON(pages[i]->mapping);
758 			put_page(pages[i]);
759 		}
760 		kfree(pages);
761 		pages = NULL;
762 		total_compressed = 0;
763 		nr_pages = 0;
764 
765 		/* flag the file so we don't compress in the future */
766 		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
767 		    !(BTRFS_I(inode)->prop_compress)) {
768 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
769 		}
770 	}
771 cleanup_and_bail_uncompressed:
772 	/*
773 	 * No compression, but we still need to write the pages in the file
774 	 * we've been given so far.  redirty the locked page if it corresponds
775 	 * to our extent and set things up for the async work queue to run
776 	 * cow_file_range to do the normal delalloc dance.
777 	 */
778 	if (async_chunk->locked_page &&
779 	    (page_offset(async_chunk->locked_page) >= start &&
780 	     page_offset(async_chunk->locked_page)) <= end) {
781 		__set_page_dirty_nobuffers(async_chunk->locked_page);
782 		/* unlocked later on in the async handlers */
783 	}
784 
785 	if (redirty)
786 		extent_range_redirty_for_io(inode, start, end);
787 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
788 			 BTRFS_COMPRESS_NONE);
789 	compressed_extents++;
790 
791 	return compressed_extents;
792 }
793 
794 static void free_async_extent_pages(struct async_extent *async_extent)
795 {
796 	int i;
797 
798 	if (!async_extent->pages)
799 		return;
800 
801 	for (i = 0; i < async_extent->nr_pages; i++) {
802 		WARN_ON(async_extent->pages[i]->mapping);
803 		put_page(async_extent->pages[i]);
804 	}
805 	kfree(async_extent->pages);
806 	async_extent->nr_pages = 0;
807 	async_extent->pages = NULL;
808 }
809 
810 /*
811  * phase two of compressed writeback.  This is the ordered portion
812  * of the code, which only gets called in the order the work was
813  * queued.  We walk all the async extents created by compress_file_range
814  * and send them down to the disk.
815  */
816 static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
817 {
818 	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
819 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
820 	struct async_extent *async_extent;
821 	u64 alloc_hint = 0;
822 	struct btrfs_key ins;
823 	struct extent_map *em;
824 	struct btrfs_root *root = inode->root;
825 	struct extent_io_tree *io_tree = &inode->io_tree;
826 	int ret = 0;
827 
828 again:
829 	while (!list_empty(&async_chunk->extents)) {
830 		async_extent = list_entry(async_chunk->extents.next,
831 					  struct async_extent, list);
832 		list_del(&async_extent->list);
833 
834 retry:
835 		lock_extent(io_tree, async_extent->start,
836 			    async_extent->start + async_extent->ram_size - 1);
837 		/* did the compression code fall back to uncompressed IO? */
838 		if (!async_extent->pages) {
839 			int page_started = 0;
840 			unsigned long nr_written = 0;
841 
842 			/* allocate blocks */
843 			ret = cow_file_range(inode, async_chunk->locked_page,
844 					     async_extent->start,
845 					     async_extent->start +
846 					     async_extent->ram_size - 1,
847 					     &page_started, &nr_written, 0);
848 
849 			/* JDM XXX */
850 
851 			/*
852 			 * if page_started, cow_file_range inserted an
853 			 * inline extent and took care of all the unlocking
854 			 * and IO for us.  Otherwise, we need to submit
855 			 * all those pages down to the drive.
856 			 */
857 			if (!page_started && !ret)
858 				extent_write_locked_range(&inode->vfs_inode,
859 						  async_extent->start,
860 						  async_extent->start +
861 						  async_extent->ram_size - 1,
862 						  WB_SYNC_ALL);
863 			else if (ret && async_chunk->locked_page)
864 				unlock_page(async_chunk->locked_page);
865 			kfree(async_extent);
866 			cond_resched();
867 			continue;
868 		}
869 
870 		ret = btrfs_reserve_extent(root, async_extent->ram_size,
871 					   async_extent->compressed_size,
872 					   async_extent->compressed_size,
873 					   0, alloc_hint, &ins, 1, 1);
874 		if (ret) {
875 			free_async_extent_pages(async_extent);
876 
877 			if (ret == -ENOSPC) {
878 				unlock_extent(io_tree, async_extent->start,
879 					      async_extent->start +
880 					      async_extent->ram_size - 1);
881 
882 				/*
883 				 * we need to redirty the pages if we decide to
884 				 * fallback to uncompressed IO, otherwise we
885 				 * will not submit these pages down to lower
886 				 * layers.
887 				 */
888 				extent_range_redirty_for_io(&inode->vfs_inode,
889 						async_extent->start,
890 						async_extent->start +
891 						async_extent->ram_size - 1);
892 
893 				goto retry;
894 			}
895 			goto out_free;
896 		}
897 		/*
898 		 * here we're doing allocation and writeback of the
899 		 * compressed pages
900 		 */
901 		em = create_io_em(inode, async_extent->start,
902 				  async_extent->ram_size, /* len */
903 				  async_extent->start, /* orig_start */
904 				  ins.objectid, /* block_start */
905 				  ins.offset, /* block_len */
906 				  ins.offset, /* orig_block_len */
907 				  async_extent->ram_size, /* ram_bytes */
908 				  async_extent->compress_type,
909 				  BTRFS_ORDERED_COMPRESSED);
910 		if (IS_ERR(em))
911 			/* ret value is not necessary due to void function */
912 			goto out_free_reserve;
913 		free_extent_map(em);
914 
915 		ret = btrfs_add_ordered_extent_compress(inode,
916 						async_extent->start,
917 						ins.objectid,
918 						async_extent->ram_size,
919 						ins.offset,
920 						async_extent->compress_type);
921 		if (ret) {
922 			btrfs_drop_extent_cache(inode, async_extent->start,
923 						async_extent->start +
924 						async_extent->ram_size - 1, 0);
925 			goto out_free_reserve;
926 		}
927 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
928 
929 		/*
930 		 * clear dirty, set writeback and unlock the pages.
931 		 */
932 		extent_clear_unlock_delalloc(inode, async_extent->start,
933 				async_extent->start +
934 				async_extent->ram_size - 1,
935 				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
936 				PAGE_UNLOCK | PAGE_START_WRITEBACK);
937 		if (btrfs_submit_compressed_write(inode, async_extent->start,
938 				    async_extent->ram_size,
939 				    ins.objectid,
940 				    ins.offset, async_extent->pages,
941 				    async_extent->nr_pages,
942 				    async_chunk->write_flags,
943 				    async_chunk->blkcg_css)) {
944 			struct page *p = async_extent->pages[0];
945 			const u64 start = async_extent->start;
946 			const u64 end = start + async_extent->ram_size - 1;
947 
948 			p->mapping = inode->vfs_inode.i_mapping;
949 			btrfs_writepage_endio_finish_ordered(p, start, end, 0);
950 
951 			p->mapping = NULL;
952 			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
953 						     PAGE_END_WRITEBACK |
954 						     PAGE_SET_ERROR);
955 			free_async_extent_pages(async_extent);
956 		}
957 		alloc_hint = ins.objectid + ins.offset;
958 		kfree(async_extent);
959 		cond_resched();
960 	}
961 	return;
962 out_free_reserve:
963 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
964 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
965 out_free:
966 	extent_clear_unlock_delalloc(inode, async_extent->start,
967 				     async_extent->start +
968 				     async_extent->ram_size - 1,
969 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
970 				     EXTENT_DELALLOC_NEW |
971 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
972 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
973 				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
974 	free_async_extent_pages(async_extent);
975 	kfree(async_extent);
976 	goto again;
977 }
978 
979 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
980 				      u64 num_bytes)
981 {
982 	struct extent_map_tree *em_tree = &inode->extent_tree;
983 	struct extent_map *em;
984 	u64 alloc_hint = 0;
985 
986 	read_lock(&em_tree->lock);
987 	em = search_extent_mapping(em_tree, start, num_bytes);
988 	if (em) {
989 		/*
990 		 * if block start isn't an actual block number then find the
991 		 * first block in this inode and use that as a hint.  If that
992 		 * block is also bogus then just don't worry about it.
993 		 */
994 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
995 			free_extent_map(em);
996 			em = search_extent_mapping(em_tree, 0, 0);
997 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
998 				alloc_hint = em->block_start;
999 			if (em)
1000 				free_extent_map(em);
1001 		} else {
1002 			alloc_hint = em->block_start;
1003 			free_extent_map(em);
1004 		}
1005 	}
1006 	read_unlock(&em_tree->lock);
1007 
1008 	return alloc_hint;
1009 }
1010 
1011 /*
1012  * when extent_io.c finds a delayed allocation range in the file,
1013  * the call backs end up in this code.  The basic idea is to
1014  * allocate extents on disk for the range, and create ordered data structs
1015  * in ram to track those extents.
1016  *
1017  * locked_page is the page that writepage had locked already.  We use
1018  * it to make sure we don't do extra locks or unlocks.
1019  *
1020  * *page_started is set to one if we unlock locked_page and do everything
1021  * required to start IO on it.  It may be clean and already done with
1022  * IO when we return.
1023  */
1024 static noinline int cow_file_range(struct btrfs_inode *inode,
1025 				   struct page *locked_page,
1026 				   u64 start, u64 end, int *page_started,
1027 				   unsigned long *nr_written, int unlock)
1028 {
1029 	struct btrfs_root *root = inode->root;
1030 	struct btrfs_fs_info *fs_info = root->fs_info;
1031 	u64 alloc_hint = 0;
1032 	u64 num_bytes;
1033 	unsigned long ram_size;
1034 	u64 cur_alloc_size = 0;
1035 	u64 min_alloc_size;
1036 	u64 blocksize = fs_info->sectorsize;
1037 	struct btrfs_key ins;
1038 	struct extent_map *em;
1039 	unsigned clear_bits;
1040 	unsigned long page_ops;
1041 	bool extent_reserved = false;
1042 	int ret = 0;
1043 
1044 	if (btrfs_is_free_space_inode(inode)) {
1045 		WARN_ON_ONCE(1);
1046 		ret = -EINVAL;
1047 		goto out_unlock;
1048 	}
1049 
1050 	num_bytes = ALIGN(end - start + 1, blocksize);
1051 	num_bytes = max(blocksize,  num_bytes);
1052 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1053 
1054 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1055 
1056 	if (start == 0) {
1057 		/* lets try to make an inline extent */
1058 		ret = cow_file_range_inline(inode, start, end, 0,
1059 					    BTRFS_COMPRESS_NONE, NULL);
1060 		if (ret == 0) {
1061 			/*
1062 			 * We use DO_ACCOUNTING here because we need the
1063 			 * delalloc_release_metadata to be run _after_ we drop
1064 			 * our outstanding extent for clearing delalloc for this
1065 			 * range.
1066 			 */
1067 			extent_clear_unlock_delalloc(inode, start, end, NULL,
1068 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1069 				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1070 				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1071 				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
1072 			*nr_written = *nr_written +
1073 			     (end - start + PAGE_SIZE) / PAGE_SIZE;
1074 			*page_started = 1;
1075 			goto out;
1076 		} else if (ret < 0) {
1077 			goto out_unlock;
1078 		}
1079 	}
1080 
1081 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1082 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
1083 
1084 	/*
1085 	 * Relocation relies on the relocated extents to have exactly the same
1086 	 * size as the original extents. Normally writeback for relocation data
1087 	 * extents follows a NOCOW path because relocation preallocates the
1088 	 * extents. However, due to an operation such as scrub turning a block
1089 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1090 	 * an extent allocated during COW has exactly the requested size and can
1091 	 * not be split into smaller extents, otherwise relocation breaks and
1092 	 * fails during the stage where it updates the bytenr of file extent
1093 	 * items.
1094 	 */
1095 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1096 		min_alloc_size = num_bytes;
1097 	else
1098 		min_alloc_size = fs_info->sectorsize;
1099 
1100 	while (num_bytes > 0) {
1101 		cur_alloc_size = num_bytes;
1102 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1103 					   min_alloc_size, 0, alloc_hint,
1104 					   &ins, 1, 1);
1105 		if (ret < 0)
1106 			goto out_unlock;
1107 		cur_alloc_size = ins.offset;
1108 		extent_reserved = true;
1109 
1110 		ram_size = ins.offset;
1111 		em = create_io_em(inode, start, ins.offset, /* len */
1112 				  start, /* orig_start */
1113 				  ins.objectid, /* block_start */
1114 				  ins.offset, /* block_len */
1115 				  ins.offset, /* orig_block_len */
1116 				  ram_size, /* ram_bytes */
1117 				  BTRFS_COMPRESS_NONE, /* compress_type */
1118 				  BTRFS_ORDERED_REGULAR /* type */);
1119 		if (IS_ERR(em)) {
1120 			ret = PTR_ERR(em);
1121 			goto out_reserve;
1122 		}
1123 		free_extent_map(em);
1124 
1125 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1126 					       ram_size, cur_alloc_size,
1127 					       BTRFS_ORDERED_REGULAR);
1128 		if (ret)
1129 			goto out_drop_extent_cache;
1130 
1131 		if (root->root_key.objectid ==
1132 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1133 			ret = btrfs_reloc_clone_csums(inode, start,
1134 						      cur_alloc_size);
1135 			/*
1136 			 * Only drop cache here, and process as normal.
1137 			 *
1138 			 * We must not allow extent_clear_unlock_delalloc()
1139 			 * at out_unlock label to free meta of this ordered
1140 			 * extent, as its meta should be freed by
1141 			 * btrfs_finish_ordered_io().
1142 			 *
1143 			 * So we must continue until @start is increased to
1144 			 * skip current ordered extent.
1145 			 */
1146 			if (ret)
1147 				btrfs_drop_extent_cache(inode, start,
1148 						start + ram_size - 1, 0);
1149 		}
1150 
1151 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1152 
1153 		/* we're not doing compressed IO, don't unlock the first
1154 		 * page (which the caller expects to stay locked), don't
1155 		 * clear any dirty bits and don't set any writeback bits
1156 		 *
1157 		 * Do set the Private2 bit so we know this page was properly
1158 		 * setup for writepage
1159 		 */
1160 		page_ops = unlock ? PAGE_UNLOCK : 0;
1161 		page_ops |= PAGE_SET_PRIVATE2;
1162 
1163 		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
1164 					     locked_page,
1165 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1166 					     page_ops);
1167 		if (num_bytes < cur_alloc_size)
1168 			num_bytes = 0;
1169 		else
1170 			num_bytes -= cur_alloc_size;
1171 		alloc_hint = ins.objectid + ins.offset;
1172 		start += cur_alloc_size;
1173 		extent_reserved = false;
1174 
1175 		/*
1176 		 * btrfs_reloc_clone_csums() error, since start is increased
1177 		 * extent_clear_unlock_delalloc() at out_unlock label won't
1178 		 * free metadata of current ordered extent, we're OK to exit.
1179 		 */
1180 		if (ret)
1181 			goto out_unlock;
1182 	}
1183 out:
1184 	return ret;
1185 
1186 out_drop_extent_cache:
1187 	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
1188 out_reserve:
1189 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1190 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1191 out_unlock:
1192 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1193 		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1194 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1195 	/*
1196 	 * If we reserved an extent for our delalloc range (or a subrange) and
1197 	 * failed to create the respective ordered extent, then it means that
1198 	 * when we reserved the extent we decremented the extent's size from
1199 	 * the data space_info's bytes_may_use counter and incremented the
1200 	 * space_info's bytes_reserved counter by the same amount. We must make
1201 	 * sure extent_clear_unlock_delalloc() does not try to decrement again
1202 	 * the data space_info's bytes_may_use counter, therefore we do not pass
1203 	 * it the flag EXTENT_CLEAR_DATA_RESV.
1204 	 */
1205 	if (extent_reserved) {
1206 		extent_clear_unlock_delalloc(inode, start,
1207 					     start + cur_alloc_size - 1,
1208 					     locked_page,
1209 					     clear_bits,
1210 					     page_ops);
1211 		start += cur_alloc_size;
1212 		if (start >= end)
1213 			goto out;
1214 	}
1215 	extent_clear_unlock_delalloc(inode, start, end, locked_page,
1216 				     clear_bits | EXTENT_CLEAR_DATA_RESV,
1217 				     page_ops);
1218 	goto out;
1219 }
1220 
1221 /*
1222  * work queue call back to started compression on a file and pages
1223  */
1224 static noinline void async_cow_start(struct btrfs_work *work)
1225 {
1226 	struct async_chunk *async_chunk;
1227 	int compressed_extents;
1228 
1229 	async_chunk = container_of(work, struct async_chunk, work);
1230 
1231 	compressed_extents = compress_file_range(async_chunk);
1232 	if (compressed_extents == 0) {
1233 		btrfs_add_delayed_iput(async_chunk->inode);
1234 		async_chunk->inode = NULL;
1235 	}
1236 }
1237 
1238 /*
1239  * work queue call back to submit previously compressed pages
1240  */
1241 static noinline void async_cow_submit(struct btrfs_work *work)
1242 {
1243 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1244 						     work);
1245 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1246 	unsigned long nr_pages;
1247 
1248 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1249 		PAGE_SHIFT;
1250 
1251 	/* atomic_sub_return implies a barrier */
1252 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1253 	    5 * SZ_1M)
1254 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1255 
1256 	/*
1257 	 * ->inode could be NULL if async_chunk_start has failed to compress,
1258 	 * in which case we don't have anything to submit, yet we need to
1259 	 * always adjust ->async_delalloc_pages as its paired with the init
1260 	 * happening in cow_file_range_async
1261 	 */
1262 	if (async_chunk->inode)
1263 		submit_compressed_extents(async_chunk);
1264 }
1265 
1266 static noinline void async_cow_free(struct btrfs_work *work)
1267 {
1268 	struct async_chunk *async_chunk;
1269 
1270 	async_chunk = container_of(work, struct async_chunk, work);
1271 	if (async_chunk->inode)
1272 		btrfs_add_delayed_iput(async_chunk->inode);
1273 	if (async_chunk->blkcg_css)
1274 		css_put(async_chunk->blkcg_css);
1275 	/*
1276 	 * Since the pointer to 'pending' is at the beginning of the array of
1277 	 * async_chunk's, freeing it ensures the whole array has been freed.
1278 	 */
1279 	if (atomic_dec_and_test(async_chunk->pending))
1280 		kvfree(async_chunk->pending);
1281 }
1282 
1283 static int cow_file_range_async(struct btrfs_inode *inode,
1284 				struct writeback_control *wbc,
1285 				struct page *locked_page,
1286 				u64 start, u64 end, int *page_started,
1287 				unsigned long *nr_written)
1288 {
1289 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1290 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1291 	struct async_cow *ctx;
1292 	struct async_chunk *async_chunk;
1293 	unsigned long nr_pages;
1294 	u64 cur_end;
1295 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1296 	int i;
1297 	bool should_compress;
1298 	unsigned nofs_flag;
1299 	const unsigned int write_flags = wbc_to_write_flags(wbc);
1300 
1301 	unlock_extent(&inode->io_tree, start, end);
1302 
1303 	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
1304 	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
1305 		num_chunks = 1;
1306 		should_compress = false;
1307 	} else {
1308 		should_compress = true;
1309 	}
1310 
1311 	nofs_flag = memalloc_nofs_save();
1312 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1313 	memalloc_nofs_restore(nofs_flag);
1314 
1315 	if (!ctx) {
1316 		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
1317 			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1318 			EXTENT_DO_ACCOUNTING;
1319 		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
1320 					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;
1321 
1322 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1323 					     clear_bits, page_ops);
1324 		return -ENOMEM;
1325 	}
1326 
1327 	async_chunk = ctx->chunks;
1328 	atomic_set(&ctx->num_chunks, num_chunks);
1329 
1330 	for (i = 0; i < num_chunks; i++) {
1331 		if (should_compress)
1332 			cur_end = min(end, start + SZ_512K - 1);
1333 		else
1334 			cur_end = end;
1335 
1336 		/*
1337 		 * igrab is called higher up in the call chain, take only the
1338 		 * lightweight reference for the callback lifetime
1339 		 */
1340 		ihold(&inode->vfs_inode);
1341 		async_chunk[i].pending = &ctx->num_chunks;
1342 		async_chunk[i].inode = &inode->vfs_inode;
1343 		async_chunk[i].start = start;
1344 		async_chunk[i].end = cur_end;
1345 		async_chunk[i].write_flags = write_flags;
1346 		INIT_LIST_HEAD(&async_chunk[i].extents);
1347 
1348 		/*
1349 		 * The locked_page comes all the way from writepage and its
1350 		 * the original page we were actually given.  As we spread
1351 		 * this large delalloc region across multiple async_chunk
1352 		 * structs, only the first struct needs a pointer to locked_page
1353 		 *
1354 		 * This way we don't need racey decisions about who is supposed
1355 		 * to unlock it.
1356 		 */
1357 		if (locked_page) {
1358 			/*
1359 			 * Depending on the compressibility, the pages might or
1360 			 * might not go through async.  We want all of them to
1361 			 * be accounted against wbc once.  Let's do it here
1362 			 * before the paths diverge.  wbc accounting is used
1363 			 * only for foreign writeback detection and doesn't
1364 			 * need full accuracy.  Just account the whole thing
1365 			 * against the first page.
1366 			 */
1367 			wbc_account_cgroup_owner(wbc, locked_page,
1368 						 cur_end - start);
1369 			async_chunk[i].locked_page = locked_page;
1370 			locked_page = NULL;
1371 		} else {
1372 			async_chunk[i].locked_page = NULL;
1373 		}
1374 
1375 		if (blkcg_css != blkcg_root_css) {
1376 			css_get(blkcg_css);
1377 			async_chunk[i].blkcg_css = blkcg_css;
1378 		} else {
1379 			async_chunk[i].blkcg_css = NULL;
1380 		}
1381 
1382 		btrfs_init_work(&async_chunk[i].work, async_cow_start,
1383 				async_cow_submit, async_cow_free);
1384 
1385 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1386 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1387 
1388 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1389 
1390 		*nr_written += nr_pages;
1391 		start = cur_end + 1;
1392 	}
1393 	*page_started = 1;
1394 	return 0;
1395 }
1396 
1397 static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
1398 				       struct page *locked_page, u64 start,
1399 				       u64 end, int *page_started,
1400 				       unsigned long *nr_written)
1401 {
1402 	int ret;
1403 
1404 	ret = cow_file_range(inode, locked_page, start, end, page_started,
1405 			     nr_written, 0);
1406 	if (ret)
1407 		return ret;
1408 
1409 	if (*page_started)
1410 		return 0;
1411 
1412 	__set_page_dirty_nobuffers(locked_page);
1413 	account_page_redirty(locked_page);
1414 	extent_write_locked_range(&inode->vfs_inode, start, end, WB_SYNC_ALL);
1415 	*page_started = 1;
1416 
1417 	return 0;
1418 }
1419 
1420 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1421 					u64 bytenr, u64 num_bytes)
1422 {
1423 	int ret;
1424 	struct btrfs_ordered_sum *sums;
1425 	LIST_HEAD(list);
1426 
1427 	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1428 				       bytenr + num_bytes - 1, &list, 0);
1429 	if (ret == 0 && list_empty(&list))
1430 		return 0;
1431 
1432 	while (!list_empty(&list)) {
1433 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1434 		list_del(&sums->list);
1435 		kfree(sums);
1436 	}
1437 	if (ret < 0)
1438 		return ret;
1439 	return 1;
1440 }
1441 
1442 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1443 			   const u64 start, const u64 end,
1444 			   int *page_started, unsigned long *nr_written)
1445 {
1446 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1447 	const bool is_reloc_ino = (inode->root->root_key.objectid ==
1448 				   BTRFS_DATA_RELOC_TREE_OBJECTID);
1449 	const u64 range_bytes = end + 1 - start;
1450 	struct extent_io_tree *io_tree = &inode->io_tree;
1451 	u64 range_start = start;
1452 	u64 count;
1453 
1454 	/*
1455 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1456 	 * made we had not enough available data space and therefore we did not
1457 	 * reserve data space for it, since we though we could do NOCOW for the
1458 	 * respective file range (either there is prealloc extent or the inode
1459 	 * has the NOCOW bit set).
1460 	 *
1461 	 * However when we need to fallback to COW mode (because for example the
1462 	 * block group for the corresponding extent was turned to RO mode by a
1463 	 * scrub or relocation) we need to do the following:
1464 	 *
1465 	 * 1) We increment the bytes_may_use counter of the data space info.
1466 	 *    If COW succeeds, it allocates a new data extent and after doing
1467 	 *    that it decrements the space info's bytes_may_use counter and
1468 	 *    increments its bytes_reserved counter by the same amount (we do
1469 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1470 	 *    bytes_may_use counter to compensate (when space is reserved at
1471 	 *    buffered write time, the bytes_may_use counter is incremented);
1472 	 *
1473 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1474 	 *    that if the COW path fails for any reason, it decrements (through
1475 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1476 	 *    data space info, which we incremented in the step above.
1477 	 *
1478 	 * If we need to fallback to cow and the inode corresponds to a free
1479 	 * space cache inode or an inode of the data relocation tree, we must
1480 	 * also increment bytes_may_use of the data space_info for the same
1481 	 * reason. Space caches and relocated data extents always get a prealloc
1482 	 * extent for them, however scrub or balance may have set the block
1483 	 * group that contains that extent to RO mode and therefore force COW
1484 	 * when starting writeback.
1485 	 */
1486 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1487 				 EXTENT_NORESERVE, 0);
1488 	if (count > 0 || is_space_ino || is_reloc_ino) {
1489 		u64 bytes = count;
1490 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1491 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1492 
1493 		if (is_space_ino || is_reloc_ino)
1494 			bytes = range_bytes;
1495 
1496 		spin_lock(&sinfo->lock);
1497 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1498 		spin_unlock(&sinfo->lock);
1499 
1500 		if (count > 0)
1501 			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1502 					 0, 0, NULL);
1503 	}
1504 
1505 	return cow_file_range(inode, locked_page, start, end, page_started,
1506 			      nr_written, 1);
1507 }
1508 
1509 /*
1510  * when nowcow writeback call back.  This checks for snapshots or COW copies
1511  * of the extents that exist in the file, and COWs the file as required.
1512  *
1513  * If no cow copies or snapshots exist, we write directly to the existing
1514  * blocks on disk
1515  */
1516 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1517 				       struct page *locked_page,
1518 				       const u64 start, const u64 end,
1519 				       int *page_started,
1520 				       unsigned long *nr_written)
1521 {
1522 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1523 	struct btrfs_root *root = inode->root;
1524 	struct btrfs_path *path;
1525 	u64 cow_start = (u64)-1;
1526 	u64 cur_offset = start;
1527 	int ret;
1528 	bool check_prev = true;
1529 	const bool freespace_inode = btrfs_is_free_space_inode(inode);
1530 	u64 ino = btrfs_ino(inode);
1531 	bool nocow = false;
1532 	u64 disk_bytenr = 0;
1533 	const bool force = inode->flags & BTRFS_INODE_NODATACOW;
1534 
1535 	path = btrfs_alloc_path();
1536 	if (!path) {
1537 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1538 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1539 					     EXTENT_DO_ACCOUNTING |
1540 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1541 					     PAGE_START_WRITEBACK |
1542 					     PAGE_END_WRITEBACK);
1543 		return -ENOMEM;
1544 	}
1545 
1546 	while (1) {
1547 		struct btrfs_key found_key;
1548 		struct btrfs_file_extent_item *fi;
1549 		struct extent_buffer *leaf;
1550 		u64 extent_end;
1551 		u64 extent_offset;
1552 		u64 num_bytes = 0;
1553 		u64 disk_num_bytes;
1554 		u64 ram_bytes;
1555 		int extent_type;
1556 
1557 		nocow = false;
1558 
1559 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1560 					       cur_offset, 0);
1561 		if (ret < 0)
1562 			goto error;
1563 
1564 		/*
1565 		 * If there is no extent for our range when doing the initial
1566 		 * search, then go back to the previous slot as it will be the
1567 		 * one containing the search offset
1568 		 */
1569 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1570 			leaf = path->nodes[0];
1571 			btrfs_item_key_to_cpu(leaf, &found_key,
1572 					      path->slots[0] - 1);
1573 			if (found_key.objectid == ino &&
1574 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1575 				path->slots[0]--;
1576 		}
1577 		check_prev = false;
1578 next_slot:
1579 		/* Go to next leaf if we have exhausted the current one */
1580 		leaf = path->nodes[0];
1581 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1582 			ret = btrfs_next_leaf(root, path);
1583 			if (ret < 0) {
1584 				if (cow_start != (u64)-1)
1585 					cur_offset = cow_start;
1586 				goto error;
1587 			}
1588 			if (ret > 0)
1589 				break;
1590 			leaf = path->nodes[0];
1591 		}
1592 
1593 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1594 
1595 		/* Didn't find anything for our INO */
1596 		if (found_key.objectid > ino)
1597 			break;
1598 		/*
1599 		 * Keep searching until we find an EXTENT_ITEM or there are no
1600 		 * more extents for this inode
1601 		 */
1602 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
1603 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
1604 			path->slots[0]++;
1605 			goto next_slot;
1606 		}
1607 
1608 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
1609 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1610 		    found_key.offset > end)
1611 			break;
1612 
1613 		/*
1614 		 * If the found extent starts after requested offset, then
1615 		 * adjust extent_end to be right before this extent begins
1616 		 */
1617 		if (found_key.offset > cur_offset) {
1618 			extent_end = found_key.offset;
1619 			extent_type = 0;
1620 			goto out_check;
1621 		}
1622 
1623 		/*
1624 		 * Found extent which begins before our range and potentially
1625 		 * intersect it
1626 		 */
1627 		fi = btrfs_item_ptr(leaf, path->slots[0],
1628 				    struct btrfs_file_extent_item);
1629 		extent_type = btrfs_file_extent_type(leaf, fi);
1630 
1631 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1632 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1633 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1634 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1635 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1636 			extent_end = found_key.offset +
1637 				btrfs_file_extent_num_bytes(leaf, fi);
1638 			disk_num_bytes =
1639 				btrfs_file_extent_disk_num_bytes(leaf, fi);
1640 			/*
1641 			 * If the extent we got ends before our current offset,
1642 			 * skip to the next extent.
1643 			 */
1644 			if (extent_end <= cur_offset) {
1645 				path->slots[0]++;
1646 				goto next_slot;
1647 			}
1648 			/* Skip holes */
1649 			if (disk_bytenr == 0)
1650 				goto out_check;
1651 			/* Skip compressed/encrypted/encoded extents */
1652 			if (btrfs_file_extent_compression(leaf, fi) ||
1653 			    btrfs_file_extent_encryption(leaf, fi) ||
1654 			    btrfs_file_extent_other_encoding(leaf, fi))
1655 				goto out_check;
1656 			/*
1657 			 * If extent is created before the last volume's snapshot
1658 			 * this implies the extent is shared, hence we can't do
1659 			 * nocow. This is the same check as in
1660 			 * btrfs_cross_ref_exist but without calling
1661 			 * btrfs_search_slot.
1662 			 */
1663 			if (!freespace_inode &&
1664 			    btrfs_file_extent_generation(leaf, fi) <=
1665 			    btrfs_root_last_snapshot(&root->root_item))
1666 				goto out_check;
1667 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1668 				goto out_check;
1669 
1670 			/*
1671 			 * The following checks can be expensive, as they need to
1672 			 * take other locks and do btree or rbtree searches, so
1673 			 * release the path to avoid blocking other tasks for too
1674 			 * long.
1675 			 */
1676 			btrfs_release_path(path);
1677 
1678 			ret = btrfs_cross_ref_exist(root, ino,
1679 						    found_key.offset -
1680 						    extent_offset, disk_bytenr, false);
1681 			if (ret) {
1682 				/*
1683 				 * ret could be -EIO if the above fails to read
1684 				 * metadata.
1685 				 */
1686 				if (ret < 0) {
1687 					if (cow_start != (u64)-1)
1688 						cur_offset = cow_start;
1689 					goto error;
1690 				}
1691 
1692 				WARN_ON_ONCE(freespace_inode);
1693 				goto out_check;
1694 			}
1695 			disk_bytenr += extent_offset;
1696 			disk_bytenr += cur_offset - found_key.offset;
1697 			num_bytes = min(end + 1, extent_end) - cur_offset;
1698 			/*
1699 			 * If there are pending snapshots for this root, we
1700 			 * fall into common COW way
1701 			 */
1702 			if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
1703 				goto out_check;
1704 			/*
1705 			 * force cow if csum exists in the range.
1706 			 * this ensure that csum for a given extent are
1707 			 * either valid or do not exist.
1708 			 */
1709 			ret = csum_exist_in_range(fs_info, disk_bytenr,
1710 						  num_bytes);
1711 			if (ret) {
1712 				/*
1713 				 * ret could be -EIO if the above fails to read
1714 				 * metadata.
1715 				 */
1716 				if (ret < 0) {
1717 					if (cow_start != (u64)-1)
1718 						cur_offset = cow_start;
1719 					goto error;
1720 				}
1721 				WARN_ON_ONCE(freespace_inode);
1722 				goto out_check;
1723 			}
1724 			/* If the extent's block group is RO, we must COW */
1725 			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1726 				goto out_check;
1727 			nocow = true;
1728 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1729 			extent_end = found_key.offset + ram_bytes;
1730 			extent_end = ALIGN(extent_end, fs_info->sectorsize);
1731 			/* Skip extents outside of our requested range */
1732 			if (extent_end <= start) {
1733 				path->slots[0]++;
1734 				goto next_slot;
1735 			}
1736 		} else {
1737 			/* If this triggers then we have a memory corruption */
1738 			BUG();
1739 		}
1740 out_check:
1741 		/*
1742 		 * If nocow is false then record the beginning of the range
1743 		 * that needs to be COWed
1744 		 */
1745 		if (!nocow) {
1746 			if (cow_start == (u64)-1)
1747 				cow_start = cur_offset;
1748 			cur_offset = extent_end;
1749 			if (cur_offset > end)
1750 				break;
1751 			if (!path->nodes[0])
1752 				continue;
1753 			path->slots[0]++;
1754 			goto next_slot;
1755 		}
1756 
1757 		/*
1758 		 * COW range from cow_start to found_key.offset - 1. As the key
1759 		 * will contain the beginning of the first extent that can be
1760 		 * NOCOW, following one which needs to be COW'ed
1761 		 */
1762 		if (cow_start != (u64)-1) {
1763 			ret = fallback_to_cow(inode, locked_page,
1764 					      cow_start, found_key.offset - 1,
1765 					      page_started, nr_written);
1766 			if (ret)
1767 				goto error;
1768 			cow_start = (u64)-1;
1769 		}
1770 
1771 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1772 			u64 orig_start = found_key.offset - extent_offset;
1773 			struct extent_map *em;
1774 
1775 			em = create_io_em(inode, cur_offset, num_bytes,
1776 					  orig_start,
1777 					  disk_bytenr, /* block_start */
1778 					  num_bytes, /* block_len */
1779 					  disk_num_bytes, /* orig_block_len */
1780 					  ram_bytes, BTRFS_COMPRESS_NONE,
1781 					  BTRFS_ORDERED_PREALLOC);
1782 			if (IS_ERR(em)) {
1783 				ret = PTR_ERR(em);
1784 				goto error;
1785 			}
1786 			free_extent_map(em);
1787 			ret = btrfs_add_ordered_extent(inode, cur_offset,
1788 						       disk_bytenr, num_bytes,
1789 						       num_bytes,
1790 						       BTRFS_ORDERED_PREALLOC);
1791 			if (ret) {
1792 				btrfs_drop_extent_cache(inode, cur_offset,
1793 							cur_offset + num_bytes - 1,
1794 							0);
1795 				goto error;
1796 			}
1797 		} else {
1798 			ret = btrfs_add_ordered_extent(inode, cur_offset,
1799 						       disk_bytenr, num_bytes,
1800 						       num_bytes,
1801 						       BTRFS_ORDERED_NOCOW);
1802 			if (ret)
1803 				goto error;
1804 		}
1805 
1806 		if (nocow)
1807 			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1808 		nocow = false;
1809 
1810 		if (root->root_key.objectid ==
1811 		    BTRFS_DATA_RELOC_TREE_OBJECTID)
1812 			/*
1813 			 * Error handled later, as we must prevent
1814 			 * extent_clear_unlock_delalloc() in error handler
1815 			 * from freeing metadata of created ordered extent.
1816 			 */
1817 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1818 						      num_bytes);
1819 
1820 		extent_clear_unlock_delalloc(inode, cur_offset,
1821 					     cur_offset + num_bytes - 1,
1822 					     locked_page, EXTENT_LOCKED |
1823 					     EXTENT_DELALLOC |
1824 					     EXTENT_CLEAR_DATA_RESV,
1825 					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1826 
1827 		cur_offset = extent_end;
1828 
1829 		/*
1830 		 * btrfs_reloc_clone_csums() error, now we're OK to call error
1831 		 * handler, as metadata for created ordered extent will only
1832 		 * be freed by btrfs_finish_ordered_io().
1833 		 */
1834 		if (ret)
1835 			goto error;
1836 		if (cur_offset > end)
1837 			break;
1838 	}
1839 	btrfs_release_path(path);
1840 
1841 	if (cur_offset <= end && cow_start == (u64)-1)
1842 		cow_start = cur_offset;
1843 
1844 	if (cow_start != (u64)-1) {
1845 		cur_offset = end;
1846 		ret = fallback_to_cow(inode, locked_page, cow_start, end,
1847 				      page_started, nr_written);
1848 		if (ret)
1849 			goto error;
1850 	}
1851 
1852 error:
1853 	if (nocow)
1854 		btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1855 
1856 	if (ret && cur_offset < end)
1857 		extent_clear_unlock_delalloc(inode, cur_offset, end,
1858 					     locked_page, EXTENT_LOCKED |
1859 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
1860 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1861 					     PAGE_START_WRITEBACK |
1862 					     PAGE_END_WRITEBACK);
1863 	btrfs_free_path(path);
1864 	return ret;
1865 }
1866 
1867 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
1868 {
1869 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
1870 		if (inode->defrag_bytes &&
1871 		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
1872 				   0, NULL))
1873 			return false;
1874 		return true;
1875 	}
1876 	return false;
1877 }
1878 
1879 /*
1880  * Function to process delayed allocation (create CoW) for ranges which are
1881  * being touched for the first time.
1882  */
1883 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
1884 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
1885 		struct writeback_control *wbc)
1886 {
1887 	int ret;
1888 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
1889 
1890 	if (should_nocow(inode, start, end)) {
1891 		ASSERT(!zoned);
1892 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1893 					 page_started, nr_written);
1894 	} else if (!inode_can_compress(inode) ||
1895 		   !inode_need_compress(inode, start, end)) {
1896 		if (zoned)
1897 			ret = run_delalloc_zoned(inode, locked_page, start, end,
1898 						 page_started, nr_written);
1899 		else
1900 			ret = cow_file_range(inode, locked_page, start, end,
1901 					     page_started, nr_written, 1);
1902 	} else {
1903 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1904 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
1905 					   page_started, nr_written);
1906 	}
1907 	if (ret)
1908 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
1909 					      end - start + 1);
1910 	return ret;
1911 }
1912 
1913 void btrfs_split_delalloc_extent(struct inode *inode,
1914 				 struct extent_state *orig, u64 split)
1915 {
1916 	u64 size;
1917 
1918 	/* not delalloc, ignore it */
1919 	if (!(orig->state & EXTENT_DELALLOC))
1920 		return;
1921 
1922 	size = orig->end - orig->start + 1;
1923 	if (size > BTRFS_MAX_EXTENT_SIZE) {
1924 		u32 num_extents;
1925 		u64 new_size;
1926 
1927 		/*
1928 		 * See the explanation in btrfs_merge_delalloc_extent, the same
1929 		 * applies here, just in reverse.
1930 		 */
1931 		new_size = orig->end - split + 1;
1932 		num_extents = count_max_extents(new_size);
1933 		new_size = split - orig->start;
1934 		num_extents += count_max_extents(new_size);
1935 		if (count_max_extents(size) >= num_extents)
1936 			return;
1937 	}
1938 
1939 	spin_lock(&BTRFS_I(inode)->lock);
1940 	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1941 	spin_unlock(&BTRFS_I(inode)->lock);
1942 }
1943 
1944 /*
1945  * Handle merged delayed allocation extents so we can keep track of new extents
1946  * that are just merged onto old extents, such as when we are doing sequential
1947  * writes, so we can properly account for the metadata space we'll need.
1948  */
1949 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1950 				 struct extent_state *other)
1951 {
1952 	u64 new_size, old_size;
1953 	u32 num_extents;
1954 
1955 	/* not delalloc, ignore it */
1956 	if (!(other->state & EXTENT_DELALLOC))
1957 		return;
1958 
1959 	if (new->start > other->start)
1960 		new_size = new->end - other->start + 1;
1961 	else
1962 		new_size = other->end - new->start + 1;
1963 
1964 	/* we're not bigger than the max, unreserve the space and go */
1965 	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1966 		spin_lock(&BTRFS_I(inode)->lock);
1967 		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1968 		spin_unlock(&BTRFS_I(inode)->lock);
1969 		return;
1970 	}
1971 
1972 	/*
1973 	 * We have to add up either side to figure out how many extents were
1974 	 * accounted for before we merged into one big extent.  If the number of
1975 	 * extents we accounted for is <= the amount we need for the new range
1976 	 * then we can return, otherwise drop.  Think of it like this
1977 	 *
1978 	 * [ 4k][MAX_SIZE]
1979 	 *
1980 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
1981 	 * need 2 outstanding extents, on one side we have 1 and the other side
1982 	 * we have 1 so they are == and we can return.  But in this case
1983 	 *
1984 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
1985 	 *
1986 	 * Each range on their own accounts for 2 extents, but merged together
1987 	 * they are only 3 extents worth of accounting, so we need to drop in
1988 	 * this case.
1989 	 */
1990 	old_size = other->end - other->start + 1;
1991 	num_extents = count_max_extents(old_size);
1992 	old_size = new->end - new->start + 1;
1993 	num_extents += count_max_extents(old_size);
1994 	if (count_max_extents(new_size) >= num_extents)
1995 		return;
1996 
1997 	spin_lock(&BTRFS_I(inode)->lock);
1998 	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1999 	spin_unlock(&BTRFS_I(inode)->lock);
2000 }
2001 
2002 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2003 				      struct inode *inode)
2004 {
2005 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2006 
2007 	spin_lock(&root->delalloc_lock);
2008 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
2009 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
2010 			      &root->delalloc_inodes);
2011 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2012 			&BTRFS_I(inode)->runtime_flags);
2013 		root->nr_delalloc_inodes++;
2014 		if (root->nr_delalloc_inodes == 1) {
2015 			spin_lock(&fs_info->delalloc_root_lock);
2016 			BUG_ON(!list_empty(&root->delalloc_root));
2017 			list_add_tail(&root->delalloc_root,
2018 				      &fs_info->delalloc_roots);
2019 			spin_unlock(&fs_info->delalloc_root_lock);
2020 		}
2021 	}
2022 	spin_unlock(&root->delalloc_lock);
2023 }
2024 
2025 
2026 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2027 				struct btrfs_inode *inode)
2028 {
2029 	struct btrfs_fs_info *fs_info = root->fs_info;
2030 
2031 	if (!list_empty(&inode->delalloc_inodes)) {
2032 		list_del_init(&inode->delalloc_inodes);
2033 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2034 			  &inode->runtime_flags);
2035 		root->nr_delalloc_inodes--;
2036 		if (!root->nr_delalloc_inodes) {
2037 			ASSERT(list_empty(&root->delalloc_inodes));
2038 			spin_lock(&fs_info->delalloc_root_lock);
2039 			BUG_ON(list_empty(&root->delalloc_root));
2040 			list_del_init(&root->delalloc_root);
2041 			spin_unlock(&fs_info->delalloc_root_lock);
2042 		}
2043 	}
2044 }
2045 
2046 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2047 				     struct btrfs_inode *inode)
2048 {
2049 	spin_lock(&root->delalloc_lock);
2050 	__btrfs_del_delalloc_inode(root, inode);
2051 	spin_unlock(&root->delalloc_lock);
2052 }
2053 
2054 /*
2055  * Properly track delayed allocation bytes in the inode and to maintain the
2056  * list of inodes that have pending delalloc work to be done.
2057  */
2058 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
2059 			       unsigned *bits)
2060 {
2061 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2062 
2063 	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
2064 		WARN_ON(1);
2065 	/*
2066 	 * set_bit and clear bit hooks normally require _irqsave/restore
2067 	 * but in this case, we are only testing for the DELALLOC
2068 	 * bit, which is only set or cleared with irqs on
2069 	 */
2070 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2071 		struct btrfs_root *root = BTRFS_I(inode)->root;
2072 		u64 len = state->end + 1 - state->start;
2073 		u32 num_extents = count_max_extents(len);
2074 		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
2075 
2076 		spin_lock(&BTRFS_I(inode)->lock);
2077 		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
2078 		spin_unlock(&BTRFS_I(inode)->lock);
2079 
2080 		/* For sanity tests */
2081 		if (btrfs_is_testing(fs_info))
2082 			return;
2083 
2084 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2085 					 fs_info->delalloc_batch);
2086 		spin_lock(&BTRFS_I(inode)->lock);
2087 		BTRFS_I(inode)->delalloc_bytes += len;
2088 		if (*bits & EXTENT_DEFRAG)
2089 			BTRFS_I(inode)->defrag_bytes += len;
2090 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2091 					 &BTRFS_I(inode)->runtime_flags))
2092 			btrfs_add_delalloc_inodes(root, inode);
2093 		spin_unlock(&BTRFS_I(inode)->lock);
2094 	}
2095 
2096 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2097 	    (*bits & EXTENT_DELALLOC_NEW)) {
2098 		spin_lock(&BTRFS_I(inode)->lock);
2099 		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
2100 			state->start;
2101 		spin_unlock(&BTRFS_I(inode)->lock);
2102 	}
2103 }
2104 
2105 /*
2106  * Once a range is no longer delalloc this function ensures that proper
2107  * accounting happens.
2108  */
2109 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
2110 				 struct extent_state *state, unsigned *bits)
2111 {
2112 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
2113 	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
2114 	u64 len = state->end + 1 - state->start;
2115 	u32 num_extents = count_max_extents(len);
2116 
2117 	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
2118 		spin_lock(&inode->lock);
2119 		inode->defrag_bytes -= len;
2120 		spin_unlock(&inode->lock);
2121 	}
2122 
2123 	/*
2124 	 * set_bit and clear bit hooks normally require _irqsave/restore
2125 	 * but in this case, we are only testing for the DELALLOC
2126 	 * bit, which is only set or cleared with irqs on
2127 	 */
2128 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
2129 		struct btrfs_root *root = inode->root;
2130 		bool do_list = !btrfs_is_free_space_inode(inode);
2131 
2132 		spin_lock(&inode->lock);
2133 		btrfs_mod_outstanding_extents(inode, -num_extents);
2134 		spin_unlock(&inode->lock);
2135 
2136 		/*
2137 		 * We don't reserve metadata space for space cache inodes so we
2138 		 * don't need to call delalloc_release_metadata if there is an
2139 		 * error.
2140 		 */
2141 		if (*bits & EXTENT_CLEAR_META_RESV &&
2142 		    root != fs_info->tree_root)
2143 			btrfs_delalloc_release_metadata(inode, len, false);
2144 
2145 		/* For sanity tests. */
2146 		if (btrfs_is_testing(fs_info))
2147 			return;
2148 
2149 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
2150 		    do_list && !(state->state & EXTENT_NORESERVE) &&
2151 		    (*bits & EXTENT_CLEAR_DATA_RESV))
2152 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2153 
2154 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2155 					 fs_info->delalloc_batch);
2156 		spin_lock(&inode->lock);
2157 		inode->delalloc_bytes -= len;
2158 		if (do_list && inode->delalloc_bytes == 0 &&
2159 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2160 					&inode->runtime_flags))
2161 			btrfs_del_delalloc_inode(root, inode);
2162 		spin_unlock(&inode->lock);
2163 	}
2164 
2165 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2166 	    (*bits & EXTENT_DELALLOC_NEW)) {
2167 		spin_lock(&inode->lock);
2168 		ASSERT(inode->new_delalloc_bytes >= len);
2169 		inode->new_delalloc_bytes -= len;
2170 		if (*bits & EXTENT_ADD_INODE_BYTES)
2171 			inode_add_bytes(&inode->vfs_inode, len);
2172 		spin_unlock(&inode->lock);
2173 	}
2174 }
2175 
2176 /*
2177  * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
2178  * in a chunk's stripe. This function ensures that bios do not span a
2179  * stripe/chunk
2180  *
2181  * @page - The page we are about to add to the bio
2182  * @size - size we want to add to the bio
2183  * @bio - bio we want to ensure is smaller than a stripe
2184  * @bio_flags - flags of the bio
2185  *
2186  * return 1 if page cannot be added to the bio
2187  * return 0 if page can be added to the bio
2188  * return error otherwise
2189  */
2190 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
2191 			     unsigned long bio_flags)
2192 {
2193 	struct inode *inode = page->mapping->host;
2194 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2195 	u64 logical = bio->bi_iter.bi_sector << 9;
2196 	struct extent_map *em;
2197 	u64 length = 0;
2198 	u64 map_length;
2199 	int ret = 0;
2200 	struct btrfs_io_geometry geom;
2201 
2202 	if (bio_flags & EXTENT_BIO_COMPRESSED)
2203 		return 0;
2204 
2205 	length = bio->bi_iter.bi_size;
2206 	map_length = length;
2207 	em = btrfs_get_chunk_map(fs_info, logical, map_length);
2208 	if (IS_ERR(em))
2209 		return PTR_ERR(em);
2210 	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical,
2211 				    map_length, &geom);
2212 	if (ret < 0)
2213 		goto out;
2214 
2215 	if (geom.len < length + size)
2216 		ret = 1;
2217 out:
2218 	free_extent_map(em);
2219 	return ret;
2220 }
2221 
2222 /*
2223  * in order to insert checksums into the metadata in large chunks,
2224  * we wait until bio submission time.   All the pages in the bio are
2225  * checksummed and sums are attached onto the ordered extent record.
2226  *
2227  * At IO completion time the cums attached on the ordered extent record
2228  * are inserted into the btree
2229  */
2230 static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
2231 					   u64 dio_file_offset)
2232 {
2233 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
2234 }
2235 
2236 bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
2237 				      unsigned int size)
2238 {
2239 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
2240 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2241 	struct btrfs_ordered_extent *ordered;
2242 	u64 len = bio->bi_iter.bi_size + size;
2243 	bool ret = true;
2244 
2245 	ASSERT(btrfs_is_zoned(fs_info));
2246 	ASSERT(fs_info->max_zone_append_size > 0);
2247 	ASSERT(bio_op(bio) == REQ_OP_ZONE_APPEND);
2248 
2249 	/* Ordered extent not yet created, so we're good */
2250 	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
2251 	if (!ordered)
2252 		return ret;
2253 
2254 	if ((bio->bi_iter.bi_sector << SECTOR_SHIFT) + len >
2255 	    ordered->disk_bytenr + ordered->disk_num_bytes)
2256 		ret = false;
2257 
2258 	btrfs_put_ordered_extent(ordered);
2259 
2260 	return ret;
2261 }
2262 
2263 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
2264 					   struct bio *bio, loff_t file_offset)
2265 {
2266 	struct btrfs_ordered_extent *ordered;
2267 	struct extent_map *em = NULL, *em_new = NULL;
2268 	struct extent_map_tree *em_tree = &inode->extent_tree;
2269 	u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
2270 	u64 len = bio->bi_iter.bi_size;
2271 	u64 end = start + len;
2272 	u64 ordered_end;
2273 	u64 pre, post;
2274 	int ret = 0;
2275 
2276 	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
2277 	if (WARN_ON_ONCE(!ordered))
2278 		return BLK_STS_IOERR;
2279 
2280 	/* No need to split */
2281 	if (ordered->disk_num_bytes == len)
2282 		goto out;
2283 
2284 	/* We cannot split once end_bio'd ordered extent */
2285 	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
2286 		ret = -EINVAL;
2287 		goto out;
2288 	}
2289 
2290 	/* We cannot split a compressed ordered extent */
2291 	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
2292 		ret = -EINVAL;
2293 		goto out;
2294 	}
2295 
2296 	ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
2297 	/* bio must be in one ordered extent */
2298 	if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
2299 		ret = -EINVAL;
2300 		goto out;
2301 	}
2302 
2303 	/* Checksum list should be empty */
2304 	if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
2305 		ret = -EINVAL;
2306 		goto out;
2307 	}
2308 
2309 	pre = start - ordered->disk_bytenr;
2310 	post = ordered_end - end;
2311 
2312 	ret = btrfs_split_ordered_extent(ordered, pre, post);
2313 	if (ret)
2314 		goto out;
2315 
2316 	read_lock(&em_tree->lock);
2317 	em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
2318 	if (!em) {
2319 		read_unlock(&em_tree->lock);
2320 		ret = -EIO;
2321 		goto out;
2322 	}
2323 	read_unlock(&em_tree->lock);
2324 
2325 	ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
2326 	/*
2327 	 * We cannot reuse em_new here but have to create a new one, as
2328 	 * unpin_extent_cache() expects the start of the extent map to be the
2329 	 * logical offset of the file, which does not hold true anymore after
2330 	 * splitting.
2331 	 */
2332 	em_new = create_io_em(inode, em->start + pre, len,
2333 			      em->start + pre, em->block_start + pre, len,
2334 			      len, len, BTRFS_COMPRESS_NONE,
2335 			      BTRFS_ORDERED_REGULAR);
2336 	if (IS_ERR(em_new)) {
2337 		ret = PTR_ERR(em_new);
2338 		goto out;
2339 	}
2340 	free_extent_map(em_new);
2341 
2342 out:
2343 	free_extent_map(em);
2344 	btrfs_put_ordered_extent(ordered);
2345 
2346 	return errno_to_blk_status(ret);
2347 }
2348 
2349 /*
2350  * extent_io.c submission hook. This does the right thing for csum calculation
2351  * on write, or reading the csums from the tree before a read.
2352  *
2353  * Rules about async/sync submit,
2354  * a) read:				sync submit
2355  *
2356  * b) write without checksum:		sync submit
2357  *
2358  * c) write with checksum:
2359  *    c-1) if bio is issued by fsync:	sync submit
2360  *         (sync_writers != 0)
2361  *
2362  *    c-2) if root is reloc root:	sync submit
2363  *         (only in case of buffered IO)
2364  *
2365  *    c-3) otherwise:			async submit
2366  */
2367 blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
2368 				   int mirror_num, unsigned long bio_flags)
2369 
2370 {
2371 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2372 	struct btrfs_root *root = BTRFS_I(inode)->root;
2373 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2374 	blk_status_t ret = 0;
2375 	int skip_sum;
2376 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2377 
2378 	skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
2379 		   !fs_info->csum_root;
2380 
2381 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2382 		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2383 
2384 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
2385 		struct page *page = bio_first_bvec_all(bio)->bv_page;
2386 		loff_t file_offset = page_offset(page);
2387 
2388 		ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
2389 		if (ret)
2390 			goto out;
2391 	}
2392 
2393 	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
2394 		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2395 		if (ret)
2396 			goto out;
2397 
2398 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
2399 			ret = btrfs_submit_compressed_read(inode, bio,
2400 							   mirror_num,
2401 							   bio_flags);
2402 			goto out;
2403 		} else {
2404 			/*
2405 			 * Lookup bio sums does extra checks around whether we
2406 			 * need to csum or not, which is why we ignore skip_sum
2407 			 * here.
2408 			 */
2409 			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2410 			if (ret)
2411 				goto out;
2412 		}
2413 		goto mapit;
2414 	} else if (async && !skip_sum) {
2415 		/* csum items have already been cloned */
2416 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2417 			goto mapit;
2418 		/* we're doing a write, do the async checksumming */
2419 		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
2420 					  0, btrfs_submit_bio_start);
2421 		goto out;
2422 	} else if (!skip_sum) {
2423 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
2424 		if (ret)
2425 			goto out;
2426 	}
2427 
2428 mapit:
2429 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
2430 
2431 out:
2432 	if (ret) {
2433 		bio->bi_status = ret;
2434 		bio_endio(bio);
2435 	}
2436 	return ret;
2437 }
2438 
2439 /*
2440  * given a list of ordered sums record them in the inode.  This happens
2441  * at IO completion time based on sums calculated at bio submission time.
2442  */
2443 static int add_pending_csums(struct btrfs_trans_handle *trans,
2444 			     struct list_head *list)
2445 {
2446 	struct btrfs_ordered_sum *sum;
2447 	int ret;
2448 
2449 	list_for_each_entry(sum, list, list) {
2450 		trans->adding_csums = true;
2451 		ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum);
2452 		trans->adding_csums = false;
2453 		if (ret)
2454 			return ret;
2455 	}
2456 	return 0;
2457 }
2458 
2459 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2460 					 const u64 start,
2461 					 const u64 len,
2462 					 struct extent_state **cached_state)
2463 {
2464 	u64 search_start = start;
2465 	const u64 end = start + len - 1;
2466 
2467 	while (search_start < end) {
2468 		const u64 search_len = end - search_start + 1;
2469 		struct extent_map *em;
2470 		u64 em_len;
2471 		int ret = 0;
2472 
2473 		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2474 		if (IS_ERR(em))
2475 			return PTR_ERR(em);
2476 
2477 		if (em->block_start != EXTENT_MAP_HOLE)
2478 			goto next;
2479 
2480 		em_len = em->len;
2481 		if (em->start < search_start)
2482 			em_len -= search_start - em->start;
2483 		if (em_len > search_len)
2484 			em_len = search_len;
2485 
2486 		ret = set_extent_bit(&inode->io_tree, search_start,
2487 				     search_start + em_len - 1,
2488 				     EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
2489 				     GFP_NOFS, NULL);
2490 next:
2491 		search_start = extent_map_end(em);
2492 		free_extent_map(em);
2493 		if (ret)
2494 			return ret;
2495 	}
2496 	return 0;
2497 }
2498 
2499 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2500 			      unsigned int extra_bits,
2501 			      struct extent_state **cached_state)
2502 {
2503 	WARN_ON(PAGE_ALIGNED(end));
2504 
2505 	if (start >= i_size_read(&inode->vfs_inode) &&
2506 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2507 		/*
2508 		 * There can't be any extents following eof in this case so just
2509 		 * set the delalloc new bit for the range directly.
2510 		 */
2511 		extra_bits |= EXTENT_DELALLOC_NEW;
2512 	} else {
2513 		int ret;
2514 
2515 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2516 						    end + 1 - start,
2517 						    cached_state);
2518 		if (ret)
2519 			return ret;
2520 	}
2521 
2522 	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
2523 				   cached_state);
2524 }
2525 
2526 /* see btrfs_writepage_start_hook for details on why this is required */
2527 struct btrfs_writepage_fixup {
2528 	struct page *page;
2529 	struct inode *inode;
2530 	struct btrfs_work work;
2531 };
2532 
2533 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2534 {
2535 	struct btrfs_writepage_fixup *fixup;
2536 	struct btrfs_ordered_extent *ordered;
2537 	struct extent_state *cached_state = NULL;
2538 	struct extent_changeset *data_reserved = NULL;
2539 	struct page *page;
2540 	struct btrfs_inode *inode;
2541 	u64 page_start;
2542 	u64 page_end;
2543 	int ret = 0;
2544 	bool free_delalloc_space = true;
2545 
2546 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
2547 	page = fixup->page;
2548 	inode = BTRFS_I(fixup->inode);
2549 	page_start = page_offset(page);
2550 	page_end = page_offset(page) + PAGE_SIZE - 1;
2551 
2552 	/*
2553 	 * This is similar to page_mkwrite, we need to reserve the space before
2554 	 * we take the page lock.
2555 	 */
2556 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2557 					   PAGE_SIZE);
2558 again:
2559 	lock_page(page);
2560 
2561 	/*
2562 	 * Before we queued this fixup, we took a reference on the page.
2563 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2564 	 * address space.
2565 	 */
2566 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2567 		/*
2568 		 * Unfortunately this is a little tricky, either
2569 		 *
2570 		 * 1) We got here and our page had already been dealt with and
2571 		 *    we reserved our space, thus ret == 0, so we need to just
2572 		 *    drop our space reservation and bail.  This can happen the
2573 		 *    first time we come into the fixup worker, or could happen
2574 		 *    while waiting for the ordered extent.
2575 		 * 2) Our page was already dealt with, but we happened to get an
2576 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2577 		 *    this case we obviously don't have anything to release, but
2578 		 *    because the page was already dealt with we don't want to
2579 		 *    mark the page with an error, so make sure we're resetting
2580 		 *    ret to 0.  This is why we have this check _before_ the ret
2581 		 *    check, because we do not want to have a surprise ENOSPC
2582 		 *    when the page was already properly dealt with.
2583 		 */
2584 		if (!ret) {
2585 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2586 			btrfs_delalloc_release_space(inode, data_reserved,
2587 						     page_start, PAGE_SIZE,
2588 						     true);
2589 		}
2590 		ret = 0;
2591 		goto out_page;
2592 	}
2593 
2594 	/*
2595 	 * We can't mess with the page state unless it is locked, so now that
2596 	 * it is locked bail if we failed to make our space reservation.
2597 	 */
2598 	if (ret)
2599 		goto out_page;
2600 
2601 	lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
2602 
2603 	/* already ordered? We're done */
2604 	if (PagePrivate2(page))
2605 		goto out_reserved;
2606 
2607 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2608 	if (ordered) {
2609 		unlock_extent_cached(&inode->io_tree, page_start, page_end,
2610 				     &cached_state);
2611 		unlock_page(page);
2612 		btrfs_start_ordered_extent(ordered, 1);
2613 		btrfs_put_ordered_extent(ordered);
2614 		goto again;
2615 	}
2616 
2617 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2618 					&cached_state);
2619 	if (ret)
2620 		goto out_reserved;
2621 
2622 	/*
2623 	 * Everything went as planned, we're now the owner of a dirty page with
2624 	 * delayed allocation bits set and space reserved for our COW
2625 	 * destination.
2626 	 *
2627 	 * The page was dirty when we started, nothing should have cleaned it.
2628 	 */
2629 	BUG_ON(!PageDirty(page));
2630 	free_delalloc_space = false;
2631 out_reserved:
2632 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2633 	if (free_delalloc_space)
2634 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2635 					     PAGE_SIZE, true);
2636 	unlock_extent_cached(&inode->io_tree, page_start, page_end,
2637 			     &cached_state);
2638 out_page:
2639 	if (ret) {
2640 		/*
2641 		 * We hit ENOSPC or other errors.  Update the mapping and page
2642 		 * to reflect the errors and clean the page.
2643 		 */
2644 		mapping_set_error(page->mapping, ret);
2645 		end_extent_writepage(page, ret, page_start, page_end);
2646 		clear_page_dirty_for_io(page);
2647 		SetPageError(page);
2648 	}
2649 	ClearPageChecked(page);
2650 	unlock_page(page);
2651 	put_page(page);
2652 	kfree(fixup);
2653 	extent_changeset_free(data_reserved);
2654 	/*
2655 	 * As a precaution, do a delayed iput in case it would be the last iput
2656 	 * that could need flushing space. Recursing back to fixup worker would
2657 	 * deadlock.
2658 	 */
2659 	btrfs_add_delayed_iput(&inode->vfs_inode);
2660 }
2661 
2662 /*
2663  * There are a few paths in the higher layers of the kernel that directly
2664  * set the page dirty bit without asking the filesystem if it is a
2665  * good idea.  This causes problems because we want to make sure COW
2666  * properly happens and the data=ordered rules are followed.
2667  *
2668  * In our case any range that doesn't have the ORDERED bit set
2669  * hasn't been properly setup for IO.  We kick off an async process
2670  * to fix it up.  The async helper will wait for ordered extents, set
2671  * the delalloc bit and make it safe to write the page.
2672  */
2673 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
2674 {
2675 	struct inode *inode = page->mapping->host;
2676 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2677 	struct btrfs_writepage_fixup *fixup;
2678 
2679 	/* this page is properly in the ordered list */
2680 	if (TestClearPagePrivate2(page))
2681 		return 0;
2682 
2683 	/*
2684 	 * PageChecked is set below when we create a fixup worker for this page,
2685 	 * don't try to create another one if we're already PageChecked()
2686 	 *
2687 	 * The extent_io writepage code will redirty the page if we send back
2688 	 * EAGAIN.
2689 	 */
2690 	if (PageChecked(page))
2691 		return -EAGAIN;
2692 
2693 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2694 	if (!fixup)
2695 		return -EAGAIN;
2696 
2697 	/*
2698 	 * We are already holding a reference to this inode from
2699 	 * write_cache_pages.  We need to hold it because the space reservation
2700 	 * takes place outside of the page lock, and we can't trust
2701 	 * page->mapping outside of the page lock.
2702 	 */
2703 	ihold(inode);
2704 	SetPageChecked(page);
2705 	get_page(page);
2706 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
2707 	fixup->page = page;
2708 	fixup->inode = inode;
2709 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2710 
2711 	return -EAGAIN;
2712 }
2713 
2714 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2715 				       struct btrfs_inode *inode, u64 file_pos,
2716 				       struct btrfs_file_extent_item *stack_fi,
2717 				       const bool update_inode_bytes,
2718 				       u64 qgroup_reserved)
2719 {
2720 	struct btrfs_root *root = inode->root;
2721 	const u64 sectorsize = root->fs_info->sectorsize;
2722 	struct btrfs_path *path;
2723 	struct extent_buffer *leaf;
2724 	struct btrfs_key ins;
2725 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2726 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2727 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2728 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2729 	struct btrfs_drop_extents_args drop_args = { 0 };
2730 	int ret;
2731 
2732 	path = btrfs_alloc_path();
2733 	if (!path)
2734 		return -ENOMEM;
2735 
2736 	/*
2737 	 * we may be replacing one extent in the tree with another.
2738 	 * The new extent is pinned in the extent map, and we don't want
2739 	 * to drop it from the cache until it is completely in the btree.
2740 	 *
2741 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
2742 	 * the caller is expected to unpin it and allow it to be merged
2743 	 * with the others.
2744 	 */
2745 	drop_args.path = path;
2746 	drop_args.start = file_pos;
2747 	drop_args.end = file_pos + num_bytes;
2748 	drop_args.replace_extent = true;
2749 	drop_args.extent_item_size = sizeof(*stack_fi);
2750 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2751 	if (ret)
2752 		goto out;
2753 
2754 	if (!drop_args.extent_inserted) {
2755 		ins.objectid = btrfs_ino(inode);
2756 		ins.offset = file_pos;
2757 		ins.type = BTRFS_EXTENT_DATA_KEY;
2758 
2759 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2760 					      sizeof(*stack_fi));
2761 		if (ret)
2762 			goto out;
2763 	}
2764 	leaf = path->nodes[0];
2765 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2766 	write_extent_buffer(leaf, stack_fi,
2767 			btrfs_item_ptr_offset(leaf, path->slots[0]),
2768 			sizeof(struct btrfs_file_extent_item));
2769 
2770 	btrfs_mark_buffer_dirty(leaf);
2771 	btrfs_release_path(path);
2772 
2773 	/*
2774 	 * If we dropped an inline extent here, we know the range where it is
2775 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2776 	 * number of bytes only for that range contaning the inline extent.
2777 	 * The remaining of the range will be processed when clearning the
2778 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
2779 	 */
2780 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2781 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2782 
2783 		inline_size = drop_args.bytes_found - inline_size;
2784 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2785 		drop_args.bytes_found -= inline_size;
2786 		num_bytes -= sectorsize;
2787 	}
2788 
2789 	if (update_inode_bytes)
2790 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2791 
2792 	ins.objectid = disk_bytenr;
2793 	ins.offset = disk_num_bytes;
2794 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2795 
2796 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2797 	if (ret)
2798 		goto out;
2799 
2800 	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2801 					       file_pos, qgroup_reserved, &ins);
2802 out:
2803 	btrfs_free_path(path);
2804 
2805 	return ret;
2806 }
2807 
2808 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2809 					 u64 start, u64 len)
2810 {
2811 	struct btrfs_block_group *cache;
2812 
2813 	cache = btrfs_lookup_block_group(fs_info, start);
2814 	ASSERT(cache);
2815 
2816 	spin_lock(&cache->lock);
2817 	cache->delalloc_bytes -= len;
2818 	spin_unlock(&cache->lock);
2819 
2820 	btrfs_put_block_group(cache);
2821 }
2822 
2823 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
2824 					     struct btrfs_ordered_extent *oe)
2825 {
2826 	struct btrfs_file_extent_item stack_fi;
2827 	u64 logical_len;
2828 	bool update_inode_bytes;
2829 
2830 	memset(&stack_fi, 0, sizeof(stack_fi));
2831 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
2832 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
2833 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
2834 						   oe->disk_num_bytes);
2835 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
2836 		logical_len = oe->truncated_len;
2837 	else
2838 		logical_len = oe->num_bytes;
2839 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, logical_len);
2840 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, logical_len);
2841 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
2842 	/* Encryption and other encoding is reserved and all 0 */
2843 
2844 	/*
2845 	 * For delalloc, when completing an ordered extent we update the inode's
2846 	 * bytes when clearing the range in the inode's io tree, so pass false
2847 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
2848 	 * except if the ordered extent was truncated.
2849 	 */
2850 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
2851 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
2852 
2853 	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
2854 					   oe->file_offset, &stack_fi,
2855 					   update_inode_bytes, oe->qgroup_rsv);
2856 }
2857 
2858 /*
2859  * As ordered data IO finishes, this gets called so we can finish
2860  * an ordered extent if the range of bytes in the file it covers are
2861  * fully written.
2862  */
2863 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2864 {
2865 	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
2866 	struct btrfs_root *root = inode->root;
2867 	struct btrfs_fs_info *fs_info = root->fs_info;
2868 	struct btrfs_trans_handle *trans = NULL;
2869 	struct extent_io_tree *io_tree = &inode->io_tree;
2870 	struct extent_state *cached_state = NULL;
2871 	u64 start, end;
2872 	int compress_type = 0;
2873 	int ret = 0;
2874 	u64 logical_len = ordered_extent->num_bytes;
2875 	bool freespace_inode;
2876 	bool truncated = false;
2877 	bool clear_reserved_extent = true;
2878 	unsigned int clear_bits = EXTENT_DEFRAG;
2879 
2880 	start = ordered_extent->file_offset;
2881 	end = start + ordered_extent->num_bytes - 1;
2882 
2883 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2884 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2885 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2886 		clear_bits |= EXTENT_DELALLOC_NEW;
2887 
2888 	freespace_inode = btrfs_is_free_space_inode(inode);
2889 
2890 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2891 		ret = -EIO;
2892 		goto out;
2893 	}
2894 
2895 	if (ordered_extent->disk)
2896 		btrfs_rewrite_logical_zoned(ordered_extent);
2897 
2898 	btrfs_free_io_failure_record(inode, start, end);
2899 
2900 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2901 		truncated = true;
2902 		logical_len = ordered_extent->truncated_len;
2903 		/* Truncated the entire extent, don't bother adding */
2904 		if (!logical_len)
2905 			goto out;
2906 	}
2907 
2908 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2909 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2910 
2911 		btrfs_inode_safe_disk_i_size_write(inode, 0);
2912 		if (freespace_inode)
2913 			trans = btrfs_join_transaction_spacecache(root);
2914 		else
2915 			trans = btrfs_join_transaction(root);
2916 		if (IS_ERR(trans)) {
2917 			ret = PTR_ERR(trans);
2918 			trans = NULL;
2919 			goto out;
2920 		}
2921 		trans->block_rsv = &inode->block_rsv;
2922 		ret = btrfs_update_inode_fallback(trans, root, inode);
2923 		if (ret) /* -ENOMEM or corruption */
2924 			btrfs_abort_transaction(trans, ret);
2925 		goto out;
2926 	}
2927 
2928 	clear_bits |= EXTENT_LOCKED;
2929 	lock_extent_bits(io_tree, start, end, &cached_state);
2930 
2931 	if (freespace_inode)
2932 		trans = btrfs_join_transaction_spacecache(root);
2933 	else
2934 		trans = btrfs_join_transaction(root);
2935 	if (IS_ERR(trans)) {
2936 		ret = PTR_ERR(trans);
2937 		trans = NULL;
2938 		goto out;
2939 	}
2940 
2941 	trans->block_rsv = &inode->block_rsv;
2942 
2943 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2944 		compress_type = ordered_extent->compress_type;
2945 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2946 		BUG_ON(compress_type);
2947 		ret = btrfs_mark_extent_written(trans, inode,
2948 						ordered_extent->file_offset,
2949 						ordered_extent->file_offset +
2950 						logical_len);
2951 	} else {
2952 		BUG_ON(root == fs_info->tree_root);
2953 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
2954 		if (!ret) {
2955 			clear_reserved_extent = false;
2956 			btrfs_release_delalloc_bytes(fs_info,
2957 						ordered_extent->disk_bytenr,
2958 						ordered_extent->disk_num_bytes);
2959 		}
2960 	}
2961 	unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
2962 			   ordered_extent->num_bytes, trans->transid);
2963 	if (ret < 0) {
2964 		btrfs_abort_transaction(trans, ret);
2965 		goto out;
2966 	}
2967 
2968 	ret = add_pending_csums(trans, &ordered_extent->list);
2969 	if (ret) {
2970 		btrfs_abort_transaction(trans, ret);
2971 		goto out;
2972 	}
2973 
2974 	/*
2975 	 * If this is a new delalloc range, clear its new delalloc flag to
2976 	 * update the inode's number of bytes. This needs to be done first
2977 	 * before updating the inode item.
2978 	 */
2979 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
2980 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
2981 		clear_extent_bit(&inode->io_tree, start, end,
2982 				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
2983 				 0, 0, &cached_state);
2984 
2985 	btrfs_inode_safe_disk_i_size_write(inode, 0);
2986 	ret = btrfs_update_inode_fallback(trans, root, inode);
2987 	if (ret) { /* -ENOMEM or corruption */
2988 		btrfs_abort_transaction(trans, ret);
2989 		goto out;
2990 	}
2991 	ret = 0;
2992 out:
2993 	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
2994 			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
2995 			 &cached_state);
2996 
2997 	if (trans)
2998 		btrfs_end_transaction(trans);
2999 
3000 	if (ret || truncated) {
3001 		u64 unwritten_start = start;
3002 
3003 		/*
3004 		 * If we failed to finish this ordered extent for any reason we
3005 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3006 		 * extent, and mark the inode with the error if it wasn't
3007 		 * already set.  Any error during writeback would have already
3008 		 * set the mapping error, so we need to set it if we're the ones
3009 		 * marking this ordered extent as failed.
3010 		 */
3011 		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3012 					     &ordered_extent->flags))
3013 			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3014 
3015 		if (truncated)
3016 			unwritten_start += logical_len;
3017 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3018 
3019 		/* Drop the cache for the part of the extent we didn't write. */
3020 		btrfs_drop_extent_cache(inode, unwritten_start, end, 0);
3021 
3022 		/*
3023 		 * If the ordered extent had an IOERR or something else went
3024 		 * wrong we need to return the space for this ordered extent
3025 		 * back to the allocator.  We only free the extent in the
3026 		 * truncated case if we didn't write out the extent at all.
3027 		 *
3028 		 * If we made it past insert_reserved_file_extent before we
3029 		 * errored out then we don't need to do this as the accounting
3030 		 * has already been done.
3031 		 */
3032 		if ((ret || !logical_len) &&
3033 		    clear_reserved_extent &&
3034 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3035 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3036 			/*
3037 			 * Discard the range before returning it back to the
3038 			 * free space pool
3039 			 */
3040 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3041 				btrfs_discard_extent(fs_info,
3042 						ordered_extent->disk_bytenr,
3043 						ordered_extent->disk_num_bytes,
3044 						NULL);
3045 			btrfs_free_reserved_extent(fs_info,
3046 					ordered_extent->disk_bytenr,
3047 					ordered_extent->disk_num_bytes, 1);
3048 		}
3049 	}
3050 
3051 	/*
3052 	 * This needs to be done to make sure anybody waiting knows we are done
3053 	 * updating everything for this ordered extent.
3054 	 */
3055 	btrfs_remove_ordered_extent(inode, ordered_extent);
3056 
3057 	/* once for us */
3058 	btrfs_put_ordered_extent(ordered_extent);
3059 	/* once for the tree */
3060 	btrfs_put_ordered_extent(ordered_extent);
3061 
3062 	return ret;
3063 }
3064 
3065 static void finish_ordered_fn(struct btrfs_work *work)
3066 {
3067 	struct btrfs_ordered_extent *ordered_extent;
3068 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3069 	btrfs_finish_ordered_io(ordered_extent);
3070 }
3071 
3072 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
3073 					  u64 end, int uptodate)
3074 {
3075 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3076 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3077 	struct btrfs_ordered_extent *ordered_extent = NULL;
3078 	struct btrfs_workqueue *wq;
3079 
3080 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3081 
3082 	ClearPagePrivate2(page);
3083 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3084 					    end - start + 1, uptodate))
3085 		return;
3086 
3087 	if (btrfs_is_free_space_inode(inode))
3088 		wq = fs_info->endio_freespace_worker;
3089 	else
3090 		wq = fs_info->endio_write_workers;
3091 
3092 	btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
3093 	btrfs_queue_work(wq, &ordered_extent->work);
3094 }
3095 
3096 /*
3097  * check_data_csum - verify checksum of one sector of uncompressed data
3098  * @inode:	inode
3099  * @io_bio:	btrfs_io_bio which contains the csum
3100  * @bio_offset:	offset to the beginning of the bio (in bytes)
3101  * @page:	page where is the data to be verified
3102  * @pgoff:	offset inside the page
3103  * @start:	logical offset in the file
3104  *
3105  * The length of such check is always one sector size.
3106  */
3107 static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
3108 			   u32 bio_offset, struct page *page, u32 pgoff,
3109 			   u64 start)
3110 {
3111 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3112 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3113 	char *kaddr;
3114 	u32 len = fs_info->sectorsize;
3115 	const u32 csum_size = fs_info->csum_size;
3116 	unsigned int offset_sectors;
3117 	u8 *csum_expected;
3118 	u8 csum[BTRFS_CSUM_SIZE];
3119 
3120 	ASSERT(pgoff + len <= PAGE_SIZE);
3121 
3122 	offset_sectors = bio_offset >> fs_info->sectorsize_bits;
3123 	csum_expected = ((u8 *)io_bio->csum) + offset_sectors * csum_size;
3124 
3125 	kaddr = kmap_atomic(page);
3126 	shash->tfm = fs_info->csum_shash;
3127 
3128 	crypto_shash_digest(shash, kaddr + pgoff, len, csum);
3129 
3130 	if (memcmp(csum, csum_expected, csum_size))
3131 		goto zeroit;
3132 
3133 	kunmap_atomic(kaddr);
3134 	return 0;
3135 zeroit:
3136 	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3137 				    io_bio->mirror_num);
3138 	if (io_bio->device)
3139 		btrfs_dev_stat_inc_and_print(io_bio->device,
3140 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
3141 	memset(kaddr + pgoff, 1, len);
3142 	flush_dcache_page(page);
3143 	kunmap_atomic(kaddr);
3144 	return -EIO;
3145 }
3146 
3147 /*
3148  * When reads are done, we need to check csums to verify the data is correct.
3149  * if there's a match, we allow the bio to finish.  If not, the code in
3150  * extent_io.c will try to find good copies for us.
3151  *
3152  * @bio_offset:	offset to the beginning of the bio (in bytes)
3153  * @start:	file offset of the range start
3154  * @end:	file offset of the range end (inclusive)
3155  */
3156 int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
3157 			   struct page *page, u64 start, u64 end)
3158 {
3159 	struct inode *inode = page->mapping->host;
3160 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3161 	struct btrfs_root *root = BTRFS_I(inode)->root;
3162 	const u32 sectorsize = root->fs_info->sectorsize;
3163 	u32 pg_off;
3164 
3165 	if (PageChecked(page)) {
3166 		ClearPageChecked(page);
3167 		return 0;
3168 	}
3169 
3170 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3171 		return 0;
3172 
3173 	if (!root->fs_info->csum_root)
3174 		return 0;
3175 
3176 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3177 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3178 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3179 		return 0;
3180 	}
3181 
3182 	ASSERT(page_offset(page) <= start &&
3183 	       end <= page_offset(page) + PAGE_SIZE - 1);
3184 	for (pg_off = offset_in_page(start);
3185 	     pg_off < offset_in_page(end);
3186 	     pg_off += sectorsize, bio_offset += sectorsize) {
3187 		int ret;
3188 
3189 		ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
3190 				      page_offset(page) + pg_off);
3191 		if (ret < 0)
3192 			return -EIO;
3193 	}
3194 	return 0;
3195 }
3196 
3197 /*
3198  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3199  *
3200  * @inode: The inode we want to perform iput on
3201  *
3202  * This function uses the generic vfs_inode::i_count to track whether we should
3203  * just decrement it (in case it's > 1) or if this is the last iput then link
3204  * the inode to the delayed iput machinery. Delayed iputs are processed at
3205  * transaction commit time/superblock commit/cleaner kthread.
3206  */
3207 void btrfs_add_delayed_iput(struct inode *inode)
3208 {
3209 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3210 	struct btrfs_inode *binode = BTRFS_I(inode);
3211 
3212 	if (atomic_add_unless(&inode->i_count, -1, 1))
3213 		return;
3214 
3215 	atomic_inc(&fs_info->nr_delayed_iputs);
3216 	spin_lock(&fs_info->delayed_iput_lock);
3217 	ASSERT(list_empty(&binode->delayed_iput));
3218 	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3219 	spin_unlock(&fs_info->delayed_iput_lock);
3220 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3221 		wake_up_process(fs_info->cleaner_kthread);
3222 }
3223 
3224 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3225 				    struct btrfs_inode *inode)
3226 {
3227 	list_del_init(&inode->delayed_iput);
3228 	spin_unlock(&fs_info->delayed_iput_lock);
3229 	iput(&inode->vfs_inode);
3230 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3231 		wake_up(&fs_info->delayed_iputs_wait);
3232 	spin_lock(&fs_info->delayed_iput_lock);
3233 }
3234 
3235 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3236 				   struct btrfs_inode *inode)
3237 {
3238 	if (!list_empty(&inode->delayed_iput)) {
3239 		spin_lock(&fs_info->delayed_iput_lock);
3240 		if (!list_empty(&inode->delayed_iput))
3241 			run_delayed_iput_locked(fs_info, inode);
3242 		spin_unlock(&fs_info->delayed_iput_lock);
3243 	}
3244 }
3245 
3246 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3247 {
3248 
3249 	spin_lock(&fs_info->delayed_iput_lock);
3250 	while (!list_empty(&fs_info->delayed_iputs)) {
3251 		struct btrfs_inode *inode;
3252 
3253 		inode = list_first_entry(&fs_info->delayed_iputs,
3254 				struct btrfs_inode, delayed_iput);
3255 		run_delayed_iput_locked(fs_info, inode);
3256 		cond_resched_lock(&fs_info->delayed_iput_lock);
3257 	}
3258 	spin_unlock(&fs_info->delayed_iput_lock);
3259 }
3260 
3261 /**
3262  * Wait for flushing all delayed iputs
3263  *
3264  * @fs_info:  the filesystem
3265  *
3266  * This will wait on any delayed iputs that are currently running with KILLABLE
3267  * set.  Once they are all done running we will return, unless we are killed in
3268  * which case we return EINTR. This helps in user operations like fallocate etc
3269  * that might get blocked on the iputs.
3270  *
3271  * Return EINTR if we were killed, 0 if nothing's pending
3272  */
3273 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3274 {
3275 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3276 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3277 	if (ret)
3278 		return -EINTR;
3279 	return 0;
3280 }
3281 
3282 /*
3283  * This creates an orphan entry for the given inode in case something goes wrong
3284  * in the middle of an unlink.
3285  */
3286 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3287 		     struct btrfs_inode *inode)
3288 {
3289 	int ret;
3290 
3291 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3292 	if (ret && ret != -EEXIST) {
3293 		btrfs_abort_transaction(trans, ret);
3294 		return ret;
3295 	}
3296 
3297 	return 0;
3298 }
3299 
3300 /*
3301  * We have done the delete so we can go ahead and remove the orphan item for
3302  * this particular inode.
3303  */
3304 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3305 			    struct btrfs_inode *inode)
3306 {
3307 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3308 }
3309 
3310 /*
3311  * this cleans up any orphans that may be left on the list from the last use
3312  * of this root.
3313  */
3314 int btrfs_orphan_cleanup(struct btrfs_root *root)
3315 {
3316 	struct btrfs_fs_info *fs_info = root->fs_info;
3317 	struct btrfs_path *path;
3318 	struct extent_buffer *leaf;
3319 	struct btrfs_key key, found_key;
3320 	struct btrfs_trans_handle *trans;
3321 	struct inode *inode;
3322 	u64 last_objectid = 0;
3323 	int ret = 0, nr_unlink = 0;
3324 
3325 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3326 		return 0;
3327 
3328 	path = btrfs_alloc_path();
3329 	if (!path) {
3330 		ret = -ENOMEM;
3331 		goto out;
3332 	}
3333 	path->reada = READA_BACK;
3334 
3335 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3336 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3337 	key.offset = (u64)-1;
3338 
3339 	while (1) {
3340 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3341 		if (ret < 0)
3342 			goto out;
3343 
3344 		/*
3345 		 * if ret == 0 means we found what we were searching for, which
3346 		 * is weird, but possible, so only screw with path if we didn't
3347 		 * find the key and see if we have stuff that matches
3348 		 */
3349 		if (ret > 0) {
3350 			ret = 0;
3351 			if (path->slots[0] == 0)
3352 				break;
3353 			path->slots[0]--;
3354 		}
3355 
3356 		/* pull out the item */
3357 		leaf = path->nodes[0];
3358 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3359 
3360 		/* make sure the item matches what we want */
3361 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3362 			break;
3363 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3364 			break;
3365 
3366 		/* release the path since we're done with it */
3367 		btrfs_release_path(path);
3368 
3369 		/*
3370 		 * this is where we are basically btrfs_lookup, without the
3371 		 * crossing root thing.  we store the inode number in the
3372 		 * offset of the orphan item.
3373 		 */
3374 
3375 		if (found_key.offset == last_objectid) {
3376 			btrfs_err(fs_info,
3377 				  "Error removing orphan entry, stopping orphan cleanup");
3378 			ret = -EINVAL;
3379 			goto out;
3380 		}
3381 
3382 		last_objectid = found_key.offset;
3383 
3384 		found_key.objectid = found_key.offset;
3385 		found_key.type = BTRFS_INODE_ITEM_KEY;
3386 		found_key.offset = 0;
3387 		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3388 		ret = PTR_ERR_OR_ZERO(inode);
3389 		if (ret && ret != -ENOENT)
3390 			goto out;
3391 
3392 		if (ret == -ENOENT && root == fs_info->tree_root) {
3393 			struct btrfs_root *dead_root;
3394 			int is_dead_root = 0;
3395 
3396 			/*
3397 			 * This is an orphan in the tree root. Currently these
3398 			 * could come from 2 sources:
3399 			 *  a) a root (snapshot/subvolume) deletion in progress
3400 			 *  b) a free space cache inode
3401 			 * We need to distinguish those two, as the orphan item
3402 			 * for a root must not get deleted before the deletion
3403 			 * of the snapshot/subvolume's tree completes.
3404 			 *
3405 			 * btrfs_find_orphan_roots() ran before us, which has
3406 			 * found all deleted roots and loaded them into
3407 			 * fs_info->fs_roots_radix. So here we can find if an
3408 			 * orphan item corresponds to a deleted root by looking
3409 			 * up the root from that radix tree.
3410 			 */
3411 
3412 			spin_lock(&fs_info->fs_roots_radix_lock);
3413 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3414 							 (unsigned long)found_key.objectid);
3415 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3416 				is_dead_root = 1;
3417 			spin_unlock(&fs_info->fs_roots_radix_lock);
3418 
3419 			if (is_dead_root) {
3420 				/* prevent this orphan from being found again */
3421 				key.offset = found_key.objectid - 1;
3422 				continue;
3423 			}
3424 
3425 		}
3426 
3427 		/*
3428 		 * If we have an inode with links, there are a couple of
3429 		 * possibilities. Old kernels (before v3.12) used to create an
3430 		 * orphan item for truncate indicating that there were possibly
3431 		 * extent items past i_size that needed to be deleted. In v3.12,
3432 		 * truncate was changed to update i_size in sync with the extent
3433 		 * items, but the (useless) orphan item was still created. Since
3434 		 * v4.18, we don't create the orphan item for truncate at all.
3435 		 *
3436 		 * So, this item could mean that we need to do a truncate, but
3437 		 * only if this filesystem was last used on a pre-v3.12 kernel
3438 		 * and was not cleanly unmounted. The odds of that are quite
3439 		 * slim, and it's a pain to do the truncate now, so just delete
3440 		 * the orphan item.
3441 		 *
3442 		 * It's also possible that this orphan item was supposed to be
3443 		 * deleted but wasn't. The inode number may have been reused,
3444 		 * but either way, we can delete the orphan item.
3445 		 */
3446 		if (ret == -ENOENT || inode->i_nlink) {
3447 			if (!ret)
3448 				iput(inode);
3449 			trans = btrfs_start_transaction(root, 1);
3450 			if (IS_ERR(trans)) {
3451 				ret = PTR_ERR(trans);
3452 				goto out;
3453 			}
3454 			btrfs_debug(fs_info, "auto deleting %Lu",
3455 				    found_key.objectid);
3456 			ret = btrfs_del_orphan_item(trans, root,
3457 						    found_key.objectid);
3458 			btrfs_end_transaction(trans);
3459 			if (ret)
3460 				goto out;
3461 			continue;
3462 		}
3463 
3464 		nr_unlink++;
3465 
3466 		/* this will do delete_inode and everything for us */
3467 		iput(inode);
3468 	}
3469 	/* release the path since we're done with it */
3470 	btrfs_release_path(path);
3471 
3472 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3473 
3474 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3475 		trans = btrfs_join_transaction(root);
3476 		if (!IS_ERR(trans))
3477 			btrfs_end_transaction(trans);
3478 	}
3479 
3480 	if (nr_unlink)
3481 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3482 
3483 out:
3484 	if (ret)
3485 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3486 	btrfs_free_path(path);
3487 	return ret;
3488 }
3489 
3490 /*
3491  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3492  * don't find any xattrs, we know there can't be any acls.
3493  *
3494  * slot is the slot the inode is in, objectid is the objectid of the inode
3495  */
3496 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3497 					  int slot, u64 objectid,
3498 					  int *first_xattr_slot)
3499 {
3500 	u32 nritems = btrfs_header_nritems(leaf);
3501 	struct btrfs_key found_key;
3502 	static u64 xattr_access = 0;
3503 	static u64 xattr_default = 0;
3504 	int scanned = 0;
3505 
3506 	if (!xattr_access) {
3507 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3508 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3509 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3510 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3511 	}
3512 
3513 	slot++;
3514 	*first_xattr_slot = -1;
3515 	while (slot < nritems) {
3516 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3517 
3518 		/* we found a different objectid, there must not be acls */
3519 		if (found_key.objectid != objectid)
3520 			return 0;
3521 
3522 		/* we found an xattr, assume we've got an acl */
3523 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3524 			if (*first_xattr_slot == -1)
3525 				*first_xattr_slot = slot;
3526 			if (found_key.offset == xattr_access ||
3527 			    found_key.offset == xattr_default)
3528 				return 1;
3529 		}
3530 
3531 		/*
3532 		 * we found a key greater than an xattr key, there can't
3533 		 * be any acls later on
3534 		 */
3535 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3536 			return 0;
3537 
3538 		slot++;
3539 		scanned++;
3540 
3541 		/*
3542 		 * it goes inode, inode backrefs, xattrs, extents,
3543 		 * so if there are a ton of hard links to an inode there can
3544 		 * be a lot of backrefs.  Don't waste time searching too hard,
3545 		 * this is just an optimization
3546 		 */
3547 		if (scanned >= 8)
3548 			break;
3549 	}
3550 	/* we hit the end of the leaf before we found an xattr or
3551 	 * something larger than an xattr.  We have to assume the inode
3552 	 * has acls
3553 	 */
3554 	if (*first_xattr_slot == -1)
3555 		*first_xattr_slot = slot;
3556 	return 1;
3557 }
3558 
3559 /*
3560  * read an inode from the btree into the in-memory inode
3561  */
3562 static int btrfs_read_locked_inode(struct inode *inode,
3563 				   struct btrfs_path *in_path)
3564 {
3565 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3566 	struct btrfs_path *path = in_path;
3567 	struct extent_buffer *leaf;
3568 	struct btrfs_inode_item *inode_item;
3569 	struct btrfs_root *root = BTRFS_I(inode)->root;
3570 	struct btrfs_key location;
3571 	unsigned long ptr;
3572 	int maybe_acls;
3573 	u32 rdev;
3574 	int ret;
3575 	bool filled = false;
3576 	int first_xattr_slot;
3577 
3578 	ret = btrfs_fill_inode(inode, &rdev);
3579 	if (!ret)
3580 		filled = true;
3581 
3582 	if (!path) {
3583 		path = btrfs_alloc_path();
3584 		if (!path)
3585 			return -ENOMEM;
3586 	}
3587 
3588 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3589 
3590 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3591 	if (ret) {
3592 		if (path != in_path)
3593 			btrfs_free_path(path);
3594 		return ret;
3595 	}
3596 
3597 	leaf = path->nodes[0];
3598 
3599 	if (filled)
3600 		goto cache_index;
3601 
3602 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3603 				    struct btrfs_inode_item);
3604 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3605 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3606 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3607 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3608 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3609 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3610 			round_up(i_size_read(inode), fs_info->sectorsize));
3611 
3612 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3613 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3614 
3615 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3616 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3617 
3618 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3619 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3620 
3621 	BTRFS_I(inode)->i_otime.tv_sec =
3622 		btrfs_timespec_sec(leaf, &inode_item->otime);
3623 	BTRFS_I(inode)->i_otime.tv_nsec =
3624 		btrfs_timespec_nsec(leaf, &inode_item->otime);
3625 
3626 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3627 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3628 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3629 
3630 	inode_set_iversion_queried(inode,
3631 				   btrfs_inode_sequence(leaf, inode_item));
3632 	inode->i_generation = BTRFS_I(inode)->generation;
3633 	inode->i_rdev = 0;
3634 	rdev = btrfs_inode_rdev(leaf, inode_item);
3635 
3636 	BTRFS_I(inode)->index_cnt = (u64)-1;
3637 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3638 
3639 cache_index:
3640 	/*
3641 	 * If we were modified in the current generation and evicted from memory
3642 	 * and then re-read we need to do a full sync since we don't have any
3643 	 * idea about which extents were modified before we were evicted from
3644 	 * cache.
3645 	 *
3646 	 * This is required for both inode re-read from disk and delayed inode
3647 	 * in delayed_nodes_tree.
3648 	 */
3649 	if (BTRFS_I(inode)->last_trans == fs_info->generation)
3650 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3651 			&BTRFS_I(inode)->runtime_flags);
3652 
3653 	/*
3654 	 * We don't persist the id of the transaction where an unlink operation
3655 	 * against the inode was last made. So here we assume the inode might
3656 	 * have been evicted, and therefore the exact value of last_unlink_trans
3657 	 * lost, and set it to last_trans to avoid metadata inconsistencies
3658 	 * between the inode and its parent if the inode is fsync'ed and the log
3659 	 * replayed. For example, in the scenario:
3660 	 *
3661 	 * touch mydir/foo
3662 	 * ln mydir/foo mydir/bar
3663 	 * sync
3664 	 * unlink mydir/bar
3665 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3666 	 * xfs_io -c fsync mydir/foo
3667 	 * <power failure>
3668 	 * mount fs, triggers fsync log replay
3669 	 *
3670 	 * We must make sure that when we fsync our inode foo we also log its
3671 	 * parent inode, otherwise after log replay the parent still has the
3672 	 * dentry with the "bar" name but our inode foo has a link count of 1
3673 	 * and doesn't have an inode ref with the name "bar" anymore.
3674 	 *
3675 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3676 	 * but it guarantees correctness at the expense of occasional full
3677 	 * transaction commits on fsync if our inode is a directory, or if our
3678 	 * inode is not a directory, logging its parent unnecessarily.
3679 	 */
3680 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3681 
3682 	/*
3683 	 * Same logic as for last_unlink_trans. We don't persist the generation
3684 	 * of the last transaction where this inode was used for a reflink
3685 	 * operation, so after eviction and reloading the inode we must be
3686 	 * pessimistic and assume the last transaction that modified the inode.
3687 	 */
3688 	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3689 
3690 	path->slots[0]++;
3691 	if (inode->i_nlink != 1 ||
3692 	    path->slots[0] >= btrfs_header_nritems(leaf))
3693 		goto cache_acl;
3694 
3695 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3696 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3697 		goto cache_acl;
3698 
3699 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3700 	if (location.type == BTRFS_INODE_REF_KEY) {
3701 		struct btrfs_inode_ref *ref;
3702 
3703 		ref = (struct btrfs_inode_ref *)ptr;
3704 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3705 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3706 		struct btrfs_inode_extref *extref;
3707 
3708 		extref = (struct btrfs_inode_extref *)ptr;
3709 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3710 								     extref);
3711 	}
3712 cache_acl:
3713 	/*
3714 	 * try to precache a NULL acl entry for files that don't have
3715 	 * any xattrs or acls
3716 	 */
3717 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3718 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3719 	if (first_xattr_slot != -1) {
3720 		path->slots[0] = first_xattr_slot;
3721 		ret = btrfs_load_inode_props(inode, path);
3722 		if (ret)
3723 			btrfs_err(fs_info,
3724 				  "error loading props for ino %llu (root %llu): %d",
3725 				  btrfs_ino(BTRFS_I(inode)),
3726 				  root->root_key.objectid, ret);
3727 	}
3728 	if (path != in_path)
3729 		btrfs_free_path(path);
3730 
3731 	if (!maybe_acls)
3732 		cache_no_acl(inode);
3733 
3734 	switch (inode->i_mode & S_IFMT) {
3735 	case S_IFREG:
3736 		inode->i_mapping->a_ops = &btrfs_aops;
3737 		inode->i_fop = &btrfs_file_operations;
3738 		inode->i_op = &btrfs_file_inode_operations;
3739 		break;
3740 	case S_IFDIR:
3741 		inode->i_fop = &btrfs_dir_file_operations;
3742 		inode->i_op = &btrfs_dir_inode_operations;
3743 		break;
3744 	case S_IFLNK:
3745 		inode->i_op = &btrfs_symlink_inode_operations;
3746 		inode_nohighmem(inode);
3747 		inode->i_mapping->a_ops = &btrfs_aops;
3748 		break;
3749 	default:
3750 		inode->i_op = &btrfs_special_inode_operations;
3751 		init_special_inode(inode, inode->i_mode, rdev);
3752 		break;
3753 	}
3754 
3755 	btrfs_sync_inode_flags_to_i_flags(inode);
3756 	return 0;
3757 }
3758 
3759 /*
3760  * given a leaf and an inode, copy the inode fields into the leaf
3761  */
3762 static void fill_inode_item(struct btrfs_trans_handle *trans,
3763 			    struct extent_buffer *leaf,
3764 			    struct btrfs_inode_item *item,
3765 			    struct inode *inode)
3766 {
3767 	struct btrfs_map_token token;
3768 
3769 	btrfs_init_map_token(&token, leaf);
3770 
3771 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3772 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3773 	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3774 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3775 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3776 
3777 	btrfs_set_token_timespec_sec(&token, &item->atime,
3778 				     inode->i_atime.tv_sec);
3779 	btrfs_set_token_timespec_nsec(&token, &item->atime,
3780 				      inode->i_atime.tv_nsec);
3781 
3782 	btrfs_set_token_timespec_sec(&token, &item->mtime,
3783 				     inode->i_mtime.tv_sec);
3784 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
3785 				      inode->i_mtime.tv_nsec);
3786 
3787 	btrfs_set_token_timespec_sec(&token, &item->ctime,
3788 				     inode->i_ctime.tv_sec);
3789 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
3790 				      inode->i_ctime.tv_nsec);
3791 
3792 	btrfs_set_token_timespec_sec(&token, &item->otime,
3793 				     BTRFS_I(inode)->i_otime.tv_sec);
3794 	btrfs_set_token_timespec_nsec(&token, &item->otime,
3795 				      BTRFS_I(inode)->i_otime.tv_nsec);
3796 
3797 	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3798 	btrfs_set_token_inode_generation(&token, item,
3799 					 BTRFS_I(inode)->generation);
3800 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
3801 	btrfs_set_token_inode_transid(&token, item, trans->transid);
3802 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
3803 	btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
3804 	btrfs_set_token_inode_block_group(&token, item, 0);
3805 }
3806 
3807 /*
3808  * copy everything in the in-memory inode into the btree.
3809  */
3810 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3811 				struct btrfs_root *root,
3812 				struct btrfs_inode *inode)
3813 {
3814 	struct btrfs_inode_item *inode_item;
3815 	struct btrfs_path *path;
3816 	struct extent_buffer *leaf;
3817 	int ret;
3818 
3819 	path = btrfs_alloc_path();
3820 	if (!path)
3821 		return -ENOMEM;
3822 
3823 	ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
3824 	if (ret) {
3825 		if (ret > 0)
3826 			ret = -ENOENT;
3827 		goto failed;
3828 	}
3829 
3830 	leaf = path->nodes[0];
3831 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3832 				    struct btrfs_inode_item);
3833 
3834 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
3835 	btrfs_mark_buffer_dirty(leaf);
3836 	btrfs_set_inode_last_trans(trans, inode);
3837 	ret = 0;
3838 failed:
3839 	btrfs_free_path(path);
3840 	return ret;
3841 }
3842 
3843 /*
3844  * copy everything in the in-memory inode into the btree.
3845  */
3846 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3847 				struct btrfs_root *root,
3848 				struct btrfs_inode *inode)
3849 {
3850 	struct btrfs_fs_info *fs_info = root->fs_info;
3851 	int ret;
3852 
3853 	/*
3854 	 * If the inode is a free space inode, we can deadlock during commit
3855 	 * if we put it into the delayed code.
3856 	 *
3857 	 * The data relocation inode should also be directly updated
3858 	 * without delay
3859 	 */
3860 	if (!btrfs_is_free_space_inode(inode)
3861 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3862 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3863 		btrfs_update_root_times(trans, root);
3864 
3865 		ret = btrfs_delayed_update_inode(trans, root, inode);
3866 		if (!ret)
3867 			btrfs_set_inode_last_trans(trans, inode);
3868 		return ret;
3869 	}
3870 
3871 	return btrfs_update_inode_item(trans, root, inode);
3872 }
3873 
3874 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3875 				struct btrfs_root *root, struct btrfs_inode *inode)
3876 {
3877 	int ret;
3878 
3879 	ret = btrfs_update_inode(trans, root, inode);
3880 	if (ret == -ENOSPC)
3881 		return btrfs_update_inode_item(trans, root, inode);
3882 	return ret;
3883 }
3884 
3885 /*
3886  * unlink helper that gets used here in inode.c and in the tree logging
3887  * recovery code.  It remove a link in a directory with a given name, and
3888  * also drops the back refs in the inode to the directory
3889  */
3890 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3891 				struct btrfs_root *root,
3892 				struct btrfs_inode *dir,
3893 				struct btrfs_inode *inode,
3894 				const char *name, int name_len)
3895 {
3896 	struct btrfs_fs_info *fs_info = root->fs_info;
3897 	struct btrfs_path *path;
3898 	int ret = 0;
3899 	struct btrfs_dir_item *di;
3900 	u64 index;
3901 	u64 ino = btrfs_ino(inode);
3902 	u64 dir_ino = btrfs_ino(dir);
3903 
3904 	path = btrfs_alloc_path();
3905 	if (!path) {
3906 		ret = -ENOMEM;
3907 		goto out;
3908 	}
3909 
3910 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3911 				    name, name_len, -1);
3912 	if (IS_ERR_OR_NULL(di)) {
3913 		ret = di ? PTR_ERR(di) : -ENOENT;
3914 		goto err;
3915 	}
3916 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3917 	if (ret)
3918 		goto err;
3919 	btrfs_release_path(path);
3920 
3921 	/*
3922 	 * If we don't have dir index, we have to get it by looking up
3923 	 * the inode ref, since we get the inode ref, remove it directly,
3924 	 * it is unnecessary to do delayed deletion.
3925 	 *
3926 	 * But if we have dir index, needn't search inode ref to get it.
3927 	 * Since the inode ref is close to the inode item, it is better
3928 	 * that we delay to delete it, and just do this deletion when
3929 	 * we update the inode item.
3930 	 */
3931 	if (inode->dir_index) {
3932 		ret = btrfs_delayed_delete_inode_ref(inode);
3933 		if (!ret) {
3934 			index = inode->dir_index;
3935 			goto skip_backref;
3936 		}
3937 	}
3938 
3939 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3940 				  dir_ino, &index);
3941 	if (ret) {
3942 		btrfs_info(fs_info,
3943 			"failed to delete reference to %.*s, inode %llu parent %llu",
3944 			name_len, name, ino, dir_ino);
3945 		btrfs_abort_transaction(trans, ret);
3946 		goto err;
3947 	}
3948 skip_backref:
3949 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
3950 	if (ret) {
3951 		btrfs_abort_transaction(trans, ret);
3952 		goto err;
3953 	}
3954 
3955 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3956 			dir_ino);
3957 	if (ret != 0 && ret != -ENOENT) {
3958 		btrfs_abort_transaction(trans, ret);
3959 		goto err;
3960 	}
3961 
3962 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3963 			index);
3964 	if (ret == -ENOENT)
3965 		ret = 0;
3966 	else if (ret)
3967 		btrfs_abort_transaction(trans, ret);
3968 
3969 	/*
3970 	 * If we have a pending delayed iput we could end up with the final iput
3971 	 * being run in btrfs-cleaner context.  If we have enough of these built
3972 	 * up we can end up burning a lot of time in btrfs-cleaner without any
3973 	 * way to throttle the unlinks.  Since we're currently holding a ref on
3974 	 * the inode we can run the delayed iput here without any issues as the
3975 	 * final iput won't be done until after we drop the ref we're currently
3976 	 * holding.
3977 	 */
3978 	btrfs_run_delayed_iput(fs_info, inode);
3979 err:
3980 	btrfs_free_path(path);
3981 	if (ret)
3982 		goto out;
3983 
3984 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
3985 	inode_inc_iversion(&inode->vfs_inode);
3986 	inode_inc_iversion(&dir->vfs_inode);
3987 	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
3988 		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
3989 	ret = btrfs_update_inode(trans, root, dir);
3990 out:
3991 	return ret;
3992 }
3993 
3994 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3995 		       struct btrfs_root *root,
3996 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
3997 		       const char *name, int name_len)
3998 {
3999 	int ret;
4000 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4001 	if (!ret) {
4002 		drop_nlink(&inode->vfs_inode);
4003 		ret = btrfs_update_inode(trans, root, inode);
4004 	}
4005 	return ret;
4006 }
4007 
4008 /*
4009  * helper to start transaction for unlink and rmdir.
4010  *
4011  * unlink and rmdir are special in btrfs, they do not always free space, so
4012  * if we cannot make our reservations the normal way try and see if there is
4013  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4014  * allow the unlink to occur.
4015  */
4016 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4017 {
4018 	struct btrfs_root *root = BTRFS_I(dir)->root;
4019 
4020 	/*
4021 	 * 1 for the possible orphan item
4022 	 * 1 for the dir item
4023 	 * 1 for the dir index
4024 	 * 1 for the inode ref
4025 	 * 1 for the inode
4026 	 */
4027 	return btrfs_start_transaction_fallback_global_rsv(root, 5);
4028 }
4029 
4030 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4031 {
4032 	struct btrfs_root *root = BTRFS_I(dir)->root;
4033 	struct btrfs_trans_handle *trans;
4034 	struct inode *inode = d_inode(dentry);
4035 	int ret;
4036 
4037 	trans = __unlink_start_trans(dir);
4038 	if (IS_ERR(trans))
4039 		return PTR_ERR(trans);
4040 
4041 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4042 			0);
4043 
4044 	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4045 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4046 			dentry->d_name.len);
4047 	if (ret)
4048 		goto out;
4049 
4050 	if (inode->i_nlink == 0) {
4051 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4052 		if (ret)
4053 			goto out;
4054 	}
4055 
4056 out:
4057 	btrfs_end_transaction(trans);
4058 	btrfs_btree_balance_dirty(root->fs_info);
4059 	return ret;
4060 }
4061 
4062 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4063 			       struct inode *dir, struct dentry *dentry)
4064 {
4065 	struct btrfs_root *root = BTRFS_I(dir)->root;
4066 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4067 	struct btrfs_path *path;
4068 	struct extent_buffer *leaf;
4069 	struct btrfs_dir_item *di;
4070 	struct btrfs_key key;
4071 	const char *name = dentry->d_name.name;
4072 	int name_len = dentry->d_name.len;
4073 	u64 index;
4074 	int ret;
4075 	u64 objectid;
4076 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4077 
4078 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4079 		objectid = inode->root->root_key.objectid;
4080 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4081 		objectid = inode->location.objectid;
4082 	} else {
4083 		WARN_ON(1);
4084 		return -EINVAL;
4085 	}
4086 
4087 	path = btrfs_alloc_path();
4088 	if (!path)
4089 		return -ENOMEM;
4090 
4091 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4092 				   name, name_len, -1);
4093 	if (IS_ERR_OR_NULL(di)) {
4094 		ret = di ? PTR_ERR(di) : -ENOENT;
4095 		goto out;
4096 	}
4097 
4098 	leaf = path->nodes[0];
4099 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4100 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4101 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4102 	if (ret) {
4103 		btrfs_abort_transaction(trans, ret);
4104 		goto out;
4105 	}
4106 	btrfs_release_path(path);
4107 
4108 	/*
4109 	 * This is a placeholder inode for a subvolume we didn't have a
4110 	 * reference to at the time of the snapshot creation.  In the meantime
4111 	 * we could have renamed the real subvol link into our snapshot, so
4112 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorret.
4113 	 * Instead simply lookup the dir_index_item for this entry so we can
4114 	 * remove it.  Otherwise we know we have a ref to the root and we can
4115 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4116 	 */
4117 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4118 		di = btrfs_search_dir_index_item(root, path, dir_ino,
4119 						 name, name_len);
4120 		if (IS_ERR_OR_NULL(di)) {
4121 			if (!di)
4122 				ret = -ENOENT;
4123 			else
4124 				ret = PTR_ERR(di);
4125 			btrfs_abort_transaction(trans, ret);
4126 			goto out;
4127 		}
4128 
4129 		leaf = path->nodes[0];
4130 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4131 		index = key.offset;
4132 		btrfs_release_path(path);
4133 	} else {
4134 		ret = btrfs_del_root_ref(trans, objectid,
4135 					 root->root_key.objectid, dir_ino,
4136 					 &index, name, name_len);
4137 		if (ret) {
4138 			btrfs_abort_transaction(trans, ret);
4139 			goto out;
4140 		}
4141 	}
4142 
4143 	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4144 	if (ret) {
4145 		btrfs_abort_transaction(trans, ret);
4146 		goto out;
4147 	}
4148 
4149 	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4150 	inode_inc_iversion(dir);
4151 	dir->i_mtime = dir->i_ctime = current_time(dir);
4152 	ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
4153 	if (ret)
4154 		btrfs_abort_transaction(trans, ret);
4155 out:
4156 	btrfs_free_path(path);
4157 	return ret;
4158 }
4159 
4160 /*
4161  * Helper to check if the subvolume references other subvolumes or if it's
4162  * default.
4163  */
4164 static noinline int may_destroy_subvol(struct btrfs_root *root)
4165 {
4166 	struct btrfs_fs_info *fs_info = root->fs_info;
4167 	struct btrfs_path *path;
4168 	struct btrfs_dir_item *di;
4169 	struct btrfs_key key;
4170 	u64 dir_id;
4171 	int ret;
4172 
4173 	path = btrfs_alloc_path();
4174 	if (!path)
4175 		return -ENOMEM;
4176 
4177 	/* Make sure this root isn't set as the default subvol */
4178 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4179 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4180 				   dir_id, "default", 7, 0);
4181 	if (di && !IS_ERR(di)) {
4182 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4183 		if (key.objectid == root->root_key.objectid) {
4184 			ret = -EPERM;
4185 			btrfs_err(fs_info,
4186 				  "deleting default subvolume %llu is not allowed",
4187 				  key.objectid);
4188 			goto out;
4189 		}
4190 		btrfs_release_path(path);
4191 	}
4192 
4193 	key.objectid = root->root_key.objectid;
4194 	key.type = BTRFS_ROOT_REF_KEY;
4195 	key.offset = (u64)-1;
4196 
4197 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4198 	if (ret < 0)
4199 		goto out;
4200 	BUG_ON(ret == 0);
4201 
4202 	ret = 0;
4203 	if (path->slots[0] > 0) {
4204 		path->slots[0]--;
4205 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4206 		if (key.objectid == root->root_key.objectid &&
4207 		    key.type == BTRFS_ROOT_REF_KEY)
4208 			ret = -ENOTEMPTY;
4209 	}
4210 out:
4211 	btrfs_free_path(path);
4212 	return ret;
4213 }
4214 
4215 /* Delete all dentries for inodes belonging to the root */
4216 static void btrfs_prune_dentries(struct btrfs_root *root)
4217 {
4218 	struct btrfs_fs_info *fs_info = root->fs_info;
4219 	struct rb_node *node;
4220 	struct rb_node *prev;
4221 	struct btrfs_inode *entry;
4222 	struct inode *inode;
4223 	u64 objectid = 0;
4224 
4225 	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4226 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4227 
4228 	spin_lock(&root->inode_lock);
4229 again:
4230 	node = root->inode_tree.rb_node;
4231 	prev = NULL;
4232 	while (node) {
4233 		prev = node;
4234 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4235 
4236 		if (objectid < btrfs_ino(entry))
4237 			node = node->rb_left;
4238 		else if (objectid > btrfs_ino(entry))
4239 			node = node->rb_right;
4240 		else
4241 			break;
4242 	}
4243 	if (!node) {
4244 		while (prev) {
4245 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4246 			if (objectid <= btrfs_ino(entry)) {
4247 				node = prev;
4248 				break;
4249 			}
4250 			prev = rb_next(prev);
4251 		}
4252 	}
4253 	while (node) {
4254 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4255 		objectid = btrfs_ino(entry) + 1;
4256 		inode = igrab(&entry->vfs_inode);
4257 		if (inode) {
4258 			spin_unlock(&root->inode_lock);
4259 			if (atomic_read(&inode->i_count) > 1)
4260 				d_prune_aliases(inode);
4261 			/*
4262 			 * btrfs_drop_inode will have it removed from the inode
4263 			 * cache when its usage count hits zero.
4264 			 */
4265 			iput(inode);
4266 			cond_resched();
4267 			spin_lock(&root->inode_lock);
4268 			goto again;
4269 		}
4270 
4271 		if (cond_resched_lock(&root->inode_lock))
4272 			goto again;
4273 
4274 		node = rb_next(node);
4275 	}
4276 	spin_unlock(&root->inode_lock);
4277 }
4278 
4279 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4280 {
4281 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4282 	struct btrfs_root *root = BTRFS_I(dir)->root;
4283 	struct inode *inode = d_inode(dentry);
4284 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4285 	struct btrfs_trans_handle *trans;
4286 	struct btrfs_block_rsv block_rsv;
4287 	u64 root_flags;
4288 	int ret;
4289 
4290 	/*
4291 	 * Don't allow to delete a subvolume with send in progress. This is
4292 	 * inside the inode lock so the error handling that has to drop the bit
4293 	 * again is not run concurrently.
4294 	 */
4295 	spin_lock(&dest->root_item_lock);
4296 	if (dest->send_in_progress) {
4297 		spin_unlock(&dest->root_item_lock);
4298 		btrfs_warn(fs_info,
4299 			   "attempt to delete subvolume %llu during send",
4300 			   dest->root_key.objectid);
4301 		return -EPERM;
4302 	}
4303 	root_flags = btrfs_root_flags(&dest->root_item);
4304 	btrfs_set_root_flags(&dest->root_item,
4305 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4306 	spin_unlock(&dest->root_item_lock);
4307 
4308 	down_write(&fs_info->subvol_sem);
4309 
4310 	ret = may_destroy_subvol(dest);
4311 	if (ret)
4312 		goto out_up_write;
4313 
4314 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4315 	/*
4316 	 * One for dir inode,
4317 	 * two for dir entries,
4318 	 * two for root ref/backref.
4319 	 */
4320 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4321 	if (ret)
4322 		goto out_up_write;
4323 
4324 	trans = btrfs_start_transaction(root, 0);
4325 	if (IS_ERR(trans)) {
4326 		ret = PTR_ERR(trans);
4327 		goto out_release;
4328 	}
4329 	trans->block_rsv = &block_rsv;
4330 	trans->bytes_reserved = block_rsv.size;
4331 
4332 	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4333 
4334 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4335 	if (ret) {
4336 		btrfs_abort_transaction(trans, ret);
4337 		goto out_end_trans;
4338 	}
4339 
4340 	ret = btrfs_record_root_in_trans(trans, dest);
4341 	if (ret) {
4342 		btrfs_abort_transaction(trans, ret);
4343 		goto out_end_trans;
4344 	}
4345 
4346 	memset(&dest->root_item.drop_progress, 0,
4347 		sizeof(dest->root_item.drop_progress));
4348 	btrfs_set_root_drop_level(&dest->root_item, 0);
4349 	btrfs_set_root_refs(&dest->root_item, 0);
4350 
4351 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4352 		ret = btrfs_insert_orphan_item(trans,
4353 					fs_info->tree_root,
4354 					dest->root_key.objectid);
4355 		if (ret) {
4356 			btrfs_abort_transaction(trans, ret);
4357 			goto out_end_trans;
4358 		}
4359 	}
4360 
4361 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4362 				  BTRFS_UUID_KEY_SUBVOL,
4363 				  dest->root_key.objectid);
4364 	if (ret && ret != -ENOENT) {
4365 		btrfs_abort_transaction(trans, ret);
4366 		goto out_end_trans;
4367 	}
4368 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4369 		ret = btrfs_uuid_tree_remove(trans,
4370 					  dest->root_item.received_uuid,
4371 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4372 					  dest->root_key.objectid);
4373 		if (ret && ret != -ENOENT) {
4374 			btrfs_abort_transaction(trans, ret);
4375 			goto out_end_trans;
4376 		}
4377 	}
4378 
4379 	free_anon_bdev(dest->anon_dev);
4380 	dest->anon_dev = 0;
4381 out_end_trans:
4382 	trans->block_rsv = NULL;
4383 	trans->bytes_reserved = 0;
4384 	ret = btrfs_end_transaction(trans);
4385 	inode->i_flags |= S_DEAD;
4386 out_release:
4387 	btrfs_subvolume_release_metadata(root, &block_rsv);
4388 out_up_write:
4389 	up_write(&fs_info->subvol_sem);
4390 	if (ret) {
4391 		spin_lock(&dest->root_item_lock);
4392 		root_flags = btrfs_root_flags(&dest->root_item);
4393 		btrfs_set_root_flags(&dest->root_item,
4394 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4395 		spin_unlock(&dest->root_item_lock);
4396 	} else {
4397 		d_invalidate(dentry);
4398 		btrfs_prune_dentries(dest);
4399 		ASSERT(dest->send_in_progress == 0);
4400 	}
4401 
4402 	return ret;
4403 }
4404 
4405 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4406 {
4407 	struct inode *inode = d_inode(dentry);
4408 	int err = 0;
4409 	struct btrfs_root *root = BTRFS_I(dir)->root;
4410 	struct btrfs_trans_handle *trans;
4411 	u64 last_unlink_trans;
4412 
4413 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4414 		return -ENOTEMPTY;
4415 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4416 		return btrfs_delete_subvolume(dir, dentry);
4417 
4418 	trans = __unlink_start_trans(dir);
4419 	if (IS_ERR(trans))
4420 		return PTR_ERR(trans);
4421 
4422 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4423 		err = btrfs_unlink_subvol(trans, dir, dentry);
4424 		goto out;
4425 	}
4426 
4427 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4428 	if (err)
4429 		goto out;
4430 
4431 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4432 
4433 	/* now the directory is empty */
4434 	err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4435 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4436 			dentry->d_name.len);
4437 	if (!err) {
4438 		btrfs_i_size_write(BTRFS_I(inode), 0);
4439 		/*
4440 		 * Propagate the last_unlink_trans value of the deleted dir to
4441 		 * its parent directory. This is to prevent an unrecoverable
4442 		 * log tree in the case we do something like this:
4443 		 * 1) create dir foo
4444 		 * 2) create snapshot under dir foo
4445 		 * 3) delete the snapshot
4446 		 * 4) rmdir foo
4447 		 * 5) mkdir foo
4448 		 * 6) fsync foo or some file inside foo
4449 		 */
4450 		if (last_unlink_trans >= trans->transid)
4451 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4452 	}
4453 out:
4454 	btrfs_end_transaction(trans);
4455 	btrfs_btree_balance_dirty(root->fs_info);
4456 
4457 	return err;
4458 }
4459 
4460 /*
4461  * Return this if we need to call truncate_block for the last bit of the
4462  * truncate.
4463  */
4464 #define NEED_TRUNCATE_BLOCK 1
4465 
4466 /*
4467  * this can truncate away extent items, csum items and directory items.
4468  * It starts at a high offset and removes keys until it can't find
4469  * any higher than new_size
4470  *
4471  * csum items that cross the new i_size are truncated to the new size
4472  * as well.
4473  *
4474  * min_type is the minimum key type to truncate down to.  If set to 0, this
4475  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4476  */
4477 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4478 			       struct btrfs_root *root,
4479 			       struct btrfs_inode *inode,
4480 			       u64 new_size, u32 min_type)
4481 {
4482 	struct btrfs_fs_info *fs_info = root->fs_info;
4483 	struct btrfs_path *path;
4484 	struct extent_buffer *leaf;
4485 	struct btrfs_file_extent_item *fi;
4486 	struct btrfs_key key;
4487 	struct btrfs_key found_key;
4488 	u64 extent_start = 0;
4489 	u64 extent_num_bytes = 0;
4490 	u64 extent_offset = 0;
4491 	u64 item_end = 0;
4492 	u64 last_size = new_size;
4493 	u32 found_type = (u8)-1;
4494 	int found_extent;
4495 	int del_item;
4496 	int pending_del_nr = 0;
4497 	int pending_del_slot = 0;
4498 	int extent_type = -1;
4499 	int ret;
4500 	u64 ino = btrfs_ino(inode);
4501 	u64 bytes_deleted = 0;
4502 	bool be_nice = false;
4503 	bool should_throttle = false;
4504 	const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
4505 	struct extent_state *cached_state = NULL;
4506 
4507 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4508 
4509 	/*
4510 	 * For non-free space inodes and non-shareable roots, we want to back
4511 	 * off from time to time.  This means all inodes in subvolume roots,
4512 	 * reloc roots, and data reloc roots.
4513 	 */
4514 	if (!btrfs_is_free_space_inode(inode) &&
4515 	    test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
4516 		be_nice = true;
4517 
4518 	path = btrfs_alloc_path();
4519 	if (!path)
4520 		return -ENOMEM;
4521 	path->reada = READA_BACK;
4522 
4523 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4524 		lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
4525 				 &cached_state);
4526 
4527 		/*
4528 		 * We want to drop from the next block forward in case this
4529 		 * new size is not block aligned since we will be keeping the
4530 		 * last block of the extent just the way it is.
4531 		 */
4532 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
4533 					fs_info->sectorsize),
4534 					(u64)-1, 0);
4535 	}
4536 
4537 	/*
4538 	 * This function is also used to drop the items in the log tree before
4539 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4540 	 * it is used to drop the logged items. So we shouldn't kill the delayed
4541 	 * items.
4542 	 */
4543 	if (min_type == 0 && root == inode->root)
4544 		btrfs_kill_delayed_inode_items(inode);
4545 
4546 	key.objectid = ino;
4547 	key.offset = (u64)-1;
4548 	key.type = (u8)-1;
4549 
4550 search_again:
4551 	/*
4552 	 * with a 16K leaf size and 128MB extents, you can actually queue
4553 	 * up a huge file in a single leaf.  Most of the time that
4554 	 * bytes_deleted is > 0, it will be huge by the time we get here
4555 	 */
4556 	if (be_nice && bytes_deleted > SZ_32M &&
4557 	    btrfs_should_end_transaction(trans)) {
4558 		ret = -EAGAIN;
4559 		goto out;
4560 	}
4561 
4562 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4563 	if (ret < 0)
4564 		goto out;
4565 
4566 	if (ret > 0) {
4567 		ret = 0;
4568 		/* there are no items in the tree for us to truncate, we're
4569 		 * done
4570 		 */
4571 		if (path->slots[0] == 0)
4572 			goto out;
4573 		path->slots[0]--;
4574 	}
4575 
4576 	while (1) {
4577 		u64 clear_start = 0, clear_len = 0;
4578 
4579 		fi = NULL;
4580 		leaf = path->nodes[0];
4581 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4582 		found_type = found_key.type;
4583 
4584 		if (found_key.objectid != ino)
4585 			break;
4586 
4587 		if (found_type < min_type)
4588 			break;
4589 
4590 		item_end = found_key.offset;
4591 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
4592 			fi = btrfs_item_ptr(leaf, path->slots[0],
4593 					    struct btrfs_file_extent_item);
4594 			extent_type = btrfs_file_extent_type(leaf, fi);
4595 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4596 				item_end +=
4597 				    btrfs_file_extent_num_bytes(leaf, fi);
4598 
4599 				trace_btrfs_truncate_show_fi_regular(
4600 					inode, leaf, fi, found_key.offset);
4601 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4602 				item_end += btrfs_file_extent_ram_bytes(leaf,
4603 									fi);
4604 
4605 				trace_btrfs_truncate_show_fi_inline(
4606 					inode, leaf, fi, path->slots[0],
4607 					found_key.offset);
4608 			}
4609 			item_end--;
4610 		}
4611 		if (found_type > min_type) {
4612 			del_item = 1;
4613 		} else {
4614 			if (item_end < new_size)
4615 				break;
4616 			if (found_key.offset >= new_size)
4617 				del_item = 1;
4618 			else
4619 				del_item = 0;
4620 		}
4621 		found_extent = 0;
4622 		/* FIXME, shrink the extent if the ref count is only 1 */
4623 		if (found_type != BTRFS_EXTENT_DATA_KEY)
4624 			goto delete;
4625 
4626 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4627 			u64 num_dec;
4628 
4629 			clear_start = found_key.offset;
4630 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4631 			if (!del_item) {
4632 				u64 orig_num_bytes =
4633 					btrfs_file_extent_num_bytes(leaf, fi);
4634 				extent_num_bytes = ALIGN(new_size -
4635 						found_key.offset,
4636 						fs_info->sectorsize);
4637 				clear_start = ALIGN(new_size, fs_info->sectorsize);
4638 				btrfs_set_file_extent_num_bytes(leaf, fi,
4639 							 extent_num_bytes);
4640 				num_dec = (orig_num_bytes -
4641 					   extent_num_bytes);
4642 				if (test_bit(BTRFS_ROOT_SHAREABLE,
4643 					     &root->state) &&
4644 				    extent_start != 0)
4645 					inode_sub_bytes(&inode->vfs_inode,
4646 							num_dec);
4647 				btrfs_mark_buffer_dirty(leaf);
4648 			} else {
4649 				extent_num_bytes =
4650 					btrfs_file_extent_disk_num_bytes(leaf,
4651 									 fi);
4652 				extent_offset = found_key.offset -
4653 					btrfs_file_extent_offset(leaf, fi);
4654 
4655 				/* FIXME blocksize != 4096 */
4656 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4657 				if (extent_start != 0) {
4658 					found_extent = 1;
4659 					if (test_bit(BTRFS_ROOT_SHAREABLE,
4660 						     &root->state))
4661 						inode_sub_bytes(&inode->vfs_inode,
4662 								num_dec);
4663 				}
4664 			}
4665 			clear_len = num_dec;
4666 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4667 			/*
4668 			 * we can't truncate inline items that have had
4669 			 * special encodings
4670 			 */
4671 			if (!del_item &&
4672 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4673 			    btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4674 			    btrfs_file_extent_compression(leaf, fi) == 0) {
4675 				u32 size = (u32)(new_size - found_key.offset);
4676 
4677 				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4678 				size = btrfs_file_extent_calc_inline_size(size);
4679 				btrfs_truncate_item(path, size, 1);
4680 			} else if (!del_item) {
4681 				/*
4682 				 * We have to bail so the last_size is set to
4683 				 * just before this extent.
4684 				 */
4685 				ret = NEED_TRUNCATE_BLOCK;
4686 				break;
4687 			} else {
4688 				/*
4689 				 * Inline extents are special, we just treat
4690 				 * them as a full sector worth in the file
4691 				 * extent tree just for simplicity sake.
4692 				 */
4693 				clear_len = fs_info->sectorsize;
4694 			}
4695 
4696 			if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
4697 				inode_sub_bytes(&inode->vfs_inode,
4698 						item_end + 1 - new_size);
4699 		}
4700 delete:
4701 		/*
4702 		 * We use btrfs_truncate_inode_items() to clean up log trees for
4703 		 * multiple fsyncs, and in this case we don't want to clear the
4704 		 * file extent range because it's just the log.
4705 		 */
4706 		if (root == inode->root) {
4707 			ret = btrfs_inode_clear_file_extent_range(inode,
4708 						  clear_start, clear_len);
4709 			if (ret) {
4710 				btrfs_abort_transaction(trans, ret);
4711 				break;
4712 			}
4713 		}
4714 
4715 		if (del_item)
4716 			last_size = found_key.offset;
4717 		else
4718 			last_size = new_size;
4719 		if (del_item) {
4720 			if (!pending_del_nr) {
4721 				/* no pending yet, add ourselves */
4722 				pending_del_slot = path->slots[0];
4723 				pending_del_nr = 1;
4724 			} else if (pending_del_nr &&
4725 				   path->slots[0] + 1 == pending_del_slot) {
4726 				/* hop on the pending chunk */
4727 				pending_del_nr++;
4728 				pending_del_slot = path->slots[0];
4729 			} else {
4730 				BUG();
4731 			}
4732 		} else {
4733 			break;
4734 		}
4735 		should_throttle = false;
4736 
4737 		if (found_extent &&
4738 		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4739 			struct btrfs_ref ref = { 0 };
4740 
4741 			bytes_deleted += extent_num_bytes;
4742 
4743 			btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
4744 					extent_start, extent_num_bytes, 0);
4745 			ref.real_root = root->root_key.objectid;
4746 			btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
4747 					ino, extent_offset);
4748 			ret = btrfs_free_extent(trans, &ref);
4749 			if (ret) {
4750 				btrfs_abort_transaction(trans, ret);
4751 				break;
4752 			}
4753 			if (be_nice) {
4754 				if (btrfs_should_throttle_delayed_refs(trans))
4755 					should_throttle = true;
4756 			}
4757 		}
4758 
4759 		if (found_type == BTRFS_INODE_ITEM_KEY)
4760 			break;
4761 
4762 		if (path->slots[0] == 0 ||
4763 		    path->slots[0] != pending_del_slot ||
4764 		    should_throttle) {
4765 			if (pending_del_nr) {
4766 				ret = btrfs_del_items(trans, root, path,
4767 						pending_del_slot,
4768 						pending_del_nr);
4769 				if (ret) {
4770 					btrfs_abort_transaction(trans, ret);
4771 					break;
4772 				}
4773 				pending_del_nr = 0;
4774 			}
4775 			btrfs_release_path(path);
4776 
4777 			/*
4778 			 * We can generate a lot of delayed refs, so we need to
4779 			 * throttle every once and a while and make sure we're
4780 			 * adding enough space to keep up with the work we are
4781 			 * generating.  Since we hold a transaction here we
4782 			 * can't flush, and we don't want to FLUSH_LIMIT because
4783 			 * we could have generated too many delayed refs to
4784 			 * actually allocate, so just bail if we're short and
4785 			 * let the normal reservation dance happen higher up.
4786 			 */
4787 			if (should_throttle) {
4788 				ret = btrfs_delayed_refs_rsv_refill(fs_info,
4789 							BTRFS_RESERVE_NO_FLUSH);
4790 				if (ret) {
4791 					ret = -EAGAIN;
4792 					break;
4793 				}
4794 			}
4795 			goto search_again;
4796 		} else {
4797 			path->slots[0]--;
4798 		}
4799 	}
4800 out:
4801 	if (ret >= 0 && pending_del_nr) {
4802 		int err;
4803 
4804 		err = btrfs_del_items(trans, root, path, pending_del_slot,
4805 				      pending_del_nr);
4806 		if (err) {
4807 			btrfs_abort_transaction(trans, err);
4808 			ret = err;
4809 		}
4810 	}
4811 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4812 		ASSERT(last_size >= new_size);
4813 		if (!ret && last_size > new_size)
4814 			last_size = new_size;
4815 		btrfs_inode_safe_disk_i_size_write(inode, last_size);
4816 		unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
4817 				     &cached_state);
4818 	}
4819 
4820 	btrfs_free_path(path);
4821 	return ret;
4822 }
4823 
4824 /*
4825  * btrfs_truncate_block - read, zero a chunk and write a block
4826  * @inode - inode that we're zeroing
4827  * @from - the offset to start zeroing
4828  * @len - the length to zero, 0 to zero the entire range respective to the
4829  *	offset
4830  * @front - zero up to the offset instead of from the offset on
4831  *
4832  * This will find the block for the "from" offset and cow the block and zero the
4833  * part we want to zero.  This is used with truncate and hole punching.
4834  */
4835 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4836 			 int front)
4837 {
4838 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4839 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4840 	struct extent_io_tree *io_tree = &inode->io_tree;
4841 	struct btrfs_ordered_extent *ordered;
4842 	struct extent_state *cached_state = NULL;
4843 	struct extent_changeset *data_reserved = NULL;
4844 	bool only_release_metadata = false;
4845 	u32 blocksize = fs_info->sectorsize;
4846 	pgoff_t index = from >> PAGE_SHIFT;
4847 	unsigned offset = from & (blocksize - 1);
4848 	struct page *page;
4849 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4850 	size_t write_bytes = blocksize;
4851 	int ret = 0;
4852 	u64 block_start;
4853 	u64 block_end;
4854 
4855 	if (IS_ALIGNED(offset, blocksize) &&
4856 	    (!len || IS_ALIGNED(len, blocksize)))
4857 		goto out;
4858 
4859 	block_start = round_down(from, blocksize);
4860 	block_end = block_start + blocksize - 1;
4861 
4862 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4863 					  blocksize);
4864 	if (ret < 0) {
4865 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
4866 			/* For nocow case, no need to reserve data space */
4867 			only_release_metadata = true;
4868 		} else {
4869 			goto out;
4870 		}
4871 	}
4872 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize);
4873 	if (ret < 0) {
4874 		if (!only_release_metadata)
4875 			btrfs_free_reserved_data_space(inode, data_reserved,
4876 						       block_start, blocksize);
4877 		goto out;
4878 	}
4879 again:
4880 	page = find_or_create_page(mapping, index, mask);
4881 	if (!page) {
4882 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4883 					     blocksize, true);
4884 		btrfs_delalloc_release_extents(inode, blocksize);
4885 		ret = -ENOMEM;
4886 		goto out;
4887 	}
4888 	ret = set_page_extent_mapped(page);
4889 	if (ret < 0)
4890 		goto out_unlock;
4891 
4892 	if (!PageUptodate(page)) {
4893 		ret = btrfs_readpage(NULL, page);
4894 		lock_page(page);
4895 		if (page->mapping != mapping) {
4896 			unlock_page(page);
4897 			put_page(page);
4898 			goto again;
4899 		}
4900 		if (!PageUptodate(page)) {
4901 			ret = -EIO;
4902 			goto out_unlock;
4903 		}
4904 	}
4905 	wait_on_page_writeback(page);
4906 
4907 	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4908 
4909 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4910 	if (ordered) {
4911 		unlock_extent_cached(io_tree, block_start, block_end,
4912 				     &cached_state);
4913 		unlock_page(page);
4914 		put_page(page);
4915 		btrfs_start_ordered_extent(ordered, 1);
4916 		btrfs_put_ordered_extent(ordered);
4917 		goto again;
4918 	}
4919 
4920 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4921 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4922 			 0, 0, &cached_state);
4923 
4924 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4925 					&cached_state);
4926 	if (ret) {
4927 		unlock_extent_cached(io_tree, block_start, block_end,
4928 				     &cached_state);
4929 		goto out_unlock;
4930 	}
4931 
4932 	if (offset != blocksize) {
4933 		if (!len)
4934 			len = blocksize - offset;
4935 		if (front)
4936 			memzero_page(page, (block_start - page_offset(page)),
4937 				     offset);
4938 		else
4939 			memzero_page(page, (block_start - page_offset(page)) + offset,
4940 				     len);
4941 		flush_dcache_page(page);
4942 	}
4943 	ClearPageChecked(page);
4944 	set_page_dirty(page);
4945 	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4946 
4947 	if (only_release_metadata)
4948 		set_extent_bit(&inode->io_tree, block_start, block_end,
4949 			       EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);
4950 
4951 out_unlock:
4952 	if (ret) {
4953 		if (only_release_metadata)
4954 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4955 		else
4956 			btrfs_delalloc_release_space(inode, data_reserved,
4957 					block_start, blocksize, true);
4958 	}
4959 	btrfs_delalloc_release_extents(inode, blocksize);
4960 	unlock_page(page);
4961 	put_page(page);
4962 out:
4963 	if (only_release_metadata)
4964 		btrfs_check_nocow_unlock(inode);
4965 	extent_changeset_free(data_reserved);
4966 	return ret;
4967 }
4968 
4969 static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
4970 			     u64 offset, u64 len)
4971 {
4972 	struct btrfs_fs_info *fs_info = root->fs_info;
4973 	struct btrfs_trans_handle *trans;
4974 	struct btrfs_drop_extents_args drop_args = { 0 };
4975 	int ret;
4976 
4977 	/*
4978 	 * Still need to make sure the inode looks like it's been updated so
4979 	 * that any holes get logged if we fsync.
4980 	 */
4981 	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4982 		inode->last_trans = fs_info->generation;
4983 		inode->last_sub_trans = root->log_transid;
4984 		inode->last_log_commit = root->last_log_commit;
4985 		return 0;
4986 	}
4987 
4988 	/*
4989 	 * 1 - for the one we're dropping
4990 	 * 1 - for the one we're adding
4991 	 * 1 - for updating the inode.
4992 	 */
4993 	trans = btrfs_start_transaction(root, 3);
4994 	if (IS_ERR(trans))
4995 		return PTR_ERR(trans);
4996 
4997 	drop_args.start = offset;
4998 	drop_args.end = offset + len;
4999 	drop_args.drop_cache = true;
5000 
5001 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5002 	if (ret) {
5003 		btrfs_abort_transaction(trans, ret);
5004 		btrfs_end_transaction(trans);
5005 		return ret;
5006 	}
5007 
5008 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
5009 			offset, 0, 0, len, 0, len, 0, 0, 0);
5010 	if (ret) {
5011 		btrfs_abort_transaction(trans, ret);
5012 	} else {
5013 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5014 		btrfs_update_inode(trans, root, inode);
5015 	}
5016 	btrfs_end_transaction(trans);
5017 	return ret;
5018 }
5019 
5020 /*
5021  * This function puts in dummy file extents for the area we're creating a hole
5022  * for.  So if we are truncating this file to a larger size we need to insert
5023  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
5024  * the range between oldsize and size
5025  */
5026 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5027 {
5028 	struct btrfs_root *root = inode->root;
5029 	struct btrfs_fs_info *fs_info = root->fs_info;
5030 	struct extent_io_tree *io_tree = &inode->io_tree;
5031 	struct extent_map *em = NULL;
5032 	struct extent_state *cached_state = NULL;
5033 	struct extent_map_tree *em_tree = &inode->extent_tree;
5034 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5035 	u64 block_end = ALIGN(size, fs_info->sectorsize);
5036 	u64 last_byte;
5037 	u64 cur_offset;
5038 	u64 hole_size;
5039 	int err = 0;
5040 
5041 	/*
5042 	 * If our size started in the middle of a block we need to zero out the
5043 	 * rest of the block before we expand the i_size, otherwise we could
5044 	 * expose stale data.
5045 	 */
5046 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
5047 	if (err)
5048 		return err;
5049 
5050 	if (size <= hole_start)
5051 		return 0;
5052 
5053 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5054 					   &cached_state);
5055 	cur_offset = hole_start;
5056 	while (1) {
5057 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5058 				      block_end - cur_offset);
5059 		if (IS_ERR(em)) {
5060 			err = PTR_ERR(em);
5061 			em = NULL;
5062 			break;
5063 		}
5064 		last_byte = min(extent_map_end(em), block_end);
5065 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
5066 		hole_size = last_byte - cur_offset;
5067 
5068 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5069 			struct extent_map *hole_em;
5070 
5071 			err = maybe_insert_hole(root, inode, cur_offset,
5072 						hole_size);
5073 			if (err)
5074 				break;
5075 
5076 			err = btrfs_inode_set_file_extent_range(inode,
5077 							cur_offset, hole_size);
5078 			if (err)
5079 				break;
5080 
5081 			btrfs_drop_extent_cache(inode, cur_offset,
5082 						cur_offset + hole_size - 1, 0);
5083 			hole_em = alloc_extent_map();
5084 			if (!hole_em) {
5085 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5086 					&inode->runtime_flags);
5087 				goto next;
5088 			}
5089 			hole_em->start = cur_offset;
5090 			hole_em->len = hole_size;
5091 			hole_em->orig_start = cur_offset;
5092 
5093 			hole_em->block_start = EXTENT_MAP_HOLE;
5094 			hole_em->block_len = 0;
5095 			hole_em->orig_block_len = 0;
5096 			hole_em->ram_bytes = hole_size;
5097 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
5098 			hole_em->generation = fs_info->generation;
5099 
5100 			while (1) {
5101 				write_lock(&em_tree->lock);
5102 				err = add_extent_mapping(em_tree, hole_em, 1);
5103 				write_unlock(&em_tree->lock);
5104 				if (err != -EEXIST)
5105 					break;
5106 				btrfs_drop_extent_cache(inode, cur_offset,
5107 							cur_offset +
5108 							hole_size - 1, 0);
5109 			}
5110 			free_extent_map(hole_em);
5111 		} else {
5112 			err = btrfs_inode_set_file_extent_range(inode,
5113 							cur_offset, hole_size);
5114 			if (err)
5115 				break;
5116 		}
5117 next:
5118 		free_extent_map(em);
5119 		em = NULL;
5120 		cur_offset = last_byte;
5121 		if (cur_offset >= block_end)
5122 			break;
5123 	}
5124 	free_extent_map(em);
5125 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5126 	return err;
5127 }
5128 
5129 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5130 {
5131 	struct btrfs_root *root = BTRFS_I(inode)->root;
5132 	struct btrfs_trans_handle *trans;
5133 	loff_t oldsize = i_size_read(inode);
5134 	loff_t newsize = attr->ia_size;
5135 	int mask = attr->ia_valid;
5136 	int ret;
5137 
5138 	/*
5139 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5140 	 * special case where we need to update the times despite not having
5141 	 * these flags set.  For all other operations the VFS set these flags
5142 	 * explicitly if it wants a timestamp update.
5143 	 */
5144 	if (newsize != oldsize) {
5145 		inode_inc_iversion(inode);
5146 		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5147 			inode->i_ctime = inode->i_mtime =
5148 				current_time(inode);
5149 	}
5150 
5151 	if (newsize > oldsize) {
5152 		/*
5153 		 * Don't do an expanding truncate while snapshotting is ongoing.
5154 		 * This is to ensure the snapshot captures a fully consistent
5155 		 * state of this file - if the snapshot captures this expanding
5156 		 * truncation, it must capture all writes that happened before
5157 		 * this truncation.
5158 		 */
5159 		btrfs_drew_write_lock(&root->snapshot_lock);
5160 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5161 		if (ret) {
5162 			btrfs_drew_write_unlock(&root->snapshot_lock);
5163 			return ret;
5164 		}
5165 
5166 		trans = btrfs_start_transaction(root, 1);
5167 		if (IS_ERR(trans)) {
5168 			btrfs_drew_write_unlock(&root->snapshot_lock);
5169 			return PTR_ERR(trans);
5170 		}
5171 
5172 		i_size_write(inode, newsize);
5173 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5174 		pagecache_isize_extended(inode, oldsize, newsize);
5175 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
5176 		btrfs_drew_write_unlock(&root->snapshot_lock);
5177 		btrfs_end_transaction(trans);
5178 	} else {
5179 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5180 
5181 		if (btrfs_is_zoned(fs_info)) {
5182 			ret = btrfs_wait_ordered_range(inode,
5183 					ALIGN(newsize, fs_info->sectorsize),
5184 					(u64)-1);
5185 			if (ret)
5186 				return ret;
5187 		}
5188 
5189 		/*
5190 		 * We're truncating a file that used to have good data down to
5191 		 * zero. Make sure any new writes to the file get on disk
5192 		 * on close.
5193 		 */
5194 		if (newsize == 0)
5195 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5196 				&BTRFS_I(inode)->runtime_flags);
5197 
5198 		truncate_setsize(inode, newsize);
5199 
5200 		inode_dio_wait(inode);
5201 
5202 		ret = btrfs_truncate(inode, newsize == oldsize);
5203 		if (ret && inode->i_nlink) {
5204 			int err;
5205 
5206 			/*
5207 			 * Truncate failed, so fix up the in-memory size. We
5208 			 * adjusted disk_i_size down as we removed extents, so
5209 			 * wait for disk_i_size to be stable and then update the
5210 			 * in-memory size to match.
5211 			 */
5212 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5213 			if (err)
5214 				return err;
5215 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5216 		}
5217 	}
5218 
5219 	return ret;
5220 }
5221 
5222 static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
5223 			 struct iattr *attr)
5224 {
5225 	struct inode *inode = d_inode(dentry);
5226 	struct btrfs_root *root = BTRFS_I(inode)->root;
5227 	int err;
5228 
5229 	if (btrfs_root_readonly(root))
5230 		return -EROFS;
5231 
5232 	err = setattr_prepare(&init_user_ns, dentry, attr);
5233 	if (err)
5234 		return err;
5235 
5236 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5237 		err = btrfs_setsize(inode, attr);
5238 		if (err)
5239 			return err;
5240 	}
5241 
5242 	if (attr->ia_valid) {
5243 		setattr_copy(&init_user_ns, inode, attr);
5244 		inode_inc_iversion(inode);
5245 		err = btrfs_dirty_inode(inode);
5246 
5247 		if (!err && attr->ia_valid & ATTR_MODE)
5248 			err = posix_acl_chmod(&init_user_ns, inode,
5249 					      inode->i_mode);
5250 	}
5251 
5252 	return err;
5253 }
5254 
5255 /*
5256  * While truncating the inode pages during eviction, we get the VFS calling
5257  * btrfs_invalidatepage() against each page of the inode. This is slow because
5258  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5259  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5260  * extent_state structures over and over, wasting lots of time.
5261  *
5262  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5263  * those expensive operations on a per page basis and do only the ordered io
5264  * finishing, while we release here the extent_map and extent_state structures,
5265  * without the excessive merging and splitting.
5266  */
5267 static void evict_inode_truncate_pages(struct inode *inode)
5268 {
5269 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5270 	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5271 	struct rb_node *node;
5272 
5273 	ASSERT(inode->i_state & I_FREEING);
5274 	truncate_inode_pages_final(&inode->i_data);
5275 
5276 	write_lock(&map_tree->lock);
5277 	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
5278 		struct extent_map *em;
5279 
5280 		node = rb_first_cached(&map_tree->map);
5281 		em = rb_entry(node, struct extent_map, rb_node);
5282 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5283 		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5284 		remove_extent_mapping(map_tree, em);
5285 		free_extent_map(em);
5286 		if (need_resched()) {
5287 			write_unlock(&map_tree->lock);
5288 			cond_resched();
5289 			write_lock(&map_tree->lock);
5290 		}
5291 	}
5292 	write_unlock(&map_tree->lock);
5293 
5294 	/*
5295 	 * Keep looping until we have no more ranges in the io tree.
5296 	 * We can have ongoing bios started by readahead that have
5297 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5298 	 * still in progress (unlocked the pages in the bio but did not yet
5299 	 * unlocked the ranges in the io tree). Therefore this means some
5300 	 * ranges can still be locked and eviction started because before
5301 	 * submitting those bios, which are executed by a separate task (work
5302 	 * queue kthread), inode references (inode->i_count) were not taken
5303 	 * (which would be dropped in the end io callback of each bio).
5304 	 * Therefore here we effectively end up waiting for those bios and
5305 	 * anyone else holding locked ranges without having bumped the inode's
5306 	 * reference count - if we don't do it, when they access the inode's
5307 	 * io_tree to unlock a range it may be too late, leading to an
5308 	 * use-after-free issue.
5309 	 */
5310 	spin_lock(&io_tree->lock);
5311 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5312 		struct extent_state *state;
5313 		struct extent_state *cached_state = NULL;
5314 		u64 start;
5315 		u64 end;
5316 		unsigned state_flags;
5317 
5318 		node = rb_first(&io_tree->state);
5319 		state = rb_entry(node, struct extent_state, rb_node);
5320 		start = state->start;
5321 		end = state->end;
5322 		state_flags = state->state;
5323 		spin_unlock(&io_tree->lock);
5324 
5325 		lock_extent_bits(io_tree, start, end, &cached_state);
5326 
5327 		/*
5328 		 * If still has DELALLOC flag, the extent didn't reach disk,
5329 		 * and its reserved space won't be freed by delayed_ref.
5330 		 * So we need to free its reserved space here.
5331 		 * (Refer to comment in btrfs_invalidatepage, case 2)
5332 		 *
5333 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5334 		 */
5335 		if (state_flags & EXTENT_DELALLOC)
5336 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5337 					       end - start + 1);
5338 
5339 		clear_extent_bit(io_tree, start, end,
5340 				 EXTENT_LOCKED | EXTENT_DELALLOC |
5341 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
5342 				 &cached_state);
5343 
5344 		cond_resched();
5345 		spin_lock(&io_tree->lock);
5346 	}
5347 	spin_unlock(&io_tree->lock);
5348 }
5349 
5350 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5351 							struct btrfs_block_rsv *rsv)
5352 {
5353 	struct btrfs_fs_info *fs_info = root->fs_info;
5354 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5355 	struct btrfs_trans_handle *trans;
5356 	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
5357 	int ret;
5358 
5359 	/*
5360 	 * Eviction should be taking place at some place safe because of our
5361 	 * delayed iputs.  However the normal flushing code will run delayed
5362 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5363 	 *
5364 	 * We reserve the delayed_refs_extra here again because we can't use
5365 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5366 	 * above.  We reserve our extra bit here because we generate a ton of
5367 	 * delayed refs activity by truncating.
5368 	 *
5369 	 * If we cannot make our reservation we'll attempt to steal from the
5370 	 * global reserve, because we really want to be able to free up space.
5371 	 */
5372 	ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
5373 				     BTRFS_RESERVE_FLUSH_EVICT);
5374 	if (ret) {
5375 		/*
5376 		 * Try to steal from the global reserve if there is space for
5377 		 * it.
5378 		 */
5379 		if (btrfs_check_space_for_delayed_refs(fs_info) ||
5380 		    btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
5381 			btrfs_warn(fs_info,
5382 				   "could not allocate space for delete; will truncate on mount");
5383 			return ERR_PTR(-ENOSPC);
5384 		}
5385 		delayed_refs_extra = 0;
5386 	}
5387 
5388 	trans = btrfs_join_transaction(root);
5389 	if (IS_ERR(trans))
5390 		return trans;
5391 
5392 	if (delayed_refs_extra) {
5393 		trans->block_rsv = &fs_info->trans_block_rsv;
5394 		trans->bytes_reserved = delayed_refs_extra;
5395 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5396 					delayed_refs_extra, 1);
5397 	}
5398 	return trans;
5399 }
5400 
5401 void btrfs_evict_inode(struct inode *inode)
5402 {
5403 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5404 	struct btrfs_trans_handle *trans;
5405 	struct btrfs_root *root = BTRFS_I(inode)->root;
5406 	struct btrfs_block_rsv *rsv;
5407 	int ret;
5408 
5409 	trace_btrfs_inode_evict(inode);
5410 
5411 	if (!root) {
5412 		clear_inode(inode);
5413 		return;
5414 	}
5415 
5416 	evict_inode_truncate_pages(inode);
5417 
5418 	if (inode->i_nlink &&
5419 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5420 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5421 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5422 		goto no_delete;
5423 
5424 	if (is_bad_inode(inode))
5425 		goto no_delete;
5426 
5427 	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5428 
5429 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5430 		goto no_delete;
5431 
5432 	if (inode->i_nlink > 0) {
5433 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5434 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5435 		goto no_delete;
5436 	}
5437 
5438 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5439 	if (ret)
5440 		goto no_delete;
5441 
5442 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5443 	if (!rsv)
5444 		goto no_delete;
5445 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5446 	rsv->failfast = 1;
5447 
5448 	btrfs_i_size_write(BTRFS_I(inode), 0);
5449 
5450 	while (1) {
5451 		trans = evict_refill_and_join(root, rsv);
5452 		if (IS_ERR(trans))
5453 			goto free_rsv;
5454 
5455 		trans->block_rsv = rsv;
5456 
5457 		ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
5458 						 0, 0);
5459 		trans->block_rsv = &fs_info->trans_block_rsv;
5460 		btrfs_end_transaction(trans);
5461 		btrfs_btree_balance_dirty(fs_info);
5462 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5463 			goto free_rsv;
5464 		else if (!ret)
5465 			break;
5466 	}
5467 
5468 	/*
5469 	 * Errors here aren't a big deal, it just means we leave orphan items in
5470 	 * the tree. They will be cleaned up on the next mount. If the inode
5471 	 * number gets reused, cleanup deletes the orphan item without doing
5472 	 * anything, and unlink reuses the existing orphan item.
5473 	 *
5474 	 * If it turns out that we are dropping too many of these, we might want
5475 	 * to add a mechanism for retrying these after a commit.
5476 	 */
5477 	trans = evict_refill_and_join(root, rsv);
5478 	if (!IS_ERR(trans)) {
5479 		trans->block_rsv = rsv;
5480 		btrfs_orphan_del(trans, BTRFS_I(inode));
5481 		trans->block_rsv = &fs_info->trans_block_rsv;
5482 		btrfs_end_transaction(trans);
5483 	}
5484 
5485 free_rsv:
5486 	btrfs_free_block_rsv(fs_info, rsv);
5487 no_delete:
5488 	/*
5489 	 * If we didn't successfully delete, the orphan item will still be in
5490 	 * the tree and we'll retry on the next mount. Again, we might also want
5491 	 * to retry these periodically in the future.
5492 	 */
5493 	btrfs_remove_delayed_node(BTRFS_I(inode));
5494 	clear_inode(inode);
5495 }
5496 
5497 /*
5498  * Return the key found in the dir entry in the location pointer, fill @type
5499  * with BTRFS_FT_*, and return 0.
5500  *
5501  * If no dir entries were found, returns -ENOENT.
5502  * If found a corrupted location in dir entry, returns -EUCLEAN.
5503  */
5504 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5505 			       struct btrfs_key *location, u8 *type)
5506 {
5507 	const char *name = dentry->d_name.name;
5508 	int namelen = dentry->d_name.len;
5509 	struct btrfs_dir_item *di;
5510 	struct btrfs_path *path;
5511 	struct btrfs_root *root = BTRFS_I(dir)->root;
5512 	int ret = 0;
5513 
5514 	path = btrfs_alloc_path();
5515 	if (!path)
5516 		return -ENOMEM;
5517 
5518 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5519 			name, namelen, 0);
5520 	if (IS_ERR_OR_NULL(di)) {
5521 		ret = di ? PTR_ERR(di) : -ENOENT;
5522 		goto out;
5523 	}
5524 
5525 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5526 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5527 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5528 		ret = -EUCLEAN;
5529 		btrfs_warn(root->fs_info,
5530 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5531 			   __func__, name, btrfs_ino(BTRFS_I(dir)),
5532 			   location->objectid, location->type, location->offset);
5533 	}
5534 	if (!ret)
5535 		*type = btrfs_dir_type(path->nodes[0], di);
5536 out:
5537 	btrfs_free_path(path);
5538 	return ret;
5539 }
5540 
5541 /*
5542  * when we hit a tree root in a directory, the btrfs part of the inode
5543  * needs to be changed to reflect the root directory of the tree root.  This
5544  * is kind of like crossing a mount point.
5545  */
5546 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5547 				    struct inode *dir,
5548 				    struct dentry *dentry,
5549 				    struct btrfs_key *location,
5550 				    struct btrfs_root **sub_root)
5551 {
5552 	struct btrfs_path *path;
5553 	struct btrfs_root *new_root;
5554 	struct btrfs_root_ref *ref;
5555 	struct extent_buffer *leaf;
5556 	struct btrfs_key key;
5557 	int ret;
5558 	int err = 0;
5559 
5560 	path = btrfs_alloc_path();
5561 	if (!path) {
5562 		err = -ENOMEM;
5563 		goto out;
5564 	}
5565 
5566 	err = -ENOENT;
5567 	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5568 	key.type = BTRFS_ROOT_REF_KEY;
5569 	key.offset = location->objectid;
5570 
5571 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5572 	if (ret) {
5573 		if (ret < 0)
5574 			err = ret;
5575 		goto out;
5576 	}
5577 
5578 	leaf = path->nodes[0];
5579 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5580 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5581 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5582 		goto out;
5583 
5584 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5585 				   (unsigned long)(ref + 1),
5586 				   dentry->d_name.len);
5587 	if (ret)
5588 		goto out;
5589 
5590 	btrfs_release_path(path);
5591 
5592 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5593 	if (IS_ERR(new_root)) {
5594 		err = PTR_ERR(new_root);
5595 		goto out;
5596 	}
5597 
5598 	*sub_root = new_root;
5599 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5600 	location->type = BTRFS_INODE_ITEM_KEY;
5601 	location->offset = 0;
5602 	err = 0;
5603 out:
5604 	btrfs_free_path(path);
5605 	return err;
5606 }
5607 
5608 static void inode_tree_add(struct inode *inode)
5609 {
5610 	struct btrfs_root *root = BTRFS_I(inode)->root;
5611 	struct btrfs_inode *entry;
5612 	struct rb_node **p;
5613 	struct rb_node *parent;
5614 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
5615 	u64 ino = btrfs_ino(BTRFS_I(inode));
5616 
5617 	if (inode_unhashed(inode))
5618 		return;
5619 	parent = NULL;
5620 	spin_lock(&root->inode_lock);
5621 	p = &root->inode_tree.rb_node;
5622 	while (*p) {
5623 		parent = *p;
5624 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5625 
5626 		if (ino < btrfs_ino(entry))
5627 			p = &parent->rb_left;
5628 		else if (ino > btrfs_ino(entry))
5629 			p = &parent->rb_right;
5630 		else {
5631 			WARN_ON(!(entry->vfs_inode.i_state &
5632 				  (I_WILL_FREE | I_FREEING)));
5633 			rb_replace_node(parent, new, &root->inode_tree);
5634 			RB_CLEAR_NODE(parent);
5635 			spin_unlock(&root->inode_lock);
5636 			return;
5637 		}
5638 	}
5639 	rb_link_node(new, parent, p);
5640 	rb_insert_color(new, &root->inode_tree);
5641 	spin_unlock(&root->inode_lock);
5642 }
5643 
5644 static void inode_tree_del(struct btrfs_inode *inode)
5645 {
5646 	struct btrfs_root *root = inode->root;
5647 	int empty = 0;
5648 
5649 	spin_lock(&root->inode_lock);
5650 	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5651 		rb_erase(&inode->rb_node, &root->inode_tree);
5652 		RB_CLEAR_NODE(&inode->rb_node);
5653 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5654 	}
5655 	spin_unlock(&root->inode_lock);
5656 
5657 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5658 		spin_lock(&root->inode_lock);
5659 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5660 		spin_unlock(&root->inode_lock);
5661 		if (empty)
5662 			btrfs_add_dead_root(root);
5663 	}
5664 }
5665 
5666 
5667 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5668 {
5669 	struct btrfs_iget_args *args = p;
5670 
5671 	inode->i_ino = args->ino;
5672 	BTRFS_I(inode)->location.objectid = args->ino;
5673 	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5674 	BTRFS_I(inode)->location.offset = 0;
5675 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5676 	BUG_ON(args->root && !BTRFS_I(inode)->root);
5677 	return 0;
5678 }
5679 
5680 static int btrfs_find_actor(struct inode *inode, void *opaque)
5681 {
5682 	struct btrfs_iget_args *args = opaque;
5683 
5684 	return args->ino == BTRFS_I(inode)->location.objectid &&
5685 		args->root == BTRFS_I(inode)->root;
5686 }
5687 
5688 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5689 				       struct btrfs_root *root)
5690 {
5691 	struct inode *inode;
5692 	struct btrfs_iget_args args;
5693 	unsigned long hashval = btrfs_inode_hash(ino, root);
5694 
5695 	args.ino = ino;
5696 	args.root = root;
5697 
5698 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5699 			     btrfs_init_locked_inode,
5700 			     (void *)&args);
5701 	return inode;
5702 }
5703 
5704 /*
5705  * Get an inode object given its inode number and corresponding root.
5706  * Path can be preallocated to prevent recursing back to iget through
5707  * allocator. NULL is also valid but may require an additional allocation
5708  * later.
5709  */
5710 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5711 			      struct btrfs_root *root, struct btrfs_path *path)
5712 {
5713 	struct inode *inode;
5714 
5715 	inode = btrfs_iget_locked(s, ino, root);
5716 	if (!inode)
5717 		return ERR_PTR(-ENOMEM);
5718 
5719 	if (inode->i_state & I_NEW) {
5720 		int ret;
5721 
5722 		ret = btrfs_read_locked_inode(inode, path);
5723 		if (!ret) {
5724 			inode_tree_add(inode);
5725 			unlock_new_inode(inode);
5726 		} else {
5727 			iget_failed(inode);
5728 			/*
5729 			 * ret > 0 can come from btrfs_search_slot called by
5730 			 * btrfs_read_locked_inode, this means the inode item
5731 			 * was not found.
5732 			 */
5733 			if (ret > 0)
5734 				ret = -ENOENT;
5735 			inode = ERR_PTR(ret);
5736 		}
5737 	}
5738 
5739 	return inode;
5740 }
5741 
5742 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5743 {
5744 	return btrfs_iget_path(s, ino, root, NULL);
5745 }
5746 
5747 static struct inode *new_simple_dir(struct super_block *s,
5748 				    struct btrfs_key *key,
5749 				    struct btrfs_root *root)
5750 {
5751 	struct inode *inode = new_inode(s);
5752 
5753 	if (!inode)
5754 		return ERR_PTR(-ENOMEM);
5755 
5756 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5757 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5758 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5759 
5760 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5761 	/*
5762 	 * We only need lookup, the rest is read-only and there's no inode
5763 	 * associated with the dentry
5764 	 */
5765 	inode->i_op = &simple_dir_inode_operations;
5766 	inode->i_opflags &= ~IOP_XATTR;
5767 	inode->i_fop = &simple_dir_operations;
5768 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5769 	inode->i_mtime = current_time(inode);
5770 	inode->i_atime = inode->i_mtime;
5771 	inode->i_ctime = inode->i_mtime;
5772 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5773 
5774 	return inode;
5775 }
5776 
5777 static inline u8 btrfs_inode_type(struct inode *inode)
5778 {
5779 	/*
5780 	 * Compile-time asserts that generic FT_* types still match
5781 	 * BTRFS_FT_* types
5782 	 */
5783 	BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
5784 	BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
5785 	BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
5786 	BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV);
5787 	BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV);
5788 	BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO);
5789 	BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK);
5790 	BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);
5791 
5792 	return fs_umode_to_ftype(inode->i_mode);
5793 }
5794 
5795 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5796 {
5797 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5798 	struct inode *inode;
5799 	struct btrfs_root *root = BTRFS_I(dir)->root;
5800 	struct btrfs_root *sub_root = root;
5801 	struct btrfs_key location;
5802 	u8 di_type = 0;
5803 	int ret = 0;
5804 
5805 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5806 		return ERR_PTR(-ENAMETOOLONG);
5807 
5808 	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5809 	if (ret < 0)
5810 		return ERR_PTR(ret);
5811 
5812 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5813 		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5814 		if (IS_ERR(inode))
5815 			return inode;
5816 
5817 		/* Do extra check against inode mode with di_type */
5818 		if (btrfs_inode_type(inode) != di_type) {
5819 			btrfs_crit(fs_info,
5820 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5821 				  inode->i_mode, btrfs_inode_type(inode),
5822 				  di_type);
5823 			iput(inode);
5824 			return ERR_PTR(-EUCLEAN);
5825 		}
5826 		return inode;
5827 	}
5828 
5829 	ret = fixup_tree_root_location(fs_info, dir, dentry,
5830 				       &location, &sub_root);
5831 	if (ret < 0) {
5832 		if (ret != -ENOENT)
5833 			inode = ERR_PTR(ret);
5834 		else
5835 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5836 	} else {
5837 		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5838 	}
5839 	if (root != sub_root)
5840 		btrfs_put_root(sub_root);
5841 
5842 	if (!IS_ERR(inode) && root != sub_root) {
5843 		down_read(&fs_info->cleanup_work_sem);
5844 		if (!sb_rdonly(inode->i_sb))
5845 			ret = btrfs_orphan_cleanup(sub_root);
5846 		up_read(&fs_info->cleanup_work_sem);
5847 		if (ret) {
5848 			iput(inode);
5849 			inode = ERR_PTR(ret);
5850 		}
5851 	}
5852 
5853 	return inode;
5854 }
5855 
5856 static int btrfs_dentry_delete(const struct dentry *dentry)
5857 {
5858 	struct btrfs_root *root;
5859 	struct inode *inode = d_inode(dentry);
5860 
5861 	if (!inode && !IS_ROOT(dentry))
5862 		inode = d_inode(dentry->d_parent);
5863 
5864 	if (inode) {
5865 		root = BTRFS_I(inode)->root;
5866 		if (btrfs_root_refs(&root->root_item) == 0)
5867 			return 1;
5868 
5869 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5870 			return 1;
5871 	}
5872 	return 0;
5873 }
5874 
5875 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5876 				   unsigned int flags)
5877 {
5878 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5879 
5880 	if (inode == ERR_PTR(-ENOENT))
5881 		inode = NULL;
5882 	return d_splice_alias(inode, dentry);
5883 }
5884 
5885 /*
5886  * All this infrastructure exists because dir_emit can fault, and we are holding
5887  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5888  * our information into that, and then dir_emit from the buffer.  This is
5889  * similar to what NFS does, only we don't keep the buffer around in pagecache
5890  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5891  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5892  * tree lock.
5893  */
5894 static int btrfs_opendir(struct inode *inode, struct file *file)
5895 {
5896 	struct btrfs_file_private *private;
5897 
5898 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5899 	if (!private)
5900 		return -ENOMEM;
5901 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5902 	if (!private->filldir_buf) {
5903 		kfree(private);
5904 		return -ENOMEM;
5905 	}
5906 	file->private_data = private;
5907 	return 0;
5908 }
5909 
5910 struct dir_entry {
5911 	u64 ino;
5912 	u64 offset;
5913 	unsigned type;
5914 	int name_len;
5915 };
5916 
5917 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5918 {
5919 	while (entries--) {
5920 		struct dir_entry *entry = addr;
5921 		char *name = (char *)(entry + 1);
5922 
5923 		ctx->pos = get_unaligned(&entry->offset);
5924 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5925 					 get_unaligned(&entry->ino),
5926 					 get_unaligned(&entry->type)))
5927 			return 1;
5928 		addr += sizeof(struct dir_entry) +
5929 			get_unaligned(&entry->name_len);
5930 		ctx->pos++;
5931 	}
5932 	return 0;
5933 }
5934 
5935 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5936 {
5937 	struct inode *inode = file_inode(file);
5938 	struct btrfs_root *root = BTRFS_I(inode)->root;
5939 	struct btrfs_file_private *private = file->private_data;
5940 	struct btrfs_dir_item *di;
5941 	struct btrfs_key key;
5942 	struct btrfs_key found_key;
5943 	struct btrfs_path *path;
5944 	void *addr;
5945 	struct list_head ins_list;
5946 	struct list_head del_list;
5947 	int ret;
5948 	struct extent_buffer *leaf;
5949 	int slot;
5950 	char *name_ptr;
5951 	int name_len;
5952 	int entries = 0;
5953 	int total_len = 0;
5954 	bool put = false;
5955 	struct btrfs_key location;
5956 
5957 	if (!dir_emit_dots(file, ctx))
5958 		return 0;
5959 
5960 	path = btrfs_alloc_path();
5961 	if (!path)
5962 		return -ENOMEM;
5963 
5964 	addr = private->filldir_buf;
5965 	path->reada = READA_FORWARD;
5966 
5967 	INIT_LIST_HEAD(&ins_list);
5968 	INIT_LIST_HEAD(&del_list);
5969 	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5970 
5971 again:
5972 	key.type = BTRFS_DIR_INDEX_KEY;
5973 	key.offset = ctx->pos;
5974 	key.objectid = btrfs_ino(BTRFS_I(inode));
5975 
5976 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5977 	if (ret < 0)
5978 		goto err;
5979 
5980 	while (1) {
5981 		struct dir_entry *entry;
5982 
5983 		leaf = path->nodes[0];
5984 		slot = path->slots[0];
5985 		if (slot >= btrfs_header_nritems(leaf)) {
5986 			ret = btrfs_next_leaf(root, path);
5987 			if (ret < 0)
5988 				goto err;
5989 			else if (ret > 0)
5990 				break;
5991 			continue;
5992 		}
5993 
5994 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5995 
5996 		if (found_key.objectid != key.objectid)
5997 			break;
5998 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5999 			break;
6000 		if (found_key.offset < ctx->pos)
6001 			goto next;
6002 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6003 			goto next;
6004 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
6005 		name_len = btrfs_dir_name_len(leaf, di);
6006 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
6007 		    PAGE_SIZE) {
6008 			btrfs_release_path(path);
6009 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6010 			if (ret)
6011 				goto nopos;
6012 			addr = private->filldir_buf;
6013 			entries = 0;
6014 			total_len = 0;
6015 			goto again;
6016 		}
6017 
6018 		entry = addr;
6019 		put_unaligned(name_len, &entry->name_len);
6020 		name_ptr = (char *)(entry + 1);
6021 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6022 				   name_len);
6023 		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
6024 				&entry->type);
6025 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6026 		put_unaligned(location.objectid, &entry->ino);
6027 		put_unaligned(found_key.offset, &entry->offset);
6028 		entries++;
6029 		addr += sizeof(struct dir_entry) + name_len;
6030 		total_len += sizeof(struct dir_entry) + name_len;
6031 next:
6032 		path->slots[0]++;
6033 	}
6034 	btrfs_release_path(path);
6035 
6036 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6037 	if (ret)
6038 		goto nopos;
6039 
6040 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6041 	if (ret)
6042 		goto nopos;
6043 
6044 	/*
6045 	 * Stop new entries from being returned after we return the last
6046 	 * entry.
6047 	 *
6048 	 * New directory entries are assigned a strictly increasing
6049 	 * offset.  This means that new entries created during readdir
6050 	 * are *guaranteed* to be seen in the future by that readdir.
6051 	 * This has broken buggy programs which operate on names as
6052 	 * they're returned by readdir.  Until we re-use freed offsets
6053 	 * we have this hack to stop new entries from being returned
6054 	 * under the assumption that they'll never reach this huge
6055 	 * offset.
6056 	 *
6057 	 * This is being careful not to overflow 32bit loff_t unless the
6058 	 * last entry requires it because doing so has broken 32bit apps
6059 	 * in the past.
6060 	 */
6061 	if (ctx->pos >= INT_MAX)
6062 		ctx->pos = LLONG_MAX;
6063 	else
6064 		ctx->pos = INT_MAX;
6065 nopos:
6066 	ret = 0;
6067 err:
6068 	if (put)
6069 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6070 	btrfs_free_path(path);
6071 	return ret;
6072 }
6073 
6074 /*
6075  * This is somewhat expensive, updating the tree every time the
6076  * inode changes.  But, it is most likely to find the inode in cache.
6077  * FIXME, needs more benchmarking...there are no reasons other than performance
6078  * to keep or drop this code.
6079  */
6080 static int btrfs_dirty_inode(struct inode *inode)
6081 {
6082 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6083 	struct btrfs_root *root = BTRFS_I(inode)->root;
6084 	struct btrfs_trans_handle *trans;
6085 	int ret;
6086 
6087 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6088 		return 0;
6089 
6090 	trans = btrfs_join_transaction(root);
6091 	if (IS_ERR(trans))
6092 		return PTR_ERR(trans);
6093 
6094 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6095 	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
6096 		/* whoops, lets try again with the full transaction */
6097 		btrfs_end_transaction(trans);
6098 		trans = btrfs_start_transaction(root, 1);
6099 		if (IS_ERR(trans))
6100 			return PTR_ERR(trans);
6101 
6102 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
6103 	}
6104 	btrfs_end_transaction(trans);
6105 	if (BTRFS_I(inode)->delayed_node)
6106 		btrfs_balance_delayed_items(fs_info);
6107 
6108 	return ret;
6109 }
6110 
6111 /*
6112  * This is a copy of file_update_time.  We need this so we can return error on
6113  * ENOSPC for updating the inode in the case of file write and mmap writes.
6114  */
6115 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6116 			     int flags)
6117 {
6118 	struct btrfs_root *root = BTRFS_I(inode)->root;
6119 	bool dirty = flags & ~S_VERSION;
6120 
6121 	if (btrfs_root_readonly(root))
6122 		return -EROFS;
6123 
6124 	if (flags & S_VERSION)
6125 		dirty |= inode_maybe_inc_iversion(inode, dirty);
6126 	if (flags & S_CTIME)
6127 		inode->i_ctime = *now;
6128 	if (flags & S_MTIME)
6129 		inode->i_mtime = *now;
6130 	if (flags & S_ATIME)
6131 		inode->i_atime = *now;
6132 	return dirty ? btrfs_dirty_inode(inode) : 0;
6133 }
6134 
6135 /*
6136  * find the highest existing sequence number in a directory
6137  * and then set the in-memory index_cnt variable to reflect
6138  * free sequence numbers
6139  */
6140 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6141 {
6142 	struct btrfs_root *root = inode->root;
6143 	struct btrfs_key key, found_key;
6144 	struct btrfs_path *path;
6145 	struct extent_buffer *leaf;
6146 	int ret;
6147 
6148 	key.objectid = btrfs_ino(inode);
6149 	key.type = BTRFS_DIR_INDEX_KEY;
6150 	key.offset = (u64)-1;
6151 
6152 	path = btrfs_alloc_path();
6153 	if (!path)
6154 		return -ENOMEM;
6155 
6156 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6157 	if (ret < 0)
6158 		goto out;
6159 	/* FIXME: we should be able to handle this */
6160 	if (ret == 0)
6161 		goto out;
6162 	ret = 0;
6163 
6164 	/*
6165 	 * MAGIC NUMBER EXPLANATION:
6166 	 * since we search a directory based on f_pos we have to start at 2
6167 	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
6168 	 * else has to start at 2
6169 	 */
6170 	if (path->slots[0] == 0) {
6171 		inode->index_cnt = 2;
6172 		goto out;
6173 	}
6174 
6175 	path->slots[0]--;
6176 
6177 	leaf = path->nodes[0];
6178 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6179 
6180 	if (found_key.objectid != btrfs_ino(inode) ||
6181 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6182 		inode->index_cnt = 2;
6183 		goto out;
6184 	}
6185 
6186 	inode->index_cnt = found_key.offset + 1;
6187 out:
6188 	btrfs_free_path(path);
6189 	return ret;
6190 }
6191 
6192 /*
6193  * helper to find a free sequence number in a given directory.  This current
6194  * code is very simple, later versions will do smarter things in the btree
6195  */
6196 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6197 {
6198 	int ret = 0;
6199 
6200 	if (dir->index_cnt == (u64)-1) {
6201 		ret = btrfs_inode_delayed_dir_index_count(dir);
6202 		if (ret) {
6203 			ret = btrfs_set_inode_index_count(dir);
6204 			if (ret)
6205 				return ret;
6206 		}
6207 	}
6208 
6209 	*index = dir->index_cnt;
6210 	dir->index_cnt++;
6211 
6212 	return ret;
6213 }
6214 
6215 static int btrfs_insert_inode_locked(struct inode *inode)
6216 {
6217 	struct btrfs_iget_args args;
6218 
6219 	args.ino = BTRFS_I(inode)->location.objectid;
6220 	args.root = BTRFS_I(inode)->root;
6221 
6222 	return insert_inode_locked4(inode,
6223 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6224 		   btrfs_find_actor, &args);
6225 }
6226 
6227 /*
6228  * Inherit flags from the parent inode.
6229  *
6230  * Currently only the compression flags and the cow flags are inherited.
6231  */
6232 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6233 {
6234 	unsigned int flags;
6235 
6236 	if (!dir)
6237 		return;
6238 
6239 	flags = BTRFS_I(dir)->flags;
6240 
6241 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6242 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6243 		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6244 	} else if (flags & BTRFS_INODE_COMPRESS) {
6245 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6246 		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6247 	}
6248 
6249 	if (flags & BTRFS_INODE_NODATACOW) {
6250 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6251 		if (S_ISREG(inode->i_mode))
6252 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6253 	}
6254 
6255 	btrfs_sync_inode_flags_to_i_flags(inode);
6256 }
6257 
6258 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6259 				     struct btrfs_root *root,
6260 				     struct inode *dir,
6261 				     const char *name, int name_len,
6262 				     u64 ref_objectid, u64 objectid,
6263 				     umode_t mode, u64 *index)
6264 {
6265 	struct btrfs_fs_info *fs_info = root->fs_info;
6266 	struct inode *inode;
6267 	struct btrfs_inode_item *inode_item;
6268 	struct btrfs_key *location;
6269 	struct btrfs_path *path;
6270 	struct btrfs_inode_ref *ref;
6271 	struct btrfs_key key[2];
6272 	u32 sizes[2];
6273 	int nitems = name ? 2 : 1;
6274 	unsigned long ptr;
6275 	unsigned int nofs_flag;
6276 	int ret;
6277 
6278 	path = btrfs_alloc_path();
6279 	if (!path)
6280 		return ERR_PTR(-ENOMEM);
6281 
6282 	nofs_flag = memalloc_nofs_save();
6283 	inode = new_inode(fs_info->sb);
6284 	memalloc_nofs_restore(nofs_flag);
6285 	if (!inode) {
6286 		btrfs_free_path(path);
6287 		return ERR_PTR(-ENOMEM);
6288 	}
6289 
6290 	/*
6291 	 * O_TMPFILE, set link count to 0, so that after this point,
6292 	 * we fill in an inode item with the correct link count.
6293 	 */
6294 	if (!name)
6295 		set_nlink(inode, 0);
6296 
6297 	/*
6298 	 * we have to initialize this early, so we can reclaim the inode
6299 	 * number if we fail afterwards in this function.
6300 	 */
6301 	inode->i_ino = objectid;
6302 
6303 	if (dir && name) {
6304 		trace_btrfs_inode_request(dir);
6305 
6306 		ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6307 		if (ret) {
6308 			btrfs_free_path(path);
6309 			iput(inode);
6310 			return ERR_PTR(ret);
6311 		}
6312 	} else if (dir) {
6313 		*index = 0;
6314 	}
6315 	/*
6316 	 * index_cnt is ignored for everything but a dir,
6317 	 * btrfs_set_inode_index_count has an explanation for the magic
6318 	 * number
6319 	 */
6320 	BTRFS_I(inode)->index_cnt = 2;
6321 	BTRFS_I(inode)->dir_index = *index;
6322 	BTRFS_I(inode)->root = btrfs_grab_root(root);
6323 	BTRFS_I(inode)->generation = trans->transid;
6324 	inode->i_generation = BTRFS_I(inode)->generation;
6325 
6326 	/*
6327 	 * We could have gotten an inode number from somebody who was fsynced
6328 	 * and then removed in this same transaction, so let's just set full
6329 	 * sync since it will be a full sync anyway and this will blow away the
6330 	 * old info in the log.
6331 	 */
6332 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6333 
6334 	key[0].objectid = objectid;
6335 	key[0].type = BTRFS_INODE_ITEM_KEY;
6336 	key[0].offset = 0;
6337 
6338 	sizes[0] = sizeof(struct btrfs_inode_item);
6339 
6340 	if (name) {
6341 		/*
6342 		 * Start new inodes with an inode_ref. This is slightly more
6343 		 * efficient for small numbers of hard links since they will
6344 		 * be packed into one item. Extended refs will kick in if we
6345 		 * add more hard links than can fit in the ref item.
6346 		 */
6347 		key[1].objectid = objectid;
6348 		key[1].type = BTRFS_INODE_REF_KEY;
6349 		key[1].offset = ref_objectid;
6350 
6351 		sizes[1] = name_len + sizeof(*ref);
6352 	}
6353 
6354 	location = &BTRFS_I(inode)->location;
6355 	location->objectid = objectid;
6356 	location->offset = 0;
6357 	location->type = BTRFS_INODE_ITEM_KEY;
6358 
6359 	ret = btrfs_insert_inode_locked(inode);
6360 	if (ret < 0) {
6361 		iput(inode);
6362 		goto fail;
6363 	}
6364 
6365 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6366 	if (ret != 0)
6367 		goto fail_unlock;
6368 
6369 	inode_init_owner(&init_user_ns, inode, dir, mode);
6370 	inode_set_bytes(inode, 0);
6371 
6372 	inode->i_mtime = current_time(inode);
6373 	inode->i_atime = inode->i_mtime;
6374 	inode->i_ctime = inode->i_mtime;
6375 	BTRFS_I(inode)->i_otime = inode->i_mtime;
6376 
6377 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6378 				  struct btrfs_inode_item);
6379 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6380 			     sizeof(*inode_item));
6381 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6382 
6383 	if (name) {
6384 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6385 				     struct btrfs_inode_ref);
6386 		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6387 		btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6388 		ptr = (unsigned long)(ref + 1);
6389 		write_extent_buffer(path->nodes[0], name, ptr, name_len);
6390 	}
6391 
6392 	btrfs_mark_buffer_dirty(path->nodes[0]);
6393 	btrfs_free_path(path);
6394 
6395 	btrfs_inherit_iflags(inode, dir);
6396 
6397 	if (S_ISREG(mode)) {
6398 		if (btrfs_test_opt(fs_info, NODATASUM))
6399 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6400 		if (btrfs_test_opt(fs_info, NODATACOW))
6401 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6402 				BTRFS_INODE_NODATASUM;
6403 	}
6404 
6405 	inode_tree_add(inode);
6406 
6407 	trace_btrfs_inode_new(inode);
6408 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6409 
6410 	btrfs_update_root_times(trans, root);
6411 
6412 	ret = btrfs_inode_inherit_props(trans, inode, dir);
6413 	if (ret)
6414 		btrfs_err(fs_info,
6415 			  "error inheriting props for ino %llu (root %llu): %d",
6416 			btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6417 
6418 	return inode;
6419 
6420 fail_unlock:
6421 	discard_new_inode(inode);
6422 fail:
6423 	if (dir && name)
6424 		BTRFS_I(dir)->index_cnt--;
6425 	btrfs_free_path(path);
6426 	return ERR_PTR(ret);
6427 }
6428 
6429 /*
6430  * utility function to add 'inode' into 'parent_inode' with
6431  * a give name and a given sequence number.
6432  * if 'add_backref' is true, also insert a backref from the
6433  * inode to the parent directory.
6434  */
6435 int btrfs_add_link(struct btrfs_trans_handle *trans,
6436 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6437 		   const char *name, int name_len, int add_backref, u64 index)
6438 {
6439 	int ret = 0;
6440 	struct btrfs_key key;
6441 	struct btrfs_root *root = parent_inode->root;
6442 	u64 ino = btrfs_ino(inode);
6443 	u64 parent_ino = btrfs_ino(parent_inode);
6444 
6445 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6446 		memcpy(&key, &inode->root->root_key, sizeof(key));
6447 	} else {
6448 		key.objectid = ino;
6449 		key.type = BTRFS_INODE_ITEM_KEY;
6450 		key.offset = 0;
6451 	}
6452 
6453 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6454 		ret = btrfs_add_root_ref(trans, key.objectid,
6455 					 root->root_key.objectid, parent_ino,
6456 					 index, name, name_len);
6457 	} else if (add_backref) {
6458 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6459 					     parent_ino, index);
6460 	}
6461 
6462 	/* Nothing to clean up yet */
6463 	if (ret)
6464 		return ret;
6465 
6466 	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
6467 				    btrfs_inode_type(&inode->vfs_inode), index);
6468 	if (ret == -EEXIST || ret == -EOVERFLOW)
6469 		goto fail_dir_item;
6470 	else if (ret) {
6471 		btrfs_abort_transaction(trans, ret);
6472 		return ret;
6473 	}
6474 
6475 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6476 			   name_len * 2);
6477 	inode_inc_iversion(&parent_inode->vfs_inode);
6478 	/*
6479 	 * If we are replaying a log tree, we do not want to update the mtime
6480 	 * and ctime of the parent directory with the current time, since the
6481 	 * log replay procedure is responsible for setting them to their correct
6482 	 * values (the ones it had when the fsync was done).
6483 	 */
6484 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
6485 		struct timespec64 now = current_time(&parent_inode->vfs_inode);
6486 
6487 		parent_inode->vfs_inode.i_mtime = now;
6488 		parent_inode->vfs_inode.i_ctime = now;
6489 	}
6490 	ret = btrfs_update_inode(trans, root, parent_inode);
6491 	if (ret)
6492 		btrfs_abort_transaction(trans, ret);
6493 	return ret;
6494 
6495 fail_dir_item:
6496 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6497 		u64 local_index;
6498 		int err;
6499 		err = btrfs_del_root_ref(trans, key.objectid,
6500 					 root->root_key.objectid, parent_ino,
6501 					 &local_index, name, name_len);
6502 		if (err)
6503 			btrfs_abort_transaction(trans, err);
6504 	} else if (add_backref) {
6505 		u64 local_index;
6506 		int err;
6507 
6508 		err = btrfs_del_inode_ref(trans, root, name, name_len,
6509 					  ino, parent_ino, &local_index);
6510 		if (err)
6511 			btrfs_abort_transaction(trans, err);
6512 	}
6513 
6514 	/* Return the original error code */
6515 	return ret;
6516 }
6517 
6518 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6519 			    struct btrfs_inode *dir, struct dentry *dentry,
6520 			    struct btrfs_inode *inode, int backref, u64 index)
6521 {
6522 	int err = btrfs_add_link(trans, dir, inode,
6523 				 dentry->d_name.name, dentry->d_name.len,
6524 				 backref, index);
6525 	if (err > 0)
6526 		err = -EEXIST;
6527 	return err;
6528 }
6529 
6530 static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
6531 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6532 {
6533 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6534 	struct btrfs_trans_handle *trans;
6535 	struct btrfs_root *root = BTRFS_I(dir)->root;
6536 	struct inode *inode = NULL;
6537 	int err;
6538 	u64 objectid;
6539 	u64 index = 0;
6540 
6541 	/*
6542 	 * 2 for inode item and ref
6543 	 * 2 for dir items
6544 	 * 1 for xattr if selinux is on
6545 	 */
6546 	trans = btrfs_start_transaction(root, 5);
6547 	if (IS_ERR(trans))
6548 		return PTR_ERR(trans);
6549 
6550 	err = btrfs_get_free_objectid(root, &objectid);
6551 	if (err)
6552 		goto out_unlock;
6553 
6554 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6555 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6556 			mode, &index);
6557 	if (IS_ERR(inode)) {
6558 		err = PTR_ERR(inode);
6559 		inode = NULL;
6560 		goto out_unlock;
6561 	}
6562 
6563 	/*
6564 	* If the active LSM wants to access the inode during
6565 	* d_instantiate it needs these. Smack checks to see
6566 	* if the filesystem supports xattrs by looking at the
6567 	* ops vector.
6568 	*/
6569 	inode->i_op = &btrfs_special_inode_operations;
6570 	init_special_inode(inode, inode->i_mode, rdev);
6571 
6572 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6573 	if (err)
6574 		goto out_unlock;
6575 
6576 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6577 			0, index);
6578 	if (err)
6579 		goto out_unlock;
6580 
6581 	btrfs_update_inode(trans, root, BTRFS_I(inode));
6582 	d_instantiate_new(dentry, inode);
6583 
6584 out_unlock:
6585 	btrfs_end_transaction(trans);
6586 	btrfs_btree_balance_dirty(fs_info);
6587 	if (err && inode) {
6588 		inode_dec_link_count(inode);
6589 		discard_new_inode(inode);
6590 	}
6591 	return err;
6592 }
6593 
6594 static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
6595 			struct dentry *dentry, umode_t mode, bool excl)
6596 {
6597 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6598 	struct btrfs_trans_handle *trans;
6599 	struct btrfs_root *root = BTRFS_I(dir)->root;
6600 	struct inode *inode = NULL;
6601 	int err;
6602 	u64 objectid;
6603 	u64 index = 0;
6604 
6605 	/*
6606 	 * 2 for inode item and ref
6607 	 * 2 for dir items
6608 	 * 1 for xattr if selinux is on
6609 	 */
6610 	trans = btrfs_start_transaction(root, 5);
6611 	if (IS_ERR(trans))
6612 		return PTR_ERR(trans);
6613 
6614 	err = btrfs_get_free_objectid(root, &objectid);
6615 	if (err)
6616 		goto out_unlock;
6617 
6618 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6619 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6620 			mode, &index);
6621 	if (IS_ERR(inode)) {
6622 		err = PTR_ERR(inode);
6623 		inode = NULL;
6624 		goto out_unlock;
6625 	}
6626 	/*
6627 	* If the active LSM wants to access the inode during
6628 	* d_instantiate it needs these. Smack checks to see
6629 	* if the filesystem supports xattrs by looking at the
6630 	* ops vector.
6631 	*/
6632 	inode->i_fop = &btrfs_file_operations;
6633 	inode->i_op = &btrfs_file_inode_operations;
6634 	inode->i_mapping->a_ops = &btrfs_aops;
6635 
6636 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6637 	if (err)
6638 		goto out_unlock;
6639 
6640 	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
6641 	if (err)
6642 		goto out_unlock;
6643 
6644 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6645 			0, index);
6646 	if (err)
6647 		goto out_unlock;
6648 
6649 	d_instantiate_new(dentry, inode);
6650 
6651 out_unlock:
6652 	btrfs_end_transaction(trans);
6653 	if (err && inode) {
6654 		inode_dec_link_count(inode);
6655 		discard_new_inode(inode);
6656 	}
6657 	btrfs_btree_balance_dirty(fs_info);
6658 	return err;
6659 }
6660 
6661 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6662 		      struct dentry *dentry)
6663 {
6664 	struct btrfs_trans_handle *trans = NULL;
6665 	struct btrfs_root *root = BTRFS_I(dir)->root;
6666 	struct inode *inode = d_inode(old_dentry);
6667 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6668 	u64 index;
6669 	int err;
6670 	int drop_inode = 0;
6671 
6672 	/* do not allow sys_link's with other subvols of the same device */
6673 	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6674 		return -EXDEV;
6675 
6676 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6677 		return -EMLINK;
6678 
6679 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6680 	if (err)
6681 		goto fail;
6682 
6683 	/*
6684 	 * 2 items for inode and inode ref
6685 	 * 2 items for dir items
6686 	 * 1 item for parent inode
6687 	 * 1 item for orphan item deletion if O_TMPFILE
6688 	 */
6689 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6690 	if (IS_ERR(trans)) {
6691 		err = PTR_ERR(trans);
6692 		trans = NULL;
6693 		goto fail;
6694 	}
6695 
6696 	/* There are several dir indexes for this inode, clear the cache. */
6697 	BTRFS_I(inode)->dir_index = 0ULL;
6698 	inc_nlink(inode);
6699 	inode_inc_iversion(inode);
6700 	inode->i_ctime = current_time(inode);
6701 	ihold(inode);
6702 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6703 
6704 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6705 			1, index);
6706 
6707 	if (err) {
6708 		drop_inode = 1;
6709 	} else {
6710 		struct dentry *parent = dentry->d_parent;
6711 
6712 		err = btrfs_update_inode(trans, root, BTRFS_I(inode));
6713 		if (err)
6714 			goto fail;
6715 		if (inode->i_nlink == 1) {
6716 			/*
6717 			 * If new hard link count is 1, it's a file created
6718 			 * with open(2) O_TMPFILE flag.
6719 			 */
6720 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6721 			if (err)
6722 				goto fail;
6723 		}
6724 		d_instantiate(dentry, inode);
6725 		btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
6726 	}
6727 
6728 fail:
6729 	if (trans)
6730 		btrfs_end_transaction(trans);
6731 	if (drop_inode) {
6732 		inode_dec_link_count(inode);
6733 		iput(inode);
6734 	}
6735 	btrfs_btree_balance_dirty(fs_info);
6736 	return err;
6737 }
6738 
6739 static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
6740 		       struct dentry *dentry, umode_t mode)
6741 {
6742 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6743 	struct inode *inode = NULL;
6744 	struct btrfs_trans_handle *trans;
6745 	struct btrfs_root *root = BTRFS_I(dir)->root;
6746 	int err = 0;
6747 	u64 objectid = 0;
6748 	u64 index = 0;
6749 
6750 	/*
6751 	 * 2 items for inode and ref
6752 	 * 2 items for dir items
6753 	 * 1 for xattr if selinux is on
6754 	 */
6755 	trans = btrfs_start_transaction(root, 5);
6756 	if (IS_ERR(trans))
6757 		return PTR_ERR(trans);
6758 
6759 	err = btrfs_get_free_objectid(root, &objectid);
6760 	if (err)
6761 		goto out_fail;
6762 
6763 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6764 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6765 			S_IFDIR | mode, &index);
6766 	if (IS_ERR(inode)) {
6767 		err = PTR_ERR(inode);
6768 		inode = NULL;
6769 		goto out_fail;
6770 	}
6771 
6772 	/* these must be set before we unlock the inode */
6773 	inode->i_op = &btrfs_dir_inode_operations;
6774 	inode->i_fop = &btrfs_dir_file_operations;
6775 
6776 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6777 	if (err)
6778 		goto out_fail;
6779 
6780 	btrfs_i_size_write(BTRFS_I(inode), 0);
6781 	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
6782 	if (err)
6783 		goto out_fail;
6784 
6785 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6786 			dentry->d_name.name,
6787 			dentry->d_name.len, 0, index);
6788 	if (err)
6789 		goto out_fail;
6790 
6791 	d_instantiate_new(dentry, inode);
6792 
6793 out_fail:
6794 	btrfs_end_transaction(trans);
6795 	if (err && inode) {
6796 		inode_dec_link_count(inode);
6797 		discard_new_inode(inode);
6798 	}
6799 	btrfs_btree_balance_dirty(fs_info);
6800 	return err;
6801 }
6802 
6803 static noinline int uncompress_inline(struct btrfs_path *path,
6804 				      struct page *page,
6805 				      size_t pg_offset, u64 extent_offset,
6806 				      struct btrfs_file_extent_item *item)
6807 {
6808 	int ret;
6809 	struct extent_buffer *leaf = path->nodes[0];
6810 	char *tmp;
6811 	size_t max_size;
6812 	unsigned long inline_size;
6813 	unsigned long ptr;
6814 	int compress_type;
6815 
6816 	WARN_ON(pg_offset != 0);
6817 	compress_type = btrfs_file_extent_compression(leaf, item);
6818 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6819 	inline_size = btrfs_file_extent_inline_item_len(leaf,
6820 					btrfs_item_nr(path->slots[0]));
6821 	tmp = kmalloc(inline_size, GFP_NOFS);
6822 	if (!tmp)
6823 		return -ENOMEM;
6824 	ptr = btrfs_file_extent_inline_start(item);
6825 
6826 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6827 
6828 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6829 	ret = btrfs_decompress(compress_type, tmp, page,
6830 			       extent_offset, inline_size, max_size);
6831 
6832 	/*
6833 	 * decompression code contains a memset to fill in any space between the end
6834 	 * of the uncompressed data and the end of max_size in case the decompressed
6835 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6836 	 * the end of an inline extent and the beginning of the next block, so we
6837 	 * cover that region here.
6838 	 */
6839 
6840 	if (max_size + pg_offset < PAGE_SIZE)
6841 		memzero_page(page,  pg_offset + max_size,
6842 			     PAGE_SIZE - max_size - pg_offset);
6843 	kfree(tmp);
6844 	return ret;
6845 }
6846 
6847 /**
6848  * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6849  * @inode:	file to search in
6850  * @page:	page to read extent data into if the extent is inline
6851  * @pg_offset:	offset into @page to copy to
6852  * @start:	file offset
6853  * @len:	length of range starting at @start
6854  *
6855  * This returns the first &struct extent_map which overlaps with the given
6856  * range, reading it from the B-tree and caching it if necessary. Note that
6857  * there may be more extents which overlap the given range after the returned
6858  * extent_map.
6859  *
6860  * If @page is not NULL and the extent is inline, this also reads the extent
6861  * data directly into the page and marks the extent up to date in the io_tree.
6862  *
6863  * Return: ERR_PTR on error, non-NULL extent_map on success.
6864  */
6865 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6866 				    struct page *page, size_t pg_offset,
6867 				    u64 start, u64 len)
6868 {
6869 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6870 	int ret = 0;
6871 	u64 extent_start = 0;
6872 	u64 extent_end = 0;
6873 	u64 objectid = btrfs_ino(inode);
6874 	int extent_type = -1;
6875 	struct btrfs_path *path = NULL;
6876 	struct btrfs_root *root = inode->root;
6877 	struct btrfs_file_extent_item *item;
6878 	struct extent_buffer *leaf;
6879 	struct btrfs_key found_key;
6880 	struct extent_map *em = NULL;
6881 	struct extent_map_tree *em_tree = &inode->extent_tree;
6882 	struct extent_io_tree *io_tree = &inode->io_tree;
6883 
6884 	read_lock(&em_tree->lock);
6885 	em = lookup_extent_mapping(em_tree, start, len);
6886 	read_unlock(&em_tree->lock);
6887 
6888 	if (em) {
6889 		if (em->start > start || em->start + em->len <= start)
6890 			free_extent_map(em);
6891 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6892 			free_extent_map(em);
6893 		else
6894 			goto out;
6895 	}
6896 	em = alloc_extent_map();
6897 	if (!em) {
6898 		ret = -ENOMEM;
6899 		goto out;
6900 	}
6901 	em->start = EXTENT_MAP_HOLE;
6902 	em->orig_start = EXTENT_MAP_HOLE;
6903 	em->len = (u64)-1;
6904 	em->block_len = (u64)-1;
6905 
6906 	path = btrfs_alloc_path();
6907 	if (!path) {
6908 		ret = -ENOMEM;
6909 		goto out;
6910 	}
6911 
6912 	/* Chances are we'll be called again, so go ahead and do readahead */
6913 	path->reada = READA_FORWARD;
6914 
6915 	/*
6916 	 * The same explanation in load_free_space_cache applies here as well,
6917 	 * we only read when we're loading the free space cache, and at that
6918 	 * point the commit_root has everything we need.
6919 	 */
6920 	if (btrfs_is_free_space_inode(inode)) {
6921 		path->search_commit_root = 1;
6922 		path->skip_locking = 1;
6923 	}
6924 
6925 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6926 	if (ret < 0) {
6927 		goto out;
6928 	} else if (ret > 0) {
6929 		if (path->slots[0] == 0)
6930 			goto not_found;
6931 		path->slots[0]--;
6932 		ret = 0;
6933 	}
6934 
6935 	leaf = path->nodes[0];
6936 	item = btrfs_item_ptr(leaf, path->slots[0],
6937 			      struct btrfs_file_extent_item);
6938 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6939 	if (found_key.objectid != objectid ||
6940 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6941 		/*
6942 		 * If we backup past the first extent we want to move forward
6943 		 * and see if there is an extent in front of us, otherwise we'll
6944 		 * say there is a hole for our whole search range which can
6945 		 * cause problems.
6946 		 */
6947 		extent_end = start;
6948 		goto next;
6949 	}
6950 
6951 	extent_type = btrfs_file_extent_type(leaf, item);
6952 	extent_start = found_key.offset;
6953 	extent_end = btrfs_file_extent_end(path);
6954 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6955 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6956 		/* Only regular file could have regular/prealloc extent */
6957 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6958 			ret = -EUCLEAN;
6959 			btrfs_crit(fs_info,
6960 		"regular/prealloc extent found for non-regular inode %llu",
6961 				   btrfs_ino(inode));
6962 			goto out;
6963 		}
6964 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6965 						       extent_start);
6966 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6967 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6968 						      path->slots[0],
6969 						      extent_start);
6970 	}
6971 next:
6972 	if (start >= extent_end) {
6973 		path->slots[0]++;
6974 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6975 			ret = btrfs_next_leaf(root, path);
6976 			if (ret < 0)
6977 				goto out;
6978 			else if (ret > 0)
6979 				goto not_found;
6980 
6981 			leaf = path->nodes[0];
6982 		}
6983 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6984 		if (found_key.objectid != objectid ||
6985 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6986 			goto not_found;
6987 		if (start + len <= found_key.offset)
6988 			goto not_found;
6989 		if (start > found_key.offset)
6990 			goto next;
6991 
6992 		/* New extent overlaps with existing one */
6993 		em->start = start;
6994 		em->orig_start = start;
6995 		em->len = found_key.offset - start;
6996 		em->block_start = EXTENT_MAP_HOLE;
6997 		goto insert;
6998 	}
6999 
7000 	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
7001 
7002 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
7003 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7004 		goto insert;
7005 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7006 		unsigned long ptr;
7007 		char *map;
7008 		size_t size;
7009 		size_t extent_offset;
7010 		size_t copy_size;
7011 
7012 		if (!page)
7013 			goto out;
7014 
7015 		size = btrfs_file_extent_ram_bytes(leaf, item);
7016 		extent_offset = page_offset(page) + pg_offset - extent_start;
7017 		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
7018 				  size - extent_offset);
7019 		em->start = extent_start + extent_offset;
7020 		em->len = ALIGN(copy_size, fs_info->sectorsize);
7021 		em->orig_block_len = em->len;
7022 		em->orig_start = em->start;
7023 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
7024 
7025 		if (!PageUptodate(page)) {
7026 			if (btrfs_file_extent_compression(leaf, item) !=
7027 			    BTRFS_COMPRESS_NONE) {
7028 				ret = uncompress_inline(path, page, pg_offset,
7029 							extent_offset, item);
7030 				if (ret)
7031 					goto out;
7032 			} else {
7033 				map = kmap_local_page(page);
7034 				read_extent_buffer(leaf, map + pg_offset, ptr,
7035 						   copy_size);
7036 				if (pg_offset + copy_size < PAGE_SIZE) {
7037 					memset(map + pg_offset + copy_size, 0,
7038 					       PAGE_SIZE - pg_offset -
7039 					       copy_size);
7040 				}
7041 				kunmap_local(map);
7042 			}
7043 			flush_dcache_page(page);
7044 		}
7045 		set_extent_uptodate(io_tree, em->start,
7046 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
7047 		goto insert;
7048 	}
7049 not_found:
7050 	em->start = start;
7051 	em->orig_start = start;
7052 	em->len = len;
7053 	em->block_start = EXTENT_MAP_HOLE;
7054 insert:
7055 	ret = 0;
7056 	btrfs_release_path(path);
7057 	if (em->start > start || extent_map_end(em) <= start) {
7058 		btrfs_err(fs_info,
7059 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
7060 			  em->start, em->len, start, len);
7061 		ret = -EIO;
7062 		goto out;
7063 	}
7064 
7065 	write_lock(&em_tree->lock);
7066 	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
7067 	write_unlock(&em_tree->lock);
7068 out:
7069 	btrfs_free_path(path);
7070 
7071 	trace_btrfs_get_extent(root, inode, em);
7072 
7073 	if (ret) {
7074 		free_extent_map(em);
7075 		return ERR_PTR(ret);
7076 	}
7077 	return em;
7078 }
7079 
7080 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7081 					   u64 start, u64 len)
7082 {
7083 	struct extent_map *em;
7084 	struct extent_map *hole_em = NULL;
7085 	u64 delalloc_start = start;
7086 	u64 end;
7087 	u64 delalloc_len;
7088 	u64 delalloc_end;
7089 	int err = 0;
7090 
7091 	em = btrfs_get_extent(inode, NULL, 0, start, len);
7092 	if (IS_ERR(em))
7093 		return em;
7094 	/*
7095 	 * If our em maps to:
7096 	 * - a hole or
7097 	 * - a pre-alloc extent,
7098 	 * there might actually be delalloc bytes behind it.
7099 	 */
7100 	if (em->block_start != EXTENT_MAP_HOLE &&
7101 	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7102 		return em;
7103 	else
7104 		hole_em = em;
7105 
7106 	/* check to see if we've wrapped (len == -1 or similar) */
7107 	end = start + len;
7108 	if (end < start)
7109 		end = (u64)-1;
7110 	else
7111 		end -= 1;
7112 
7113 	em = NULL;
7114 
7115 	/* ok, we didn't find anything, lets look for delalloc */
7116 	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
7117 				 end, len, EXTENT_DELALLOC, 1);
7118 	delalloc_end = delalloc_start + delalloc_len;
7119 	if (delalloc_end < delalloc_start)
7120 		delalloc_end = (u64)-1;
7121 
7122 	/*
7123 	 * We didn't find anything useful, return the original results from
7124 	 * get_extent()
7125 	 */
7126 	if (delalloc_start > end || delalloc_end <= start) {
7127 		em = hole_em;
7128 		hole_em = NULL;
7129 		goto out;
7130 	}
7131 
7132 	/*
7133 	 * Adjust the delalloc_start to make sure it doesn't go backwards from
7134 	 * the start they passed in
7135 	 */
7136 	delalloc_start = max(start, delalloc_start);
7137 	delalloc_len = delalloc_end - delalloc_start;
7138 
7139 	if (delalloc_len > 0) {
7140 		u64 hole_start;
7141 		u64 hole_len;
7142 		const u64 hole_end = extent_map_end(hole_em);
7143 
7144 		em = alloc_extent_map();
7145 		if (!em) {
7146 			err = -ENOMEM;
7147 			goto out;
7148 		}
7149 
7150 		ASSERT(hole_em);
7151 		/*
7152 		 * When btrfs_get_extent can't find anything it returns one
7153 		 * huge hole
7154 		 *
7155 		 * Make sure what it found really fits our range, and adjust to
7156 		 * make sure it is based on the start from the caller
7157 		 */
7158 		if (hole_end <= start || hole_em->start > end) {
7159 		       free_extent_map(hole_em);
7160 		       hole_em = NULL;
7161 		} else {
7162 		       hole_start = max(hole_em->start, start);
7163 		       hole_len = hole_end - hole_start;
7164 		}
7165 
7166 		if (hole_em && delalloc_start > hole_start) {
7167 			/*
7168 			 * Our hole starts before our delalloc, so we have to
7169 			 * return just the parts of the hole that go until the
7170 			 * delalloc starts
7171 			 */
7172 			em->len = min(hole_len, delalloc_start - hole_start);
7173 			em->start = hole_start;
7174 			em->orig_start = hole_start;
7175 			/*
7176 			 * Don't adjust block start at all, it is fixed at
7177 			 * EXTENT_MAP_HOLE
7178 			 */
7179 			em->block_start = hole_em->block_start;
7180 			em->block_len = hole_len;
7181 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7182 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7183 		} else {
7184 			/*
7185 			 * Hole is out of passed range or it starts after
7186 			 * delalloc range
7187 			 */
7188 			em->start = delalloc_start;
7189 			em->len = delalloc_len;
7190 			em->orig_start = delalloc_start;
7191 			em->block_start = EXTENT_MAP_DELALLOC;
7192 			em->block_len = delalloc_len;
7193 		}
7194 	} else {
7195 		return hole_em;
7196 	}
7197 out:
7198 
7199 	free_extent_map(hole_em);
7200 	if (err) {
7201 		free_extent_map(em);
7202 		return ERR_PTR(err);
7203 	}
7204 	return em;
7205 }
7206 
7207 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
7208 						  const u64 start,
7209 						  const u64 len,
7210 						  const u64 orig_start,
7211 						  const u64 block_start,
7212 						  const u64 block_len,
7213 						  const u64 orig_block_len,
7214 						  const u64 ram_bytes,
7215 						  const int type)
7216 {
7217 	struct extent_map *em = NULL;
7218 	int ret;
7219 
7220 	if (type != BTRFS_ORDERED_NOCOW) {
7221 		em = create_io_em(inode, start, len, orig_start, block_start,
7222 				  block_len, orig_block_len, ram_bytes,
7223 				  BTRFS_COMPRESS_NONE, /* compress_type */
7224 				  type);
7225 		if (IS_ERR(em))
7226 			goto out;
7227 	}
7228 	ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len,
7229 					   block_len, type);
7230 	if (ret) {
7231 		if (em) {
7232 			free_extent_map(em);
7233 			btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
7234 		}
7235 		em = ERR_PTR(ret);
7236 	}
7237  out:
7238 
7239 	return em;
7240 }
7241 
7242 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
7243 						  u64 start, u64 len)
7244 {
7245 	struct btrfs_root *root = inode->root;
7246 	struct btrfs_fs_info *fs_info = root->fs_info;
7247 	struct extent_map *em;
7248 	struct btrfs_key ins;
7249 	u64 alloc_hint;
7250 	int ret;
7251 
7252 	alloc_hint = get_extent_allocation_hint(inode, start, len);
7253 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7254 				   0, alloc_hint, &ins, 1, 1);
7255 	if (ret)
7256 		return ERR_PTR(ret);
7257 
7258 	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7259 				     ins.objectid, ins.offset, ins.offset,
7260 				     ins.offset, BTRFS_ORDERED_REGULAR);
7261 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7262 	if (IS_ERR(em))
7263 		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7264 					   1);
7265 
7266 	return em;
7267 }
7268 
7269 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7270 {
7271 	struct btrfs_block_group *block_group;
7272 	bool readonly = false;
7273 
7274 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7275 	if (!block_group || block_group->ro)
7276 		readonly = true;
7277 	if (block_group)
7278 		btrfs_put_block_group(block_group);
7279 	return readonly;
7280 }
7281 
7282 /*
7283  * Check if we can do nocow write into the range [@offset, @offset + @len)
7284  *
7285  * @offset:	File offset
7286  * @len:	The length to write, will be updated to the nocow writeable
7287  *		range
7288  * @orig_start:	(optional) Return the original file offset of the file extent
7289  * @orig_len:	(optional) Return the original on-disk length of the file extent
7290  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7291  * @strict:	if true, omit optimizations that might force us into unnecessary
7292  *		cow. e.g., don't trust generation number.
7293  *
7294  * Return:
7295  * >0	and update @len if we can do nocow write
7296  *  0	if we can't do nocow write
7297  * <0	if error happened
7298  *
7299  * NOTE: This only checks the file extents, caller is responsible to wait for
7300  *	 any ordered extents.
7301  */
7302 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7303 			      u64 *orig_start, u64 *orig_block_len,
7304 			      u64 *ram_bytes, bool strict)
7305 {
7306 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7307 	struct btrfs_path *path;
7308 	int ret;
7309 	struct extent_buffer *leaf;
7310 	struct btrfs_root *root = BTRFS_I(inode)->root;
7311 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7312 	struct btrfs_file_extent_item *fi;
7313 	struct btrfs_key key;
7314 	u64 disk_bytenr;
7315 	u64 backref_offset;
7316 	u64 extent_end;
7317 	u64 num_bytes;
7318 	int slot;
7319 	int found_type;
7320 	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7321 
7322 	path = btrfs_alloc_path();
7323 	if (!path)
7324 		return -ENOMEM;
7325 
7326 	ret = btrfs_lookup_file_extent(NULL, root, path,
7327 			btrfs_ino(BTRFS_I(inode)), offset, 0);
7328 	if (ret < 0)
7329 		goto out;
7330 
7331 	slot = path->slots[0];
7332 	if (ret == 1) {
7333 		if (slot == 0) {
7334 			/* can't find the item, must cow */
7335 			ret = 0;
7336 			goto out;
7337 		}
7338 		slot--;
7339 	}
7340 	ret = 0;
7341 	leaf = path->nodes[0];
7342 	btrfs_item_key_to_cpu(leaf, &key, slot);
7343 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7344 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7345 		/* not our file or wrong item type, must cow */
7346 		goto out;
7347 	}
7348 
7349 	if (key.offset > offset) {
7350 		/* Wrong offset, must cow */
7351 		goto out;
7352 	}
7353 
7354 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7355 	found_type = btrfs_file_extent_type(leaf, fi);
7356 	if (found_type != BTRFS_FILE_EXTENT_REG &&
7357 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7358 		/* not a regular extent, must cow */
7359 		goto out;
7360 	}
7361 
7362 	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7363 		goto out;
7364 
7365 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7366 	if (extent_end <= offset)
7367 		goto out;
7368 
7369 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7370 	if (disk_bytenr == 0)
7371 		goto out;
7372 
7373 	if (btrfs_file_extent_compression(leaf, fi) ||
7374 	    btrfs_file_extent_encryption(leaf, fi) ||
7375 	    btrfs_file_extent_other_encoding(leaf, fi))
7376 		goto out;
7377 
7378 	/*
7379 	 * Do the same check as in btrfs_cross_ref_exist but without the
7380 	 * unnecessary search.
7381 	 */
7382 	if (!strict &&
7383 	    (btrfs_file_extent_generation(leaf, fi) <=
7384 	     btrfs_root_last_snapshot(&root->root_item)))
7385 		goto out;
7386 
7387 	backref_offset = btrfs_file_extent_offset(leaf, fi);
7388 
7389 	if (orig_start) {
7390 		*orig_start = key.offset - backref_offset;
7391 		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7392 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7393 	}
7394 
7395 	if (btrfs_extent_readonly(fs_info, disk_bytenr))
7396 		goto out;
7397 
7398 	num_bytes = min(offset + *len, extent_end) - offset;
7399 	if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7400 		u64 range_end;
7401 
7402 		range_end = round_up(offset + num_bytes,
7403 				     root->fs_info->sectorsize) - 1;
7404 		ret = test_range_bit(io_tree, offset, range_end,
7405 				     EXTENT_DELALLOC, 0, NULL);
7406 		if (ret) {
7407 			ret = -EAGAIN;
7408 			goto out;
7409 		}
7410 	}
7411 
7412 	btrfs_release_path(path);
7413 
7414 	/*
7415 	 * look for other files referencing this extent, if we
7416 	 * find any we must cow
7417 	 */
7418 
7419 	ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7420 				    key.offset - backref_offset, disk_bytenr,
7421 				    strict);
7422 	if (ret) {
7423 		ret = 0;
7424 		goto out;
7425 	}
7426 
7427 	/*
7428 	 * adjust disk_bytenr and num_bytes to cover just the bytes
7429 	 * in this extent we are about to write.  If there
7430 	 * are any csums in that range we have to cow in order
7431 	 * to keep the csums correct
7432 	 */
7433 	disk_bytenr += backref_offset;
7434 	disk_bytenr += offset - key.offset;
7435 	if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7436 		goto out;
7437 	/*
7438 	 * all of the above have passed, it is safe to overwrite this extent
7439 	 * without cow
7440 	 */
7441 	*len = num_bytes;
7442 	ret = 1;
7443 out:
7444 	btrfs_free_path(path);
7445 	return ret;
7446 }
7447 
7448 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7449 			      struct extent_state **cached_state, bool writing)
7450 {
7451 	struct btrfs_ordered_extent *ordered;
7452 	int ret = 0;
7453 
7454 	while (1) {
7455 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7456 				 cached_state);
7457 		/*
7458 		 * We're concerned with the entire range that we're going to be
7459 		 * doing DIO to, so we need to make sure there's no ordered
7460 		 * extents in this range.
7461 		 */
7462 		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7463 						     lockend - lockstart + 1);
7464 
7465 		/*
7466 		 * We need to make sure there are no buffered pages in this
7467 		 * range either, we could have raced between the invalidate in
7468 		 * generic_file_direct_write and locking the extent.  The
7469 		 * invalidate needs to happen so that reads after a write do not
7470 		 * get stale data.
7471 		 */
7472 		if (!ordered &&
7473 		    (!writing || !filemap_range_has_page(inode->i_mapping,
7474 							 lockstart, lockend)))
7475 			break;
7476 
7477 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7478 				     cached_state);
7479 
7480 		if (ordered) {
7481 			/*
7482 			 * If we are doing a DIO read and the ordered extent we
7483 			 * found is for a buffered write, we can not wait for it
7484 			 * to complete and retry, because if we do so we can
7485 			 * deadlock with concurrent buffered writes on page
7486 			 * locks. This happens only if our DIO read covers more
7487 			 * than one extent map, if at this point has already
7488 			 * created an ordered extent for a previous extent map
7489 			 * and locked its range in the inode's io tree, and a
7490 			 * concurrent write against that previous extent map's
7491 			 * range and this range started (we unlock the ranges
7492 			 * in the io tree only when the bios complete and
7493 			 * buffered writes always lock pages before attempting
7494 			 * to lock range in the io tree).
7495 			 */
7496 			if (writing ||
7497 			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7498 				btrfs_start_ordered_extent(ordered, 1);
7499 			else
7500 				ret = -ENOTBLK;
7501 			btrfs_put_ordered_extent(ordered);
7502 		} else {
7503 			/*
7504 			 * We could trigger writeback for this range (and wait
7505 			 * for it to complete) and then invalidate the pages for
7506 			 * this range (through invalidate_inode_pages2_range()),
7507 			 * but that can lead us to a deadlock with a concurrent
7508 			 * call to readahead (a buffered read or a defrag call
7509 			 * triggered a readahead) on a page lock due to an
7510 			 * ordered dio extent we created before but did not have
7511 			 * yet a corresponding bio submitted (whence it can not
7512 			 * complete), which makes readahead wait for that
7513 			 * ordered extent to complete while holding a lock on
7514 			 * that page.
7515 			 */
7516 			ret = -ENOTBLK;
7517 		}
7518 
7519 		if (ret)
7520 			break;
7521 
7522 		cond_resched();
7523 	}
7524 
7525 	return ret;
7526 }
7527 
7528 /* The callers of this must take lock_extent() */
7529 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7530 				       u64 len, u64 orig_start, u64 block_start,
7531 				       u64 block_len, u64 orig_block_len,
7532 				       u64 ram_bytes, int compress_type,
7533 				       int type)
7534 {
7535 	struct extent_map_tree *em_tree;
7536 	struct extent_map *em;
7537 	int ret;
7538 
7539 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7540 	       type == BTRFS_ORDERED_COMPRESSED ||
7541 	       type == BTRFS_ORDERED_NOCOW ||
7542 	       type == BTRFS_ORDERED_REGULAR);
7543 
7544 	em_tree = &inode->extent_tree;
7545 	em = alloc_extent_map();
7546 	if (!em)
7547 		return ERR_PTR(-ENOMEM);
7548 
7549 	em->start = start;
7550 	em->orig_start = orig_start;
7551 	em->len = len;
7552 	em->block_len = block_len;
7553 	em->block_start = block_start;
7554 	em->orig_block_len = orig_block_len;
7555 	em->ram_bytes = ram_bytes;
7556 	em->generation = -1;
7557 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
7558 	if (type == BTRFS_ORDERED_PREALLOC) {
7559 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
7560 	} else if (type == BTRFS_ORDERED_COMPRESSED) {
7561 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7562 		em->compress_type = compress_type;
7563 	}
7564 
7565 	do {
7566 		btrfs_drop_extent_cache(inode, em->start,
7567 					em->start + em->len - 1, 0);
7568 		write_lock(&em_tree->lock);
7569 		ret = add_extent_mapping(em_tree, em, 1);
7570 		write_unlock(&em_tree->lock);
7571 		/*
7572 		 * The caller has taken lock_extent(), who could race with us
7573 		 * to add em?
7574 		 */
7575 	} while (ret == -EEXIST);
7576 
7577 	if (ret) {
7578 		free_extent_map(em);
7579 		return ERR_PTR(ret);
7580 	}
7581 
7582 	/* em got 2 refs now, callers needs to do free_extent_map once. */
7583 	return em;
7584 }
7585 
7586 
7587 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7588 					 struct inode *inode,
7589 					 struct btrfs_dio_data *dio_data,
7590 					 u64 start, u64 len)
7591 {
7592 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7593 	struct extent_map *em = *map;
7594 	int ret = 0;
7595 
7596 	/*
7597 	 * We don't allocate a new extent in the following cases
7598 	 *
7599 	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7600 	 * existing extent.
7601 	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7602 	 * just use the extent.
7603 	 *
7604 	 */
7605 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7606 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7607 	     em->block_start != EXTENT_MAP_HOLE)) {
7608 		int type;
7609 		u64 block_start, orig_start, orig_block_len, ram_bytes;
7610 
7611 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7612 			type = BTRFS_ORDERED_PREALLOC;
7613 		else
7614 			type = BTRFS_ORDERED_NOCOW;
7615 		len = min(len, em->len - (start - em->start));
7616 		block_start = em->block_start + (start - em->start);
7617 
7618 		if (can_nocow_extent(inode, start, &len, &orig_start,
7619 				     &orig_block_len, &ram_bytes, false) == 1 &&
7620 		    btrfs_inc_nocow_writers(fs_info, block_start)) {
7621 			struct extent_map *em2;
7622 
7623 			em2 = btrfs_create_dio_extent(BTRFS_I(inode), start, len,
7624 						      orig_start, block_start,
7625 						      len, orig_block_len,
7626 						      ram_bytes, type);
7627 			btrfs_dec_nocow_writers(fs_info, block_start);
7628 			if (type == BTRFS_ORDERED_PREALLOC) {
7629 				free_extent_map(em);
7630 				*map = em = em2;
7631 			}
7632 
7633 			if (em2 && IS_ERR(em2)) {
7634 				ret = PTR_ERR(em2);
7635 				goto out;
7636 			}
7637 			/*
7638 			 * For inode marked NODATACOW or extent marked PREALLOC,
7639 			 * use the existing or preallocated extent, so does not
7640 			 * need to adjust btrfs_space_info's bytes_may_use.
7641 			 */
7642 			btrfs_free_reserved_data_space_noquota(fs_info, len);
7643 			goto skip_cow;
7644 		}
7645 	}
7646 
7647 	/* this will cow the extent */
7648 	free_extent_map(em);
7649 	*map = em = btrfs_new_extent_direct(BTRFS_I(inode), start, len);
7650 	if (IS_ERR(em)) {
7651 		ret = PTR_ERR(em);
7652 		goto out;
7653 	}
7654 
7655 	len = min(len, em->len - (start - em->start));
7656 
7657 skip_cow:
7658 	/*
7659 	 * Need to update the i_size under the extent lock so buffered
7660 	 * readers will get the updated i_size when we unlock.
7661 	 */
7662 	if (start + len > i_size_read(inode))
7663 		i_size_write(inode, start + len);
7664 
7665 	dio_data->reserve -= len;
7666 out:
7667 	return ret;
7668 }
7669 
7670 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7671 		loff_t length, unsigned int flags, struct iomap *iomap,
7672 		struct iomap *srcmap)
7673 {
7674 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7675 	struct extent_map *em;
7676 	struct extent_state *cached_state = NULL;
7677 	struct btrfs_dio_data *dio_data = NULL;
7678 	u64 lockstart, lockend;
7679 	const bool write = !!(flags & IOMAP_WRITE);
7680 	int ret = 0;
7681 	u64 len = length;
7682 	bool unlock_extents = false;
7683 
7684 	if (!write)
7685 		len = min_t(u64, len, fs_info->sectorsize);
7686 
7687 	lockstart = start;
7688 	lockend = start + len - 1;
7689 
7690 	/*
7691 	 * The generic stuff only does filemap_write_and_wait_range, which
7692 	 * isn't enough if we've written compressed pages to this area, so we
7693 	 * need to flush the dirty pages again to make absolutely sure that any
7694 	 * outstanding dirty pages are on disk.
7695 	 */
7696 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7697 		     &BTRFS_I(inode)->runtime_flags)) {
7698 		ret = filemap_fdatawrite_range(inode->i_mapping, start,
7699 					       start + length - 1);
7700 		if (ret)
7701 			return ret;
7702 	}
7703 
7704 	dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS);
7705 	if (!dio_data)
7706 		return -ENOMEM;
7707 
7708 	dio_data->length = length;
7709 	if (write) {
7710 		dio_data->reserve = round_up(length, fs_info->sectorsize);
7711 		ret = btrfs_delalloc_reserve_space(BTRFS_I(inode),
7712 				&dio_data->data_reserved,
7713 				start, dio_data->reserve);
7714 		if (ret) {
7715 			extent_changeset_free(dio_data->data_reserved);
7716 			kfree(dio_data);
7717 			return ret;
7718 		}
7719 	}
7720 	iomap->private = dio_data;
7721 
7722 
7723 	/*
7724 	 * If this errors out it's because we couldn't invalidate pagecache for
7725 	 * this range and we need to fallback to buffered.
7726 	 */
7727 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) {
7728 		ret = -ENOTBLK;
7729 		goto err;
7730 	}
7731 
7732 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
7733 	if (IS_ERR(em)) {
7734 		ret = PTR_ERR(em);
7735 		goto unlock_err;
7736 	}
7737 
7738 	/*
7739 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7740 	 * io.  INLINE is special, and we could probably kludge it in here, but
7741 	 * it's still buffered so for safety lets just fall back to the generic
7742 	 * buffered path.
7743 	 *
7744 	 * For COMPRESSED we _have_ to read the entire extent in so we can
7745 	 * decompress it, so there will be buffering required no matter what we
7746 	 * do, so go ahead and fallback to buffered.
7747 	 *
7748 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7749 	 * to buffered IO.  Don't blame me, this is the price we pay for using
7750 	 * the generic code.
7751 	 */
7752 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7753 	    em->block_start == EXTENT_MAP_INLINE) {
7754 		free_extent_map(em);
7755 		ret = -ENOTBLK;
7756 		goto unlock_err;
7757 	}
7758 
7759 	len = min(len, em->len - (start - em->start));
7760 	if (write) {
7761 		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7762 						    start, len);
7763 		if (ret < 0)
7764 			goto unlock_err;
7765 		unlock_extents = true;
7766 		/* Recalc len in case the new em is smaller than requested */
7767 		len = min(len, em->len - (start - em->start));
7768 	} else {
7769 		/*
7770 		 * We need to unlock only the end area that we aren't using.
7771 		 * The rest is going to be unlocked by the endio routine.
7772 		 */
7773 		lockstart = start + len;
7774 		if (lockstart < lockend)
7775 			unlock_extents = true;
7776 	}
7777 
7778 	if (unlock_extents)
7779 		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
7780 				     lockstart, lockend, &cached_state);
7781 	else
7782 		free_extent_state(cached_state);
7783 
7784 	/*
7785 	 * Translate extent map information to iomap.
7786 	 * We trim the extents (and move the addr) even though iomap code does
7787 	 * that, since we have locked only the parts we are performing I/O in.
7788 	 */
7789 	if ((em->block_start == EXTENT_MAP_HOLE) ||
7790 	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
7791 		iomap->addr = IOMAP_NULL_ADDR;
7792 		iomap->type = IOMAP_HOLE;
7793 	} else {
7794 		iomap->addr = em->block_start + (start - em->start);
7795 		iomap->type = IOMAP_MAPPED;
7796 	}
7797 	iomap->offset = start;
7798 	iomap->bdev = fs_info->fs_devices->latest_bdev;
7799 	iomap->length = len;
7800 
7801 	if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
7802 		iomap->flags |= IOMAP_F_ZONE_APPEND;
7803 
7804 	free_extent_map(em);
7805 
7806 	return 0;
7807 
7808 unlock_err:
7809 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7810 			     &cached_state);
7811 err:
7812 	if (dio_data) {
7813 		btrfs_delalloc_release_space(BTRFS_I(inode),
7814 				dio_data->data_reserved, start,
7815 				dio_data->reserve, true);
7816 		btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve);
7817 		extent_changeset_free(dio_data->data_reserved);
7818 		kfree(dio_data);
7819 	}
7820 	return ret;
7821 }
7822 
7823 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7824 		ssize_t written, unsigned int flags, struct iomap *iomap)
7825 {
7826 	int ret = 0;
7827 	struct btrfs_dio_data *dio_data = iomap->private;
7828 	size_t submitted = dio_data->submitted;
7829 	const bool write = !!(flags & IOMAP_WRITE);
7830 
7831 	if (!write && (iomap->type == IOMAP_HOLE)) {
7832 		/* If reading from a hole, unlock and return */
7833 		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
7834 		goto out;
7835 	}
7836 
7837 	if (submitted < length) {
7838 		pos += submitted;
7839 		length -= submitted;
7840 		if (write)
7841 			__endio_write_update_ordered(BTRFS_I(inode), pos,
7842 					length, false);
7843 		else
7844 			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7845 				      pos + length - 1);
7846 		ret = -ENOTBLK;
7847 	}
7848 
7849 	if (write) {
7850 		if (dio_data->reserve)
7851 			btrfs_delalloc_release_space(BTRFS_I(inode),
7852 					dio_data->data_reserved, pos,
7853 					dio_data->reserve, true);
7854 		btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length);
7855 		extent_changeset_free(dio_data->data_reserved);
7856 	}
7857 out:
7858 	kfree(dio_data);
7859 	iomap->private = NULL;
7860 
7861 	return ret;
7862 }
7863 
7864 static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
7865 {
7866 	/*
7867 	 * This implies a barrier so that stores to dio_bio->bi_status before
7868 	 * this and loads of dio_bio->bi_status after this are fully ordered.
7869 	 */
7870 	if (!refcount_dec_and_test(&dip->refs))
7871 		return;
7872 
7873 	if (btrfs_op(dip->dio_bio) == BTRFS_MAP_WRITE) {
7874 		__endio_write_update_ordered(BTRFS_I(dip->inode),
7875 					     dip->logical_offset,
7876 					     dip->bytes,
7877 					     !dip->dio_bio->bi_status);
7878 	} else {
7879 		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
7880 			      dip->logical_offset,
7881 			      dip->logical_offset + dip->bytes - 1);
7882 	}
7883 
7884 	bio_endio(dip->dio_bio);
7885 	kfree(dip);
7886 }
7887 
7888 static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7889 					  int mirror_num,
7890 					  unsigned long bio_flags)
7891 {
7892 	struct btrfs_dio_private *dip = bio->bi_private;
7893 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7894 	blk_status_t ret;
7895 
7896 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7897 
7898 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
7899 	if (ret)
7900 		return ret;
7901 
7902 	refcount_inc(&dip->refs);
7903 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
7904 	if (ret)
7905 		refcount_dec(&dip->refs);
7906 	return ret;
7907 }
7908 
7909 static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
7910 					     struct btrfs_io_bio *io_bio,
7911 					     const bool uptodate)
7912 {
7913 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
7914 	const u32 sectorsize = fs_info->sectorsize;
7915 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
7916 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7917 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
7918 	struct bio_vec bvec;
7919 	struct bvec_iter iter;
7920 	u64 start = io_bio->logical;
7921 	u32 bio_offset = 0;
7922 	blk_status_t err = BLK_STS_OK;
7923 
7924 	__bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) {
7925 		unsigned int i, nr_sectors, pgoff;
7926 
7927 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
7928 		pgoff = bvec.bv_offset;
7929 		for (i = 0; i < nr_sectors; i++) {
7930 			ASSERT(pgoff < PAGE_SIZE);
7931 			if (uptodate &&
7932 			    (!csum || !check_data_csum(inode, io_bio,
7933 						       bio_offset, bvec.bv_page,
7934 						       pgoff, start))) {
7935 				clean_io_failure(fs_info, failure_tree, io_tree,
7936 						 start, bvec.bv_page,
7937 						 btrfs_ino(BTRFS_I(inode)),
7938 						 pgoff);
7939 			} else {
7940 				blk_status_t status;
7941 
7942 				ASSERT((start - io_bio->logical) < UINT_MAX);
7943 				status = btrfs_submit_read_repair(inode,
7944 							&io_bio->bio,
7945 							start - io_bio->logical,
7946 							bvec.bv_page, pgoff,
7947 							start,
7948 							start + sectorsize - 1,
7949 							io_bio->mirror_num,
7950 							submit_dio_repair_bio);
7951 				if (status)
7952 					err = status;
7953 			}
7954 			start += sectorsize;
7955 			ASSERT(bio_offset + sectorsize > bio_offset);
7956 			bio_offset += sectorsize;
7957 			pgoff += sectorsize;
7958 		}
7959 	}
7960 	return err;
7961 }
7962 
7963 static void __endio_write_update_ordered(struct btrfs_inode *inode,
7964 					 const u64 offset, const u64 bytes,
7965 					 const bool uptodate)
7966 {
7967 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7968 	struct btrfs_ordered_extent *ordered = NULL;
7969 	struct btrfs_workqueue *wq;
7970 	u64 ordered_offset = offset;
7971 	u64 ordered_bytes = bytes;
7972 	u64 last_offset;
7973 
7974 	if (btrfs_is_free_space_inode(inode))
7975 		wq = fs_info->endio_freespace_worker;
7976 	else
7977 		wq = fs_info->endio_write_workers;
7978 
7979 	while (ordered_offset < offset + bytes) {
7980 		last_offset = ordered_offset;
7981 		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
7982 							 &ordered_offset,
7983 							 ordered_bytes,
7984 							 uptodate)) {
7985 			btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
7986 					NULL);
7987 			btrfs_queue_work(wq, &ordered->work);
7988 		}
7989 
7990 		/* No ordered extent found in the range, exit */
7991 		if (ordered_offset == last_offset)
7992 			return;
7993 		/*
7994 		 * Our bio might span multiple ordered extents. In this case
7995 		 * we keep going until we have accounted the whole dio.
7996 		 */
7997 		if (ordered_offset < offset + bytes) {
7998 			ordered_bytes = offset + bytes - ordered_offset;
7999 			ordered = NULL;
8000 		}
8001 	}
8002 }
8003 
8004 static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
8005 						     struct bio *bio,
8006 						     u64 dio_file_offset)
8007 {
8008 	return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, 1);
8009 }
8010 
8011 static void btrfs_end_dio_bio(struct bio *bio)
8012 {
8013 	struct btrfs_dio_private *dip = bio->bi_private;
8014 	blk_status_t err = bio->bi_status;
8015 
8016 	if (err)
8017 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8018 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8019 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8020 			   bio->bi_opf, bio->bi_iter.bi_sector,
8021 			   bio->bi_iter.bi_size, err);
8022 
8023 	if (bio_op(bio) == REQ_OP_READ) {
8024 		err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio),
8025 					       !err);
8026 	}
8027 
8028 	if (err)
8029 		dip->dio_bio->bi_status = err;
8030 
8031 	btrfs_record_physical_zoned(dip->inode, dip->logical_offset, bio);
8032 
8033 	bio_put(bio);
8034 	btrfs_dio_private_put(dip);
8035 }
8036 
8037 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
8038 		struct inode *inode, u64 file_offset, int async_submit)
8039 {
8040 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8041 	struct btrfs_dio_private *dip = bio->bi_private;
8042 	bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
8043 	blk_status_t ret;
8044 
8045 	/* Check btrfs_submit_bio_hook() for rules about async submit. */
8046 	if (async_submit)
8047 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8048 
8049 	if (!write) {
8050 		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8051 		if (ret)
8052 			goto err;
8053 	}
8054 
8055 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8056 		goto map;
8057 
8058 	if (write && async_submit) {
8059 		ret = btrfs_wq_submit_bio(inode, bio, 0, 0, file_offset,
8060 					  btrfs_submit_bio_start_direct_io);
8061 		goto err;
8062 	} else if (write) {
8063 		/*
8064 		 * If we aren't doing async submit, calculate the csum of the
8065 		 * bio now.
8066 		 */
8067 		ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, 1);
8068 		if (ret)
8069 			goto err;
8070 	} else {
8071 		u64 csum_offset;
8072 
8073 		csum_offset = file_offset - dip->logical_offset;
8074 		csum_offset >>= fs_info->sectorsize_bits;
8075 		csum_offset *= fs_info->csum_size;
8076 		btrfs_io_bio(bio)->csum = dip->csums + csum_offset;
8077 	}
8078 map:
8079 	ret = btrfs_map_bio(fs_info, bio, 0);
8080 err:
8081 	return ret;
8082 }
8083 
8084 /*
8085  * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked
8086  * or ordered extents whether or not we submit any bios.
8087  */
8088 static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
8089 							  struct inode *inode,
8090 							  loff_t file_offset)
8091 {
8092 	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
8093 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
8094 	size_t dip_size;
8095 	struct btrfs_dio_private *dip;
8096 
8097 	dip_size = sizeof(*dip);
8098 	if (!write && csum) {
8099 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8100 		size_t nblocks;
8101 
8102 		nblocks = dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits;
8103 		dip_size += fs_info->csum_size * nblocks;
8104 	}
8105 
8106 	dip = kzalloc(dip_size, GFP_NOFS);
8107 	if (!dip)
8108 		return NULL;
8109 
8110 	dip->inode = inode;
8111 	dip->logical_offset = file_offset;
8112 	dip->bytes = dio_bio->bi_iter.bi_size;
8113 	dip->disk_bytenr = dio_bio->bi_iter.bi_sector << 9;
8114 	dip->dio_bio = dio_bio;
8115 	refcount_set(&dip->refs, 1);
8116 	return dip;
8117 }
8118 
8119 static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
8120 		struct bio *dio_bio, loff_t file_offset)
8121 {
8122 	const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
8123 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8124 	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
8125 			     BTRFS_BLOCK_GROUP_RAID56_MASK);
8126 	struct btrfs_dio_private *dip;
8127 	struct bio *bio;
8128 	u64 start_sector;
8129 	int async_submit = 0;
8130 	u64 submit_len;
8131 	int clone_offset = 0;
8132 	int clone_len;
8133 	u64 logical;
8134 	int ret;
8135 	blk_status_t status;
8136 	struct btrfs_io_geometry geom;
8137 	struct btrfs_dio_data *dio_data = iomap->private;
8138 	struct extent_map *em = NULL;
8139 
8140 	dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
8141 	if (!dip) {
8142 		if (!write) {
8143 			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8144 				file_offset + dio_bio->bi_iter.bi_size - 1);
8145 		}
8146 		dio_bio->bi_status = BLK_STS_RESOURCE;
8147 		bio_endio(dio_bio);
8148 		return BLK_QC_T_NONE;
8149 	}
8150 
8151 	if (!write) {
8152 		/*
8153 		 * Load the csums up front to reduce csum tree searches and
8154 		 * contention when submitting bios.
8155 		 *
8156 		 * If we have csums disabled this will do nothing.
8157 		 */
8158 		status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
8159 		if (status != BLK_STS_OK)
8160 			goto out_err;
8161 	}
8162 
8163 	start_sector = dio_bio->bi_iter.bi_sector;
8164 	submit_len = dio_bio->bi_iter.bi_size;
8165 
8166 	do {
8167 		logical = start_sector << 9;
8168 		em = btrfs_get_chunk_map(fs_info, logical, submit_len);
8169 		if (IS_ERR(em)) {
8170 			status = errno_to_blk_status(PTR_ERR(em));
8171 			em = NULL;
8172 			goto out_err_em;
8173 		}
8174 		ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
8175 					    logical, submit_len, &geom);
8176 		if (ret) {
8177 			status = errno_to_blk_status(ret);
8178 			goto out_err_em;
8179 		}
8180 		ASSERT(geom.len <= INT_MAX);
8181 
8182 		clone_len = min_t(int, submit_len, geom.len);
8183 
8184 		/*
8185 		 * This will never fail as it's passing GPF_NOFS and
8186 		 * the allocation is backed by btrfs_bioset.
8187 		 */
8188 		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
8189 		bio->bi_private = dip;
8190 		bio->bi_end_io = btrfs_end_dio_bio;
8191 		btrfs_io_bio(bio)->logical = file_offset;
8192 
8193 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
8194 			status = extract_ordered_extent(BTRFS_I(inode), bio,
8195 							file_offset);
8196 			if (status) {
8197 				bio_put(bio);
8198 				goto out_err;
8199 			}
8200 		}
8201 
8202 		ASSERT(submit_len >= clone_len);
8203 		submit_len -= clone_len;
8204 
8205 		/*
8206 		 * Increase the count before we submit the bio so we know
8207 		 * the end IO handler won't happen before we increase the
8208 		 * count. Otherwise, the dip might get freed before we're
8209 		 * done setting it up.
8210 		 *
8211 		 * We transfer the initial reference to the last bio, so we
8212 		 * don't need to increment the reference count for the last one.
8213 		 */
8214 		if (submit_len > 0) {
8215 			refcount_inc(&dip->refs);
8216 			/*
8217 			 * If we are submitting more than one bio, submit them
8218 			 * all asynchronously. The exception is RAID 5 or 6, as
8219 			 * asynchronous checksums make it difficult to collect
8220 			 * full stripe writes.
8221 			 */
8222 			if (!raid56)
8223 				async_submit = 1;
8224 		}
8225 
8226 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
8227 						async_submit);
8228 		if (status) {
8229 			bio_put(bio);
8230 			if (submit_len > 0)
8231 				refcount_dec(&dip->refs);
8232 			goto out_err_em;
8233 		}
8234 
8235 		dio_data->submitted += clone_len;
8236 		clone_offset += clone_len;
8237 		start_sector += clone_len >> 9;
8238 		file_offset += clone_len;
8239 
8240 		free_extent_map(em);
8241 	} while (submit_len > 0);
8242 	return BLK_QC_T_NONE;
8243 
8244 out_err_em:
8245 	free_extent_map(em);
8246 out_err:
8247 	dip->dio_bio->bi_status = status;
8248 	btrfs_dio_private_put(dip);
8249 
8250 	return BLK_QC_T_NONE;
8251 }
8252 
8253 const struct iomap_ops btrfs_dio_iomap_ops = {
8254 	.iomap_begin            = btrfs_dio_iomap_begin,
8255 	.iomap_end              = btrfs_dio_iomap_end,
8256 };
8257 
8258 const struct iomap_dio_ops btrfs_dio_ops = {
8259 	.submit_io		= btrfs_submit_direct,
8260 };
8261 
8262 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8263 			u64 start, u64 len)
8264 {
8265 	int	ret;
8266 
8267 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
8268 	if (ret)
8269 		return ret;
8270 
8271 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
8272 }
8273 
8274 int btrfs_readpage(struct file *file, struct page *page)
8275 {
8276 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
8277 	u64 start = page_offset(page);
8278 	u64 end = start + PAGE_SIZE - 1;
8279 	unsigned long bio_flags = 0;
8280 	struct bio *bio = NULL;
8281 	int ret;
8282 
8283 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
8284 
8285 	ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL);
8286 	if (bio)
8287 		ret = submit_one_bio(bio, 0, bio_flags);
8288 	return ret;
8289 }
8290 
8291 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8292 {
8293 	struct inode *inode = page->mapping->host;
8294 	int ret;
8295 
8296 	if (current->flags & PF_MEMALLOC) {
8297 		redirty_page_for_writepage(wbc, page);
8298 		unlock_page(page);
8299 		return 0;
8300 	}
8301 
8302 	/*
8303 	 * If we are under memory pressure we will call this directly from the
8304 	 * VM, we need to make sure we have the inode referenced for the ordered
8305 	 * extent.  If not just return like we didn't do anything.
8306 	 */
8307 	if (!igrab(inode)) {
8308 		redirty_page_for_writepage(wbc, page);
8309 		return AOP_WRITEPAGE_ACTIVATE;
8310 	}
8311 	ret = extent_write_full_page(page, wbc);
8312 	btrfs_add_delayed_iput(inode);
8313 	return ret;
8314 }
8315 
8316 static int btrfs_writepages(struct address_space *mapping,
8317 			    struct writeback_control *wbc)
8318 {
8319 	return extent_writepages(mapping, wbc);
8320 }
8321 
8322 static void btrfs_readahead(struct readahead_control *rac)
8323 {
8324 	extent_readahead(rac);
8325 }
8326 
8327 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8328 {
8329 	int ret = try_release_extent_mapping(page, gfp_flags);
8330 	if (ret == 1)
8331 		clear_page_extent_mapped(page);
8332 	return ret;
8333 }
8334 
8335 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8336 {
8337 	if (PageWriteback(page) || PageDirty(page))
8338 		return 0;
8339 	return __btrfs_releasepage(page, gfp_flags);
8340 }
8341 
8342 #ifdef CONFIG_MIGRATION
8343 static int btrfs_migratepage(struct address_space *mapping,
8344 			     struct page *newpage, struct page *page,
8345 			     enum migrate_mode mode)
8346 {
8347 	int ret;
8348 
8349 	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
8350 	if (ret != MIGRATEPAGE_SUCCESS)
8351 		return ret;
8352 
8353 	if (page_has_private(page))
8354 		attach_page_private(newpage, detach_page_private(page));
8355 
8356 	if (PagePrivate2(page)) {
8357 		ClearPagePrivate2(page);
8358 		SetPagePrivate2(newpage);
8359 	}
8360 
8361 	if (mode != MIGRATE_SYNC_NO_COPY)
8362 		migrate_page_copy(newpage, page);
8363 	else
8364 		migrate_page_states(newpage, page);
8365 	return MIGRATEPAGE_SUCCESS;
8366 }
8367 #endif
8368 
8369 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8370 				 unsigned int length)
8371 {
8372 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
8373 	struct extent_io_tree *tree = &inode->io_tree;
8374 	struct btrfs_ordered_extent *ordered;
8375 	struct extent_state *cached_state = NULL;
8376 	u64 page_start = page_offset(page);
8377 	u64 page_end = page_start + PAGE_SIZE - 1;
8378 	u64 start;
8379 	u64 end;
8380 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
8381 	bool found_ordered = false;
8382 	bool completed_ordered = false;
8383 
8384 	/*
8385 	 * we have the page locked, so new writeback can't start,
8386 	 * and the dirty bit won't be cleared while we are here.
8387 	 *
8388 	 * Wait for IO on this page so that we can safely clear
8389 	 * the PagePrivate2 bit and do ordered accounting
8390 	 */
8391 	wait_on_page_writeback(page);
8392 
8393 	if (offset) {
8394 		btrfs_releasepage(page, GFP_NOFS);
8395 		return;
8396 	}
8397 
8398 	if (!inode_evicting)
8399 		lock_extent_bits(tree, page_start, page_end, &cached_state);
8400 
8401 	start = page_start;
8402 again:
8403 	ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
8404 	if (ordered) {
8405 		found_ordered = true;
8406 		end = min(page_end,
8407 			  ordered->file_offset + ordered->num_bytes - 1);
8408 		/*
8409 		 * IO on this page will never be started, so we need to account
8410 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8411 		 * here, must leave that up for the ordered extent completion.
8412 		 */
8413 		if (!inode_evicting)
8414 			clear_extent_bit(tree, start, end,
8415 					 EXTENT_DELALLOC |
8416 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8417 					 EXTENT_DEFRAG, 1, 0, &cached_state);
8418 		/*
8419 		 * whoever cleared the private bit is responsible
8420 		 * for the finish_ordered_io
8421 		 */
8422 		if (TestClearPagePrivate2(page)) {
8423 			spin_lock_irq(&inode->ordered_tree.lock);
8424 			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8425 			ordered->truncated_len = min(ordered->truncated_len,
8426 						     start - ordered->file_offset);
8427 			spin_unlock_irq(&inode->ordered_tree.lock);
8428 
8429 			if (btrfs_dec_test_ordered_pending(inode, &ordered,
8430 							   start,
8431 							   end - start + 1, 1)) {
8432 				btrfs_finish_ordered_io(ordered);
8433 				completed_ordered = true;
8434 			}
8435 		}
8436 		btrfs_put_ordered_extent(ordered);
8437 		if (!inode_evicting) {
8438 			cached_state = NULL;
8439 			lock_extent_bits(tree, start, end,
8440 					 &cached_state);
8441 		}
8442 
8443 		start = end + 1;
8444 		if (start < page_end)
8445 			goto again;
8446 	}
8447 
8448 	/*
8449 	 * Qgroup reserved space handler
8450 	 * Page here will be either
8451 	 * 1) Already written to disk or ordered extent already submitted
8452 	 *    Then its QGROUP_RESERVED bit in io_tree is already cleaned.
8453 	 *    Qgroup will be handled by its qgroup_record then.
8454 	 *    btrfs_qgroup_free_data() call will do nothing here.
8455 	 *
8456 	 * 2) Not written to disk yet
8457 	 *    Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
8458 	 *    bit of its io_tree, and free the qgroup reserved data space.
8459 	 *    Since the IO will never happen for this page.
8460 	 */
8461 	btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
8462 	if (!inode_evicting) {
8463 		bool delete = true;
8464 
8465 		/*
8466 		 * If there's an ordered extent for this range and we have not
8467 		 * finished it ourselves, we must leave EXTENT_DELALLOC_NEW set
8468 		 * in the range for the ordered extent completion. We must also
8469 		 * not delete the range, otherwise we would lose that bit (and
8470 		 * any other bits set in the range). Make sure EXTENT_UPTODATE
8471 		 * is cleared if we don't delete, otherwise it can lead to
8472 		 * corruptions if the i_size is extented later.
8473 		 */
8474 		if (found_ordered && !completed_ordered)
8475 			delete = false;
8476 		clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
8477 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8478 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
8479 				 delete, &cached_state);
8480 
8481 		__btrfs_releasepage(page, GFP_NOFS);
8482 	}
8483 
8484 	ClearPageChecked(page);
8485 	clear_page_extent_mapped(page);
8486 }
8487 
8488 /*
8489  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8490  * called from a page fault handler when a page is first dirtied. Hence we must
8491  * be careful to check for EOF conditions here. We set the page up correctly
8492  * for a written page which means we get ENOSPC checking when writing into
8493  * holes and correct delalloc and unwritten extent mapping on filesystems that
8494  * support these features.
8495  *
8496  * We are not allowed to take the i_mutex here so we have to play games to
8497  * protect against truncate races as the page could now be beyond EOF.  Because
8498  * truncate_setsize() writes the inode size before removing pages, once we have
8499  * the page lock we can determine safely if the page is beyond EOF. If it is not
8500  * beyond EOF, then the page is guaranteed safe against truncation until we
8501  * unlock the page.
8502  */
8503 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8504 {
8505 	struct page *page = vmf->page;
8506 	struct inode *inode = file_inode(vmf->vma->vm_file);
8507 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8508 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8509 	struct btrfs_ordered_extent *ordered;
8510 	struct extent_state *cached_state = NULL;
8511 	struct extent_changeset *data_reserved = NULL;
8512 	unsigned long zero_start;
8513 	loff_t size;
8514 	vm_fault_t ret;
8515 	int ret2;
8516 	int reserved = 0;
8517 	u64 reserved_space;
8518 	u64 page_start;
8519 	u64 page_end;
8520 	u64 end;
8521 
8522 	reserved_space = PAGE_SIZE;
8523 
8524 	sb_start_pagefault(inode->i_sb);
8525 	page_start = page_offset(page);
8526 	page_end = page_start + PAGE_SIZE - 1;
8527 	end = page_end;
8528 
8529 	/*
8530 	 * Reserving delalloc space after obtaining the page lock can lead to
8531 	 * deadlock. For example, if a dirty page is locked by this function
8532 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8533 	 * dirty page write out, then the btrfs_writepage() function could
8534 	 * end up waiting indefinitely to get a lock on the page currently
8535 	 * being processed by btrfs_page_mkwrite() function.
8536 	 */
8537 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8538 					    page_start, reserved_space);
8539 	if (!ret2) {
8540 		ret2 = file_update_time(vmf->vma->vm_file);
8541 		reserved = 1;
8542 	}
8543 	if (ret2) {
8544 		ret = vmf_error(ret2);
8545 		if (reserved)
8546 			goto out;
8547 		goto out_noreserve;
8548 	}
8549 
8550 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8551 again:
8552 	down_read(&BTRFS_I(inode)->i_mmap_lock);
8553 	lock_page(page);
8554 	size = i_size_read(inode);
8555 
8556 	if ((page->mapping != inode->i_mapping) ||
8557 	    (page_start >= size)) {
8558 		/* page got truncated out from underneath us */
8559 		goto out_unlock;
8560 	}
8561 	wait_on_page_writeback(page);
8562 
8563 	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8564 	ret2 = set_page_extent_mapped(page);
8565 	if (ret2 < 0) {
8566 		ret = vmf_error(ret2);
8567 		unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8568 		goto out_unlock;
8569 	}
8570 
8571 	/*
8572 	 * we can't set the delalloc bits if there are pending ordered
8573 	 * extents.  Drop our locks and wait for them to finish
8574 	 */
8575 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8576 			PAGE_SIZE);
8577 	if (ordered) {
8578 		unlock_extent_cached(io_tree, page_start, page_end,
8579 				     &cached_state);
8580 		unlock_page(page);
8581 		up_read(&BTRFS_I(inode)->i_mmap_lock);
8582 		btrfs_start_ordered_extent(ordered, 1);
8583 		btrfs_put_ordered_extent(ordered);
8584 		goto again;
8585 	}
8586 
8587 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8588 		reserved_space = round_up(size - page_start,
8589 					  fs_info->sectorsize);
8590 		if (reserved_space < PAGE_SIZE) {
8591 			end = page_start + reserved_space - 1;
8592 			btrfs_delalloc_release_space(BTRFS_I(inode),
8593 					data_reserved, page_start,
8594 					PAGE_SIZE - reserved_space, true);
8595 		}
8596 	}
8597 
8598 	/*
8599 	 * page_mkwrite gets called when the page is firstly dirtied after it's
8600 	 * faulted in, but write(2) could also dirty a page and set delalloc
8601 	 * bits, thus in this case for space account reason, we still need to
8602 	 * clear any delalloc bits within this page range since we have to
8603 	 * reserve data&meta space before lock_page() (see above comments).
8604 	 */
8605 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8606 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8607 			  EXTENT_DEFRAG, 0, 0, &cached_state);
8608 
8609 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8610 					&cached_state);
8611 	if (ret2) {
8612 		unlock_extent_cached(io_tree, page_start, page_end,
8613 				     &cached_state);
8614 		ret = VM_FAULT_SIGBUS;
8615 		goto out_unlock;
8616 	}
8617 
8618 	/* page is wholly or partially inside EOF */
8619 	if (page_start + PAGE_SIZE > size)
8620 		zero_start = offset_in_page(size);
8621 	else
8622 		zero_start = PAGE_SIZE;
8623 
8624 	if (zero_start != PAGE_SIZE) {
8625 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8626 		flush_dcache_page(page);
8627 	}
8628 	ClearPageChecked(page);
8629 	set_page_dirty(page);
8630 	SetPageUptodate(page);
8631 
8632 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8633 
8634 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8635 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8636 
8637 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8638 	sb_end_pagefault(inode->i_sb);
8639 	extent_changeset_free(data_reserved);
8640 	return VM_FAULT_LOCKED;
8641 
8642 out_unlock:
8643 	unlock_page(page);
8644 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8645 out:
8646 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8647 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8648 				     reserved_space, (ret != 0));
8649 out_noreserve:
8650 	sb_end_pagefault(inode->i_sb);
8651 	extent_changeset_free(data_reserved);
8652 	return ret;
8653 }
8654 
8655 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
8656 {
8657 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8658 	struct btrfs_root *root = BTRFS_I(inode)->root;
8659 	struct btrfs_block_rsv *rsv;
8660 	int ret;
8661 	struct btrfs_trans_handle *trans;
8662 	u64 mask = fs_info->sectorsize - 1;
8663 	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8664 
8665 	if (!skip_writeback) {
8666 		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8667 					       (u64)-1);
8668 		if (ret)
8669 			return ret;
8670 	}
8671 
8672 	/*
8673 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8674 	 * things going on here:
8675 	 *
8676 	 * 1) We need to reserve space to update our inode.
8677 	 *
8678 	 * 2) We need to have something to cache all the space that is going to
8679 	 * be free'd up by the truncate operation, but also have some slack
8680 	 * space reserved in case it uses space during the truncate (thank you
8681 	 * very much snapshotting).
8682 	 *
8683 	 * And we need these to be separate.  The fact is we can use a lot of
8684 	 * space doing the truncate, and we have no earthly idea how much space
8685 	 * we will use, so we need the truncate reservation to be separate so it
8686 	 * doesn't end up using space reserved for updating the inode.  We also
8687 	 * need to be able to stop the transaction and start a new one, which
8688 	 * means we need to be able to update the inode several times, and we
8689 	 * have no idea of knowing how many times that will be, so we can't just
8690 	 * reserve 1 item for the entirety of the operation, so that has to be
8691 	 * done separately as well.
8692 	 *
8693 	 * So that leaves us with
8694 	 *
8695 	 * 1) rsv - for the truncate reservation, which we will steal from the
8696 	 * transaction reservation.
8697 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8698 	 * updating the inode.
8699 	 */
8700 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8701 	if (!rsv)
8702 		return -ENOMEM;
8703 	rsv->size = min_size;
8704 	rsv->failfast = 1;
8705 
8706 	/*
8707 	 * 1 for the truncate slack space
8708 	 * 1 for updating the inode.
8709 	 */
8710 	trans = btrfs_start_transaction(root, 2);
8711 	if (IS_ERR(trans)) {
8712 		ret = PTR_ERR(trans);
8713 		goto out;
8714 	}
8715 
8716 	/* Migrate the slack space for the truncate to our reserve */
8717 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8718 				      min_size, false);
8719 	BUG_ON(ret);
8720 
8721 	/*
8722 	 * So if we truncate and then write and fsync we normally would just
8723 	 * write the extents that changed, which is a problem if we need to
8724 	 * first truncate that entire inode.  So set this flag so we write out
8725 	 * all of the extents in the inode to the sync log so we're completely
8726 	 * safe.
8727 	 */
8728 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8729 	trans->block_rsv = rsv;
8730 
8731 	while (1) {
8732 		ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
8733 						 inode->i_size,
8734 						 BTRFS_EXTENT_DATA_KEY);
8735 		trans->block_rsv = &fs_info->trans_block_rsv;
8736 		if (ret != -ENOSPC && ret != -EAGAIN)
8737 			break;
8738 
8739 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
8740 		if (ret)
8741 			break;
8742 
8743 		btrfs_end_transaction(trans);
8744 		btrfs_btree_balance_dirty(fs_info);
8745 
8746 		trans = btrfs_start_transaction(root, 2);
8747 		if (IS_ERR(trans)) {
8748 			ret = PTR_ERR(trans);
8749 			trans = NULL;
8750 			break;
8751 		}
8752 
8753 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8754 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8755 					      rsv, min_size, false);
8756 		BUG_ON(ret);	/* shouldn't happen */
8757 		trans->block_rsv = rsv;
8758 	}
8759 
8760 	/*
8761 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8762 	 * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know
8763 	 * we've truncated everything except the last little bit, and can do
8764 	 * btrfs_truncate_block and then update the disk_i_size.
8765 	 */
8766 	if (ret == NEED_TRUNCATE_BLOCK) {
8767 		btrfs_end_transaction(trans);
8768 		btrfs_btree_balance_dirty(fs_info);
8769 
8770 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
8771 		if (ret)
8772 			goto out;
8773 		trans = btrfs_start_transaction(root, 1);
8774 		if (IS_ERR(trans)) {
8775 			ret = PTR_ERR(trans);
8776 			goto out;
8777 		}
8778 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8779 	}
8780 
8781 	if (trans) {
8782 		int ret2;
8783 
8784 		trans->block_rsv = &fs_info->trans_block_rsv;
8785 		ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
8786 		if (ret2 && !ret)
8787 			ret = ret2;
8788 
8789 		ret2 = btrfs_end_transaction(trans);
8790 		if (ret2 && !ret)
8791 			ret = ret2;
8792 		btrfs_btree_balance_dirty(fs_info);
8793 	}
8794 out:
8795 	btrfs_free_block_rsv(fs_info, rsv);
8796 
8797 	return ret;
8798 }
8799 
8800 /*
8801  * create a new subvolume directory/inode (helper for the ioctl).
8802  */
8803 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8804 			     struct btrfs_root *new_root,
8805 			     struct btrfs_root *parent_root)
8806 {
8807 	struct inode *inode;
8808 	int err;
8809 	u64 index = 0;
8810 	u64 ino;
8811 
8812 	err = btrfs_get_free_objectid(new_root, &ino);
8813 	if (err < 0)
8814 		return err;
8815 
8816 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, ino, ino,
8817 				S_IFDIR | (~current_umask() & S_IRWXUGO),
8818 				&index);
8819 	if (IS_ERR(inode))
8820 		return PTR_ERR(inode);
8821 	inode->i_op = &btrfs_dir_inode_operations;
8822 	inode->i_fop = &btrfs_dir_file_operations;
8823 
8824 	set_nlink(inode, 1);
8825 	btrfs_i_size_write(BTRFS_I(inode), 0);
8826 	unlock_new_inode(inode);
8827 
8828 	err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
8829 	if (err)
8830 		btrfs_err(new_root->fs_info,
8831 			  "error inheriting subvolume %llu properties: %d",
8832 			  new_root->root_key.objectid, err);
8833 
8834 	err = btrfs_update_inode(trans, new_root, BTRFS_I(inode));
8835 
8836 	iput(inode);
8837 	return err;
8838 }
8839 
8840 struct inode *btrfs_alloc_inode(struct super_block *sb)
8841 {
8842 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8843 	struct btrfs_inode *ei;
8844 	struct inode *inode;
8845 
8846 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
8847 	if (!ei)
8848 		return NULL;
8849 
8850 	ei->root = NULL;
8851 	ei->generation = 0;
8852 	ei->last_trans = 0;
8853 	ei->last_sub_trans = 0;
8854 	ei->logged_trans = 0;
8855 	ei->delalloc_bytes = 0;
8856 	ei->new_delalloc_bytes = 0;
8857 	ei->defrag_bytes = 0;
8858 	ei->disk_i_size = 0;
8859 	ei->flags = 0;
8860 	ei->csum_bytes = 0;
8861 	ei->index_cnt = (u64)-1;
8862 	ei->dir_index = 0;
8863 	ei->last_unlink_trans = 0;
8864 	ei->last_reflink_trans = 0;
8865 	ei->last_log_commit = 0;
8866 
8867 	spin_lock_init(&ei->lock);
8868 	ei->outstanding_extents = 0;
8869 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8870 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8871 					      BTRFS_BLOCK_RSV_DELALLOC);
8872 	ei->runtime_flags = 0;
8873 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8874 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8875 
8876 	ei->delayed_node = NULL;
8877 
8878 	ei->i_otime.tv_sec = 0;
8879 	ei->i_otime.tv_nsec = 0;
8880 
8881 	inode = &ei->vfs_inode;
8882 	extent_map_tree_init(&ei->extent_tree);
8883 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
8884 	extent_io_tree_init(fs_info, &ei->io_failure_tree,
8885 			    IO_TREE_INODE_IO_FAILURE, inode);
8886 	extent_io_tree_init(fs_info, &ei->file_extent_tree,
8887 			    IO_TREE_INODE_FILE_EXTENT, inode);
8888 	ei->io_tree.track_uptodate = true;
8889 	ei->io_failure_tree.track_uptodate = true;
8890 	atomic_set(&ei->sync_writers, 0);
8891 	mutex_init(&ei->log_mutex);
8892 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8893 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8894 	INIT_LIST_HEAD(&ei->delayed_iput);
8895 	RB_CLEAR_NODE(&ei->rb_node);
8896 	init_rwsem(&ei->i_mmap_lock);
8897 
8898 	return inode;
8899 }
8900 
8901 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8902 void btrfs_test_destroy_inode(struct inode *inode)
8903 {
8904 	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
8905 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8906 }
8907 #endif
8908 
8909 void btrfs_free_inode(struct inode *inode)
8910 {
8911 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8912 }
8913 
8914 void btrfs_destroy_inode(struct inode *vfs_inode)
8915 {
8916 	struct btrfs_ordered_extent *ordered;
8917 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8918 	struct btrfs_root *root = inode->root;
8919 
8920 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8921 	WARN_ON(vfs_inode->i_data.nrpages);
8922 	WARN_ON(inode->block_rsv.reserved);
8923 	WARN_ON(inode->block_rsv.size);
8924 	WARN_ON(inode->outstanding_extents);
8925 	WARN_ON(inode->delalloc_bytes);
8926 	WARN_ON(inode->new_delalloc_bytes);
8927 	WARN_ON(inode->csum_bytes);
8928 	WARN_ON(inode->defrag_bytes);
8929 
8930 	/*
8931 	 * This can happen where we create an inode, but somebody else also
8932 	 * created the same inode and we need to destroy the one we already
8933 	 * created.
8934 	 */
8935 	if (!root)
8936 		return;
8937 
8938 	while (1) {
8939 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8940 		if (!ordered)
8941 			break;
8942 		else {
8943 			btrfs_err(root->fs_info,
8944 				  "found ordered extent %llu %llu on inode cleanup",
8945 				  ordered->file_offset, ordered->num_bytes);
8946 			btrfs_remove_ordered_extent(inode, ordered);
8947 			btrfs_put_ordered_extent(ordered);
8948 			btrfs_put_ordered_extent(ordered);
8949 		}
8950 	}
8951 	btrfs_qgroup_check_reserved_leak(inode);
8952 	inode_tree_del(inode);
8953 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
8954 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8955 	btrfs_put_root(inode->root);
8956 }
8957 
8958 int btrfs_drop_inode(struct inode *inode)
8959 {
8960 	struct btrfs_root *root = BTRFS_I(inode)->root;
8961 
8962 	if (root == NULL)
8963 		return 1;
8964 
8965 	/* the snap/subvol tree is on deleting */
8966 	if (btrfs_root_refs(&root->root_item) == 0)
8967 		return 1;
8968 	else
8969 		return generic_drop_inode(inode);
8970 }
8971 
8972 static void init_once(void *foo)
8973 {
8974 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
8975 
8976 	inode_init_once(&ei->vfs_inode);
8977 }
8978 
8979 void __cold btrfs_destroy_cachep(void)
8980 {
8981 	/*
8982 	 * Make sure all delayed rcu free inodes are flushed before we
8983 	 * destroy cache.
8984 	 */
8985 	rcu_barrier();
8986 	kmem_cache_destroy(btrfs_inode_cachep);
8987 	kmem_cache_destroy(btrfs_trans_handle_cachep);
8988 	kmem_cache_destroy(btrfs_path_cachep);
8989 	kmem_cache_destroy(btrfs_free_space_cachep);
8990 	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
8991 }
8992 
8993 int __init btrfs_init_cachep(void)
8994 {
8995 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8996 			sizeof(struct btrfs_inode), 0,
8997 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8998 			init_once);
8999 	if (!btrfs_inode_cachep)
9000 		goto fail;
9001 
9002 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9003 			sizeof(struct btrfs_trans_handle), 0,
9004 			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9005 	if (!btrfs_trans_handle_cachep)
9006 		goto fail;
9007 
9008 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
9009 			sizeof(struct btrfs_path), 0,
9010 			SLAB_MEM_SPREAD, NULL);
9011 	if (!btrfs_path_cachep)
9012 		goto fail;
9013 
9014 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9015 			sizeof(struct btrfs_free_space), 0,
9016 			SLAB_MEM_SPREAD, NULL);
9017 	if (!btrfs_free_space_cachep)
9018 		goto fail;
9019 
9020 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9021 							PAGE_SIZE, PAGE_SIZE,
9022 							SLAB_MEM_SPREAD, NULL);
9023 	if (!btrfs_free_space_bitmap_cachep)
9024 		goto fail;
9025 
9026 	return 0;
9027 fail:
9028 	btrfs_destroy_cachep();
9029 	return -ENOMEM;
9030 }
9031 
9032 static int btrfs_getattr(struct user_namespace *mnt_userns,
9033 			 const struct path *path, struct kstat *stat,
9034 			 u32 request_mask, unsigned int flags)
9035 {
9036 	u64 delalloc_bytes;
9037 	u64 inode_bytes;
9038 	struct inode *inode = d_inode(path->dentry);
9039 	u32 blocksize = inode->i_sb->s_blocksize;
9040 	u32 bi_flags = BTRFS_I(inode)->flags;
9041 
9042 	stat->result_mask |= STATX_BTIME;
9043 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9044 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9045 	if (bi_flags & BTRFS_INODE_APPEND)
9046 		stat->attributes |= STATX_ATTR_APPEND;
9047 	if (bi_flags & BTRFS_INODE_COMPRESS)
9048 		stat->attributes |= STATX_ATTR_COMPRESSED;
9049 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
9050 		stat->attributes |= STATX_ATTR_IMMUTABLE;
9051 	if (bi_flags & BTRFS_INODE_NODUMP)
9052 		stat->attributes |= STATX_ATTR_NODUMP;
9053 
9054 	stat->attributes_mask |= (STATX_ATTR_APPEND |
9055 				  STATX_ATTR_COMPRESSED |
9056 				  STATX_ATTR_IMMUTABLE |
9057 				  STATX_ATTR_NODUMP);
9058 
9059 	generic_fillattr(&init_user_ns, inode, stat);
9060 	stat->dev = BTRFS_I(inode)->root->anon_dev;
9061 
9062 	spin_lock(&BTRFS_I(inode)->lock);
9063 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9064 	inode_bytes = inode_get_bytes(inode);
9065 	spin_unlock(&BTRFS_I(inode)->lock);
9066 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
9067 			ALIGN(delalloc_bytes, blocksize)) >> 9;
9068 	return 0;
9069 }
9070 
9071 static int btrfs_rename_exchange(struct inode *old_dir,
9072 			      struct dentry *old_dentry,
9073 			      struct inode *new_dir,
9074 			      struct dentry *new_dentry)
9075 {
9076 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9077 	struct btrfs_trans_handle *trans;
9078 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9079 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9080 	struct inode *new_inode = new_dentry->d_inode;
9081 	struct inode *old_inode = old_dentry->d_inode;
9082 	struct timespec64 ctime = current_time(old_inode);
9083 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9084 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9085 	u64 old_idx = 0;
9086 	u64 new_idx = 0;
9087 	int ret;
9088 	int ret2;
9089 	bool root_log_pinned = false;
9090 	bool dest_log_pinned = false;
9091 	bool need_abort = false;
9092 
9093 	/* we only allow rename subvolume link between subvolumes */
9094 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9095 		return -EXDEV;
9096 
9097 	/* close the race window with snapshot create/destroy ioctl */
9098 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9099 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
9100 		down_read(&fs_info->subvol_sem);
9101 
9102 	/*
9103 	 * We want to reserve the absolute worst case amount of items.  So if
9104 	 * both inodes are subvols and we need to unlink them then that would
9105 	 * require 4 item modifications, but if they are both normal inodes it
9106 	 * would require 5 item modifications, so we'll assume their normal
9107 	 * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9108 	 * should cover the worst case number of items we'll modify.
9109 	 */
9110 	trans = btrfs_start_transaction(root, 12);
9111 	if (IS_ERR(trans)) {
9112 		ret = PTR_ERR(trans);
9113 		goto out_notrans;
9114 	}
9115 
9116 	if (dest != root) {
9117 		ret = btrfs_record_root_in_trans(trans, dest);
9118 		if (ret)
9119 			goto out_fail;
9120 	}
9121 
9122 	/*
9123 	 * We need to find a free sequence number both in the source and
9124 	 * in the destination directory for the exchange.
9125 	 */
9126 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9127 	if (ret)
9128 		goto out_fail;
9129 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9130 	if (ret)
9131 		goto out_fail;
9132 
9133 	BTRFS_I(old_inode)->dir_index = 0ULL;
9134 	BTRFS_I(new_inode)->dir_index = 0ULL;
9135 
9136 	/* Reference for the source. */
9137 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9138 		/* force full log commit if subvolume involved. */
9139 		btrfs_set_log_full_commit(trans);
9140 	} else {
9141 		btrfs_pin_log_trans(root);
9142 		root_log_pinned = true;
9143 		ret = btrfs_insert_inode_ref(trans, dest,
9144 					     new_dentry->d_name.name,
9145 					     new_dentry->d_name.len,
9146 					     old_ino,
9147 					     btrfs_ino(BTRFS_I(new_dir)),
9148 					     old_idx);
9149 		if (ret)
9150 			goto out_fail;
9151 		need_abort = true;
9152 	}
9153 
9154 	/* And now for the dest. */
9155 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9156 		/* force full log commit if subvolume involved. */
9157 		btrfs_set_log_full_commit(trans);
9158 	} else {
9159 		btrfs_pin_log_trans(dest);
9160 		dest_log_pinned = true;
9161 		ret = btrfs_insert_inode_ref(trans, root,
9162 					     old_dentry->d_name.name,
9163 					     old_dentry->d_name.len,
9164 					     new_ino,
9165 					     btrfs_ino(BTRFS_I(old_dir)),
9166 					     new_idx);
9167 		if (ret) {
9168 			if (need_abort)
9169 				btrfs_abort_transaction(trans, ret);
9170 			goto out_fail;
9171 		}
9172 	}
9173 
9174 	/* Update inode version and ctime/mtime. */
9175 	inode_inc_iversion(old_dir);
9176 	inode_inc_iversion(new_dir);
9177 	inode_inc_iversion(old_inode);
9178 	inode_inc_iversion(new_inode);
9179 	old_dir->i_ctime = old_dir->i_mtime = ctime;
9180 	new_dir->i_ctime = new_dir->i_mtime = ctime;
9181 	old_inode->i_ctime = ctime;
9182 	new_inode->i_ctime = ctime;
9183 
9184 	if (old_dentry->d_parent != new_dentry->d_parent) {
9185 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9186 				BTRFS_I(old_inode), 1);
9187 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9188 				BTRFS_I(new_inode), 1);
9189 	}
9190 
9191 	/* src is a subvolume */
9192 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9193 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9194 	} else { /* src is an inode */
9195 		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9196 					   BTRFS_I(old_dentry->d_inode),
9197 					   old_dentry->d_name.name,
9198 					   old_dentry->d_name.len);
9199 		if (!ret)
9200 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9201 	}
9202 	if (ret) {
9203 		btrfs_abort_transaction(trans, ret);
9204 		goto out_fail;
9205 	}
9206 
9207 	/* dest is a subvolume */
9208 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9209 		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9210 	} else { /* dest is an inode */
9211 		ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9212 					   BTRFS_I(new_dentry->d_inode),
9213 					   new_dentry->d_name.name,
9214 					   new_dentry->d_name.len);
9215 		if (!ret)
9216 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
9217 	}
9218 	if (ret) {
9219 		btrfs_abort_transaction(trans, ret);
9220 		goto out_fail;
9221 	}
9222 
9223 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9224 			     new_dentry->d_name.name,
9225 			     new_dentry->d_name.len, 0, old_idx);
9226 	if (ret) {
9227 		btrfs_abort_transaction(trans, ret);
9228 		goto out_fail;
9229 	}
9230 
9231 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9232 			     old_dentry->d_name.name,
9233 			     old_dentry->d_name.len, 0, new_idx);
9234 	if (ret) {
9235 		btrfs_abort_transaction(trans, ret);
9236 		goto out_fail;
9237 	}
9238 
9239 	if (old_inode->i_nlink == 1)
9240 		BTRFS_I(old_inode)->dir_index = old_idx;
9241 	if (new_inode->i_nlink == 1)
9242 		BTRFS_I(new_inode)->dir_index = new_idx;
9243 
9244 	if (root_log_pinned) {
9245 		btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9246 				   new_dentry->d_parent);
9247 		btrfs_end_log_trans(root);
9248 		root_log_pinned = false;
9249 	}
9250 	if (dest_log_pinned) {
9251 		btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
9252 				   old_dentry->d_parent);
9253 		btrfs_end_log_trans(dest);
9254 		dest_log_pinned = false;
9255 	}
9256 out_fail:
9257 	/*
9258 	 * If we have pinned a log and an error happened, we unpin tasks
9259 	 * trying to sync the log and force them to fallback to a transaction
9260 	 * commit if the log currently contains any of the inodes involved in
9261 	 * this rename operation (to ensure we do not persist a log with an
9262 	 * inconsistent state for any of these inodes or leading to any
9263 	 * inconsistencies when replayed). If the transaction was aborted, the
9264 	 * abortion reason is propagated to userspace when attempting to commit
9265 	 * the transaction. If the log does not contain any of these inodes, we
9266 	 * allow the tasks to sync it.
9267 	 */
9268 	if (ret && (root_log_pinned || dest_log_pinned)) {
9269 		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9270 		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9271 		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9272 		    (new_inode &&
9273 		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9274 			btrfs_set_log_full_commit(trans);
9275 
9276 		if (root_log_pinned) {
9277 			btrfs_end_log_trans(root);
9278 			root_log_pinned = false;
9279 		}
9280 		if (dest_log_pinned) {
9281 			btrfs_end_log_trans(dest);
9282 			dest_log_pinned = false;
9283 		}
9284 	}
9285 	ret2 = btrfs_end_transaction(trans);
9286 	ret = ret ? ret : ret2;
9287 out_notrans:
9288 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9289 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9290 		up_read(&fs_info->subvol_sem);
9291 
9292 	return ret;
9293 }
9294 
9295 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9296 				     struct btrfs_root *root,
9297 				     struct inode *dir,
9298 				     struct dentry *dentry)
9299 {
9300 	int ret;
9301 	struct inode *inode;
9302 	u64 objectid;
9303 	u64 index;
9304 
9305 	ret = btrfs_get_free_objectid(root, &objectid);
9306 	if (ret)
9307 		return ret;
9308 
9309 	inode = btrfs_new_inode(trans, root, dir,
9310 				dentry->d_name.name,
9311 				dentry->d_name.len,
9312 				btrfs_ino(BTRFS_I(dir)),
9313 				objectid,
9314 				S_IFCHR | WHITEOUT_MODE,
9315 				&index);
9316 
9317 	if (IS_ERR(inode)) {
9318 		ret = PTR_ERR(inode);
9319 		return ret;
9320 	}
9321 
9322 	inode->i_op = &btrfs_special_inode_operations;
9323 	init_special_inode(inode, inode->i_mode,
9324 		WHITEOUT_DEV);
9325 
9326 	ret = btrfs_init_inode_security(trans, inode, dir,
9327 				&dentry->d_name);
9328 	if (ret)
9329 		goto out;
9330 
9331 	ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9332 				BTRFS_I(inode), 0, index);
9333 	if (ret)
9334 		goto out;
9335 
9336 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
9337 out:
9338 	unlock_new_inode(inode);
9339 	if (ret)
9340 		inode_dec_link_count(inode);
9341 	iput(inode);
9342 
9343 	return ret;
9344 }
9345 
9346 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9347 			   struct inode *new_dir, struct dentry *new_dentry,
9348 			   unsigned int flags)
9349 {
9350 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9351 	struct btrfs_trans_handle *trans;
9352 	unsigned int trans_num_items;
9353 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9354 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9355 	struct inode *new_inode = d_inode(new_dentry);
9356 	struct inode *old_inode = d_inode(old_dentry);
9357 	u64 index = 0;
9358 	int ret;
9359 	int ret2;
9360 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9361 	bool log_pinned = false;
9362 
9363 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9364 		return -EPERM;
9365 
9366 	/* we only allow rename subvolume link between subvolumes */
9367 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9368 		return -EXDEV;
9369 
9370 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9371 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9372 		return -ENOTEMPTY;
9373 
9374 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9375 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9376 		return -ENOTEMPTY;
9377 
9378 
9379 	/* check for collisions, even if the  name isn't there */
9380 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9381 			     new_dentry->d_name.name,
9382 			     new_dentry->d_name.len);
9383 
9384 	if (ret) {
9385 		if (ret == -EEXIST) {
9386 			/* we shouldn't get
9387 			 * eexist without a new_inode */
9388 			if (WARN_ON(!new_inode)) {
9389 				return ret;
9390 			}
9391 		} else {
9392 			/* maybe -EOVERFLOW */
9393 			return ret;
9394 		}
9395 	}
9396 	ret = 0;
9397 
9398 	/*
9399 	 * we're using rename to replace one file with another.  Start IO on it
9400 	 * now so  we don't add too much work to the end of the transaction
9401 	 */
9402 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9403 		filemap_flush(old_inode->i_mapping);
9404 
9405 	/* close the racy window with snapshot create/destroy ioctl */
9406 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9407 		down_read(&fs_info->subvol_sem);
9408 	/*
9409 	 * We want to reserve the absolute worst case amount of items.  So if
9410 	 * both inodes are subvols and we need to unlink them then that would
9411 	 * require 4 item modifications, but if they are both normal inodes it
9412 	 * would require 5 item modifications, so we'll assume they are normal
9413 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9414 	 * should cover the worst case number of items we'll modify.
9415 	 * If our rename has the whiteout flag, we need more 5 units for the
9416 	 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9417 	 * when selinux is enabled).
9418 	 */
9419 	trans_num_items = 11;
9420 	if (flags & RENAME_WHITEOUT)
9421 		trans_num_items += 5;
9422 	trans = btrfs_start_transaction(root, trans_num_items);
9423 	if (IS_ERR(trans)) {
9424 		ret = PTR_ERR(trans);
9425 		goto out_notrans;
9426 	}
9427 
9428 	if (dest != root) {
9429 		ret = btrfs_record_root_in_trans(trans, dest);
9430 		if (ret)
9431 			goto out_fail;
9432 	}
9433 
9434 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9435 	if (ret)
9436 		goto out_fail;
9437 
9438 	BTRFS_I(old_inode)->dir_index = 0ULL;
9439 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9440 		/* force full log commit if subvolume involved. */
9441 		btrfs_set_log_full_commit(trans);
9442 	} else {
9443 		btrfs_pin_log_trans(root);
9444 		log_pinned = true;
9445 		ret = btrfs_insert_inode_ref(trans, dest,
9446 					     new_dentry->d_name.name,
9447 					     new_dentry->d_name.len,
9448 					     old_ino,
9449 					     btrfs_ino(BTRFS_I(new_dir)), index);
9450 		if (ret)
9451 			goto out_fail;
9452 	}
9453 
9454 	inode_inc_iversion(old_dir);
9455 	inode_inc_iversion(new_dir);
9456 	inode_inc_iversion(old_inode);
9457 	old_dir->i_ctime = old_dir->i_mtime =
9458 	new_dir->i_ctime = new_dir->i_mtime =
9459 	old_inode->i_ctime = current_time(old_dir);
9460 
9461 	if (old_dentry->d_parent != new_dentry->d_parent)
9462 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9463 				BTRFS_I(old_inode), 1);
9464 
9465 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9466 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9467 	} else {
9468 		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9469 					BTRFS_I(d_inode(old_dentry)),
9470 					old_dentry->d_name.name,
9471 					old_dentry->d_name.len);
9472 		if (!ret)
9473 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
9474 	}
9475 	if (ret) {
9476 		btrfs_abort_transaction(trans, ret);
9477 		goto out_fail;
9478 	}
9479 
9480 	if (new_inode) {
9481 		inode_inc_iversion(new_inode);
9482 		new_inode->i_ctime = current_time(new_inode);
9483 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9484 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9485 			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9486 			BUG_ON(new_inode->i_nlink == 0);
9487 		} else {
9488 			ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9489 						 BTRFS_I(d_inode(new_dentry)),
9490 						 new_dentry->d_name.name,
9491 						 new_dentry->d_name.len);
9492 		}
9493 		if (!ret && new_inode->i_nlink == 0)
9494 			ret = btrfs_orphan_add(trans,
9495 					BTRFS_I(d_inode(new_dentry)));
9496 		if (ret) {
9497 			btrfs_abort_transaction(trans, ret);
9498 			goto out_fail;
9499 		}
9500 	}
9501 
9502 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9503 			     new_dentry->d_name.name,
9504 			     new_dentry->d_name.len, 0, index);
9505 	if (ret) {
9506 		btrfs_abort_transaction(trans, ret);
9507 		goto out_fail;
9508 	}
9509 
9510 	if (old_inode->i_nlink == 1)
9511 		BTRFS_I(old_inode)->dir_index = index;
9512 
9513 	if (log_pinned) {
9514 		btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9515 				   new_dentry->d_parent);
9516 		btrfs_end_log_trans(root);
9517 		log_pinned = false;
9518 	}
9519 
9520 	if (flags & RENAME_WHITEOUT) {
9521 		ret = btrfs_whiteout_for_rename(trans, root, old_dir,
9522 						old_dentry);
9523 
9524 		if (ret) {
9525 			btrfs_abort_transaction(trans, ret);
9526 			goto out_fail;
9527 		}
9528 	}
9529 out_fail:
9530 	/*
9531 	 * If we have pinned the log and an error happened, we unpin tasks
9532 	 * trying to sync the log and force them to fallback to a transaction
9533 	 * commit if the log currently contains any of the inodes involved in
9534 	 * this rename operation (to ensure we do not persist a log with an
9535 	 * inconsistent state for any of these inodes or leading to any
9536 	 * inconsistencies when replayed). If the transaction was aborted, the
9537 	 * abortion reason is propagated to userspace when attempting to commit
9538 	 * the transaction. If the log does not contain any of these inodes, we
9539 	 * allow the tasks to sync it.
9540 	 */
9541 	if (ret && log_pinned) {
9542 		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9543 		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9544 		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9545 		    (new_inode &&
9546 		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9547 			btrfs_set_log_full_commit(trans);
9548 
9549 		btrfs_end_log_trans(root);
9550 		log_pinned = false;
9551 	}
9552 	ret2 = btrfs_end_transaction(trans);
9553 	ret = ret ? ret : ret2;
9554 out_notrans:
9555 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9556 		up_read(&fs_info->subvol_sem);
9557 
9558 	return ret;
9559 }
9560 
9561 static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
9562 			 struct dentry *old_dentry, struct inode *new_dir,
9563 			 struct dentry *new_dentry, unsigned int flags)
9564 {
9565 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9566 		return -EINVAL;
9567 
9568 	if (flags & RENAME_EXCHANGE)
9569 		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9570 					  new_dentry);
9571 
9572 	return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
9573 }
9574 
9575 struct btrfs_delalloc_work {
9576 	struct inode *inode;
9577 	struct completion completion;
9578 	struct list_head list;
9579 	struct btrfs_work work;
9580 };
9581 
9582 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9583 {
9584 	struct btrfs_delalloc_work *delalloc_work;
9585 	struct inode *inode;
9586 
9587 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9588 				     work);
9589 	inode = delalloc_work->inode;
9590 	filemap_flush(inode->i_mapping);
9591 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9592 				&BTRFS_I(inode)->runtime_flags))
9593 		filemap_flush(inode->i_mapping);
9594 
9595 	iput(inode);
9596 	complete(&delalloc_work->completion);
9597 }
9598 
9599 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9600 {
9601 	struct btrfs_delalloc_work *work;
9602 
9603 	work = kmalloc(sizeof(*work), GFP_NOFS);
9604 	if (!work)
9605 		return NULL;
9606 
9607 	init_completion(&work->completion);
9608 	INIT_LIST_HEAD(&work->list);
9609 	work->inode = inode;
9610 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9611 
9612 	return work;
9613 }
9614 
9615 /*
9616  * some fairly slow code that needs optimization. This walks the list
9617  * of all the inodes with pending delalloc and forces them to disk.
9618  */
9619 static int start_delalloc_inodes(struct btrfs_root *root,
9620 				 struct writeback_control *wbc, bool snapshot,
9621 				 bool in_reclaim_context)
9622 {
9623 	struct btrfs_inode *binode;
9624 	struct inode *inode;
9625 	struct btrfs_delalloc_work *work, *next;
9626 	struct list_head works;
9627 	struct list_head splice;
9628 	int ret = 0;
9629 	bool full_flush = wbc->nr_to_write == LONG_MAX;
9630 
9631 	INIT_LIST_HEAD(&works);
9632 	INIT_LIST_HEAD(&splice);
9633 
9634 	mutex_lock(&root->delalloc_mutex);
9635 	spin_lock(&root->delalloc_lock);
9636 	list_splice_init(&root->delalloc_inodes, &splice);
9637 	while (!list_empty(&splice)) {
9638 		binode = list_entry(splice.next, struct btrfs_inode,
9639 				    delalloc_inodes);
9640 
9641 		list_move_tail(&binode->delalloc_inodes,
9642 			       &root->delalloc_inodes);
9643 
9644 		if (in_reclaim_context &&
9645 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9646 			continue;
9647 
9648 		inode = igrab(&binode->vfs_inode);
9649 		if (!inode) {
9650 			cond_resched_lock(&root->delalloc_lock);
9651 			continue;
9652 		}
9653 		spin_unlock(&root->delalloc_lock);
9654 
9655 		if (snapshot)
9656 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9657 				&binode->runtime_flags);
9658 		if (full_flush) {
9659 			work = btrfs_alloc_delalloc_work(inode);
9660 			if (!work) {
9661 				iput(inode);
9662 				ret = -ENOMEM;
9663 				goto out;
9664 			}
9665 			list_add_tail(&work->list, &works);
9666 			btrfs_queue_work(root->fs_info->flush_workers,
9667 					 &work->work);
9668 		} else {
9669 			ret = sync_inode(inode, wbc);
9670 			if (!ret &&
9671 			    test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9672 				     &BTRFS_I(inode)->runtime_flags))
9673 				ret = sync_inode(inode, wbc);
9674 			btrfs_add_delayed_iput(inode);
9675 			if (ret || wbc->nr_to_write <= 0)
9676 				goto out;
9677 		}
9678 		cond_resched();
9679 		spin_lock(&root->delalloc_lock);
9680 	}
9681 	spin_unlock(&root->delalloc_lock);
9682 
9683 out:
9684 	list_for_each_entry_safe(work, next, &works, list) {
9685 		list_del_init(&work->list);
9686 		wait_for_completion(&work->completion);
9687 		kfree(work);
9688 	}
9689 
9690 	if (!list_empty(&splice)) {
9691 		spin_lock(&root->delalloc_lock);
9692 		list_splice_tail(&splice, &root->delalloc_inodes);
9693 		spin_unlock(&root->delalloc_lock);
9694 	}
9695 	mutex_unlock(&root->delalloc_mutex);
9696 	return ret;
9697 }
9698 
9699 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9700 {
9701 	struct writeback_control wbc = {
9702 		.nr_to_write = LONG_MAX,
9703 		.sync_mode = WB_SYNC_NONE,
9704 		.range_start = 0,
9705 		.range_end = LLONG_MAX,
9706 	};
9707 	struct btrfs_fs_info *fs_info = root->fs_info;
9708 
9709 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9710 		return -EROFS;
9711 
9712 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9713 }
9714 
9715 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9716 			       bool in_reclaim_context)
9717 {
9718 	struct writeback_control wbc = {
9719 		.nr_to_write = nr,
9720 		.sync_mode = WB_SYNC_NONE,
9721 		.range_start = 0,
9722 		.range_end = LLONG_MAX,
9723 	};
9724 	struct btrfs_root *root;
9725 	struct list_head splice;
9726 	int ret;
9727 
9728 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9729 		return -EROFS;
9730 
9731 	INIT_LIST_HEAD(&splice);
9732 
9733 	mutex_lock(&fs_info->delalloc_root_mutex);
9734 	spin_lock(&fs_info->delalloc_root_lock);
9735 	list_splice_init(&fs_info->delalloc_roots, &splice);
9736 	while (!list_empty(&splice)) {
9737 		/*
9738 		 * Reset nr_to_write here so we know that we're doing a full
9739 		 * flush.
9740 		 */
9741 		if (nr == LONG_MAX)
9742 			wbc.nr_to_write = LONG_MAX;
9743 
9744 		root = list_first_entry(&splice, struct btrfs_root,
9745 					delalloc_root);
9746 		root = btrfs_grab_root(root);
9747 		BUG_ON(!root);
9748 		list_move_tail(&root->delalloc_root,
9749 			       &fs_info->delalloc_roots);
9750 		spin_unlock(&fs_info->delalloc_root_lock);
9751 
9752 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9753 		btrfs_put_root(root);
9754 		if (ret < 0 || wbc.nr_to_write <= 0)
9755 			goto out;
9756 		spin_lock(&fs_info->delalloc_root_lock);
9757 	}
9758 	spin_unlock(&fs_info->delalloc_root_lock);
9759 
9760 	ret = 0;
9761 out:
9762 	if (!list_empty(&splice)) {
9763 		spin_lock(&fs_info->delalloc_root_lock);
9764 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9765 		spin_unlock(&fs_info->delalloc_root_lock);
9766 	}
9767 	mutex_unlock(&fs_info->delalloc_root_mutex);
9768 	return ret;
9769 }
9770 
9771 static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
9772 			 struct dentry *dentry, const char *symname)
9773 {
9774 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9775 	struct btrfs_trans_handle *trans;
9776 	struct btrfs_root *root = BTRFS_I(dir)->root;
9777 	struct btrfs_path *path;
9778 	struct btrfs_key key;
9779 	struct inode *inode = NULL;
9780 	int err;
9781 	u64 objectid;
9782 	u64 index = 0;
9783 	int name_len;
9784 	int datasize;
9785 	unsigned long ptr;
9786 	struct btrfs_file_extent_item *ei;
9787 	struct extent_buffer *leaf;
9788 
9789 	name_len = strlen(symname);
9790 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9791 		return -ENAMETOOLONG;
9792 
9793 	/*
9794 	 * 2 items for inode item and ref
9795 	 * 2 items for dir items
9796 	 * 1 item for updating parent inode item
9797 	 * 1 item for the inline extent item
9798 	 * 1 item for xattr if selinux is on
9799 	 */
9800 	trans = btrfs_start_transaction(root, 7);
9801 	if (IS_ERR(trans))
9802 		return PTR_ERR(trans);
9803 
9804 	err = btrfs_get_free_objectid(root, &objectid);
9805 	if (err)
9806 		goto out_unlock;
9807 
9808 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
9809 				dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
9810 				objectid, S_IFLNK|S_IRWXUGO, &index);
9811 	if (IS_ERR(inode)) {
9812 		err = PTR_ERR(inode);
9813 		inode = NULL;
9814 		goto out_unlock;
9815 	}
9816 
9817 	/*
9818 	* If the active LSM wants to access the inode during
9819 	* d_instantiate it needs these. Smack checks to see
9820 	* if the filesystem supports xattrs by looking at the
9821 	* ops vector.
9822 	*/
9823 	inode->i_fop = &btrfs_file_operations;
9824 	inode->i_op = &btrfs_file_inode_operations;
9825 	inode->i_mapping->a_ops = &btrfs_aops;
9826 
9827 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
9828 	if (err)
9829 		goto out_unlock;
9830 
9831 	path = btrfs_alloc_path();
9832 	if (!path) {
9833 		err = -ENOMEM;
9834 		goto out_unlock;
9835 	}
9836 	key.objectid = btrfs_ino(BTRFS_I(inode));
9837 	key.offset = 0;
9838 	key.type = BTRFS_EXTENT_DATA_KEY;
9839 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9840 	err = btrfs_insert_empty_item(trans, root, path, &key,
9841 				      datasize);
9842 	if (err) {
9843 		btrfs_free_path(path);
9844 		goto out_unlock;
9845 	}
9846 	leaf = path->nodes[0];
9847 	ei = btrfs_item_ptr(leaf, path->slots[0],
9848 			    struct btrfs_file_extent_item);
9849 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9850 	btrfs_set_file_extent_type(leaf, ei,
9851 				   BTRFS_FILE_EXTENT_INLINE);
9852 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9853 	btrfs_set_file_extent_compression(leaf, ei, 0);
9854 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9855 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9856 
9857 	ptr = btrfs_file_extent_inline_start(ei);
9858 	write_extent_buffer(leaf, symname, ptr, name_len);
9859 	btrfs_mark_buffer_dirty(leaf);
9860 	btrfs_free_path(path);
9861 
9862 	inode->i_op = &btrfs_symlink_inode_operations;
9863 	inode_nohighmem(inode);
9864 	inode_set_bytes(inode, name_len);
9865 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9866 	err = btrfs_update_inode(trans, root, BTRFS_I(inode));
9867 	/*
9868 	 * Last step, add directory indexes for our symlink inode. This is the
9869 	 * last step to avoid extra cleanup of these indexes if an error happens
9870 	 * elsewhere above.
9871 	 */
9872 	if (!err)
9873 		err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9874 				BTRFS_I(inode), 0, index);
9875 	if (err)
9876 		goto out_unlock;
9877 
9878 	d_instantiate_new(dentry, inode);
9879 
9880 out_unlock:
9881 	btrfs_end_transaction(trans);
9882 	if (err && inode) {
9883 		inode_dec_link_count(inode);
9884 		discard_new_inode(inode);
9885 	}
9886 	btrfs_btree_balance_dirty(fs_info);
9887 	return err;
9888 }
9889 
9890 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9891 				       struct btrfs_trans_handle *trans_in,
9892 				       struct btrfs_inode *inode,
9893 				       struct btrfs_key *ins,
9894 				       u64 file_offset)
9895 {
9896 	struct btrfs_file_extent_item stack_fi;
9897 	struct btrfs_replace_extent_info extent_info;
9898 	struct btrfs_trans_handle *trans = trans_in;
9899 	struct btrfs_path *path;
9900 	u64 start = ins->objectid;
9901 	u64 len = ins->offset;
9902 	int qgroup_released;
9903 	int ret;
9904 
9905 	memset(&stack_fi, 0, sizeof(stack_fi));
9906 
9907 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9908 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9909 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9910 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9911 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9912 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9913 	/* Encryption and other encoding is reserved and all 0 */
9914 
9915 	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
9916 	if (qgroup_released < 0)
9917 		return ERR_PTR(qgroup_released);
9918 
9919 	if (trans) {
9920 		ret = insert_reserved_file_extent(trans, inode,
9921 						  file_offset, &stack_fi,
9922 						  true, qgroup_released);
9923 		if (ret)
9924 			goto free_qgroup;
9925 		return trans;
9926 	}
9927 
9928 	extent_info.disk_offset = start;
9929 	extent_info.disk_len = len;
9930 	extent_info.data_offset = 0;
9931 	extent_info.data_len = len;
9932 	extent_info.file_offset = file_offset;
9933 	extent_info.extent_buf = (char *)&stack_fi;
9934 	extent_info.is_new_extent = true;
9935 	extent_info.qgroup_reserved = qgroup_released;
9936 	extent_info.insertions = 0;
9937 
9938 	path = btrfs_alloc_path();
9939 	if (!path) {
9940 		ret = -ENOMEM;
9941 		goto free_qgroup;
9942 	}
9943 
9944 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9945 				     file_offset + len - 1, &extent_info,
9946 				     &trans);
9947 	btrfs_free_path(path);
9948 	if (ret)
9949 		goto free_qgroup;
9950 	return trans;
9951 
9952 free_qgroup:
9953 	/*
9954 	 * We have released qgroup data range at the beginning of the function,
9955 	 * and normally qgroup_released bytes will be freed when committing
9956 	 * transaction.
9957 	 * But if we error out early, we have to free what we have released
9958 	 * or we leak qgroup data reservation.
9959 	 */
9960 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9961 			inode->root->root_key.objectid, qgroup_released,
9962 			BTRFS_QGROUP_RSV_DATA);
9963 	return ERR_PTR(ret);
9964 }
9965 
9966 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9967 				       u64 start, u64 num_bytes, u64 min_size,
9968 				       loff_t actual_len, u64 *alloc_hint,
9969 				       struct btrfs_trans_handle *trans)
9970 {
9971 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9972 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9973 	struct extent_map *em;
9974 	struct btrfs_root *root = BTRFS_I(inode)->root;
9975 	struct btrfs_key ins;
9976 	u64 cur_offset = start;
9977 	u64 clear_offset = start;
9978 	u64 i_size;
9979 	u64 cur_bytes;
9980 	u64 last_alloc = (u64)-1;
9981 	int ret = 0;
9982 	bool own_trans = true;
9983 	u64 end = start + num_bytes - 1;
9984 
9985 	if (trans)
9986 		own_trans = false;
9987 	while (num_bytes > 0) {
9988 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9989 		cur_bytes = max(cur_bytes, min_size);
9990 		/*
9991 		 * If we are severely fragmented we could end up with really
9992 		 * small allocations, so if the allocator is returning small
9993 		 * chunks lets make its job easier by only searching for those
9994 		 * sized chunks.
9995 		 */
9996 		cur_bytes = min(cur_bytes, last_alloc);
9997 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9998 				min_size, 0, *alloc_hint, &ins, 1, 0);
9999 		if (ret)
10000 			break;
10001 
10002 		/*
10003 		 * We've reserved this space, and thus converted it from
10004 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
10005 		 * from here on out we will only need to clear our reservation
10006 		 * for the remaining unreserved area, so advance our
10007 		 * clear_offset by our extent size.
10008 		 */
10009 		clear_offset += ins.offset;
10010 
10011 		last_alloc = ins.offset;
10012 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
10013 						    &ins, cur_offset);
10014 		/*
10015 		 * Now that we inserted the prealloc extent we can finally
10016 		 * decrement the number of reservations in the block group.
10017 		 * If we did it before, we could race with relocation and have
10018 		 * relocation miss the reserved extent, making it fail later.
10019 		 */
10020 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10021 		if (IS_ERR(trans)) {
10022 			ret = PTR_ERR(trans);
10023 			btrfs_free_reserved_extent(fs_info, ins.objectid,
10024 						   ins.offset, 0);
10025 			break;
10026 		}
10027 
10028 		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10029 					cur_offset + ins.offset -1, 0);
10030 
10031 		em = alloc_extent_map();
10032 		if (!em) {
10033 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10034 				&BTRFS_I(inode)->runtime_flags);
10035 			goto next;
10036 		}
10037 
10038 		em->start = cur_offset;
10039 		em->orig_start = cur_offset;
10040 		em->len = ins.offset;
10041 		em->block_start = ins.objectid;
10042 		em->block_len = ins.offset;
10043 		em->orig_block_len = ins.offset;
10044 		em->ram_bytes = ins.offset;
10045 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10046 		em->generation = trans->transid;
10047 
10048 		while (1) {
10049 			write_lock(&em_tree->lock);
10050 			ret = add_extent_mapping(em_tree, em, 1);
10051 			write_unlock(&em_tree->lock);
10052 			if (ret != -EEXIST)
10053 				break;
10054 			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10055 						cur_offset + ins.offset - 1,
10056 						0);
10057 		}
10058 		free_extent_map(em);
10059 next:
10060 		num_bytes -= ins.offset;
10061 		cur_offset += ins.offset;
10062 		*alloc_hint = ins.objectid + ins.offset;
10063 
10064 		inode_inc_iversion(inode);
10065 		inode->i_ctime = current_time(inode);
10066 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10067 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10068 		    (actual_len > inode->i_size) &&
10069 		    (cur_offset > inode->i_size)) {
10070 			if (cur_offset > actual_len)
10071 				i_size = actual_len;
10072 			else
10073 				i_size = cur_offset;
10074 			i_size_write(inode, i_size);
10075 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
10076 		}
10077 
10078 		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
10079 
10080 		if (ret) {
10081 			btrfs_abort_transaction(trans, ret);
10082 			if (own_trans)
10083 				btrfs_end_transaction(trans);
10084 			break;
10085 		}
10086 
10087 		if (own_trans) {
10088 			btrfs_end_transaction(trans);
10089 			trans = NULL;
10090 		}
10091 	}
10092 	if (clear_offset < end)
10093 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
10094 			end - clear_offset + 1);
10095 	return ret;
10096 }
10097 
10098 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10099 			      u64 start, u64 num_bytes, u64 min_size,
10100 			      loff_t actual_len, u64 *alloc_hint)
10101 {
10102 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10103 					   min_size, actual_len, alloc_hint,
10104 					   NULL);
10105 }
10106 
10107 int btrfs_prealloc_file_range_trans(struct inode *inode,
10108 				    struct btrfs_trans_handle *trans, int mode,
10109 				    u64 start, u64 num_bytes, u64 min_size,
10110 				    loff_t actual_len, u64 *alloc_hint)
10111 {
10112 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10113 					   min_size, actual_len, alloc_hint, trans);
10114 }
10115 
10116 static int btrfs_set_page_dirty(struct page *page)
10117 {
10118 	return __set_page_dirty_nobuffers(page);
10119 }
10120 
10121 static int btrfs_permission(struct user_namespace *mnt_userns,
10122 			    struct inode *inode, int mask)
10123 {
10124 	struct btrfs_root *root = BTRFS_I(inode)->root;
10125 	umode_t mode = inode->i_mode;
10126 
10127 	if (mask & MAY_WRITE &&
10128 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10129 		if (btrfs_root_readonly(root))
10130 			return -EROFS;
10131 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10132 			return -EACCES;
10133 	}
10134 	return generic_permission(&init_user_ns, inode, mask);
10135 }
10136 
10137 static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
10138 			 struct dentry *dentry, umode_t mode)
10139 {
10140 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10141 	struct btrfs_trans_handle *trans;
10142 	struct btrfs_root *root = BTRFS_I(dir)->root;
10143 	struct inode *inode = NULL;
10144 	u64 objectid;
10145 	u64 index;
10146 	int ret = 0;
10147 
10148 	/*
10149 	 * 5 units required for adding orphan entry
10150 	 */
10151 	trans = btrfs_start_transaction(root, 5);
10152 	if (IS_ERR(trans))
10153 		return PTR_ERR(trans);
10154 
10155 	ret = btrfs_get_free_objectid(root, &objectid);
10156 	if (ret)
10157 		goto out;
10158 
10159 	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10160 			btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
10161 	if (IS_ERR(inode)) {
10162 		ret = PTR_ERR(inode);
10163 		inode = NULL;
10164 		goto out;
10165 	}
10166 
10167 	inode->i_fop = &btrfs_file_operations;
10168 	inode->i_op = &btrfs_file_inode_operations;
10169 
10170 	inode->i_mapping->a_ops = &btrfs_aops;
10171 
10172 	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10173 	if (ret)
10174 		goto out;
10175 
10176 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
10177 	if (ret)
10178 		goto out;
10179 	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10180 	if (ret)
10181 		goto out;
10182 
10183 	/*
10184 	 * We set number of links to 0 in btrfs_new_inode(), and here we set
10185 	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
10186 	 * through:
10187 	 *
10188 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10189 	 */
10190 	set_nlink(inode, 1);
10191 	d_tmpfile(dentry, inode);
10192 	unlock_new_inode(inode);
10193 	mark_inode_dirty(inode);
10194 out:
10195 	btrfs_end_transaction(trans);
10196 	if (ret && inode)
10197 		discard_new_inode(inode);
10198 	btrfs_btree_balance_dirty(fs_info);
10199 	return ret;
10200 }
10201 
10202 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
10203 {
10204 	struct inode *inode = tree->private_data;
10205 	unsigned long index = start >> PAGE_SHIFT;
10206 	unsigned long end_index = end >> PAGE_SHIFT;
10207 	struct page *page;
10208 
10209 	while (index <= end_index) {
10210 		page = find_get_page(inode->i_mapping, index);
10211 		ASSERT(page); /* Pages should be in the extent_io_tree */
10212 		set_page_writeback(page);
10213 		put_page(page);
10214 		index++;
10215 	}
10216 }
10217 
10218 #ifdef CONFIG_SWAP
10219 /*
10220  * Add an entry indicating a block group or device which is pinned by a
10221  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10222  * negative errno on failure.
10223  */
10224 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10225 				  bool is_block_group)
10226 {
10227 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10228 	struct btrfs_swapfile_pin *sp, *entry;
10229 	struct rb_node **p;
10230 	struct rb_node *parent = NULL;
10231 
10232 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10233 	if (!sp)
10234 		return -ENOMEM;
10235 	sp->ptr = ptr;
10236 	sp->inode = inode;
10237 	sp->is_block_group = is_block_group;
10238 	sp->bg_extent_count = 1;
10239 
10240 	spin_lock(&fs_info->swapfile_pins_lock);
10241 	p = &fs_info->swapfile_pins.rb_node;
10242 	while (*p) {
10243 		parent = *p;
10244 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10245 		if (sp->ptr < entry->ptr ||
10246 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10247 			p = &(*p)->rb_left;
10248 		} else if (sp->ptr > entry->ptr ||
10249 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10250 			p = &(*p)->rb_right;
10251 		} else {
10252 			if (is_block_group)
10253 				entry->bg_extent_count++;
10254 			spin_unlock(&fs_info->swapfile_pins_lock);
10255 			kfree(sp);
10256 			return 1;
10257 		}
10258 	}
10259 	rb_link_node(&sp->node, parent, p);
10260 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10261 	spin_unlock(&fs_info->swapfile_pins_lock);
10262 	return 0;
10263 }
10264 
10265 /* Free all of the entries pinned by this swapfile. */
10266 static void btrfs_free_swapfile_pins(struct inode *inode)
10267 {
10268 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10269 	struct btrfs_swapfile_pin *sp;
10270 	struct rb_node *node, *next;
10271 
10272 	spin_lock(&fs_info->swapfile_pins_lock);
10273 	node = rb_first(&fs_info->swapfile_pins);
10274 	while (node) {
10275 		next = rb_next(node);
10276 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10277 		if (sp->inode == inode) {
10278 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10279 			if (sp->is_block_group) {
10280 				btrfs_dec_block_group_swap_extents(sp->ptr,
10281 							   sp->bg_extent_count);
10282 				btrfs_put_block_group(sp->ptr);
10283 			}
10284 			kfree(sp);
10285 		}
10286 		node = next;
10287 	}
10288 	spin_unlock(&fs_info->swapfile_pins_lock);
10289 }
10290 
10291 struct btrfs_swap_info {
10292 	u64 start;
10293 	u64 block_start;
10294 	u64 block_len;
10295 	u64 lowest_ppage;
10296 	u64 highest_ppage;
10297 	unsigned long nr_pages;
10298 	int nr_extents;
10299 };
10300 
10301 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10302 				 struct btrfs_swap_info *bsi)
10303 {
10304 	unsigned long nr_pages;
10305 	u64 first_ppage, first_ppage_reported, next_ppage;
10306 	int ret;
10307 
10308 	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
10309 	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
10310 				PAGE_SIZE) >> PAGE_SHIFT;
10311 
10312 	if (first_ppage >= next_ppage)
10313 		return 0;
10314 	nr_pages = next_ppage - first_ppage;
10315 
10316 	first_ppage_reported = first_ppage;
10317 	if (bsi->start == 0)
10318 		first_ppage_reported++;
10319 	if (bsi->lowest_ppage > first_ppage_reported)
10320 		bsi->lowest_ppage = first_ppage_reported;
10321 	if (bsi->highest_ppage < (next_ppage - 1))
10322 		bsi->highest_ppage = next_ppage - 1;
10323 
10324 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10325 	if (ret < 0)
10326 		return ret;
10327 	bsi->nr_extents += ret;
10328 	bsi->nr_pages += nr_pages;
10329 	return 0;
10330 }
10331 
10332 static void btrfs_swap_deactivate(struct file *file)
10333 {
10334 	struct inode *inode = file_inode(file);
10335 
10336 	btrfs_free_swapfile_pins(inode);
10337 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10338 }
10339 
10340 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10341 			       sector_t *span)
10342 {
10343 	struct inode *inode = file_inode(file);
10344 	struct btrfs_root *root = BTRFS_I(inode)->root;
10345 	struct btrfs_fs_info *fs_info = root->fs_info;
10346 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10347 	struct extent_state *cached_state = NULL;
10348 	struct extent_map *em = NULL;
10349 	struct btrfs_device *device = NULL;
10350 	struct btrfs_swap_info bsi = {
10351 		.lowest_ppage = (sector_t)-1ULL,
10352 	};
10353 	int ret = 0;
10354 	u64 isize;
10355 	u64 start;
10356 
10357 	/*
10358 	 * If the swap file was just created, make sure delalloc is done. If the
10359 	 * file changes again after this, the user is doing something stupid and
10360 	 * we don't really care.
10361 	 */
10362 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10363 	if (ret)
10364 		return ret;
10365 
10366 	/*
10367 	 * The inode is locked, so these flags won't change after we check them.
10368 	 */
10369 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10370 		btrfs_warn(fs_info, "swapfile must not be compressed");
10371 		return -EINVAL;
10372 	}
10373 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10374 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10375 		return -EINVAL;
10376 	}
10377 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10378 		btrfs_warn(fs_info, "swapfile must not be checksummed");
10379 		return -EINVAL;
10380 	}
10381 
10382 	/*
10383 	 * Balance or device remove/replace/resize can move stuff around from
10384 	 * under us. The exclop protection makes sure they aren't running/won't
10385 	 * run concurrently while we are mapping the swap extents, and
10386 	 * fs_info->swapfile_pins prevents them from running while the swap
10387 	 * file is active and moving the extents. Note that this also prevents
10388 	 * a concurrent device add which isn't actually necessary, but it's not
10389 	 * really worth the trouble to allow it.
10390 	 */
10391 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10392 		btrfs_warn(fs_info,
10393 	   "cannot activate swapfile while exclusive operation is running");
10394 		return -EBUSY;
10395 	}
10396 
10397 	/*
10398 	 * Prevent snapshot creation while we are activating the swap file.
10399 	 * We do not want to race with snapshot creation. If snapshot creation
10400 	 * already started before we bumped nr_swapfiles from 0 to 1 and
10401 	 * completes before the first write into the swap file after it is
10402 	 * activated, than that write would fallback to COW.
10403 	 */
10404 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10405 		btrfs_exclop_finish(fs_info);
10406 		btrfs_warn(fs_info,
10407 	   "cannot activate swapfile because snapshot creation is in progress");
10408 		return -EINVAL;
10409 	}
10410 	/*
10411 	 * Snapshots can create extents which require COW even if NODATACOW is
10412 	 * set. We use this counter to prevent snapshots. We must increment it
10413 	 * before walking the extents because we don't want a concurrent
10414 	 * snapshot to run after we've already checked the extents.
10415 	 */
10416 	atomic_inc(&root->nr_swapfiles);
10417 
10418 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10419 
10420 	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
10421 	start = 0;
10422 	while (start < isize) {
10423 		u64 logical_block_start, physical_block_start;
10424 		struct btrfs_block_group *bg;
10425 		u64 len = isize - start;
10426 
10427 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
10428 		if (IS_ERR(em)) {
10429 			ret = PTR_ERR(em);
10430 			goto out;
10431 		}
10432 
10433 		if (em->block_start == EXTENT_MAP_HOLE) {
10434 			btrfs_warn(fs_info, "swapfile must not have holes");
10435 			ret = -EINVAL;
10436 			goto out;
10437 		}
10438 		if (em->block_start == EXTENT_MAP_INLINE) {
10439 			/*
10440 			 * It's unlikely we'll ever actually find ourselves
10441 			 * here, as a file small enough to fit inline won't be
10442 			 * big enough to store more than the swap header, but in
10443 			 * case something changes in the future, let's catch it
10444 			 * here rather than later.
10445 			 */
10446 			btrfs_warn(fs_info, "swapfile must not be inline");
10447 			ret = -EINVAL;
10448 			goto out;
10449 		}
10450 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10451 			btrfs_warn(fs_info, "swapfile must not be compressed");
10452 			ret = -EINVAL;
10453 			goto out;
10454 		}
10455 
10456 		logical_block_start = em->block_start + (start - em->start);
10457 		len = min(len, em->len - (start - em->start));
10458 		free_extent_map(em);
10459 		em = NULL;
10460 
10461 		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
10462 		if (ret < 0) {
10463 			goto out;
10464 		} else if (ret) {
10465 			ret = 0;
10466 		} else {
10467 			btrfs_warn(fs_info,
10468 				   "swapfile must not be copy-on-write");
10469 			ret = -EINVAL;
10470 			goto out;
10471 		}
10472 
10473 		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10474 		if (IS_ERR(em)) {
10475 			ret = PTR_ERR(em);
10476 			goto out;
10477 		}
10478 
10479 		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10480 			btrfs_warn(fs_info,
10481 				   "swapfile must have single data profile");
10482 			ret = -EINVAL;
10483 			goto out;
10484 		}
10485 
10486 		if (device == NULL) {
10487 			device = em->map_lookup->stripes[0].dev;
10488 			ret = btrfs_add_swapfile_pin(inode, device, false);
10489 			if (ret == 1)
10490 				ret = 0;
10491 			else if (ret)
10492 				goto out;
10493 		} else if (device != em->map_lookup->stripes[0].dev) {
10494 			btrfs_warn(fs_info, "swapfile must be on one device");
10495 			ret = -EINVAL;
10496 			goto out;
10497 		}
10498 
10499 		physical_block_start = (em->map_lookup->stripes[0].physical +
10500 					(logical_block_start - em->start));
10501 		len = min(len, em->len - (logical_block_start - em->start));
10502 		free_extent_map(em);
10503 		em = NULL;
10504 
10505 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10506 		if (!bg) {
10507 			btrfs_warn(fs_info,
10508 			   "could not find block group containing swapfile");
10509 			ret = -EINVAL;
10510 			goto out;
10511 		}
10512 
10513 		if (!btrfs_inc_block_group_swap_extents(bg)) {
10514 			btrfs_warn(fs_info,
10515 			   "block group for swapfile at %llu is read-only%s",
10516 			   bg->start,
10517 			   atomic_read(&fs_info->scrubs_running) ?
10518 				       " (scrub running)" : "");
10519 			btrfs_put_block_group(bg);
10520 			ret = -EINVAL;
10521 			goto out;
10522 		}
10523 
10524 		ret = btrfs_add_swapfile_pin(inode, bg, true);
10525 		if (ret) {
10526 			btrfs_put_block_group(bg);
10527 			if (ret == 1)
10528 				ret = 0;
10529 			else
10530 				goto out;
10531 		}
10532 
10533 		if (bsi.block_len &&
10534 		    bsi.block_start + bsi.block_len == physical_block_start) {
10535 			bsi.block_len += len;
10536 		} else {
10537 			if (bsi.block_len) {
10538 				ret = btrfs_add_swap_extent(sis, &bsi);
10539 				if (ret)
10540 					goto out;
10541 			}
10542 			bsi.start = start;
10543 			bsi.block_start = physical_block_start;
10544 			bsi.block_len = len;
10545 		}
10546 
10547 		start += len;
10548 	}
10549 
10550 	if (bsi.block_len)
10551 		ret = btrfs_add_swap_extent(sis, &bsi);
10552 
10553 out:
10554 	if (!IS_ERR_OR_NULL(em))
10555 		free_extent_map(em);
10556 
10557 	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
10558 
10559 	if (ret)
10560 		btrfs_swap_deactivate(file);
10561 
10562 	btrfs_drew_write_unlock(&root->snapshot_lock);
10563 
10564 	btrfs_exclop_finish(fs_info);
10565 
10566 	if (ret)
10567 		return ret;
10568 
10569 	if (device)
10570 		sis->bdev = device->bdev;
10571 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10572 	sis->max = bsi.nr_pages;
10573 	sis->pages = bsi.nr_pages - 1;
10574 	sis->highest_bit = bsi.nr_pages - 1;
10575 	return bsi.nr_extents;
10576 }
10577 #else
10578 static void btrfs_swap_deactivate(struct file *file)
10579 {
10580 }
10581 
10582 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10583 			       sector_t *span)
10584 {
10585 	return -EOPNOTSUPP;
10586 }
10587 #endif
10588 
10589 /*
10590  * Update the number of bytes used in the VFS' inode. When we replace extents in
10591  * a range (clone, dedupe, fallocate's zero range), we must update the number of
10592  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10593  * always get a correct value.
10594  */
10595 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10596 			      const u64 add_bytes,
10597 			      const u64 del_bytes)
10598 {
10599 	if (add_bytes == del_bytes)
10600 		return;
10601 
10602 	spin_lock(&inode->lock);
10603 	if (del_bytes > 0)
10604 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10605 	if (add_bytes > 0)
10606 		inode_add_bytes(&inode->vfs_inode, add_bytes);
10607 	spin_unlock(&inode->lock);
10608 }
10609 
10610 static const struct inode_operations btrfs_dir_inode_operations = {
10611 	.getattr	= btrfs_getattr,
10612 	.lookup		= btrfs_lookup,
10613 	.create		= btrfs_create,
10614 	.unlink		= btrfs_unlink,
10615 	.link		= btrfs_link,
10616 	.mkdir		= btrfs_mkdir,
10617 	.rmdir		= btrfs_rmdir,
10618 	.rename		= btrfs_rename2,
10619 	.symlink	= btrfs_symlink,
10620 	.setattr	= btrfs_setattr,
10621 	.mknod		= btrfs_mknod,
10622 	.listxattr	= btrfs_listxattr,
10623 	.permission	= btrfs_permission,
10624 	.get_acl	= btrfs_get_acl,
10625 	.set_acl	= btrfs_set_acl,
10626 	.update_time	= btrfs_update_time,
10627 	.tmpfile        = btrfs_tmpfile,
10628 	.fileattr_get	= btrfs_fileattr_get,
10629 	.fileattr_set	= btrfs_fileattr_set,
10630 };
10631 
10632 static const struct file_operations btrfs_dir_file_operations = {
10633 	.llseek		= generic_file_llseek,
10634 	.read		= generic_read_dir,
10635 	.iterate_shared	= btrfs_real_readdir,
10636 	.open		= btrfs_opendir,
10637 	.unlocked_ioctl	= btrfs_ioctl,
10638 #ifdef CONFIG_COMPAT
10639 	.compat_ioctl	= btrfs_compat_ioctl,
10640 #endif
10641 	.release        = btrfs_release_file,
10642 	.fsync		= btrfs_sync_file,
10643 };
10644 
10645 /*
10646  * btrfs doesn't support the bmap operation because swapfiles
10647  * use bmap to make a mapping of extents in the file.  They assume
10648  * these extents won't change over the life of the file and they
10649  * use the bmap result to do IO directly to the drive.
10650  *
10651  * the btrfs bmap call would return logical addresses that aren't
10652  * suitable for IO and they also will change frequently as COW
10653  * operations happen.  So, swapfile + btrfs == corruption.
10654  *
10655  * For now we're avoiding this by dropping bmap.
10656  */
10657 static const struct address_space_operations btrfs_aops = {
10658 	.readpage	= btrfs_readpage,
10659 	.writepage	= btrfs_writepage,
10660 	.writepages	= btrfs_writepages,
10661 	.readahead	= btrfs_readahead,
10662 	.direct_IO	= noop_direct_IO,
10663 	.invalidatepage = btrfs_invalidatepage,
10664 	.releasepage	= btrfs_releasepage,
10665 #ifdef CONFIG_MIGRATION
10666 	.migratepage	= btrfs_migratepage,
10667 #endif
10668 	.set_page_dirty	= btrfs_set_page_dirty,
10669 	.error_remove_page = generic_error_remove_page,
10670 	.swap_activate	= btrfs_swap_activate,
10671 	.swap_deactivate = btrfs_swap_deactivate,
10672 };
10673 
10674 static const struct inode_operations btrfs_file_inode_operations = {
10675 	.getattr	= btrfs_getattr,
10676 	.setattr	= btrfs_setattr,
10677 	.listxattr      = btrfs_listxattr,
10678 	.permission	= btrfs_permission,
10679 	.fiemap		= btrfs_fiemap,
10680 	.get_acl	= btrfs_get_acl,
10681 	.set_acl	= btrfs_set_acl,
10682 	.update_time	= btrfs_update_time,
10683 	.fileattr_get	= btrfs_fileattr_get,
10684 	.fileattr_set	= btrfs_fileattr_set,
10685 };
10686 static const struct inode_operations btrfs_special_inode_operations = {
10687 	.getattr	= btrfs_getattr,
10688 	.setattr	= btrfs_setattr,
10689 	.permission	= btrfs_permission,
10690 	.listxattr	= btrfs_listxattr,
10691 	.get_acl	= btrfs_get_acl,
10692 	.set_acl	= btrfs_set_acl,
10693 	.update_time	= btrfs_update_time,
10694 };
10695 static const struct inode_operations btrfs_symlink_inode_operations = {
10696 	.get_link	= page_get_link,
10697 	.getattr	= btrfs_getattr,
10698 	.setattr	= btrfs_setattr,
10699 	.permission	= btrfs_permission,
10700 	.listxattr	= btrfs_listxattr,
10701 	.update_time	= btrfs_update_time,
10702 };
10703 
10704 const struct dentry_operations btrfs_dentry_operations = {
10705 	.d_delete	= btrfs_dentry_delete,
10706 };
10707