xref: /openbmc/linux/fs/btrfs/inode.c (revision 5f2fb52fac15a8a8e10ce020dd532504a8abfc4e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/buffer_head.h>
9 #include <linux/file.h>
10 #include <linux/fs.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/init.h>
15 #include <linux/string.h>
16 #include <linux/backing-dev.h>
17 #include <linux/writeback.h>
18 #include <linux/compat.h>
19 #include <linux/xattr.h>
20 #include <linux/posix_acl.h>
21 #include <linux/falloc.h>
22 #include <linux/slab.h>
23 #include <linux/ratelimit.h>
24 #include <linux/btrfs.h>
25 #include <linux/blkdev.h>
26 #include <linux/posix_acl_xattr.h>
27 #include <linux/uio.h>
28 #include <linux/magic.h>
29 #include <linux/iversion.h>
30 #include <linux/swap.h>
31 #include <linux/sched/mm.h>
32 #include <asm/unaligned.h>
33 #include "misc.h"
34 #include "ctree.h"
35 #include "disk-io.h"
36 #include "transaction.h"
37 #include "btrfs_inode.h"
38 #include "print-tree.h"
39 #include "ordered-data.h"
40 #include "xattr.h"
41 #include "tree-log.h"
42 #include "volumes.h"
43 #include "compression.h"
44 #include "locking.h"
45 #include "free-space-cache.h"
46 #include "inode-map.h"
47 #include "props.h"
48 #include "qgroup.h"
49 #include "delalloc-space.h"
50 #include "block-group.h"
51 
52 struct btrfs_iget_args {
53 	struct btrfs_key *location;
54 	struct btrfs_root *root;
55 };
56 
57 struct btrfs_dio_data {
58 	u64 reserve;
59 	u64 unsubmitted_oe_range_start;
60 	u64 unsubmitted_oe_range_end;
61 	int overwrite;
62 };
63 
64 static const struct inode_operations btrfs_dir_inode_operations;
65 static const struct inode_operations btrfs_symlink_inode_operations;
66 static const struct inode_operations btrfs_special_inode_operations;
67 static const struct inode_operations btrfs_file_inode_operations;
68 static const struct address_space_operations btrfs_aops;
69 static const struct file_operations btrfs_dir_file_operations;
70 static const struct extent_io_ops btrfs_extent_io_ops;
71 
72 static struct kmem_cache *btrfs_inode_cachep;
73 struct kmem_cache *btrfs_trans_handle_cachep;
74 struct kmem_cache *btrfs_path_cachep;
75 struct kmem_cache *btrfs_free_space_cachep;
76 struct kmem_cache *btrfs_free_space_bitmap_cachep;
77 
78 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
79 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
80 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
81 static noinline int cow_file_range(struct inode *inode,
82 				   struct page *locked_page,
83 				   u64 start, u64 end, int *page_started,
84 				   unsigned long *nr_written, int unlock);
85 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
86 				       u64 orig_start, u64 block_start,
87 				       u64 block_len, u64 orig_block_len,
88 				       u64 ram_bytes, int compress_type,
89 				       int type);
90 
91 static void __endio_write_update_ordered(struct inode *inode,
92 					 const u64 offset, const u64 bytes,
93 					 const bool uptodate);
94 
95 /*
96  * Cleanup all submitted ordered extents in the specified range to handle
97  * errors from the btrfs_run_delalloc_range() callback.
98  *
99  * NOTE: when an error happens, the caller must not call
100  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
101  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
102  * to be released, which we want to happen only when finishing the ordered
103  * extent (btrfs_finish_ordered_io()).
104  */
105 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
106 						 struct page *locked_page,
107 						 u64 offset, u64 bytes)
108 {
109 	unsigned long index = offset >> PAGE_SHIFT;
110 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
111 	u64 page_start = page_offset(locked_page);
112 	u64 page_end = page_start + PAGE_SIZE - 1;
113 
114 	struct page *page;
115 
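	/*
	 * Example of the index math, assuming 4K pages: for offset=4096 and
	 * bytes=12288 the loop below visits page indexes 1, 2 and 3, clearing
	 * PagePrivate2 on each so the pages are no longer treated as covered
	 * by an ordered extent.
	 */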
116 	while (index <= end_index) {
117 		page = find_get_page(inode->i_mapping, index);
118 		index++;
119 		if (!page)
120 			continue;
121 		ClearPagePrivate2(page);
122 		put_page(page);
123 	}
124 
125 	/*
126 	 * If the locked page belongs to the delalloc range being instantiated,
127 	 * skip it, since the first page of a range will be properly cleaned up
128 	 * by the caller of run_delalloc_range
129 	 */
130 	if (page_start >= offset && page_end <= (offset + bytes - 1)) {
131 		offset += PAGE_SIZE;
132 		bytes -= PAGE_SIZE;
133 	}
134 
135 	return __endio_write_update_ordered(inode, offset, bytes, false);
136 }
137 
138 static int btrfs_dirty_inode(struct inode *inode);
139 
140 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
141 void btrfs_test_inode_set_ops(struct inode *inode)
142 {
143 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
144 }
145 #endif
146 
147 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
148 				     struct inode *inode,  struct inode *dir,
149 				     const struct qstr *qstr)
150 {
151 	int err;
152 
153 	err = btrfs_init_acl(trans, inode, dir);
154 	if (!err)
155 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
156 	return err;
157 }
158 
159 /*
160  * This does all the hard work for inserting an inline extent into the
161  * btree.  The caller should have already run btrfs_drop_extents() so that
162  * no overlapping inline items exist in the btree
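 *
 * A rough size example (assuming the current disk format, where inline data
 * starts at offsetof(struct btrfs_file_extent_item, disk_bytenr), a 21 byte
 * header): inlining 100 bytes of uncompressed data takes a 121 byte item,
 * which is what btrfs_file_extent_calc_inline_size() computes.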
163  */
164 static int insert_inline_extent(struct btrfs_trans_handle *trans,
165 				struct btrfs_path *path, int extent_inserted,
166 				struct btrfs_root *root, struct inode *inode,
167 				u64 start, size_t size, size_t compressed_size,
168 				int compress_type,
169 				struct page **compressed_pages)
170 {
171 	struct extent_buffer *leaf;
172 	struct page *page = NULL;
173 	char *kaddr;
174 	unsigned long ptr;
175 	struct btrfs_file_extent_item *ei;
176 	int ret;
177 	size_t cur_size = size;
178 	unsigned long offset;
179 
180 	ASSERT((compressed_size > 0 && compressed_pages) ||
181 	       (compressed_size == 0 && !compressed_pages));
182 
183 	if (compressed_size && compressed_pages)
184 		cur_size = compressed_size;
185 
186 	inode_add_bytes(inode, size);
187 
188 	if (!extent_inserted) {
189 		struct btrfs_key key;
190 		size_t datasize;
191 
192 		key.objectid = btrfs_ino(BTRFS_I(inode));
193 		key.offset = start;
194 		key.type = BTRFS_EXTENT_DATA_KEY;
195 
196 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
197 		path->leave_spinning = 1;
198 		ret = btrfs_insert_empty_item(trans, root, path, &key,
199 					      datasize);
200 		if (ret)
201 			goto fail;
202 	}
203 	leaf = path->nodes[0];
204 	ei = btrfs_item_ptr(leaf, path->slots[0],
205 			    struct btrfs_file_extent_item);
206 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
207 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
208 	btrfs_set_file_extent_encryption(leaf, ei, 0);
209 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
210 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
211 	ptr = btrfs_file_extent_inline_start(ei);
212 
213 	if (compress_type != BTRFS_COMPRESS_NONE) {
214 		struct page *cpage;
215 		int i = 0;
216 		while (compressed_size > 0) {
217 			cpage = compressed_pages[i];
218 			cur_size = min_t(unsigned long, compressed_size,
219 				       PAGE_SIZE);
220 
221 			kaddr = kmap_atomic(cpage);
222 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
223 			kunmap_atomic(kaddr);
224 
225 			i++;
226 			ptr += cur_size;
227 			compressed_size -= cur_size;
228 		}
229 		btrfs_set_file_extent_compression(leaf, ei,
230 						  compress_type);
231 	} else {
232 		page = find_get_page(inode->i_mapping,
233 				     start >> PAGE_SHIFT);
234 		btrfs_set_file_extent_compression(leaf, ei, 0);
235 		kaddr = kmap_atomic(page);
236 		offset = offset_in_page(start);
237 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
238 		kunmap_atomic(kaddr);
239 		put_page(page);
240 	}
241 	btrfs_mark_buffer_dirty(leaf);
242 	btrfs_release_path(path);
243 
244 	/*
245 	 * we're an inline extent, so nobody can
246 	 * extend the file past i_size without locking
247 	 * a page we already have locked.
248 	 *
249 	 * We must do any isize and inode updates
250 	 * before we unlock the pages.  Otherwise we
251 	 * could end up racing with unlink.
252 	 */
253 	BTRFS_I(inode)->disk_i_size = inode->i_size;
254 	ret = btrfs_update_inode(trans, root, inode);
255 
256 fail:
257 	return ret;
258 }
259 
260 
261 /*
262  * conditionally insert an inline extent into the file.  This
263  * does the checks required to make sure the data is small enough
264  * to fit as an inline extent.
265  */
266 static noinline int cow_file_range_inline(struct inode *inode, u64 start,
267 					  u64 end, size_t compressed_size,
268 					  int compress_type,
269 					  struct page **compressed_pages)
270 {
271 	struct btrfs_root *root = BTRFS_I(inode)->root;
272 	struct btrfs_fs_info *fs_info = root->fs_info;
273 	struct btrfs_trans_handle *trans;
274 	u64 isize = i_size_read(inode);
275 	u64 actual_end = min(end + 1, isize);
276 	u64 inline_len = actual_end - start;
277 	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
278 	u64 data_len = inline_len;
279 	int ret;
280 	struct btrfs_path *path;
281 	int extent_inserted = 0;
282 	u32 extent_item_size;
283 
284 	if (compressed_size)
285 		data_len = compressed_size;
286 
287 	if (start > 0 ||
288 	    actual_end > fs_info->sectorsize ||
289 	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
290 	    (!compressed_size &&
291 	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
292 	    end + 1 < isize ||
293 	    data_len > fs_info->max_inline) {
294 		return 1;
295 	}
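	/*
	 * Example, assuming the default 4K sectorsize and max_inline of 2048:
	 * a 1000-byte file written at offset 0 passes all of the checks above
	 * and may be inlined, while a 5000-byte file already fails the
	 * actual_end > sectorsize check and is written as a regular extent.
	 */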
296 
297 	path = btrfs_alloc_path();
298 	if (!path)
299 		return -ENOMEM;
300 
301 	trans = btrfs_join_transaction(root);
302 	if (IS_ERR(trans)) {
303 		btrfs_free_path(path);
304 		return PTR_ERR(trans);
305 	}
306 	trans->block_rsv = &BTRFS_I(inode)->block_rsv;
307 
308 	if (compressed_size && compressed_pages)
309 		extent_item_size = btrfs_file_extent_calc_inline_size(
310 		   compressed_size);
311 	else
312 		extent_item_size = btrfs_file_extent_calc_inline_size(
313 		    inline_len);
314 
315 	ret = __btrfs_drop_extents(trans, root, inode, path,
316 				   start, aligned_end, NULL,
317 				   1, 1, extent_item_size, &extent_inserted);
318 	if (ret) {
319 		btrfs_abort_transaction(trans, ret);
320 		goto out;
321 	}
322 
323 	if (isize > actual_end)
324 		inline_len = min_t(u64, isize, actual_end);
325 	ret = insert_inline_extent(trans, path, extent_inserted,
326 				   root, inode, start,
327 				   inline_len, compressed_size,
328 				   compress_type, compressed_pages);
329 	if (ret && ret != -ENOSPC) {
330 		btrfs_abort_transaction(trans, ret);
331 		goto out;
332 	} else if (ret == -ENOSPC) {
333 		ret = 1;
334 		goto out;
335 	}
336 
337 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
338 	btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
339 out:
340 	/*
341 	 * Don't forget to free the reserved space: an inline extent doesn't
342 	 * count as a data extent, so free the reservation directly here.
343 	 * At reserve time it's always aligned to the page size, so
344 	 * just free one page here.
345 	 */
346 	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
347 	btrfs_free_path(path);
348 	btrfs_end_transaction(trans);
349 	return ret;
350 }
351 
352 struct async_extent {
353 	u64 start;
354 	u64 ram_size;
355 	u64 compressed_size;
356 	struct page **pages;
357 	unsigned long nr_pages;
358 	int compress_type;
359 	struct list_head list;
360 };
361 
362 struct async_chunk {
363 	struct inode *inode;
364 	struct page *locked_page;
365 	u64 start;
366 	u64 end;
367 	unsigned int write_flags;
368 	struct list_head extents;
369 	struct cgroup_subsys_state *blkcg_css;
370 	struct btrfs_work work;
371 	atomic_t *pending;
372 };
373 
374 struct async_cow {
375 	/* Number of chunks in flight; must be first in the structure */
376 	atomic_t num_chunks;
377 	struct async_chunk chunks[];
378 };
379 
380 static noinline int add_async_extent(struct async_chunk *cow,
381 				     u64 start, u64 ram_size,
382 				     u64 compressed_size,
383 				     struct page **pages,
384 				     unsigned long nr_pages,
385 				     int compress_type)
386 {
387 	struct async_extent *async_extent;
388 
389 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
390 	BUG_ON(!async_extent); /* -ENOMEM */
391 	async_extent->start = start;
392 	async_extent->ram_size = ram_size;
393 	async_extent->compressed_size = compressed_size;
394 	async_extent->pages = pages;
395 	async_extent->nr_pages = nr_pages;
396 	async_extent->compress_type = compress_type;
397 	list_add_tail(&async_extent->list, &cow->extents);
398 	return 0;
399 }
400 
401 /*
402  * Check if the inode has flags compatible with compression
403  */
404 static inline bool inode_can_compress(struct inode *inode)
405 {
406 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
407 	    BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
408 		return false;
409 	return true;
410 }
411 
412 /*
413  * Check if the inode needs to be submitted to compression, based on mount
414  * options, defragmentation, properties or heuristics.
415  */
416 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
417 {
418 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
419 
420 	if (!inode_can_compress(inode)) {
421 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
422 			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
423 			btrfs_ino(BTRFS_I(inode)));
424 		return 0;
425 	}
426 	/* force compress */
427 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
428 		return 1;
429 	/* defrag ioctl */
430 	if (BTRFS_I(inode)->defrag_compress)
431 		return 1;
432 	/* bad compression ratios */
433 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
434 		return 0;
435 	if (btrfs_test_opt(fs_info, COMPRESS) ||
436 	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
437 	    BTRFS_I(inode)->prop_compress)
438 		return btrfs_compress_heuristic(inode, start, end);
439 	return 0;
440 }
441 
442 static inline void inode_should_defrag(struct btrfs_inode *inode,
443 		u64 start, u64 end, u64 num_bytes, u64 small_write)
444 {
445 	/* If this is a small write inside eof, kick off a defrag */
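	/*
	 * E.g., assuming the autodefrag mount option is enabled: a 4K write
	 * into the middle of an existing 1M file satisfies both conditions
	 * below and queues the inode for defrag.
	 */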
446 	if (num_bytes < small_write &&
447 	    (start > 0 || end + 1 < inode->disk_i_size))
448 		btrfs_add_inode_defrag(NULL, inode);
449 }
450 
451 /*
452  * we create compressed extents in two phases.  The first
453  * phase compresses a range of pages that have already been
454  * locked (both pages and state bits are locked).
455  *
456  * This is done inside an ordered work queue, and the compression
457  * is spread across many cpus.  The actual IO submission is step
458  * two, and the ordered work queue takes care of making sure that
459  * happens in the same order things were put onto the queue by
460  * writepages and friends.
461  *
462  * If this code finds it can't get good compression, it puts an
463  * entry onto the work queue to write the uncompressed bytes.  This
464  * makes sure that both compressed inodes and uncompressed inodes
465  * are written in the same order that the flusher thread sent them
466  * down.
467  */
468 static noinline int compress_file_range(struct async_chunk *async_chunk)
469 {
470 	struct inode *inode = async_chunk->inode;
471 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
472 	u64 blocksize = fs_info->sectorsize;
473 	u64 start = async_chunk->start;
474 	u64 end = async_chunk->end;
475 	u64 actual_end;
476 	u64 i_size;
477 	int ret = 0;
478 	struct page **pages = NULL;
479 	unsigned long nr_pages;
480 	unsigned long total_compressed = 0;
481 	unsigned long total_in = 0;
482 	int i;
483 	int will_compress;
484 	int compress_type = fs_info->compress_type;
485 	int compressed_extents = 0;
486 	int redirty = 0;
487 
488 	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
489 			SZ_16K);
490 
491 	/*
492 	 * We need to save i_size before now because it could change in between
493 	 * us evaluating the size and assigning it.  This is because we lock and
494 	 * unlock the page in truncate and fallocate, and then modify the i_size
495 	 * later on.
496 	 *
497 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
498 	 * does that for us.
499 	 */
500 	barrier();
501 	i_size = i_size_read(inode);
502 	barrier();
503 	actual_end = min_t(u64, i_size, end + 1);
504 again:
505 	will_compress = 0;
506 	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
507 	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
508 	nr_pages = min_t(unsigned long, nr_pages,
509 			BTRFS_MAX_COMPRESSED / PAGE_SIZE);
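	/*
	 * E.g. with 4K pages: BTRFS_MAX_COMPRESSED is 128K, so at most 32
	 * pages are compressed per pass; a larger range loops back to the
	 * "again" label above once an async extent has been recorded.
	 */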
510 
511 	/*
512 	 * we don't want to send crud past the end of i_size through
513 	 * compression, that's just a waste of CPU time.  So, if the
514 	 * end of the file is before the start of our current
515 	 * requested range of bytes, we bail out to the uncompressed
516 	 * cleanup code that can deal with all of this.
517 	 *
518 	 * It isn't really the fastest way to fix things, but this is a
519 	 * very uncommon corner.
520 	 */
521 	if (actual_end <= start)
522 		goto cleanup_and_bail_uncompressed;
523 
524 	total_compressed = actual_end - start;
525 
526 	/*
527 	 * Skip compression for a small file range (<= blocksize) that
528 	 * isn't an inline extent, since it doesn't save disk space at all.
529 	 */
530 	if (total_compressed <= blocksize &&
531 	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
532 		goto cleanup_and_bail_uncompressed;
533 
534 	total_compressed = min_t(unsigned long, total_compressed,
535 			BTRFS_MAX_UNCOMPRESSED);
536 	total_in = 0;
537 	ret = 0;
538 
539 	/*
540 	 * we do compression for mount -o compress and when the
541 	 * inode has not been flagged as nocompress.  This flag can
542 	 * change at any time if we discover bad compression ratios.
543 	 */
544 	if (inode_need_compress(inode, start, end)) {
545 		WARN_ON(pages);
546 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
547 		if (!pages) {
548 			/* just bail out to the uncompressed code */
549 			nr_pages = 0;
550 			goto cont;
551 		}
552 
553 		if (BTRFS_I(inode)->defrag_compress)
554 			compress_type = BTRFS_I(inode)->defrag_compress;
555 		else if (BTRFS_I(inode)->prop_compress)
556 			compress_type = BTRFS_I(inode)->prop_compress;
557 
558 		/*
559 		 * we need to call clear_page_dirty_for_io on each
560 		 * page in the range.  Otherwise applications with the file
561 		 * mmap'd can wander in and change the page contents while
562 		 * we are compressing them.
563 		 *
564 		 * If the compression fails for any reason, we set the pages
565 		 * dirty again later on.
566 		 *
567 		 * Note that the remaining part is redirtied, the start pointer
568 		 * has moved, the end is the original one.
569 		 */
570 		if (!redirty) {
571 			extent_range_clear_dirty_for_io(inode, start, end);
572 			redirty = 1;
573 		}
574 
575 		/* Compression level is applied here and only here */
576 		ret = btrfs_compress_pages(
577 			compress_type | (fs_info->compress_level << 4),
578 					   inode->i_mapping, start,
579 					   pages,
580 					   &nr_pages,
581 					   &total_in,
582 					   &total_compressed);
583 
584 		if (!ret) {
585 			unsigned long offset = offset_in_page(total_compressed);
586 			struct page *page = pages[nr_pages - 1];
587 			char *kaddr;
588 
589 			/* zero the tail end of the last page; we might be
590 			 * sending it down to disk
591 			 */
592 			if (offset) {
593 				kaddr = kmap_atomic(page);
594 				memset(kaddr + offset, 0,
595 				       PAGE_SIZE - offset);
596 				kunmap_atomic(kaddr);
597 			}
598 			will_compress = 1;
599 		}
600 	}
601 cont:
602 	if (start == 0) {
603 		/* let's try to make an inline extent */
604 		if (ret || total_in < actual_end) {
605 			/* we didn't compress the entire range, try
606 			 * to make an uncompressed inline extent.
607 			 */
608 			ret = cow_file_range_inline(inode, start, end, 0,
609 						    BTRFS_COMPRESS_NONE, NULL);
610 		} else {
611 			/* try making a compressed inline extent */
612 			ret = cow_file_range_inline(inode, start, end,
613 						    total_compressed,
614 						    compress_type, pages);
615 		}
616 		if (ret <= 0) {
617 			unsigned long clear_flags = EXTENT_DELALLOC |
618 				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
619 				EXTENT_DO_ACCOUNTING;
620 			unsigned long page_error_op;
621 
622 			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
623 
624 			/*
625 			 * inline extent creation worked or returned error,
626 			 * we don't need to create any more async work items.
627 			 * Unlock and free up our temp pages.
628 			 *
629 			 * We use DO_ACCOUNTING here because we need the
630 			 * delalloc_release_metadata to be done _after_ we drop
631 			 * our outstanding extent for clearing delalloc for this
632 			 * range.
633 			 */
634 			extent_clear_unlock_delalloc(inode, start, end, NULL,
635 						     clear_flags,
636 						     PAGE_UNLOCK |
637 						     PAGE_CLEAR_DIRTY |
638 						     PAGE_SET_WRITEBACK |
639 						     page_error_op |
640 						     PAGE_END_WRITEBACK);
641 
642 			for (i = 0; i < nr_pages; i++) {
643 				WARN_ON(pages[i]->mapping);
644 				put_page(pages[i]);
645 			}
646 			kfree(pages);
647 
648 			return 0;
649 		}
650 	}
651 
652 	if (will_compress) {
653 		/*
654 		 * We aren't doing an inline extent, so round the compressed size
655 		 * up to a block size boundary so that the allocator does sane
656 		 * things
657 		 */
658 		total_compressed = ALIGN(total_compressed, blocksize);
659 
660 		/*
661 		 * One last check to make sure the compression is really a
662 		 * win: compare the page count read with the blocks on disk;
663 		 * compression must free at least one sector size
664 		 */
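		/*
		 * Worked example, assuming a 4K block size: 64K read in
		 * (total_in) that compressed to 41K is rounded up to 44K;
		 * 44K + 4K <= 64K, so the compressed copy is kept.  Had it
		 * compressed to 61K (rounded to 64K), the check below would
		 * fail and the range would be written uncompressed.
		 */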
665 		total_in = ALIGN(total_in, PAGE_SIZE);
666 		if (total_compressed + blocksize <= total_in) {
667 			compressed_extents++;
668 
669 			/*
670 			 * The async work queues will take care of doing actual
671 			 * allocation on disk for these compressed pages, and
672 			 * will submit them to the elevator.
673 			 */
674 			add_async_extent(async_chunk, start, total_in,
675 					total_compressed, pages, nr_pages,
676 					compress_type);
677 
678 			if (start + total_in < end) {
679 				start += total_in;
680 				pages = NULL;
681 				cond_resched();
682 				goto again;
683 			}
684 			return compressed_extents;
685 		}
686 	}
687 	if (pages) {
688 		/*
689 		 * The compression code ran but failed to make things smaller,
690 		 * so free any pages it allocated and our page pointer array
691 		 */
692 		for (i = 0; i < nr_pages; i++) {
693 			WARN_ON(pages[i]->mapping);
694 			put_page(pages[i]);
695 		}
696 		kfree(pages);
697 		pages = NULL;
698 		total_compressed = 0;
699 		nr_pages = 0;
700 
701 		/* flag the file so we don't compress in the future */
702 		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
703 		    !(BTRFS_I(inode)->prop_compress)) {
704 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
705 		}
706 	}
707 cleanup_and_bail_uncompressed:
708 	/*
709 	 * No compression, but we still need to write the pages in the file
710 	 * we've been given so far.  Redirty the locked page if it corresponds
711 	 * to our extent and set things up for the async work queue to run
712 	 * cow_file_range to do the normal delalloc dance.
713 	 */
714 	if (async_chunk->locked_page &&
715 	    (page_offset(async_chunk->locked_page) >= start &&
716 	     page_offset(async_chunk->locked_page) <= end)) {
717 		__set_page_dirty_nobuffers(async_chunk->locked_page);
718 		/* unlocked later on in the async handlers */
719 	}
720 
721 	if (redirty)
722 		extent_range_redirty_for_io(inode, start, end);
723 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
724 			 BTRFS_COMPRESS_NONE);
725 	compressed_extents++;
726 
727 	return compressed_extents;
728 }
729 
730 static void free_async_extent_pages(struct async_extent *async_extent)
731 {
732 	int i;
733 
734 	if (!async_extent->pages)
735 		return;
736 
737 	for (i = 0; i < async_extent->nr_pages; i++) {
738 		WARN_ON(async_extent->pages[i]->mapping);
739 		put_page(async_extent->pages[i]);
740 	}
741 	kfree(async_extent->pages);
742 	async_extent->nr_pages = 0;
743 	async_extent->pages = NULL;
744 }
745 
746 /*
747  * phase two of compressed writeback.  This is the ordered portion
748  * of the code, which only gets called in the order the work was
749  * queued.  We walk all the async extents created by compress_file_range
750  * and send them down to the disk.
751  */
752 static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
753 {
754 	struct inode *inode = async_chunk->inode;
755 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
756 	struct async_extent *async_extent;
757 	u64 alloc_hint = 0;
758 	struct btrfs_key ins;
759 	struct extent_map *em;
760 	struct btrfs_root *root = BTRFS_I(inode)->root;
761 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
762 	int ret = 0;
763 
764 again:
765 	while (!list_empty(&async_chunk->extents)) {
766 		async_extent = list_entry(async_chunk->extents.next,
767 					  struct async_extent, list);
768 		list_del(&async_extent->list);
769 
770 retry:
771 		lock_extent(io_tree, async_extent->start,
772 			    async_extent->start + async_extent->ram_size - 1);
773 		/* did the compression code fall back to uncompressed IO? */
774 		if (!async_extent->pages) {
775 			int page_started = 0;
776 			unsigned long nr_written = 0;
777 
778 			/* allocate blocks */
779 			ret = cow_file_range(inode, async_chunk->locked_page,
780 					     async_extent->start,
781 					     async_extent->start +
782 					     async_extent->ram_size - 1,
783 					     &page_started, &nr_written, 0);
784 
785 			/* JDM XXX */
786 
787 			/*
788 			 * if page_started, cow_file_range inserted an
789 			 * inline extent and took care of all the unlocking
790 			 * and IO for us.  Otherwise, we need to submit
791 			 * all those pages down to the drive.
792 			 */
793 			if (!page_started && !ret)
794 				extent_write_locked_range(inode,
795 						  async_extent->start,
796 						  async_extent->start +
797 						  async_extent->ram_size - 1,
798 						  WB_SYNC_ALL);
799 			else if (ret && async_chunk->locked_page)
800 				unlock_page(async_chunk->locked_page);
801 			kfree(async_extent);
802 			cond_resched();
803 			continue;
804 		}
805 
806 		ret = btrfs_reserve_extent(root, async_extent->ram_size,
807 					   async_extent->compressed_size,
808 					   async_extent->compressed_size,
809 					   0, alloc_hint, &ins, 1, 1);
810 		if (ret) {
811 			free_async_extent_pages(async_extent);
812 
813 			if (ret == -ENOSPC) {
814 				unlock_extent(io_tree, async_extent->start,
815 					      async_extent->start +
816 					      async_extent->ram_size - 1);
817 
818 				/*
819 				 * we need to redirty the pages if we decide to
820 				 * fall back to uncompressed IO, otherwise we
821 				 * will not submit these pages down to lower
822 				 * layers.
823 				 */
824 				extent_range_redirty_for_io(inode,
825 						async_extent->start,
826 						async_extent->start +
827 						async_extent->ram_size - 1);
828 
829 				goto retry;
830 			}
831 			goto out_free;
832 		}
833 		/*
834 		 * here we're doing allocation and writeback of the
835 		 * compressed pages
836 		 */
837 		em = create_io_em(inode, async_extent->start,
838 				  async_extent->ram_size, /* len */
839 				  async_extent->start, /* orig_start */
840 				  ins.objectid, /* block_start */
841 				  ins.offset, /* block_len */
842 				  ins.offset, /* orig_block_len */
843 				  async_extent->ram_size, /* ram_bytes */
844 				  async_extent->compress_type,
845 				  BTRFS_ORDERED_COMPRESSED);
846 		if (IS_ERR(em))
847 			/* no need to set ret, as this function returns void */
848 			goto out_free_reserve;
849 		free_extent_map(em);
850 
851 		ret = btrfs_add_ordered_extent_compress(inode,
852 						async_extent->start,
853 						ins.objectid,
854 						async_extent->ram_size,
855 						ins.offset,
856 						BTRFS_ORDERED_COMPRESSED,
857 						async_extent->compress_type);
858 		if (ret) {
859 			btrfs_drop_extent_cache(BTRFS_I(inode),
860 						async_extent->start,
861 						async_extent->start +
862 						async_extent->ram_size - 1, 0);
863 			goto out_free_reserve;
864 		}
865 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
866 
867 		/*
868 		 * clear dirty, set writeback and unlock the pages.
869 		 */
870 		extent_clear_unlock_delalloc(inode, async_extent->start,
871 				async_extent->start +
872 				async_extent->ram_size - 1,
873 				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
874 				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
875 				PAGE_SET_WRITEBACK);
876 		if (btrfs_submit_compressed_write(inode,
877 				    async_extent->start,
878 				    async_extent->ram_size,
879 				    ins.objectid,
880 				    ins.offset, async_extent->pages,
881 				    async_extent->nr_pages,
882 				    async_chunk->write_flags,
883 				    async_chunk->blkcg_css)) {
884 			struct page *p = async_extent->pages[0];
885 			const u64 start = async_extent->start;
886 			const u64 end = start + async_extent->ram_size - 1;
887 
888 			p->mapping = inode->i_mapping;
889 			btrfs_writepage_endio_finish_ordered(p, start, end, 0);
890 
891 			p->mapping = NULL;
892 			extent_clear_unlock_delalloc(inode, start, end,
893 						     NULL, 0,
894 						     PAGE_END_WRITEBACK |
895 						     PAGE_SET_ERROR);
896 			free_async_extent_pages(async_extent);
897 		}
898 		alloc_hint = ins.objectid + ins.offset;
899 		kfree(async_extent);
900 		cond_resched();
901 	}
902 	return;
903 out_free_reserve:
904 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
905 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
906 out_free:
907 	extent_clear_unlock_delalloc(inode, async_extent->start,
908 				     async_extent->start +
909 				     async_extent->ram_size - 1,
910 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
911 				     EXTENT_DELALLOC_NEW |
912 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
913 				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
914 				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
915 				     PAGE_SET_ERROR);
916 	free_async_extent_pages(async_extent);
917 	kfree(async_extent);
918 	goto again;
919 }
920 
921 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
922 				      u64 num_bytes)
923 {
924 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
925 	struct extent_map *em;
926 	u64 alloc_hint = 0;
927 
928 	read_lock(&em_tree->lock);
929 	em = search_extent_mapping(em_tree, start, num_bytes);
930 	if (em) {
931 		/*
932 		 * if block start isn't an actual block number then find the
933 		 * first block in this inode and use that as a hint.  If that
934 		 * block is also bogus then just don't worry about it.
935 		 */
936 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
937 			free_extent_map(em);
938 			em = search_extent_mapping(em_tree, 0, 0);
939 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
940 				alloc_hint = em->block_start;
941 			if (em)
942 				free_extent_map(em);
943 		} else {
944 			alloc_hint = em->block_start;
945 			free_extent_map(em);
946 		}
947 	}
948 	read_unlock(&em_tree->lock);
949 
950 	return alloc_hint;
951 }
952 
953 /*
954  * when extent_io.c finds a delayed allocation range in the file,
955  * the callbacks end up in this code.  The basic idea is to
956  * allocate extents on disk for the range, and create ordered data structs
957  * in ram to track those extents.
958  *
959  * locked_page is the page that writepage had locked already.  We use
960  * it to make sure we don't do extra locks or unlocks.
961  *
962  * *page_started is set to one if we unlock locked_page and do everything
963  * required to start IO on it.  It may be clean and already done with
964  * IO when we return.
965  */
966 static noinline int cow_file_range(struct inode *inode,
967 				   struct page *locked_page,
968 				   u64 start, u64 end, int *page_started,
969 				   unsigned long *nr_written, int unlock)
970 {
971 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
972 	struct btrfs_root *root = BTRFS_I(inode)->root;
973 	u64 alloc_hint = 0;
974 	u64 num_bytes;
975 	unsigned long ram_size;
976 	u64 cur_alloc_size = 0;
977 	u64 blocksize = fs_info->sectorsize;
978 	struct btrfs_key ins;
979 	struct extent_map *em;
980 	unsigned clear_bits;
981 	unsigned long page_ops;
982 	bool extent_reserved = false;
983 	int ret = 0;
984 
985 	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
986 		WARN_ON_ONCE(1);
987 		ret = -EINVAL;
988 		goto out_unlock;
989 	}
990 
991 	num_bytes = ALIGN(end - start + 1, blocksize);
992 	num_bytes = max(blocksize,  num_bytes);
993 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
994 
995 	inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
996 
997 	if (start == 0) {
998 		/* let's try to make an inline extent */
999 		ret = cow_file_range_inline(inode, start, end, 0,
1000 					    BTRFS_COMPRESS_NONE, NULL);
1001 		if (ret == 0) {
1002 			/*
1003 			 * We use DO_ACCOUNTING here because we need the
1004 			 * delalloc_release_metadata to be run _after_ we drop
1005 			 * our outstanding extent for clearing delalloc for this
1006 			 * range.
1007 			 */
1008 			extent_clear_unlock_delalloc(inode, start, end, NULL,
1009 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1010 				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1011 				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1012 				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1013 				     PAGE_END_WRITEBACK);
1014 			*nr_written = *nr_written +
1015 			     (end - start + PAGE_SIZE) / PAGE_SIZE;
1016 			*page_started = 1;
1017 			goto out;
1018 		} else if (ret < 0) {
1019 			goto out_unlock;
1020 		}
1021 	}
1022 
1023 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1024 	btrfs_drop_extent_cache(BTRFS_I(inode), start,
1025 			start + num_bytes - 1, 0);
1026 
1027 	while (num_bytes > 0) {
1028 		cur_alloc_size = num_bytes;
1029 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1030 					   fs_info->sectorsize, 0, alloc_hint,
1031 					   &ins, 1, 1);
1032 		if (ret < 0)
1033 			goto out_unlock;
1034 		cur_alloc_size = ins.offset;
1035 		extent_reserved = true;
1036 
1037 		ram_size = ins.offset;
1038 		em = create_io_em(inode, start, ins.offset, /* len */
1039 				  start, /* orig_start */
1040 				  ins.objectid, /* block_start */
1041 				  ins.offset, /* block_len */
1042 				  ins.offset, /* orig_block_len */
1043 				  ram_size, /* ram_bytes */
1044 				  BTRFS_COMPRESS_NONE, /* compress_type */
1045 				  BTRFS_ORDERED_REGULAR /* type */);
1046 		if (IS_ERR(em)) {
1047 			ret = PTR_ERR(em);
1048 			goto out_reserve;
1049 		}
1050 		free_extent_map(em);
1051 
1052 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1053 					       ram_size, cur_alloc_size, 0);
1054 		if (ret)
1055 			goto out_drop_extent_cache;
1056 
1057 		if (root->root_key.objectid ==
1058 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1059 			ret = btrfs_reloc_clone_csums(inode, start,
1060 						      cur_alloc_size);
1061 			/*
1062 			 * Only drop cache here, and process as normal.
1063 			 *
1064 			 * We must not allow extent_clear_unlock_delalloc()
1065 			 * at the out_unlock label to free the metadata of this
1066 			 * ordered extent, as that should be freed only by
1067 			 * btrfs_finish_ordered_io().
1068 			 *
1069 			 * So we must continue until @start is increased past
1070 			 * the current ordered extent.
1071 			 */
1072 			if (ret)
1073 				btrfs_drop_extent_cache(BTRFS_I(inode), start,
1074 						start + ram_size - 1, 0);
1075 		}
1076 
1077 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1078 
1079 		/* we're not doing compressed IO, don't unlock the first
1080 		 * page (which the caller expects to stay locked), don't
1081 		 * clear any dirty bits and don't set any writeback bits
1082 		 *
1083 		 * Do set the Private2 bit so we know this page was properly
1084 		 * set up for writepage
1085 		 */
1086 		page_ops = unlock ? PAGE_UNLOCK : 0;
1087 		page_ops |= PAGE_SET_PRIVATE2;
1088 
1089 		extent_clear_unlock_delalloc(inode, start,
1090 					     start + ram_size - 1,
1091 					     locked_page,
1092 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1093 					     page_ops);
1094 		if (num_bytes < cur_alloc_size)
1095 			num_bytes = 0;
1096 		else
1097 			num_bytes -= cur_alloc_size;
1098 		alloc_hint = ins.objectid + ins.offset;
1099 		start += cur_alloc_size;
1100 		extent_reserved = false;
1101 
1102 		/*
1103 		 * On btrfs_reloc_clone_csums() error: since start was increased,
1104 		 * extent_clear_unlock_delalloc() at the out_unlock label won't
1105 		 * free the metadata of the current ordered extent, so we're OK to exit.
1106 		 */
1107 		if (ret)
1108 			goto out_unlock;
1109 	}
1110 out:
1111 	return ret;
1112 
1113 out_drop_extent_cache:
1114 	btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1115 out_reserve:
1116 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1117 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1118 out_unlock:
1119 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1120 		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1121 	page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1122 		PAGE_END_WRITEBACK;
1123 	/*
1124 	 * If we reserved an extent for our delalloc range (or a subrange) and
1125 	 * failed to create the respective ordered extent, then it means that
1126 	 * when we reserved the extent we decremented the extent's size from
1127 	 * the data space_info's bytes_may_use counter and incremented the
1128 	 * space_info's bytes_reserved counter by the same amount. We must make
1129 	 * sure extent_clear_unlock_delalloc() does not try to decrement again
1130 	 * the data space_info's bytes_may_use counter, therefore we do not pass
1131 	 * it the flag EXTENT_CLEAR_DATA_RESV.
1132 	 */
1133 	if (extent_reserved) {
1134 		extent_clear_unlock_delalloc(inode, start,
1135 					     start + cur_alloc_size,
1136 					     locked_page,
1137 					     clear_bits,
1138 					     page_ops);
1139 		start += cur_alloc_size;
1140 		if (start >= end)
1141 			goto out;
1142 	}
1143 	extent_clear_unlock_delalloc(inode, start, end, locked_page,
1144 				     clear_bits | EXTENT_CLEAR_DATA_RESV,
1145 				     page_ops);
1146 	goto out;
1147 }
1148 
1149 /*
1150  * work queue callback to start compression on a file and pages
1151  */
1152 static noinline void async_cow_start(struct btrfs_work *work)
1153 {
1154 	struct async_chunk *async_chunk;
1155 	int compressed_extents;
1156 
1157 	async_chunk = container_of(work, struct async_chunk, work);
1158 
1159 	compressed_extents = compress_file_range(async_chunk);
1160 	if (compressed_extents == 0) {
1161 		btrfs_add_delayed_iput(async_chunk->inode);
1162 		async_chunk->inode = NULL;
1163 	}
1164 }
1165 
1166 /*
1167  * work queue callback to submit previously compressed pages
1168  */
1169 static noinline void async_cow_submit(struct btrfs_work *work)
1170 {
1171 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1172 						     work);
1173 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1174 	unsigned long nr_pages;
1175 
1176 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1177 		PAGE_SHIFT;
1178 
1179 	/* atomic_sub_return implies a barrier */
1180 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1181 	    5 * SZ_1M)
1182 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1183 
1184 	/*
1185 	 * ->inode could be NULL if async_cow_start has failed to compress,
1186 	 * in which case we don't have anything to submit, yet we need to
1187 	 * always adjust ->async_delalloc_pages as it's paired with the init
1188 	 * happening in cow_file_range_async
1189 	 */
1190 	if (async_chunk->inode)
1191 		submit_compressed_extents(async_chunk);
1192 }
1193 
1194 static noinline void async_cow_free(struct btrfs_work *work)
1195 {
1196 	struct async_chunk *async_chunk;
1197 
1198 	async_chunk = container_of(work, struct async_chunk, work);
1199 	if (async_chunk->inode)
1200 		btrfs_add_delayed_iput(async_chunk->inode);
1201 	if (async_chunk->blkcg_css)
1202 		css_put(async_chunk->blkcg_css);
1203 	/*
1204 	 * Since 'pending' points to the first member of the containing
1205 	 * async_cow struct, freeing it frees the whole struct and its chunks.
1206 	 */
1207 	if (atomic_dec_and_test(async_chunk->pending))
1208 		kvfree(async_chunk->pending);
1209 }
1210 
1211 static int cow_file_range_async(struct inode *inode,
1212 				struct writeback_control *wbc,
1213 				struct page *locked_page,
1214 				u64 start, u64 end, int *page_started,
1215 				unsigned long *nr_written)
1216 {
1217 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1218 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1219 	struct async_cow *ctx;
1220 	struct async_chunk *async_chunk;
1221 	unsigned long nr_pages;
1222 	u64 cur_end;
1223 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1224 	int i;
1225 	bool should_compress;
1226 	unsigned nofs_flag;
1227 	const unsigned int write_flags = wbc_to_write_flags(wbc);
1228 
1229 	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1230 
1231 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1232 	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
1233 		num_chunks = 1;
1234 		should_compress = false;
1235 	} else {
1236 		should_compress = true;
1237 	}
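	/*
	 * E.g. for a 1M delalloc range with compression enabled, the
	 * DIV_ROUND_UP() above yields two 512K chunks, each compressed and
	 * submitted by its own work item; with compression ruled out the
	 * whole range becomes a single chunk.
	 */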
1238 
1239 	nofs_flag = memalloc_nofs_save();
1240 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1241 	memalloc_nofs_restore(nofs_flag);
1242 
1243 	if (!ctx) {
1244 		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
1245 			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1246 			EXTENT_DO_ACCOUNTING;
1247 		unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
1248 			PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
1249 			PAGE_SET_ERROR;
1250 
1251 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1252 					     clear_bits, page_ops);
1253 		return -ENOMEM;
1254 	}
1255 
1256 	async_chunk = ctx->chunks;
1257 	atomic_set(&ctx->num_chunks, num_chunks);
1258 
1259 	for (i = 0; i < num_chunks; i++) {
1260 		if (should_compress)
1261 			cur_end = min(end, start + SZ_512K - 1);
1262 		else
1263 			cur_end = end;
1264 
1265 		/*
1266 		 * igrab is called higher up in the call chain, take only the
1267 		 * lightweight reference for the callback lifetime
1268 		 */
1269 		ihold(inode);
1270 		async_chunk[i].pending = &ctx->num_chunks;
1271 		async_chunk[i].inode = inode;
1272 		async_chunk[i].start = start;
1273 		async_chunk[i].end = cur_end;
1274 		async_chunk[i].write_flags = write_flags;
1275 		INIT_LIST_HEAD(&async_chunk[i].extents);
1276 
1277 		/*
1278 		 * The locked_page comes all the way from writepage and it's
1279 		 * the original page we were actually given.  As we spread
1280 		 * this large delalloc region across multiple async_chunk
1281 		 * structs, only the first struct needs a pointer to locked_page
1282 		 *
1283 		 * This way we don't need racy decisions about who is supposed
1284 		 * to unlock it.
1285 		 */
1286 		if (locked_page) {
1287 			/*
1288 			 * Depending on the compressibility, the pages might or
1289 			 * might not go through async.  We want all of them to
1290 			 * be accounted against wbc once.  Let's do it here
1291 			 * before the paths diverge.  wbc accounting is used
1292 			 * only for foreign writeback detection and doesn't
1293 			 * need full accuracy.  Just account the whole thing
1294 			 * against the first page.
1295 			 */
1296 			wbc_account_cgroup_owner(wbc, locked_page,
1297 						 cur_end - start);
1298 			async_chunk[i].locked_page = locked_page;
1299 			locked_page = NULL;
1300 		} else {
1301 			async_chunk[i].locked_page = NULL;
1302 		}
1303 
1304 		if (blkcg_css != blkcg_root_css) {
1305 			css_get(blkcg_css);
1306 			async_chunk[i].blkcg_css = blkcg_css;
1307 		} else {
1308 			async_chunk[i].blkcg_css = NULL;
1309 		}
1310 
1311 		btrfs_init_work(&async_chunk[i].work, async_cow_start,
1312 				async_cow_submit, async_cow_free);
1313 
1314 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1315 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1316 
1317 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1318 
1319 		*nr_written += nr_pages;
1320 		start = cur_end + 1;
1321 	}
1322 	*page_started = 1;
1323 	return 0;
1324 }
1325 
1326 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1327 					u64 bytenr, u64 num_bytes)
1328 {
1329 	int ret;
1330 	struct btrfs_ordered_sum *sums;
1331 	LIST_HEAD(list);
1332 
1333 	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1334 				       bytenr + num_bytes - 1, &list, 0);
1335 	if (ret == 0 && list_empty(&list))
1336 		return 0;
1337 
1338 	while (!list_empty(&list)) {
1339 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1340 		list_del(&sums->list);
1341 		kfree(sums);
1342 	}
1343 	if (ret < 0)
1344 		return ret;
1345 	return 1;
1346 }
1347 
1348 /*
1349  * Called for the nocow writeback path.  This checks for snapshots or COW copies
1350  * of the extents that exist in the file, and COWs the file as required.
1351  *
1352  * If no cow copies or snapshots exist, we write directly to the existing
1353  * blocks on disk
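 *
 * For example, a write to a NODATACOW file on a subvolume with no pending
 * snapshots, whose target is a plain regular or prealloc extent with no
 * csums, is written in place; any subrange failing those checks is handed
 * to cow_file_range() instead.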
1354  */
1355 static noinline int run_delalloc_nocow(struct inode *inode,
1356 				       struct page *locked_page,
1357 				       const u64 start, const u64 end,
1358 				       int *page_started, int force,
1359 				       unsigned long *nr_written)
1360 {
1361 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1362 	struct btrfs_root *root = BTRFS_I(inode)->root;
1363 	struct btrfs_path *path;
1364 	u64 cow_start = (u64)-1;
1365 	u64 cur_offset = start;
1366 	int ret;
1367 	bool check_prev = true;
1368 	const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
1369 	u64 ino = btrfs_ino(BTRFS_I(inode));
1370 	bool nocow = false;
1371 	u64 disk_bytenr = 0;
1372 
1373 	path = btrfs_alloc_path();
1374 	if (!path) {
1375 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1376 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1377 					     EXTENT_DO_ACCOUNTING |
1378 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1379 					     PAGE_CLEAR_DIRTY |
1380 					     PAGE_SET_WRITEBACK |
1381 					     PAGE_END_WRITEBACK);
1382 		return -ENOMEM;
1383 	}
1384 
1385 	while (1) {
1386 		struct btrfs_key found_key;
1387 		struct btrfs_file_extent_item *fi;
1388 		struct extent_buffer *leaf;
1389 		u64 extent_end;
1390 		u64 extent_offset;
1391 		u64 num_bytes = 0;
1392 		u64 disk_num_bytes;
1393 		u64 ram_bytes;
1394 		int extent_type;
1395 
1396 		nocow = false;
1397 
1398 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1399 					       cur_offset, 0);
1400 		if (ret < 0)
1401 			goto error;
1402 
1403 		/*
1404 		 * If there is no extent for our range when doing the initial
1405 		 * search, then go back to the previous slot as it will be the
1406 		 * one containing the search offset
1407 		 */
1408 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1409 			leaf = path->nodes[0];
1410 			btrfs_item_key_to_cpu(leaf, &found_key,
1411 					      path->slots[0] - 1);
1412 			if (found_key.objectid == ino &&
1413 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1414 				path->slots[0]--;
1415 		}
1416 		check_prev = false;
1417 next_slot:
1418 		/* Go to next leaf if we have exhausted the current one */
1419 		leaf = path->nodes[0];
1420 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1421 			ret = btrfs_next_leaf(root, path);
1422 			if (ret < 0) {
1423 				if (cow_start != (u64)-1)
1424 					cur_offset = cow_start;
1425 				goto error;
1426 			}
1427 			if (ret > 0)
1428 				break;
1429 			leaf = path->nodes[0];
1430 		}
1431 
1432 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1433 
1434 		/* Didn't find anything for our INO */
1435 		if (found_key.objectid > ino)
1436 			break;
1437 		/*
1438 		 * Keep searching until we find an EXTENT_ITEM or there are no
1439 		 * more extents for this inode
1440 		 */
1441 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
1442 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
1443 			path->slots[0]++;
1444 			goto next_slot;
1445 		}
1446 
1447 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
1448 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1449 		    found_key.offset > end)
1450 			break;
1451 
1452 		/*
1453 		 * If the found extent starts after requested offset, then
1454 		 * adjust extent_end to be right before this extent begins
1455 		 */
1456 		if (found_key.offset > cur_offset) {
1457 			extent_end = found_key.offset;
1458 			extent_type = 0;
1459 			goto out_check;
1460 		}
1461 
1462 		/*
1463 		 * Found an extent which begins before our range and potentially
1464 		 * intersects it
1465 		 */
1466 		fi = btrfs_item_ptr(leaf, path->slots[0],
1467 				    struct btrfs_file_extent_item);
1468 		extent_type = btrfs_file_extent_type(leaf, fi);
1469 
1470 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1471 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1472 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1473 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1474 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1475 			extent_end = found_key.offset +
1476 				btrfs_file_extent_num_bytes(leaf, fi);
1477 			disk_num_bytes =
1478 				btrfs_file_extent_disk_num_bytes(leaf, fi);
1479 			/*
1480 			 * If the extent we got ends before our current offset,
1481 			 * skip to the next extent.
1482 			 */
1483 			if (extent_end <= cur_offset) {
1484 				path->slots[0]++;
1485 				goto next_slot;
1486 			}
1487 			/* Skip holes */
1488 			if (disk_bytenr == 0)
1489 				goto out_check;
1490 			/* Skip compressed/encrypted/encoded extents */
1491 			if (btrfs_file_extent_compression(leaf, fi) ||
1492 			    btrfs_file_extent_encryption(leaf, fi) ||
1493 			    btrfs_file_extent_other_encoding(leaf, fi))
1494 				goto out_check;
1495 			/*
1496 			 * If the extent was created before the last snapshot of this
1497 			 * subvolume, it may be shared, hence we can't do
1498 			 * nocow. This is the same check as in
1499 			 * btrfs_cross_ref_exist but without calling
1500 			 * btrfs_search_slot.
1501 			 */
1502 			if (!freespace_inode &&
1503 			    btrfs_file_extent_generation(leaf, fi) <=
1504 			    btrfs_root_last_snapshot(&root->root_item))
1505 				goto out_check;
1506 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1507 				goto out_check;
1508 			/* If extent is RO, we must COW it */
1509 			if (btrfs_extent_readonly(fs_info, disk_bytenr))
1510 				goto out_check;
1511 			ret = btrfs_cross_ref_exist(root, ino,
1512 						    found_key.offset -
1513 						    extent_offset, disk_bytenr);
1514 			if (ret) {
1515 				/*
1516 				 * ret could be -EIO if the above fails to read
1517 				 * metadata.
1518 				 */
1519 				if (ret < 0) {
1520 					if (cow_start != (u64)-1)
1521 						cur_offset = cow_start;
1522 					goto error;
1523 				}
1524 
1525 				WARN_ON_ONCE(freespace_inode);
1526 				goto out_check;
1527 			}
1528 			disk_bytenr += extent_offset;
1529 			disk_bytenr += cur_offset - found_key.offset;
1530 			num_bytes = min(end + 1, extent_end) - cur_offset;
1531 			/*
1532 			 * If there are pending snapshots for this root, we
1533 			 * fall back to the common COW path
1534 			 */
1535 			if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
1536 				goto out_check;
1537 			/*
1538 			 * Force COW if csums exist in the range.
1539 			 * This ensures that the csums for a given extent are
1540 			 * either valid or do not exist.
1541 			 */
1542 			ret = csum_exist_in_range(fs_info, disk_bytenr,
1543 						  num_bytes);
1544 			if (ret) {
1545 				/*
1546 				 * ret could be -EIO if the above fails to read
1547 				 * metadata.
1548 				 */
1549 				if (ret < 0) {
1550 					if (cow_start != (u64)-1)
1551 						cur_offset = cow_start;
1552 					goto error;
1553 				}
1554 				WARN_ON_ONCE(freespace_inode);
1555 				goto out_check;
1556 			}
1557 			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1558 				goto out_check;
1559 			nocow = true;
1560 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1561 			extent_end = found_key.offset + ram_bytes;
1562 			extent_end = ALIGN(extent_end, fs_info->sectorsize);
1563 			/* Skip extents outside of our requested range */
1564 			if (extent_end <= start) {
1565 				path->slots[0]++;
1566 				goto next_slot;
1567 			}
1568 		} else {
1569 			/* If this triggers then we have a memory corruption */
1570 			BUG();
1571 		}
1572 out_check:
1573 		/*
1574 		 * If nocow is false then record the beginning of the range
1575 		 * that needs to be COWed
1576 		 */
1577 		if (!nocow) {
1578 			if (cow_start == (u64)-1)
1579 				cow_start = cur_offset;
1580 			cur_offset = extent_end;
1581 			if (cur_offset > end)
1582 				break;
1583 			path->slots[0]++;
1584 			goto next_slot;
1585 		}
1586 
1587 		btrfs_release_path(path);
1588 
1589 		/*
1590 		 * COW the range from cow_start to found_key.offset - 1. The key
1591 		 * contains the beginning of the first extent that can be
1592 		 * NOCOW'ed, following a range that needs to be COW'ed
1593 		 */
1594 		if (cow_start != (u64)-1) {
1595 			ret = cow_file_range(inode, locked_page,
1596 					     cow_start, found_key.offset - 1,
1597 					     page_started, nr_written, 1);
1598 			if (ret) {
1599 				if (nocow)
1600 					btrfs_dec_nocow_writers(fs_info,
1601 								disk_bytenr);
1602 				goto error;
1603 			}
1604 			cow_start = (u64)-1;
1605 		}
1606 
1607 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1608 			u64 orig_start = found_key.offset - extent_offset;
1609 			struct extent_map *em;
1610 
1611 			em = create_io_em(inode, cur_offset, num_bytes,
1612 					  orig_start,
1613 					  disk_bytenr, /* block_start */
1614 					  num_bytes, /* block_len */
1615 					  disk_num_bytes, /* orig_block_len */
1616 					  ram_bytes, BTRFS_COMPRESS_NONE,
1617 					  BTRFS_ORDERED_PREALLOC);
1618 			if (IS_ERR(em)) {
1619 				if (nocow)
1620 					btrfs_dec_nocow_writers(fs_info,
1621 								disk_bytenr);
1622 				ret = PTR_ERR(em);
1623 				goto error;
1624 			}
1625 			free_extent_map(em);
1626 			ret = btrfs_add_ordered_extent(inode, cur_offset,
1627 						       disk_bytenr, num_bytes,
1628 						       num_bytes,
1629 						       BTRFS_ORDERED_PREALLOC);
1630 			if (ret) {
1631 				btrfs_drop_extent_cache(BTRFS_I(inode),
1632 							cur_offset,
1633 							cur_offset + num_bytes - 1,
1634 							0);
1635 				goto error;
1636 			}
1637 		} else {
1638 			ret = btrfs_add_ordered_extent(inode, cur_offset,
1639 						       disk_bytenr, num_bytes,
1640 						       num_bytes,
1641 						       BTRFS_ORDERED_NOCOW);
1642 			if (ret)
1643 				goto error;
1644 		}
1645 
1646 		if (nocow)
1647 			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1648 		nocow = false;
1649 
1650 		if (root->root_key.objectid ==
1651 		    BTRFS_DATA_RELOC_TREE_OBJECTID)
1652 			/*
1653 			 * Error handled later, as we must prevent
1654 			 * extent_clear_unlock_delalloc() in error handler
1655 			 * from freeing metadata of created ordered extent.
1656 			 */
1657 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1658 						      num_bytes);
1659 
1660 		extent_clear_unlock_delalloc(inode, cur_offset,
1661 					     cur_offset + num_bytes - 1,
1662 					     locked_page, EXTENT_LOCKED |
1663 					     EXTENT_DELALLOC |
1664 					     EXTENT_CLEAR_DATA_RESV,
1665 					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1666 
1667 		cur_offset = extent_end;
1668 
1669 		/*
1670 		 * On btrfs_reloc_clone_csums() error, we're now OK to call the error
1671 		 * handler, as metadata for the created ordered extent will only
1672 		 * be freed by btrfs_finish_ordered_io().
1673 		 */
1674 		if (ret)
1675 			goto error;
1676 		if (cur_offset > end)
1677 			break;
1678 	}
1679 	btrfs_release_path(path);
1680 
1681 	if (cur_offset <= end && cow_start == (u64)-1)
1682 		cow_start = cur_offset;
1683 
1684 	if (cow_start != (u64)-1) {
1685 		cur_offset = end;
1686 		ret = cow_file_range(inode, locked_page, cow_start, end,
1687 				     page_started, nr_written, 1);
1688 		if (ret)
1689 			goto error;
1690 	}
1691 
1692 error:
1693 	if (nocow)
1694 		btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1695 
1696 	if (ret && cur_offset < end)
1697 		extent_clear_unlock_delalloc(inode, cur_offset, end,
1698 					     locked_page, EXTENT_LOCKED |
1699 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
1700 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1701 					     PAGE_CLEAR_DIRTY |
1702 					     PAGE_SET_WRITEBACK |
1703 					     PAGE_END_WRITEBACK);
1704 	btrfs_free_path(path);
1705 	return ret;
1706 }
1707 
1708 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1709 {
1710 
1711 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1712 	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1713 		return 0;
1714 
1715 	/*
1716 	 * @defrag_bytes is a hint value, no spinlock held here,
1717 	 * if is not zero, it means the file is defragging.
1718 	 * Force cow if given extent needs to be defragged.
1719 	 */
1720 	if (BTRFS_I(inode)->defrag_bytes &&
1721 	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1722 			   EXTENT_DEFRAG, 0, NULL))
1723 		return 1;
1724 
1725 	return 0;
1726 }
1727 
1728 /*
1729  * Function to process delayed allocation (create CoW) for ranges which are
1730  * being touched for the first time.
1731  */
1732 int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
1733 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
1734 		struct writeback_control *wbc)
1735 {
1736 	int ret;
1737 	int force_cow = need_force_cow(inode, start, end);
1738 
1739 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1740 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1741 					 page_started, 1, nr_written);
1742 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1743 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1744 					 page_started, 0, nr_written);
1745 	} else if (!inode_can_compress(inode) ||
1746 		   !inode_need_compress(inode, start, end)) {
1747 		ret = cow_file_range(inode, locked_page, start, end,
1748 				      page_started, nr_written, 1);
1749 	} else {
1750 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1751 			&BTRFS_I(inode)->runtime_flags);
1752 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
1753 					   page_started, nr_written);
1754 	}
1755 	if (ret)
1756 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
1757 					      end - start + 1);
1758 	return ret;
1759 }
1760 
1761 void btrfs_split_delalloc_extent(struct inode *inode,
1762 				 struct extent_state *orig, u64 split)
1763 {
1764 	u64 size;
1765 
1766 	/* not delalloc, ignore it */
1767 	if (!(orig->state & EXTENT_DELALLOC))
1768 		return;
1769 
1770 	size = orig->end - orig->start + 1;
1771 	if (size > BTRFS_MAX_EXTENT_SIZE) {
1772 		u32 num_extents;
1773 		u64 new_size;
1774 
1775 		/*
1776 		 * See the explanation in btrfs_merge_delalloc_extent, the same
1777 		 * applies here, just in reverse.
1778 		 */
1779 		new_size = orig->end - split + 1;
1780 		num_extents = count_max_extents(new_size);
1781 		new_size = split - orig->start;
1782 		num_extents += count_max_extents(new_size);
1783 		if (count_max_extents(size) >= num_extents)
1784 			return;
1785 	}
1786 
1787 	spin_lock(&BTRFS_I(inode)->lock);
1788 	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1789 	spin_unlock(&BTRFS_I(inode)->lock);
1790 }
1791 
1792 /*
1793  * Handle merged delayed allocation extents so we can keep track of new extents
1794  * that are just merged onto old extents, such as when we are doing sequential
1795  * writes, so we can properly account for the metadata space we'll need.
1796  */
1797 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1798 				 struct extent_state *other)
1799 {
1800 	u64 new_size, old_size;
1801 	u32 num_extents;
1802 
1803 	/* not delalloc, ignore it */
1804 	if (!(other->state & EXTENT_DELALLOC))
1805 		return;
1806 
1807 	if (new->start > other->start)
1808 		new_size = new->end - other->start + 1;
1809 	else
1810 		new_size = other->end - new->start + 1;
1811 
1812 	/* we're not bigger than the max, unreserve the space and go */
1813 	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1814 		spin_lock(&BTRFS_I(inode)->lock);
1815 		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1816 		spin_unlock(&BTRFS_I(inode)->lock);
1817 		return;
1818 	}
1819 
1820 	/*
1821 	 * We have to add up either side to figure out how many extents were
1822 	 * accounted for before we merged into one big extent.  If the number of
1823 	 * extents we accounted for is <= the amount we need for the new range
1824 	 * then we can return, otherwise drop.  Think of it like this
1825 	 *
1826 	 * [ 4k][MAX_SIZE]
1827 	 *
1828 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
1829 	 * need 2 outstanding extents, on one side we have 1 and the other side
1830 	 * we have 1 so they are == and we can return.  But in this case
1831 	 *
1832 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
1833 	 *
1834 	 * Each range on their own accounts for 2 extents, but merged together
1835 	 * they are only 3 extents worth of accounting, so we need to drop in
1836 	 * this case.
1837 	 */
1838 	old_size = other->end - other->start + 1;
1839 	num_extents = count_max_extents(old_size);
1840 	old_size = new->end - new->start + 1;
1841 	num_extents += count_max_extents(old_size);
1842 	if (count_max_extents(new_size) >= num_extents)
1843 		return;
1844 
1845 	spin_lock(&BTRFS_I(inode)->lock);
1846 	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1847 	spin_unlock(&BTRFS_I(inode)->lock);
1848 }
1849 
1850 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1851 				      struct inode *inode)
1852 {
1853 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1854 
1855 	spin_lock(&root->delalloc_lock);
1856 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1857 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1858 			      &root->delalloc_inodes);
1859 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1860 			&BTRFS_I(inode)->runtime_flags);
1861 		root->nr_delalloc_inodes++;
1862 		if (root->nr_delalloc_inodes == 1) {
1863 			spin_lock(&fs_info->delalloc_root_lock);
1864 			BUG_ON(!list_empty(&root->delalloc_root));
1865 			list_add_tail(&root->delalloc_root,
1866 				      &fs_info->delalloc_roots);
1867 			spin_unlock(&fs_info->delalloc_root_lock);
1868 		}
1869 	}
1870 	spin_unlock(&root->delalloc_lock);
1871 }
1872 
1873 
1874 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1875 				struct btrfs_inode *inode)
1876 {
1877 	struct btrfs_fs_info *fs_info = root->fs_info;
1878 
1879 	if (!list_empty(&inode->delalloc_inodes)) {
1880 		list_del_init(&inode->delalloc_inodes);
1881 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1882 			  &inode->runtime_flags);
1883 		root->nr_delalloc_inodes--;
1884 		if (!root->nr_delalloc_inodes) {
1885 			ASSERT(list_empty(&root->delalloc_inodes));
1886 			spin_lock(&fs_info->delalloc_root_lock);
1887 			BUG_ON(list_empty(&root->delalloc_root));
1888 			list_del_init(&root->delalloc_root);
1889 			spin_unlock(&fs_info->delalloc_root_lock);
1890 		}
1891 	}
1892 }
1893 
1894 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1895 				     struct btrfs_inode *inode)
1896 {
1897 	spin_lock(&root->delalloc_lock);
1898 	__btrfs_del_delalloc_inode(root, inode);
1899 	spin_unlock(&root->delalloc_lock);
1900 }
1901 
1902 /*
1903  * Properly track delayed allocation bytes in the inode and to maintain the
1904  * list of inodes that have pending delalloc work to be done.
1905  */
1906 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
1907 			       unsigned *bits)
1908 {
1909 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1910 
1911 	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1912 		WARN_ON(1);
1913 	/*
1914 	 * set_bit and clear bit hooks normally require _irqsave/restore
1915 	 * but in this case, we are only testing for the DELALLOC
1916 	 * bit, which is only set or cleared with irqs on
1917 	 */
1918 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1919 		struct btrfs_root *root = BTRFS_I(inode)->root;
1920 		u64 len = state->end + 1 - state->start;
1921 		u32 num_extents = count_max_extents(len);
1922 		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1923 
1924 		spin_lock(&BTRFS_I(inode)->lock);
1925 		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1926 		spin_unlock(&BTRFS_I(inode)->lock);
1927 
1928 		/* For sanity tests */
1929 		if (btrfs_is_testing(fs_info))
1930 			return;
1931 
1932 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1933 					 fs_info->delalloc_batch);
1934 		spin_lock(&BTRFS_I(inode)->lock);
1935 		BTRFS_I(inode)->delalloc_bytes += len;
1936 		if (*bits & EXTENT_DEFRAG)
1937 			BTRFS_I(inode)->defrag_bytes += len;
1938 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1939 					 &BTRFS_I(inode)->runtime_flags))
1940 			btrfs_add_delalloc_inodes(root, inode);
1941 		spin_unlock(&BTRFS_I(inode)->lock);
1942 	}
1943 
1944 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
1945 	    (*bits & EXTENT_DELALLOC_NEW)) {
1946 		spin_lock(&BTRFS_I(inode)->lock);
1947 		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1948 			state->start;
1949 		spin_unlock(&BTRFS_I(inode)->lock);
1950 	}
1951 }
1952 
1953 /*
1954  * Once a range is no longer delalloc this function ensures that proper
1955  * accounting happens.
1956  */
1957 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
1958 				 struct extent_state *state, unsigned *bits)
1959 {
1960 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
1961 	struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
1962 	u64 len = state->end + 1 - state->start;
1963 	u32 num_extents = count_max_extents(len);
1964 
1965 	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1966 		spin_lock(&inode->lock);
1967 		inode->defrag_bytes -= len;
1968 		spin_unlock(&inode->lock);
1969 	}
1970 
1971 	/*
1972 	 * set_bit and clear bit hooks normally require _irqsave/restore
1973 	 * but in this case, we are only testing for the DELALLOC
1974 	 * bit, which is only set or cleared with irqs on
1975 	 */
1976 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1977 		struct btrfs_root *root = inode->root;
1978 		bool do_list = !btrfs_is_free_space_inode(inode);
1979 
1980 		spin_lock(&inode->lock);
1981 		btrfs_mod_outstanding_extents(inode, -num_extents);
1982 		spin_unlock(&inode->lock);
1983 
1984 		/*
1985 		 * We don't reserve metadata space for space cache inodes so we
1986 		 * don't need to call delalloc_release_metadata if there is an
1987 		 * error.
1988 		 */
1989 		if (*bits & EXTENT_CLEAR_META_RESV &&
1990 		    root != fs_info->tree_root)
1991 			btrfs_delalloc_release_metadata(inode, len, false);
1992 
1993 		/* For sanity tests. */
1994 		if (btrfs_is_testing(fs_info))
1995 			return;
1996 
1997 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1998 		    do_list && !(state->state & EXTENT_NORESERVE) &&
1999 		    (*bits & EXTENT_CLEAR_DATA_RESV))
2000 			btrfs_free_reserved_data_space_noquota(
2001 					&inode->vfs_inode,
2002 					state->start, len);
2003 
2004 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2005 					 fs_info->delalloc_batch);
2006 		spin_lock(&inode->lock);
2007 		inode->delalloc_bytes -= len;
2008 		if (do_list && inode->delalloc_bytes == 0 &&
2009 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2010 					&inode->runtime_flags))
2011 			btrfs_del_delalloc_inode(root, inode);
2012 		spin_unlock(&inode->lock);
2013 	}
2014 
2015 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2016 	    (*bits & EXTENT_DELALLOC_NEW)) {
2017 		spin_lock(&inode->lock);
2018 		ASSERT(inode->new_delalloc_bytes >= len);
2019 		inode->new_delalloc_bytes -= len;
2020 		spin_unlock(&inode->lock);
2021 	}
2022 }
2023 
2024 /*
2025  * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
2026  * in a chunk's stripe. This function ensures that bios do not span a
2027  * stripe/chunk
2028  *
2029  * @page - The page we are about to add to the bio
2030  * @size - size we want to add to the bio
2031  * @bio - bio we want to ensure is smaller than a stripe
2032  * @bio_flags - flags of the bio
2033  *
2034  * return 1 if page cannot be added to the bio
2035  * return 0 if page can be added to the bio
2036  * return error otherwise
2037  */
2038 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
2039 			     unsigned long bio_flags)
2040 {
2041 	struct inode *inode = page->mapping->host;
2042 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2043 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
2044 	u64 length = 0;
2045 	u64 map_length;
2046 	int ret;
2047 	struct btrfs_io_geometry geom;
2048 
2049 	if (bio_flags & EXTENT_BIO_COMPRESSED)
2050 		return 0;
2051 
2052 	length = bio->bi_iter.bi_size;
2053 	map_length = length;
2054 	ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length,
2055 				    &geom);
2056 	if (ret < 0)
2057 		return ret;
2058 
2059 	if (geom.len < length + size)
2060 		return 1;
2061 	return 0;
2062 }
2063 
2064 /*
2065  * in order to insert checksums into the metadata in large chunks,
2066  * we wait until bio submission time.   All the pages in the bio are
2067  * checksummed and sums are attached onto the ordered extent record.
2068  *
2069  * At IO completion time the cums attached on the ordered extent record
2070  * are inserted into the btree
2071  */
2072 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
2073 				    u64 bio_offset)
2074 {
2075 	struct inode *inode = private_data;
2076 	blk_status_t ret = 0;
2077 
2078 	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2079 	BUG_ON(ret); /* -ENOMEM */
2080 	return 0;
2081 }
2082 
2083 /*
2084  * extent_io.c submission hook. This does the right thing for csum calculation
2085  * on write, or reading the csums from the tree before a read.
2086  *
2087  * Rules about async/sync submit,
2088  * a) read:				sync submit
2089  *
2090  * b) write without checksum:		sync submit
2091  *
2092  * c) write with checksum:
2093  *    c-1) if bio is issued by fsync:	sync submit
2094  *         (sync_writers != 0)
2095  *
2096  *    c-2) if root is reloc root:	sync submit
2097  *         (only in case of buffered IO)
2098  *
2099  *    c-3) otherwise:			async submit
2100  */
2101 static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
2102 					  int mirror_num,
2103 					  unsigned long bio_flags)
2104 
2105 {
2106 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2107 	struct btrfs_root *root = BTRFS_I(inode)->root;
2108 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2109 	blk_status_t ret = 0;
2110 	int skip_sum;
2111 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2112 
2113 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
2114 
2115 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2116 		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2117 
2118 	if (bio_op(bio) != REQ_OP_WRITE) {
2119 		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2120 		if (ret)
2121 			goto out;
2122 
2123 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
2124 			ret = btrfs_submit_compressed_read(inode, bio,
2125 							   mirror_num,
2126 							   bio_flags);
2127 			goto out;
2128 		} else if (!skip_sum) {
2129 			ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL);
2130 			if (ret)
2131 				goto out;
2132 		}
2133 		goto mapit;
2134 	} else if (async && !skip_sum) {
2135 		/* csum items have already been cloned */
2136 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2137 			goto mapit;
2138 		/* we're doing a write, do the async checksumming */
2139 		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2140 					  0, inode, btrfs_submit_bio_start);
2141 		goto out;
2142 	} else if (!skip_sum) {
2143 		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2144 		if (ret)
2145 			goto out;
2146 	}
2147 
2148 mapit:
2149 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
2150 
2151 out:
2152 	if (ret) {
2153 		bio->bi_status = ret;
2154 		bio_endio(bio);
2155 	}
2156 	return ret;
2157 }
2158 
2159 /*
2160  * given a list of ordered sums record them in the inode.  This happens
2161  * at IO completion time based on sums calculated at bio submission time.
2162  */
2163 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2164 			     struct inode *inode, struct list_head *list)
2165 {
2166 	struct btrfs_ordered_sum *sum;
2167 	int ret;
2168 
2169 	list_for_each_entry(sum, list, list) {
2170 		trans->adding_csums = true;
2171 		ret = btrfs_csum_file_blocks(trans,
2172 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
2173 		trans->adding_csums = false;
2174 		if (ret)
2175 			return ret;
2176 	}
2177 	return 0;
2178 }
2179 
2180 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2181 			      unsigned int extra_bits,
2182 			      struct extent_state **cached_state)
2183 {
2184 	WARN_ON(PAGE_ALIGNED(end));
2185 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2186 				   extra_bits, cached_state);
2187 }
2188 
2189 /* see btrfs_writepage_start_hook for details on why this is required */
2190 struct btrfs_writepage_fixup {
2191 	struct page *page;
2192 	struct btrfs_work work;
2193 };
2194 
2195 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2196 {
2197 	struct btrfs_writepage_fixup *fixup;
2198 	struct btrfs_ordered_extent *ordered;
2199 	struct extent_state *cached_state = NULL;
2200 	struct extent_changeset *data_reserved = NULL;
2201 	struct page *page;
2202 	struct inode *inode;
2203 	u64 page_start;
2204 	u64 page_end;
2205 	int ret;
2206 
2207 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
2208 	page = fixup->page;
2209 again:
2210 	lock_page(page);
2211 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2212 		ClearPageChecked(page);
2213 		goto out_page;
2214 	}
2215 
2216 	inode = page->mapping->host;
2217 	page_start = page_offset(page);
2218 	page_end = page_offset(page) + PAGE_SIZE - 1;
2219 
2220 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2221 			 &cached_state);
2222 
2223 	/* already ordered? We're done */
2224 	if (PagePrivate2(page))
2225 		goto out;
2226 
2227 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2228 					PAGE_SIZE);
2229 	if (ordered) {
2230 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2231 				     page_end, &cached_state);
2232 		unlock_page(page);
2233 		btrfs_start_ordered_extent(inode, ordered, 1);
2234 		btrfs_put_ordered_extent(ordered);
2235 		goto again;
2236 	}
2237 
2238 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2239 					   PAGE_SIZE);
2240 	if (ret) {
2241 		mapping_set_error(page->mapping, ret);
2242 		end_extent_writepage(page, ret, page_start, page_end);
2243 		ClearPageChecked(page);
2244 		goto out;
2245 	 }
2246 
2247 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2248 					&cached_state);
2249 	if (ret) {
2250 		mapping_set_error(page->mapping, ret);
2251 		end_extent_writepage(page, ret, page_start, page_end);
2252 		ClearPageChecked(page);
2253 		goto out_reserved;
2254 	}
2255 
2256 	ClearPageChecked(page);
2257 	set_page_dirty(page);
2258 out_reserved:
2259 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2260 	if (ret)
2261 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2262 					     PAGE_SIZE, true);
2263 out:
2264 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2265 			     &cached_state);
2266 out_page:
2267 	unlock_page(page);
2268 	put_page(page);
2269 	kfree(fixup);
2270 	extent_changeset_free(data_reserved);
2271 }
2272 
2273 /*
2274  * There are a few paths in the higher layers of the kernel that directly
2275  * set the page dirty bit without asking the filesystem if it is a
2276  * good idea.  This causes problems because we want to make sure COW
2277  * properly happens and the data=ordered rules are followed.
2278  *
2279  * In our case any range that doesn't have the ORDERED bit set
2280  * hasn't been properly setup for IO.  We kick off an async process
2281  * to fix it up.  The async helper will wait for ordered extents, set
2282  * the delalloc bit and make it safe to write the page.
2283  */
2284 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
2285 {
2286 	struct inode *inode = page->mapping->host;
2287 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2288 	struct btrfs_writepage_fixup *fixup;
2289 
2290 	/* this page is properly in the ordered list */
2291 	if (TestClearPagePrivate2(page))
2292 		return 0;
2293 
2294 	if (PageChecked(page))
2295 		return -EAGAIN;
2296 
2297 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2298 	if (!fixup)
2299 		return -EAGAIN;
2300 
2301 	SetPageChecked(page);
2302 	get_page(page);
2303 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
2304 	fixup->page = page;
2305 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2306 	return -EBUSY;
2307 }
2308 
2309 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2310 				       struct inode *inode, u64 file_pos,
2311 				       u64 disk_bytenr, u64 disk_num_bytes,
2312 				       u64 num_bytes, u64 ram_bytes,
2313 				       u8 compression, u8 encryption,
2314 				       u16 other_encoding, int extent_type)
2315 {
2316 	struct btrfs_root *root = BTRFS_I(inode)->root;
2317 	struct btrfs_file_extent_item *fi;
2318 	struct btrfs_path *path;
2319 	struct extent_buffer *leaf;
2320 	struct btrfs_key ins;
2321 	u64 qg_released;
2322 	int extent_inserted = 0;
2323 	int ret;
2324 
2325 	path = btrfs_alloc_path();
2326 	if (!path)
2327 		return -ENOMEM;
2328 
2329 	/*
2330 	 * we may be replacing one extent in the tree with another.
2331 	 * The new extent is pinned in the extent map, and we don't want
2332 	 * to drop it from the cache until it is completely in the btree.
2333 	 *
2334 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
2335 	 * the caller is expected to unpin it and allow it to be merged
2336 	 * with the others.
2337 	 */
2338 	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2339 				   file_pos + num_bytes, NULL, 0,
2340 				   1, sizeof(*fi), &extent_inserted);
2341 	if (ret)
2342 		goto out;
2343 
2344 	if (!extent_inserted) {
2345 		ins.objectid = btrfs_ino(BTRFS_I(inode));
2346 		ins.offset = file_pos;
2347 		ins.type = BTRFS_EXTENT_DATA_KEY;
2348 
2349 		path->leave_spinning = 1;
2350 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2351 					      sizeof(*fi));
2352 		if (ret)
2353 			goto out;
2354 	}
2355 	leaf = path->nodes[0];
2356 	fi = btrfs_item_ptr(leaf, path->slots[0],
2357 			    struct btrfs_file_extent_item);
2358 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2359 	btrfs_set_file_extent_type(leaf, fi, extent_type);
2360 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2361 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2362 	btrfs_set_file_extent_offset(leaf, fi, 0);
2363 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2364 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2365 	btrfs_set_file_extent_compression(leaf, fi, compression);
2366 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
2367 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2368 
2369 	btrfs_mark_buffer_dirty(leaf);
2370 	btrfs_release_path(path);
2371 
2372 	inode_add_bytes(inode, num_bytes);
2373 
2374 	ins.objectid = disk_bytenr;
2375 	ins.offset = disk_num_bytes;
2376 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2377 
2378 	/*
2379 	 * Release the reserved range from inode dirty range map, as it is
2380 	 * already moved into delayed_ref_head
2381 	 */
2382 	ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2383 	if (ret < 0)
2384 		goto out;
2385 	qg_released = ret;
2386 	ret = btrfs_alloc_reserved_file_extent(trans, root,
2387 					       btrfs_ino(BTRFS_I(inode)),
2388 					       file_pos, qg_released, &ins);
2389 out:
2390 	btrfs_free_path(path);
2391 
2392 	return ret;
2393 }
2394 
2395 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2396 					 u64 start, u64 len)
2397 {
2398 	struct btrfs_block_group *cache;
2399 
2400 	cache = btrfs_lookup_block_group(fs_info, start);
2401 	ASSERT(cache);
2402 
2403 	spin_lock(&cache->lock);
2404 	cache->delalloc_bytes -= len;
2405 	spin_unlock(&cache->lock);
2406 
2407 	btrfs_put_block_group(cache);
2408 }
2409 
2410 /* as ordered data IO finishes, this gets called so we can finish
2411  * an ordered extent if the range of bytes in the file it covers are
2412  * fully written.
2413  */
2414 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2415 {
2416 	struct inode *inode = ordered_extent->inode;
2417 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2418 	struct btrfs_root *root = BTRFS_I(inode)->root;
2419 	struct btrfs_trans_handle *trans = NULL;
2420 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2421 	struct extent_state *cached_state = NULL;
2422 	u64 start, end;
2423 	int compress_type = 0;
2424 	int ret = 0;
2425 	u64 logical_len = ordered_extent->num_bytes;
2426 	bool freespace_inode;
2427 	bool truncated = false;
2428 	bool range_locked = false;
2429 	bool clear_new_delalloc_bytes = false;
2430 	bool clear_reserved_extent = true;
2431 	unsigned int clear_bits;
2432 
2433 	start = ordered_extent->file_offset;
2434 	end = start + ordered_extent->num_bytes - 1;
2435 
2436 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2437 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2438 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2439 		clear_new_delalloc_bytes = true;
2440 
2441 	freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
2442 
2443 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2444 		ret = -EIO;
2445 		goto out;
2446 	}
2447 
2448 	btrfs_free_io_failure_record(BTRFS_I(inode), start, end);
2449 
2450 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2451 		truncated = true;
2452 		logical_len = ordered_extent->truncated_len;
2453 		/* Truncated the entire extent, don't bother adding */
2454 		if (!logical_len)
2455 			goto out;
2456 	}
2457 
2458 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2459 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2460 
2461 		/*
2462 		 * For mwrite(mmap + memset to write) case, we still reserve
2463 		 * space for NOCOW range.
2464 		 * As NOCOW won't cause a new delayed ref, just free the space
2465 		 */
2466 		btrfs_qgroup_free_data(inode, NULL, start,
2467 				       ordered_extent->num_bytes);
2468 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2469 		if (freespace_inode)
2470 			trans = btrfs_join_transaction_spacecache(root);
2471 		else
2472 			trans = btrfs_join_transaction(root);
2473 		if (IS_ERR(trans)) {
2474 			ret = PTR_ERR(trans);
2475 			trans = NULL;
2476 			goto out;
2477 		}
2478 		trans->block_rsv = &BTRFS_I(inode)->block_rsv;
2479 		ret = btrfs_update_inode_fallback(trans, root, inode);
2480 		if (ret) /* -ENOMEM or corruption */
2481 			btrfs_abort_transaction(trans, ret);
2482 		goto out;
2483 	}
2484 
2485 	range_locked = true;
2486 	lock_extent_bits(io_tree, start, end, &cached_state);
2487 
2488 	if (freespace_inode)
2489 		trans = btrfs_join_transaction_spacecache(root);
2490 	else
2491 		trans = btrfs_join_transaction(root);
2492 	if (IS_ERR(trans)) {
2493 		ret = PTR_ERR(trans);
2494 		trans = NULL;
2495 		goto out;
2496 	}
2497 
2498 	trans->block_rsv = &BTRFS_I(inode)->block_rsv;
2499 
2500 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2501 		compress_type = ordered_extent->compress_type;
2502 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2503 		BUG_ON(compress_type);
2504 		btrfs_qgroup_free_data(inode, NULL, start,
2505 				       ordered_extent->num_bytes);
2506 		ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
2507 						ordered_extent->file_offset,
2508 						ordered_extent->file_offset +
2509 						logical_len);
2510 	} else {
2511 		BUG_ON(root == fs_info->tree_root);
2512 		ret = insert_reserved_file_extent(trans, inode, start,
2513 						ordered_extent->disk_bytenr,
2514 						ordered_extent->disk_num_bytes,
2515 						logical_len, logical_len,
2516 						compress_type, 0, 0,
2517 						BTRFS_FILE_EXTENT_REG);
2518 		if (!ret) {
2519 			clear_reserved_extent = false;
2520 			btrfs_release_delalloc_bytes(fs_info,
2521 						ordered_extent->disk_bytenr,
2522 						ordered_extent->disk_num_bytes);
2523 		}
2524 	}
2525 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2526 			   ordered_extent->file_offset,
2527 			   ordered_extent->num_bytes, trans->transid);
2528 	if (ret < 0) {
2529 		btrfs_abort_transaction(trans, ret);
2530 		goto out;
2531 	}
2532 
2533 	ret = add_pending_csums(trans, inode, &ordered_extent->list);
2534 	if (ret) {
2535 		btrfs_abort_transaction(trans, ret);
2536 		goto out;
2537 	}
2538 
2539 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2540 	ret = btrfs_update_inode_fallback(trans, root, inode);
2541 	if (ret) { /* -ENOMEM or corruption */
2542 		btrfs_abort_transaction(trans, ret);
2543 		goto out;
2544 	}
2545 	ret = 0;
2546 out:
2547 	clear_bits = EXTENT_DEFRAG;
2548 	if (range_locked)
2549 		clear_bits |= EXTENT_LOCKED;
2550 	if (clear_new_delalloc_bytes)
2551 		clear_bits |= EXTENT_DELALLOC_NEW;
2552 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits,
2553 			 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
2554 			 &cached_state);
2555 
2556 	if (trans)
2557 		btrfs_end_transaction(trans);
2558 
2559 	if (ret || truncated) {
2560 		u64 unwritten_start = start;
2561 
2562 		if (truncated)
2563 			unwritten_start += logical_len;
2564 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
2565 
2566 		/* Drop the cache for the part of the extent we didn't write. */
2567 		btrfs_drop_extent_cache(BTRFS_I(inode), unwritten_start, end, 0);
2568 
2569 		/*
2570 		 * If the ordered extent had an IOERR or something else went
2571 		 * wrong we need to return the space for this ordered extent
2572 		 * back to the allocator.  We only free the extent in the
2573 		 * truncated case if we didn't write out the extent at all.
2574 		 *
2575 		 * If we made it past insert_reserved_file_extent before we
2576 		 * errored out then we don't need to do this as the accounting
2577 		 * has already been done.
2578 		 */
2579 		if ((ret || !logical_len) &&
2580 		    clear_reserved_extent &&
2581 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2582 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2583 			/*
2584 			 * Discard the range before returning it back to the
2585 			 * free space pool
2586 			 */
2587 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
2588 				btrfs_discard_extent(fs_info,
2589 						ordered_extent->disk_bytenr,
2590 						ordered_extent->disk_num_bytes,
2591 						NULL);
2592 			btrfs_free_reserved_extent(fs_info,
2593 					ordered_extent->disk_bytenr,
2594 					ordered_extent->disk_num_bytes, 1);
2595 		}
2596 	}
2597 
2598 	/*
2599 	 * This needs to be done to make sure anybody waiting knows we are done
2600 	 * updating everything for this ordered extent.
2601 	 */
2602 	btrfs_remove_ordered_extent(inode, ordered_extent);
2603 
2604 	/* once for us */
2605 	btrfs_put_ordered_extent(ordered_extent);
2606 	/* once for the tree */
2607 	btrfs_put_ordered_extent(ordered_extent);
2608 
2609 	return ret;
2610 }
2611 
2612 static void finish_ordered_fn(struct btrfs_work *work)
2613 {
2614 	struct btrfs_ordered_extent *ordered_extent;
2615 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2616 	btrfs_finish_ordered_io(ordered_extent);
2617 }
2618 
2619 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
2620 					  u64 end, int uptodate)
2621 {
2622 	struct inode *inode = page->mapping->host;
2623 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2624 	struct btrfs_ordered_extent *ordered_extent = NULL;
2625 	struct btrfs_workqueue *wq;
2626 
2627 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2628 
2629 	ClearPagePrivate2(page);
2630 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2631 					    end - start + 1, uptodate))
2632 		return;
2633 
2634 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2635 		wq = fs_info->endio_freespace_worker;
2636 	else
2637 		wq = fs_info->endio_write_workers;
2638 
2639 	btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
2640 	btrfs_queue_work(wq, &ordered_extent->work);
2641 }
2642 
2643 static int __readpage_endio_check(struct inode *inode,
2644 				  struct btrfs_io_bio *io_bio,
2645 				  int icsum, struct page *page,
2646 				  int pgoff, u64 start, size_t len)
2647 {
2648 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2649 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2650 	char *kaddr;
2651 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2652 	u8 *csum_expected;
2653 	u8 csum[BTRFS_CSUM_SIZE];
2654 
2655 	csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size;
2656 
2657 	kaddr = kmap_atomic(page);
2658 	shash->tfm = fs_info->csum_shash;
2659 
2660 	crypto_shash_init(shash);
2661 	crypto_shash_update(shash, kaddr + pgoff, len);
2662 	crypto_shash_final(shash, csum);
2663 
2664 	if (memcmp(csum, csum_expected, csum_size))
2665 		goto zeroit;
2666 
2667 	kunmap_atomic(kaddr);
2668 	return 0;
2669 zeroit:
2670 	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
2671 				    io_bio->mirror_num);
2672 	memset(kaddr + pgoff, 1, len);
2673 	flush_dcache_page(page);
2674 	kunmap_atomic(kaddr);
2675 	return -EIO;
2676 }
2677 
2678 /*
2679  * when reads are done, we need to check csums to verify the data is correct
2680  * if there's a match, we allow the bio to finish.  If not, the code in
2681  * extent_io.c will try to find good copies for us.
2682  */
2683 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2684 				      u64 phy_offset, struct page *page,
2685 				      u64 start, u64 end, int mirror)
2686 {
2687 	size_t offset = start - page_offset(page);
2688 	struct inode *inode = page->mapping->host;
2689 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2690 	struct btrfs_root *root = BTRFS_I(inode)->root;
2691 
2692 	if (PageChecked(page)) {
2693 		ClearPageChecked(page);
2694 		return 0;
2695 	}
2696 
2697 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2698 		return 0;
2699 
2700 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2701 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2702 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
2703 		return 0;
2704 	}
2705 
2706 	phy_offset >>= inode->i_sb->s_blocksize_bits;
2707 	return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
2708 				      start, (size_t)(end - start + 1));
2709 }
2710 
2711 /*
2712  * btrfs_add_delayed_iput - perform a delayed iput on @inode
2713  *
2714  * @inode: The inode we want to perform iput on
2715  *
2716  * This function uses the generic vfs_inode::i_count to track whether we should
2717  * just decrement it (in case it's > 1) or if this is the last iput then link
2718  * the inode to the delayed iput machinery. Delayed iputs are processed at
2719  * transaction commit time/superblock commit/cleaner kthread.
2720  */
2721 void btrfs_add_delayed_iput(struct inode *inode)
2722 {
2723 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2724 	struct btrfs_inode *binode = BTRFS_I(inode);
2725 
2726 	if (atomic_add_unless(&inode->i_count, -1, 1))
2727 		return;
2728 
2729 	atomic_inc(&fs_info->nr_delayed_iputs);
2730 	spin_lock(&fs_info->delayed_iput_lock);
2731 	ASSERT(list_empty(&binode->delayed_iput));
2732 	list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
2733 	spin_unlock(&fs_info->delayed_iput_lock);
2734 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
2735 		wake_up_process(fs_info->cleaner_kthread);
2736 }
2737 
2738 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
2739 				    struct btrfs_inode *inode)
2740 {
2741 	list_del_init(&inode->delayed_iput);
2742 	spin_unlock(&fs_info->delayed_iput_lock);
2743 	iput(&inode->vfs_inode);
2744 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
2745 		wake_up(&fs_info->delayed_iputs_wait);
2746 	spin_lock(&fs_info->delayed_iput_lock);
2747 }
2748 
2749 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
2750 				   struct btrfs_inode *inode)
2751 {
2752 	if (!list_empty(&inode->delayed_iput)) {
2753 		spin_lock(&fs_info->delayed_iput_lock);
2754 		if (!list_empty(&inode->delayed_iput))
2755 			run_delayed_iput_locked(fs_info, inode);
2756 		spin_unlock(&fs_info->delayed_iput_lock);
2757 	}
2758 }
2759 
2760 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
2761 {
2762 
2763 	spin_lock(&fs_info->delayed_iput_lock);
2764 	while (!list_empty(&fs_info->delayed_iputs)) {
2765 		struct btrfs_inode *inode;
2766 
2767 		inode = list_first_entry(&fs_info->delayed_iputs,
2768 				struct btrfs_inode, delayed_iput);
2769 		run_delayed_iput_locked(fs_info, inode);
2770 	}
2771 	spin_unlock(&fs_info->delayed_iput_lock);
2772 }
2773 
2774 /**
2775  * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
2776  * @fs_info - the fs_info for this fs
2777  * @return - EINTR if we were killed, 0 if nothing's pending
2778  *
2779  * This will wait on any delayed iputs that are currently running with KILLABLE
2780  * set.  Once they are all done running we will return, unless we are killed in
2781  * which case we return EINTR. This helps in user operations like fallocate etc
2782  * that might get blocked on the iputs.
2783  */
2784 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
2785 {
2786 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
2787 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
2788 	if (ret)
2789 		return -EINTR;
2790 	return 0;
2791 }
2792 
2793 /*
2794  * This creates an orphan entry for the given inode in case something goes wrong
2795  * in the middle of an unlink.
2796  */
2797 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
2798 		     struct btrfs_inode *inode)
2799 {
2800 	int ret;
2801 
2802 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
2803 	if (ret && ret != -EEXIST) {
2804 		btrfs_abort_transaction(trans, ret);
2805 		return ret;
2806 	}
2807 
2808 	return 0;
2809 }
2810 
2811 /*
2812  * We have done the delete so we can go ahead and remove the orphan item for
2813  * this particular inode.
2814  */
2815 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
2816 			    struct btrfs_inode *inode)
2817 {
2818 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
2819 }
2820 
2821 /*
2822  * this cleans up any orphans that may be left on the list from the last use
2823  * of this root.
2824  */
2825 int btrfs_orphan_cleanup(struct btrfs_root *root)
2826 {
2827 	struct btrfs_fs_info *fs_info = root->fs_info;
2828 	struct btrfs_path *path;
2829 	struct extent_buffer *leaf;
2830 	struct btrfs_key key, found_key;
2831 	struct btrfs_trans_handle *trans;
2832 	struct inode *inode;
2833 	u64 last_objectid = 0;
2834 	int ret = 0, nr_unlink = 0;
2835 
2836 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2837 		return 0;
2838 
2839 	path = btrfs_alloc_path();
2840 	if (!path) {
2841 		ret = -ENOMEM;
2842 		goto out;
2843 	}
2844 	path->reada = READA_BACK;
2845 
2846 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2847 	key.type = BTRFS_ORPHAN_ITEM_KEY;
2848 	key.offset = (u64)-1;
2849 
2850 	while (1) {
2851 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2852 		if (ret < 0)
2853 			goto out;
2854 
2855 		/*
2856 		 * if ret == 0 means we found what we were searching for, which
2857 		 * is weird, but possible, so only screw with path if we didn't
2858 		 * find the key and see if we have stuff that matches
2859 		 */
2860 		if (ret > 0) {
2861 			ret = 0;
2862 			if (path->slots[0] == 0)
2863 				break;
2864 			path->slots[0]--;
2865 		}
2866 
2867 		/* pull out the item */
2868 		leaf = path->nodes[0];
2869 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2870 
2871 		/* make sure the item matches what we want */
2872 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2873 			break;
2874 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
2875 			break;
2876 
2877 		/* release the path since we're done with it */
2878 		btrfs_release_path(path);
2879 
2880 		/*
2881 		 * this is where we are basically btrfs_lookup, without the
2882 		 * crossing root thing.  we store the inode number in the
2883 		 * offset of the orphan item.
2884 		 */
2885 
2886 		if (found_key.offset == last_objectid) {
2887 			btrfs_err(fs_info,
2888 				  "Error removing orphan entry, stopping orphan cleanup");
2889 			ret = -EINVAL;
2890 			goto out;
2891 		}
2892 
2893 		last_objectid = found_key.offset;
2894 
2895 		found_key.objectid = found_key.offset;
2896 		found_key.type = BTRFS_INODE_ITEM_KEY;
2897 		found_key.offset = 0;
2898 		inode = btrfs_iget(fs_info->sb, &found_key, root);
2899 		ret = PTR_ERR_OR_ZERO(inode);
2900 		if (ret && ret != -ENOENT)
2901 			goto out;
2902 
2903 		if (ret == -ENOENT && root == fs_info->tree_root) {
2904 			struct btrfs_root *dead_root;
2905 			struct btrfs_fs_info *fs_info = root->fs_info;
2906 			int is_dead_root = 0;
2907 
2908 			/*
2909 			 * this is an orphan in the tree root. Currently these
2910 			 * could come from 2 sources:
2911 			 *  a) a snapshot deletion in progress
2912 			 *  b) a free space cache inode
2913 			 * We need to distinguish those two, as the snapshot
2914 			 * orphan must not get deleted.
2915 			 * find_dead_roots already ran before us, so if this
2916 			 * is a snapshot deletion, we should find the root
2917 			 * in the dead_roots list
2918 			 */
2919 			spin_lock(&fs_info->trans_lock);
2920 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2921 					    root_list) {
2922 				if (dead_root->root_key.objectid ==
2923 				    found_key.objectid) {
2924 					is_dead_root = 1;
2925 					break;
2926 				}
2927 			}
2928 			spin_unlock(&fs_info->trans_lock);
2929 			if (is_dead_root) {
2930 				/* prevent this orphan from being found again */
2931 				key.offset = found_key.objectid - 1;
2932 				continue;
2933 			}
2934 
2935 		}
2936 
2937 		/*
2938 		 * If we have an inode with links, there are a couple of
2939 		 * possibilities. Old kernels (before v3.12) used to create an
2940 		 * orphan item for truncate indicating that there were possibly
2941 		 * extent items past i_size that needed to be deleted. In v3.12,
2942 		 * truncate was changed to update i_size in sync with the extent
2943 		 * items, but the (useless) orphan item was still created. Since
2944 		 * v4.18, we don't create the orphan item for truncate at all.
2945 		 *
2946 		 * So, this item could mean that we need to do a truncate, but
2947 		 * only if this filesystem was last used on a pre-v3.12 kernel
2948 		 * and was not cleanly unmounted. The odds of that are quite
2949 		 * slim, and it's a pain to do the truncate now, so just delete
2950 		 * the orphan item.
2951 		 *
2952 		 * It's also possible that this orphan item was supposed to be
2953 		 * deleted but wasn't. The inode number may have been reused,
2954 		 * but either way, we can delete the orphan item.
2955 		 */
2956 		if (ret == -ENOENT || inode->i_nlink) {
2957 			if (!ret)
2958 				iput(inode);
2959 			trans = btrfs_start_transaction(root, 1);
2960 			if (IS_ERR(trans)) {
2961 				ret = PTR_ERR(trans);
2962 				goto out;
2963 			}
2964 			btrfs_debug(fs_info, "auto deleting %Lu",
2965 				    found_key.objectid);
2966 			ret = btrfs_del_orphan_item(trans, root,
2967 						    found_key.objectid);
2968 			btrfs_end_transaction(trans);
2969 			if (ret)
2970 				goto out;
2971 			continue;
2972 		}
2973 
2974 		nr_unlink++;
2975 
2976 		/* this will do delete_inode and everything for us */
2977 		iput(inode);
2978 	}
2979 	/* release the path since we're done with it */
2980 	btrfs_release_path(path);
2981 
2982 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2983 
2984 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
2985 		trans = btrfs_join_transaction(root);
2986 		if (!IS_ERR(trans))
2987 			btrfs_end_transaction(trans);
2988 	}
2989 
2990 	if (nr_unlink)
2991 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
2992 
2993 out:
2994 	if (ret)
2995 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
2996 	btrfs_free_path(path);
2997 	return ret;
2998 }
2999 
3000 /*
3001  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3002  * don't find any xattrs, we know there can't be any acls.
3003  *
3004  * slot is the slot the inode is in, objectid is the objectid of the inode
3005  */
3006 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3007 					  int slot, u64 objectid,
3008 					  int *first_xattr_slot)
3009 {
3010 	u32 nritems = btrfs_header_nritems(leaf);
3011 	struct btrfs_key found_key;
3012 	static u64 xattr_access = 0;
3013 	static u64 xattr_default = 0;
3014 	int scanned = 0;
3015 
3016 	if (!xattr_access) {
3017 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3018 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3019 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3020 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3021 	}
3022 
3023 	slot++;
3024 	*first_xattr_slot = -1;
3025 	while (slot < nritems) {
3026 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3027 
3028 		/* we found a different objectid, there must not be acls */
3029 		if (found_key.objectid != objectid)
3030 			return 0;
3031 
3032 		/* we found an xattr, assume we've got an acl */
3033 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3034 			if (*first_xattr_slot == -1)
3035 				*first_xattr_slot = slot;
3036 			if (found_key.offset == xattr_access ||
3037 			    found_key.offset == xattr_default)
3038 				return 1;
3039 		}
3040 
3041 		/*
3042 		 * we found a key greater than an xattr key, there can't
3043 		 * be any acls later on
3044 		 */
3045 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3046 			return 0;
3047 
3048 		slot++;
3049 		scanned++;
3050 
3051 		/*
3052 		 * it goes inode, inode backrefs, xattrs, extents,
3053 		 * so if there are a ton of hard links to an inode there can
3054 		 * be a lot of backrefs.  Don't waste time searching too hard,
3055 		 * this is just an optimization
3056 		 */
3057 		if (scanned >= 8)
3058 			break;
3059 	}
3060 	/* we hit the end of the leaf before we found an xattr or
3061 	 * something larger than an xattr.  We have to assume the inode
3062 	 * has acls
3063 	 */
3064 	if (*first_xattr_slot == -1)
3065 		*first_xattr_slot = slot;
3066 	return 1;
3067 }
3068 
3069 /*
3070  * read an inode from the btree into the in-memory inode
3071  */
3072 static int btrfs_read_locked_inode(struct inode *inode,
3073 				   struct btrfs_path *in_path)
3074 {
3075 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3076 	struct btrfs_path *path = in_path;
3077 	struct extent_buffer *leaf;
3078 	struct btrfs_inode_item *inode_item;
3079 	struct btrfs_root *root = BTRFS_I(inode)->root;
3080 	struct btrfs_key location;
3081 	unsigned long ptr;
3082 	int maybe_acls;
3083 	u32 rdev;
3084 	int ret;
3085 	bool filled = false;
3086 	int first_xattr_slot;
3087 
3088 	ret = btrfs_fill_inode(inode, &rdev);
3089 	if (!ret)
3090 		filled = true;
3091 
3092 	if (!path) {
3093 		path = btrfs_alloc_path();
3094 		if (!path)
3095 			return -ENOMEM;
3096 	}
3097 
3098 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3099 
3100 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3101 	if (ret) {
3102 		if (path != in_path)
3103 			btrfs_free_path(path);
3104 		return ret;
3105 	}
3106 
3107 	leaf = path->nodes[0];
3108 
3109 	if (filled)
3110 		goto cache_index;
3111 
3112 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3113 				    struct btrfs_inode_item);
3114 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3115 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3116 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3117 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3118 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3119 
3120 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3121 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3122 
3123 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3124 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3125 
3126 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3127 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3128 
3129 	BTRFS_I(inode)->i_otime.tv_sec =
3130 		btrfs_timespec_sec(leaf, &inode_item->otime);
3131 	BTRFS_I(inode)->i_otime.tv_nsec =
3132 		btrfs_timespec_nsec(leaf, &inode_item->otime);
3133 
3134 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3135 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3136 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3137 
3138 	inode_set_iversion_queried(inode,
3139 				   btrfs_inode_sequence(leaf, inode_item));
3140 	inode->i_generation = BTRFS_I(inode)->generation;
3141 	inode->i_rdev = 0;
3142 	rdev = btrfs_inode_rdev(leaf, inode_item);
3143 
3144 	BTRFS_I(inode)->index_cnt = (u64)-1;
3145 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3146 
3147 cache_index:
3148 	/*
3149 	 * If we were modified in the current generation and evicted from memory
3150 	 * and then re-read we need to do a full sync since we don't have any
3151 	 * idea about which extents were modified before we were evicted from
3152 	 * cache.
3153 	 *
3154 	 * This is required for both inode re-read from disk and delayed inode
3155 	 * in delayed_nodes_tree.
3156 	 */
3157 	if (BTRFS_I(inode)->last_trans == fs_info->generation)
3158 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3159 			&BTRFS_I(inode)->runtime_flags);
3160 
3161 	/*
3162 	 * We don't persist the id of the transaction where an unlink operation
3163 	 * against the inode was last made. So here we assume the inode might
3164 	 * have been evicted, and therefore the exact value of last_unlink_trans
3165 	 * lost, and set it to last_trans to avoid metadata inconsistencies
3166 	 * between the inode and its parent if the inode is fsync'ed and the log
3167 	 * replayed. For example, in the scenario:
3168 	 *
3169 	 * touch mydir/foo
3170 	 * ln mydir/foo mydir/bar
3171 	 * sync
3172 	 * unlink mydir/bar
3173 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3174 	 * xfs_io -c fsync mydir/foo
3175 	 * <power failure>
3176 	 * mount fs, triggers fsync log replay
3177 	 *
3178 	 * We must make sure that when we fsync our inode foo we also log its
3179 	 * parent inode, otherwise after log replay the parent still has the
3180 	 * dentry with the "bar" name but our inode foo has a link count of 1
3181 	 * and doesn't have an inode ref with the name "bar" anymore.
3182 	 *
3183 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3184 	 * but it guarantees correctness at the expense of occasional full
3185 	 * transaction commits on fsync if our inode is a directory, or if our
3186 	 * inode is not a directory, logging its parent unnecessarily.
3187 	 */
3188 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3189 
3190 	path->slots[0]++;
3191 	if (inode->i_nlink != 1 ||
3192 	    path->slots[0] >= btrfs_header_nritems(leaf))
3193 		goto cache_acl;
3194 
3195 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3196 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3197 		goto cache_acl;
3198 
3199 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3200 	if (location.type == BTRFS_INODE_REF_KEY) {
3201 		struct btrfs_inode_ref *ref;
3202 
3203 		ref = (struct btrfs_inode_ref *)ptr;
3204 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3205 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3206 		struct btrfs_inode_extref *extref;
3207 
3208 		extref = (struct btrfs_inode_extref *)ptr;
3209 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3210 								     extref);
3211 	}
3212 cache_acl:
3213 	/*
3214 	 * try to precache a NULL acl entry for files that don't have
3215 	 * any xattrs or acls
3216 	 */
3217 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3218 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3219 	if (first_xattr_slot != -1) {
3220 		path->slots[0] = first_xattr_slot;
3221 		ret = btrfs_load_inode_props(inode, path);
3222 		if (ret)
3223 			btrfs_err(fs_info,
3224 				  "error loading props for ino %llu (root %llu): %d",
3225 				  btrfs_ino(BTRFS_I(inode)),
3226 				  root->root_key.objectid, ret);
3227 	}
3228 	if (path != in_path)
3229 		btrfs_free_path(path);
3230 
3231 	if (!maybe_acls)
3232 		cache_no_acl(inode);
3233 
3234 	switch (inode->i_mode & S_IFMT) {
3235 	case S_IFREG:
3236 		inode->i_mapping->a_ops = &btrfs_aops;
3237 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3238 		inode->i_fop = &btrfs_file_operations;
3239 		inode->i_op = &btrfs_file_inode_operations;
3240 		break;
3241 	case S_IFDIR:
3242 		inode->i_fop = &btrfs_dir_file_operations;
3243 		inode->i_op = &btrfs_dir_inode_operations;
3244 		break;
3245 	case S_IFLNK:
3246 		inode->i_op = &btrfs_symlink_inode_operations;
3247 		inode_nohighmem(inode);
3248 		inode->i_mapping->a_ops = &btrfs_aops;
3249 		break;
3250 	default:
3251 		inode->i_op = &btrfs_special_inode_operations;
3252 		init_special_inode(inode, inode->i_mode, rdev);
3253 		break;
3254 	}
3255 
3256 	btrfs_sync_inode_flags_to_i_flags(inode);
3257 	return 0;
3258 }
3259 
3260 /*
3261  * given a leaf and an inode, copy the inode fields into the leaf
3262  */
3263 static void fill_inode_item(struct btrfs_trans_handle *trans,
3264 			    struct extent_buffer *leaf,
3265 			    struct btrfs_inode_item *item,
3266 			    struct inode *inode)
3267 {
3268 	struct btrfs_map_token token;
3269 
3270 	btrfs_init_map_token(&token, leaf);
3271 
3272 	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3273 	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3274 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3275 				   &token);
3276 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3277 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3278 
3279 	btrfs_set_token_timespec_sec(leaf, &item->atime,
3280 				     inode->i_atime.tv_sec, &token);
3281 	btrfs_set_token_timespec_nsec(leaf, &item->atime,
3282 				      inode->i_atime.tv_nsec, &token);
3283 
3284 	btrfs_set_token_timespec_sec(leaf, &item->mtime,
3285 				     inode->i_mtime.tv_sec, &token);
3286 	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3287 				      inode->i_mtime.tv_nsec, &token);
3288 
3289 	btrfs_set_token_timespec_sec(leaf, &item->ctime,
3290 				     inode->i_ctime.tv_sec, &token);
3291 	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3292 				      inode->i_ctime.tv_nsec, &token);
3293 
3294 	btrfs_set_token_timespec_sec(leaf, &item->otime,
3295 				     BTRFS_I(inode)->i_otime.tv_sec, &token);
3296 	btrfs_set_token_timespec_nsec(leaf, &item->otime,
3297 				      BTRFS_I(inode)->i_otime.tv_nsec, &token);
3298 
3299 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3300 				     &token);
3301 	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3302 					 &token);
3303 	btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
3304 				       &token);
3305 	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3306 	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3307 	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3308 	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3309 }
3310 
3311 /*
3312  * copy everything in the in-memory inode into the btree.
3313  */
3314 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3315 				struct btrfs_root *root, struct inode *inode)
3316 {
3317 	struct btrfs_inode_item *inode_item;
3318 	struct btrfs_path *path;
3319 	struct extent_buffer *leaf;
3320 	int ret;
3321 
3322 	path = btrfs_alloc_path();
3323 	if (!path)
3324 		return -ENOMEM;
3325 
3326 	path->leave_spinning = 1;
3327 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3328 				 1);
3329 	if (ret) {
3330 		if (ret > 0)
3331 			ret = -ENOENT;
3332 		goto failed;
3333 	}
3334 
3335 	leaf = path->nodes[0];
3336 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3337 				    struct btrfs_inode_item);
3338 
3339 	fill_inode_item(trans, leaf, inode_item, inode);
3340 	btrfs_mark_buffer_dirty(leaf);
3341 	btrfs_set_inode_last_trans(trans, inode);
3342 	ret = 0;
3343 failed:
3344 	btrfs_free_path(path);
3345 	return ret;
3346 }
3347 
3348 /*
3349  * copy everything in the in-memory inode into the btree (delayed when safe).
3350  */
3351 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3352 				struct btrfs_root *root, struct inode *inode)
3353 {
3354 	struct btrfs_fs_info *fs_info = root->fs_info;
3355 	int ret;
3356 
3357 	/*
3358 	 * If the inode is a free space inode, we can deadlock during commit
3359 	 * if we put it into the delayed code.
3360 	 *
3361 	 * The data relocation inode should also be updated directly,
3362 	 * without going through the delayed items code.
3363 	 */
3364 	if (!btrfs_is_free_space_inode(BTRFS_I(inode))
3365 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3366 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3367 		btrfs_update_root_times(trans, root);
3368 
3369 		ret = btrfs_delayed_update_inode(trans, root, inode);
3370 		if (!ret)
3371 			btrfs_set_inode_last_trans(trans, inode);
3372 		return ret;
3373 	}
3374 
3375 	return btrfs_update_inode_item(trans, root, inode);
3376 }
3377 
3378 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3379 					 struct btrfs_root *root,
3380 					 struct inode *inode)
3381 {
3382 	int ret;
3383 
3384 	ret = btrfs_update_inode(trans, root, inode);
3385 	if (ret == -ENOSPC)
3386 		return btrfs_update_inode_item(trans, root, inode);
3387 	return ret;
3388 }
3389 
3390 /*
3391  * unlink helper that gets used here in inode.c and in the tree logging
3392  * recovery code.  It removes a link in a directory with a given name, and
3393  * also drops the back refs in the inode to the directory.
3394  */
3395 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3396 				struct btrfs_root *root,
3397 				struct btrfs_inode *dir,
3398 				struct btrfs_inode *inode,
3399 				const char *name, int name_len)
3400 {
3401 	struct btrfs_fs_info *fs_info = root->fs_info;
3402 	struct btrfs_path *path;
3403 	int ret = 0;
3404 	struct btrfs_dir_item *di;
3405 	u64 index;
3406 	u64 ino = btrfs_ino(inode);
3407 	u64 dir_ino = btrfs_ino(dir);
3408 
3409 	path = btrfs_alloc_path();
3410 	if (!path) {
3411 		ret = -ENOMEM;
3412 		goto out;
3413 	}
3414 
3415 	path->leave_spinning = 1;
3416 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3417 				    name, name_len, -1);
3418 	if (IS_ERR_OR_NULL(di)) {
3419 		ret = di ? PTR_ERR(di) : -ENOENT;
3420 		goto err;
3421 	}
3422 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3423 	if (ret)
3424 		goto err;
3425 	btrfs_release_path(path);
3426 
3427 	/*
3428 	 * If we don't have the dir index cached, we have to get it by looking
3429 	 * up the inode ref; since we have found the inode ref anyway, we
3430 	 * remove it directly and there is no need for delayed deletion.
3431 	 *
3432 	 * But if we do have the dir index, there is no need to search for the
3433 	 * inode ref. Since the inode ref is close to the inode item, it is
3434 	 * better to delay its deletion and do it when we update the inode
3435 	 * item.
3436 	 */
3437 	if (inode->dir_index) {
3438 		ret = btrfs_delayed_delete_inode_ref(inode);
3439 		if (!ret) {
3440 			index = inode->dir_index;
3441 			goto skip_backref;
3442 		}
3443 	}
3444 
3445 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3446 				  dir_ino, &index);
3447 	if (ret) {
3448 		btrfs_info(fs_info,
3449 			"failed to delete reference to %.*s, inode %llu parent %llu",
3450 			name_len, name, ino, dir_ino);
3451 		btrfs_abort_transaction(trans, ret);
3452 		goto err;
3453 	}
3454 skip_backref:
3455 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
3456 	if (ret) {
3457 		btrfs_abort_transaction(trans, ret);
3458 		goto err;
3459 	}
3460 
3461 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
3462 			dir_ino);
3463 	if (ret != 0 && ret != -ENOENT) {
3464 		btrfs_abort_transaction(trans, ret);
3465 		goto err;
3466 	}
3467 
3468 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
3469 			index);
3470 	if (ret == -ENOENT)
3471 		ret = 0;
3472 	else if (ret)
3473 		btrfs_abort_transaction(trans, ret);
3474 
3475 	/*
3476 	 * If we have a pending delayed iput we could end up with the final iput
3477 	 * being run in btrfs-cleaner context.  If we have enough of these built
3478 	 * up we can end up burning a lot of time in btrfs-cleaner without any
3479 	 * way to throttle the unlinks.  Since we're currently holding a ref on
3480 	 * the inode we can run the delayed iput here without any issues as the
3481 	 * final iput won't be done until after we drop the ref we're currently
3482 	 * holding.
3483 	 */
3484 	btrfs_run_delayed_iput(fs_info, inode);
3485 err:
3486 	btrfs_free_path(path);
3487 	if (ret)
3488 		goto out;
3489 
3490 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
3491 	inode_inc_iversion(&inode->vfs_inode);
3492 	inode_inc_iversion(&dir->vfs_inode);
3493 	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
3494 		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
3495 	ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
3496 out:
3497 	return ret;
3498 }
3499 
3500 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3501 		       struct btrfs_root *root,
3502 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
3503 		       const char *name, int name_len)
3504 {
3505 	int ret;
3506 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3507 	if (!ret) {
3508 		drop_nlink(&inode->vfs_inode);
3509 		ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
3510 	}
3511 	return ret;
3512 }
3513 
3514 /*
3515  * helper to start transaction for unlink and rmdir.
3516  *
3517  * unlink and rmdir are special in btrfs: they do not always free space, so
3518  * if we cannot make our reservation the normal way, try to see if there is
3519  * plenty of slack room in the global reserve to migrate from; otherwise we
3520  * cannot allow the unlink to occur.
3521  */
3522 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
3523 {
3524 	struct btrfs_root *root = BTRFS_I(dir)->root;
3525 
3526 	/*
3527 	 * 1 for the possible orphan item
3528 	 * 1 for the dir item
3529 	 * 1 for the dir index
3530 	 * 1 for the inode ref
3531 	 * 1 for the inode
3532 	 */
3533 	return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
3534 }
3535 
3536 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3537 {
3538 	struct btrfs_root *root = BTRFS_I(dir)->root;
3539 	struct btrfs_trans_handle *trans;
3540 	struct inode *inode = d_inode(dentry);
3541 	int ret;
3542 
3543 	trans = __unlink_start_trans(dir);
3544 	if (IS_ERR(trans))
3545 		return PTR_ERR(trans);
3546 
3547 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
3548 			0);
3549 
3550 	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
3551 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
3552 			dentry->d_name.len);
3553 	if (ret)
3554 		goto out;
3555 
3556 	if (inode->i_nlink == 0) {
3557 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
3558 		if (ret)
3559 			goto out;
3560 	}
3561 
3562 out:
3563 	btrfs_end_transaction(trans);
3564 	btrfs_btree_balance_dirty(root->fs_info);
3565 	return ret;
3566 }
3567 
3568 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3569 			       struct inode *dir, struct dentry *dentry)
3570 {
3571 	struct btrfs_root *root = BTRFS_I(dir)->root;
3572 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
3573 	struct btrfs_path *path;
3574 	struct extent_buffer *leaf;
3575 	struct btrfs_dir_item *di;
3576 	struct btrfs_key key;
3577 	const char *name = dentry->d_name.name;
3578 	int name_len = dentry->d_name.len;
3579 	u64 index;
3580 	int ret;
3581 	u64 objectid;
3582 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
3583 
3584 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
3585 		objectid = inode->root->root_key.objectid;
3586 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
3587 		objectid = inode->location.objectid;
3588 	} else {
3589 		WARN_ON(1);
3590 		return -EINVAL;
3591 	}
3592 
3593 	path = btrfs_alloc_path();
3594 	if (!path)
3595 		return -ENOMEM;
3596 
3597 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3598 				   name, name_len, -1);
3599 	if (IS_ERR_OR_NULL(di)) {
3600 		ret = di ? PTR_ERR(di) : -ENOENT;
3601 		goto out;
3602 	}
3603 
3604 	leaf = path->nodes[0];
3605 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3606 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3607 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3608 	if (ret) {
3609 		btrfs_abort_transaction(trans, ret);
3610 		goto out;
3611 	}
3612 	btrfs_release_path(path);
3613 
3614 	/*
3615 	 * This is a placeholder inode for a subvolume we didn't have a
3616 	 * reference to at the time of the snapshot creation.  In the meantime
3617 	 * we could have renamed the real subvol link into our snapshot, so
3618 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorret.
3619 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
3620 	 * Instead, simply look up the dir_index_item for this entry so we can
3621 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
3622 	 */
3623 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
3624 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3625 						 name, name_len);
3626 		if (IS_ERR_OR_NULL(di)) {
3627 			if (!di)
3628 				ret = -ENOENT;
3629 			else
3630 				ret = PTR_ERR(di);
3631 			btrfs_abort_transaction(trans, ret);
3632 			goto out;
3633 		}
3634 
3635 		leaf = path->nodes[0];
3636 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3637 		index = key.offset;
3638 		btrfs_release_path(path);
3639 	} else {
3640 		ret = btrfs_del_root_ref(trans, objectid,
3641 					 root->root_key.objectid, dir_ino,
3642 					 &index, name, name_len);
3643 		if (ret) {
3644 			btrfs_abort_transaction(trans, ret);
3645 			goto out;
3646 		}
3647 	}
3648 
3649 	ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
3650 	if (ret) {
3651 		btrfs_abort_transaction(trans, ret);
3652 		goto out;
3653 	}
3654 
3655 	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
3656 	inode_inc_iversion(dir);
3657 	dir->i_mtime = dir->i_ctime = current_time(dir);
3658 	ret = btrfs_update_inode_fallback(trans, root, dir);
3659 	if (ret)
3660 		btrfs_abort_transaction(trans, ret);
3661 out:
3662 	btrfs_free_path(path);
3663 	return ret;
3664 }
3665 
3666 /*
3667  * Helper to check if the subvolume references other subvolumes or if it's
3668  * default.
3669  * the default subvolume.
3670 static noinline int may_destroy_subvol(struct btrfs_root *root)
3671 {
3672 	struct btrfs_fs_info *fs_info = root->fs_info;
3673 	struct btrfs_path *path;
3674 	struct btrfs_dir_item *di;
3675 	struct btrfs_key key;
3676 	u64 dir_id;
3677 	int ret;
3678 
3679 	path = btrfs_alloc_path();
3680 	if (!path)
3681 		return -ENOMEM;
3682 
3683 	/* Make sure this root isn't set as the default subvol */
3684 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
3685 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
3686 				   dir_id, "default", 7, 0);
3687 	if (di && !IS_ERR(di)) {
3688 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
3689 		if (key.objectid == root->root_key.objectid) {
3690 			ret = -EPERM;
3691 			btrfs_err(fs_info,
3692 				  "deleting default subvolume %llu is not allowed",
3693 				  key.objectid);
3694 			goto out;
3695 		}
3696 		btrfs_release_path(path);
3697 	}
3698 
3699 	key.objectid = root->root_key.objectid;
3700 	key.type = BTRFS_ROOT_REF_KEY;
3701 	key.offset = (u64)-1;
3702 
3703 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3704 	if (ret < 0)
3705 		goto out;
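	/*
	 * An exact match on offset (u64)-1 is impossible, so ret must be > 0
	 * here, and the previous slot, if one exists, holds the highest
	 * ROOT_REF key for this root.
	 */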
3706 	BUG_ON(ret == 0);
3707 
3708 	ret = 0;
3709 	if (path->slots[0] > 0) {
3710 		path->slots[0]--;
3711 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3712 		if (key.objectid == root->root_key.objectid &&
3713 		    key.type == BTRFS_ROOT_REF_KEY)
3714 			ret = -ENOTEMPTY;
3715 	}
3716 out:
3717 	btrfs_free_path(path);
3718 	return ret;
3719 }
3720 
3721 /* Delete all dentries for inodes belonging to the root */
3722 static void btrfs_prune_dentries(struct btrfs_root *root)
3723 {
3724 	struct btrfs_fs_info *fs_info = root->fs_info;
3725 	struct rb_node *node;
3726 	struct rb_node *prev;
3727 	struct btrfs_inode *entry;
3728 	struct inode *inode;
3729 	u64 objectid = 0;
3730 
3731 	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3732 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3733 
3734 	spin_lock(&root->inode_lock);
3735 again:
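	/*
	 * Find the leftmost inode with an inode number >= objectid.  Whenever
	 * we drop root->inode_lock below, the tree can change under us, so we
	 * remember the next inode number in objectid and restart the walk
	 * from the top.
	 */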
3736 	node = root->inode_tree.rb_node;
3737 	prev = NULL;
3738 	while (node) {
3739 		prev = node;
3740 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3741 
3742 		if (objectid < btrfs_ino(entry))
3743 			node = node->rb_left;
3744 		else if (objectid > btrfs_ino(entry))
3745 			node = node->rb_right;
3746 		else
3747 			break;
3748 	}
3749 	if (!node) {
3750 		while (prev) {
3751 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3752 			if (objectid <= btrfs_ino(entry)) {
3753 				node = prev;
3754 				break;
3755 			}
3756 			prev = rb_next(prev);
3757 		}
3758 	}
3759 	while (node) {
3760 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3761 		objectid = btrfs_ino(entry) + 1;
3762 		inode = igrab(&entry->vfs_inode);
3763 		if (inode) {
3764 			spin_unlock(&root->inode_lock);
3765 			if (atomic_read(&inode->i_count) > 1)
3766 				d_prune_aliases(inode);
3767 			/*
3768 			 * btrfs_drop_inode will have it removed from the inode
3769 			 * cache when its usage count hits zero.
3770 			 */
3771 			iput(inode);
3772 			cond_resched();
3773 			spin_lock(&root->inode_lock);
3774 			goto again;
3775 		}
3776 
3777 		if (cond_resched_lock(&root->inode_lock))
3778 			goto again;
3779 
3780 		node = rb_next(node);
3781 	}
3782 	spin_unlock(&root->inode_lock);
3783 }
3784 
3785 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
3786 {
3787 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
3788 	struct btrfs_root *root = BTRFS_I(dir)->root;
3789 	struct inode *inode = d_inode(dentry);
3790 	struct btrfs_root *dest = BTRFS_I(inode)->root;
3791 	struct btrfs_trans_handle *trans;
3792 	struct btrfs_block_rsv block_rsv;
3793 	u64 root_flags;
3794 	int ret;
3795 	int err;
3796 
3797 	/*
3798 	 * Don't allow deleting a subvolume while a send is in progress. This is
3799 	 * inside the inode lock so the error handling that has to drop the bit
3800 	 * again is not run concurrently.
3801 	 */
3802 	spin_lock(&dest->root_item_lock);
3803 	if (dest->send_in_progress) {
3804 		spin_unlock(&dest->root_item_lock);
3805 		btrfs_warn(fs_info,
3806 			   "attempt to delete subvolume %llu during send",
3807 			   dest->root_key.objectid);
3808 		return -EPERM;
3809 	}
3810 	root_flags = btrfs_root_flags(&dest->root_item);
3811 	btrfs_set_root_flags(&dest->root_item,
3812 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
3813 	spin_unlock(&dest->root_item_lock);
3814 
3815 	down_write(&fs_info->subvol_sem);
3816 
3817 	err = may_destroy_subvol(dest);
3818 	if (err)
3819 		goto out_up_write;
3820 
3821 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
3822 	/*
3823 	 * One for dir inode,
3824 	 * two for dir entries,
3825 	 * two for root ref/backref.
3826 	 */
3827 	err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
3828 	if (err)
3829 		goto out_up_write;
3830 
3831 	trans = btrfs_start_transaction(root, 0);
3832 	if (IS_ERR(trans)) {
3833 		err = PTR_ERR(trans);
3834 		goto out_release;
3835 	}
3836 	trans->block_rsv = &block_rsv;
3837 	trans->bytes_reserved = block_rsv.size;
3838 
3839 	btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
3840 
3841 	ret = btrfs_unlink_subvol(trans, dir, dentry);
3842 	if (ret) {
3843 		err = ret;
3844 		btrfs_abort_transaction(trans, ret);
3845 		goto out_end_trans;
3846 	}
3847 
3848 	btrfs_record_root_in_trans(trans, dest);
3849 
3850 	memset(&dest->root_item.drop_progress, 0,
3851 		sizeof(dest->root_item.drop_progress));
3852 	dest->root_item.drop_level = 0;
3853 	btrfs_set_root_refs(&dest->root_item, 0);
3854 
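	/*
	 * With the root refs now zero, insert an orphan item for the root so
	 * that the cleaner thread can finish dropping the subvolume tree even
	 * if we crash before the drop completes.
	 */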
3855 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
3856 		ret = btrfs_insert_orphan_item(trans,
3857 					fs_info->tree_root,
3858 					dest->root_key.objectid);
3859 		if (ret) {
3860 			btrfs_abort_transaction(trans, ret);
3861 			err = ret;
3862 			goto out_end_trans;
3863 		}
3864 	}
3865 
3866 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
3867 				  BTRFS_UUID_KEY_SUBVOL,
3868 				  dest->root_key.objectid);
3869 	if (ret && ret != -ENOENT) {
3870 		btrfs_abort_transaction(trans, ret);
3871 		err = ret;
3872 		goto out_end_trans;
3873 	}
3874 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
3875 		ret = btrfs_uuid_tree_remove(trans,
3876 					  dest->root_item.received_uuid,
3877 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3878 					  dest->root_key.objectid);
3879 		if (ret && ret != -ENOENT) {
3880 			btrfs_abort_transaction(trans, ret);
3881 			err = ret;
3882 			goto out_end_trans;
3883 		}
3884 	}
3885 
3886 out_end_trans:
3887 	trans->block_rsv = NULL;
3888 	trans->bytes_reserved = 0;
3889 	ret = btrfs_end_transaction(trans);
3890 	if (ret && !err)
3891 		err = ret;
3892 	inode->i_flags |= S_DEAD;
3893 out_release:
3894 	btrfs_subvolume_release_metadata(fs_info, &block_rsv);
3895 out_up_write:
3896 	up_write(&fs_info->subvol_sem);
3897 	if (err) {
3898 		spin_lock(&dest->root_item_lock);
3899 		root_flags = btrfs_root_flags(&dest->root_item);
3900 		btrfs_set_root_flags(&dest->root_item,
3901 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
3902 		spin_unlock(&dest->root_item_lock);
3903 	} else {
3904 		d_invalidate(dentry);
3905 		btrfs_prune_dentries(dest);
3906 		ASSERT(dest->send_in_progress == 0);
3907 
3908 		/* the last ref */
3909 		if (dest->ino_cache_inode) {
3910 			iput(dest->ino_cache_inode);
3911 			dest->ino_cache_inode = NULL;
3912 		}
3913 	}
3914 
3915 	return err;
3916 }
3917 
3918 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3919 {
3920 	struct inode *inode = d_inode(dentry);
3921 	int err = 0;
3922 	struct btrfs_root *root = BTRFS_I(dir)->root;
3923 	struct btrfs_trans_handle *trans;
3924 	u64 last_unlink_trans;
3925 
3926 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3927 		return -ENOTEMPTY;
3928 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
3929 		return btrfs_delete_subvolume(dir, dentry);
3930 
3931 	trans = __unlink_start_trans(dir);
3932 	if (IS_ERR(trans))
3933 		return PTR_ERR(trans);
3934 
3935 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3936 		err = btrfs_unlink_subvol(trans, dir, dentry);
3937 		goto out;
3938 	}
3939 
3940 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
3941 	if (err)
3942 		goto out;
3943 
3944 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
3945 
3946 	/* now the directory is empty */
3947 	err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
3948 			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
3949 			dentry->d_name.len);
3950 	if (!err) {
3951 		btrfs_i_size_write(BTRFS_I(inode), 0);
3952 		/*
3953 		 * Propagate the last_unlink_trans value of the deleted dir to
3954 		 * its parent directory. This is to prevent an unrecoverable
3955 		 * log tree in the case we do something like this:
3956 		 * 1) create dir foo
3957 		 * 2) create snapshot under dir foo
3958 		 * 3) delete the snapshot
3959 		 * 4) rmdir foo
3960 		 * 5) mkdir foo
3961 		 * 6) fsync foo or some file inside foo
3962 		 */
3963 		if (last_unlink_trans >= trans->transid)
3964 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
3965 	}
3966 out:
3967 	btrfs_end_transaction(trans);
3968 	btrfs_btree_balance_dirty(root->fs_info);
3969 
3970 	return err;
3971 }
3972 
3973 /*
3974  * Return this if we need to call truncate_block for the last bit of the
3975  * truncate.
3976  */
3977 #define NEED_TRUNCATE_BLOCK 1
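/*
 * Callers of btrfs_truncate_inode_items() (e.g. btrfs_truncate(), later in
 * this file) are expected to handle this value by zeroing the trailing
 * partial block with btrfs_truncate_block().
 */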
3978 
3979 /*
3980  * this can truncate away extent items, csum items and directory items.
3981  * It starts at a high offset and removes keys until it can't find
3982  * any higher than new_size
3983  *
3984  * csum items that cross the new i_size are truncated to the new size
3985  * as well.
3986  *
3987  * min_type is the minimum key type to truncate down to.  If set to 0, this
3988  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3989  */
3990 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3991 			       struct btrfs_root *root,
3992 			       struct inode *inode,
3993 			       u64 new_size, u32 min_type)
3994 {
3995 	struct btrfs_fs_info *fs_info = root->fs_info;
3996 	struct btrfs_path *path;
3997 	struct extent_buffer *leaf;
3998 	struct btrfs_file_extent_item *fi;
3999 	struct btrfs_key key;
4000 	struct btrfs_key found_key;
4001 	u64 extent_start = 0;
4002 	u64 extent_num_bytes = 0;
4003 	u64 extent_offset = 0;
4004 	u64 item_end = 0;
4005 	u64 last_size = new_size;
4006 	u32 found_type = (u8)-1;
4007 	int found_extent;
4008 	int del_item;
4009 	int pending_del_nr = 0;
4010 	int pending_del_slot = 0;
4011 	int extent_type = -1;
4012 	int ret;
4013 	u64 ino = btrfs_ino(BTRFS_I(inode));
4014 	u64 bytes_deleted = 0;
4015 	bool be_nice = false;
4016 	bool should_throttle = false;
4017 
4018 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4019 
4020 	/*
4021 	 * For non-free-space inodes and roots with REF_COWS set, we want to
4022 	 * back off from time to time.
4023 	 */
4024 	if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4025 	    test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4026 		be_nice = true;
4027 
4028 	path = btrfs_alloc_path();
4029 	if (!path)
4030 		return -ENOMEM;
4031 	path->reada = READA_BACK;
4032 
4033 	/*
4034 	 * We want to drop from the next block forward in case this new size is
4035 	 * not block aligned since we will be keeping the last block of the
4036 	 * extent just the way it is.
4037 	 */
4038 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4039 	    root == fs_info->tree_root)
4040 		btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4041 					fs_info->sectorsize),
4042 					(u64)-1, 0);
4043 
4044 	/*
4045 	 * This function is also used to drop the items in the log tree before
4046 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means we
4047 	 * are dropping the logged items, and we shouldn't kill the delayed
4048 	 * items.
4049 	 */
4050 	if (min_type == 0 && root == BTRFS_I(inode)->root)
4051 		btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4052 
4053 	key.objectid = ino;
4054 	key.offset = (u64)-1;
4055 	key.type = (u8)-1;
4056 
4057 search_again:
4058 	/*
4059 	 * with a 16K leaf size and 128MB extents, you can actually queue
4060 	 * up a huge file in a single leaf.  Most of the time when
4061 	 * bytes_deleted is > 0, it will be huge by the time we get here.
4062 	 */
4063 	if (be_nice && bytes_deleted > SZ_32M &&
4064 	    btrfs_should_end_transaction(trans)) {
4065 		ret = -EAGAIN;
4066 		goto out;
4067 	}
4068 
4069 	path->leave_spinning = 1;
4070 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4071 	if (ret < 0)
4072 		goto out;
4073 
4074 	if (ret > 0) {
4075 		ret = 0;
4076 		/* there are no items in the tree for us to truncate, we're
4077 		 * done
4078 		 */
4079 		if (path->slots[0] == 0)
4080 			goto out;
4081 		path->slots[0]--;
4082 	}
4083 
4084 	while (1) {
4085 		fi = NULL;
4086 		leaf = path->nodes[0];
4087 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4088 		found_type = found_key.type;
4089 
4090 		if (found_key.objectid != ino)
4091 			break;
4092 
4093 		if (found_type < min_type)
4094 			break;
4095 
4096 		item_end = found_key.offset;
4097 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
4098 			fi = btrfs_item_ptr(leaf, path->slots[0],
4099 					    struct btrfs_file_extent_item);
4100 			extent_type = btrfs_file_extent_type(leaf, fi);
4101 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4102 				item_end +=
4103 				    btrfs_file_extent_num_bytes(leaf, fi);
4104 
4105 				trace_btrfs_truncate_show_fi_regular(
4106 					BTRFS_I(inode), leaf, fi,
4107 					found_key.offset);
4108 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4109 				item_end += btrfs_file_extent_ram_bytes(leaf,
4110 									fi);
4111 
4112 				trace_btrfs_truncate_show_fi_inline(
4113 					BTRFS_I(inode), leaf, fi, path->slots[0],
4114 					found_key.offset);
4115 			}
4116 			item_end--;
4117 		}
4118 		if (found_type > min_type) {
4119 			del_item = 1;
4120 		} else {
4121 			if (item_end < new_size)
4122 				break;
4123 			if (found_key.offset >= new_size)
4124 				del_item = 1;
4125 			else
4126 				del_item = 0;
4127 		}
4128 		found_extent = 0;
4129 		/* FIXME, shrink the extent if the ref count is only 1 */
4130 		if (found_type != BTRFS_EXTENT_DATA_KEY)
4131 			goto delete;
4132 
4133 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4134 			u64 num_dec;
4135 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4136 			if (!del_item) {
4137 				u64 orig_num_bytes =
4138 					btrfs_file_extent_num_bytes(leaf, fi);
4139 				extent_num_bytes = ALIGN(new_size -
4140 						found_key.offset,
4141 						fs_info->sectorsize);
4142 				btrfs_set_file_extent_num_bytes(leaf, fi,
4143 							 extent_num_bytes);
4144 				num_dec = (orig_num_bytes -
4145 					   extent_num_bytes);
4146 				if (test_bit(BTRFS_ROOT_REF_COWS,
4147 					     &root->state) &&
4148 				    extent_start != 0)
4149 					inode_sub_bytes(inode, num_dec);
4150 				btrfs_mark_buffer_dirty(leaf);
4151 			} else {
4152 				extent_num_bytes =
4153 					btrfs_file_extent_disk_num_bytes(leaf,
4154 									 fi);
4155 				extent_offset = found_key.offset -
4156 					btrfs_file_extent_offset(leaf, fi);
4157 
4158 				/* FIXME blocksize != 4096 */
4159 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4160 				if (extent_start != 0) {
4161 					found_extent = 1;
4162 					if (test_bit(BTRFS_ROOT_REF_COWS,
4163 						     &root->state))
4164 						inode_sub_bytes(inode, num_dec);
4165 				}
4166 			}
4167 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4168 			/*
4169 			 * we can't truncate inline items that have had
4170 			 * special encodings
4171 			 */
4172 			if (!del_item &&
4173 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4174 			    btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4175 			    btrfs_file_extent_compression(leaf, fi) == 0) {
4176 				u32 size = (u32)(new_size - found_key.offset);
4177 
4178 				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4179 				size = btrfs_file_extent_calc_inline_size(size);
4180 				btrfs_truncate_item(path, size, 1);
4181 			} else if (!del_item) {
4182 				/*
4183 				 * We have to bail so the last_size is set to
4184 				 * just before this extent.
4185 				 */
4186 				ret = NEED_TRUNCATE_BLOCK;
4187 				break;
4188 			}
4189 
4190 			if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4191 				inode_sub_bytes(inode, item_end + 1 - new_size);
4192 		}
4193 delete:
4194 		if (del_item)
4195 			last_size = found_key.offset;
4196 		else
4197 			last_size = new_size;
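		/*
		 * Batch deletions: contiguous leaf slots accumulate in
		 * pending_del_slot/pending_del_nr and are removed with a
		 * single btrfs_del_items() call once the run is broken.
		 */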
4198 		if (del_item) {
4199 			if (!pending_del_nr) {
4200 				/* no pending yet, add ourselves */
4201 				pending_del_slot = path->slots[0];
4202 				pending_del_nr = 1;
4203 			} else if (pending_del_nr &&
4204 				   path->slots[0] + 1 == pending_del_slot) {
4205 				/* hop on the pending chunk */
4206 				pending_del_nr++;
4207 				pending_del_slot = path->slots[0];
4208 			} else {
4209 				BUG();
4210 			}
4211 		} else {
4212 			break;
4213 		}
4214 		should_throttle = false;
4215 
4216 		if (found_extent &&
4217 		    (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4218 		     root == fs_info->tree_root)) {
4219 			struct btrfs_ref ref = { 0 };
4220 
4221 			btrfs_set_path_blocking(path);
4222 			bytes_deleted += extent_num_bytes;
4223 
4224 			btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
4225 					extent_start, extent_num_bytes, 0);
4226 			ref.real_root = root->root_key.objectid;
4227 			btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
4228 					ino, extent_offset);
4229 			ret = btrfs_free_extent(trans, &ref);
4230 			if (ret) {
4231 				btrfs_abort_transaction(trans, ret);
4232 				break;
4233 			}
4234 			if (be_nice) {
4235 				if (btrfs_should_throttle_delayed_refs(trans))
4236 					should_throttle = true;
4237 			}
4238 		}
4239 
4240 		if (found_type == BTRFS_INODE_ITEM_KEY)
4241 			break;
4242 
4243 		if (path->slots[0] == 0 ||
4244 		    path->slots[0] != pending_del_slot ||
4245 		    should_throttle) {
4246 			if (pending_del_nr) {
4247 				ret = btrfs_del_items(trans, root, path,
4248 						pending_del_slot,
4249 						pending_del_nr);
4250 				if (ret) {
4251 					btrfs_abort_transaction(trans, ret);
4252 					break;
4253 				}
4254 				pending_del_nr = 0;
4255 			}
4256 			btrfs_release_path(path);
4257 
4258 			/*
4259 			 * We can generate a lot of delayed refs, so we need to
4260 			 * throttle every once in a while and make sure we're
4261 			 * adding enough space to keep up with the work we are
4262 			 * generating.  Since we hold a transaction here we
4263 			 * can't flush, and we don't want to FLUSH_LIMIT because
4264 			 * we could have generated too many delayed refs to
4265 			 * actually allocate, so just bail if we're short and
4266 			 * let the normal reservation dance happen higher up.
4267 			 */
4268 			if (should_throttle) {
4269 				ret = btrfs_delayed_refs_rsv_refill(fs_info,
4270 							BTRFS_RESERVE_NO_FLUSH);
4271 				if (ret) {
4272 					ret = -EAGAIN;
4273 					break;
4274 				}
4275 			}
4276 			goto search_again;
4277 		} else {
4278 			path->slots[0]--;
4279 		}
4280 	}
4281 out:
4282 	if (ret >= 0 && pending_del_nr) {
4283 		int err;
4284 
4285 		err = btrfs_del_items(trans, root, path, pending_del_slot,
4286 				      pending_del_nr);
4287 		if (err) {
4288 			btrfs_abort_transaction(trans, err);
4289 			ret = err;
4290 		}
4291 	}
4292 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4293 		ASSERT(last_size >= new_size);
4294 		if (!ret && last_size > new_size)
4295 			last_size = new_size;
4296 		btrfs_ordered_update_i_size(inode, last_size, NULL);
4297 	}
4298 
4299 	btrfs_free_path(path);
4300 	return ret;
4301 }
4302 
4303 /*
4304  * btrfs_truncate_block - read, zero a chunk and write a block
4305  * @inode - inode that we're zeroing
4306  * @from - the offset to start zeroing
4307  * @len - the length to zero, 0 to zero the entire range relative to the
4308  *	offset
4309  * @front - zero up to the offset instead of from the offset on
4310  *
4311  * This will find the block for the "from" offset, COW the block, and zero the
4312  * part we want to zero.  This is used with truncate and hole punching.
4313  */
4314 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4315 			int front)
4316 {
4317 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4318 	struct address_space *mapping = inode->i_mapping;
4319 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4320 	struct btrfs_ordered_extent *ordered;
4321 	struct extent_state *cached_state = NULL;
4322 	struct extent_changeset *data_reserved = NULL;
4323 	char *kaddr;
4324 	u32 blocksize = fs_info->sectorsize;
4325 	pgoff_t index = from >> PAGE_SHIFT;
4326 	unsigned offset = from & (blocksize - 1);
4327 	struct page *page;
4328 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4329 	int ret = 0;
4330 	u64 block_start;
4331 	u64 block_end;
4332 
4333 	if (IS_ALIGNED(offset, blocksize) &&
4334 	    (!len || IS_ALIGNED(len, blocksize)))
4335 		goto out;
4336 
4337 	block_start = round_down(from, blocksize);
4338 	block_end = block_start + blocksize - 1;
4339 
4340 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4341 					   block_start, blocksize);
4342 	if (ret)
4343 		goto out;
4344 
4345 again:
4346 	page = find_or_create_page(mapping, index, mask);
4347 	if (!page) {
4348 		btrfs_delalloc_release_space(inode, data_reserved,
4349 					     block_start, blocksize, true);
4350 		btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
4351 		ret = -ENOMEM;
4352 		goto out;
4353 	}
4354 
4355 	if (!PageUptodate(page)) {
4356 		ret = btrfs_readpage(NULL, page);
4357 		lock_page(page);
4358 		if (page->mapping != mapping) {
4359 			unlock_page(page);
4360 			put_page(page);
4361 			goto again;
4362 		}
4363 		if (!PageUptodate(page)) {
4364 			ret = -EIO;
4365 			goto out_unlock;
4366 		}
4367 	}
4368 	wait_on_page_writeback(page);
4369 
4370 	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4371 	set_page_extent_mapped(page);
4372 
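	/*
	 * If an ordered extent overlaps this block, its IO has not completed
	 * yet, so drop our locks, wait for it to finish and retry from the
	 * page lookup.
	 */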
4373 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4374 	if (ordered) {
4375 		unlock_extent_cached(io_tree, block_start, block_end,
4376 				     &cached_state);
4377 		unlock_page(page);
4378 		put_page(page);
4379 		btrfs_start_ordered_extent(inode, ordered, 1);
4380 		btrfs_put_ordered_extent(ordered);
4381 		goto again;
4382 	}
4383 
4384 	clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4385 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4386 			 0, 0, &cached_state);
4387 
4388 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4389 					&cached_state);
4390 	if (ret) {
4391 		unlock_extent_cached(io_tree, block_start, block_end,
4392 				     &cached_state);
4393 		goto out_unlock;
4394 	}
4395 
4396 	if (offset != blocksize) {
4397 		if (!len)
4398 			len = blocksize - offset;
4399 		kaddr = kmap(page);
4400 		if (front)
4401 			memset(kaddr + (block_start - page_offset(page)),
4402 				0, offset);
4403 		else
4404 			memset(kaddr + (block_start - page_offset(page)) + offset,
4405 				0, len);
4406 		flush_dcache_page(page);
4407 		kunmap(page);
4408 	}
4409 	ClearPageChecked(page);
4410 	set_page_dirty(page);
4411 	unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4412 
4413 out_unlock:
4414 	if (ret)
4415 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4416 					     blocksize, true);
4417 	btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
4418 	unlock_page(page);
4419 	put_page(page);
4420 out:
4421 	extent_changeset_free(data_reserved);
4422 	return ret;
4423 }
4424 
4425 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4426 			     u64 offset, u64 len)
4427 {
4428 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4429 	struct btrfs_trans_handle *trans;
4430 	int ret;
4431 
4432 	/*
4433 	 * Still need to make sure the inode looks like it's been updated so
4434 	 * that any holes get logged if we fsync.
4435 	 */
4436 	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4437 		BTRFS_I(inode)->last_trans = fs_info->generation;
4438 		BTRFS_I(inode)->last_sub_trans = root->log_transid;
4439 		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4440 		return 0;
4441 	}
4442 
4443 	/*
4444 	 * 1 - for the one we're dropping
4445 	 * 1 - for the one we're adding
4446 	 * 1 - for updating the inode.
4447 	 */
4448 	trans = btrfs_start_transaction(root, 3);
4449 	if (IS_ERR(trans))
4450 		return PTR_ERR(trans);
4451 
4452 	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4453 	if (ret) {
4454 		btrfs_abort_transaction(trans, ret);
4455 		btrfs_end_transaction(trans);
4456 		return ret;
4457 	}
4458 
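	/*
	 * Insert a file extent item with a disk bytenr of 0, which is how
	 * btrfs represents an explicit hole on disk.
	 */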
4459 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
4460 			offset, 0, 0, len, 0, len, 0, 0, 0);
4461 	if (ret)
4462 		btrfs_abort_transaction(trans, ret);
4463 	else
4464 		btrfs_update_inode(trans, root, inode);
4465 	btrfs_end_transaction(trans);
4466 	return ret;
4467 }
4468 
4469 /*
4470  * This function puts in dummy file extents for the area we're creating a hole
4471  * for.  So if we are truncating this file to a larger size we need to insert
4472  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4473  * for the range between oldsize and size.
4474  */
4475 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4476 {
4477 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4478 	struct btrfs_root *root = BTRFS_I(inode)->root;
4479 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4480 	struct extent_map *em = NULL;
4481 	struct extent_state *cached_state = NULL;
4482 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4483 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4484 	u64 block_end = ALIGN(size, fs_info->sectorsize);
4485 	u64 last_byte;
4486 	u64 cur_offset;
4487 	u64 hole_size;
4488 	int err = 0;
4489 
4490 	/*
4491 	 * If our size started in the middle of a block we need to zero out the
4492 	 * rest of the block before we expand the i_size, otherwise we could
4493 	 * expose stale data.
4494 	 */
4495 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
4496 	if (err)
4497 		return err;
4498 
4499 	if (size <= hole_start)
4500 		return 0;
4501 
4502 	btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
4503 					   block_end - 1, &cached_state);
4504 	cur_offset = hole_start;
4505 	while (1) {
4506 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
4507 				      block_end - cur_offset);
4508 		if (IS_ERR(em)) {
4509 			err = PTR_ERR(em);
4510 			em = NULL;
4511 			break;
4512 		}
4513 		last_byte = min(extent_map_end(em), block_end);
4514 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4515 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4516 			struct extent_map *hole_em;
4517 			hole_size = last_byte - cur_offset;
4518 
4519 			err = maybe_insert_hole(root, inode, cur_offset,
4520 						hole_size);
4521 			if (err)
4522 				break;
4523 			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
4524 						cur_offset + hole_size - 1, 0);
4525 			hole_em = alloc_extent_map();
4526 			if (!hole_em) {
4527 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4528 					&BTRFS_I(inode)->runtime_flags);
4529 				goto next;
4530 			}
4531 			hole_em->start = cur_offset;
4532 			hole_em->len = hole_size;
4533 			hole_em->orig_start = cur_offset;
4534 
4535 			hole_em->block_start = EXTENT_MAP_HOLE;
4536 			hole_em->block_len = 0;
4537 			hole_em->orig_block_len = 0;
4538 			hole_em->ram_bytes = hole_size;
4539 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
4540 			hole_em->generation = fs_info->generation;
4541 
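			/*
			 * Insert the hole mapping; -EEXIST means a stale
			 * cached mapping overlaps the range, so drop it and
			 * retry the insertion.
			 */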
4542 			while (1) {
4543 				write_lock(&em_tree->lock);
4544 				err = add_extent_mapping(em_tree, hole_em, 1);
4545 				write_unlock(&em_tree->lock);
4546 				if (err != -EEXIST)
4547 					break;
4548 				btrfs_drop_extent_cache(BTRFS_I(inode),
4549 							cur_offset,
4550 							cur_offset +
4551 							hole_size - 1, 0);
4552 			}
4553 			free_extent_map(hole_em);
4554 		}
4555 next:
4556 		free_extent_map(em);
4557 		em = NULL;
4558 		cur_offset = last_byte;
4559 		if (cur_offset >= block_end)
4560 			break;
4561 	}
4562 	free_extent_map(em);
4563 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
4564 	return err;
4565 }
4566 
4567 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4568 {
4569 	struct btrfs_root *root = BTRFS_I(inode)->root;
4570 	struct btrfs_trans_handle *trans;
4571 	loff_t oldsize = i_size_read(inode);
4572 	loff_t newsize = attr->ia_size;
4573 	int mask = attr->ia_valid;
4574 	int ret;
4575 
4576 	/*
4577 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4578 	 * special case where we need to update the times despite not having
4579 	 * these flags set.  For all other operations the VFS sets these flags
4580 	 * explicitly if it wants a timestamp update.
4581 	 */
4582 	if (newsize != oldsize) {
4583 		inode_inc_iversion(inode);
4584 		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4585 			inode->i_ctime = inode->i_mtime =
4586 				current_time(inode);
4587 	}
4588 
4589 	if (newsize > oldsize) {
4590 		/*
4591 		 * Don't do an expanding truncate while snapshotting is ongoing.
4592 		 * This is to ensure the snapshot captures a fully consistent
4593 		 * state of this file - if the snapshot captures this expanding
4594 		 * truncation, it must capture all writes that happened before
4595 		 * this truncation.
4596 		 */
4597 		btrfs_wait_for_snapshot_creation(root);
4598 		ret = btrfs_cont_expand(inode, oldsize, newsize);
4599 		if (ret) {
4600 			btrfs_end_write_no_snapshotting(root);
4601 			return ret;
4602 		}
4603 
4604 		trans = btrfs_start_transaction(root, 1);
4605 		if (IS_ERR(trans)) {
4606 			btrfs_end_write_no_snapshotting(root);
4607 			return PTR_ERR(trans);
4608 		}
4609 
4610 		i_size_write(inode, newsize);
4611 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4612 		pagecache_isize_extended(inode, oldsize, newsize);
4613 		ret = btrfs_update_inode(trans, root, inode);
4614 		btrfs_end_write_no_snapshotting(root);
4615 		btrfs_end_transaction(trans);
4616 	} else {
4617 
4618 		/*
4619 		 * We're truncating a file that used to have good data down to
4620 		 * zero. Make sure it gets into the ordered flush list so that
4621 		 * any new writes get down to disk quickly.
4622 		 */
4623 		if (newsize == 0)
4624 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4625 				&BTRFS_I(inode)->runtime_flags);
4626 
4627 		truncate_setsize(inode, newsize);
4628 
4629 		/* Disable unlocked DIO reads to avoid the endless truncate */
4630 		btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
4631 		inode_dio_wait(inode);
4632 		btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
4633 
4634 		ret = btrfs_truncate(inode, newsize == oldsize);
4635 		if (ret && inode->i_nlink) {
4636 			int err;
4637 
4638 			/*
4639 			 * Truncate failed, so fix up the in-memory size. We
4640 			 * adjusted disk_i_size down as we removed extents, so
4641 			 * wait for disk_i_size to be stable and then update the
4642 			 * in-memory size to match.
4643 			 */
4644 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
4645 			if (err)
4646 				return err;
4647 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
4648 		}
4649 	}
4650 
4651 	return ret;
4652 }
4653 
4654 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4655 {
4656 	struct inode *inode = d_inode(dentry);
4657 	struct btrfs_root *root = BTRFS_I(inode)->root;
4658 	int err;
4659 
4660 	if (btrfs_root_readonly(root))
4661 		return -EROFS;
4662 
4663 	err = setattr_prepare(dentry, attr);
4664 	if (err)
4665 		return err;
4666 
4667 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4668 		err = btrfs_setsize(inode, attr);
4669 		if (err)
4670 			return err;
4671 	}
4672 
4673 	if (attr->ia_valid) {
4674 		setattr_copy(inode, attr);
4675 		inode_inc_iversion(inode);
4676 		err = btrfs_dirty_inode(inode);
4677 
4678 		if (!err && attr->ia_valid & ATTR_MODE)
4679 			err = posix_acl_chmod(inode, inode->i_mode);
4680 	}
4681 
4682 	return err;
4683 }
4684 
4685 /*
4686  * While truncating the inode pages during eviction, we get the VFS calling
4687  * btrfs_invalidatepage() against each page of the inode. This is slow because
4688  * the calls to btrfs_invalidatepage() result in a huge number of calls to
4689  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
4690  * extent_state structures over and over, wasting lots of time.
4691  *
4692  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
4693  * those expensive operations on a per page basis and do only the ordered io
4694  * finishing, while we release here the extent_map and extent_state structures,
4695  * without the excessive merging and splitting.
4696  */
4697 static void evict_inode_truncate_pages(struct inode *inode)
4698 {
4699 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4700 	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
4701 	struct rb_node *node;
4702 
4703 	ASSERT(inode->i_state & I_FREEING);
4704 	truncate_inode_pages_final(&inode->i_data);
4705 
4706 	write_lock(&map_tree->lock);
4707 	while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
4708 		struct extent_map *em;
4709 
4710 		node = rb_first_cached(&map_tree->map);
4711 		em = rb_entry(node, struct extent_map, rb_node);
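		/*
		 * Nothing can be writing back or logging these extent maps
		 * anymore since the inode is being evicted, so clear the
		 * pinned and logging bits; remove_extent_mapping() warns if
		 * the pinned bit is still set.
		 */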
4712 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
4713 		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
4714 		remove_extent_mapping(map_tree, em);
4715 		free_extent_map(em);
4716 		if (need_resched()) {
4717 			write_unlock(&map_tree->lock);
4718 			cond_resched();
4719 			write_lock(&map_tree->lock);
4720 		}
4721 	}
4722 	write_unlock(&map_tree->lock);
4723 
4724 	/*
4725 	 * Keep looping until we have no more ranges in the io tree.
4726 	 * We can have ongoing bios started by readpages (called from readahead)
4727 	 * whose endio callback (extent_io.c:end_bio_extent_readpage) is still
4728 	 * in progress (it unlocked the pages in the bio but did not yet unlock
4729 	 * the ranges in the io tree). This means some ranges can still be
4730 	 * locked while eviction has started, because before submitting those
4731 	 * bios, which are executed by a separate task (a work queue kthread),
4732 	 * inode references (inode->i_count) were not taken (they would have
4733 	 * been dropped in the end io callback of each bio). Therefore here we
4734 	 * effectively end up waiting for those bios and for anyone else who is
4735 	 * holding locked ranges without having bumped the inode's reference
4736 	 * count - if we don't wait, when they access the inode's io_tree to
4737 	 * unlock a range it may be too late, leading to a use-after-free
4738 	 * issue.
4739 	 */
4740 	spin_lock(&io_tree->lock);
4741 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
4742 		struct extent_state *state;
4743 		struct extent_state *cached_state = NULL;
4744 		u64 start;
4745 		u64 end;
4746 		unsigned state_flags;
4747 
4748 		node = rb_first(&io_tree->state);
4749 		state = rb_entry(node, struct extent_state, rb_node);
4750 		start = state->start;
4751 		end = state->end;
4752 		state_flags = state->state;
4753 		spin_unlock(&io_tree->lock);
4754 
4755 		lock_extent_bits(io_tree, start, end, &cached_state);
4756 
4757 		/*
4758 		 * If the range still has the DELALLOC flag, the extent didn't
4759 		 * reach disk and its reserved space won't be freed by a delayed
4760 		 * ref, so we need to free its reserved space here.
4761 		 * (Refer to the comment in btrfs_invalidatepage, case 2)
4762 		 *
4763 		 * Note: end is the bytenr of the last byte, so we need + 1 here.
4764 		 */
4765 		if (state_flags & EXTENT_DELALLOC)
4766 			btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
4767 
4768 		clear_extent_bit(io_tree, start, end,
4769 				 EXTENT_LOCKED | EXTENT_DELALLOC |
4770 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
4771 				 &cached_state);
4772 
4773 		cond_resched();
4774 		spin_lock(&io_tree->lock);
4775 	}
4776 	spin_unlock(&io_tree->lock);
4777 }
4778 
4779 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
4780 							struct btrfs_block_rsv *rsv)
4781 {
4782 	struct btrfs_fs_info *fs_info = root->fs_info;
4783 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4784 	struct btrfs_trans_handle *trans;
4785 	u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
4786 	int ret;
4787 
4788 	/*
4789 	 * Eviction should be taking place somewhere safe because of our
4790 	 * delayed iputs.  However the normal flushing code will run delayed
4791 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
4792 	 *
4793 	 * We reserve the delayed_refs_extra here again because we can't use
4794 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
4795 	 * above.  We reserve our extra bit here because we generate a ton of
4796 	 * delayed refs activity by truncating.
4797 	 *
4798 	 * If we cannot make our reservation we'll attempt to steal from the
4799 	 * global reserve, because we really want to be able to free up space.
4800 	 */
4801 	ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
4802 				     BTRFS_RESERVE_FLUSH_EVICT);
4803 	if (ret) {
4804 		/*
4805 		 * Try to steal from the global reserve if there is space for
4806 		 * it.
4807 		 */
4808 		if (btrfs_check_space_for_delayed_refs(fs_info) ||
4809 		    btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
4810 			btrfs_warn(fs_info,
4811 				   "could not allocate space for delete; will truncate on mount");
4812 			return ERR_PTR(-ENOSPC);
4813 		}
4814 		delayed_refs_extra = 0;
4815 	}
4816 
4817 	trans = btrfs_join_transaction(root);
4818 	if (IS_ERR(trans))
4819 		return trans;
4820 
4821 	if (delayed_refs_extra) {
4822 		trans->block_rsv = &fs_info->trans_block_rsv;
4823 		trans->bytes_reserved = delayed_refs_extra;
4824 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
4825 					delayed_refs_extra, 1);
4826 	}
4827 	return trans;
4828 }
4829 
4830 void btrfs_evict_inode(struct inode *inode)
4831 {
4832 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4833 	struct btrfs_trans_handle *trans;
4834 	struct btrfs_root *root = BTRFS_I(inode)->root;
4835 	struct btrfs_block_rsv *rsv;
4836 	int ret;
4837 
4838 	trace_btrfs_inode_evict(inode);
4839 
4840 	if (!root) {
4841 		clear_inode(inode);
4842 		return;
4843 	}
4844 
4845 	evict_inode_truncate_pages(inode);
4846 
4847 	if (inode->i_nlink &&
4848 	    ((btrfs_root_refs(&root->root_item) != 0 &&
4849 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
4850 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
4851 		goto no_delete;
4852 
4853 	if (is_bad_inode(inode))
4854 		goto no_delete;
4855 
4856 	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
4857 
4858 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
4859 		goto no_delete;
4860 
4861 	if (inode->i_nlink > 0) {
4862 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
4863 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
4864 		goto no_delete;
4865 	}
4866 
4867 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
4868 	if (ret)
4869 		goto no_delete;
4870 
4871 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
4872 	if (!rsv)
4873 		goto no_delete;
4874 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
4875 	rsv->failfast = 1;
4876 
4877 	btrfs_i_size_write(BTRFS_I(inode), 0);
4878 
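	/*
	 * Truncate in multiple passes: each iteration joins a transaction
	 * with a freshly refilled reservation, and -ENOSPC or -EAGAIN from
	 * btrfs_truncate_inode_items() simply means another pass is needed.
	 */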
4879 	while (1) {
4880 		trans = evict_refill_and_join(root, rsv);
4881 		if (IS_ERR(trans))
4882 			goto free_rsv;
4883 
4884 		trans->block_rsv = rsv;
4885 
4886 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4887 		trans->block_rsv = &fs_info->trans_block_rsv;
4888 		btrfs_end_transaction(trans);
4889 		btrfs_btree_balance_dirty(fs_info);
4890 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
4891 			goto free_rsv;
4892 		else if (!ret)
4893 			break;
4894 	}
4895 
4896 	/*
4897 	 * Errors here aren't a big deal: they just mean we leave orphan items in
4898 	 * the tree. They will be cleaned up on the next mount. If the inode
4899 	 * number gets reused, cleanup deletes the orphan item without doing
4900 	 * anything, and unlink reuses the existing orphan item.
4901 	 *
4902 	 * If it turns out that we are dropping too many of these, we might want
4903 	 * to add a mechanism for retrying these after a commit.
4904 	 */
4905 	trans = evict_refill_and_join(root, rsv);
4906 	if (!IS_ERR(trans)) {
4907 		trans->block_rsv = rsv;
4908 		btrfs_orphan_del(trans, BTRFS_I(inode));
4909 		trans->block_rsv = &fs_info->trans_block_rsv;
4910 		btrfs_end_transaction(trans);
4911 	}
4912 
4913 	if (!(root == fs_info->tree_root ||
4914 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4915 		btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
4916 
4917 free_rsv:
4918 	btrfs_free_block_rsv(fs_info, rsv);
4919 no_delete:
4920 	/*
4921 	 * If we didn't successfully delete, the orphan item will still be in
4922 	 * the tree and we'll retry on the next mount. Again, we might also want
4923 	 * to retry these periodically in the future.
4924 	 */
4925 	btrfs_remove_delayed_node(BTRFS_I(inode));
4926 	clear_inode(inode);
4927 }
4928 
4929 /*
4930  * Return the key found in the dir entry in the location pointer, fill @type
4931  * with BTRFS_FT_*, and return 0.
4932  *
4933  * If no dir entries were found, returns -ENOENT.
4934  * If a corrupted location is found in the dir entry, returns -EUCLEAN.
4935  */
4936 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4937 			       struct btrfs_key *location, u8 *type)
4938 {
4939 	const char *name = dentry->d_name.name;
4940 	int namelen = dentry->d_name.len;
4941 	struct btrfs_dir_item *di;
4942 	struct btrfs_path *path;
4943 	struct btrfs_root *root = BTRFS_I(dir)->root;
4944 	int ret = 0;
4945 
4946 	path = btrfs_alloc_path();
4947 	if (!path)
4948 		return -ENOMEM;
4949 
4950 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
4951 			name, namelen, 0);
4952 	if (IS_ERR_OR_NULL(di)) {
4953 		ret = di ? PTR_ERR(di) : -ENOENT;
4954 		goto out;
4955 	}
4956 
4957 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4958 	if (location->type != BTRFS_INODE_ITEM_KEY &&
4959 	    location->type != BTRFS_ROOT_ITEM_KEY) {
4960 		ret = -EUCLEAN;
4961 		btrfs_warn(root->fs_info,
4962 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
4963 			   __func__, name, btrfs_ino(BTRFS_I(dir)),
4964 			   location->objectid, location->type, location->offset);
4965 	}
4966 	if (!ret)
4967 		*type = btrfs_dir_type(path->nodes[0], di);
4968 out:
4969 	btrfs_free_path(path);
4970 	return ret;
4971 }
4972 
4973 /*
4974  * when we hit a tree root in a directory, the btrfs part of the inode
4975  * needs to be changed to reflect the root directory of the tree root.  This
4976  * is kind of like crossing a mount point.
4977  */
4978 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
4979 				    struct inode *dir,
4980 				    struct dentry *dentry,
4981 				    struct btrfs_key *location,
4982 				    struct btrfs_root **sub_root)
4983 {
4984 	struct btrfs_path *path;
4985 	struct btrfs_root *new_root;
4986 	struct btrfs_root_ref *ref;
4987 	struct extent_buffer *leaf;
4988 	struct btrfs_key key;
4989 	int ret;
4990 	int err = 0;
4991 
4992 	path = btrfs_alloc_path();
4993 	if (!path) {
4994 		err = -ENOMEM;
4995 		goto out;
4996 	}
4997 
4998 	err = -ENOENT;
4999 	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5000 	key.type = BTRFS_ROOT_REF_KEY;
5001 	key.offset = location->objectid;
5002 
5003 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5004 	if (ret) {
5005 		if (ret < 0)
5006 			err = ret;
5007 		goto out;
5008 	}
5009 
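	/*
	 * A ROOT_REF item exists for this subvolume; verify that it points
	 * back at this directory and matches the dentry name before crossing
	 * into the subvolume root.
	 */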
5010 	leaf = path->nodes[0];
5011 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5012 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5013 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5014 		goto out;
5015 
5016 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5017 				   (unsigned long)(ref + 1),
5018 				   dentry->d_name.len);
5019 	if (ret)
5020 		goto out;
5021 
5022 	btrfs_release_path(path);
5023 
5024 	new_root = btrfs_read_fs_root_no_name(fs_info, location);
5025 	if (IS_ERR(new_root)) {
5026 		err = PTR_ERR(new_root);
5027 		goto out;
5028 	}
5029 
5030 	*sub_root = new_root;
5031 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5032 	location->type = BTRFS_INODE_ITEM_KEY;
5033 	location->offset = 0;
5034 	err = 0;
5035 out:
5036 	btrfs_free_path(path);
5037 	return err;
5038 }
5039 
5040 static void inode_tree_add(struct inode *inode)
5041 {
5042 	struct btrfs_root *root = BTRFS_I(inode)->root;
5043 	struct btrfs_inode *entry;
5044 	struct rb_node **p;
5045 	struct rb_node *parent;
5046 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
5047 	u64 ino = btrfs_ino(BTRFS_I(inode));
5048 
5049 	if (inode_unhashed(inode))
5050 		return;
5051 	parent = NULL;
5052 	spin_lock(&root->inode_lock);
5053 	p = &root->inode_tree.rb_node;
5054 	while (*p) {
5055 		parent = *p;
5056 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5057 
5058 		if (ino < btrfs_ino(entry))
5059 			p = &parent->rb_left;
5060 		else if (ino > btrfs_ino(entry))
5061 			p = &parent->rb_right;
5062 		else {
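			/*
			 * An inode with this number already sits in the tree;
			 * it must be on its way out (I_WILL_FREE/I_FREEING),
			 * so take over its slot.
			 */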
5063 			WARN_ON(!(entry->vfs_inode.i_state &
5064 				  (I_WILL_FREE | I_FREEING)));
5065 			rb_replace_node(parent, new, &root->inode_tree);
5066 			RB_CLEAR_NODE(parent);
5067 			spin_unlock(&root->inode_lock);
5068 			return;
5069 		}
5070 	}
5071 	rb_link_node(new, parent, p);
5072 	rb_insert_color(new, &root->inode_tree);
5073 	spin_unlock(&root->inode_lock);
5074 }
5075 
5076 static void inode_tree_del(struct inode *inode)
5077 {
5078 	struct btrfs_root *root = BTRFS_I(inode)->root;
5079 	int empty = 0;
5080 
5081 	spin_lock(&root->inode_lock);
5082 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5083 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5084 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5085 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5086 	}
5087 	spin_unlock(&root->inode_lock);
5088 
5089 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5090 		spin_lock(&root->inode_lock);
5091 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5092 		spin_unlock(&root->inode_lock);
5093 		if (empty)
5094 			btrfs_add_dead_root(root);
5095 	}
5096 }
5097 
5099 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5100 {
5101 	struct btrfs_iget_args *args = p;
5102 	inode->i_ino = args->location->objectid;
5103 	memcpy(&BTRFS_I(inode)->location, args->location,
5104 	       sizeof(*args->location));
5105 	BTRFS_I(inode)->root = args->root;
5106 	return 0;
5107 }
5108 
5109 static int btrfs_find_actor(struct inode *inode, void *opaque)
5110 {
5111 	struct btrfs_iget_args *args = opaque;
5112 	return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5113 		args->root == BTRFS_I(inode)->root;
5114 }
5115 
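/*
 * Find or allocate a VFS inode in the inode hash, keyed by (objectid,
 * root).  A freshly allocated inode is returned locked with I_NEW set,
 * initialized by btrfs_init_locked_inode() via iget5_locked().
 */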
5116 static struct inode *btrfs_iget_locked(struct super_block *s,
5117 				       struct btrfs_key *location,
5118 				       struct btrfs_root *root)
5119 {
5120 	struct inode *inode;
5121 	struct btrfs_iget_args args;
5122 	unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5123 
5124 	args.location = location;
5125 	args.root = root;
5126 
5127 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5128 			     btrfs_init_locked_inode,
5129 			     (void *)&args);
5130 	return inode;
5131 }
5132 
5133 /*
5134  * Get an inode object given its location and corresponding root.
5135  * Path can be preallocated to prevent recursing back to iget through
5136  * allocator. NULL is also valid but may require an additional allocation
5137  * later.
5138  */
5139 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
5140 			      struct btrfs_root *root, struct btrfs_path *path)
5141 {
5142 	struct inode *inode;
5143 
5144 	inode = btrfs_iget_locked(s, location, root);
5145 	if (!inode)
5146 		return ERR_PTR(-ENOMEM);
5147 
5148 	if (inode->i_state & I_NEW) {
5149 		int ret;
5150 
5151 		ret = btrfs_read_locked_inode(inode, path);
5152 		if (!ret) {
5153 			inode_tree_add(inode);
5154 			unlock_new_inode(inode);
5155 		} else {
5156 			iget_failed(inode);
5157 			/*
5158 			 * ret > 0 can come from btrfs_search_slot called by
5159 			 * btrfs_read_locked_inode; this means the inode item
5160 			 * was not found.
5161 			 */
5162 			if (ret > 0)
5163 				ret = -ENOENT;
5164 			inode = ERR_PTR(ret);
5165 		}
5166 	}
5167 
5168 	return inode;
5169 }
5170 
5171 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5172 			 struct btrfs_root *root)
5173 {
5174 	return btrfs_iget_path(s, location, root, NULL);
5175 }
5176 
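/*
 * Build an in-memory stub directory inode, used when a subvolume
 * reference cannot be resolved (see btrfs_lookup_dentry()).  Only
 * lookup is supported; nothing is read from or written to disk.
 */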
5177 static struct inode *new_simple_dir(struct super_block *s,
5178 				    struct btrfs_key *key,
5179 				    struct btrfs_root *root)
5180 {
5181 	struct inode *inode = new_inode(s);
5182 
5183 	if (!inode)
5184 		return ERR_PTR(-ENOMEM);
5185 
5186 	BTRFS_I(inode)->root = root;
5187 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5188 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5189 
5190 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5191 	/*
5192 	 * We only need lookup, the rest is read-only and there's no inode
5193 	 * associated with the dentry
5194 	 */
5195 	inode->i_op = &simple_dir_inode_operations;
5196 	inode->i_opflags &= ~IOP_XATTR;
5197 	inode->i_fop = &simple_dir_operations;
5198 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5199 	inode->i_mtime = current_time(inode);
5200 	inode->i_atime = inode->i_mtime;
5201 	inode->i_ctime = inode->i_mtime;
5202 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5203 
5204 	return inode;
5205 }
5206 
5207 static inline u8 btrfs_inode_type(struct inode *inode)
5208 {
5209 	/*
5210 	 * Compile-time asserts that generic FT_* types still match
5211 	 * BTRFS_FT_* types
5212 	 */
5213 	BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
5214 	BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
5215 	BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
5216 	BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV);
5217 	BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV);
5218 	BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO);
5219 	BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK);
5220 	BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);
5221 
5222 	return fs_umode_to_ftype(inode->i_mode);
5223 }
5224 
5225 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5226 {
5227 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5228 	struct inode *inode;
5229 	struct btrfs_root *root = BTRFS_I(dir)->root;
5230 	struct btrfs_root *sub_root = root;
5231 	struct btrfs_key location;
5232 	u8 di_type = 0;
5233 	int index;
5234 	int ret = 0;
5235 
5236 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5237 		return ERR_PTR(-ENAMETOOLONG);
5238 
5239 	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5240 	if (ret < 0)
5241 		return ERR_PTR(ret);
5242 
5243 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5244 		inode = btrfs_iget(dir->i_sb, &location, root);
5245 		if (IS_ERR(inode))
5246 			return inode;
5247 
5248 		/* Do extra check against inode mode with di_type */
5249 		if (btrfs_inode_type(inode) != di_type) {
5250 			btrfs_crit(fs_info,
5251 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5252 				  inode->i_mode, btrfs_inode_type(inode),
5253 				  di_type);
5254 			iput(inode);
5255 			return ERR_PTR(-EUCLEAN);
5256 		}
5257 		return inode;
5258 	}
5259 
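	/*
	 * The location is a BTRFS_ROOT_ITEM_KEY: the name refers to a
	 * subvolume root, which we cross much like a mount point.
	 */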
5260 	index = srcu_read_lock(&fs_info->subvol_srcu);
5261 	ret = fixup_tree_root_location(fs_info, dir, dentry,
5262 				       &location, &sub_root);
5263 	if (ret < 0) {
5264 		if (ret != -ENOENT)
5265 			inode = ERR_PTR(ret);
5266 		else
5267 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5268 	} else {
5269 		inode = btrfs_iget(dir->i_sb, &location, sub_root);
5270 	}
5271 	srcu_read_unlock(&fs_info->subvol_srcu, index);
5272 
5273 	if (!IS_ERR(inode) && root != sub_root) {
5274 		down_read(&fs_info->cleanup_work_sem);
5275 		if (!sb_rdonly(inode->i_sb))
5276 			ret = btrfs_orphan_cleanup(sub_root);
5277 		up_read(&fs_info->cleanup_work_sem);
5278 		if (ret) {
5279 			iput(inode);
5280 			inode = ERR_PTR(ret);
5281 		}
5282 	}
5283 
5284 	return inode;
5285 }
5286 
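/*
 * Tell the dcache to delete (not cache) a dentry when it belongs to a
 * deleted subvolume or to the stub empty-subvolume directory inode.
 */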
5287 static int btrfs_dentry_delete(const struct dentry *dentry)
5288 {
5289 	struct btrfs_root *root;
5290 	struct inode *inode = d_inode(dentry);
5291 
5292 	if (!inode && !IS_ROOT(dentry))
5293 		inode = d_inode(dentry->d_parent);
5294 
5295 	if (inode) {
5296 		root = BTRFS_I(inode)->root;
5297 		if (btrfs_root_refs(&root->root_item) == 0)
5298 			return 1;
5299 
5300 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5301 			return 1;
5302 	}
5303 	return 0;
5304 }
5305 
5306 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5307 				   unsigned int flags)
5308 {
5309 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5310 
5311 	if (inode == ERR_PTR(-ENOENT))
5312 		inode = NULL;
5313 	return d_splice_alias(inode, dentry);
5314 }
5315 
5316 /*
5317  * All this infrastructure exists because dir_emit can fault, and we are holding
5318  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5319  * our information into that, and then dir_emit from the buffer.  This is
5320  * similar to what NFS does, only we don't keep the buffer around in pagecache
5321  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5322  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5323  * tree lock.
5324  */
5325 static int btrfs_opendir(struct inode *inode, struct file *file)
5326 {
5327 	struct btrfs_file_private *private;
5328 
5329 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5330 	if (!private)
5331 		return -ENOMEM;
5332 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5333 	if (!private->filldir_buf) {
5334 		kfree(private);
5335 		return -ENOMEM;
5336 	}
5337 	file->private_data = private;
5338 	return 0;
5339 }
5340 
5341 struct dir_entry {
5342 	u64 ino;
5343 	u64 offset;
5344 	unsigned type;
5345 	int name_len;
5346 };
5347 
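/*
 * Layout of the filldir buffer, as packed by btrfs_real_readdir(): each
 * struct dir_entry is immediately followed by its name bytes with no
 * alignment padding:
 *
 *   | dir_entry | name | dir_entry | name | ...
 *
 * hence the get_unaligned()/put_unaligned() accessors.
 */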
5348 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5349 {
5350 	while (entries--) {
5351 		struct dir_entry *entry = addr;
5352 		char *name = (char *)(entry + 1);
5353 
5354 		ctx->pos = get_unaligned(&entry->offset);
5355 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5356 					 get_unaligned(&entry->ino),
5357 					 get_unaligned(&entry->type)))
5358 			return 1;
5359 		addr += sizeof(struct dir_entry) +
5360 			get_unaligned(&entry->name_len);
5361 		ctx->pos++;
5362 	}
5363 	return 0;
5364 }
5365 
5366 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5367 {
5368 	struct inode *inode = file_inode(file);
5369 	struct btrfs_root *root = BTRFS_I(inode)->root;
5370 	struct btrfs_file_private *private = file->private_data;
5371 	struct btrfs_dir_item *di;
5372 	struct btrfs_key key;
5373 	struct btrfs_key found_key;
5374 	struct btrfs_path *path;
5375 	void *addr;
5376 	struct list_head ins_list;
5377 	struct list_head del_list;
5378 	int ret;
5379 	struct extent_buffer *leaf;
5380 	int slot;
5381 	char *name_ptr;
5382 	int name_len;
5383 	int entries = 0;
5384 	int total_len = 0;
5385 	bool put = false;
5386 	struct btrfs_key location;
5387 
5388 	if (!dir_emit_dots(file, ctx))
5389 		return 0;
5390 
5391 	path = btrfs_alloc_path();
5392 	if (!path)
5393 		return -ENOMEM;
5394 
5395 	addr = private->filldir_buf;
5396 	path->reada = READA_FORWARD;
5397 
5398 	INIT_LIST_HEAD(&ins_list);
5399 	INIT_LIST_HEAD(&del_list);
5400 	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5401 
5402 again:
5403 	key.type = BTRFS_DIR_INDEX_KEY;
5404 	key.offset = ctx->pos;
5405 	key.objectid = btrfs_ino(BTRFS_I(inode));
5406 
5407 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5408 	if (ret < 0)
5409 		goto err;
5410 
5411 	while (1) {
5412 		struct dir_entry *entry;
5413 
5414 		leaf = path->nodes[0];
5415 		slot = path->slots[0];
5416 		if (slot >= btrfs_header_nritems(leaf)) {
5417 			ret = btrfs_next_leaf(root, path);
5418 			if (ret < 0)
5419 				goto err;
5420 			else if (ret > 0)
5421 				break;
5422 			continue;
5423 		}
5424 
5425 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5426 
5427 		if (found_key.objectid != key.objectid)
5428 			break;
5429 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5430 			break;
5431 		if (found_key.offset < ctx->pos)
5432 			goto next;
5433 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5434 			goto next;
5435 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5436 		name_len = btrfs_dir_name_len(leaf, di);
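		/*
		 * The next record would not fit in the PAGE_SIZE buffer:
		 * release the path (and its locks), emit what we have
		 * buffered so far, then restart the search at ctx->pos.
		 */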
5437 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5438 		    PAGE_SIZE) {
5439 			btrfs_release_path(path);
5440 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5441 			if (ret)
5442 				goto nopos;
5443 			addr = private->filldir_buf;
5444 			entries = 0;
5445 			total_len = 0;
5446 			goto again;
5447 		}
5448 
5449 		entry = addr;
5450 		put_unaligned(name_len, &entry->name_len);
5451 		name_ptr = (char *)(entry + 1);
5452 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
5453 				   name_len);
5454 		put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
5455 				&entry->type);
5456 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
5457 		put_unaligned(location.objectid, &entry->ino);
5458 		put_unaligned(found_key.offset, &entry->offset);
5459 		entries++;
5460 		addr += sizeof(struct dir_entry) + name_len;
5461 		total_len += sizeof(struct dir_entry) + name_len;
5462 next:
5463 		path->slots[0]++;
5464 	}
5465 	btrfs_release_path(path);
5466 
5467 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5468 	if (ret)
5469 		goto nopos;
5470 
5471 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5472 	if (ret)
5473 		goto nopos;
5474 
5475 	/*
5476 	 * Stop new entries from being returned after we return the last
5477 	 * entry.
5478 	 *
5479 	 * New directory entries are assigned a strictly increasing
5480 	 * offset.  This means that new entries created during readdir
5481 	 * are *guaranteed* to be seen in the future by that readdir.
5482 	 * This has broken buggy programs which operate on names as
5483 	 * they're returned by readdir.  Until we re-use freed offsets
5484 	 * we have this hack to stop new entries from being returned
5485 	 * under the assumption that they'll never reach this huge
5486 	 * offset.
5487 	 *
5488 	 * This is being careful not to overflow 32bit loff_t unless the
5489 	 * last entry requires it because doing so has broken 32bit apps
5490 	 * in the past.
5491 	 */
5492 	if (ctx->pos >= INT_MAX)
5493 		ctx->pos = LLONG_MAX;
5494 	else
5495 		ctx->pos = INT_MAX;
5496 nopos:
5497 	ret = 0;
5498 err:
5499 	if (put)
5500 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5501 	btrfs_free_path(path);
5502 	return ret;
5503 }
5504 
5505 /*
5506  * This is somewhat expensive, updating the tree every time the
5507  * inode changes.  But, it is most likely to find the inode in cache.
5508  * FIXME, needs more benchmarking... there are no reasons other than performance
5509  * to keep or drop this code.
5510  */
5511 static int btrfs_dirty_inode(struct inode *inode)
5512 {
5513 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5514 	struct btrfs_root *root = BTRFS_I(inode)->root;
5515 	struct btrfs_trans_handle *trans;
5516 	int ret;
5517 
5518 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5519 		return 0;
5520 
5521 	trans = btrfs_join_transaction(root);
5522 	if (IS_ERR(trans))
5523 		return PTR_ERR(trans);
5524 
5525 	ret = btrfs_update_inode(trans, root, inode);
5526 	if (ret == -ENOSPC) {
5527 		/* whoops, let's try again with the full transaction */
5528 		btrfs_end_transaction(trans);
5529 		trans = btrfs_start_transaction(root, 1);
5530 		if (IS_ERR(trans))
5531 			return PTR_ERR(trans);
5532 
5533 		ret = btrfs_update_inode(trans, root, inode);
5534 	}
5535 	btrfs_end_transaction(trans);
5536 	if (BTRFS_I(inode)->delayed_node)
5537 		btrfs_balance_delayed_items(fs_info);
5538 
5539 	return ret;
5540 }
5541 
5542 /*
5543  * This is a copy of file_update_time.  We need this so we can return error on
5544  * ENOSPC for updating the inode in the case of file write and mmap writes.
5545  */
5546 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
5547 			     int flags)
5548 {
5549 	struct btrfs_root *root = BTRFS_I(inode)->root;
5550 	bool dirty = flags & ~S_VERSION;
5551 
5552 	if (btrfs_root_readonly(root))
5553 		return -EROFS;
5554 
5555 	if (flags & S_VERSION)
5556 		dirty |= inode_maybe_inc_iversion(inode, dirty);
5557 	if (flags & S_CTIME)
5558 		inode->i_ctime = *now;
5559 	if (flags & S_MTIME)
5560 		inode->i_mtime = *now;
5561 	if (flags & S_ATIME)
5562 		inode->i_atime = *now;
5563 	return dirty ? btrfs_dirty_inode(inode) : 0;
5564 }
5565 
5566 /*
5567  * find the highest existing sequence number in a directory
5568  * and then set the in-memory index_cnt variable to reflect
5569  * free sequence numbers
5570  */
5571 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5572 {
5573 	struct btrfs_root *root = inode->root;
5574 	struct btrfs_key key, found_key;
5575 	struct btrfs_path *path;
5576 	struct extent_buffer *leaf;
5577 	int ret;
5578 
5579 	key.objectid = btrfs_ino(inode);
5580 	key.type = BTRFS_DIR_INDEX_KEY;
5581 	key.offset = (u64)-1;
5582 
5583 	path = btrfs_alloc_path();
5584 	if (!path)
5585 		return -ENOMEM;
5586 
5587 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5588 	if (ret < 0)
5589 		goto out;
5590 	/* FIXME: we should be able to handle this */
5591 	if (ret == 0)
5592 		goto out;
5593 	ret = 0;
5594 
5595 	/*
5596 	 * MAGIC NUMBER EXPLANATION:
5597 	 * since we search a directory based on f_pos, and '.' and '..' have
5598 	 * f_pos of 0 and 1 respectively, everybody else has to start at 2
5600 	 */
5601 	if (path->slots[0] == 0) {
5602 		inode->index_cnt = 2;
5603 		goto out;
5604 	}
5605 
5606 	path->slots[0]--;
5607 
5608 	leaf = path->nodes[0];
5609 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5610 
5611 	if (found_key.objectid != btrfs_ino(inode) ||
5612 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
5613 		inode->index_cnt = 2;
5614 		goto out;
5615 	}
5616 
5617 	inode->index_cnt = found_key.offset + 1;
5618 out:
5619 	btrfs_free_path(path);
5620 	return ret;
5621 }
5622 
5623 /*
5624  * helper to find a free sequence number in a given directory.  The current
5625  * code is very simple; later versions may do smarter things in the btree
5626  */
5627 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
5628 {
5629 	int ret = 0;
5630 
5631 	if (dir->index_cnt == (u64)-1) {
5632 		ret = btrfs_inode_delayed_dir_index_count(dir);
5633 		if (ret) {
5634 			ret = btrfs_set_inode_index_count(dir);
5635 			if (ret)
5636 				return ret;
5637 		}
5638 	}
5639 
5640 	*index = dir->index_cnt;
5641 	dir->index_cnt++;
5642 
5643 	return ret;
5644 }
5645 
5646 static int btrfs_insert_inode_locked(struct inode *inode)
5647 {
5648 	struct btrfs_iget_args args;
5649 	args.location = &BTRFS_I(inode)->location;
5650 	args.root = BTRFS_I(inode)->root;
5651 
5652 	return insert_inode_locked4(inode,
5653 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
5654 		   btrfs_find_actor, &args);
5655 }
5656 
5657 /*
5658  * Inherit flags from the parent inode.
5659  *
5660  * Currently only the compression flags and the cow flags are inherited.
5661  */
5662 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
5663 {
5664 	unsigned int flags;
5665 
5666 	if (!dir)
5667 		return;
5668 
5669 	flags = BTRFS_I(dir)->flags;
5670 
5671 	if (flags & BTRFS_INODE_NOCOMPRESS) {
5672 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
5673 		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
5674 	} else if (flags & BTRFS_INODE_COMPRESS) {
5675 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
5676 		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
5677 	}
5678 
5679 	if (flags & BTRFS_INODE_NODATACOW) {
5680 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
5681 		if (S_ISREG(inode->i_mode))
5682 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5683 	}
5684 
5685 	btrfs_sync_inode_flags_to_i_flags(inode);
5686 }
5687 
5688 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5689 				     struct btrfs_root *root,
5690 				     struct inode *dir,
5691 				     const char *name, int name_len,
5692 				     u64 ref_objectid, u64 objectid,
5693 				     umode_t mode, u64 *index)
5694 {
5695 	struct btrfs_fs_info *fs_info = root->fs_info;
5696 	struct inode *inode;
5697 	struct btrfs_inode_item *inode_item;
5698 	struct btrfs_key *location;
5699 	struct btrfs_path *path;
5700 	struct btrfs_inode_ref *ref;
5701 	struct btrfs_key key[2];
5702 	u32 sizes[2];
5703 	int nitems = name ? 2 : 1;
5704 	unsigned long ptr;
5705 	unsigned int nofs_flag;
5706 	int ret;
5707 
5708 	path = btrfs_alloc_path();
5709 	if (!path)
5710 		return ERR_PTR(-ENOMEM);
5711 
5712 	nofs_flag = memalloc_nofs_save();
5713 	inode = new_inode(fs_info->sb);
5714 	memalloc_nofs_restore(nofs_flag);
5715 	if (!inode) {
5716 		btrfs_free_path(path);
5717 		return ERR_PTR(-ENOMEM);
5718 	}
5719 
5720 	/*
5721 	 * For O_TMPFILE (no name), set the link count to 0 so that the
5722 	 * inode item we fill in below carries the correct link count.
5723 	 */
5724 	if (!name)
5725 		set_nlink(inode, 0);
5726 
5727 	/*
5728 	 * we have to initialize this early, so we can reclaim the inode
5729 	 * number if we fail afterwards in this function.
5730 	 */
5731 	inode->i_ino = objectid;
5732 
5733 	if (dir && name) {
5734 		trace_btrfs_inode_request(dir);
5735 
5736 		ret = btrfs_set_inode_index(BTRFS_I(dir), index);
5737 		if (ret) {
5738 			btrfs_free_path(path);
5739 			iput(inode);
5740 			return ERR_PTR(ret);
5741 		}
5742 	} else if (dir) {
5743 		*index = 0;
5744 	}
5745 	/*
5746 	 * index_cnt is ignored for everything but a dir;
5747 	 * btrfs_set_inode_index_count has an explanation for the magic
5748 	 * number
5749 	 */
5750 	BTRFS_I(inode)->index_cnt = 2;
5751 	BTRFS_I(inode)->dir_index = *index;
5752 	BTRFS_I(inode)->root = root;
5753 	BTRFS_I(inode)->generation = trans->transid;
5754 	inode->i_generation = BTRFS_I(inode)->generation;
5755 
5756 	/*
5757 	 * We could have gotten an inode number from somebody who was fsynced
5758 	 * and then removed in this same transaction, so let's just set full
5759 	 * sync since it will be a full sync anyway and this will blow away the
5760 	 * old info in the log.
5761 	 */
5762 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
5763 
5764 	key[0].objectid = objectid;
5765 	key[0].type = BTRFS_INODE_ITEM_KEY;
5766 	key[0].offset = 0;
5767 
5768 	sizes[0] = sizeof(struct btrfs_inode_item);
5769 
5770 	if (name) {
5771 		/*
5772 		 * Start new inodes with an inode_ref. This is slightly more
5773 		 * efficient for small numbers of hard links since they will
5774 		 * be packed into one item. Extended refs will kick in if we
5775 		 * add more hard links than can fit in the ref item.
5776 		 */
5777 		key[1].objectid = objectid;
5778 		key[1].type = BTRFS_INODE_REF_KEY;
5779 		key[1].offset = ref_objectid;
5780 
5781 		sizes[1] = name_len + sizeof(*ref);
5782 	}
5783 
5784 	location = &BTRFS_I(inode)->location;
5785 	location->objectid = objectid;
5786 	location->offset = 0;
5787 	location->type = BTRFS_INODE_ITEM_KEY;
5788 
5789 	ret = btrfs_insert_inode_locked(inode);
5790 	if (ret < 0) {
5791 		iput(inode);
5792 		goto fail;
5793 	}
5794 
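	/*
	 * Reserve the inode item and, when a name was given, the initial
	 * inode ref in one batched tree insertion; both items are filled
	 * in below.
	 */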
5795 	path->leave_spinning = 1;
5796 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
5797 	if (ret != 0)
5798 		goto fail_unlock;
5799 
5800 	inode_init_owner(inode, dir, mode);
5801 	inode_set_bytes(inode, 0);
5802 
5803 	inode->i_mtime = current_time(inode);
5804 	inode->i_atime = inode->i_mtime;
5805 	inode->i_ctime = inode->i_mtime;
5806 	BTRFS_I(inode)->i_otime = inode->i_mtime;
5807 
5808 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5809 				  struct btrfs_inode_item);
5810 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
5811 			     sizeof(*inode_item));
5812 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
5813 
5814 	if (name) {
5815 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
5816 				     struct btrfs_inode_ref);
5817 		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
5818 		btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
5819 		ptr = (unsigned long)(ref + 1);
5820 		write_extent_buffer(path->nodes[0], name, ptr, name_len);
5821 	}
5822 
5823 	btrfs_mark_buffer_dirty(path->nodes[0]);
5824 	btrfs_free_path(path);
5825 
5826 	btrfs_inherit_iflags(inode, dir);
5827 
5828 	if (S_ISREG(mode)) {
5829 		if (btrfs_test_opt(fs_info, NODATASUM))
5830 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5831 		if (btrfs_test_opt(fs_info, NODATACOW))
5832 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
5833 				BTRFS_INODE_NODATASUM;
5834 	}
5835 
5836 	inode_tree_add(inode);
5837 
5838 	trace_btrfs_inode_new(inode);
5839 	btrfs_set_inode_last_trans(trans, inode);
5840 
5841 	btrfs_update_root_times(trans, root);
5842 
5843 	ret = btrfs_inode_inherit_props(trans, inode, dir);
5844 	if (ret)
5845 		btrfs_err(fs_info,
5846 			  "error inheriting props for ino %llu (root %llu): %d",
5847 			btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
5848 
5849 	return inode;
5850 
5851 fail_unlock:
5852 	discard_new_inode(inode);
5853 fail:
5854 	if (dir && name)
5855 		BTRFS_I(dir)->index_cnt--;
5856 	btrfs_free_path(path);
5857 	return ERR_PTR(ret);
5858 }
5859 
5860 /*
5861  * utility function to add 'inode' into 'parent_inode' with
5862  * a given name and a given sequence number.
5863  * if 'add_backref' is true, also insert a backref from the
5864  * inode to the parent directory.
5865  */
5866 int btrfs_add_link(struct btrfs_trans_handle *trans,
5867 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
5868 		   const char *name, int name_len, int add_backref, u64 index)
5869 {
5870 	int ret = 0;
5871 	struct btrfs_key key;
5872 	struct btrfs_root *root = parent_inode->root;
5873 	u64 ino = btrfs_ino(inode);
5874 	u64 parent_ino = btrfs_ino(parent_inode);
5875 
5876 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5877 		memcpy(&key, &inode->root->root_key, sizeof(key));
5878 	} else {
5879 		key.objectid = ino;
5880 		key.type = BTRFS_INODE_ITEM_KEY;
5881 		key.offset = 0;
5882 	}
5883 
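	/*
	 * Linking a subvolume root (BTRFS_FIRST_FREE_OBJECTID) is recorded
	 * as a root ref in the tree of tree roots; ordinary inodes get an
	 * inode ref (backref) in their own tree instead.
	 */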
5884 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5885 		ret = btrfs_add_root_ref(trans, key.objectid,
5886 					 root->root_key.objectid, parent_ino,
5887 					 index, name, name_len);
5888 	} else if (add_backref) {
5889 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5890 					     parent_ino, index);
5891 	}
5892 
5893 	/* Nothing to clean up yet */
5894 	if (ret)
5895 		return ret;
5896 
5897 	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
5898 				    btrfs_inode_type(&inode->vfs_inode), index);
5899 	if (ret == -EEXIST || ret == -EOVERFLOW)
5900 		goto fail_dir_item;
5901 	else if (ret) {
5902 		btrfs_abort_transaction(trans, ret);
5903 		return ret;
5904 	}
5905 
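	/*
	 * A directory's i_size counts each name twice, once for the dir
	 * item and once for the dir index item, hence name_len * 2.
	 */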
5906 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
5907 			   name_len * 2);
5908 	inode_inc_iversion(&parent_inode->vfs_inode);
5909 	/*
5910 	 * If we are replaying a log tree, we do not want to update the mtime
5911 	 * and ctime of the parent directory with the current time, since the
5912 	 * log replay procedure is responsible for setting them to their correct
5913 	 * values (the ones it had when the fsync was done).
5914 	 */
5915 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
5916 		struct timespec64 now = current_time(&parent_inode->vfs_inode);
5917 
5918 		parent_inode->vfs_inode.i_mtime = now;
5919 		parent_inode->vfs_inode.i_ctime = now;
5920 	}
5921 	ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
5922 	if (ret)
5923 		btrfs_abort_transaction(trans, ret);
5924 	return ret;
5925 
5926 fail_dir_item:
5927 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5928 		u64 local_index;
5929 		int err;
5930 		err = btrfs_del_root_ref(trans, key.objectid,
5931 					 root->root_key.objectid, parent_ino,
5932 					 &local_index, name, name_len);
5933 		if (err)
5934 			btrfs_abort_transaction(trans, err);
5935 	} else if (add_backref) {
5936 		u64 local_index;
5937 		int err;
5938 
5939 		err = btrfs_del_inode_ref(trans, root, name, name_len,
5940 					  ino, parent_ino, &local_index);
5941 		if (err)
5942 			btrfs_abort_transaction(trans, err);
5943 	}
5944 
5945 	/* Return the original error code */
5946 	return ret;
5947 }
5948 
5949 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
5950 			    struct btrfs_inode *dir, struct dentry *dentry,
5951 			    struct btrfs_inode *inode, int backref, u64 index)
5952 {
5953 	int err = btrfs_add_link(trans, dir, inode,
5954 				 dentry->d_name.name, dentry->d_name.len,
5955 				 backref, index);
5956 	if (err > 0)
5957 		err = -EEXIST;
5958 	return err;
5959 }
5960 
5961 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
5962 			umode_t mode, dev_t rdev)
5963 {
5964 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5965 	struct btrfs_trans_handle *trans;
5966 	struct btrfs_root *root = BTRFS_I(dir)->root;
5967 	struct inode *inode = NULL;
5968 	int err;
5969 	u64 objectid;
5970 	u64 index = 0;
5971 
5972 	/*
5973 	 * 2 for inode item and ref
5974 	 * 2 for dir items
5975 	 * 1 for xattr if selinux is on
5976 	 */
5977 	trans = btrfs_start_transaction(root, 5);
5978 	if (IS_ERR(trans))
5979 		return PTR_ERR(trans);
5980 
5981 	err = btrfs_find_free_ino(root, &objectid);
5982 	if (err)
5983 		goto out_unlock;
5984 
5985 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5986 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
5987 			mode, &index);
5988 	if (IS_ERR(inode)) {
5989 		err = PTR_ERR(inode);
5990 		inode = NULL;
5991 		goto out_unlock;
5992 	}
5993 
5994 	/*
5995 	 * If the active LSM wants to access the inode during
5996 	 * d_instantiate it needs these. Smack checks to see
5997 	 * if the filesystem supports xattrs by looking at the
5998 	 * ops vector.
5999 	 */
6000 	inode->i_op = &btrfs_special_inode_operations;
6001 	init_special_inode(inode, inode->i_mode, rdev);
6002 
6003 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6004 	if (err)
6005 		goto out_unlock;
6006 
6007 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6008 			0, index);
6009 	if (err)
6010 		goto out_unlock;
6011 
6012 	btrfs_update_inode(trans, root, inode);
6013 	d_instantiate_new(dentry, inode);
6014 
6015 out_unlock:
6016 	btrfs_end_transaction(trans);
6017 	btrfs_btree_balance_dirty(fs_info);
6018 	if (err && inode) {
6019 		inode_dec_link_count(inode);
6020 		discard_new_inode(inode);
6021 	}
6022 	return err;
6023 }
6024 
6025 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6026 			umode_t mode, bool excl)
6027 {
6028 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6029 	struct btrfs_trans_handle *trans;
6030 	struct btrfs_root *root = BTRFS_I(dir)->root;
6031 	struct inode *inode = NULL;
6032 	int err;
6033 	u64 objectid;
6034 	u64 index = 0;
6035 
6036 	/*
6037 	 * 2 for inode item and ref
6038 	 * 2 for dir items
6039 	 * 1 for xattr if selinux is on
6040 	 */
6041 	trans = btrfs_start_transaction(root, 5);
6042 	if (IS_ERR(trans))
6043 		return PTR_ERR(trans);
6044 
6045 	err = btrfs_find_free_ino(root, &objectid);
6046 	if (err)
6047 		goto out_unlock;
6048 
6049 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6050 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6051 			mode, &index);
6052 	if (IS_ERR(inode)) {
6053 		err = PTR_ERR(inode);
6054 		inode = NULL;
6055 		goto out_unlock;
6056 	}
6057 	/*
6058 	 * If the active LSM wants to access the inode during
6059 	 * d_instantiate it needs these. Smack checks to see
6060 	 * if the filesystem supports xattrs by looking at the
6061 	 * ops vector.
6062 	 */
6063 	inode->i_fop = &btrfs_file_operations;
6064 	inode->i_op = &btrfs_file_inode_operations;
6065 	inode->i_mapping->a_ops = &btrfs_aops;
6066 
6067 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6068 	if (err)
6069 		goto out_unlock;
6070 
6071 	err = btrfs_update_inode(trans, root, inode);
6072 	if (err)
6073 		goto out_unlock;
6074 
6075 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6076 			0, index);
6077 	if (err)
6078 		goto out_unlock;
6079 
6080 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6081 	d_instantiate_new(dentry, inode);
6082 
6083 out_unlock:
6084 	btrfs_end_transaction(trans);
6085 	if (err && inode) {
6086 		inode_dec_link_count(inode);
6087 		discard_new_inode(inode);
6088 	}
6089 	btrfs_btree_balance_dirty(fs_info);
6090 	return err;
6091 }
6092 
6093 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6094 		      struct dentry *dentry)
6095 {
6096 	struct btrfs_trans_handle *trans = NULL;
6097 	struct btrfs_root *root = BTRFS_I(dir)->root;
6098 	struct inode *inode = d_inode(old_dentry);
6099 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6100 	u64 index;
6101 	int err;
6102 	int drop_inode = 0;
6103 
6104 	/* do not allow hard links across subvolumes of the same device */
6105 	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6106 		return -EXDEV;
6107 
6108 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6109 		return -EMLINK;
6110 
6111 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6112 	if (err)
6113 		goto fail;
6114 
6115 	/*
6116 	 * 2 items for inode and inode ref
6117 	 * 2 items for dir items
6118 	 * 1 item for parent inode
6119 	 * 1 item for orphan item deletion if O_TMPFILE
6120 	 */
6121 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6122 	if (IS_ERR(trans)) {
6123 		err = PTR_ERR(trans);
6124 		trans = NULL;
6125 		goto fail;
6126 	}
6127 
6128 	/* There are several dir indexes for this inode, clear the cache. */
6129 	BTRFS_I(inode)->dir_index = 0ULL;
6130 	inc_nlink(inode);
6131 	inode_inc_iversion(inode);
6132 	inode->i_ctime = current_time(inode);
6133 	ihold(inode);
6134 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6135 
6136 	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6137 			1, index);
6138 
6139 	if (err) {
6140 		drop_inode = 1;
6141 	} else {
6142 		struct dentry *parent = dentry->d_parent;
6143 		int ret;
6144 
6145 		err = btrfs_update_inode(trans, root, inode);
6146 		if (err)
6147 			goto fail;
6148 		if (inode->i_nlink == 1) {
6149 			/*
6150 			 * If new hard link count is 1, it's a file created
6151 			 * with open(2) O_TMPFILE flag.
6152 			 */
6153 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6154 			if (err)
6155 				goto fail;
6156 		}
6157 		d_instantiate(dentry, inode);
6158 		ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
6159 					 true, NULL);
6160 		if (ret == BTRFS_NEED_TRANS_COMMIT) {
6161 			err = btrfs_commit_transaction(trans);
6162 			trans = NULL;
6163 		}
6164 	}
6165 
6166 fail:
6167 	if (trans)
6168 		btrfs_end_transaction(trans);
6169 	if (drop_inode) {
6170 		inode_dec_link_count(inode);
6171 		iput(inode);
6172 	}
6173 	btrfs_btree_balance_dirty(fs_info);
6174 	return err;
6175 }
6176 
6177 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6178 {
6179 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6180 	struct inode *inode = NULL;
6181 	struct btrfs_trans_handle *trans;
6182 	struct btrfs_root *root = BTRFS_I(dir)->root;
6183 	int err = 0;
6184 	u64 objectid = 0;
6185 	u64 index = 0;
6186 
6187 	/*
6188 	 * 2 items for inode and ref
6189 	 * 2 items for dir items
6190 	 * 1 for xattr if selinux is on
6191 	 */
6192 	trans = btrfs_start_transaction(root, 5);
6193 	if (IS_ERR(trans))
6194 		return PTR_ERR(trans);
6195 
6196 	err = btrfs_find_free_ino(root, &objectid);
6197 	if (err)
6198 		goto out_fail;
6199 
6200 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6201 			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6202 			S_IFDIR | mode, &index);
6203 	if (IS_ERR(inode)) {
6204 		err = PTR_ERR(inode);
6205 		inode = NULL;
6206 		goto out_fail;
6207 	}
6208 
6209 	/* these must be set before we unlock the inode */
6210 	inode->i_op = &btrfs_dir_inode_operations;
6211 	inode->i_fop = &btrfs_dir_file_operations;
6212 
6213 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6214 	if (err)
6215 		goto out_fail;
6216 
6217 	btrfs_i_size_write(BTRFS_I(inode), 0);
6218 	err = btrfs_update_inode(trans, root, inode);
6219 	if (err)
6220 		goto out_fail;
6221 
6222 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6223 			dentry->d_name.name,
6224 			dentry->d_name.len, 0, index);
6225 	if (err)
6226 		goto out_fail;
6227 
6228 	d_instantiate_new(dentry, inode);
6229 
6230 out_fail:
6231 	btrfs_end_transaction(trans);
6232 	if (err && inode) {
6233 		inode_dec_link_count(inode);
6234 		discard_new_inode(inode);
6235 	}
6236 	btrfs_btree_balance_dirty(fs_info);
6237 	return err;
6238 }
6239 
6240 static noinline int uncompress_inline(struct btrfs_path *path,
6241 				      struct page *page,
6242 				      size_t pg_offset, u64 extent_offset,
6243 				      struct btrfs_file_extent_item *item)
6244 {
6245 	int ret;
6246 	struct extent_buffer *leaf = path->nodes[0];
6247 	char *tmp;
6248 	size_t max_size;
6249 	unsigned long inline_size;
6250 	unsigned long ptr;
6251 	int compress_type;
6252 
6253 	WARN_ON(pg_offset != 0);
6254 	compress_type = btrfs_file_extent_compression(leaf, item);
6255 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6256 	inline_size = btrfs_file_extent_inline_item_len(leaf,
6257 					btrfs_item_nr(path->slots[0]));
6258 	tmp = kmalloc(inline_size, GFP_NOFS);
6259 	if (!tmp)
6260 		return -ENOMEM;
6261 	ptr = btrfs_file_extent_inline_start(item);
6262 
6263 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6264 
6265 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6266 	ret = btrfs_decompress(compress_type, tmp, page,
6267 			       extent_offset, inline_size, max_size);
6268 
6269 	/*
6270 	 * decompression code contains a memset to fill in any space between the end
6271 	 * of the uncompressed data and the end of max_size in case the decompressed
6272 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6273 	 * the end of an inline extent and the beginning of the next block, so we
6274 	 * cover that region here.
6275 	 */
6276 
6277 	if (max_size + pg_offset < PAGE_SIZE) {
6278 		char *map = kmap(page);
6279 		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
6280 		kunmap(page);
6281 	}
6282 	kfree(tmp);
6283 	return ret;
6284 }
6285 
6286 /**
6287  * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6288  * @inode:	file to search in
6289  * @page:	page to read extent data into if the extent is inline
6290  * @pg_offset:	offset into @page to copy to
6291  * @start:	file offset
6292  * @len:	length of range starting at @start
6293  *
6294  * This returns the first &struct extent_map which overlaps with the given
6295  * range, reading it from the B-tree and caching it if necessary. Note that
6296  * there may be more extents which overlap the given range after the returned
6297  * extent_map.
6298  *
6299  * If @page is not NULL and the extent is inline, this also reads the extent
6300  * data directly into the page and marks the extent up to date in the io_tree.
6301  *
6302  * Return: ERR_PTR on error, non-NULL extent_map on success.
6303  */
6304 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6305 				    struct page *page, size_t pg_offset,
6306 				    u64 start, u64 len)
6307 {
6308 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6309 	int ret;
6310 	int err = 0;
6311 	u64 extent_start = 0;
6312 	u64 extent_end = 0;
6313 	u64 objectid = btrfs_ino(inode);
6314 	int extent_type = -1;
6315 	struct btrfs_path *path = NULL;
6316 	struct btrfs_root *root = inode->root;
6317 	struct btrfs_file_extent_item *item;
6318 	struct extent_buffer *leaf;
6319 	struct btrfs_key found_key;
6320 	struct extent_map *em = NULL;
6321 	struct extent_map_tree *em_tree = &inode->extent_tree;
6322 	struct extent_io_tree *io_tree = &inode->io_tree;
6323 
6324 	read_lock(&em_tree->lock);
6325 	em = lookup_extent_mapping(em_tree, start, len);
6326 	read_unlock(&em_tree->lock);
6327 
6328 	if (em) {
6329 		if (em->start > start || em->start + em->len <= start)
6330 			free_extent_map(em);
6331 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6332 			free_extent_map(em);
6333 		else
6334 			goto out;
6335 	}
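	/*
	 * A cached extent map is only reusable if it covers @start and,
	 * for inline extents, only if no page was supplied, since the
	 * inline data would still have to be copied out.
	 */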
6336 	em = alloc_extent_map();
6337 	if (!em) {
6338 		err = -ENOMEM;
6339 		goto out;
6340 	}
6341 	em->start = EXTENT_MAP_HOLE;
6342 	em->orig_start = EXTENT_MAP_HOLE;
6343 	em->len = (u64)-1;
6344 	em->block_len = (u64)-1;
6345 
6346 	path = btrfs_alloc_path();
6347 	if (!path) {
6348 		err = -ENOMEM;
6349 		goto out;
6350 	}
6351 
6352 	/* Chances are we'll be called again, so go ahead and do readahead */
6353 	path->reada = READA_FORWARD;
6354 
6355 	/*
6356 	 * Unless we're going to uncompress the inline extent, no sleep would
6357 	 * Unless we're going to uncompress the inline extent, nothing here
6358 	 * will sleep.
6359 	path->leave_spinning = 1;
6360 
6361 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6362 	if (ret < 0) {
6363 		err = ret;
6364 		goto out;
6365 	} else if (ret > 0) {
6366 		if (path->slots[0] == 0)
6367 			goto not_found;
6368 		path->slots[0]--;
6369 	}
6370 
6371 	leaf = path->nodes[0];
6372 	item = btrfs_item_ptr(leaf, path->slots[0],
6373 			      struct btrfs_file_extent_item);
6374 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6375 	if (found_key.objectid != objectid ||
6376 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6377 		/*
6378 		 * If we back up past the first extent we want to move forward
6379 		 * and see if there is an extent in front of us, otherwise we'll
6380 		 * say there is a hole for our whole search range which can
6381 		 * cause problems.
6382 		 */
6383 		extent_end = start;
6384 		goto next;
6385 	}
6386 
6387 	extent_type = btrfs_file_extent_type(leaf, item);
6388 	extent_start = found_key.offset;
6389 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6390 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6391 		/* Only regular file could have regular/prealloc extent */
6392 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6393 			ret = -EUCLEAN;
6394 			btrfs_crit(fs_info,
6395 		"regular/prealloc extent found for non-regular inode %llu",
6396 				   btrfs_ino(inode));
6397 			goto out;
6398 		}
6399 		extent_end = extent_start +
6400 		       btrfs_file_extent_num_bytes(leaf, item);
6401 
6402 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6403 						       extent_start);
6404 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6405 		size_t size;
6406 
6407 		size = btrfs_file_extent_ram_bytes(leaf, item);
6408 		extent_end = ALIGN(extent_start + size,
6409 				   fs_info->sectorsize);
6410 
6411 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6412 						      path->slots[0],
6413 						      extent_start);
6414 	}
6415 next:
6416 	if (start >= extent_end) {
6417 		path->slots[0]++;
6418 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6419 			ret = btrfs_next_leaf(root, path);
6420 			if (ret < 0) {
6421 				err = ret;
6422 				goto out;
6423 			} else if (ret > 0) {
6424 				goto not_found;
6425 			}
6426 			leaf = path->nodes[0];
6427 		}
6428 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6429 		if (found_key.objectid != objectid ||
6430 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6431 			goto not_found;
6432 		if (start + len <= found_key.offset)
6433 			goto not_found;
6434 		if (start > found_key.offset)
6435 			goto next;
6436 
6437 		/* New extent overlaps with existing one */
6438 		em->start = start;
6439 		em->orig_start = start;
6440 		em->len = found_key.offset - start;
6441 		em->block_start = EXTENT_MAP_HOLE;
6442 		goto insert;
6443 	}
6444 
6445 	btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
6446 
6447 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6448 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6449 		goto insert;
6450 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6451 		unsigned long ptr;
6452 		char *map;
6453 		size_t size;
6454 		size_t extent_offset;
6455 		size_t copy_size;
6456 
6457 		if (!page)
6458 			goto out;
6459 
6460 		size = btrfs_file_extent_ram_bytes(leaf, item);
6461 		extent_offset = page_offset(page) + pg_offset - extent_start;
6462 		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6463 				  size - extent_offset);
6464 		em->start = extent_start + extent_offset;
6465 		em->len = ALIGN(copy_size, fs_info->sectorsize);
6466 		em->orig_block_len = em->len;
6467 		em->orig_start = em->start;
6468 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6469 
6470 		btrfs_set_path_blocking(path);
6471 		if (!PageUptodate(page)) {
6472 			if (btrfs_file_extent_compression(leaf, item) !=
6473 			    BTRFS_COMPRESS_NONE) {
6474 				ret = uncompress_inline(path, page, pg_offset,
6475 							extent_offset, item);
6476 				if (ret) {
6477 					err = ret;
6478 					goto out;
6479 				}
6480 			} else {
6481 				map = kmap(page);
6482 				read_extent_buffer(leaf, map + pg_offset, ptr,
6483 						   copy_size);
6484 				if (pg_offset + copy_size < PAGE_SIZE) {
6485 					memset(map + pg_offset + copy_size, 0,
6486 					       PAGE_SIZE - pg_offset -
6487 					       copy_size);
6488 				}
6489 				kunmap(page);
6490 			}
6491 			flush_dcache_page(page);
6492 		}
6493 		set_extent_uptodate(io_tree, em->start,
6494 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6495 		goto insert;
6496 	}
6497 not_found:
6498 	em->start = start;
6499 	em->orig_start = start;
6500 	em->len = len;
6501 	em->block_start = EXTENT_MAP_HOLE;
6502 insert:
6503 	btrfs_release_path(path);
6504 	if (em->start > start || extent_map_end(em) <= start) {
6505 		btrfs_err(fs_info,
6506 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6507 			  em->start, em->len, start, len);
6508 		err = -EIO;
6509 		goto out;
6510 	}
6511 
6512 	err = 0;
6513 	write_lock(&em_tree->lock);
6514 	err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6515 	write_unlock(&em_tree->lock);
6516 out:
6517 	btrfs_free_path(path);
6518 
6519 	trace_btrfs_get_extent(root, inode, em);
6520 
6521 	if (err) {
6522 		free_extent_map(em);
6523 		return ERR_PTR(err);
6524 	}
6525 	BUG_ON(!em); /* Error is always set */
6526 	return em;
6527 }
6528 
6529 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
6530 					   u64 start, u64 len)
6531 {
6532 	struct extent_map *em;
6533 	struct extent_map *hole_em = NULL;
6534 	u64 delalloc_start = start;
6535 	u64 end;
6536 	u64 delalloc_len;
6537 	u64 delalloc_end;
6538 	int err = 0;
6539 
6540 	em = btrfs_get_extent(inode, NULL, 0, start, len);
6541 	if (IS_ERR(em))
6542 		return em;
6543 	/*
6544 	 * If our em maps to:
6545 	 * - a hole or
6546 	 * - a pre-alloc extent,
6547 	 * there might actually be delalloc bytes behind it.
6548 	 */
6549 	if (em->block_start != EXTENT_MAP_HOLE &&
6550 	    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6551 		return em;
6552 	else
6553 		hole_em = em;
6554 
6555 	/* check to see if we've wrapped (len == -1 or similar) */
6556 	end = start + len;
6557 	if (end < start)
6558 		end = (u64)-1;
6559 	else
6560 		end -= 1;
6561 
6562 	em = NULL;
6563 
6564 	/* ok, we didn't find anything, let's look for delalloc */
6565 	delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
6566 				 end, len, EXTENT_DELALLOC, 1);
6567 	delalloc_end = delalloc_start + delalloc_len;
6568 	if (delalloc_end < delalloc_start)
6569 		delalloc_end = (u64)-1;
6570 
6571 	/*
6572 	 * We didn't find anything useful, return the original results from
6573 	 * get_extent()
6574 	 */
6575 	if (delalloc_start > end || delalloc_end <= start) {
6576 		em = hole_em;
6577 		hole_em = NULL;
6578 		goto out;
6579 	}
6580 
6581 	/*
6582 	 * Adjust the delalloc_start to make sure it doesn't go backwards from
6583 	 * the start they passed in
6584 	 */
6585 	delalloc_start = max(start, delalloc_start);
6586 	delalloc_len = delalloc_end - delalloc_start;
6587 
6588 	if (delalloc_len > 0) {
6589 		u64 hole_start;
6590 		u64 hole_len;
6591 		const u64 hole_end = extent_map_end(hole_em);
6592 
6593 		em = alloc_extent_map();
6594 		if (!em) {
6595 			err = -ENOMEM;
6596 			goto out;
6597 		}
6598 
6599 		ASSERT(hole_em);
6600 		/*
6601 		 * When btrfs_get_extent can't find anything it returns one
6602 		 * huge hole
6603 		 *
6604 		 * Make sure what it found really fits our range, and adjust to
6605 		 * make sure it is based on the start from the caller
6606 		 */
6607 		if (hole_end <= start || hole_em->start > end) {
6608 			free_extent_map(hole_em);
6609 			hole_em = NULL;
6610 		} else {
6611 			hole_start = max(hole_em->start, start);
6612 			hole_len = hole_end - hole_start;
6613 		}
6614 
6615 		if (hole_em && delalloc_start > hole_start) {
6616 			/*
6617 			 * Our hole starts before our delalloc, so we have to
6618 			 * return just the parts of the hole that go until the
6619 			 * delalloc starts
6620 			 */
6621 			em->len = min(hole_len, delalloc_start - hole_start);
6622 			em->start = hole_start;
6623 			em->orig_start = hole_start;
6624 			/*
6625 			 * Don't adjust block start at all, it is fixed at
6626 			 * EXTENT_MAP_HOLE
6627 			 */
6628 			em->block_start = hole_em->block_start;
6629 			em->block_len = hole_len;
6630 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
6631 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6632 		} else {
6633 			/*
6634 			 * Hole is out of passed range or it starts after
6635 			 * delalloc range
6636 			 */
6637 			em->start = delalloc_start;
6638 			em->len = delalloc_len;
6639 			em->orig_start = delalloc_start;
6640 			em->block_start = EXTENT_MAP_DELALLOC;
6641 			em->block_len = delalloc_len;
6642 		}
6643 	} else {
6644 		return hole_em;
6645 	}
6646 out:
6647 
6648 	free_extent_map(hole_em);
6649 	if (err) {
6650 		free_extent_map(em);
6651 		return ERR_PTR(err);
6652 	}
6653 	return em;
6654 }
6655 
6656 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
6657 						  const u64 start,
6658 						  const u64 len,
6659 						  const u64 orig_start,
6660 						  const u64 block_start,
6661 						  const u64 block_len,
6662 						  const u64 orig_block_len,
6663 						  const u64 ram_bytes,
6664 						  const int type)
6665 {
6666 	struct extent_map *em = NULL;
6667 	int ret;
6668 
6669 	if (type != BTRFS_ORDERED_NOCOW) {
6670 		em = create_io_em(inode, start, len, orig_start,
6671 				  block_start, block_len, orig_block_len,
6672 				  ram_bytes,
6673 				  BTRFS_COMPRESS_NONE, /* compress_type */
6674 				  type);
6675 		if (IS_ERR(em))
6676 			goto out;
6677 	}
6678 	ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
6679 					   len, block_len, type);
6680 	if (ret) {
6681 		if (em) {
6682 			free_extent_map(em);
6683 			btrfs_drop_extent_cache(BTRFS_I(inode), start,
6684 						start + len - 1, 0);
6685 		}
6686 		em = ERR_PTR(ret);
6687 	}
6688 out:
6689 
6690 	return em;
6691 }
6692 
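/*
 * Allocate a fresh data extent for a direct IO write and hook it up with
 * an in-memory extent map and an ordered extent covering the range.
 */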
6693 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6694 						  u64 start, u64 len)
6695 {
6696 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6697 	struct btrfs_root *root = BTRFS_I(inode)->root;
6698 	struct extent_map *em;
6699 	struct btrfs_key ins;
6700 	u64 alloc_hint;
6701 	int ret;
6702 
6703 	alloc_hint = get_extent_allocation_hint(inode, start, len);
6704 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
6705 				   0, alloc_hint, &ins, 1, 1);
6706 	if (ret)
6707 		return ERR_PTR(ret);
6708 
6709 	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
6710 				     ins.objectid, ins.offset, ins.offset,
6711 				     ins.offset, BTRFS_ORDERED_REGULAR);
6712 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
6713 	if (IS_ERR(em))
6714 		btrfs_free_reserved_extent(fs_info, ins.objectid,
6715 					   ins.offset, 1);
6716 
6717 	return em;
6718 }
6719 
6720 /*
6721  * returns 1 when nocow is safe, < 0 on error, 0 if the
6722  * block must be cow'd
6723  */
6724 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
6725 			      u64 *orig_start, u64 *orig_block_len,
6726 			      u64 *ram_bytes)
6727 {
6728 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6729 	struct btrfs_path *path;
6730 	int ret;
6731 	struct extent_buffer *leaf;
6732 	struct btrfs_root *root = BTRFS_I(inode)->root;
6733 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6734 	struct btrfs_file_extent_item *fi;
6735 	struct btrfs_key key;
6736 	u64 disk_bytenr;
6737 	u64 backref_offset;
6738 	u64 extent_end;
6739 	u64 num_bytes;
6740 	int slot;
6741 	int found_type;
6742 	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
6743 
6744 	path = btrfs_alloc_path();
6745 	if (!path)
6746 		return -ENOMEM;
6747 
6748 	ret = btrfs_lookup_file_extent(NULL, root, path,
6749 			btrfs_ino(BTRFS_I(inode)), offset, 0);
6750 	if (ret < 0)
6751 		goto out;
6752 
6753 	slot = path->slots[0];
6754 	if (ret == 1) {
6755 		if (slot == 0) {
6756 			/* can't find the item, must cow */
6757 			ret = 0;
6758 			goto out;
6759 		}
6760 		slot--;
6761 	}
6762 	ret = 0;
6763 	leaf = path->nodes[0];
6764 	btrfs_item_key_to_cpu(leaf, &key, slot);
6765 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
6766 	    key.type != BTRFS_EXTENT_DATA_KEY) {
6767 		/* not our file or wrong item type, must cow */
6768 		goto out;
6769 	}
6770 
6771 	if (key.offset > offset) {
6772 		/* Wrong offset, must cow */
6773 		goto out;
6774 	}
6775 
6776 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6777 	found_type = btrfs_file_extent_type(leaf, fi);
6778 	if (found_type != BTRFS_FILE_EXTENT_REG &&
6779 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
6780 		/* not a regular extent, must cow */
6781 		goto out;
6782 	}
6783 
6784 	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
6785 		goto out;
6786 
6787 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
6788 	if (extent_end <= offset)
6789 		goto out;
6790 
6791 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6792 	if (disk_bytenr == 0)
6793 		goto out;
6794 
6795 	if (btrfs_file_extent_compression(leaf, fi) ||
6796 	    btrfs_file_extent_encryption(leaf, fi) ||
6797 	    btrfs_file_extent_other_encoding(leaf, fi))
6798 		goto out;
6799 
6800 	/*
6801 	 * Do the same check as in btrfs_cross_ref_exist but without the
6802 	 * unnecessary search.
6803 	 */
6804 	if (btrfs_file_extent_generation(leaf, fi) <=
6805 	    btrfs_root_last_snapshot(&root->root_item))
6806 		goto out;
6807 
6808 	backref_offset = btrfs_file_extent_offset(leaf, fi);
6809 
6810 	if (orig_start) {
6811 		*orig_start = key.offset - backref_offset;
6812 		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
6813 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6814 	}
6815 
6816 	if (btrfs_extent_readonly(fs_info, disk_bytenr))
6817 		goto out;
6818 
6819 	num_bytes = min(offset + *len, extent_end) - offset;
6820 	if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6821 		u64 range_end;
6822 
6823 		range_end = round_up(offset + num_bytes,
6824 				     root->fs_info->sectorsize) - 1;
6825 		ret = test_range_bit(io_tree, offset, range_end,
6826 				     EXTENT_DELALLOC, 0, NULL);
6827 		if (ret) {
6828 			ret = -EAGAIN;
6829 			goto out;
6830 		}
6831 	}
6832 
6833 	btrfs_release_path(path);
6834 
6835 	/*
6836 	 * look for other files referencing this extent; if we
6837 	 * find any we must cow
6838 	 */
6839 
6840 	ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
6841 				    key.offset - backref_offset, disk_bytenr);
6842 	if (ret) {
6843 		ret = 0;
6844 		goto out;
6845 	}
6846 
6847 	/*
6848 	 * adjust disk_bytenr and num_bytes to cover just the bytes
6849 	 * in this extent we are about to write.  If there
6850 	 * are any csums in that range we have to cow in order
6851 	 * to keep the csums correct
6852 	 */
6853 	disk_bytenr += backref_offset;
6854 	disk_bytenr += offset - key.offset;
6855 	if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
6856 		goto out;
6857 	/*
6858 	 * all of the above have passed, it is safe to overwrite this extent
6859 	 * without cow
6860 	 */
6861 	*len = num_bytes;
6862 	ret = 1;
6863 out:
6864 	btrfs_free_path(path);
6865 	return ret;
6866 }
6867 
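/*
 * Lock the io_tree range for direct IO, looping until the range is free
 * of ordered extents and, for writes, of page cache pages that raced in.
 */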
6868 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
6869 			      struct extent_state **cached_state, int writing)
6870 {
6871 	struct btrfs_ordered_extent *ordered;
6872 	int ret = 0;
6873 
6874 	while (1) {
6875 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6876 				 cached_state);
6877 		/*
6878 		 * We're concerned with the entire range that we're going to be
6879 		 * doing DIO to, so we need to make sure there's no ordered
6880 		 * doing DIO to, so we need to make sure there are no ordered
6881 		 */
6882 		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
6883 						     lockend - lockstart + 1);
6884 
6885 		/*
6886 		 * We need to make sure there are no buffered pages in this
6887 		 * range either, we could have raced between the invalidate in
6888 		 * generic_file_direct_write and locking the extent.  The
6889 		 * invalidate needs to happen so that reads after a write do not
6890 		 * get stale data.
6891 		 */
6892 		if (!ordered &&
6893 		    (!writing || !filemap_range_has_page(inode->i_mapping,
6894 							 lockstart, lockend)))
6895 			break;
6896 
6897 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6898 				     cached_state);
6899 
6900 		if (ordered) {
6901 			/*
6902 			 * If we are doing a DIO read and the ordered extent we
6903 			 * found is for a buffered write, we can not wait for it
6904 			 * to complete and retry, because if we do so we can
6905 			 * deadlock with concurrent buffered writes on page
6906 			 * locks. This happens only if our DIO read covers more
6907 			 * than one extent map, if at this point has already
6908 			 * created an ordered extent for a previous extent map
6909 			 * and locked its range in the inode's io tree, and a
6910 			 * concurrent write against that previous extent map's
6911 			 * range and this range started (we unlock the ranges
6912 			 * in the io tree only when the bios complete and
6913 			 * buffered writes always lock pages before attempting
6914 			 * to lock range in the io tree).
6915 			 */
6916 			if (writing ||
6917 			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
6918 				btrfs_start_ordered_extent(inode, ordered, 1);
6919 			else
6920 				ret = -ENOTBLK;
6921 			btrfs_put_ordered_extent(ordered);
6922 		} else {
6923 			/*
6924 			 * We could trigger writeback for this range (and wait
6925 			 * for it to complete) and then invalidate the pages for
6926 			 * this range (through invalidate_inode_pages2_range()),
6927 			 * but that can lead us to a deadlock with a concurrent
6928 			 * call to readpages() (a buffered read or a defrag call
6929 			 * triggered a readahead) on a page lock due to an
6930 			 * ordered dio extent we created before but did not have
6931 			 * yet a corresponding bio submitted (whence it can not
6932 			 * complete), which makes readpages() wait for that
6933 			 * ordered extent to complete while holding a lock on
6934 			 * that page.
6935 			 */
6936 			ret = -ENOTBLK;
6937 		}
6938 
6939 		if (ret)
6940 			break;
6941 
6942 		cond_resched();
6943 	}
6944 
6945 	return ret;
6946 }
6947 
6948 /* The callers of this must take lock_extent() */
6949 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
6950 				       u64 orig_start, u64 block_start,
6951 				       u64 block_len, u64 orig_block_len,
6952 				       u64 ram_bytes, int compress_type,
6953 				       int type)
6954 {
6955 	struct extent_map_tree *em_tree;
6956 	struct extent_map *em;
6957 	int ret;
6958 
6959 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
6960 	       type == BTRFS_ORDERED_COMPRESSED ||
6961 	       type == BTRFS_ORDERED_NOCOW ||
6962 	       type == BTRFS_ORDERED_REGULAR);
6963 
6964 	em_tree = &BTRFS_I(inode)->extent_tree;
6965 	em = alloc_extent_map();
6966 	if (!em)
6967 		return ERR_PTR(-ENOMEM);
6968 
6969 	em->start = start;
6970 	em->orig_start = orig_start;
6971 	em->len = len;
6972 	em->block_len = block_len;
6973 	em->block_start = block_start;
6974 	em->orig_block_len = orig_block_len;
6975 	em->ram_bytes = ram_bytes;
6976 	em->generation = -1;
6977 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6978 	if (type == BTRFS_ORDERED_PREALLOC) {
6979 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
6980 	} else if (type == BTRFS_ORDERED_COMPRESSED) {
6981 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6982 		em->compress_type = compress_type;
6983 	}
6984 
6985 	do {
6986 		btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
6987 				em->start + em->len - 1, 0);
6988 		write_lock(&em_tree->lock);
6989 		ret = add_extent_mapping(em_tree, em, 1);
6990 		write_unlock(&em_tree->lock);
6991 		/*
6992 		 * The caller has taken lock_extent(), who could race with us
6993 		 * to add em?
6994 		 */
6995 	} while (ret == -EEXIST);
6996 
6997 	if (ret) {
6998 		free_extent_map(em);
6999 		return ERR_PTR(ret);
7000 	}
7001 
7002 	/* em got 2 refs now, callers needs to do free_extent_map once. */
7003 	return em;
7004 }
7005 
7006 
7007 static int btrfs_get_blocks_direct_read(struct extent_map *em,
7008 					struct buffer_head *bh_result,
7009 					struct inode *inode,
7010 					u64 start, u64 len)
7011 {
7012 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7013 
7014 	if (em->block_start == EXTENT_MAP_HOLE ||
7015 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7016 		return -ENOENT;
7017 
7018 	len = min(len, em->len - (start - em->start));
7019 
7020 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7021 		inode->i_blkbits;
7022 	bh_result->b_size = len;
7023 	bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
7024 	set_buffer_mapped(bh_result);
7025 
7026 	return 0;
7027 }
7028 
7029 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7030 					 struct buffer_head *bh_result,
7031 					 struct inode *inode,
7032 					 struct btrfs_dio_data *dio_data,
7033 					 u64 start, u64 len)
7034 {
7035 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7036 	struct extent_map *em = *map;
7037 	int ret = 0;
7038 
7039 	/*
7040 	 * We don't allocate a new extent in the following cases
7041 	 *
7042 	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7043 	 * existing extent.
7044 	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7045 	 * just use the extent.
7046 	 *
7047 	 */
7048 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7049 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7050 	     em->block_start != EXTENT_MAP_HOLE)) {
7051 		int type;
7052 		u64 block_start, orig_start, orig_block_len, ram_bytes;
7053 
7054 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7055 			type = BTRFS_ORDERED_PREALLOC;
7056 		else
7057 			type = BTRFS_ORDERED_NOCOW;
7058 		len = min(len, em->len - (start - em->start));
7059 		block_start = em->block_start + (start - em->start);
7060 
7061 		if (can_nocow_extent(inode, start, &len, &orig_start,
7062 				     &orig_block_len, &ram_bytes) == 1 &&
7063 		    btrfs_inc_nocow_writers(fs_info, block_start)) {
7064 			struct extent_map *em2;
7065 
7066 			em2 = btrfs_create_dio_extent(inode, start, len,
7067 						      orig_start, block_start,
7068 						      len, orig_block_len,
7069 						      ram_bytes, type);
7070 			btrfs_dec_nocow_writers(fs_info, block_start);
7071 			if (type == BTRFS_ORDERED_PREALLOC) {
7072 				free_extent_map(em);
7073 				*map = em = em2;
7074 			}
7075 
7076 			if (em2 && IS_ERR(em2)) {
7077 				ret = PTR_ERR(em2);
7078 				goto out;
7079 			}
7080 			/*
7081 			 * For inode marked NODATACOW or extent marked PREALLOC,
7082 			 * use the existing or preallocated extent, so does not
7083 			 * need to adjust btrfs_space_info's bytes_may_use.
7084 			 */
7085 			btrfs_free_reserved_data_space_noquota(inode, start,
7086 							       len);
7087 			goto skip_cow;
7088 		}
7089 	}
7090 
7091 	/* this will cow the extent */
7092 	len = bh_result->b_size;
7093 	free_extent_map(em);
7094 	*map = em = btrfs_new_extent_direct(inode, start, len);
7095 	if (IS_ERR(em)) {
7096 		ret = PTR_ERR(em);
7097 		goto out;
7098 	}
7099 
7100 	len = min(len, em->len - (start - em->start));
7101 
7102 skip_cow:
7103 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7104 		inode->i_blkbits;
7105 	bh_result->b_size = len;
7106 	bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
7107 	set_buffer_mapped(bh_result);
7108 
7109 	if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7110 		set_buffer_new(bh_result);
7111 
7112 	/*
7113 	 * Need to update the i_size under the extent lock so buffered
7114 	 * readers will get the updated i_size when we unlock.
7115 	 */
7116 	if (!dio_data->overwrite && start + len > i_size_read(inode))
7117 		i_size_write(inode, start + len);
7118 
7119 	WARN_ON(dio_data->reserve < len);
7120 	dio_data->reserve -= len;
7121 	dio_data->unsubmitted_oe_range_end = start + len;
7122 	current->journal_info = dio_data;
7123 out:
7124 	return ret;
7125 }
7126 
7127 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7128 				   struct buffer_head *bh_result, int create)
7129 {
7130 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7131 	struct extent_map *em;
7132 	struct extent_state *cached_state = NULL;
7133 	struct btrfs_dio_data *dio_data = NULL;
7134 	u64 start = iblock << inode->i_blkbits;
7135 	u64 lockstart, lockend;
7136 	u64 len = bh_result->b_size;
7137 	int ret = 0;
7138 
7139 	if (!create)
7140 		len = min_t(u64, len, fs_info->sectorsize);
7141 
7142 	lockstart = start;
7143 	lockend = start + len - 1;
7144 
7145 	if (current->journal_info) {
7146 		/*
7147 		 * Need to pull our outstanding extents and set journal_info to NULL so
7148 		 * that anything that needs to check if there's a transaction doesn't get
7149 		 * confused.
7150 		 */
7151 		dio_data = current->journal_info;
7152 		current->journal_info = NULL;
7153 	}
7154 
7155 	/*
7156 	 * If this errors out it's because we couldn't invalidate pagecache for
7157 	 * this range and we need to fallback to buffered.
7158 	 */
7159 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7160 			       create)) {
7161 		ret = -ENOTBLK;
7162 		goto err;
7163 	}
7164 
7165 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
7166 	if (IS_ERR(em)) {
7167 		ret = PTR_ERR(em);
7168 		goto unlock_err;
7169 	}
7170 
7171 	/*
7172 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7173 	 * io.  INLINE is special, and we could probably kludge it in here, but
7174 	 * it's still buffered so for safety lets just fall back to the generic
7175 	 * buffered path.
7176 	 *
7177 	 * For COMPRESSED we _have_ to read the entire extent in so we can
7178 	 * decompress it, so there will be buffering required no matter what we
7179 	 * do, so go ahead and fallback to buffered.
7180 	 *
7181 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7182 	 * to buffered IO.  Don't blame me, this is the price we pay for using
7183 	 * the generic code.
7184 	 */
7185 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7186 	    em->block_start == EXTENT_MAP_INLINE) {
7187 		free_extent_map(em);
7188 		ret = -ENOTBLK;
7189 		goto unlock_err;
7190 	}
7191 
7192 	if (create) {
7193 		ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
7194 						    dio_data, start, len);
7195 		if (ret < 0)
7196 			goto unlock_err;
7197 
7198 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
7199 				     lockend, &cached_state);
7200 	} else {
7201 		ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
7202 						   start, len);
7203 		/* Can be negative only if we read from a hole */
7204 		if (ret < 0) {
7205 			ret = 0;
7206 			free_extent_map(em);
7207 			goto unlock_err;
7208 		}
7209 		/*
7210 		 * We need to unlock only the end area that we aren't using.
7211 		 * The rest is going to be unlocked by the endio routine.
7212 		 */
7213 		lockstart = start + bh_result->b_size;
7214 		if (lockstart < lockend) {
7215 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
7216 					     lockstart, lockend, &cached_state);
7217 		} else {
7218 			free_extent_state(cached_state);
7219 		}
7220 	}
7221 
7222 	free_extent_map(em);
7223 
7224 	return 0;
7225 
7226 unlock_err:
7227 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7228 			     &cached_state);
7229 err:
7230 	if (dio_data)
7231 		current->journal_info = dio_data;
7232 	return ret;
7233 }
7234 
7235 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
7236 						 struct bio *bio,
7237 						 int mirror_num)
7238 {
7239 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7240 	blk_status_t ret;
7241 
7242 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7243 
7244 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
7245 	if (ret)
7246 		return ret;
7247 
7248 	ret = btrfs_map_bio(fs_info, bio, mirror_num);
7249 
7250 	return ret;
7251 }
7252 
7253 static int btrfs_check_dio_repairable(struct inode *inode,
7254 				      struct bio *failed_bio,
7255 				      struct io_failure_record *failrec,
7256 				      int failed_mirror)
7257 {
7258 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7259 	int num_copies;
7260 
7261 	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7262 	if (num_copies == 1) {
7263 		/*
7264 		 * we only have a single copy of the data, so don't bother with
7265 		 * all the retry and error correction code that follows. no
7266 		 * matter what the error is, it is very likely to persist.
7267 		 */
7268 		btrfs_debug(fs_info,
7269 			"Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7270 			num_copies, failrec->this_mirror, failed_mirror);
7271 		return 0;
7272 	}
7273 
7274 	failrec->failed_mirror = failed_mirror;
7275 	failrec->this_mirror++;
7276 	if (failrec->this_mirror == failed_mirror)
7277 		failrec->this_mirror++;
7278 
7279 	if (failrec->this_mirror > num_copies) {
7280 		btrfs_debug(fs_info,
7281 			"Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
7282 			num_copies, failrec->this_mirror, failed_mirror);
7283 		return 0;
7284 	}
7285 
7286 	return 1;
7287 }
7288 
7289 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
7290 				   struct page *page, unsigned int pgoff,
7291 				   u64 start, u64 end, int failed_mirror,
7292 				   bio_end_io_t *repair_endio, void *repair_arg)
7293 {
7294 	struct io_failure_record *failrec;
7295 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7296 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
7297 	struct bio *bio;
7298 	int isector;
7299 	unsigned int read_mode = 0;
7300 	int segs;
7301 	int ret;
7302 	blk_status_t status;
7303 	struct bio_vec bvec;
7304 
7305 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
7306 
7307 	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7308 	if (ret)
7309 		return errno_to_blk_status(ret);
7310 
7311 	ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7312 					 failed_mirror);
7313 	if (!ret) {
7314 		free_io_failure(failure_tree, io_tree, failrec);
7315 		return BLK_STS_IOERR;
7316 	}
7317 
7318 	segs = bio_segments(failed_bio);
7319 	bio_get_first_bvec(failed_bio, &bvec);
7320 	if (segs > 1 ||
7321 	    (bvec.bv_len > btrfs_inode_sectorsize(inode)))
7322 		read_mode |= REQ_FAILFAST_DEV;
7323 
7324 	isector = start - btrfs_io_bio(failed_bio)->logical;
7325 	isector >>= inode->i_sb->s_blocksize_bits;
7326 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7327 				pgoff, isector, repair_endio, repair_arg);
7328 	bio->bi_opf = REQ_OP_READ | read_mode;
7329 
7330 	btrfs_debug(BTRFS_I(inode)->root->fs_info,
7331 		    "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
7332 		    read_mode, failrec->this_mirror, failrec->in_validation);
7333 
7334 	status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
7335 	if (status) {
7336 		free_io_failure(failure_tree, io_tree, failrec);
7337 		bio_put(bio);
7338 	}
7339 
7340 	return status;
7341 }
7342 
7343 struct btrfs_retry_complete {
7344 	struct completion done;
7345 	struct inode *inode;
7346 	u64 start;
7347 	int uptodate;
7348 };
7349 
7350 static void btrfs_retry_endio_nocsum(struct bio *bio)
7351 {
7352 	struct btrfs_retry_complete *done = bio->bi_private;
7353 	struct inode *inode = done->inode;
7354 	struct bio_vec *bvec;
7355 	struct extent_io_tree *io_tree, *failure_tree;
7356 	struct bvec_iter_all iter_all;
7357 
7358 	if (bio->bi_status)
7359 		goto end;
7360 
7361 	ASSERT(bio->bi_vcnt == 1);
7362 	io_tree = &BTRFS_I(inode)->io_tree;
7363 	failure_tree = &BTRFS_I(inode)->io_failure_tree;
7364 	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
7365 
7366 	done->uptodate = 1;
7367 	ASSERT(!bio_flagged(bio, BIO_CLONED));
7368 	bio_for_each_segment_all(bvec, bio, iter_all)
7369 		clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
7370 				 io_tree, done->start, bvec->bv_page,
7371 				 btrfs_ino(BTRFS_I(inode)), 0);
7372 end:
7373 	complete(&done->done);
7374 	bio_put(bio);
7375 }
7376 
7377 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
7378 						struct btrfs_io_bio *io_bio)
7379 {
7380 	struct btrfs_fs_info *fs_info;
7381 	struct bio_vec bvec;
7382 	struct bvec_iter iter;
7383 	struct btrfs_retry_complete done;
7384 	u64 start;
7385 	unsigned int pgoff;
7386 	u32 sectorsize;
7387 	int nr_sectors;
7388 	blk_status_t ret;
7389 	blk_status_t err = BLK_STS_OK;
7390 
7391 	fs_info = BTRFS_I(inode)->root->fs_info;
7392 	sectorsize = fs_info->sectorsize;
7393 
7394 	start = io_bio->logical;
7395 	done.inode = inode;
7396 	io_bio->bio.bi_iter = io_bio->iter;
7397 
7398 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
7399 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
7400 		pgoff = bvec.bv_offset;
7401 
7402 next_block_or_try_again:
7403 		done.uptodate = 0;
7404 		done.start = start;
7405 		init_completion(&done.done);
7406 
7407 		ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
7408 				pgoff, start, start + sectorsize - 1,
7409 				io_bio->mirror_num,
7410 				btrfs_retry_endio_nocsum, &done);
7411 		if (ret) {
7412 			err = ret;
7413 			goto next;
7414 		}
7415 
7416 		wait_for_completion_io(&done.done);
7417 
7418 		if (!done.uptodate) {
7419 			/* We might have another mirror, so try again */
7420 			goto next_block_or_try_again;
7421 		}
7422 
7423 next:
7424 		start += sectorsize;
7425 
7426 		nr_sectors--;
7427 		if (nr_sectors) {
7428 			pgoff += sectorsize;
7429 			ASSERT(pgoff < PAGE_SIZE);
7430 			goto next_block_or_try_again;
7431 		}
7432 	}
7433 
7434 	return err;
7435 }
7436 
7437 static void btrfs_retry_endio(struct bio *bio)
7438 {
7439 	struct btrfs_retry_complete *done = bio->bi_private;
7440 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7441 	struct extent_io_tree *io_tree, *failure_tree;
7442 	struct inode *inode = done->inode;
7443 	struct bio_vec *bvec;
7444 	int uptodate;
7445 	int ret;
7446 	int i = 0;
7447 	struct bvec_iter_all iter_all;
7448 
7449 	if (bio->bi_status)
7450 		goto end;
7451 
7452 	uptodate = 1;
7453 
7454 	ASSERT(bio->bi_vcnt == 1);
7455 	ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
7456 
7457 	io_tree = &BTRFS_I(inode)->io_tree;
7458 	failure_tree = &BTRFS_I(inode)->io_failure_tree;
7459 
7460 	ASSERT(!bio_flagged(bio, BIO_CLONED));
7461 	bio_for_each_segment_all(bvec, bio, iter_all) {
7462 		ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7463 					     bvec->bv_offset, done->start,
7464 					     bvec->bv_len);
7465 		if (!ret)
7466 			clean_io_failure(BTRFS_I(inode)->root->fs_info,
7467 					 failure_tree, io_tree, done->start,
7468 					 bvec->bv_page,
7469 					 btrfs_ino(BTRFS_I(inode)),
7470 					 bvec->bv_offset);
7471 		else
7472 			uptodate = 0;
7473 		i++;
7474 	}
7475 
7476 	done->uptodate = uptodate;
7477 end:
7478 	complete(&done->done);
7479 	bio_put(bio);
7480 }
7481 
7482 static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
7483 		struct btrfs_io_bio *io_bio, blk_status_t err)
7484 {
7485 	struct btrfs_fs_info *fs_info;
7486 	struct bio_vec bvec;
7487 	struct bvec_iter iter;
7488 	struct btrfs_retry_complete done;
7489 	u64 start;
7490 	u64 offset = 0;
7491 	u32 sectorsize;
7492 	int nr_sectors;
7493 	unsigned int pgoff;
7494 	int csum_pos;
7495 	bool uptodate = (err == 0);
7496 	int ret;
7497 	blk_status_t status;
7498 
7499 	fs_info = BTRFS_I(inode)->root->fs_info;
7500 	sectorsize = fs_info->sectorsize;
7501 
7502 	err = BLK_STS_OK;
7503 	start = io_bio->logical;
7504 	done.inode = inode;
7505 	io_bio->bio.bi_iter = io_bio->iter;
7506 
7507 	bio_for_each_segment(bvec, &io_bio->bio, iter) {
7508 		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
7509 
7510 		pgoff = bvec.bv_offset;
7511 next_block:
7512 		if (uptodate) {
7513 			csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
7514 			ret = __readpage_endio_check(inode, io_bio, csum_pos,
7515 					bvec.bv_page, pgoff, start, sectorsize);
7516 			if (likely(!ret))
7517 				goto next;
7518 		}
7519 try_again:
7520 		done.uptodate = 0;
7521 		done.start = start;
7522 		init_completion(&done.done);
7523 
7524 		status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
7525 					pgoff, start, start + sectorsize - 1,
7526 					io_bio->mirror_num, btrfs_retry_endio,
7527 					&done);
7528 		if (status) {
7529 			err = status;
7530 			goto next;
7531 		}
7532 
7533 		wait_for_completion_io(&done.done);
7534 
7535 		if (!done.uptodate) {
7536 			/* We might have another mirror, so try again */
7537 			goto try_again;
7538 		}
7539 next:
7540 		offset += sectorsize;
7541 		start += sectorsize;
7542 
7543 		ASSERT(nr_sectors);
7544 
7545 		nr_sectors--;
7546 		if (nr_sectors) {
7547 			pgoff += sectorsize;
7548 			ASSERT(pgoff < PAGE_SIZE);
7549 			goto next_block;
7550 		}
7551 	}
7552 
7553 	return err;
7554 }
7555 
7556 static blk_status_t btrfs_subio_endio_read(struct inode *inode,
7557 		struct btrfs_io_bio *io_bio, blk_status_t err)
7558 {
7559 	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7560 
7561 	if (skip_csum) {
7562 		if (unlikely(err))
7563 			return __btrfs_correct_data_nocsum(inode, io_bio);
7564 		else
7565 			return BLK_STS_OK;
7566 	} else {
7567 		return __btrfs_subio_endio_read(inode, io_bio, err);
7568 	}
7569 }
7570 
7571 static void btrfs_endio_direct_read(struct bio *bio)
7572 {
7573 	struct btrfs_dio_private *dip = bio->bi_private;
7574 	struct inode *inode = dip->inode;
7575 	struct bio *dio_bio;
7576 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7577 	blk_status_t err = bio->bi_status;
7578 
7579 	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7580 		err = btrfs_subio_endio_read(inode, io_bio, err);
7581 
7582 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
7583 		      dip->logical_offset + dip->bytes - 1);
7584 	dio_bio = dip->dio_bio;
7585 
7586 	kfree(dip);
7587 
7588 	dio_bio->bi_status = err;
7589 	dio_end_io(dio_bio);
7590 	btrfs_io_bio_free_csum(io_bio);
7591 	bio_put(bio);
7592 }
7593 
7594 static void __endio_write_update_ordered(struct inode *inode,
7595 					 const u64 offset, const u64 bytes,
7596 					 const bool uptodate)
7597 {
7598 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7599 	struct btrfs_ordered_extent *ordered = NULL;
7600 	struct btrfs_workqueue *wq;
7601 	u64 ordered_offset = offset;
7602 	u64 ordered_bytes = bytes;
7603 	u64 last_offset;
7604 
7605 	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
7606 		wq = fs_info->endio_freespace_worker;
7607 	else
7608 		wq = fs_info->endio_write_workers;
7609 
7610 	while (ordered_offset < offset + bytes) {
7611 		last_offset = ordered_offset;
7612 		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
7613 							   &ordered_offset,
7614 							   ordered_bytes,
7615 							   uptodate)) {
7616 			btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
7617 					NULL);
7618 			btrfs_queue_work(wq, &ordered->work);
7619 		}
7620 		/*
7621 		 * If btrfs_dec_test_ordered_pending does not find any ordered
7622 		 * extent in the range, we can exit.
7623 		 */
7624 		if (ordered_offset == last_offset)
7625 			return;
7626 		/*
7627 		 * Our bio might span multiple ordered extents. In this case
7628 		 * we keep going until we have accounted the whole dio.
7629 		 */
7630 		if (ordered_offset < offset + bytes) {
7631 			ordered_bytes = offset + bytes - ordered_offset;
7632 			ordered = NULL;
7633 		}
7634 	}
7635 }
7636 
7637 static void btrfs_endio_direct_write(struct bio *bio)
7638 {
7639 	struct btrfs_dio_private *dip = bio->bi_private;
7640 	struct bio *dio_bio = dip->dio_bio;
7641 
7642 	__endio_write_update_ordered(dip->inode, dip->logical_offset,
7643 				     dip->bytes, !bio->bi_status);
7644 
7645 	kfree(dip);
7646 
7647 	dio_bio->bi_status = bio->bi_status;
7648 	dio_end_io(dio_bio);
7649 	bio_put(bio);
7650 }
7651 
7652 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
7653 				    struct bio *bio, u64 offset)
7654 {
7655 	struct inode *inode = private_data;
7656 	blk_status_t ret;
7657 	ret = btrfs_csum_one_bio(inode, bio, offset, 1);
7658 	BUG_ON(ret); /* -ENOMEM */
7659 	return 0;
7660 }
7661 
7662 static void btrfs_end_dio_bio(struct bio *bio)
7663 {
7664 	struct btrfs_dio_private *dip = bio->bi_private;
7665 	blk_status_t err = bio->bi_status;
7666 
7667 	if (err)
7668 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
7669 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
7670 			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
7671 			   bio->bi_opf,
7672 			   (unsigned long long)bio->bi_iter.bi_sector,
7673 			   bio->bi_iter.bi_size, err);
7674 
7675 	if (dip->subio_endio)
7676 		err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
7677 
7678 	if (err) {
7679 		/*
7680 		 * We want to perceive the errors flag being set before
7681 		 * decrementing the reference count. We don't need a barrier
7682 		 * since atomic operations with a return value are fully
7683 		 * ordered as per atomic_t.txt
7684 		 */
7685 		dip->errors = 1;
7686 	}
7687 
7688 	/* if there are more bios still pending for this dio, just exit */
7689 	if (!atomic_dec_and_test(&dip->pending_bios))
7690 		goto out;
7691 
7692 	if (dip->errors) {
7693 		bio_io_error(dip->orig_bio);
7694 	} else {
7695 		dip->dio_bio->bi_status = BLK_STS_OK;
7696 		bio_endio(dip->orig_bio);
7697 	}
7698 out:
7699 	bio_put(bio);
7700 }
7701 
7702 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
7703 						 struct btrfs_dio_private *dip,
7704 						 struct bio *bio,
7705 						 u64 file_offset)
7706 {
7707 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7708 	struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
7709 	blk_status_t ret;
7710 
7711 	/*
7712 	 * We load all the csum data we need when we submit
7713 	 * the first bio to reduce the csum tree search and
7714 	 * contention.
7715 	 */
7716 	if (dip->logical_offset == file_offset) {
7717 		ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, file_offset,
7718 					    NULL);
7719 		if (ret)
7720 			return ret;
7721 	}
7722 
7723 	if (bio == dip->orig_bio)
7724 		return 0;
7725 
7726 	file_offset -= dip->logical_offset;
7727 	file_offset >>= inode->i_sb->s_blocksize_bits;
7728 	io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
7729 
7730 	return 0;
7731 }
7732 
7733 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
7734 		struct inode *inode, u64 file_offset, int async_submit)
7735 {
7736 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7737 	struct btrfs_dio_private *dip = bio->bi_private;
7738 	bool write = bio_op(bio) == REQ_OP_WRITE;
7739 	blk_status_t ret;
7740 
7741 	/* Check btrfs_submit_bio_hook() for rules about async submit. */
7742 	if (async_submit)
7743 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7744 
7745 	if (!write) {
7746 		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
7747 		if (ret)
7748 			goto err;
7749 	}
7750 
7751 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
7752 		goto map;
7753 
7754 	if (write && async_submit) {
7755 		ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
7756 					  file_offset, inode,
7757 					  btrfs_submit_bio_start_direct_io);
7758 		goto err;
7759 	} else if (write) {
7760 		/*
7761 		 * If we aren't doing async submit, calculate the csum of the
7762 		 * bio now.
7763 		 */
7764 		ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
7765 		if (ret)
7766 			goto err;
7767 	} else {
7768 		ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
7769 						     file_offset);
7770 		if (ret)
7771 			goto err;
7772 	}
7773 map:
7774 	ret = btrfs_map_bio(fs_info, bio, 0);
7775 err:
7776 	return ret;
7777 }
7778 
7779 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
7780 {
7781 	struct inode *inode = dip->inode;
7782 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7783 	struct bio *bio;
7784 	struct bio *orig_bio = dip->orig_bio;
7785 	u64 start_sector = orig_bio->bi_iter.bi_sector;
7786 	u64 file_offset = dip->logical_offset;
7787 	int async_submit = 0;
7788 	u64 submit_len;
7789 	int clone_offset = 0;
7790 	int clone_len;
7791 	int ret;
7792 	blk_status_t status;
7793 	struct btrfs_io_geometry geom;
7794 
7795 	submit_len = orig_bio->bi_iter.bi_size;
7796 	ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
7797 				    start_sector << 9, submit_len, &geom);
7798 	if (ret)
7799 		return -EIO;
7800 
7801 	if (geom.len >= submit_len) {
7802 		bio = orig_bio;
7803 		dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
7804 		goto submit;
7805 	}
7806 
7807 	/* async crcs make it difficult to collect full stripe writes. */
7808 	if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
7809 		async_submit = 0;
7810 	else
7811 		async_submit = 1;
7812 
7813 	/* bio split */
7814 	ASSERT(geom.len <= INT_MAX);
7815 	atomic_inc(&dip->pending_bios);
7816 	do {
7817 		clone_len = min_t(int, submit_len, geom.len);
7818 
7819 		/*
7820 		 * This will never fail as it's passing GPF_NOFS and
7821 		 * the allocation is backed by btrfs_bioset.
7822 		 */
7823 		bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
7824 					      clone_len);
7825 		bio->bi_private = dip;
7826 		bio->bi_end_io = btrfs_end_dio_bio;
7827 		btrfs_io_bio(bio)->logical = file_offset;
7828 
7829 		ASSERT(submit_len >= clone_len);
7830 		submit_len -= clone_len;
7831 		if (submit_len == 0)
7832 			break;
7833 
7834 		/*
7835 		 * Increase the count before we submit the bio so we know
7836 		 * the end IO handler won't happen before we increase the
7837 		 * count. Otherwise, the dip might get freed before we're
7838 		 * done setting it up.
7839 		 */
7840 		atomic_inc(&dip->pending_bios);
7841 
7842 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
7843 						async_submit);
7844 		if (status) {
7845 			bio_put(bio);
7846 			atomic_dec(&dip->pending_bios);
7847 			goto out_err;
7848 		}
7849 
7850 		clone_offset += clone_len;
7851 		start_sector += clone_len >> 9;
7852 		file_offset += clone_len;
7853 
7854 		ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
7855 				      start_sector << 9, submit_len, &geom);
7856 		if (ret)
7857 			goto out_err;
7858 	} while (submit_len > 0);
7859 
7860 submit:
7861 	status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
7862 	if (!status)
7863 		return 0;
7864 
7865 	bio_put(bio);
7866 out_err:
7867 	dip->errors = 1;
7868 	/*
7869 	 * Before atomic variable goto zero, we must  make sure dip->errors is
7870 	 * perceived to be set. This ordering is ensured by the fact that an
7871 	 * atomic operations with a return value are fully ordered as per
7872 	 * atomic_t.txt
7873 	 */
7874 	if (atomic_dec_and_test(&dip->pending_bios))
7875 		bio_io_error(dip->orig_bio);
7876 
7877 	/* bio_end_io() will handle error, so we needn't return it */
7878 	return 0;
7879 }
7880 
7881 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
7882 				loff_t file_offset)
7883 {
7884 	struct btrfs_dio_private *dip = NULL;
7885 	struct bio *bio = NULL;
7886 	struct btrfs_io_bio *io_bio;
7887 	bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
7888 	int ret = 0;
7889 
7890 	bio = btrfs_bio_clone(dio_bio);
7891 
7892 	dip = kzalloc(sizeof(*dip), GFP_NOFS);
7893 	if (!dip) {
7894 		ret = -ENOMEM;
7895 		goto free_ordered;
7896 	}
7897 
7898 	dip->private = dio_bio->bi_private;
7899 	dip->inode = inode;
7900 	dip->logical_offset = file_offset;
7901 	dip->bytes = dio_bio->bi_iter.bi_size;
7902 	dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
7903 	bio->bi_private = dip;
7904 	dip->orig_bio = bio;
7905 	dip->dio_bio = dio_bio;
7906 	atomic_set(&dip->pending_bios, 0);
7907 	io_bio = btrfs_io_bio(bio);
7908 	io_bio->logical = file_offset;
7909 
7910 	if (write) {
7911 		bio->bi_end_io = btrfs_endio_direct_write;
7912 	} else {
7913 		bio->bi_end_io = btrfs_endio_direct_read;
7914 		dip->subio_endio = btrfs_subio_endio_read;
7915 	}
7916 
7917 	/*
7918 	 * Reset the range for unsubmitted ordered extents (to a 0 length range)
7919 	 * even if we fail to submit a bio, because in such case we do the
7920 	 * corresponding error handling below and it must not be done a second
7921 	 * time by btrfs_direct_IO().
7922 	 */
7923 	if (write) {
7924 		struct btrfs_dio_data *dio_data = current->journal_info;
7925 
7926 		dio_data->unsubmitted_oe_range_end = dip->logical_offset +
7927 			dip->bytes;
7928 		dio_data->unsubmitted_oe_range_start =
7929 			dio_data->unsubmitted_oe_range_end;
7930 	}
7931 
7932 	ret = btrfs_submit_direct_hook(dip);
7933 	if (!ret)
7934 		return;
7935 
7936 	btrfs_io_bio_free_csum(io_bio);
7937 
7938 free_ordered:
7939 	/*
7940 	 * If we arrived here it means either we failed to submit the dip
7941 	 * or we either failed to clone the dio_bio or failed to allocate the
7942 	 * dip. If we cloned the dio_bio and allocated the dip, we can just
7943 	 * call bio_endio against our io_bio so that we get proper resource
7944 	 * cleanup if we fail to submit the dip, otherwise, we must do the
7945 	 * same as btrfs_endio_direct_[write|read] because we can't call these
7946 	 * callbacks - they require an allocated dip and a clone of dio_bio.
7947 	 */
7948 	if (bio && dip) {
7949 		bio_io_error(bio);
7950 		/*
7951 		 * The end io callbacks free our dip, do the final put on bio
7952 		 * and all the cleanup and final put for dio_bio (through
7953 		 * dio_end_io()).
7954 		 */
7955 		dip = NULL;
7956 		bio = NULL;
7957 	} else {
7958 		if (write)
7959 			__endio_write_update_ordered(inode,
7960 						file_offset,
7961 						dio_bio->bi_iter.bi_size,
7962 						false);
7963 		else
7964 			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
7965 			      file_offset + dio_bio->bi_iter.bi_size - 1);
7966 
7967 		dio_bio->bi_status = BLK_STS_IOERR;
7968 		/*
7969 		 * Releases and cleans up our dio_bio, no need to bio_put()
7970 		 * nor bio_endio()/bio_io_error() against dio_bio.
7971 		 */
7972 		dio_end_io(dio_bio);
7973 	}
7974 	if (bio)
7975 		bio_put(bio);
7976 	kfree(dip);
7977 }
7978 
7979 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
7980 			       const struct iov_iter *iter, loff_t offset)
7981 {
7982 	int seg;
7983 	int i;
7984 	unsigned int blocksize_mask = fs_info->sectorsize - 1;
7985 	ssize_t retval = -EINVAL;
7986 
7987 	if (offset & blocksize_mask)
7988 		goto out;
7989 
7990 	if (iov_iter_alignment(iter) & blocksize_mask)
7991 		goto out;
7992 
7993 	/* If this is a write we don't need to check anymore */
7994 	if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
7995 		return 0;
7996 	/*
7997 	 * Check to make sure we don't have duplicate iov_base's in this
7998 	 * iovec, if so return EINVAL, otherwise we'll get csum errors
7999 	 * when reading back.
8000 	 */
8001 	for (seg = 0; seg < iter->nr_segs; seg++) {
8002 		for (i = seg + 1; i < iter->nr_segs; i++) {
8003 			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8004 				goto out;
8005 		}
8006 	}
8007 	retval = 0;
8008 out:
8009 	return retval;
8010 }
8011 
8012 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8013 {
8014 	struct file *file = iocb->ki_filp;
8015 	struct inode *inode = file->f_mapping->host;
8016 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8017 	struct btrfs_dio_data dio_data = { 0 };
8018 	struct extent_changeset *data_reserved = NULL;
8019 	loff_t offset = iocb->ki_pos;
8020 	size_t count = 0;
8021 	int flags = 0;
8022 	bool wakeup = true;
8023 	bool relock = false;
8024 	ssize_t ret;
8025 
8026 	if (check_direct_IO(fs_info, iter, offset))
8027 		return 0;
8028 
8029 	inode_dio_begin(inode);
8030 
8031 	/*
8032 	 * The generic stuff only does filemap_write_and_wait_range, which
8033 	 * isn't enough if we've written compressed pages to this area, so
8034 	 * we need to flush the dirty pages again to make absolutely sure
8035 	 * that any outstanding dirty pages are on disk.
8036 	 */
8037 	count = iov_iter_count(iter);
8038 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8039 		     &BTRFS_I(inode)->runtime_flags))
8040 		filemap_fdatawrite_range(inode->i_mapping, offset,
8041 					 offset + count - 1);
8042 
8043 	if (iov_iter_rw(iter) == WRITE) {
8044 		/*
8045 		 * If the write DIO is beyond the EOF, we need update
8046 		 * the isize, but it is protected by i_mutex. So we can
8047 		 * not unlock the i_mutex at this case.
8048 		 */
8049 		if (offset + count <= inode->i_size) {
8050 			dio_data.overwrite = 1;
8051 			inode_unlock(inode);
8052 			relock = true;
8053 		} else if (iocb->ki_flags & IOCB_NOWAIT) {
8054 			ret = -EAGAIN;
8055 			goto out;
8056 		}
8057 		ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
8058 						   offset, count);
8059 		if (ret)
8060 			goto out;
8061 
8062 		/*
8063 		 * We need to know how many extents we reserved so that we can
8064 		 * do the accounting properly if we go over the number we
8065 		 * originally calculated.  Abuse current->journal_info for this.
8066 		 */
8067 		dio_data.reserve = round_up(count,
8068 					    fs_info->sectorsize);
8069 		dio_data.unsubmitted_oe_range_start = (u64)offset;
8070 		dio_data.unsubmitted_oe_range_end = (u64)offset;
8071 		current->journal_info = &dio_data;
8072 		down_read(&BTRFS_I(inode)->dio_sem);
8073 	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8074 				     &BTRFS_I(inode)->runtime_flags)) {
8075 		inode_dio_end(inode);
8076 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
8077 		wakeup = false;
8078 	}
8079 
8080 	ret = __blockdev_direct_IO(iocb, inode,
8081 				   fs_info->fs_devices->latest_bdev,
8082 				   iter, btrfs_get_blocks_direct, NULL,
8083 				   btrfs_submit_direct, flags);
8084 	if (iov_iter_rw(iter) == WRITE) {
8085 		up_read(&BTRFS_I(inode)->dio_sem);
8086 		current->journal_info = NULL;
8087 		if (ret < 0 && ret != -EIOCBQUEUED) {
8088 			if (dio_data.reserve)
8089 				btrfs_delalloc_release_space(inode, data_reserved,
8090 					offset, dio_data.reserve, true);
8091 			/*
8092 			 * On error we might have left some ordered extents
8093 			 * without submitting corresponding bios for them, so
8094 			 * cleanup them up to avoid other tasks getting them
8095 			 * and waiting for them to complete forever.
8096 			 */
8097 			if (dio_data.unsubmitted_oe_range_start <
8098 			    dio_data.unsubmitted_oe_range_end)
8099 				__endio_write_update_ordered(inode,
8100 					dio_data.unsubmitted_oe_range_start,
8101 					dio_data.unsubmitted_oe_range_end -
8102 					dio_data.unsubmitted_oe_range_start,
8103 					false);
8104 		} else if (ret >= 0 && (size_t)ret < count)
8105 			btrfs_delalloc_release_space(inode, data_reserved,
8106 					offset, count - (size_t)ret, true);
8107 		btrfs_delalloc_release_extents(BTRFS_I(inode), count);
8108 	}
8109 out:
8110 	if (wakeup)
8111 		inode_dio_end(inode);
8112 	if (relock)
8113 		inode_lock(inode);
8114 
8115 	extent_changeset_free(data_reserved);
8116 	return ret;
8117 }
8118 
8119 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
8120 
8121 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8122 		__u64 start, __u64 len)
8123 {
8124 	int	ret;
8125 
8126 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8127 	if (ret)
8128 		return ret;
8129 
8130 	return extent_fiemap(inode, fieinfo, start, len);
8131 }
8132 
8133 int btrfs_readpage(struct file *file, struct page *page)
8134 {
8135 	struct extent_io_tree *tree;
8136 	tree = &BTRFS_I(page->mapping->host)->io_tree;
8137 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8138 }
8139 
8140 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8141 {
8142 	struct inode *inode = page->mapping->host;
8143 	int ret;
8144 
8145 	if (current->flags & PF_MEMALLOC) {
8146 		redirty_page_for_writepage(wbc, page);
8147 		unlock_page(page);
8148 		return 0;
8149 	}
8150 
8151 	/*
8152 	 * If we are under memory pressure we will call this directly from the
8153 	 * VM, we need to make sure we have the inode referenced for the ordered
8154 	 * extent.  If not just return like we didn't do anything.
8155 	 */
8156 	if (!igrab(inode)) {
8157 		redirty_page_for_writepage(wbc, page);
8158 		return AOP_WRITEPAGE_ACTIVATE;
8159 	}
8160 	ret = extent_write_full_page(page, wbc);
8161 	btrfs_add_delayed_iput(inode);
8162 	return ret;
8163 }
8164 
8165 static int btrfs_writepages(struct address_space *mapping,
8166 			    struct writeback_control *wbc)
8167 {
8168 	return extent_writepages(mapping, wbc);
8169 }
8170 
8171 static int
8172 btrfs_readpages(struct file *file, struct address_space *mapping,
8173 		struct list_head *pages, unsigned nr_pages)
8174 {
8175 	return extent_readpages(mapping, pages, nr_pages);
8176 }
8177 
8178 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8179 {
8180 	int ret = try_release_extent_mapping(page, gfp_flags);
8181 	if (ret == 1) {
8182 		ClearPagePrivate(page);
8183 		set_page_private(page, 0);
8184 		put_page(page);
8185 	}
8186 	return ret;
8187 }
8188 
8189 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8190 {
8191 	if (PageWriteback(page) || PageDirty(page))
8192 		return 0;
8193 	return __btrfs_releasepage(page, gfp_flags);
8194 }
8195 
8196 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8197 				 unsigned int length)
8198 {
8199 	struct inode *inode = page->mapping->host;
8200 	struct extent_io_tree *tree;
8201 	struct btrfs_ordered_extent *ordered;
8202 	struct extent_state *cached_state = NULL;
8203 	u64 page_start = page_offset(page);
8204 	u64 page_end = page_start + PAGE_SIZE - 1;
8205 	u64 start;
8206 	u64 end;
8207 	int inode_evicting = inode->i_state & I_FREEING;
8208 
8209 	/*
8210 	 * we have the page locked, so new writeback can't start,
8211 	 * and the dirty bit won't be cleared while we are here.
8212 	 *
8213 	 * Wait for IO on this page so that we can safely clear
8214 	 * the PagePrivate2 bit and do ordered accounting
8215 	 */
8216 	wait_on_page_writeback(page);
8217 
8218 	tree = &BTRFS_I(inode)->io_tree;
8219 	if (offset) {
8220 		btrfs_releasepage(page, GFP_NOFS);
8221 		return;
8222 	}
8223 
8224 	if (!inode_evicting)
8225 		lock_extent_bits(tree, page_start, page_end, &cached_state);
8226 again:
8227 	start = page_start;
8228 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
8229 					page_end - start + 1);
8230 	if (ordered) {
8231 		end = min(page_end,
8232 			  ordered->file_offset + ordered->num_bytes - 1);
8233 		/*
8234 		 * IO on this page will never be started, so we need
8235 		 * to account for any ordered extents now
8236 		 */
8237 		if (!inode_evicting)
8238 			clear_extent_bit(tree, start, end,
8239 					 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
8240 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8241 					 EXTENT_DEFRAG, 1, 0, &cached_state);
8242 		/*
8243 		 * whoever cleared the private bit is responsible
8244 		 * for the finish_ordered_io
8245 		 */
8246 		if (TestClearPagePrivate2(page)) {
8247 			struct btrfs_ordered_inode_tree *tree;
8248 			u64 new_len;
8249 
8250 			tree = &BTRFS_I(inode)->ordered_tree;
8251 
8252 			spin_lock_irq(&tree->lock);
8253 			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8254 			new_len = start - ordered->file_offset;
8255 			if (new_len < ordered->truncated_len)
8256 				ordered->truncated_len = new_len;
8257 			spin_unlock_irq(&tree->lock);
8258 
8259 			if (btrfs_dec_test_ordered_pending(inode, &ordered,
8260 							   start,
8261 							   end - start + 1, 1))
8262 				btrfs_finish_ordered_io(ordered);
8263 		}
8264 		btrfs_put_ordered_extent(ordered);
8265 		if (!inode_evicting) {
8266 			cached_state = NULL;
8267 			lock_extent_bits(tree, start, end,
8268 					 &cached_state);
8269 		}
8270 
8271 		start = end + 1;
8272 		if (start < page_end)
8273 			goto again;
8274 	}
8275 
8276 	/*
8277 	 * Qgroup reserved space handler
8278 	 * Page here will be either
8279 	 * 1) Already written to disk
8280 	 *    In this case, its reserved space is released from data rsv map
8281 	 *    and will be freed by delayed_ref handler finally.
8282 	 *    So even we call qgroup_free_data(), it won't decrease reserved
8283 	 *    space.
8284 	 * 2) Not written to disk
8285 	 *    This means the reserved space should be freed here. However,
8286 	 *    if a truncate invalidates the page (by clearing PageDirty)
8287 	 *    and the page is accounted for while allocating extent
8288 	 *    in btrfs_check_data_free_space() we let delayed_ref to
8289 	 *    free the entire extent.
8290 	 */
8291 	if (PageDirty(page))
8292 		btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
8293 	if (!inode_evicting) {
8294 		clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
8295 				 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
8296 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
8297 				 &cached_state);
8298 
8299 		__btrfs_releasepage(page, GFP_NOFS);
8300 	}
8301 
8302 	ClearPageChecked(page);
8303 	if (PagePrivate(page)) {
8304 		ClearPagePrivate(page);
8305 		set_page_private(page, 0);
8306 		put_page(page);
8307 	}
8308 }
8309 
8310 /*
8311  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8312  * called from a page fault handler when a page is first dirtied. Hence we must
8313  * be careful to check for EOF conditions here. We set the page up correctly
8314  * for a written page which means we get ENOSPC checking when writing into
8315  * holes and correct delalloc and unwritten extent mapping on filesystems that
8316  * support these features.
8317  *
8318  * We are not allowed to take the i_mutex here so we have to play games to
8319  * protect against truncate races as the page could now be beyond EOF.  Because
8320  * truncate_setsize() writes the inode size before removing pages, once we have
8321  * the page lock we can determine safely if the page is beyond EOF. If it is not
8322  * beyond EOF, then the page is guaranteed safe against truncation until we
8323  * unlock the page.
8324  */
8325 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8326 {
8327 	struct page *page = vmf->page;
8328 	struct inode *inode = file_inode(vmf->vma->vm_file);
8329 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8330 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8331 	struct btrfs_ordered_extent *ordered;
8332 	struct extent_state *cached_state = NULL;
8333 	struct extent_changeset *data_reserved = NULL;
8334 	char *kaddr;
8335 	unsigned long zero_start;
8336 	loff_t size;
8337 	vm_fault_t ret;
8338 	int ret2;
8339 	int reserved = 0;
8340 	u64 reserved_space;
8341 	u64 page_start;
8342 	u64 page_end;
8343 	u64 end;
8344 
8345 	reserved_space = PAGE_SIZE;
8346 
8347 	sb_start_pagefault(inode->i_sb);
8348 	page_start = page_offset(page);
8349 	page_end = page_start + PAGE_SIZE - 1;
8350 	end = page_end;
8351 
8352 	/*
8353 	 * Reserving delalloc space after obtaining the page lock can lead to
8354 	 * deadlock. For example, if a dirty page is locked by this function
8355 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8356 	 * dirty page write out, then the btrfs_writepage() function could
8357 	 * end up waiting indefinitely to get a lock on the page currently
8358 	 * being processed by btrfs_page_mkwrite() function.
8359 	 */
8360 	ret2 = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
8361 					   reserved_space);
8362 	if (!ret2) {
8363 		ret2 = file_update_time(vmf->vma->vm_file);
8364 		reserved = 1;
8365 	}
8366 	if (ret2) {
8367 		ret = vmf_error(ret2);
8368 		if (reserved)
8369 			goto out;
8370 		goto out_noreserve;
8371 	}
8372 
8373 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8374 again:
8375 	lock_page(page);
8376 	size = i_size_read(inode);
8377 
8378 	if ((page->mapping != inode->i_mapping) ||
8379 	    (page_start >= size)) {
8380 		/* page got truncated out from underneath us */
8381 		goto out_unlock;
8382 	}
8383 	wait_on_page_writeback(page);
8384 
8385 	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8386 	set_page_extent_mapped(page);
8387 
8388 	/*
8389 	 * we can't set the delalloc bits if there are pending ordered
8390 	 * extents.  Drop our locks and wait for them to finish
8391 	 */
8392 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8393 			PAGE_SIZE);
8394 	if (ordered) {
8395 		unlock_extent_cached(io_tree, page_start, page_end,
8396 				     &cached_state);
8397 		unlock_page(page);
8398 		btrfs_start_ordered_extent(inode, ordered, 1);
8399 		btrfs_put_ordered_extent(ordered);
8400 		goto again;
8401 	}
8402 
8403 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8404 		reserved_space = round_up(size - page_start,
8405 					  fs_info->sectorsize);
8406 		if (reserved_space < PAGE_SIZE) {
8407 			end = page_start + reserved_space - 1;
8408 			btrfs_delalloc_release_space(inode, data_reserved,
8409 					page_start, PAGE_SIZE - reserved_space,
8410 					true);
8411 		}
8412 	}
8413 
8414 	/*
8415 	 * page_mkwrite gets called when the page is firstly dirtied after it's
8416 	 * faulted in, but write(2) could also dirty a page and set delalloc
8417 	 * bits, thus in this case for space account reason, we still need to
8418 	 * clear any delalloc bits within this page range since we have to
8419 	 * reserve data&meta space before lock_page() (see above comments).
8420 	 */
8421 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8422 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8423 			  EXTENT_DEFRAG, 0, 0, &cached_state);
8424 
8425 	ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0,
8426 					&cached_state);
8427 	if (ret2) {
8428 		unlock_extent_cached(io_tree, page_start, page_end,
8429 				     &cached_state);
8430 		ret = VM_FAULT_SIGBUS;
8431 		goto out_unlock;
8432 	}
8433 
8434 	/* page is wholly or partially inside EOF */
8435 	if (page_start + PAGE_SIZE > size)
8436 		zero_start = offset_in_page(size);
8437 	else
8438 		zero_start = PAGE_SIZE;
8439 
8440 	if (zero_start != PAGE_SIZE) {
8441 		kaddr = kmap(page);
8442 		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
8443 		flush_dcache_page(page);
8444 		kunmap(page);
8445 	}
8446 	ClearPageChecked(page);
8447 	set_page_dirty(page);
8448 	SetPageUptodate(page);
8449 
8450 	BTRFS_I(inode)->last_trans = fs_info->generation;
8451 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8452 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8453 
8454 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
8455 
8456 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8457 	sb_end_pagefault(inode->i_sb);
8458 	extent_changeset_free(data_reserved);
8459 	return VM_FAULT_LOCKED;
8460 
8461 out_unlock:
8462 	unlock_page(page);
8463 out:
8464 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8465 	btrfs_delalloc_release_space(inode, data_reserved, page_start,
8466 				     reserved_space, (ret != 0));
8467 out_noreserve:
8468 	sb_end_pagefault(inode->i_sb);
8469 	extent_changeset_free(data_reserved);
8470 	return ret;
8471 }
8472 
8473 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
8474 {
8475 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8476 	struct btrfs_root *root = BTRFS_I(inode)->root;
8477 	struct btrfs_block_rsv *rsv;
8478 	int ret;
8479 	struct btrfs_trans_handle *trans;
8480 	u64 mask = fs_info->sectorsize - 1;
8481 	u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8482 
8483 	if (!skip_writeback) {
8484 		ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8485 					       (u64)-1);
8486 		if (ret)
8487 			return ret;
8488 	}
8489 
8490 	/*
8491 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8492 	 * things going on here:
8493 	 *
8494 	 * 1) We need to reserve space to update our inode.
8495 	 *
8496 	 * 2) We need to have something to cache all the space that is going to
8497 	 * be free'd up by the truncate operation, but also have some slack
8498 	 * space reserved in case it uses space during the truncate (thank you
8499 	 * very much snapshotting).
8500 	 *
8501 	 * And we need these to be separate.  The fact is we can use a lot of
8502 	 * space doing the truncate, and we have no earthly idea how much space
8503 	 * we will use, so we need the truncate reservation to be separate so it
8504 	 * doesn't end up using space reserved for updating the inode.  We also
8505 	 * need to be able to stop the transaction and start a new one, which
8506 	 * means we need to be able to update the inode several times, and we
8507 	 * have no idea of knowing how many times that will be, so we can't just
8508 	 * reserve 1 item for the entirety of the operation, so that has to be
8509 	 * done separately as well.
8510 	 *
8511 	 * So that leaves us with
8512 	 *
8513 	 * 1) rsv - for the truncate reservation, which we will steal from the
8514 	 * transaction reservation.
8515 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8516 	 * updating the inode.
8517 	 */
8518 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8519 	if (!rsv)
8520 		return -ENOMEM;
8521 	rsv->size = min_size;
8522 	rsv->failfast = 1;
8523 
8524 	/*
8525 	 * 1 for the truncate slack space
8526 	 * 1 for updating the inode.
8527 	 */
8528 	trans = btrfs_start_transaction(root, 2);
8529 	if (IS_ERR(trans)) {
8530 		ret = PTR_ERR(trans);
8531 		goto out;
8532 	}
8533 
8534 	/* Migrate the slack space for the truncate to our reserve */
8535 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8536 				      min_size, false);
8537 	BUG_ON(ret);
8538 
8539 	/*
8540 	 * So if we truncate and then write and fsync we normally would just
8541 	 * write the extents that changed, which is a problem if we need to
8542 	 * first truncate that entire inode.  So set this flag so we write out
8543 	 * all of the extents in the inode to the sync log so we're completely
8544 	 * safe.
8545 	 */
8546 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8547 	trans->block_rsv = rsv;
8548 
8549 	while (1) {
8550 		ret = btrfs_truncate_inode_items(trans, root, inode,
8551 						 inode->i_size,
8552 						 BTRFS_EXTENT_DATA_KEY);
8553 		trans->block_rsv = &fs_info->trans_block_rsv;
8554 		if (ret != -ENOSPC && ret != -EAGAIN)
8555 			break;
8556 
8557 		ret = btrfs_update_inode(trans, root, inode);
8558 		if (ret)
8559 			break;
8560 
8561 		btrfs_end_transaction(trans);
8562 		btrfs_btree_balance_dirty(fs_info);
8563 
8564 		trans = btrfs_start_transaction(root, 2);
8565 		if (IS_ERR(trans)) {
8566 			ret = PTR_ERR(trans);
8567 			trans = NULL;
8568 			break;
8569 		}
8570 
8571 		btrfs_block_rsv_release(fs_info, rsv, -1);
8572 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8573 					      rsv, min_size, false);
8574 		BUG_ON(ret);	/* shouldn't happen */
8575 		trans->block_rsv = rsv;
8576 	}
8577 
8578 	/*
8579 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8580 	 * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know
8581 	 * we've truncated everything except the last little bit, and can do
8582 	 * btrfs_truncate_block and then update the disk_i_size.
8583 	 */
8584 	if (ret == NEED_TRUNCATE_BLOCK) {
8585 		btrfs_end_transaction(trans);
8586 		btrfs_btree_balance_dirty(fs_info);
8587 
8588 		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
8589 		if (ret)
8590 			goto out;
8591 		trans = btrfs_start_transaction(root, 1);
8592 		if (IS_ERR(trans)) {
8593 			ret = PTR_ERR(trans);
8594 			goto out;
8595 		}
8596 		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
8597 	}
8598 
8599 	if (trans) {
8600 		int ret2;
8601 
8602 		trans->block_rsv = &fs_info->trans_block_rsv;
8603 		ret2 = btrfs_update_inode(trans, root, inode);
8604 		if (ret2 && !ret)
8605 			ret = ret2;
8606 
8607 		ret2 = btrfs_end_transaction(trans);
8608 		if (ret2 && !ret)
8609 			ret = ret2;
8610 		btrfs_btree_balance_dirty(fs_info);
8611 	}
8612 out:
8613 	btrfs_free_block_rsv(fs_info, rsv);
8614 
8615 	return ret;
8616 }
8617 
8618 /*
8619  * create a new subvolume directory/inode (helper for the ioctl).
8620  */
8621 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8622 			     struct btrfs_root *new_root,
8623 			     struct btrfs_root *parent_root,
8624 			     u64 new_dirid)
8625 {
8626 	struct inode *inode;
8627 	int err;
8628 	u64 index = 0;
8629 
8630 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
8631 				new_dirid, new_dirid,
8632 				S_IFDIR | (~current_umask() & S_IRWXUGO),
8633 				&index);
8634 	if (IS_ERR(inode))
8635 		return PTR_ERR(inode);
8636 	inode->i_op = &btrfs_dir_inode_operations;
8637 	inode->i_fop = &btrfs_dir_file_operations;
8638 
8639 	set_nlink(inode, 1);
8640 	btrfs_i_size_write(BTRFS_I(inode), 0);
8641 	unlock_new_inode(inode);
8642 
8643 	err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
8644 	if (err)
8645 		btrfs_err(new_root->fs_info,
8646 			  "error inheriting subvolume %llu properties: %d",
8647 			  new_root->root_key.objectid, err);
8648 
8649 	err = btrfs_update_inode(trans, new_root, inode);
8650 
8651 	iput(inode);
8652 	return err;
8653 }
8654 
8655 struct inode *btrfs_alloc_inode(struct super_block *sb)
8656 {
8657 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8658 	struct btrfs_inode *ei;
8659 	struct inode *inode;
8660 
8661 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
8662 	if (!ei)
8663 		return NULL;
8664 
8665 	ei->root = NULL;
8666 	ei->generation = 0;
8667 	ei->last_trans = 0;
8668 	ei->last_sub_trans = 0;
8669 	ei->logged_trans = 0;
8670 	ei->delalloc_bytes = 0;
8671 	ei->new_delalloc_bytes = 0;
8672 	ei->defrag_bytes = 0;
8673 	ei->disk_i_size = 0;
8674 	ei->flags = 0;
8675 	ei->csum_bytes = 0;
8676 	ei->index_cnt = (u64)-1;
8677 	ei->dir_index = 0;
8678 	ei->last_unlink_trans = 0;
8679 	ei->last_log_commit = 0;
8680 
8681 	spin_lock_init(&ei->lock);
8682 	ei->outstanding_extents = 0;
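	/*
	 * Inodes created by the sanity tests (BTRFS_TEST_MAGIC) don't have a
	 * fully set up fs_info, so skip the block rsv for them.
	 */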
8683 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8684 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8685 					      BTRFS_BLOCK_RSV_DELALLOC);
8686 	ei->runtime_flags = 0;
8687 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8688 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8689 
8690 	ei->delayed_node = NULL;
8691 
8692 	ei->i_otime.tv_sec = 0;
8693 	ei->i_otime.tv_nsec = 0;
8694 
8695 	inode = &ei->vfs_inode;
8696 	extent_map_tree_init(&ei->extent_tree);
8697 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
8698 	extent_io_tree_init(fs_info, &ei->io_failure_tree,
8699 			    IO_TREE_INODE_IO_FAILURE, inode);
8700 	ei->io_tree.track_uptodate = true;
8701 	ei->io_failure_tree.track_uptodate = true;
8702 	atomic_set(&ei->sync_writers, 0);
8703 	mutex_init(&ei->log_mutex);
8704 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8705 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8706 	INIT_LIST_HEAD(&ei->delayed_iput);
8707 	RB_CLEAR_NODE(&ei->rb_node);
8708 	init_rwsem(&ei->dio_sem);
8709 
8710 	return inode;
8711 }
8712 
8713 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8714 void btrfs_test_destroy_inode(struct inode *inode)
8715 {
8716 	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
8717 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8718 }
8719 #endif
8720 
8721 void btrfs_free_inode(struct inode *inode)
8722 {
8723 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8724 }
8725 
8726 void btrfs_destroy_inode(struct inode *inode)
8727 {
8728 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8729 	struct btrfs_ordered_extent *ordered;
8730 	struct btrfs_root *root = BTRFS_I(inode)->root;
8731 
8732 	WARN_ON(!hlist_empty(&inode->i_dentry));
8733 	WARN_ON(inode->i_data.nrpages);
8734 	WARN_ON(BTRFS_I(inode)->block_rsv.reserved);
8735 	WARN_ON(BTRFS_I(inode)->block_rsv.size);
8736 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
8737 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
8738 	WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
8739 	WARN_ON(BTRFS_I(inode)->csum_bytes);
8740 	WARN_ON(BTRFS_I(inode)->defrag_bytes);
8741 
8742 	/*
	 * This can happen when we create an inode, but somebody else also
8744 	 * created the same inode and we need to destroy the one we already
8745 	 * created.
8746 	 */
8747 	if (!root)
8748 		return;
8749 
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		btrfs_err(fs_info,
			  "found ordered extent %llu %llu on inode cleanup",
			  ordered->file_offset, ordered->num_bytes);
		btrfs_remove_ordered_extent(inode, ordered);
		/*
		 * Drop the reference taken by the lookup above, plus the base
		 * reference, since this ordered extent will never be
		 * completed now.
		 */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
8763 	btrfs_qgroup_check_reserved_leak(inode);
8764 	inode_tree_del(inode);
8765 	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
8766 }
8767 
8768 int btrfs_drop_inode(struct inode *inode)
8769 {
8770 	struct btrfs_root *root = BTRFS_I(inode)->root;
8771 
8772 	if (root == NULL)
8773 		return 1;
8774 
	/* The snapshot/subvolume tree is in the process of being deleted. */
8776 	if (btrfs_root_refs(&root->root_item) == 0)
8777 		return 1;
8778 	else
8779 		return generic_drop_inode(inode);
8780 }
8781 
8782 static void init_once(void *foo)
8783 {
8784 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
8785 
8786 	inode_init_once(&ei->vfs_inode);
8787 }
8788 
8789 void __cold btrfs_destroy_cachep(void)
8790 {
8791 	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the caches.
8794 	 */
8795 	rcu_barrier();
8796 	kmem_cache_destroy(btrfs_inode_cachep);
8797 	kmem_cache_destroy(btrfs_trans_handle_cachep);
8798 	kmem_cache_destroy(btrfs_path_cachep);
8799 	kmem_cache_destroy(btrfs_free_space_cachep);
8800 	kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
8801 }
8802 
8803 int __init btrfs_init_cachep(void)
8804 {
8805 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8806 			sizeof(struct btrfs_inode), 0,
8807 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8808 			init_once);
8809 	if (!btrfs_inode_cachep)
8810 		goto fail;
8811 
8812 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
8813 			sizeof(struct btrfs_trans_handle), 0,
8814 			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
8815 	if (!btrfs_trans_handle_cachep)
8816 		goto fail;
8817 
8818 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
8819 			sizeof(struct btrfs_path), 0,
8820 			SLAB_MEM_SPREAD, NULL);
8821 	if (!btrfs_path_cachep)
8822 		goto fail;
8823 
8824 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
8825 			sizeof(struct btrfs_free_space), 0,
8826 			SLAB_MEM_SPREAD, NULL);
8827 	if (!btrfs_free_space_cachep)
8828 		goto fail;
8829 
8830 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
8831 							PAGE_SIZE, PAGE_SIZE,
8832 							SLAB_RED_ZONE, NULL);
8833 	if (!btrfs_free_space_bitmap_cachep)
8834 		goto fail;
8835 
8836 	return 0;
8837 fail:
8838 	btrfs_destroy_cachep();
8839 	return -ENOMEM;
8840 }
8841 
8842 static int btrfs_getattr(const struct path *path, struct kstat *stat,
8843 			 u32 request_mask, unsigned int flags)
8844 {
8845 	u64 delalloc_bytes;
8846 	struct inode *inode = d_inode(path->dentry);
8847 	u32 blocksize = inode->i_sb->s_blocksize;
8848 	u32 bi_flags = BTRFS_I(inode)->flags;
8849 
8850 	stat->result_mask |= STATX_BTIME;
8851 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
8852 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
8853 	if (bi_flags & BTRFS_INODE_APPEND)
8854 		stat->attributes |= STATX_ATTR_APPEND;
8855 	if (bi_flags & BTRFS_INODE_COMPRESS)
8856 		stat->attributes |= STATX_ATTR_COMPRESSED;
8857 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8858 		stat->attributes |= STATX_ATTR_IMMUTABLE;
8859 	if (bi_flags & BTRFS_INODE_NODUMP)
8860 		stat->attributes |= STATX_ATTR_NODUMP;
8861 
8862 	stat->attributes_mask |= (STATX_ATTR_APPEND |
8863 				  STATX_ATTR_COMPRESSED |
8864 				  STATX_ATTR_IMMUTABLE |
8865 				  STATX_ATTR_NODUMP);
8866 
8867 	generic_fillattr(inode, stat);
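	/*
	 * Use the subvolume's anonymous device number so that st_dev differs
	 * between subvolumes of the same filesystem.
	 */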
8868 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8869 
8870 	spin_lock(&BTRFS_I(inode)->lock);
8871 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8872 	spin_unlock(&BTRFS_I(inode)->lock);
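	/*
	 * Count delalloc that has no blocks allocated yet towards st_blocks,
	 * so a file with dirty data doesn't transiently report fewer blocks
	 * than the data will eventually occupy.
	 */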
8873 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
8874 			ALIGN(delalloc_bytes, blocksize)) >> 9;
8875 	return 0;
8876 }
8877 
8878 static int btrfs_rename_exchange(struct inode *old_dir,
8879 			      struct dentry *old_dentry,
8880 			      struct inode *new_dir,
8881 			      struct dentry *new_dentry)
8882 {
8883 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
8884 	struct btrfs_trans_handle *trans;
8885 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8886 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8887 	struct inode *new_inode = new_dentry->d_inode;
8888 	struct inode *old_inode = old_dentry->d_inode;
8889 	struct timespec64 ctime = current_time(old_inode);
8890 	struct dentry *parent;
8891 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8892 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8893 	u64 old_idx = 0;
8894 	u64 new_idx = 0;
8895 	int ret;
8896 	bool root_log_pinned = false;
8897 	bool dest_log_pinned = false;
8898 	struct btrfs_log_ctx ctx_root;
8899 	struct btrfs_log_ctx ctx_dest;
8900 	bool sync_log_root = false;
8901 	bool sync_log_dest = false;
8902 	bool commit_transaction = false;
8903 
	/* Renames across subvolumes are only allowed for subvolume links. */
8905 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8906 		return -EXDEV;
8907 
8908 	btrfs_init_log_ctx(&ctx_root, old_inode);
8909 	btrfs_init_log_ctx(&ctx_dest, new_inode);
8910 
8911 	/* close the race window with snapshot create/destroy ioctl */
8912 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8913 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8914 		down_read(&fs_info->subvol_sem);
8915 
8916 	/*
8917 	 * We want to reserve the absolute worst case amount of items.  So if
8918 	 * both inodes are subvols and we need to unlink them then that would
8919 	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they are normal
8921 	 * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
8922 	 * should cover the worst case number of items we'll modify.
8923 	 */
8924 	trans = btrfs_start_transaction(root, 12);
8925 	if (IS_ERR(trans)) {
8926 		ret = PTR_ERR(trans);
8927 		goto out_notrans;
8928 	}
8929 
8930 	if (dest != root)
8931 		btrfs_record_root_in_trans(trans, dest);
8932 
8933 	/*
8934 	 * We need to find a free sequence number both in the source and
8935 	 * in the destination directory for the exchange.
8936 	 */
8937 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8938 	if (ret)
8939 		goto out_fail;
8940 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8941 	if (ret)
8942 		goto out_fail;
8943 
8944 	BTRFS_I(old_inode)->dir_index = 0ULL;
8945 	BTRFS_I(new_inode)->dir_index = 0ULL;
8946 
8947 	/* Reference for the source. */
8948 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8949 		/* force full log commit if subvolume involved. */
8950 		btrfs_set_log_full_commit(trans);
8951 	} else {
8952 		btrfs_pin_log_trans(root);
8953 		root_log_pinned = true;
8954 		ret = btrfs_insert_inode_ref(trans, dest,
8955 					     new_dentry->d_name.name,
8956 					     new_dentry->d_name.len,
8957 					     old_ino,
8958 					     btrfs_ino(BTRFS_I(new_dir)),
8959 					     old_idx);
8960 		if (ret)
8961 			goto out_fail;
8962 	}
8963 
8964 	/* And now for the dest. */
8965 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8966 		/* force full log commit if subvolume involved. */
8967 		btrfs_set_log_full_commit(trans);
8968 	} else {
8969 		btrfs_pin_log_trans(dest);
8970 		dest_log_pinned = true;
8971 		ret = btrfs_insert_inode_ref(trans, root,
8972 					     old_dentry->d_name.name,
8973 					     old_dentry->d_name.len,
8974 					     new_ino,
8975 					     btrfs_ino(BTRFS_I(old_dir)),
8976 					     new_idx);
8977 		if (ret)
8978 			goto out_fail;
8979 	}
8980 
8981 	/* Update inode version and ctime/mtime. */
8982 	inode_inc_iversion(old_dir);
8983 	inode_inc_iversion(new_dir);
8984 	inode_inc_iversion(old_inode);
8985 	inode_inc_iversion(new_inode);
8986 	old_dir->i_ctime = old_dir->i_mtime = ctime;
8987 	new_dir->i_ctime = new_dir->i_mtime = ctime;
8988 	old_inode->i_ctime = ctime;
8989 	new_inode->i_ctime = ctime;
8990 
8991 	if (old_dentry->d_parent != new_dentry->d_parent) {
8992 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8993 				BTRFS_I(old_inode), 1);
8994 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8995 				BTRFS_I(new_inode), 1);
8996 	}
8997 
8998 	/* src is a subvolume */
8999 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9000 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9001 	} else { /* src is an inode */
9002 		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9003 					   BTRFS_I(old_dentry->d_inode),
9004 					   old_dentry->d_name.name,
9005 					   old_dentry->d_name.len);
9006 		if (!ret)
9007 			ret = btrfs_update_inode(trans, root, old_inode);
9008 	}
9009 	if (ret) {
9010 		btrfs_abort_transaction(trans, ret);
9011 		goto out_fail;
9012 	}
9013 
9014 	/* dest is a subvolume */
9015 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9016 		ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9017 	} else { /* dest is an inode */
9018 		ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9019 					   BTRFS_I(new_dentry->d_inode),
9020 					   new_dentry->d_name.name,
9021 					   new_dentry->d_name.len);
9022 		if (!ret)
9023 			ret = btrfs_update_inode(trans, dest, new_inode);
9024 	}
9025 	if (ret) {
9026 		btrfs_abort_transaction(trans, ret);
9027 		goto out_fail;
9028 	}
9029 
9030 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9031 			     new_dentry->d_name.name,
9032 			     new_dentry->d_name.len, 0, old_idx);
9033 	if (ret) {
9034 		btrfs_abort_transaction(trans, ret);
9035 		goto out_fail;
9036 	}
9037 
9038 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9039 			     old_dentry->d_name.name,
9040 			     old_dentry->d_name.len, 0, new_idx);
9041 	if (ret) {
9042 		btrfs_abort_transaction(trans, ret);
9043 		goto out_fail;
9044 	}
9045 
9046 	if (old_inode->i_nlink == 1)
9047 		BTRFS_I(old_inode)->dir_index = old_idx;
9048 	if (new_inode->i_nlink == 1)
9049 		BTRFS_I(new_inode)->dir_index = new_idx;
9050 
9051 	if (root_log_pinned) {
9052 		parent = new_dentry->d_parent;
9053 		ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9054 					 BTRFS_I(old_dir), parent,
9055 					 false, &ctx_root);
9056 		if (ret == BTRFS_NEED_LOG_SYNC)
9057 			sync_log_root = true;
9058 		else if (ret == BTRFS_NEED_TRANS_COMMIT)
9059 			commit_transaction = true;
9060 		ret = 0;
9061 		btrfs_end_log_trans(root);
9062 		root_log_pinned = false;
9063 	}
9064 	if (dest_log_pinned) {
9065 		if (!commit_transaction) {
9066 			parent = old_dentry->d_parent;
9067 			ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
9068 						 BTRFS_I(new_dir), parent,
9069 						 false, &ctx_dest);
9070 			if (ret == BTRFS_NEED_LOG_SYNC)
9071 				sync_log_dest = true;
9072 			else if (ret == BTRFS_NEED_TRANS_COMMIT)
9073 				commit_transaction = true;
9074 			ret = 0;
9075 		}
9076 		btrfs_end_log_trans(dest);
9077 		dest_log_pinned = false;
9078 	}
9079 out_fail:
9080 	/*
	 * If we have pinned a log and an error happened, we unpin tasks
	 * trying to sync the log and force them to fall back to a transaction
	 * commit if the log currently contains any of the inodes involved in
	 * this rename operation (to ensure we do not persist a log with an
	 * inconsistent state for any of these inodes or cause any
	 * inconsistencies when replayed). If the transaction was aborted, the
	 * abort reason is propagated to userspace when attempting to commit
	 * the transaction. If the log does not contain any of these inodes, we
	 * allow the tasks to sync it.
9090 	 */
9091 	if (ret && (root_log_pinned || dest_log_pinned)) {
9092 		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9093 		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9094 		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9095 		    (new_inode &&
9096 		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9097 			btrfs_set_log_full_commit(trans);
9098 
9099 		if (root_log_pinned) {
9100 			btrfs_end_log_trans(root);
9101 			root_log_pinned = false;
9102 		}
9103 		if (dest_log_pinned) {
9104 			btrfs_end_log_trans(dest);
9105 			dest_log_pinned = false;
9106 		}
9107 	}
9108 	if (!ret && sync_log_root && !commit_transaction) {
9109 		ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
9110 				     &ctx_root);
9111 		if (ret)
9112 			commit_transaction = true;
9113 	}
9114 	if (!ret && sync_log_dest && !commit_transaction) {
9115 		ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
9116 				     &ctx_dest);
9117 		if (ret)
9118 			commit_transaction = true;
9119 	}
9120 	if (commit_transaction) {
9121 		/*
9122 		 * We may have set commit_transaction when logging the new name
9123 		 * in the destination root, in which case we left the source
		 * root context in the list of log contexts. So make sure we
9125 		 * remove it to avoid invalid memory accesses, since the context
9126 		 * was allocated in our stack frame.
9127 		 */
9128 		if (sync_log_root) {
9129 			mutex_lock(&root->log_mutex);
9130 			list_del_init(&ctx_root.list);
9131 			mutex_unlock(&root->log_mutex);
9132 		}
9133 		ret = btrfs_commit_transaction(trans);
9134 	} else {
9135 		int ret2;
9136 
9137 		ret2 = btrfs_end_transaction(trans);
9138 		ret = ret ? ret : ret2;
9139 	}
9140 out_notrans:
9141 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9142 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
9143 		up_read(&fs_info->subvol_sem);
9144 
9145 	ASSERT(list_empty(&ctx_root.list));
9146 	ASSERT(list_empty(&ctx_dest.list));
9147 
9148 	return ret;
9149 }
9150 
9151 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9152 				     struct btrfs_root *root,
9153 				     struct inode *dir,
9154 				     struct dentry *dentry)
9155 {
9156 	int ret;
9157 	struct inode *inode;
9158 	u64 objectid;
9159 	u64 index;
9160 
9161 	ret = btrfs_find_free_ino(root, &objectid);
9162 	if (ret)
9163 		return ret;
9164 
9165 	inode = btrfs_new_inode(trans, root, dir,
9166 				dentry->d_name.name,
9167 				dentry->d_name.len,
9168 				btrfs_ino(BTRFS_I(dir)),
9169 				objectid,
9170 				S_IFCHR | WHITEOUT_MODE,
9171 				&index);
9172 
9173 	if (IS_ERR(inode)) {
9174 		ret = PTR_ERR(inode);
9175 		return ret;
9176 	}
9177 
9178 	inode->i_op = &btrfs_special_inode_operations;
9179 	init_special_inode(inode, inode->i_mode,
9180 		WHITEOUT_DEV);
9181 
9182 	ret = btrfs_init_inode_security(trans, inode, dir,
9183 				&dentry->d_name);
9184 	if (ret)
9185 		goto out;
9186 
9187 	ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9188 				BTRFS_I(inode), 0, index);
9189 	if (ret)
9190 		goto out;
9191 
9192 	ret = btrfs_update_inode(trans, root, inode);
9193 out:
9194 	unlock_new_inode(inode);
9195 	if (ret)
9196 		inode_dec_link_count(inode);
9197 	iput(inode);
9198 
9199 	return ret;
9200 }
9201 
9202 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9203 			   struct inode *new_dir, struct dentry *new_dentry,
9204 			   unsigned int flags)
9205 {
9206 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9207 	struct btrfs_trans_handle *trans;
9208 	unsigned int trans_num_items;
9209 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
9210 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9211 	struct inode *new_inode = d_inode(new_dentry);
9212 	struct inode *old_inode = d_inode(old_dentry);
9213 	u64 index = 0;
9214 	int ret;
9215 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9216 	bool log_pinned = false;
9217 	struct btrfs_log_ctx ctx;
9218 	bool sync_log = false;
9219 	bool commit_transaction = false;
9220 
9221 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9222 		return -EPERM;
9223 
	/* Renames across subvolumes are only allowed for subvolume links. */
9225 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9226 		return -EXDEV;
9227 
9228 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9229 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9230 		return -ENOTEMPTY;
9231 
9232 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9233 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9234 		return -ENOTEMPTY;

	/* Check for collisions, even if the name isn't there */
9238 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9239 			     new_dentry->d_name.name,
9240 			     new_dentry->d_name.len);
9241 
9242 	if (ret) {
9243 		if (ret == -EEXIST) {
			/* We shouldn't get -EEXIST without a new_inode */
			if (WARN_ON(!new_inode))
				return ret;
9249 		} else {
9250 			/* maybe -EOVERFLOW */
9251 			return ret;
9252 		}
9253 	}
9254 	ret = 0;
9255 
9256 	/*
	 * We're using rename to replace one file with another.  Start IO on it
	 * now so we don't add too much work to the end of the transaction.
9259 	 */
9260 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9261 		filemap_flush(old_inode->i_mapping);
9262 
	/* Close the race window with the snapshot create/destroy ioctl */
9264 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9265 		down_read(&fs_info->subvol_sem);
9266 	/*
9267 	 * We want to reserve the absolute worst case amount of items.  So if
9268 	 * both inodes are subvols and we need to unlink them then that would
9269 	 * require 4 item modifications, but if they are both normal inodes it
9270 	 * would require 5 item modifications, so we'll assume they are normal
9271 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9272 	 * should cover the worst case number of items we'll modify.
	 * If our rename has the whiteout flag, we need 5 more units for the
9274 	 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9275 	 * when selinux is enabled).
9276 	 */
9277 	trans_num_items = 11;
9278 	if (flags & RENAME_WHITEOUT)
9279 		trans_num_items += 5;
9280 	trans = btrfs_start_transaction(root, trans_num_items);
9281 	if (IS_ERR(trans)) {
9282 		ret = PTR_ERR(trans);
9283 		goto out_notrans;
9284 	}
9285 
9286 	if (dest != root)
9287 		btrfs_record_root_in_trans(trans, dest);
9288 
9289 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9290 	if (ret)
9291 		goto out_fail;
9292 
9293 	BTRFS_I(old_inode)->dir_index = 0ULL;
9294 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9295 		/* force full log commit if subvolume involved. */
9296 		btrfs_set_log_full_commit(trans);
9297 	} else {
9298 		btrfs_pin_log_trans(root);
9299 		log_pinned = true;
9300 		ret = btrfs_insert_inode_ref(trans, dest,
9301 					     new_dentry->d_name.name,
9302 					     new_dentry->d_name.len,
9303 					     old_ino,
9304 					     btrfs_ino(BTRFS_I(new_dir)), index);
9305 		if (ret)
9306 			goto out_fail;
9307 	}
9308 
9309 	inode_inc_iversion(old_dir);
9310 	inode_inc_iversion(new_dir);
9311 	inode_inc_iversion(old_inode);
9312 	old_dir->i_ctime = old_dir->i_mtime =
9313 	new_dir->i_ctime = new_dir->i_mtime =
9314 	old_inode->i_ctime = current_time(old_dir);
9315 
9316 	if (old_dentry->d_parent != new_dentry->d_parent)
9317 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9318 				BTRFS_I(old_inode), 1);
9319 
9320 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9321 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9322 	} else {
9323 		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9324 					BTRFS_I(d_inode(old_dentry)),
9325 					old_dentry->d_name.name,
9326 					old_dentry->d_name.len);
9327 		if (!ret)
9328 			ret = btrfs_update_inode(trans, root, old_inode);
9329 	}
9330 	if (ret) {
9331 		btrfs_abort_transaction(trans, ret);
9332 		goto out_fail;
9333 	}
9334 
9335 	if (new_inode) {
9336 		inode_inc_iversion(new_inode);
9337 		new_inode->i_ctime = current_time(new_inode);
9338 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9339 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9340 			ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9341 			BUG_ON(new_inode->i_nlink == 0);
9342 		} else {
9343 			ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9344 						 BTRFS_I(d_inode(new_dentry)),
9345 						 new_dentry->d_name.name,
9346 						 new_dentry->d_name.len);
9347 		}
9348 		if (!ret && new_inode->i_nlink == 0)
9349 			ret = btrfs_orphan_add(trans,
9350 					BTRFS_I(d_inode(new_dentry)));
9351 		if (ret) {
9352 			btrfs_abort_transaction(trans, ret);
9353 			goto out_fail;
9354 		}
9355 	}
9356 
9357 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9358 			     new_dentry->d_name.name,
9359 			     new_dentry->d_name.len, 0, index);
9360 	if (ret) {
9361 		btrfs_abort_transaction(trans, ret);
9362 		goto out_fail;
9363 	}
9364 
9365 	if (old_inode->i_nlink == 1)
9366 		BTRFS_I(old_inode)->dir_index = index;
9367 
9368 	if (log_pinned) {
9369 		struct dentry *parent = new_dentry->d_parent;
9370 
9371 		btrfs_init_log_ctx(&ctx, old_inode);
9372 		ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9373 					 BTRFS_I(old_dir), parent,
9374 					 false, &ctx);
9375 		if (ret == BTRFS_NEED_LOG_SYNC)
9376 			sync_log = true;
9377 		else if (ret == BTRFS_NEED_TRANS_COMMIT)
9378 			commit_transaction = true;
9379 		ret = 0;
9380 		btrfs_end_log_trans(root);
9381 		log_pinned = false;
9382 	}
9383 
9384 	if (flags & RENAME_WHITEOUT) {
9385 		ret = btrfs_whiteout_for_rename(trans, root, old_dir,
9386 						old_dentry);
9387 
9388 		if (ret) {
9389 			btrfs_abort_transaction(trans, ret);
9390 			goto out_fail;
9391 		}
9392 	}
9393 out_fail:
9394 	/*
	 * If we have pinned the log and an error happened, we unpin tasks
	 * trying to sync the log and force them to fall back to a transaction
	 * commit if the log currently contains any of the inodes involved in
	 * this rename operation (to ensure we do not persist a log with an
	 * inconsistent state for any of these inodes or cause any
	 * inconsistencies when replayed). If the transaction was aborted, the
	 * abort reason is propagated to userspace when attempting to commit
	 * the transaction. If the log does not contain any of these inodes, we
	 * allow the tasks to sync it.
9404 	 */
9405 	if (ret && log_pinned) {
9406 		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9407 		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9408 		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9409 		    (new_inode &&
9410 		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9411 			btrfs_set_log_full_commit(trans);
9412 
9413 		btrfs_end_log_trans(root);
9414 		log_pinned = false;
9415 	}
9416 	if (!ret && sync_log) {
9417 		ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
9418 		if (ret)
9419 			commit_transaction = true;
9420 	}
9421 	if (commit_transaction) {
9422 		ret = btrfs_commit_transaction(trans);
9423 	} else {
9424 		int ret2;
9425 
9426 		ret2 = btrfs_end_transaction(trans);
9427 		ret = ret ? ret : ret2;
9428 	}
9429 out_notrans:
9430 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9431 		up_read(&fs_info->subvol_sem);
9432 
9433 	return ret;
9434 }
9435 
9436 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9437 			 struct inode *new_dir, struct dentry *new_dentry,
9438 			 unsigned int flags)
9439 {
9440 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9441 		return -EINVAL;
9442 
9443 	if (flags & RENAME_EXCHANGE)
9444 		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9445 					  new_dentry);
9446 
9447 	return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
9448 }
9449 
9450 struct btrfs_delalloc_work {
9451 	struct inode *inode;
9452 	struct completion completion;
9453 	struct list_head list;
9454 	struct btrfs_work work;
9455 };
9456 
9457 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9458 {
9459 	struct btrfs_delalloc_work *delalloc_work;
9460 	struct inode *inode;
9461 
9462 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9463 				     work);
9464 	inode = delalloc_work->inode;
9465 	filemap_flush(inode->i_mapping);
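	/*
	 * If the first flush kicked off async (compressed) extents, flush
	 * once more so that the pages dirtied by that work are submitted too.
	 */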
9466 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9467 				&BTRFS_I(inode)->runtime_flags))
9468 		filemap_flush(inode->i_mapping);
9469 
9470 	iput(inode);
9471 	complete(&delalloc_work->completion);
9472 }
9473 
9474 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9475 {
9476 	struct btrfs_delalloc_work *work;
9477 
9478 	work = kmalloc(sizeof(*work), GFP_NOFS);
9479 	if (!work)
9480 		return NULL;
9481 
9482 	init_completion(&work->completion);
9483 	INIT_LIST_HEAD(&work->list);
9484 	work->inode = inode;
9485 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
9486 
9487 	return work;
9488 }
9489 
9490 /*
 * Some fairly slow code that needs optimization. This walks the list
9492  * of all the inodes with pending delalloc and forces them to disk.
9493  */
9494 static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
9495 {
9496 	struct btrfs_inode *binode;
9497 	struct inode *inode;
9498 	struct btrfs_delalloc_work *work, *next;
9499 	struct list_head works;
9500 	struct list_head splice;
9501 	int ret = 0;
9502 
9503 	INIT_LIST_HEAD(&works);
9504 	INIT_LIST_HEAD(&splice);
9505 
9506 	mutex_lock(&root->delalloc_mutex);
9507 	spin_lock(&root->delalloc_lock);
9508 	list_splice_init(&root->delalloc_inodes, &splice);
9509 	while (!list_empty(&splice)) {
9510 		binode = list_entry(splice.next, struct btrfs_inode,
9511 				    delalloc_inodes);
9512 
9513 		list_move_tail(&binode->delalloc_inodes,
9514 			       &root->delalloc_inodes);
9515 		inode = igrab(&binode->vfs_inode);
9516 		if (!inode) {
9517 			cond_resched_lock(&root->delalloc_lock);
9518 			continue;
9519 		}
9520 		spin_unlock(&root->delalloc_lock);
9521 
9522 		if (snapshot)
9523 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9524 				&binode->runtime_flags);
9525 		work = btrfs_alloc_delalloc_work(inode);
9526 		if (!work) {
9527 			iput(inode);
9528 			ret = -ENOMEM;
9529 			goto out;
9530 		}
9531 		list_add_tail(&work->list, &works);
9532 		btrfs_queue_work(root->fs_info->flush_workers,
9533 				 &work->work);
9534 		ret++;
9535 		if (nr != -1 && ret >= nr)
9536 			goto out;
9537 		cond_resched();
9538 		spin_lock(&root->delalloc_lock);
9539 	}
9540 	spin_unlock(&root->delalloc_lock);
9541 
9542 out:
9543 	list_for_each_entry_safe(work, next, &works, list) {
9544 		list_del_init(&work->list);
9545 		wait_for_completion(&work->completion);
9546 		kfree(work);
9547 	}
9548 
9549 	if (!list_empty(&splice)) {
9550 		spin_lock(&root->delalloc_lock);
9551 		list_splice_tail(&splice, &root->delalloc_inodes);
9552 		spin_unlock(&root->delalloc_lock);
9553 	}
9554 	mutex_unlock(&root->delalloc_mutex);
9555 	return ret;
9556 }
9557 
9558 int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
9559 {
9560 	struct btrfs_fs_info *fs_info = root->fs_info;
9561 	int ret;
9562 
9563 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9564 		return -EROFS;
9565 
9566 	ret = start_delalloc_inodes(root, -1, true);
9567 	if (ret > 0)
9568 		ret = 0;
9569 	return ret;
9570 }
9571 
9572 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
9573 {
9574 	struct btrfs_root *root;
9575 	struct list_head splice;
9576 	int ret;
9577 
9578 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9579 		return -EROFS;
9580 
9581 	INIT_LIST_HEAD(&splice);
9582 
9583 	mutex_lock(&fs_info->delalloc_root_mutex);
9584 	spin_lock(&fs_info->delalloc_root_lock);
9585 	list_splice_init(&fs_info->delalloc_roots, &splice);
9586 	while (!list_empty(&splice) && nr) {
9587 		root = list_first_entry(&splice, struct btrfs_root,
9588 					delalloc_root);
9589 		root = btrfs_grab_fs_root(root);
9590 		BUG_ON(!root);
9591 		list_move_tail(&root->delalloc_root,
9592 			       &fs_info->delalloc_roots);
9593 		spin_unlock(&fs_info->delalloc_root_lock);
9594 
9595 		ret = start_delalloc_inodes(root, nr, false);
9596 		btrfs_put_fs_root(root);
9597 		if (ret < 0)
9598 			goto out;
9599 
9600 		if (nr != -1) {
9601 			nr -= ret;
9602 			WARN_ON(nr < 0);
9603 		}
9604 		spin_lock(&fs_info->delalloc_root_lock);
9605 	}
9606 	spin_unlock(&fs_info->delalloc_root_lock);
9607 
9608 	ret = 0;
9609 out:
9610 	if (!list_empty(&splice)) {
9611 		spin_lock(&fs_info->delalloc_root_lock);
9612 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9613 		spin_unlock(&fs_info->delalloc_root_lock);
9614 	}
9615 	mutex_unlock(&fs_info->delalloc_root_mutex);
9616 	return ret;
9617 }
9618 
9619 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9620 			 const char *symname)
9621 {
9622 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9623 	struct btrfs_trans_handle *trans;
9624 	struct btrfs_root *root = BTRFS_I(dir)->root;
9625 	struct btrfs_path *path;
9626 	struct btrfs_key key;
9627 	struct inode *inode = NULL;
9628 	int err;
9629 	u64 objectid;
9630 	u64 index = 0;
9631 	int name_len;
9632 	int datasize;
9633 	unsigned long ptr;
9634 	struct btrfs_file_extent_item *ei;
9635 	struct extent_buffer *leaf;
9636 
9637 	name_len = strlen(symname);
9638 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9639 		return -ENAMETOOLONG;
9640 
9641 	/*
9642 	 * 2 items for inode item and ref
9643 	 * 2 items for dir items
9644 	 * 1 item for updating parent inode item
9645 	 * 1 item for the inline extent item
9646 	 * 1 item for xattr if selinux is on
9647 	 */
9648 	trans = btrfs_start_transaction(root, 7);
9649 	if (IS_ERR(trans))
9650 		return PTR_ERR(trans);
9651 
9652 	err = btrfs_find_free_ino(root, &objectid);
9653 	if (err)
9654 		goto out_unlock;
9655 
9656 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
9657 				dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
9658 				objectid, S_IFLNK|S_IRWXUGO, &index);
9659 	if (IS_ERR(inode)) {
9660 		err = PTR_ERR(inode);
9661 		inode = NULL;
9662 		goto out_unlock;
9663 	}
9664 
9665 	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
9670 	*/
9671 	inode->i_fop = &btrfs_file_operations;
9672 	inode->i_op = &btrfs_file_inode_operations;
9673 	inode->i_mapping->a_ops = &btrfs_aops;
9674 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9675 
9676 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
9677 	if (err)
9678 		goto out_unlock;
9679 
9680 	path = btrfs_alloc_path();
9681 	if (!path) {
9682 		err = -ENOMEM;
9683 		goto out_unlock;
9684 	}
9685 	key.objectid = btrfs_ino(BTRFS_I(inode));
9686 	key.offset = 0;
9687 	key.type = BTRFS_EXTENT_DATA_KEY;
9688 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9689 	err = btrfs_insert_empty_item(trans, root, path, &key,
9690 				      datasize);
9691 	if (err) {
9692 		btrfs_free_path(path);
9693 		goto out_unlock;
9694 	}
9695 	leaf = path->nodes[0];
9696 	ei = btrfs_item_ptr(leaf, path->slots[0],
9697 			    struct btrfs_file_extent_item);
9698 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9699 	btrfs_set_file_extent_type(leaf, ei,
9700 				   BTRFS_FILE_EXTENT_INLINE);
9701 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9702 	btrfs_set_file_extent_compression(leaf, ei, 0);
9703 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9704 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9705 
9706 	ptr = btrfs_file_extent_inline_start(ei);
9707 	write_extent_buffer(leaf, symname, ptr, name_len);
9708 	btrfs_mark_buffer_dirty(leaf);
9709 	btrfs_free_path(path);
9710 
9711 	inode->i_op = &btrfs_symlink_inode_operations;
9712 	inode_nohighmem(inode);
9713 	inode_set_bytes(inode, name_len);
9714 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9715 	err = btrfs_update_inode(trans, root, inode);
9716 	/*
	 * Last step, add directory indexes for our symlink inode. Doing this
	 * last avoids extra cleanup of these indexes if an error happens
	 * elsewhere above.
9720 	 */
9721 	if (!err)
9722 		err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9723 				BTRFS_I(inode), 0, index);
9724 	if (err)
9725 		goto out_unlock;
9726 
9727 	d_instantiate_new(dentry, inode);
9728 
9729 out_unlock:
9730 	btrfs_end_transaction(trans);
9731 	if (err && inode) {
9732 		inode_dec_link_count(inode);
9733 		discard_new_inode(inode);
9734 	}
9735 	btrfs_btree_balance_dirty(fs_info);
9736 	return err;
9737 }
9738 
9739 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9740 				       u64 start, u64 num_bytes, u64 min_size,
9741 				       loff_t actual_len, u64 *alloc_hint,
9742 				       struct btrfs_trans_handle *trans)
9743 {
9744 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9745 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9746 	struct extent_map *em;
9747 	struct btrfs_root *root = BTRFS_I(inode)->root;
9748 	struct btrfs_key ins;
9749 	u64 cur_offset = start;
9750 	u64 i_size;
9751 	u64 cur_bytes;
9752 	u64 last_alloc = (u64)-1;
9753 	int ret = 0;
9754 	bool own_trans = true;
9755 	u64 end = start + num_bytes - 1;
9756 
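	/*
	 * If the caller passed in a transaction handle, use it for the whole
	 * range; otherwise start and end one per loop iteration below.
	 */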
9757 	if (trans)
9758 		own_trans = false;
9759 	while (num_bytes > 0) {
9760 		if (own_trans) {
9761 			trans = btrfs_start_transaction(root, 3);
9762 			if (IS_ERR(trans)) {
9763 				ret = PTR_ERR(trans);
9764 				break;
9765 			}
9766 		}
9767 
9768 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9769 		cur_bytes = max(cur_bytes, min_size);
9770 		/*
9771 		 * If we are severely fragmented we could end up with really
9772 		 * small allocations, so if the allocator is returning small
		 * chunks, let's make its job easier by only searching for those
9774 		 * sized chunks.
9775 		 */
9776 		cur_bytes = min(cur_bytes, last_alloc);
9777 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9778 				min_size, 0, *alloc_hint, &ins, 1, 0);
9779 		if (ret) {
9780 			if (own_trans)
9781 				btrfs_end_transaction(trans);
9782 			break;
9783 		}
9784 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9785 
9786 		last_alloc = ins.offset;
9787 		ret = insert_reserved_file_extent(trans, inode,
9788 						  cur_offset, ins.objectid,
9789 						  ins.offset, ins.offset,
9790 						  ins.offset, 0, 0, 0,
9791 						  BTRFS_FILE_EXTENT_PREALLOC);
9792 		if (ret) {
9793 			btrfs_free_reserved_extent(fs_info, ins.objectid,
9794 						   ins.offset, 0);
9795 			btrfs_abort_transaction(trans, ret);
9796 			if (own_trans)
9797 				btrfs_end_transaction(trans);
9798 			break;
9799 		}
9800 
9801 		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
					cur_offset + ins.offset - 1, 0);
9803 
9804 		em = alloc_extent_map();
9805 		if (!em) {
9806 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
9807 				&BTRFS_I(inode)->runtime_flags);
9808 			goto next;
9809 		}
9810 
9811 		em->start = cur_offset;
9812 		em->orig_start = cur_offset;
9813 		em->len = ins.offset;
9814 		em->block_start = ins.objectid;
9815 		em->block_len = ins.offset;
9816 		em->orig_block_len = ins.offset;
9817 		em->ram_bytes = ins.offset;
9818 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9819 		em->generation = trans->transid;
9820 
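		/*
		 * Keep dropping the overlapping range from the extent cache
		 * until the new mapping can be inserted.
		 */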
9821 		while (1) {
9822 			write_lock(&em_tree->lock);
9823 			ret = add_extent_mapping(em_tree, em, 1);
9824 			write_unlock(&em_tree->lock);
9825 			if (ret != -EEXIST)
9826 				break;
9827 			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
9828 						cur_offset + ins.offset - 1,
9829 						0);
9830 		}
9831 		free_extent_map(em);
9832 next:
9833 		num_bytes -= ins.offset;
9834 		cur_offset += ins.offset;
9835 		*alloc_hint = ins.objectid + ins.offset;
9836 
9837 		inode_inc_iversion(inode);
9838 		inode->i_ctime = current_time(inode);
9839 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9840 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9841 		    (actual_len > inode->i_size) &&
9842 		    (cur_offset > inode->i_size)) {
9843 			if (cur_offset > actual_len)
9844 				i_size = actual_len;
9845 			else
9846 				i_size = cur_offset;
9847 			i_size_write(inode, i_size);
9848 			btrfs_ordered_update_i_size(inode, i_size, NULL);
9849 		}
9850 
9851 		ret = btrfs_update_inode(trans, root, inode);
9852 
9853 		if (ret) {
9854 			btrfs_abort_transaction(trans, ret);
9855 			if (own_trans)
9856 				btrfs_end_transaction(trans);
9857 			break;
9858 		}
9859 
9860 		if (own_trans)
9861 			btrfs_end_transaction(trans);
9862 	}
9863 	if (cur_offset < end)
9864 		btrfs_free_reserved_data_space(inode, NULL, cur_offset,
9865 			end - cur_offset + 1);
9866 	return ret;
9867 }
9868 
9869 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9870 			      u64 start, u64 num_bytes, u64 min_size,
9871 			      loff_t actual_len, u64 *alloc_hint)
9872 {
9873 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9874 					   min_size, actual_len, alloc_hint,
9875 					   NULL);
9876 }
9877 
9878 int btrfs_prealloc_file_range_trans(struct inode *inode,
9879 				    struct btrfs_trans_handle *trans, int mode,
9880 				    u64 start, u64 num_bytes, u64 min_size,
9881 				    loff_t actual_len, u64 *alloc_hint)
9882 {
9883 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9884 					   min_size, actual_len, alloc_hint, trans);
9885 }
9886 
9887 static int btrfs_set_page_dirty(struct page *page)
9888 {
9889 	return __set_page_dirty_nobuffers(page);
9890 }
9891 
9892 static int btrfs_permission(struct inode *inode, int mask)
9893 {
9894 	struct btrfs_root *root = BTRFS_I(inode)->root;
9895 	umode_t mode = inode->i_mode;
9896 
9897 	if (mask & MAY_WRITE &&
9898 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9899 		if (btrfs_root_readonly(root))
9900 			return -EROFS;
9901 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9902 			return -EACCES;
9903 	}
9904 	return generic_permission(inode, mask);
9905 }
9906 
9907 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
9908 {
9909 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9910 	struct btrfs_trans_handle *trans;
9911 	struct btrfs_root *root = BTRFS_I(dir)->root;
9912 	struct inode *inode = NULL;
9913 	u64 objectid;
9914 	u64 index;
9915 	int ret = 0;
9916 
9917 	/*
9918 	 * 5 units required for adding orphan entry
9919 	 */
9920 	trans = btrfs_start_transaction(root, 5);
9921 	if (IS_ERR(trans))
9922 		return PTR_ERR(trans);
9923 
9924 	ret = btrfs_find_free_ino(root, &objectid);
9925 	if (ret)
9926 		goto out;
9927 
9928 	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
9929 			btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
9930 	if (IS_ERR(inode)) {
9931 		ret = PTR_ERR(inode);
9932 		inode = NULL;
9933 		goto out;
9934 	}
9935 
9936 	inode->i_fop = &btrfs_file_operations;
9937 	inode->i_op = &btrfs_file_inode_operations;
9938 
9939 	inode->i_mapping->a_ops = &btrfs_aops;
9940 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9941 
9942 	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
9943 	if (ret)
9944 		goto out;
9945 
9946 	ret = btrfs_update_inode(trans, root, inode);
9947 	if (ret)
9948 		goto out;
9949 	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
9950 	if (ret)
9951 		goto out;
9952 
9953 	/*
	 * We set the number of links to 0 in btrfs_new_inode(), and here we
	 * set it to 1 because d_tmpfile() would issue a warning if the link
	 * count were 0, via:
9957 	 *
9958 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9959 	 */
9960 	set_nlink(inode, 1);
9961 	d_tmpfile(dentry, inode);
9962 	unlock_new_inode(inode);
9963 	mark_inode_dirty(inode);
9964 out:
9965 	btrfs_end_transaction(trans);
9966 	if (ret && inode)
9967 		discard_new_inode(inode);
9968 	btrfs_btree_balance_dirty(fs_info);
9969 	return ret;
9970 }
9971 
9972 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
9973 {
9974 	struct inode *inode = tree->private_data;
9975 	unsigned long index = start >> PAGE_SHIFT;
9976 	unsigned long end_index = end >> PAGE_SHIFT;
9977 	struct page *page;
9978 
9979 	while (index <= end_index) {
9980 		page = find_get_page(inode->i_mapping, index);
9981 		ASSERT(page); /* Pages should be in the extent_io_tree */
9982 		set_page_writeback(page);
9983 		put_page(page);
9984 		index++;
9985 	}
9986 }
9987 
9988 #ifdef CONFIG_SWAP
9989 /*
9990  * Add an entry indicating a block group or device which is pinned by a
9991  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
9992  * negative errno on failure.
9993  */
9994 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
9995 				  bool is_block_group)
9996 {
9997 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
9998 	struct btrfs_swapfile_pin *sp, *entry;
9999 	struct rb_node **p;
10000 	struct rb_node *parent = NULL;
10001 
10002 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10003 	if (!sp)
10004 		return -ENOMEM;
10005 	sp->ptr = ptr;
10006 	sp->inode = inode;
10007 	sp->is_block_group = is_block_group;
10008 
10009 	spin_lock(&fs_info->swapfile_pins_lock);
10010 	p = &fs_info->swapfile_pins.rb_node;
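	/*
	 * Entries are ordered by (ptr, inode), so the same block group or
	 * device can be pinned by more than one swapfile.
	 */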
10011 	while (*p) {
10012 		parent = *p;
10013 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10014 		if (sp->ptr < entry->ptr ||
10015 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10016 			p = &(*p)->rb_left;
10017 		} else if (sp->ptr > entry->ptr ||
10018 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10019 			p = &(*p)->rb_right;
10020 		} else {
10021 			spin_unlock(&fs_info->swapfile_pins_lock);
10022 			kfree(sp);
10023 			return 1;
10024 		}
10025 	}
10026 	rb_link_node(&sp->node, parent, p);
10027 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10028 	spin_unlock(&fs_info->swapfile_pins_lock);
10029 	return 0;
10030 }
10031 
10032 /* Free all of the entries pinned by this swapfile. */
10033 static void btrfs_free_swapfile_pins(struct inode *inode)
10034 {
10035 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10036 	struct btrfs_swapfile_pin *sp;
10037 	struct rb_node *node, *next;
10038 
10039 	spin_lock(&fs_info->swapfile_pins_lock);
10040 	node = rb_first(&fs_info->swapfile_pins);
10041 	while (node) {
10042 		next = rb_next(node);
10043 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10044 		if (sp->inode == inode) {
10045 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10046 			if (sp->is_block_group)
10047 				btrfs_put_block_group(sp->ptr);
10048 			kfree(sp);
10049 		}
10050 		node = next;
10051 	}
10052 	spin_unlock(&fs_info->swapfile_pins_lock);
10053 }
10054 
10055 struct btrfs_swap_info {
10056 	u64 start;
10057 	u64 block_start;
10058 	u64 block_len;
10059 	u64 lowest_ppage;
10060 	u64 highest_ppage;
10061 	unsigned long nr_pages;
10062 	int nr_extents;
10063 };
10064 
10065 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10066 				 struct btrfs_swap_info *bsi)
10067 {
10068 	unsigned long nr_pages;
10069 	u64 first_ppage, first_ppage_reported, next_ppage;
10070 	int ret;
10071 
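	/*
	 * Trim the extent to whole pages: swap can only use the page-aligned,
	 * page-sized portion of each extent.
	 */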
10072 	first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
10073 	next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
10074 				PAGE_SIZE) >> PAGE_SHIFT;
10075 
10076 	if (first_ppage >= next_ppage)
10077 		return 0;
10078 	nr_pages = next_ppage - first_ppage;
10079 
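	/*
	 * The first page of the swap file holds the swap header and is never
	 * used for swapping, so leave it out of the reported range.
	 */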
10080 	first_ppage_reported = first_ppage;
10081 	if (bsi->start == 0)
10082 		first_ppage_reported++;
10083 	if (bsi->lowest_ppage > first_ppage_reported)
10084 		bsi->lowest_ppage = first_ppage_reported;
10085 	if (bsi->highest_ppage < (next_ppage - 1))
10086 		bsi->highest_ppage = next_ppage - 1;
10087 
10088 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10089 	if (ret < 0)
10090 		return ret;
10091 	bsi->nr_extents += ret;
10092 	bsi->nr_pages += nr_pages;
10093 	return 0;
10094 }
10095 
10096 static void btrfs_swap_deactivate(struct file *file)
10097 {
10098 	struct inode *inode = file_inode(file);
10099 
10100 	btrfs_free_swapfile_pins(inode);
10101 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10102 }
10103 
10104 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10105 			       sector_t *span)
10106 {
10107 	struct inode *inode = file_inode(file);
10108 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10109 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10110 	struct extent_state *cached_state = NULL;
10111 	struct extent_map *em = NULL;
10112 	struct btrfs_device *device = NULL;
10113 	struct btrfs_swap_info bsi = {
10114 		.lowest_ppage = (sector_t)-1ULL,
10115 	};
10116 	int ret = 0;
10117 	u64 isize;
10118 	u64 start;
10119 
10120 	/*
10121 	 * If the swap file was just created, make sure delalloc is done. If the
10122 	 * file changes again after this, the user is doing something stupid and
10123 	 * we don't really care.
10124 	 */
10125 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10126 	if (ret)
10127 		return ret;
10128 
10129 	/*
10130 	 * The inode is locked, so these flags won't change after we check them.
10131 	 */
10132 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10133 		btrfs_warn(fs_info, "swapfile must not be compressed");
10134 		return -EINVAL;
10135 	}
10136 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10137 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10138 		return -EINVAL;
10139 	}
10140 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10141 		btrfs_warn(fs_info, "swapfile must not be checksummed");
10142 		return -EINVAL;
10143 	}
10144 
10145 	/*
10146 	 * Balance or device remove/replace/resize can move stuff around from
10147 	 * under us. The EXCL_OP flag makes sure they aren't running/won't run
10148 	 * concurrently while we are mapping the swap extents, and
10149 	 * fs_info->swapfile_pins prevents them from running while the swap file
10150 	 * is active and moving the extents. Note that this also prevents a
10151 	 * concurrent device add which isn't actually necessary, but it's not
10152 	 * really worth the trouble to allow it.
10153 	 */
10154 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
10155 		btrfs_warn(fs_info,
10156 	   "cannot activate swapfile while exclusive operation is running");
10157 		return -EBUSY;
10158 	}
10159 	/*
10160 	 * Snapshots can create extents which require COW even if NODATACOW is
10161 	 * set. We use this counter to prevent snapshots. We must increment it
10162 	 * before walking the extents because we don't want a concurrent
10163 	 * snapshot to run after we've already checked the extents.
10164 	 */
10165 	atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
10166 
10167 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10168 
10169 	lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
10170 	start = 0;
10171 	while (start < isize) {
10172 		u64 logical_block_start, physical_block_start;
10173 		struct btrfs_block_group *bg;
10174 		u64 len = isize - start;
10175 
10176 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
10177 		if (IS_ERR(em)) {
10178 			ret = PTR_ERR(em);
10179 			goto out;
10180 		}
10181 
10182 		if (em->block_start == EXTENT_MAP_HOLE) {
10183 			btrfs_warn(fs_info, "swapfile must not have holes");
10184 			ret = -EINVAL;
10185 			goto out;
10186 		}
10187 		if (em->block_start == EXTENT_MAP_INLINE) {
10188 			/*
10189 			 * It's unlikely we'll ever actually find ourselves
10190 			 * here, as a file small enough to fit inline won't be
10191 			 * big enough to store more than the swap header, but in
10192 			 * case something changes in the future, let's catch it
10193 			 * here rather than later.
10194 			 */
10195 			btrfs_warn(fs_info, "swapfile must not be inline");
10196 			ret = -EINVAL;
10197 			goto out;
10198 		}
10199 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10200 			btrfs_warn(fs_info, "swapfile must not be compressed");
10201 			ret = -EINVAL;
10202 			goto out;
10203 		}
10204 
10205 		logical_block_start = em->block_start + (start - em->start);
10206 		len = min(len, em->len - (start - em->start));
10207 		free_extent_map(em);
10208 		em = NULL;
10209 
10210 		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
10211 		if (ret < 0) {
10212 			goto out;
10213 		} else if (ret) {
10214 			ret = 0;
10215 		} else {
10216 			btrfs_warn(fs_info,
10217 				   "swapfile must not be copy-on-write");
10218 			ret = -EINVAL;
10219 			goto out;
10220 		}
10221 
10222 		em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10223 		if (IS_ERR(em)) {
10224 			ret = PTR_ERR(em);
10225 			goto out;
10226 		}
10227 
10228 		if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10229 			btrfs_warn(fs_info,
10230 				   "swapfile must have single data profile");
10231 			ret = -EINVAL;
10232 			goto out;
10233 		}
10234 
10235 		if (device == NULL) {
10236 			device = em->map_lookup->stripes[0].dev;
10237 			ret = btrfs_add_swapfile_pin(inode, device, false);
10238 			if (ret == 1)
10239 				ret = 0;
10240 			else if (ret)
10241 				goto out;
10242 		} else if (device != em->map_lookup->stripes[0].dev) {
10243 			btrfs_warn(fs_info, "swapfile must be on one device");
10244 			ret = -EINVAL;
10245 			goto out;
10246 		}
10247 
10248 		physical_block_start = (em->map_lookup->stripes[0].physical +
10249 					(logical_block_start - em->start));
10250 		len = min(len, em->len - (logical_block_start - em->start));
10251 		free_extent_map(em);
10252 		em = NULL;
10253 
10254 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10255 		if (!bg) {
10256 			btrfs_warn(fs_info,
10257 			   "could not find block group containing swapfile");
10258 			ret = -EINVAL;
10259 			goto out;
10260 		}
10261 
10262 		ret = btrfs_add_swapfile_pin(inode, bg, true);
10263 		if (ret) {
10264 			btrfs_put_block_group(bg);
10265 			if (ret == 1)
10266 				ret = 0;
10267 			else
10268 				goto out;
10269 		}
10270 
10271 		if (bsi.block_len &&
10272 		    bsi.block_start + bsi.block_len == physical_block_start) {
10273 			bsi.block_len += len;
10274 		} else {
10275 			if (bsi.block_len) {
10276 				ret = btrfs_add_swap_extent(sis, &bsi);
10277 				if (ret)
10278 					goto out;
10279 			}
10280 			bsi.start = start;
10281 			bsi.block_start = physical_block_start;
10282 			bsi.block_len = len;
10283 		}
10284 
10285 		start += len;
10286 	}
10287 
10288 	if (bsi.block_len)
10289 		ret = btrfs_add_swap_extent(sis, &bsi);
10290 
10291 out:
10292 	if (!IS_ERR_OR_NULL(em))
10293 		free_extent_map(em);
10294 
10295 	unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
10296 
10297 	if (ret)
10298 		btrfs_swap_deactivate(file);
10299 
10300 	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
10301 
10302 	if (ret)
10303 		return ret;
10304 
10305 	if (device)
10306 		sis->bdev = device->bdev;
10307 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
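	/* Page 0 holds the swap header, hence the usable count is one less. */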
10308 	sis->max = bsi.nr_pages;
10309 	sis->pages = bsi.nr_pages - 1;
10310 	sis->highest_bit = bsi.nr_pages - 1;
10311 	return bsi.nr_extents;
10312 }
10313 #else
10314 static void btrfs_swap_deactivate(struct file *file)
10315 {
10316 }
10317 
10318 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10319 			       sector_t *span)
10320 {
10321 	return -EOPNOTSUPP;
10322 }
10323 #endif
10324 
10325 static const struct inode_operations btrfs_dir_inode_operations = {
10326 	.getattr	= btrfs_getattr,
10327 	.lookup		= btrfs_lookup,
10328 	.create		= btrfs_create,
10329 	.unlink		= btrfs_unlink,
10330 	.link		= btrfs_link,
10331 	.mkdir		= btrfs_mkdir,
10332 	.rmdir		= btrfs_rmdir,
10333 	.rename		= btrfs_rename2,
10334 	.symlink	= btrfs_symlink,
10335 	.setattr	= btrfs_setattr,
10336 	.mknod		= btrfs_mknod,
10337 	.listxattr	= btrfs_listxattr,
10338 	.permission	= btrfs_permission,
10339 	.get_acl	= btrfs_get_acl,
10340 	.set_acl	= btrfs_set_acl,
10341 	.update_time	= btrfs_update_time,
10342 	.tmpfile        = btrfs_tmpfile,
10343 };
10344 
10345 static const struct file_operations btrfs_dir_file_operations = {
10346 	.llseek		= generic_file_llseek,
10347 	.read		= generic_read_dir,
10348 	.iterate_shared	= btrfs_real_readdir,
10349 	.open		= btrfs_opendir,
10350 	.unlocked_ioctl	= btrfs_ioctl,
10351 #ifdef CONFIG_COMPAT
10352 	.compat_ioctl	= btrfs_compat_ioctl,
10353 #endif
10354 	.release        = btrfs_release_file,
10355 	.fsync		= btrfs_sync_file,
10356 };
10357 
10358 static const struct extent_io_ops btrfs_extent_io_ops = {
10359 	/* mandatory callbacks */
10360 	.submit_bio_hook = btrfs_submit_bio_hook,
10361 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
10362 };
10363 
10364 /*
10365  * btrfs doesn't support the bmap operation because swapfiles
10366  * use bmap to make a mapping of extents in the file.  They assume
10367  * these extents won't change over the life of the file and they
10368  * use the bmap result to do IO directly to the drive.
10369  *
10370  * the btrfs bmap call would return logical addresses that aren't
10371  * suitable for IO and they also will change frequently as COW
10372  * operations happen.  So, swapfile + btrfs == corruption.
10373  *
10374  * For now we're avoiding this by dropping bmap.
10375  */
10376 static const struct address_space_operations btrfs_aops = {
10377 	.readpage	= btrfs_readpage,
10378 	.writepage	= btrfs_writepage,
10379 	.writepages	= btrfs_writepages,
10380 	.readpages	= btrfs_readpages,
10381 	.direct_IO	= btrfs_direct_IO,
10382 	.invalidatepage = btrfs_invalidatepage,
10383 	.releasepage	= btrfs_releasepage,
10384 	.set_page_dirty	= btrfs_set_page_dirty,
10385 	.error_remove_page = generic_error_remove_page,
10386 	.swap_activate	= btrfs_swap_activate,
10387 	.swap_deactivate = btrfs_swap_deactivate,
10388 };
10389 
10390 static const struct inode_operations btrfs_file_inode_operations = {
10391 	.getattr	= btrfs_getattr,
10392 	.setattr	= btrfs_setattr,
10393 	.listxattr      = btrfs_listxattr,
10394 	.permission	= btrfs_permission,
10395 	.fiemap		= btrfs_fiemap,
10396 	.get_acl	= btrfs_get_acl,
10397 	.set_acl	= btrfs_set_acl,
10398 	.update_time	= btrfs_update_time,
10399 };
10400 static const struct inode_operations btrfs_special_inode_operations = {
10401 	.getattr	= btrfs_getattr,
10402 	.setattr	= btrfs_setattr,
10403 	.permission	= btrfs_permission,
10404 	.listxattr	= btrfs_listxattr,
10405 	.get_acl	= btrfs_get_acl,
10406 	.set_acl	= btrfs_set_acl,
10407 	.update_time	= btrfs_update_time,
10408 };
10409 static const struct inode_operations btrfs_symlink_inode_operations = {
10410 	.get_link	= page_get_link,
10411 	.getattr	= btrfs_getattr,
10412 	.setattr	= btrfs_setattr,
10413 	.permission	= btrfs_permission,
10414 	.listxattr	= btrfs_listxattr,
10415 	.update_time	= btrfs_update_time,
10416 };
10417 
10418 const struct dentry_operations btrfs_dentry_operations = {
10419 	.d_delete	= btrfs_dentry_delete,
10420 };
10421