/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
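
/*
 * Example: S_IFREG is 0100000 (octal), so S_IFREG >> S_SHIFT == 8 and a
 * regular file's i_mode indexes btrfs_type_by_mode[8], yielding
 * BTRFS_FT_REG_FILE, the type byte stored in directory items.
 */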

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, int type);

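/*
 * Initialize the security attributes of a freshly created inode:
 * inherit POSIX ACLs from the parent directory, then let the security
 * module attach its xattr via btrfs_xattr_security_init().
 */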
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
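
/*
 * Worked example for the checks above, assuming 4K pages and sectors:
 * a 3000-byte file written at offset 0 can be inlined -- start == 0,
 * actual_end (3000) is below PAGE_CACHE_SIZE, it covers the whole file
 * and it does not end exactly on a sector boundary.  A 5000-byte file
 * cannot (actual_end >= PAGE_CACHE_SIZE), and neither can a write that
 * does not begin at file offset 0.
 */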

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
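
/*
 * Lifecycle note: each async_cow describes one chunk of a delalloc
 * range handed to the delalloc worker pool.  cow_file_range_async()
 * wires up the btrfs_work so that work.func runs async_cow_start()
 * (compression), ordered_func runs async_cow_submit() (IO submission,
 * in queue order) and ordered_free runs async_cow_free().
 */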

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  Redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret && ret != -ENOSPC)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;

			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;

			if (ret == -ENOSPC)
				goto retry;
			goto out_free;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em)
			goto out_free_reserve;
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			if (!ret)
				list_move(&em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret)
			goto out_free_reserve;

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		if (ret)
			goto out;
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	kfree(async_extent);
	goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     struct btrfs_root *root,
				     struct page *locked_page,
				     u64 start, u64 end, int *page_started,
				     unsigned long *nr_written,
				     int unlock)
{
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(inode));

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			if (!ret)
				list_move(&em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, locked_page,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}

static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	ret = __cow_file_range(trans, inode, root, locked_page, start, end,
			       page_started, nr_written, unlock);

	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

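/*
 * Split the delalloc range into async_cow work items of at most 512K
 * each (one item for the whole range if the inode is flagged
 * NOCOMPRESS) and queue them on the delalloc workers.  For example, a
 * 2M dirty range becomes four 512K work items, letting compression run
 * on several cpus while ordered_func keeps IO submission in file order.
 */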
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * the nocow writeback callback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = __cow_file_range(trans, inode, root, locked_page,
					       cow_start, found_key.offset - 1,
					       page_started, nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				if (!ret)
					list_move(&em->list,
						  &em_tree->modified_extents);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = __cow_file_range(trans, inode, root, locked_page,
				       cow_start, end,
				       page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     cur_offset, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
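/*
 * Dispatch summary: NODATACOW inodes take run_delalloc_nocow() with
 * force=1, PREALLOC inodes take it with force=0, plain inodes with no
 * compression enabled use cow_file_range() directly, and anything that
 * might compress goes through the async path in cow_file_range_async().
 */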
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!btrfs_test_opt(root, COMPRESS) &&
		   !(BTRFS_I(inode)->force_compress) &&
		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}
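
/*
 * Note: together these two hooks keep BTRFS_I(inode)->outstanding_extents
 * in step with the number of delalloc extent_state records: a split
 * turns one extent into two (increment), a merge turns two into one
 * (decrement), so the per-extent metadata reservations stay accurate.
 */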

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
				list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
					      &root->fs_info->delalloc_inodes);
				set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					&BTRFS_I(inode)->runtime_flags);
			}
			spin_unlock(&root->fs_info->delalloc_lock);
		}
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
				list_del_init(&BTRFS_I(inode)->delalloc_inodes);
				clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					  &BTRFS_I(inode)->runtime_flags);
			}
			spin_unlock(&root->fs_info->delalloc_lock);
		}
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
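/*
 * (bio->bi_sector counts 512-byte sectors, hence the << 9 below to get
 * a byte address; btrfs_map_block() reports how far the current mapping
 * extends, and the hook returns 1 -- "don't merge" -- whenever adding
 * size more bytes would cross a stripe or chunk boundary.)
 */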
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_length = length;
	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
	if (ret)
		bio_endio(bio, ret);
	return ret;
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
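/*
 * In short: reads either go to the compressed-read path or get their
 * csums looked up before being mapped; async writes that need
 * checksumming are handed to btrfs_wq_submit_bio() so the csum work
 * (__btrfs_submit_bio_start) runs before the final mapping in
 * __btrfs_submit_bio_done; otherwise the csum is computed inline and
 * the bio goes straight to btrfs_map_bio().
 */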
1678 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1679 			  int mirror_num, unsigned long bio_flags,
1680 			  u64 bio_offset)
1681 {
1682 	struct btrfs_root *root = BTRFS_I(inode)->root;
1683 	int ret = 0;
1684 	int skip_sum;
1685 	int metadata = 0;
1686 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1687 
1688 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1689 
1690 	if (btrfs_is_free_space_inode(inode))
1691 		metadata = 2;
1692 
1693 	if (!(rw & REQ_WRITE)) {
1694 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1695 		if (ret)
1696 			goto out;
1697 
1698 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1699 			ret = btrfs_submit_compressed_read(inode, bio,
1700 							   mirror_num,
1701 							   bio_flags);
1702 			goto out;
1703 		} else if (!skip_sum) {
1704 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1705 			if (ret)
1706 				goto out;
1707 		}
1708 		goto mapit;
1709 	} else if (async && !skip_sum) {
1710 		/* csum items have already been cloned */
1711 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1712 			goto mapit;
1713 		/* we're doing a write, do the async checksumming */
1714 		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1715 				   inode, rw, bio, mirror_num,
1716 				   bio_flags, bio_offset,
1717 				   __btrfs_submit_bio_start,
1718 				   __btrfs_submit_bio_done);
1719 		goto out;
1720 	} else if (!skip_sum) {
1721 		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1722 		if (ret)
1723 			goto out;
1724 	}
1725 
1726 mapit:
1727 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1728 
1729 out:
1730 	if (ret < 0)
1731 		bio_endio(bio, ret);
1732 	return ret;
1733 }
1734 
1735 /*
1736  * given a list of ordered sums, record them in the inode.  This happens
1737  * at IO completion time based on sums calculated at bio submission time.
1738  */
1739 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1740 			     struct inode *inode, u64 file_offset,
1741 			     struct list_head *list)
1742 {
1743 	struct btrfs_ordered_sum *sum;
1744 
1745 	list_for_each_entry(sum, list, list) {
1746 		btrfs_csum_file_blocks(trans,
1747 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1748 	}
1749 	return 0;
1750 }
1751 
1752 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
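/*
 * mark the given byte range as delalloc in the inode's io_tree.  'end'
 * is inclusive (the last byte of the range), which is why the WARN_ON
 * below fires if it is page aligned.
 */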
1753 			      struct extent_state **cached_state)
1754 {
1755 	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1756 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1757 				   cached_state, GFP_NOFS);
1758 }
1759 
1760 /* see btrfs_writepage_start_hook for details on why this is required */
1761 struct btrfs_writepage_fixup {
1762 	struct page *page;
1763 	struct btrfs_work work;
1764 };
1765 
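/*
 * worker side of the writepage fixup: lock the page again, wait for any
 * ordered extent covering it to finish, then reserve space and redirty
 * the page as proper delalloc so writeback can handle it normally.
 */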
1766 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1767 {
1768 	struct btrfs_writepage_fixup *fixup;
1769 	struct btrfs_ordered_extent *ordered;
1770 	struct extent_state *cached_state = NULL;
1771 	struct page *page;
1772 	struct inode *inode;
1773 	u64 page_start;
1774 	u64 page_end;
1775 	int ret;
1776 
1777 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1778 	page = fixup->page;
1779 again:
1780 	lock_page(page);
1781 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1782 		ClearPageChecked(page);
1783 		goto out_page;
1784 	}
1785 
1786 	inode = page->mapping->host;
1787 	page_start = page_offset(page);
1788 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1789 
1790 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1791 			 &cached_state);
1792 
1793 	/* already ordered? We're done */
1794 	if (PagePrivate2(page))
1795 		goto out;
1796 
1797 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1798 	if (ordered) {
1799 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1800 				     page_end, &cached_state, GFP_NOFS);
1801 		unlock_page(page);
1802 		btrfs_start_ordered_extent(inode, ordered, 1);
1803 		btrfs_put_ordered_extent(ordered);
1804 		goto again;
1805 	}
1806 
1807 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1808 	if (ret) {
1809 		mapping_set_error(page->mapping, ret);
1810 		end_extent_writepage(page, ret, page_start, page_end);
1811 		ClearPageChecked(page);
1812 		goto out;
1813 	}
1814 
1815 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1816 	ClearPageChecked(page);
1817 	set_page_dirty(page);
1818 out:
1819 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1820 			     &cached_state, GFP_NOFS);
1821 out_page:
1822 	unlock_page(page);
1823 	page_cache_release(page);
1824 	kfree(fixup);
1825 }
1826 
1827 /*
1828  * There are a few paths in the higher layers of the kernel that directly
1829  * set the page dirty bit without asking the filesystem if it is a
1830  * good idea.  This causes problems because we want to make sure COW
1831  * properly happens and the data=ordered rules are followed.
1832  *
1833  * In our case any range that doesn't have the ORDERED bit set
1834  * hasn't been properly set up for IO.  We kick off an async process
1835  * to fix it up.  The async helper will wait for ordered extents, set
1836  * the delalloc bit and make it safe to write the page.
1837  */
1838 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1839 {
1840 	struct inode *inode = page->mapping->host;
1841 	struct btrfs_writepage_fixup *fixup;
1842 	struct btrfs_root *root = BTRFS_I(inode)->root;
1843 
1844 	/* this page is properly in the ordered list */
1845 	if (TestClearPagePrivate2(page))
1846 		return 0;
1847 
1848 	if (PageChecked(page))
1849 		return -EAGAIN;
1850 
1851 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1852 	if (!fixup)
1853 		return -EAGAIN;
1854 
1855 	SetPageChecked(page);
1856 	page_cache_get(page);
1857 	fixup->work.func = btrfs_writepage_fixup_worker;
1858 	fixup->page = page;
1859 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1860 	return -EBUSY;
1861 }
1862 
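/*
 * insert a file extent item for an extent that was reserved and written
 * via ordered IO.  Old extents in the range are dropped first (but kept
 * in the extent map cache, see the comment below), and the new extent is
 * recorded in the extent tree with a ref for this inode.
 */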
1863 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1864 				       struct inode *inode, u64 file_pos,
1865 				       u64 disk_bytenr, u64 disk_num_bytes,
1866 				       u64 num_bytes, u64 ram_bytes,
1867 				       u8 compression, u8 encryption,
1868 				       u16 other_encoding, int extent_type)
1869 {
1870 	struct btrfs_root *root = BTRFS_I(inode)->root;
1871 	struct btrfs_file_extent_item *fi;
1872 	struct btrfs_path *path;
1873 	struct extent_buffer *leaf;
1874 	struct btrfs_key ins;
1875 	int ret;
1876 
1877 	path = btrfs_alloc_path();
1878 	if (!path)
1879 		return -ENOMEM;
1880 
1881 	path->leave_spinning = 1;
1882 
1883 	/*
1884 	 * we may be replacing one extent in the tree with another.
1885 	 * The new extent is pinned in the extent map, and we don't want
1886 	 * to drop it from the cache until it is completely in the btree.
1887 	 *
1888 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1889 	 * The caller is expected to unpin it and allow it to be merged
1890 	 * with the others.
1891 	 */
1892 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1893 				 file_pos + num_bytes, 0);
1894 	if (ret)
1895 		goto out;
1896 
1897 	ins.objectid = btrfs_ino(inode);
1898 	ins.offset = file_pos;
1899 	ins.type = BTRFS_EXTENT_DATA_KEY;
1900 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1901 	if (ret)
1902 		goto out;
1903 	leaf = path->nodes[0];
1904 	fi = btrfs_item_ptr(leaf, path->slots[0],
1905 			    struct btrfs_file_extent_item);
1906 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1907 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1908 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1909 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1910 	btrfs_set_file_extent_offset(leaf, fi, 0);
1911 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1912 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1913 	btrfs_set_file_extent_compression(leaf, fi, compression);
1914 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1915 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1916 
1917 	btrfs_mark_buffer_dirty(leaf);
1918 	btrfs_release_path(path);
1919 
1920 	inode_add_bytes(inode, num_bytes);
1921 
1922 	ins.objectid = disk_bytenr;
1923 	ins.offset = disk_num_bytes;
1924 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1925 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1926 					root->root_key.objectid,
1927 					btrfs_ino(inode), file_pos, &ins);
1928 out:
1929 	btrfs_free_path(path);
1930 
1931 	return ret;
1932 }
1933 
1934 /* snapshot-aware defrag */
1935 struct sa_defrag_extent_backref {
1936 	struct rb_node node;
1937 	struct old_sa_defrag_extent *old;
1938 	u64 root_id;
1939 	u64 inum;
1940 	u64 file_pos;
1941 	u64 extent_offset;
1942 	u64 num_bytes;
1943 	u64 generation;
1944 };
1945 
1946 struct old_sa_defrag_extent {
1947 	struct list_head list;
1948 	struct new_sa_defrag_extent *new;
1949 
1950 	u64 extent_offset;
1951 	u64 bytenr;
1952 	u64 offset;
1953 	u64 len;
1954 	int count;
1955 };
1956 
1957 struct new_sa_defrag_extent {
1958 	struct rb_root root;
1959 	struct list_head head;
1960 	struct btrfs_path *path;
1961 	struct inode *inode;
1962 	u64 file_pos;
1963 	u64 len;
1964 	u64 bytenr;
1965 	u64 disk_len;
1966 	u8 compress_type;
1967 };
1968 
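/*
 * order two backrefs by root id, then inode number, then file position.
 * Backrefs that compare equal are both kept; see the comment at the
 * bottom of this function for why duplicates can legitimately occur.
 */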
1969 static int backref_comp(struct sa_defrag_extent_backref *b1,
1970 			struct sa_defrag_extent_backref *b2)
1971 {
1972 	if (b1->root_id < b2->root_id)
1973 		return -1;
1974 	else if (b1->root_id > b2->root_id)
1975 		return 1;
1976 
1977 	if (b1->inum < b2->inum)
1978 		return -1;
1979 	else if (b1->inum > b2->inum)
1980 		return 1;
1981 
1982 	if (b1->file_pos < b2->file_pos)
1983 		return -1;
1984 	else if (b1->file_pos > b2->file_pos)
1985 		return 1;
1986 
1987 	/*
1988 	 * [------------------------------] ===> (a range of space)
1989 	 *     |<--->|   |<---->| =============> (fs/file tree A)
1990 	 * |<---------------------------->| ===> (fs/file tree B)
1991 	 *
1992 	 * A range of space can refer to two file extents in one tree while
1993 	 * referring to only one file extent in another tree.
1994 	 *
1995 	 * So we may process a disk offset more than once (two extents in A)
1996 	 * that lands in the same extent (one extent in B), and then insert
1997 	 * two identical backrefs (both referring to the extent in B).
1998 	 */
1999 	return 0;
2000 }
2001 
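/*
 * insert a backref into the rb-tree sorted by backref_comp().  Equal
 * keys deliberately go to the right, so duplicate backrefs are kept.
 */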
2002 static void backref_insert(struct rb_root *root,
2003 			   struct sa_defrag_extent_backref *backref)
2004 {
2005 	struct rb_node **p = &root->rb_node;
2006 	struct rb_node *parent = NULL;
2007 	struct sa_defrag_extent_backref *entry;
2008 	int ret;
2009 
2010 	while (*p) {
2011 		parent = *p;
2012 		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2013 
2014 		ret = backref_comp(backref, entry);
2015 		if (ret < 0)
2016 			p = &(*p)->rb_left;
2017 		else
2018 			p = &(*p)->rb_right;
2019 	}
2020 
2021 	rb_link_node(&backref->node, parent, p);
2022 	rb_insert_color(&backref->node, root);
2023 }
2024 
2025 /*
2026  * Note the backref might have changed, and in that case we just return 0.
2027  */
2028 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2029 				       void *ctx)
2030 {
2031 	struct btrfs_file_extent_item *extent;
2032 	struct btrfs_fs_info *fs_info;
2033 	struct old_sa_defrag_extent *old = ctx;
2034 	struct new_sa_defrag_extent *new = old->new;
2035 	struct btrfs_path *path = new->path;
2036 	struct btrfs_key key;
2037 	struct btrfs_root *root;
2038 	struct sa_defrag_extent_backref *backref;
2039 	struct extent_buffer *leaf;
2040 	struct inode *inode = new->inode;
2041 	int slot;
2042 	int ret;
2043 	u64 extent_offset;
2044 	u64 num_bytes;
2045 
2046 	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2047 	    inum == btrfs_ino(inode))
2048 		return 0;
2049 
2050 	key.objectid = root_id;
2051 	key.type = BTRFS_ROOT_ITEM_KEY;
2052 	key.offset = (u64)-1;
2053 
2054 	fs_info = BTRFS_I(inode)->root->fs_info;
2055 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2056 	if (IS_ERR(root)) {
2057 		if (PTR_ERR(root) == -ENOENT)
2058 			return 0;
2059 		WARN_ON(1);
2060 		pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2061 			 inum, offset, root_id);
2062 		return PTR_ERR(root);
2063 	}
2064 
2065 	key.objectid = inum;
2066 	key.type = BTRFS_EXTENT_DATA_KEY;
2067 	if (offset > (u64)-1 << 32)
2068 		key.offset = 0;
2069 	else
2070 		key.offset = offset;
2071 
2072 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2073 	if (ret < 0) {
2074 		WARN_ON(1);
2075 		return ret;
2076 	}
2077 
2078 	while (1) {
2079 		cond_resched();
2080 
2081 		leaf = path->nodes[0];
2082 		slot = path->slots[0];
2083 
2084 		if (slot >= btrfs_header_nritems(leaf)) {
2085 			ret = btrfs_next_leaf(root, path);
2086 			if (ret < 0) {
2087 				goto out;
2088 			} else if (ret > 0) {
2089 				ret = 0;
2090 				goto out;
2091 			}
2092 			continue;
2093 		}
2094 
2095 		path->slots[0]++;
2096 
2097 		btrfs_item_key_to_cpu(leaf, &key, slot);
2098 
2099 		if (key.objectid > inum)
2100 			goto out;
2101 
2102 		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2103 			continue;
2104 
2105 		extent = btrfs_item_ptr(leaf, slot,
2106 					struct btrfs_file_extent_item);
2107 
2108 		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2109 			continue;
2110 
2111 		extent_offset = btrfs_file_extent_offset(leaf, extent);
2112 		if (key.offset - extent_offset != offset)
2113 			continue;
2114 
2115 		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2116 		if (extent_offset >= old->extent_offset + old->offset +
2117 		    old->len || extent_offset + num_bytes <=
2118 		    old->extent_offset + old->offset)
2119 			continue;
2120 
2121 		break;
2122 	}
2123 
2124 	backref = kmalloc(sizeof(*backref), GFP_NOFS);
2125 	if (!backref) {
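		/*
		 * note: -ENOENT rather than -ENOMEM; the BUG_ON in
		 * record_extent_backrefs() only tolerates -ENOENT
		 * from this callback
		 */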
2126 		ret = -ENOENT;
2127 		goto out;
2128 	}
2129 
2130 	backref->root_id = root_id;
2131 	backref->inum = inum;
2132 	backref->file_pos = offset + extent_offset;
2133 	backref->num_bytes = num_bytes;
2134 	backref->extent_offset = extent_offset;
2135 	backref->generation = btrfs_file_extent_generation(leaf, extent);
2136 	backref->old = old;
2137 	backref_insert(&new->root, backref);
2138 	old->count++;
2139 out:
2140 	btrfs_release_path(path);
2141 	WARN_ON(ret);
2142 	return ret;
2143 }
2144 
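/*
 * collect the backrefs of every old extent that overlaps the defragged
 * range.  Returns false when nothing was recorded and there is no
 * relink work left to do.
 */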
2145 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2146 				   struct new_sa_defrag_extent *new)
2147 {
2148 	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2149 	struct old_sa_defrag_extent *old, *tmp;
2150 	int ret;
2151 
2152 	new->path = path;
2153 
2154 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2155 		ret = iterate_inodes_from_logical(old->bytenr, fs_info,
2156 						  path, record_one_backref,
2157 						  old);
2158 		BUG_ON(ret < 0 && ret != -ENOENT);
2159 
2160 		/* no backref to be processed for this extent */
2161 		if (!old->count) {
2162 			list_del(&old->list);
2163 			kfree(old);
2164 		}
2165 	}
2166 
2167 	if (list_empty(&new->head))
2168 		return false;
2169 
2170 	return true;
2171 }
2172 
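/*
 * a neighbouring file extent can only be merged with the relinked one
 * if it points at the same disk bytenr, is a plain BTRFS_FILE_EXTENT_REG
 * extent and uses no compression, encryption or other encoding
 */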
2173 static int relink_is_mergable(struct extent_buffer *leaf,
2174 			      struct btrfs_file_extent_item *fi,
2175 			      u64 disk_bytenr)
2176 {
2177 	if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
2178 		return 0;
2179 
2180 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2181 		return 0;
2182 
2183 	if (btrfs_file_extent_compression(leaf, fi) ||
2184 	    btrfs_file_extent_encryption(leaf, fi) ||
2185 	    btrfs_file_extent_other_encoding(leaf, fi))
2186 		return 0;
2187 
2188 	return 1;
2189 }
2190 
2191 /*
2192  * Note the backref might have changed, and in that case we just return 0.
2193  */
2194 static noinline int relink_extent_backref(struct btrfs_path *path,
2195 				 struct sa_defrag_extent_backref *prev,
2196 				 struct sa_defrag_extent_backref *backref)
2197 {
2198 	struct btrfs_file_extent_item *extent;
2199 	struct btrfs_file_extent_item *item;
2200 	struct btrfs_ordered_extent *ordered;
2201 	struct btrfs_trans_handle *trans;
2202 	struct btrfs_fs_info *fs_info;
2203 	struct btrfs_root *root;
2204 	struct btrfs_key key;
2205 	struct extent_buffer *leaf;
2206 	struct old_sa_defrag_extent *old = backref->old;
2207 	struct new_sa_defrag_extent *new = old->new;
2208 	struct inode *src_inode = new->inode;
2209 	struct inode *inode;
2210 	struct extent_state *cached = NULL;
2211 	int ret = 0;
2212 	u64 start;
2213 	u64 len;
2214 	u64 lock_start;
2215 	u64 lock_end;
2216 	bool merge = false;
2217 	int index;
2218 
2219 	if (prev && prev->root_id == backref->root_id &&
2220 	    prev->inum == backref->inum &&
2221 	    prev->file_pos + prev->num_bytes == backref->file_pos)
2222 		merge = true;
2223 
2224 	/* step 1: get root */
2225 	key.objectid = backref->root_id;
2226 	key.type = BTRFS_ROOT_ITEM_KEY;
2227 	key.offset = (u64)-1;
2228 
2229 	fs_info = BTRFS_I(src_inode)->root->fs_info;
2230 	index = srcu_read_lock(&fs_info->subvol_srcu);
2231 
2232 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2233 	if (IS_ERR(root)) {
2234 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2235 		if (PTR_ERR(root) == -ENOENT)
2236 			return 0;
2237 		return PTR_ERR(root);
2238 	}
2239 	if (btrfs_root_refs(&root->root_item) == 0) {
2240 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2241 		/* treat ENOENT as 0 */
2242 		return 0;
2243 	}
2244 
2245 	/* step 2: get inode */
2246 	key.objectid = backref->inum;
2247 	key.type = BTRFS_INODE_ITEM_KEY;
2248 	key.offset = 0;
2249 
2250 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2251 	if (IS_ERR(inode)) {
2252 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2253 		return 0;
2254 	}
2255 
2256 	srcu_read_unlock(&fs_info->subvol_srcu, index);
2257 
2258 	/* step 3: relink backref */
2259 	lock_start = backref->file_pos;
2260 	lock_end = backref->file_pos + backref->num_bytes - 1;
2261 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2262 			 0, &cached);
2263 
2264 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2265 	if (ordered) {
2266 		btrfs_put_ordered_extent(ordered);
2267 		goto out_unlock;
2268 	}
2269 
2270 	trans = btrfs_join_transaction(root);
2271 	if (IS_ERR(trans)) {
2272 		ret = PTR_ERR(trans);
2273 		goto out_unlock;
2274 	}
2275 
2276 	key.objectid = backref->inum;
2277 	key.type = BTRFS_EXTENT_DATA_KEY;
2278 	key.offset = backref->file_pos;
2279 
2280 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2281 	if (ret < 0) {
2282 		goto out_free_path;
2283 	} else if (ret > 0) {
2284 		ret = 0;
2285 		goto out_free_path;
2286 	}
2287 
2288 	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2289 				struct btrfs_file_extent_item);
2290 
2291 	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2292 	    backref->generation)
2293 		goto out_free_path;
2294 
2295 	btrfs_release_path(path);
2296 
2297 	start = backref->file_pos;
2298 	if (backref->extent_offset < old->extent_offset + old->offset)
2299 		start += old->extent_offset + old->offset -
2300 			 backref->extent_offset;
2301 
2302 	len = min(backref->extent_offset + backref->num_bytes,
2303 		  old->extent_offset + old->offset + old->len);
2304 	len -= max(backref->extent_offset, old->extent_offset + old->offset);
2305 
2306 	ret = btrfs_drop_extents(trans, root, inode, start,
2307 				 start + len, 1);
2308 	if (ret)
2309 		goto out_free_path;
2310 again:
2311 	key.objectid = btrfs_ino(inode);
2312 	key.type = BTRFS_EXTENT_DATA_KEY;
2313 	key.offset = start;
2314 
2315 	if (merge) {
2316 		struct btrfs_file_extent_item *fi;
2317 		u64 extent_len;
2318 		struct btrfs_key found_key;
2319 
2320 		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
2321 		if (ret < 0)
2322 			goto out_free_path;
2323 
2324 		path->slots[0]--;
2325 		leaf = path->nodes[0];
2326 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2327 
2328 		fi = btrfs_item_ptr(leaf, path->slots[0],
2329 				    struct btrfs_file_extent_item);
2330 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2331 
2332 		if (relink_is_mergable(leaf, fi, new->bytenr) &&
2333 		    extent_len + found_key.offset == start) {
2334 			btrfs_set_file_extent_num_bytes(leaf, fi,
2335 							extent_len + len);
2336 			btrfs_mark_buffer_dirty(leaf);
2337 			inode_add_bytes(inode, len);
2338 
2339 			ret = 1;
2340 			goto out_free_path;
2341 		} else {
2342 			merge = false;
2343 			btrfs_release_path(path);
2344 			goto again;
2345 		}
2346 	}
2347 
2348 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2349 					sizeof(*extent));
2350 	if (ret) {
2351 		btrfs_abort_transaction(trans, root, ret);
2352 		goto out_free_path;
2353 	}
2354 
2355 	leaf = path->nodes[0];
2356 	item = btrfs_item_ptr(leaf, path->slots[0],
2357 				struct btrfs_file_extent_item);
2358 	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2359 	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2360 	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2361 	btrfs_set_file_extent_num_bytes(leaf, item, len);
2362 	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2363 	btrfs_set_file_extent_generation(leaf, item, trans->transid);
2364 	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2365 	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2366 	btrfs_set_file_extent_encryption(leaf, item, 0);
2367 	btrfs_set_file_extent_other_encoding(leaf, item, 0);
2368 
2369 	btrfs_mark_buffer_dirty(leaf);
2370 	inode_add_bytes(inode, len);
2371 
2372 	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2373 			new->disk_len, 0,
2374 			backref->root_id, backref->inum,
2375 			new->file_pos, 0);	/* start - extent_offset */
2376 	if (ret) {
2377 		btrfs_abort_transaction(trans, root, ret);
2378 		goto out_free_path;
2379 	}
2380 
2381 	ret = 1;
2382 out_free_path:
2383 	btrfs_release_path(path);
2384 	btrfs_end_transaction(trans, root);
2385 out_unlock:
2386 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2387 			     &cached, GFP_NOFS);
2388 	iput(inode);
2389 	return ret;
2390 }
2391 
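/*
 * second half of snapshot-aware defrag: walk all the backrefs collected
 * by record_extent_backrefs() and relink each of them to the new extent,
 * then drop the old extent records and wake anyone waiting on
 * defrag_running.
 */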
2392 static void relink_file_extents(struct new_sa_defrag_extent *new)
2393 {
2394 	struct btrfs_path *path;
2395 	struct old_sa_defrag_extent *old, *tmp;
2396 	struct sa_defrag_extent_backref *backref;
2397 	struct sa_defrag_extent_backref *prev = NULL;
2398 	struct inode *inode;
2399 	struct btrfs_root *root;
2400 	struct rb_node *node;
2401 	int ret;
2402 
2403 	inode = new->inode;
2404 	root = BTRFS_I(inode)->root;
2405 
2406 	path = btrfs_alloc_path();
2407 	if (!path)
2408 		return;
2409 
2410 	if (!record_extent_backrefs(path, new)) {
2411 		btrfs_free_path(path);
2412 		goto out;
2413 	}
2414 	btrfs_release_path(path);
2415 
2416 	while (1) {
2417 		node = rb_first(&new->root);
2418 		if (!node)
2419 			break;
2420 		rb_erase(node, &new->root);
2421 
2422 		backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2423 
2424 		ret = relink_extent_backref(path, prev, backref);
2425 		WARN_ON(ret < 0);
2426 
2427 		kfree(prev);
2428 
2429 		if (ret == 1)
2430 			prev = backref;
2431 		else
2432 			prev = NULL;
2433 		cond_resched();
2434 	}
2435 	kfree(prev);
2436 
2437 	btrfs_free_path(path);
2438 
2439 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2440 		list_del(&old->list);
2441 		kfree(old);
2442 	}
2443 out:
2444 	atomic_dec(&root->fs_info->defrag_running);
2445 	wake_up(&root->fs_info->transaction_wait);
2446 
2447 	kfree(new);
2448 }
2449 
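/*
 * first half of snapshot-aware defrag: remember all the old file extents
 * covering the ordered extent that is being rewritten, so their backrefs
 * can be relinked to the new extent once the IO completes.  Returns NULL
 * if the search or an allocation fails.
 */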
2450 static struct new_sa_defrag_extent *
2451 record_old_file_extents(struct inode *inode,
2452 			struct btrfs_ordered_extent *ordered)
2453 {
2454 	struct btrfs_root *root = BTRFS_I(inode)->root;
2455 	struct btrfs_path *path;
2456 	struct btrfs_key key;
2457 	struct old_sa_defrag_extent *old, *tmp;
2458 	struct new_sa_defrag_extent *new;
2459 	int ret;
2460 
2461 	new = kmalloc(sizeof(*new), GFP_NOFS);
2462 	if (!new)
2463 		return NULL;
2464 
2465 	new->inode = inode;
2466 	new->file_pos = ordered->file_offset;
2467 	new->len = ordered->len;
2468 	new->bytenr = ordered->start;
2469 	new->disk_len = ordered->disk_len;
2470 	new->compress_type = ordered->compress_type;
2471 	new->root = RB_ROOT;
2472 	INIT_LIST_HEAD(&new->head);
2473 
2474 	path = btrfs_alloc_path();
2475 	if (!path)
2476 		goto out_kfree;
2477 
2478 	key.objectid = btrfs_ino(inode);
2479 	key.type = BTRFS_EXTENT_DATA_KEY;
2480 	key.offset = new->file_pos;
2481 
2482 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2483 	if (ret < 0)
2484 		goto out_free_path;
2485 	if (ret > 0 && path->slots[0] > 0)
2486 		path->slots[0]--;
2487 
2488 	/* find out all the old extents for the file range */
2489 	while (1) {
2490 		struct btrfs_file_extent_item *extent;
2491 		struct extent_buffer *l;
2492 		int slot;
2493 		u64 num_bytes;
2494 		u64 offset;
2495 		u64 end;
2496 		u64 disk_bytenr;
2497 		u64 extent_offset;
2498 
2499 		l = path->nodes[0];
2500 		slot = path->slots[0];
2501 
2502 		if (slot >= btrfs_header_nritems(l)) {
2503 			ret = btrfs_next_leaf(root, path);
2504 			if (ret < 0)
2505 				goto out_free_list;
2506 			else if (ret > 0)
2507 				break;
2508 			continue;
2509 		}
2510 
2511 		btrfs_item_key_to_cpu(l, &key, slot);
2512 
2513 		if (key.objectid != btrfs_ino(inode))
2514 			break;
2515 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2516 			break;
2517 		if (key.offset >= new->file_pos + new->len)
2518 			break;
2519 
2520 		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2521 
2522 		num_bytes = btrfs_file_extent_num_bytes(l, extent);
2523 		if (key.offset + num_bytes < new->file_pos)
2524 			goto next;
2525 
2526 		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2527 		if (!disk_bytenr)
2528 			goto next;
2529 
2530 		extent_offset = btrfs_file_extent_offset(l, extent);
2531 
2532 		old = kmalloc(sizeof(*old), GFP_NOFS);
2533 		if (!old)
2534 			goto out_free_list;
2535 
2536 		offset = max(new->file_pos, key.offset);
2537 		end = min(new->file_pos + new->len, key.offset + num_bytes);
2538 
2539 		old->bytenr = disk_bytenr;
2540 		old->extent_offset = extent_offset;
2541 		old->offset = offset - key.offset;
2542 		old->len = end - offset;
2543 		old->new = new;
2544 		old->count = 0;
2545 		list_add_tail(&old->list, &new->head);
2546 next:
2547 		path->slots[0]++;
2548 		cond_resched();
2549 	}
2550 
2551 	btrfs_free_path(path);
2552 	atomic_inc(&root->fs_info->defrag_running);
2553 
2554 	return new;
2555 
2556 out_free_list:
2557 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2558 		list_del(&old->list);
2559 		kfree(old);
2560 	}
2561 out_free_path:
2562 	btrfs_free_path(path);
2563 out_kfree:
2564 	kfree(new);
2565 	return NULL;
2566 }
2567 
2568 /*
2569  * As ordered data IO finishes, this gets called so we can finish an
2570  * ordered extent if the range of bytes in the file it covers has been
2571  * fully written.
2572  *
2573  * This is where the file extent item is inserted (or the extent is
2574  * marked written for preallocated extents), the pending csums are
2575  * recorded in the csum tree and the on-disk inode size is updated.
2576  */
2578 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2579 {
2580 	struct inode *inode = ordered_extent->inode;
2581 	struct btrfs_root *root = BTRFS_I(inode)->root;
2582 	struct btrfs_trans_handle *trans = NULL;
2583 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2584 	struct extent_state *cached_state = NULL;
2585 	struct new_sa_defrag_extent *new = NULL;
2586 	int compress_type = 0;
2587 	int ret;
2588 	bool nolock;
2589 
2590 	nolock = btrfs_is_free_space_inode(inode);
2591 
2592 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2593 		ret = -EIO;
2594 		goto out;
2595 	}
2596 
2597 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2598 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2599 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2600 		if (nolock)
2601 			trans = btrfs_join_transaction_nolock(root);
2602 		else
2603 			trans = btrfs_join_transaction(root);
2604 		if (IS_ERR(trans)) {
2605 			ret = PTR_ERR(trans);
2606 			trans = NULL;
2607 			goto out;
2608 		}
2609 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2610 		ret = btrfs_update_inode_fallback(trans, root, inode);
2611 		if (ret) /* -ENOMEM or corruption */
2612 			btrfs_abort_transaction(trans, root, ret);
2613 		goto out;
2614 	}
2615 
2616 	lock_extent_bits(io_tree, ordered_extent->file_offset,
2617 			 ordered_extent->file_offset + ordered_extent->len - 1,
2618 			 0, &cached_state);
2619 
2620 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
2621 			ordered_extent->file_offset + ordered_extent->len - 1,
2622 			EXTENT_DEFRAG, 1, cached_state);
2623 	if (ret) {
2624 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2625 		if (last_snapshot >= BTRFS_I(inode)->generation)
2626 			/* the inode is shared */
2627 			new = record_old_file_extents(inode, ordered_extent);
2628 
2629 		clear_extent_bit(io_tree, ordered_extent->file_offset,
2630 			ordered_extent->file_offset + ordered_extent->len - 1,
2631 			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2632 	}
2633 
2634 	if (nolock)
2635 		trans = btrfs_join_transaction_nolock(root);
2636 	else
2637 		trans = btrfs_join_transaction(root);
2638 	if (IS_ERR(trans)) {
2639 		ret = PTR_ERR(trans);
2640 		trans = NULL;
2641 		goto out_unlock;
2642 	}
2643 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2644 
2645 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2646 		compress_type = ordered_extent->compress_type;
2647 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2648 		BUG_ON(compress_type);
2649 		ret = btrfs_mark_extent_written(trans, inode,
2650 						ordered_extent->file_offset,
2651 						ordered_extent->file_offset +
2652 						ordered_extent->len);
2653 	} else {
2654 		BUG_ON(root == root->fs_info->tree_root);
2655 		ret = insert_reserved_file_extent(trans, inode,
2656 						ordered_extent->file_offset,
2657 						ordered_extent->start,
2658 						ordered_extent->disk_len,
2659 						ordered_extent->len,
2660 						ordered_extent->len,
2661 						compress_type, 0, 0,
2662 						BTRFS_FILE_EXTENT_REG);
2663 	}
2664 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2665 			   ordered_extent->file_offset, ordered_extent->len,
2666 			   trans->transid);
2667 	if (ret < 0) {
2668 		btrfs_abort_transaction(trans, root, ret);
2669 		goto out_unlock;
2670 	}
2671 
2672 	add_pending_csums(trans, inode, ordered_extent->file_offset,
2673 			  &ordered_extent->list);
2674 
2675 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2676 	ret = btrfs_update_inode_fallback(trans, root, inode);
2677 	if (ret) { /* -ENOMEM or corruption */
2678 		btrfs_abort_transaction(trans, root, ret);
2679 		goto out_unlock;
2680 	}
2681 	ret = 0;
2682 out_unlock:
2683 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
2684 			     ordered_extent->file_offset +
2685 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
2686 out:
2687 	if (root != root->fs_info->tree_root)
2688 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2689 	if (trans)
2690 		btrfs_end_transaction(trans, root);
2691 
2692 	if (ret) {
2693 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
2694 				      ordered_extent->file_offset +
2695 				      ordered_extent->len - 1, NULL, GFP_NOFS);
2696 
2697 		/*
2698 		 * If the ordered extent had an IOERR or something else went
2699 		 * wrong we need to return the space for this ordered extent
2700 		 * back to the allocator.
2701 		 */
2702 		if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2703 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2704 			btrfs_free_reserved_extent(root, ordered_extent->start,
2705 						   ordered_extent->disk_len);
2706 	}
2707 
2709 	/*
2710 	 * This needs to be done to make sure anybody waiting knows we are done
2711 	 * updating everything for this ordered extent.
2712 	 */
2713 	btrfs_remove_ordered_extent(inode, ordered_extent);
2714 
2715 	/* for snapshot-aware defrag */
2716 	if (new)
2717 		relink_file_extents(new);
2718 
2719 	/* once for us */
2720 	btrfs_put_ordered_extent(ordered_extent);
2721 	/* once for the tree */
2722 	btrfs_put_ordered_extent(ordered_extent);
2723 
2724 	return ret;
2725 }
2726 
2727 static void finish_ordered_fn(struct btrfs_work *work)
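/* work helper that runs btrfs_finish_ordered_io out of a worker thread */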
2728 {
2729 	struct btrfs_ordered_extent *ordered_extent;
2730 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2731 	btrfs_finish_ordered_io(ordered_extent);
2732 }
2733 
2734 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
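/*
 * writeback completion hook: once the last byte of an ordered extent is
 * written, hand the ordered extent to a worker so btrfs_finish_ordered_io
 * runs outside of end_io context.
 */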
2735 				struct extent_state *state, int uptodate)
2736 {
2737 	struct inode *inode = page->mapping->host;
2738 	struct btrfs_root *root = BTRFS_I(inode)->root;
2739 	struct btrfs_ordered_extent *ordered_extent = NULL;
2740 	struct btrfs_workers *workers;
2741 
2742 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2743 
2744 	ClearPagePrivate2(page);
2745 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2746 					    end - start + 1, uptodate))
2747 		return 0;
2748 
2749 	ordered_extent->work.func = finish_ordered_fn;
2750 	ordered_extent->work.flags = 0;
2751 
2752 	if (btrfs_is_free_space_inode(inode))
2753 		workers = &root->fs_info->endio_freespace_worker;
2754 	else
2755 		workers = &root->fs_info->endio_write_workers;
2756 	btrfs_queue_worker(workers, &ordered_extent->work);
2757 
2758 	return 0;
2759 }
2760 
2761 /*
2762  * when reads are done, we need to check csums to verify the data is correct.
2763  * If there's a match, we allow the bio to finish.  If not, the code in
2764  * extent_io.c will try to find good copies for us.
2765  */
2766 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2767 			       struct extent_state *state, int mirror)
2768 {
2769 	size_t offset = start - page_offset(page);
2770 	struct inode *inode = page->mapping->host;
2771 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2772 	char *kaddr;
2773 	u64 private = ~(u32)0;
2774 	int ret;
2775 	struct btrfs_root *root = BTRFS_I(inode)->root;
2776 	u32 csum = ~(u32)0;
2777 
2778 	if (PageChecked(page)) {
2779 		ClearPageChecked(page);
2780 		goto good;
2781 	}
2782 
2783 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2784 		goto good;
2785 
2786 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2787 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2788 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2789 				  GFP_NOFS);
2790 		return 0;
2791 	}
2792 
2793 	if (state && state->start == start) {
2794 		private = state->private;
2795 		ret = 0;
2796 	} else {
2797 		ret = get_state_private(io_tree, start, &private);
2798 	}
2799 	kaddr = kmap_atomic(page);
2800 	if (ret)
2801 		goto zeroit;
2802 
2803 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
2804 	btrfs_csum_final(csum, (char *)&csum);
2805 	if (csum != private)
2806 		goto zeroit;
2807 
2808 	kunmap_atomic(kaddr);
2809 good:
2810 	return 0;
2811 
2812 zeroit:
2813 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2814 		       "private %llu\n",
2815 		       (unsigned long long)btrfs_ino(page->mapping->host),
2816 		       (unsigned long long)start, csum,
2817 		       (unsigned long long)private);
2818 	memset(kaddr + offset, 1, end - start + 1);
2819 	flush_dcache_page(page);
2820 	kunmap_atomic(kaddr);
2821 	if (private == 0)
2822 		return 0;
2823 	return -EIO;
2824 }
2825 
2826 struct delayed_iput {
2827 	struct list_head list;
2828 	struct inode *inode;
2829 };
2830 
2831 /* JDM: If this is fs-wide, why can't we add a pointer to
2832  * btrfs_inode instead and avoid the allocation? */
2833 void btrfs_add_delayed_iput(struct inode *inode)
2834 {
2835 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2836 	struct delayed_iput *delayed;
2837 
2838 	if (atomic_add_unless(&inode->i_count, -1, 1))
2839 		return;
2840 
2841 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2842 	delayed->inode = inode;
2843 
2844 	spin_lock(&fs_info->delayed_iput_lock);
2845 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2846 	spin_unlock(&fs_info->delayed_iput_lock);
2847 }
2848 
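/*
 * run (and free) every iput that was deferred through
 * btrfs_add_delayed_iput(), dropping the last reference on those inodes
 * in a context where that is safe.
 */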
2849 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2850 {
2851 	LIST_HEAD(list);
2852 	struct btrfs_fs_info *fs_info = root->fs_info;
2853 	struct delayed_iput *delayed;
2854 	int empty;
2855 
2856 	spin_lock(&fs_info->delayed_iput_lock);
2857 	empty = list_empty(&fs_info->delayed_iputs);
2858 	spin_unlock(&fs_info->delayed_iput_lock);
2859 	if (empty)
2860 		return;
2861 
2862 	spin_lock(&fs_info->delayed_iput_lock);
2863 	list_splice_init(&fs_info->delayed_iputs, &list);
2864 	spin_unlock(&fs_info->delayed_iput_lock);
2865 
2866 	while (!list_empty(&list)) {
2867 		delayed = list_entry(list.next, struct delayed_iput, list);
2868 		list_del(&delayed->list);
2869 		iput(delayed->inode);
2870 		kfree(delayed);
2871 	}
2872 }
2873 
2874 /*
2875  * This is called at transaction commit time. If there are no orphan
2876  * files left in the subvolume, it removes the orphan item and frees
2877  * the block_rsv structure.
2878  */
2879 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2880 			      struct btrfs_root *root)
2881 {
2882 	struct btrfs_block_rsv *block_rsv;
2883 	int ret;
2884 
2885 	if (atomic_read(&root->orphan_inodes) ||
2886 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2887 		return;
2888 
2889 	spin_lock(&root->orphan_lock);
2890 	if (atomic_read(&root->orphan_inodes)) {
2891 		spin_unlock(&root->orphan_lock);
2892 		return;
2893 	}
2894 
2895 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2896 		spin_unlock(&root->orphan_lock);
2897 		return;
2898 	}
2899 
2900 	block_rsv = root->orphan_block_rsv;
2901 	root->orphan_block_rsv = NULL;
2902 	spin_unlock(&root->orphan_lock);
2903 
2904 	if (root->orphan_item_inserted &&
2905 	    btrfs_root_refs(&root->root_item) > 0) {
2906 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2907 					    root->root_key.objectid);
2908 		BUG_ON(ret);
2909 		root->orphan_item_inserted = 0;
2910 	}
2911 
2912 	if (block_rsv) {
2913 		WARN_ON(block_rsv->size > 0);
2914 		btrfs_free_block_rsv(root, block_rsv);
2915 	}
2916 }
2917 
2918 /*
2919  * This creates an orphan entry for the given inode in case something goes
2920  * wrong in the middle of an unlink/truncate.
2921  *
2922  * NOTE: the caller of this function should reserve 5 units of metadata
2923  *	 before calling it.
2924  */
2925 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2926 {
2927 	struct btrfs_root *root = BTRFS_I(inode)->root;
2928 	struct btrfs_block_rsv *block_rsv = NULL;
2929 	int reserve = 0;
2930 	int insert = 0;
2931 	int ret;
2932 
2933 	if (!root->orphan_block_rsv) {
2934 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2935 		if (!block_rsv)
2936 			return -ENOMEM;
2937 	}
2938 
2939 	spin_lock(&root->orphan_lock);
2940 	if (!root->orphan_block_rsv) {
2941 		root->orphan_block_rsv = block_rsv;
2942 	} else if (block_rsv) {
2943 		btrfs_free_block_rsv(root, block_rsv);
2944 		block_rsv = NULL;
2945 	}
2946 
2947 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2948 			      &BTRFS_I(inode)->runtime_flags)) {
2949 #if 0
2950 		/*
2951 		 * For proper ENOSPC handling, we should do orphan
2952 		 * cleanup when mounting. But this introduces backward
2953 		 * compatibility issue.
2954 		 */
2955 		if (!xchg(&root->orphan_item_inserted, 1))
2956 			insert = 2;
2957 		else
2958 			insert = 1;
2959 #endif
2960 		insert = 1;
2961 		atomic_inc(&root->orphan_inodes);
2962 	}
2963 
2964 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2965 			      &BTRFS_I(inode)->runtime_flags))
2966 		reserve = 1;
2967 	spin_unlock(&root->orphan_lock);
2968 
2969 	/* grab metadata reservation from transaction handle */
2970 	if (reserve) {
2971 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2972 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2973 	}
2974 
2975 	/* insert an orphan item to track this unlinked/truncated file */
2976 	if (insert >= 1) {
2977 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2978 		if (ret && ret != -EEXIST) {
2979 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2980 				  &BTRFS_I(inode)->runtime_flags);
2981 			btrfs_abort_transaction(trans, root, ret);
2982 			return ret;
2983 		}
2984 		ret = 0;
2985 	}
2986 
2987 	/* insert an orphan item to record that this subvolume has orphans */
2988 	if (insert >= 2) {
2989 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2990 					       root->root_key.objectid);
2991 		if (ret && ret != -EEXIST) {
2992 			btrfs_abort_transaction(trans, root, ret);
2993 			return ret;
2994 		}
2995 	}
2996 	return 0;
2997 }
2998 
2999 /*
3000  * We have done the truncate/delete so we can go ahead and remove the orphan
3001  * item for this particular inode.
3002  */
3003 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
3004 {
3005 	struct btrfs_root *root = BTRFS_I(inode)->root;
3006 	int delete_item = 0;
3007 	int release_rsv = 0;
3008 	int ret = 0;
3009 
3010 	spin_lock(&root->orphan_lock);
3011 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3012 			       &BTRFS_I(inode)->runtime_flags))
3013 		delete_item = 1;
3014 
3015 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3016 			       &BTRFS_I(inode)->runtime_flags))
3017 		release_rsv = 1;
3018 	spin_unlock(&root->orphan_lock);
3019 
3020 	if (trans && delete_item) {
3021 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3022 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3023 	}
3024 
3025 	if (release_rsv) {
3026 		btrfs_orphan_release_metadata(inode);
3027 		atomic_dec(&root->orphan_inodes);
3028 	}
3029 
3030 	return 0;
3031 }
3032 
3033 /*
3034  * this cleans up any orphans that may be left on the list from the last use
3035  * of this root.
3036  */
3037 int btrfs_orphan_cleanup(struct btrfs_root *root)
3038 {
3039 	struct btrfs_path *path;
3040 	struct extent_buffer *leaf;
3041 	struct btrfs_key key, found_key;
3042 	struct btrfs_trans_handle *trans;
3043 	struct inode *inode;
3044 	u64 last_objectid = 0;
3045 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
3046 
3047 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3048 		return 0;
3049 
3050 	path = btrfs_alloc_path();
3051 	if (!path) {
3052 		ret = -ENOMEM;
3053 		goto out;
3054 	}
3055 	path->reada = -1;
3056 
3057 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3058 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3059 	key.offset = (u64)-1;
3060 
3061 	while (1) {
3062 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3063 		if (ret < 0)
3064 			goto out;
3065 
3066 		/*
3067 		 * ret == 0 means we found what we were searching for, which
3068 		 * is weird, but possible, so only mess with the path if we
3069 		 * didn't find the key; then see if we have entries that match
3070 		 */
3071 		if (ret > 0) {
3072 			ret = 0;
3073 			if (path->slots[0] == 0)
3074 				break;
3075 			path->slots[0]--;
3076 		}
3077 
3078 		/* pull out the item */
3079 		leaf = path->nodes[0];
3080 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3081 
3082 		/* make sure the item matches what we want */
3083 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3084 			break;
3085 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3086 			break;
3087 
3088 		/* release the path since we're done with it */
3089 		btrfs_release_path(path);
3090 
3091 		/*
3092 		 * this is basically btrfs_lookup, without the crossing-root
3093 		 * part.  We store the inode number in the offset field of
3094 		 * the orphan item.
3095 		 */
3096 
3097 		if (found_key.offset == last_objectid) {
3098 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
3099 			       "stopping orphan cleanup\n");
3100 			ret = -EINVAL;
3101 			goto out;
3102 		}
3103 
3104 		last_objectid = found_key.offset;
3105 
3106 		found_key.objectid = found_key.offset;
3107 		found_key.type = BTRFS_INODE_ITEM_KEY;
3108 		found_key.offset = 0;
3109 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3110 		ret = PTR_RET(inode);
3111 		if (ret && ret != -ESTALE)
3112 			goto out;
3113 
3114 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
3115 			struct btrfs_root *dead_root;
3116 			struct btrfs_fs_info *fs_info = root->fs_info;
3117 			int is_dead_root = 0;
3118 
3119 			/*
3120 			 * this is an orphan in the tree root. Currently these
3121 			 * could come from 2 sources:
3122 			 *  a) a snapshot deletion in progress
3123 			 *  b) a free space cache inode
3124 			 * We need to distinguish those two, as the snapshot
3125 			 * orphan must not get deleted.
3126 			 * find_dead_roots already ran before us, so if this
3127 			 * is a snapshot deletion, we should find the root
3128 			 * in the dead_roots list
3129 			 */
3130 			spin_lock(&fs_info->trans_lock);
3131 			list_for_each_entry(dead_root, &fs_info->dead_roots,
3132 					    root_list) {
3133 				if (dead_root->root_key.objectid ==
3134 				    found_key.objectid) {
3135 					is_dead_root = 1;
3136 					break;
3137 				}
3138 			}
3139 			spin_unlock(&fs_info->trans_lock);
3140 			if (is_dead_root) {
3141 				/* prevent this orphan from being found again */
3142 				key.offset = found_key.objectid - 1;
3143 				continue;
3144 			}
3145 		}
3146 		/*
3147 		 * Inode is already gone but the orphan item is still there,
3148 		 * kill the orphan item.
3149 		 */
3150 		if (ret == -ESTALE) {
3151 			trans = btrfs_start_transaction(root, 1);
3152 			if (IS_ERR(trans)) {
3153 				ret = PTR_ERR(trans);
3154 				goto out;
3155 			}
3156 			printk(KERN_ERR "auto deleting %Lu\n",
3157 			       found_key.objectid);
3158 			ret = btrfs_del_orphan_item(trans, root,
3159 						    found_key.objectid);
3160 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3161 			btrfs_end_transaction(trans, root);
3162 			continue;
3163 		}
3164 
3165 		/*
3166 		 * add this inode to the orphan list so btrfs_orphan_del does
3167 		 * the proper thing when we hit it
3168 		 */
3169 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3170 			&BTRFS_I(inode)->runtime_flags);
3171 		atomic_inc(&root->orphan_inodes);
3172 
3173 		/* if we have links, this was a truncate, let's do that */
3174 		if (inode->i_nlink) {
3175 			if (!S_ISREG(inode->i_mode)) {
3176 				WARN_ON(1);
3177 				iput(inode);
3178 				continue;
3179 			}
3180 			nr_truncate++;
3181 
3182 			/* 1 for the orphan item deletion. */
3183 			trans = btrfs_start_transaction(root, 1);
3184 			if (IS_ERR(trans)) {
3185 				ret = PTR_ERR(trans);
3186 				goto out;
3187 			}
3188 			ret = btrfs_orphan_add(trans, inode);
3189 			btrfs_end_transaction(trans, root);
3190 			if (ret)
3191 				goto out;
3192 
3193 			ret = btrfs_truncate(inode);
3194 			if (ret)
3195 				btrfs_orphan_del(NULL, inode);
3196 		} else {
3197 			nr_unlink++;
3198 		}
3199 
3200 		/* this will do delete_inode and everything for us */
3201 		iput(inode);
3202 		if (ret)
3203 			goto out;
3204 	}
3205 	/* release the path since we're done with it */
3206 	btrfs_release_path(path);
3207 
3208 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3209 
3210 	if (root->orphan_block_rsv)
3211 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
3212 					(u64)-1);
3213 
3214 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
3215 		trans = btrfs_join_transaction(root);
3216 		if (!IS_ERR(trans))
3217 			btrfs_end_transaction(trans, root);
3218 	}
3219 
3220 	if (nr_unlink)
3221 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
3222 	if (nr_truncate)
3223 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
3224 
3225 out:
3226 	if (ret)
3227 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
3228 	btrfs_free_path(path);
3229 	return ret;
3230 }
3231 
3232 /*
3233  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3234  * don't find any xattrs, we know there can't be any acls.
3235  *
3236  * slot is the slot the inode is in, objectid is the objectid of the inode
3237  */
3238 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3239 					  int slot, u64 objectid)
3240 {
3241 	u32 nritems = btrfs_header_nritems(leaf);
3242 	struct btrfs_key found_key;
3243 	int scanned = 0;
3244 
3245 	slot++;
3246 	while (slot < nritems) {
3247 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3248 
3249 		/* we found a different objectid, there must not be acls */
3250 		if (found_key.objectid != objectid)
3251 			return 0;
3252 
3253 		/* we found an xattr, assume we've got an acl */
3254 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
3255 			return 1;
3256 
3257 		/*
3258 		 * we found a key greater than an xattr key, there can't
3259 		 * be any acls later on
3260 		 */
3261 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3262 			return 0;
3263 
3264 		slot++;
3265 		scanned++;
3266 
3267 		/*
3268 		 * it goes inode, inode backrefs, xattrs, extents,
3269 		 * so if there are a ton of hard links to an inode there can
3270 		 * be a lot of backrefs.  Don't waste time searching too hard,
3271 		 * this is just an optimization
3272 		 */
3273 		if (scanned >= 8)
3274 			break;
3275 	}
3276 	/* we hit the end of the leaf before we found an xattr or
3277 	 * something larger than an xattr.  We have to assume the inode
3278 	 * has acls
3279 	 */
3280 	return 1;
3281 }
3282 
3283 /*
3284  * read an inode from the btree into the in-memory inode
3285  */
3286 static void btrfs_read_locked_inode(struct inode *inode)
3287 {
3288 	struct btrfs_path *path;
3289 	struct extent_buffer *leaf;
3290 	struct btrfs_inode_item *inode_item;
3291 	struct btrfs_timespec *tspec;
3292 	struct btrfs_root *root = BTRFS_I(inode)->root;
3293 	struct btrfs_key location;
3294 	int maybe_acls;
3295 	u32 rdev;
3296 	int ret;
3297 	bool filled = false;
3298 
3299 	ret = btrfs_fill_inode(inode, &rdev);
3300 	if (!ret)
3301 		filled = true;
3302 
3303 	path = btrfs_alloc_path();
3304 	if (!path)
3305 		goto make_bad;
3306 
3307 	path->leave_spinning = 1;
3308 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3309 
3310 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3311 	if (ret)
3312 		goto make_bad;
3313 
3314 	leaf = path->nodes[0];
3315 
3316 	if (filled)
3317 		goto cache_acl;
3318 
3319 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3320 				    struct btrfs_inode_item);
3321 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3322 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3323 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3324 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3325 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3326 
3327 	tspec = btrfs_inode_atime(inode_item);
3328 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3329 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3330 
3331 	tspec = btrfs_inode_mtime(inode_item);
3332 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3333 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3334 
3335 	tspec = btrfs_inode_ctime(inode_item);
3336 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3337 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3338 
3339 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3340 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3341 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3342 
3343 	/*
3344 	 * If we were modified in the current generation and evicted from memory
3345 	 * and then re-read we need to do a full sync since we don't have any
3346 	 * idea about which extents were modified before we were evicted from
3347 	 * cache.
3348 	 */
3349 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3350 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3351 			&BTRFS_I(inode)->runtime_flags);
3352 
3353 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3354 	inode->i_generation = BTRFS_I(inode)->generation;
3355 	inode->i_rdev = 0;
3356 	rdev = btrfs_inode_rdev(leaf, inode_item);
3357 
3358 	BTRFS_I(inode)->index_cnt = (u64)-1;
3359 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3360 cache_acl:
3361 	/*
3362 	 * try to precache a NULL acl entry for files that don't have
3363 	 * any xattrs or acls
3364 	 */
3365 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3366 					   btrfs_ino(inode));
3367 	if (!maybe_acls)
3368 		cache_no_acl(inode);
3369 
3370 	btrfs_free_path(path);
3371 
3372 	switch (inode->i_mode & S_IFMT) {
3373 	case S_IFREG:
3374 		inode->i_mapping->a_ops = &btrfs_aops;
3375 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3376 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3377 		inode->i_fop = &btrfs_file_operations;
3378 		inode->i_op = &btrfs_file_inode_operations;
3379 		break;
3380 	case S_IFDIR:
3381 		inode->i_fop = &btrfs_dir_file_operations;
3382 		if (root == root->fs_info->tree_root)
3383 			inode->i_op = &btrfs_dir_ro_inode_operations;
3384 		else
3385 			inode->i_op = &btrfs_dir_inode_operations;
3386 		break;
3387 	case S_IFLNK:
3388 		inode->i_op = &btrfs_symlink_inode_operations;
3389 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
3390 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3391 		break;
3392 	default:
3393 		inode->i_op = &btrfs_special_inode_operations;
3394 		init_special_inode(inode, inode->i_mode, rdev);
3395 		break;
3396 	}
3397 
3398 	btrfs_update_iflags(inode);
3399 	return;
3400 
3401 make_bad:
3402 	btrfs_free_path(path);
3403 	make_bad_inode(inode);
3404 }
3405 
3406 /*
3407  * given a leaf and an inode, copy the inode fields into the leaf
3408  */
3409 static void fill_inode_item(struct btrfs_trans_handle *trans,
3410 			    struct extent_buffer *leaf,
3411 			    struct btrfs_inode_item *item,
3412 			    struct inode *inode)
3413 {
3414 	struct btrfs_map_token token;
3415 
3416 	btrfs_init_map_token(&token);
3417 
3418 	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3419 	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3420 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3421 				   &token);
3422 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3423 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3424 
3425 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3426 				     inode->i_atime.tv_sec, &token);
3427 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3428 				      inode->i_atime.tv_nsec, &token);
3429 
3430 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3431 				     inode->i_mtime.tv_sec, &token);
3432 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3433 				      inode->i_mtime.tv_nsec, &token);
3434 
3435 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3436 				     inode->i_ctime.tv_sec, &token);
3437 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3438 				      inode->i_ctime.tv_nsec, &token);
3439 
3440 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3441 				     &token);
3442 	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3443 					 &token);
3444 	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3445 	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3446 	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3447 	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3448 	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3449 }
3450 
3451 /*
3452  * copy everything in the in-memory inode into the btree.
3453  */
3454 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3455 				struct btrfs_root *root, struct inode *inode)
3456 {
3457 	struct btrfs_inode_item *inode_item;
3458 	struct btrfs_path *path;
3459 	struct extent_buffer *leaf;
3460 	int ret;
3461 
3462 	path = btrfs_alloc_path();
3463 	if (!path)
3464 		return -ENOMEM;
3465 
3466 	path->leave_spinning = 1;
3467 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3468 				 1);
3469 	if (ret) {
3470 		if (ret > 0)
3471 			ret = -ENOENT;
3472 		goto failed;
3473 	}
3474 
3475 	btrfs_unlock_up_safe(path, 1);
3476 	leaf = path->nodes[0];
3477 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3478 				    struct btrfs_inode_item);
3479 
3480 	fill_inode_item(trans, leaf, inode_item, inode);
3481 	btrfs_mark_buffer_dirty(leaf);
3482 	btrfs_set_inode_last_trans(trans, inode);
3483 	ret = 0;
3484 failed:
3485 	btrfs_free_path(path);
3486 	return ret;
3487 }
3488 
3489 /*
3490  * copy everything in the in-memory inode into the btree.
3491  */
3492 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3493 				struct btrfs_root *root, struct inode *inode)
3494 {
3495 	int ret;
3496 
3497 	/*
3498 	 * If the inode is a free space inode, we can deadlock during commit
3499 	 * if we put it into the delayed code.
3500 	 *
3501 	 * The data relocation inode should also be directly updated
3502 	 * without delay.
3503 	 */
3504 	if (!btrfs_is_free_space_inode(inode)
3505 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
3506 		btrfs_update_root_times(trans, root);
3507 
3508 		ret = btrfs_delayed_update_inode(trans, root, inode);
3509 		if (!ret)
3510 			btrfs_set_inode_last_trans(trans, inode);
3511 		return ret;
3512 	}
3513 
3514 	return btrfs_update_inode_item(trans, root, inode);
3515 }
3516 
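/*
 * like btrfs_update_inode(), but falls back to updating the inode item
 * in place when the delayed-inode path fails with -ENOSPC.
 */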
3517 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3518 					 struct btrfs_root *root,
3519 					 struct inode *inode)
3520 {
3521 	int ret;
3522 
3523 	ret = btrfs_update_inode(trans, root, inode);
3524 	if (ret == -ENOSPC)
3525 		return btrfs_update_inode_item(trans, root, inode);
3526 	return ret;
3527 }
3528 
3529 /*
3530  * unlink helper that gets used here in inode.c and in the tree logging
3531  * recovery code.  It removes a link in a directory with a given name, and
3532  * also drops the back refs in the inode to the directory
3533  */
3534 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3535 				struct btrfs_root *root,
3536 				struct inode *dir, struct inode *inode,
3537 				const char *name, int name_len)
3538 {
3539 	struct btrfs_path *path;
3540 	int ret = 0;
3541 	struct extent_buffer *leaf;
3542 	struct btrfs_dir_item *di;
3543 	struct btrfs_key key;
3544 	u64 index;
3545 	u64 ino = btrfs_ino(inode);
3546 	u64 dir_ino = btrfs_ino(dir);
3547 
3548 	path = btrfs_alloc_path();
3549 	if (!path) {
3550 		ret = -ENOMEM;
3551 		goto out;
3552 	}
3553 
3554 	path->leave_spinning = 1;
3555 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3556 				    name, name_len, -1);
3557 	if (IS_ERR(di)) {
3558 		ret = PTR_ERR(di);
3559 		goto err;
3560 	}
3561 	if (!di) {
3562 		ret = -ENOENT;
3563 		goto err;
3564 	}
3565 	leaf = path->nodes[0];
3566 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3567 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3568 	if (ret)
3569 		goto err;
3570 	btrfs_release_path(path);
3571 
3572 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3573 				  dir_ino, &index);
3574 	if (ret) {
3575 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
3576 		       "inode %llu parent %llu\n", name_len, name,
3577 		       (unsigned long long)ino, (unsigned long long)dir_ino);
3578 		btrfs_abort_transaction(trans, root, ret);
3579 		goto err;
3580 	}
3581 
3582 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3583 	if (ret) {
3584 		btrfs_abort_transaction(trans, root, ret);
3585 		goto err;
3586 	}
3587 
3588 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3589 					 inode, dir_ino);
3590 	if (ret != 0 && ret != -ENOENT) {
3591 		btrfs_abort_transaction(trans, root, ret);
3592 		goto err;
3593 	}
3594 
3595 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3596 					   dir, index);
3597 	if (ret == -ENOENT)
3598 		ret = 0;
3599 err:
3600 	btrfs_free_path(path);
3601 	if (ret)
3602 		goto out;
3603 
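	/* a dir's i_size counts each name twice: dir item + dir index item */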
3604 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3605 	inode_inc_iversion(inode);
3606 	inode_inc_iversion(dir);
3607 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3608 	ret = btrfs_update_inode(trans, root, dir);
3609 out:
3610 	return ret;
3611 }
3612 
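/*
 * Remove the directory entry for @name and drop the victim inode's link
 * count, writing the updated inode item back out.
 */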
3613 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3614 		       struct btrfs_root *root,
3615 		       struct inode *dir, struct inode *inode,
3616 		       const char *name, int name_len)
3617 {
3618 	int ret;
3619 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3620 	if (!ret) {
3621 		btrfs_drop_nlink(inode);
3622 		ret = btrfs_update_inode(trans, root, inode);
3623 	}
3624 	return ret;
3625 }
3626 
3627 
3628 /* helper to check if there is any shared block in the path */
3629 static int check_path_shared(struct btrfs_root *root,
3630 			     struct btrfs_path *path)
3631 {
3632 	struct extent_buffer *eb;
3633 	int level;
3634 	u64 refs = 1;
3635 
3636 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
3637 		int ret;
3638 
3639 		if (!path->nodes[level])
3640 			break;
3641 		eb = path->nodes[level];
3642 		if (!btrfs_block_can_be_shared(root, eb))
3643 			continue;
3644 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
3645 					       &refs, NULL);
3646 		if (refs > 1)
3647 			return 1;
3648 	}
3649 	return 0;
3650 }
3651 
3652 /*
3653  * helper to start transaction for unlink and rmdir.
3654  *
3655  * unlink and rmdir are special in btrfs: they do not always free space,
3656  * so in the enospc case we should make sure they will free space before
3657  * allowing them to use the global metadata reservation.
3658  */
3659 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
3660 						       struct dentry *dentry)
3661 {
3662 	struct btrfs_trans_handle *trans;
3663 	struct btrfs_root *root = BTRFS_I(dir)->root;
3664 	struct btrfs_path *path;
3665 	struct btrfs_dir_item *di;
3666 	struct inode *inode = dentry->d_inode;
3667 	u64 index;
3668 	int check_link = 1;
3669 	int err = -ENOSPC;
3670 	int ret;
3671 	u64 ino = btrfs_ino(inode);
3672 	u64 dir_ino = btrfs_ino(dir);
3673 
3674 	/*
3675 	 * 1 for the possible orphan item
3676 	 * 1 for the dir item
3677 	 * 1 for the dir index
3678 	 * 1 for the inode ref
3679 	 * 1 for the inode ref in the tree log
3680 	 * 2 for the dir entries in the log
3681 	 * 1 for the inode
3682 	 */
3683 	trans = btrfs_start_transaction(root, 8);
3684 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3685 		return trans;
3686 
3687 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
3688 		return ERR_PTR(-ENOSPC);
3689 
3690 	/* check if someone else holds a reference */
3691 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
3692 		return ERR_PTR(-ENOSPC);
3693 
3694 	if (atomic_read(&inode->i_count) > 2)
3695 		return ERR_PTR(-ENOSPC);
3696 
3697 	if (xchg(&root->fs_info->enospc_unlink, 1))
3698 		return ERR_PTR(-ENOSPC);
3699 
3700 	path = btrfs_alloc_path();
3701 	if (!path) {
3702 		root->fs_info->enospc_unlink = 0;
3703 		return ERR_PTR(-ENOMEM);
3704 	}
3705 
3706 	/* 1 for the orphan item */
3707 	trans = btrfs_start_transaction(root, 1);
3708 	if (IS_ERR(trans)) {
3709 		btrfs_free_path(path);
3710 		root->fs_info->enospc_unlink = 0;
3711 		return trans;
3712 	}
3713 
3714 	path->skip_locking = 1;
3715 	path->search_commit_root = 1;
3716 
3717 	ret = btrfs_lookup_inode(trans, root, path,
3718 				&BTRFS_I(dir)->location, 0);
3719 	if (ret < 0) {
3720 		err = ret;
3721 		goto out;
3722 	}
3723 	if (ret == 0) {
3724 		if (check_path_shared(root, path))
3725 			goto out;
3726 	} else {
3727 		check_link = 0;
3728 	}
3729 	btrfs_release_path(path);
3730 
3731 	ret = btrfs_lookup_inode(trans, root, path,
3732 				&BTRFS_I(inode)->location, 0);
3733 	if (ret < 0) {
3734 		err = ret;
3735 		goto out;
3736 	}
3737 	if (ret == 0) {
3738 		if (check_path_shared(root, path))
3739 			goto out;
3740 	} else {
3741 		check_link = 0;
3742 	}
3743 	btrfs_release_path(path);
3744 
3745 	if (ret == 0 && S_ISREG(inode->i_mode)) {
3746 		ret = btrfs_lookup_file_extent(trans, root, path,
3747 					       ino, (u64)-1, 0);
3748 		if (ret < 0) {
3749 			err = ret;
3750 			goto out;
3751 		}
3752 		BUG_ON(ret == 0); /* Corruption */
3753 		if (check_path_shared(root, path))
3754 			goto out;
3755 		btrfs_release_path(path);
3756 	}
3757 
3758 	if (!check_link) {
3759 		err = 0;
3760 		goto out;
3761 	}
3762 
3763 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3764 				dentry->d_name.name, dentry->d_name.len, 0);
3765 	if (IS_ERR(di)) {
3766 		err = PTR_ERR(di);
3767 		goto out;
3768 	}
3769 	if (di) {
3770 		if (check_path_shared(root, path))
3771 			goto out;
3772 	} else {
3773 		err = 0;
3774 		goto out;
3775 	}
3776 	btrfs_release_path(path);
3777 
3778 	ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
3779 					dentry->d_name.len, ino, dir_ino, 0,
3780 					&index);
3781 	if (ret) {
3782 		err = ret;
3783 		goto out;
3784 	}
3785 
3786 	if (check_path_shared(root, path))
3787 		goto out;
3788 
3789 	btrfs_release_path(path);
3790 
3791 	/*
3792 	 * This is a commit root search: if we can look up the inode item and
3793 	 * other related items in the commit root, it means the transaction
3794 	 * that created the dir/file has been committed, and the dir index
3795 	 * item that we delayed inserting has also made it into the commit
3796 	 * root. So we needn't worry about the delayed insertion of the dir
3797 	 * index item here.
3798 	 */
3799 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
3800 				dentry->d_name.name, dentry->d_name.len, 0);
3801 	if (IS_ERR(di)) {
3802 		err = PTR_ERR(di);
3803 		goto out;
3804 	}
3805 	BUG_ON(ret == -ENOENT);
3806 	if (check_path_shared(root, path))
3807 		goto out;
3808 
3809 	err = 0;
3810 out:
3811 	btrfs_free_path(path);
3812 	/* Migrate the orphan reservation over */
3813 	if (!err)
3814 		err = btrfs_block_rsv_migrate(trans->block_rsv,
3815 				&root->fs_info->global_block_rsv,
3816 				trans->bytes_reserved);
3817 
3818 	if (err) {
3819 		btrfs_end_transaction(trans, root);
3820 		root->fs_info->enospc_unlink = 0;
3821 		return ERR_PTR(err);
3822 	}
3823 
3824 	trans->block_rsv = &root->fs_info->global_block_rsv;
3825 	return trans;
3826 }
3827 
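/*
 * Undo what __unlink_start_trans() set up: if we were running off the
 * global reserve, give the migrated space back, restore the transaction
 * block_rsv and clear the enospc_unlink flag before ending the handle.
 */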
3828 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3829 			       struct btrfs_root *root)
3830 {
3831 	if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
3832 		btrfs_block_rsv_release(root, trans->block_rsv,
3833 					trans->bytes_reserved);
3834 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3835 		BUG_ON(!root->fs_info->enospc_unlink);
3836 		root->fs_info->enospc_unlink = 0;
3837 	}
3838 	btrfs_end_transaction(trans, root);
3839 }
3840 
3841 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3842 {
3843 	struct btrfs_root *root = BTRFS_I(dir)->root;
3844 	struct btrfs_trans_handle *trans;
3845 	struct inode *inode = dentry->d_inode;
3846 	int ret;
3847 
3848 	trans = __unlink_start_trans(dir, dentry);
3849 	if (IS_ERR(trans))
3850 		return PTR_ERR(trans);
3851 
3852 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3853 
3854 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3855 				 dentry->d_name.name, dentry->d_name.len);
3856 	if (ret)
3857 		goto out;
3858 
3859 	if (inode->i_nlink == 0) {
3860 		ret = btrfs_orphan_add(trans, inode);
3861 		if (ret)
3862 			goto out;
3863 	}
3864 
3865 out:
3866 	__unlink_end_trans(trans, root);
3867 	btrfs_btree_balance_dirty(root);
3868 	return ret;
3869 }
3870 
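/*
 * Remove a directory entry that points at a subvolume root.  There is
 * no inode item to update in this tree: we delete the dir item and the
 * root ref (falling back to searching the dir index item if the root
 * ref is already gone), drop the delayed dir index, and then fix up
 * the parent directory's size and times.
 */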
3871 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3872 			struct btrfs_root *root,
3873 			struct inode *dir, u64 objectid,
3874 			const char *name, int name_len)
3875 {
3876 	struct btrfs_path *path;
3877 	struct extent_buffer *leaf;
3878 	struct btrfs_dir_item *di;
3879 	struct btrfs_key key;
3880 	u64 index;
3881 	int ret;
3882 	u64 dir_ino = btrfs_ino(dir);
3883 
3884 	path = btrfs_alloc_path();
3885 	if (!path)
3886 		return -ENOMEM;
3887 
3888 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3889 				   name, name_len, -1);
3890 	if (IS_ERR_OR_NULL(di)) {
3891 		if (!di)
3892 			ret = -ENOENT;
3893 		else
3894 			ret = PTR_ERR(di);
3895 		goto out;
3896 	}
3897 
3898 	leaf = path->nodes[0];
3899 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3900 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3901 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3902 	if (ret) {
3903 		btrfs_abort_transaction(trans, root, ret);
3904 		goto out;
3905 	}
3906 	btrfs_release_path(path);
3907 
3908 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3909 				 objectid, root->root_key.objectid,
3910 				 dir_ino, &index, name, name_len);
3911 	if (ret < 0) {
3912 		if (ret != -ENOENT) {
3913 			btrfs_abort_transaction(trans, root, ret);
3914 			goto out;
3915 		}
3916 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3917 						 name, name_len);
3918 		if (IS_ERR_OR_NULL(di)) {
3919 			if (!di)
3920 				ret = -ENOENT;
3921 			else
3922 				ret = PTR_ERR(di);
3923 			btrfs_abort_transaction(trans, root, ret);
3924 			goto out;
3925 		}
3926 
3927 		leaf = path->nodes[0];
3928 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3929 		btrfs_release_path(path);
3930 		index = key.offset;
3931 	}
3932 	btrfs_release_path(path);
3933 
3934 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3935 	if (ret) {
3936 		btrfs_abort_transaction(trans, root, ret);
3937 		goto out;
3938 	}
3939 
3940 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3941 	inode_inc_iversion(dir);
3942 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3943 	ret = btrfs_update_inode_fallback(trans, root, dir);
3944 	if (ret)
3945 		btrfs_abort_transaction(trans, root, ret);
3946 out:
3947 	btrfs_free_path(path);
3948 	return ret;
3949 }
3950 
3951 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3952 {
3953 	struct inode *inode = dentry->d_inode;
3954 	int err = 0;
3955 	struct btrfs_root *root = BTRFS_I(dir)->root;
3956 	struct btrfs_trans_handle *trans;
3957 
3958 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3959 		return -ENOTEMPTY;
3960 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3961 		return -EPERM;
3962 
3963 	trans = __unlink_start_trans(dir, dentry);
3964 	if (IS_ERR(trans))
3965 		return PTR_ERR(trans);
3966 
3967 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3968 		err = btrfs_unlink_subvol(trans, root, dir,
3969 					  BTRFS_I(inode)->location.objectid,
3970 					  dentry->d_name.name,
3971 					  dentry->d_name.len);
3972 		goto out;
3973 	}
3974 
3975 	err = btrfs_orphan_add(trans, inode);
3976 	if (err)
3977 		goto out;
3978 
3979 	/* now the directory is empty */
3980 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3981 				 dentry->d_name.name, dentry->d_name.len);
3982 	if (!err)
3983 		btrfs_i_size_write(inode, 0);
3984 out:
3985 	__unlink_end_trans(trans, root);
3986 	btrfs_btree_balance_dirty(root);
3987 
3988 	return err;
3989 }
3990 
3991 /*
3992  * this can truncate away extent items, csum items and directory items.
3993  * It starts at a high offset and removes keys until it can't find
3994  * any higher than new_size
3995  *
3996  * csum items that cross the new i_size are truncated to the new size
3997  * as well.
3998  *
3999  * min_type is the minimum key type to truncate down to.  If set to 0, this
4000  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4001  */
4002 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4003 			       struct btrfs_root *root,
4004 			       struct inode *inode,
4005 			       u64 new_size, u32 min_type)
4006 {
4007 	struct btrfs_path *path;
4008 	struct extent_buffer *leaf;
4009 	struct btrfs_file_extent_item *fi;
4010 	struct btrfs_key key;
4011 	struct btrfs_key found_key;
4012 	u64 extent_start = 0;
4013 	u64 extent_num_bytes = 0;
4014 	u64 extent_offset = 0;
4015 	u64 item_end = 0;
4016 	u32 found_type = (u8)-1;
4017 	int found_extent;
4018 	int del_item;
4019 	int pending_del_nr = 0;
4020 	int pending_del_slot = 0;
4021 	int extent_type = -1;
4022 	int ret;
4023 	int err = 0;
4024 	u64 ino = btrfs_ino(inode);
4025 
4026 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4027 
4028 	path = btrfs_alloc_path();
4029 	if (!path)
4030 		return -ENOMEM;
4031 	path->reada = -1;
4032 
4033 	/*
4034 	 * We want to drop from the next block forward in case this new size is
4035 	 * not block aligned since we will be keeping the last block of the
4036 	 * extent just the way it is.
4037 	 */
4038 	if (root->ref_cows || root == root->fs_info->tree_root)
4039 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
4040 					root->sectorsize), (u64)-1, 0);
4041 
4042 	/*
4043 	 * This function is also used to drop the items in the log tree before
4044 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4045  * it is used to drop the logged items. So we shouldn't kill the delayed
4046 	 * items.
4047 	 */
4048 	if (min_type == 0 && root == BTRFS_I(inode)->root)
4049 		btrfs_kill_delayed_inode_items(inode);
4050 
4051 	key.objectid = ino;
4052 	key.offset = (u64)-1;
4053 	key.type = (u8)-1;
4054 
4055 search_again:
4056 	path->leave_spinning = 1;
4057 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4058 	if (ret < 0) {
4059 		err = ret;
4060 		goto out;
4061 	}
4062 
4063 	if (ret > 0) {
4064 		/* there are no items in the tree for us to truncate, we're
4065 		 * done
4066 		 */
4067 		if (path->slots[0] == 0)
4068 			goto out;
4069 		path->slots[0]--;
4070 	}
4071 
4072 	while (1) {
4073 		fi = NULL;
4074 		leaf = path->nodes[0];
4075 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4076 		found_type = btrfs_key_type(&found_key);
4077 
4078 		if (found_key.objectid != ino)
4079 			break;
4080 
4081 		if (found_type < min_type)
4082 			break;
4083 
4084 		item_end = found_key.offset;
4085 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
4086 			fi = btrfs_item_ptr(leaf, path->slots[0],
4087 					    struct btrfs_file_extent_item);
4088 			extent_type = btrfs_file_extent_type(leaf, fi);
4089 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4090 				item_end +=
4091 				    btrfs_file_extent_num_bytes(leaf, fi);
4092 			} else { /* BTRFS_FILE_EXTENT_INLINE */
4093 				item_end += btrfs_file_extent_inline_len(leaf,
4094 									 fi);
4095 			}
4096 			item_end--;
4097 		}
4098 		if (found_type > min_type) {
4099 			del_item = 1;
4100 		} else {
4101 			if (item_end < new_size)
4102 				break;
4103 			if (found_key.offset >= new_size)
4104 				del_item = 1;
4105 			else
4106 				del_item = 0;
4107 		}
4108 		found_extent = 0;
4109 		/* FIXME, shrink the extent if the ref count is only 1 */
4110 		if (found_type != BTRFS_EXTENT_DATA_KEY)
4111 			goto delete;
4112 
4113 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4114 			u64 num_dec;
4115 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4116 			if (!del_item) {
4117 				u64 orig_num_bytes =
4118 					btrfs_file_extent_num_bytes(leaf, fi);
4119 				extent_num_bytes = ALIGN(new_size -
4120 						found_key.offset,
4121 						root->sectorsize);
4122 				btrfs_set_file_extent_num_bytes(leaf, fi,
4123 							 extent_num_bytes);
4124 				num_dec = (orig_num_bytes -
4125 					   extent_num_bytes);
4126 				if (root->ref_cows && extent_start != 0)
4127 					inode_sub_bytes(inode, num_dec);
4128 				btrfs_mark_buffer_dirty(leaf);
4129 			} else {
4130 				extent_num_bytes =
4131 					btrfs_file_extent_disk_num_bytes(leaf,
4132 									 fi);
4133 				extent_offset = found_key.offset -
4134 					btrfs_file_extent_offset(leaf, fi);
4135 
4136 				/* FIXME blocksize != 4096 */
4137 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4138 				if (extent_start != 0) {
4139 					found_extent = 1;
4140 					if (root->ref_cows)
4141 						inode_sub_bytes(inode, num_dec);
4142 				}
4143 			}
4144 		} else { /* BTRFS_FILE_EXTENT_INLINE */
4145 			/*
4146 			 * we can't truncate inline items that have had
4147 			 * special encodings
4148 			 */
4149 			if (!del_item &&
4150 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
4151 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4152 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4153 				u32 size = new_size - found_key.offset;
4154 
4155 				if (root->ref_cows) {
4156 					inode_sub_bytes(inode, item_end + 1 -
4157 							new_size);
4158 				}
4159 				size =
4160 				    btrfs_file_extent_calc_inline_size(size);
4161 				btrfs_truncate_item(trans, root, path,
4162 						    size, 1);
4163 			} else if (root->ref_cows) {
4164 				inode_sub_bytes(inode, item_end + 1 -
4165 						found_key.offset);
4166 			}
4167 		}
4168 delete:
4169 		if (del_item) {
4170 			if (!pending_del_nr) {
4171 				/* no pending yet, add ourselves */
4172 				pending_del_slot = path->slots[0];
4173 				pending_del_nr = 1;
4174 			} else if (pending_del_nr &&
4175 				   path->slots[0] + 1 == pending_del_slot) {
4176 				/* hop on the pending chunk */
4177 				pending_del_nr++;
4178 				pending_del_slot = path->slots[0];
4179 			} else {
4180 				BUG();
4181 			}
4182 		} else {
4183 			break;
4184 		}
4185 		if (found_extent && (root->ref_cows ||
4186 				     root == root->fs_info->tree_root)) {
4187 			btrfs_set_path_blocking(path);
4188 			ret = btrfs_free_extent(trans, root, extent_start,
4189 						extent_num_bytes, 0,
4190 						btrfs_header_owner(leaf),
4191 						ino, extent_offset, 0);
4192 			BUG_ON(ret);
4193 		}
4194 
4195 		if (found_type == BTRFS_INODE_ITEM_KEY)
4196 			break;
4197 
4198 		if (path->slots[0] == 0 ||
4199 		    path->slots[0] != pending_del_slot) {
4200 			if (pending_del_nr) {
4201 				ret = btrfs_del_items(trans, root, path,
4202 						pending_del_slot,
4203 						pending_del_nr);
4204 				if (ret) {
4205 					btrfs_abort_transaction(trans,
4206 								root, ret);
4207 					goto error;
4208 				}
4209 				pending_del_nr = 0;
4210 			}
4211 			btrfs_release_path(path);
4212 			goto search_again;
4213 		} else {
4214 			path->slots[0]--;
4215 		}
4216 	}
4217 out:
4218 	if (pending_del_nr) {
4219 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
4220 				      pending_del_nr);
4221 		if (ret)
4222 			btrfs_abort_transaction(trans, root, ret);
4223 	}
4224 error:
4225 	btrfs_free_path(path);
4226 	return err;
4227 }
4228 
4229 /*
4230  * btrfs_truncate_page - read, zero a chunk and write a page
4231  * @inode - inode that we're zeroing
4232  * @from - the offset to start zeroing
4233  * @len - the length to zero, 0 to zero the entire range relative to the
4234  *	offset
4235  * @front - zero up to the offset instead of from the offset on
4236  *
4237  * This will find the page for the "from" offset and cow the page and zero the
4238  * part we want to zero.  This is used with truncate and hole punching.
4239  */
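/*
 * For example (assuming 4K pages and sectors), truncating to i_size 7000
 * lands here with from == 7000, front == 0: the page covering bytes
 * 4096..8191 is read in, marked delalloc, and bytes 7000..8191 are
 * zeroed so stale data never peeks out past the new EOF.
 */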
4240 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4241 			int front)
4242 {
4243 	struct address_space *mapping = inode->i_mapping;
4244 	struct btrfs_root *root = BTRFS_I(inode)->root;
4245 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4246 	struct btrfs_ordered_extent *ordered;
4247 	struct extent_state *cached_state = NULL;
4248 	char *kaddr;
4249 	u32 blocksize = root->sectorsize;
4250 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
4251 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
4252 	struct page *page;
4253 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4254 	int ret = 0;
4255 	u64 page_start;
4256 	u64 page_end;
4257 
4258 	if ((offset & (blocksize - 1)) == 0 &&
4259 	    (!len || ((len & (blocksize - 1)) == 0)))
4260 		goto out;
4261 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4262 	if (ret)
4263 		goto out;
4264 
4265 again:
4266 	page = find_or_create_page(mapping, index, mask);
4267 	if (!page) {
4268 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4269 		ret = -ENOMEM;
4270 		goto out;
4271 	}
4272 
4273 	page_start = page_offset(page);
4274 	page_end = page_start + PAGE_CACHE_SIZE - 1;
4275 
4276 	if (!PageUptodate(page)) {
4277 		ret = btrfs_readpage(NULL, page);
4278 		lock_page(page);
4279 		if (page->mapping != mapping) {
4280 			unlock_page(page);
4281 			page_cache_release(page);
4282 			goto again;
4283 		}
4284 		if (!PageUptodate(page)) {
4285 			ret = -EIO;
4286 			goto out_unlock;
4287 		}
4288 	}
4289 	wait_on_page_writeback(page);
4290 
4291 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4292 	set_page_extent_mapped(page);
4293 
4294 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
4295 	if (ordered) {
4296 		unlock_extent_cached(io_tree, page_start, page_end,
4297 				     &cached_state, GFP_NOFS);
4298 		unlock_page(page);
4299 		page_cache_release(page);
4300 		btrfs_start_ordered_extent(inode, ordered, 1);
4301 		btrfs_put_ordered_extent(ordered);
4302 		goto again;
4303 	}
4304 
4305 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4306 			  EXTENT_DIRTY | EXTENT_DELALLOC |
4307 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4308 			  0, 0, &cached_state, GFP_NOFS);
4309 
4310 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4311 					&cached_state);
4312 	if (ret) {
4313 		unlock_extent_cached(io_tree, page_start, page_end,
4314 				     &cached_state, GFP_NOFS);
4315 		goto out_unlock;
4316 	}
4317 
4318 	if (offset != PAGE_CACHE_SIZE) {
4319 		if (!len)
4320 			len = PAGE_CACHE_SIZE - offset;
4321 		kaddr = kmap(page);
4322 		if (front)
4323 			memset(kaddr, 0, offset);
4324 		else
4325 			memset(kaddr + offset, 0, len);
4326 		flush_dcache_page(page);
4327 		kunmap(page);
4328 	}
4329 	ClearPageChecked(page);
4330 	set_page_dirty(page);
4331 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4332 			     GFP_NOFS);
4333 
4334 out_unlock:
4335 	if (ret)
4336 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4337 	unlock_page(page);
4338 	page_cache_release(page);
4339 out:
4340 	return ret;
4341 }
4342 
4343 /*
4344  * This function puts in dummy file extents for the area we're creating a hole
4345  * for.  So if we are truncating this file to a larger size we need to insert
4346  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4347  * the range between oldsize and size
4348  */
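/*
 * For example, growing a file from 10000 to 20000 bytes with 4K sectors
 * (and nothing preallocated in the range) inserts a hole extent covering
 * bytes 12288..20479 and a matching EXTENT_MAP_HOLE entry in the extent
 * map cache.
 */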
4349 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4350 {
4351 	struct btrfs_trans_handle *trans;
4352 	struct btrfs_root *root = BTRFS_I(inode)->root;
4353 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4354 	struct extent_map *em = NULL;
4355 	struct extent_state *cached_state = NULL;
4356 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4357 	u64 hole_start = ALIGN(oldsize, root->sectorsize);
4358 	u64 block_end = ALIGN(size, root->sectorsize);
4359 	u64 last_byte;
4360 	u64 cur_offset;
4361 	u64 hole_size;
4362 	int err = 0;
4363 
4364 	if (size <= hole_start)
4365 		return 0;
4366 
4367 	while (1) {
4368 		struct btrfs_ordered_extent *ordered;
4369 		btrfs_wait_ordered_range(inode, hole_start,
4370 					 block_end - hole_start);
4371 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4372 				 &cached_state);
4373 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
4374 		if (!ordered)
4375 			break;
4376 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
4377 				     &cached_state, GFP_NOFS);
4378 		btrfs_put_ordered_extent(ordered);
4379 	}
4380 
4381 	cur_offset = hole_start;
4382 	while (1) {
4383 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4384 				block_end - cur_offset, 0);
4385 		if (IS_ERR(em)) {
4386 			err = PTR_ERR(em);
4387 			em = NULL;
4388 			break;
4389 		}
4390 		last_byte = min(extent_map_end(em), block_end);
4391 		last_byte = ALIGN(last_byte, root->sectorsize);
4392 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4393 			struct extent_map *hole_em;
4394 			hole_size = last_byte - cur_offset;
4395 
4396 			trans = btrfs_start_transaction(root, 3);
4397 			if (IS_ERR(trans)) {
4398 				err = PTR_ERR(trans);
4399 				break;
4400 			}
4401 
4402 			err = btrfs_drop_extents(trans, root, inode,
4403 						 cur_offset,
4404 						 cur_offset + hole_size, 1);
4405 			if (err) {
4406 				btrfs_abort_transaction(trans, root, err);
4407 				btrfs_end_transaction(trans, root);
4408 				break;
4409 			}
4410 
4411 			err = btrfs_insert_file_extent(trans, root,
4412 					btrfs_ino(inode), cur_offset, 0,
4413 					0, hole_size, 0, hole_size,
4414 					0, 0, 0);
4415 			if (err) {
4416 				btrfs_abort_transaction(trans, root, err);
4417 				btrfs_end_transaction(trans, root);
4418 				break;
4419 			}
4420 
4421 			btrfs_drop_extent_cache(inode, cur_offset,
4422 						cur_offset + hole_size - 1, 0);
4423 			hole_em = alloc_extent_map();
4424 			if (!hole_em) {
4425 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4426 					&BTRFS_I(inode)->runtime_flags);
4427 				goto next;
4428 			}
4429 			hole_em->start = cur_offset;
4430 			hole_em->len = hole_size;
4431 			hole_em->orig_start = cur_offset;
4432 
4433 			hole_em->block_start = EXTENT_MAP_HOLE;
4434 			hole_em->block_len = 0;
4435 			hole_em->orig_block_len = 0;
4436 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4437 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
4438 			hole_em->generation = trans->transid;
4439 
4440 			while (1) {
4441 				write_lock(&em_tree->lock);
4442 				err = add_extent_mapping(em_tree, hole_em);
4443 				if (!err)
4444 					list_move(&hole_em->list,
4445 						  &em_tree->modified_extents);
4446 				write_unlock(&em_tree->lock);
4447 				if (err != -EEXIST)
4448 					break;
4449 				btrfs_drop_extent_cache(inode, cur_offset,
4450 							cur_offset +
4451 							hole_size - 1, 0);
4452 			}
4453 			free_extent_map(hole_em);
4454 next:
4455 			btrfs_update_inode(trans, root, inode);
4456 			btrfs_end_transaction(trans, root);
4457 		}
4458 		free_extent_map(em);
4459 		em = NULL;
4460 		cur_offset = last_byte;
4461 		if (cur_offset >= block_end)
4462 			break;
4463 	}
4464 
4465 	free_extent_map(em);
4466 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4467 			     GFP_NOFS);
4468 	return err;
4469 }
4470 
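/*
 * Handle the ATTR_SIZE part of a setattr.  Growing is the easy case:
 * fill the new range with hole extents and bump i_size.  Shrinking
 * takes the orphan-item route so a crash in the middle of the truncate
 * can be cleaned up on the next mount.
 */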
4471 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4472 {
4473 	struct btrfs_root *root = BTRFS_I(inode)->root;
4474 	struct btrfs_trans_handle *trans;
4475 	loff_t oldsize = i_size_read(inode);
4476 	loff_t newsize = attr->ia_size;
4477 	int mask = attr->ia_valid;
4478 	int ret;
4479 
4480 	if (newsize == oldsize)
4481 		return 0;
4482 
4483 	/*
4484 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4485 	 * special case where we need to update the times despite not having
4486 	 * these flags set.  For all other operations the VFS set these flags
4487 	 * explicitly if it wants a timestamp update.
4488 	 */
4489 	if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4490 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
4491 
4492 	if (newsize > oldsize) {
4493 		truncate_pagecache(inode, oldsize, newsize);
4494 		ret = btrfs_cont_expand(inode, oldsize, newsize);
4495 		if (ret)
4496 			return ret;
4497 
4498 		trans = btrfs_start_transaction(root, 1);
4499 		if (IS_ERR(trans))
4500 			return PTR_ERR(trans);
4501 
4502 		i_size_write(inode, newsize);
4503 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4504 		ret = btrfs_update_inode(trans, root, inode);
4505 		btrfs_end_transaction(trans, root);
4506 	} else {
4507 
4508 		/*
4509 		 * We're truncating a file that used to have good data down to
4510 		 * zero. Make sure it gets into the ordered flush list so that
4511 		 * any new writes get down to disk quickly.
4512 		 */
4513 		if (newsize == 0)
4514 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4515 				&BTRFS_I(inode)->runtime_flags);
4516 
4517 		/*
4518 		 * 1 for the orphan item we're going to add
4519 		 * 1 for the orphan item deletion.
4520 		 */
4521 		trans = btrfs_start_transaction(root, 2);
4522 		if (IS_ERR(trans))
4523 			return PTR_ERR(trans);
4524 
4525 		/*
4526 		 * We need to do this in case we fail at _any_ point during the
4527 		 * actual truncate.  Once we do the truncate_setsize we could
4528 		 * invalidate pages which forces any outstanding ordered io to
4529 		 * be instantly completed which will give us extents that need
4530 		 * to be truncated.  If we fail to get an orphan inode down we
4531 		 * could have left over extents that were never meant to live,
4532 		 * so we need to guarantee from this point on that everything
4533 		 * will be consistent.
4534 		 */
4535 		ret = btrfs_orphan_add(trans, inode);
4536 		btrfs_end_transaction(trans, root);
4537 		if (ret)
4538 			return ret;
4539 
4540 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
4541 		truncate_setsize(inode, newsize);
4542 
4543 		/* Disable nonlocked read DIO to avoid an endless truncate */
4544 		btrfs_inode_block_unlocked_dio(inode);
4545 		inode_dio_wait(inode);
4546 		btrfs_inode_resume_unlocked_dio(inode);
4547 
4548 		ret = btrfs_truncate(inode);
4549 		if (ret && inode->i_nlink)
4550 			btrfs_orphan_del(NULL, inode);
4551 	}
4552 
4553 	return ret;
4554 }
4555 
4556 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4557 {
4558 	struct inode *inode = dentry->d_inode;
4559 	struct btrfs_root *root = BTRFS_I(inode)->root;
4560 	int err;
4561 
4562 	if (btrfs_root_readonly(root))
4563 		return -EROFS;
4564 
4565 	err = inode_change_ok(inode, attr);
4566 	if (err)
4567 		return err;
4568 
4569 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4570 		err = btrfs_setsize(inode, attr);
4571 		if (err)
4572 			return err;
4573 	}
4574 
4575 	if (attr->ia_valid) {
4576 		setattr_copy(inode, attr);
4577 		inode_inc_iversion(inode);
4578 		err = btrfs_dirty_inode(inode);
4579 
4580 		if (!err && attr->ia_valid & ATTR_MODE)
4581 			err = btrfs_acl_chmod(inode);
4582 	}
4583 
4584 	return err;
4585 }
4586 
4587 void btrfs_evict_inode(struct inode *inode)
4588 {
4589 	struct btrfs_trans_handle *trans;
4590 	struct btrfs_root *root = BTRFS_I(inode)->root;
4591 	struct btrfs_block_rsv *rsv, *global_rsv;
4592 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
4593 	int ret;
4594 
4595 	trace_btrfs_inode_evict(inode);
4596 
4597 	truncate_inode_pages(&inode->i_data, 0);
4598 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
4599 			       btrfs_is_free_space_inode(inode)))
4600 		goto no_delete;
4601 
4602 	if (is_bad_inode(inode)) {
4603 		btrfs_orphan_del(NULL, inode);
4604 		goto no_delete;
4605 	}
4606 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4607 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
4608 
4609 	if (root->fs_info->log_root_recovering) {
4610 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4611 				 &BTRFS_I(inode)->runtime_flags));
4612 		goto no_delete;
4613 	}
4614 
4615 	if (inode->i_nlink > 0) {
4616 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
4617 		goto no_delete;
4618 	}
4619 
4620 	ret = btrfs_commit_inode_delayed_inode(inode);
4621 	if (ret) {
4622 		btrfs_orphan_del(NULL, inode);
4623 		goto no_delete;
4624 	}
4625 
4626 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
4627 	if (!rsv) {
4628 		btrfs_orphan_del(NULL, inode);
4629 		goto no_delete;
4630 	}
4631 	rsv->size = min_size;
4632 	rsv->failfast = 1;
4633 	global_rsv = &root->fs_info->global_block_rsv;
4634 
4635 	btrfs_i_size_write(inode, 0);
4636 
4637 	/*
4638 	 * This is a bit simpler than btrfs_truncate since we've already
4639 	 * reserved our space for our orphan item in the unlink, so we just
4640 	 * need to reserve some slack space in case we add bytes and update
4641 	 * inode item when doing the truncate.
4642 	 */
4643 	while (1) {
4644 		ret = btrfs_block_rsv_refill(root, rsv, min_size,
4645 					     BTRFS_RESERVE_FLUSH_LIMIT);
4646 
4647 		/*
4648 		 * Try and steal from the global reserve since we will
4649 		 * likely not use this space anyway, we want to try as
4650 		 * hard as possible to get this to work.
4651 		 */
4652 		if (ret)
4653 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
4654 
4655 		if (ret) {
4656 			printk(KERN_WARNING "Could not get space for a "
4657 			       "delete, will truncate on mount %d\n", ret);
4658 			btrfs_orphan_del(NULL, inode);
4659 			btrfs_free_block_rsv(root, rsv);
4660 			goto no_delete;
4661 		}
4662 
4663 		trans = btrfs_join_transaction(root);
4664 		if (IS_ERR(trans)) {
4665 			btrfs_orphan_del(NULL, inode);
4666 			btrfs_free_block_rsv(root, rsv);
4667 			goto no_delete;
4668 		}
4669 
4670 		trans->block_rsv = rsv;
4671 
4672 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4673 		if (ret != -ENOSPC)
4674 			break;
4675 
4676 		trans->block_rsv = &root->fs_info->trans_block_rsv;
4677 		btrfs_end_transaction(trans, root);
4678 		trans = NULL;
4679 		btrfs_btree_balance_dirty(root);
4680 	}
4681 
4682 	btrfs_free_block_rsv(root, rsv);
4683 
4684 	if (ret == 0) {
4685 		trans->block_rsv = root->orphan_block_rsv;
4686 		ret = btrfs_orphan_del(trans, inode);
4687 		BUG_ON(ret);
4688 	}
4689 
4690 	trans->block_rsv = &root->fs_info->trans_block_rsv;
4691 	if (!(root == root->fs_info->tree_root ||
4692 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4693 		btrfs_return_ino(root, btrfs_ino(inode));
4694 
4695 	btrfs_end_transaction(trans, root);
4696 	btrfs_btree_balance_dirty(root);
4697 no_delete:
4698 	clear_inode(inode);
4699 	return;
4700 }
4701 
4702 /*
4703  * this returns, in the location pointer, the key found in the dir entry.
4704  * If no dir entries were found, location->objectid is 0.
4705  */
4706 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4707 			       struct btrfs_key *location)
4708 {
4709 	const char *name = dentry->d_name.name;
4710 	int namelen = dentry->d_name.len;
4711 	struct btrfs_dir_item *di;
4712 	struct btrfs_path *path;
4713 	struct btrfs_root *root = BTRFS_I(dir)->root;
4714 	int ret = 0;
4715 
4716 	path = btrfs_alloc_path();
4717 	if (!path)
4718 		return -ENOMEM;
4719 
4720 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4721 				    namelen, 0);
4722 	if (IS_ERR(di))
4723 		ret = PTR_ERR(di);
4724 
4725 	if (IS_ERR_OR_NULL(di))
4726 		goto out_err;
4727 
4728 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4729 out:
4730 	btrfs_free_path(path);
4731 	return ret;
4732 out_err:
4733 	location->objectid = 0;
4734 	goto out;
4735 }
4736 
4737 /*
4738  * when we hit a tree root in a directory, the btrfs part of the inode
4739  * needs to be changed to reflect the root directory of the tree root.  This
4740  * is kind of like crossing a mount point.
4741  */
4742 static int fixup_tree_root_location(struct btrfs_root *root,
4743 				    struct inode *dir,
4744 				    struct dentry *dentry,
4745 				    struct btrfs_key *location,
4746 				    struct btrfs_root **sub_root)
4747 {
4748 	struct btrfs_path *path;
4749 	struct btrfs_root *new_root;
4750 	struct btrfs_root_ref *ref;
4751 	struct extent_buffer *leaf;
4752 	int ret;
4753 	int err = 0;
4754 
4755 	path = btrfs_alloc_path();
4756 	if (!path) {
4757 		err = -ENOMEM;
4758 		goto out;
4759 	}
4760 
4761 	err = -ENOENT;
4762 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
4763 				  BTRFS_I(dir)->root->root_key.objectid,
4764 				  location->objectid);
4765 	if (ret) {
4766 		if (ret < 0)
4767 			err = ret;
4768 		goto out;
4769 	}
4770 
4771 	leaf = path->nodes[0];
4772 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4773 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4774 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4775 		goto out;
4776 
4777 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4778 				   (unsigned long)(ref + 1),
4779 				   dentry->d_name.len);
4780 	if (ret)
4781 		goto out;
4782 
4783 	btrfs_release_path(path);
4784 
4785 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4786 	if (IS_ERR(new_root)) {
4787 		err = PTR_ERR(new_root);
4788 		goto out;
4789 	}
4790 
4791 	if (btrfs_root_refs(&new_root->root_item) == 0) {
4792 		err = -ENOENT;
4793 		goto out;
4794 	}
4795 
4796 	*sub_root = new_root;
4797 	location->objectid = btrfs_root_dirid(&new_root->root_item);
4798 	location->type = BTRFS_INODE_ITEM_KEY;
4799 	location->offset = 0;
4800 	err = 0;
4801 out:
4802 	btrfs_free_path(path);
4803 	return err;
4804 }
4805 
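/*
 * Insert the inode into the root's red-black tree of in-memory inodes,
 * keyed by inode number.  An existing entry with the same number can
 * only be an inode on its way to being freed, so evict it and retry.
 */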
4806 static void inode_tree_add(struct inode *inode)
4807 {
4808 	struct btrfs_root *root = BTRFS_I(inode)->root;
4809 	struct btrfs_inode *entry;
4810 	struct rb_node **p;
4811 	struct rb_node *parent;
4812 	u64 ino = btrfs_ino(inode);
4813 again:
4814 	p = &root->inode_tree.rb_node;
4815 	parent = NULL;
4816 
4817 	if (inode_unhashed(inode))
4818 		return;
4819 
4820 	spin_lock(&root->inode_lock);
4821 	while (*p) {
4822 		parent = *p;
4823 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4824 
4825 		if (ino < btrfs_ino(&entry->vfs_inode))
4826 			p = &parent->rb_left;
4827 		else if (ino > btrfs_ino(&entry->vfs_inode))
4828 			p = &parent->rb_right;
4829 		else {
4830 			WARN_ON(!(entry->vfs_inode.i_state &
4831 				  (I_WILL_FREE | I_FREEING)));
4832 			rb_erase(parent, &root->inode_tree);
4833 			RB_CLEAR_NODE(parent);
4834 			spin_unlock(&root->inode_lock);
4835 			goto again;
4836 		}
4837 	}
4838 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
4839 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4840 	spin_unlock(&root->inode_lock);
4841 }
4842 
4843 static void inode_tree_del(struct inode *inode)
4844 {
4845 	struct btrfs_root *root = BTRFS_I(inode)->root;
4846 	int empty = 0;
4847 
4848 	spin_lock(&root->inode_lock);
4849 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4850 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4851 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4852 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4853 	}
4854 	spin_unlock(&root->inode_lock);
4855 
4856 	/*
4857 	 * Free space cache has inodes in the tree root, but the tree root has a
4858 	 * root_refs of 0, so this could end up dropping the tree root as a
4859 	 * snapshot, so we need the extra root != root->fs_info->tree_root
4860 	 * check to make sure we don't drop it.
4861 	 */
4862 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4863 	    root != root->fs_info->tree_root) {
4864 		synchronize_srcu(&root->fs_info->subvol_srcu);
4865 		spin_lock(&root->inode_lock);
4866 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4867 		spin_unlock(&root->inode_lock);
4868 		if (empty)
4869 			btrfs_add_dead_root(root);
4870 	}
4871 }
4872 
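/*
 * Used when a root is going away: walk its inode tree in objectid
 * order, pruning dcache aliases and dropping our reference so every
 * in-memory inode for the root gets evicted.
 */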
4873 void btrfs_invalidate_inodes(struct btrfs_root *root)
4874 {
4875 	struct rb_node *node;
4876 	struct rb_node *prev;
4877 	struct btrfs_inode *entry;
4878 	struct inode *inode;
4879 	u64 objectid = 0;
4880 
4881 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4882 
4883 	spin_lock(&root->inode_lock);
4884 again:
4885 	node = root->inode_tree.rb_node;
4886 	prev = NULL;
4887 	while (node) {
4888 		prev = node;
4889 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4890 
4891 		if (objectid < btrfs_ino(&entry->vfs_inode))
4892 			node = node->rb_left;
4893 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4894 			node = node->rb_right;
4895 		else
4896 			break;
4897 	}
4898 	if (!node) {
4899 		while (prev) {
4900 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4901 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4902 				node = prev;
4903 				break;
4904 			}
4905 			prev = rb_next(prev);
4906 		}
4907 	}
4908 	while (node) {
4909 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4910 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4911 		inode = igrab(&entry->vfs_inode);
4912 		if (inode) {
4913 			spin_unlock(&root->inode_lock);
4914 			if (atomic_read(&inode->i_count) > 1)
4915 				d_prune_aliases(inode);
4916 			/*
4917 			 * btrfs_drop_inode will have it removed from
4918 			 * the inode cache when its usage count
4919 			 * hits zero.
4920 			 */
4921 			iput(inode);
4922 			cond_resched();
4923 			spin_lock(&root->inode_lock);
4924 			goto again;
4925 		}
4926 
4927 		if (cond_resched_lock(&root->inode_lock))
4928 			goto again;
4929 
4930 		node = rb_next(node);
4931 	}
4932 	spin_unlock(&root->inode_lock);
4933 }
4934 
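/*
 * Callbacks for iget5_locked().  Inode numbers are only unique within
 * a subvolume, so lookups must match on both the inode number and the
 * root, and freshly allocated inodes are stamped with both.
 */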
4935 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4936 {
4937 	struct btrfs_iget_args *args = p;
4938 	inode->i_ino = args->ino;
4939 	BTRFS_I(inode)->root = args->root;
4940 	return 0;
4941 }
4942 
4943 static int btrfs_find_actor(struct inode *inode, void *opaque)
4944 {
4945 	struct btrfs_iget_args *args = opaque;
4946 	return args->ino == btrfs_ino(inode) &&
4947 		args->root == BTRFS_I(inode)->root;
4948 }
4949 
4950 static struct inode *btrfs_iget_locked(struct super_block *s,
4951 				       u64 objectid,
4952 				       struct btrfs_root *root)
4953 {
4954 	struct inode *inode;
4955 	struct btrfs_iget_args args;
4956 	args.ino = objectid;
4957 	args.root = root;
4958 
4959 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4960 			     btrfs_init_locked_inode,
4961 			     (void *)&args);
4962 	return inode;
4963 }
4964 
4965 /* Get an inode object given its location and corresponding root.
4966  * Returns in *new if the inode was read from disk
4967  */
4968 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4969 			 struct btrfs_root *root, int *new)
4970 {
4971 	struct inode *inode;
4972 
4973 	inode = btrfs_iget_locked(s, location->objectid, root);
4974 	if (!inode)
4975 		return ERR_PTR(-ENOMEM);
4976 
4977 	if (inode->i_state & I_NEW) {
4978 		BTRFS_I(inode)->root = root;
4979 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4980 		btrfs_read_locked_inode(inode);
4981 		if (!is_bad_inode(inode)) {
4982 			inode_tree_add(inode);
4983 			unlock_new_inode(inode);
4984 			if (new)
4985 				*new = 1;
4986 		} else {
4987 			unlock_new_inode(inode);
4988 			iput(inode);
4989 			inode = ERR_PTR(-ESTALE);
4990 		}
4991 	}
4992 
4993 	return inode;
4994 }
4995 
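/*
 * Fabricate an in-memory only dummy directory inode.  This stands in
 * for a subvolume that is referenced by a dir entry but whose root can
 * no longer be resolved (see the -ENOENT case in btrfs_lookup_dentry()).
 */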
4996 static struct inode *new_simple_dir(struct super_block *s,
4997 				    struct btrfs_key *key,
4998 				    struct btrfs_root *root)
4999 {
5000 	struct inode *inode = new_inode(s);
5001 
5002 	if (!inode)
5003 		return ERR_PTR(-ENOMEM);
5004 
5005 	BTRFS_I(inode)->root = root;
5006 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5007 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5008 
5009 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5010 	inode->i_op = &btrfs_dir_ro_inode_operations;
5011 	inode->i_fop = &simple_dir_operations;
5012 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5013 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5014 
5015 	return inode;
5016 }
5017 
5018 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5019 {
5020 	struct inode *inode;
5021 	struct btrfs_root *root = BTRFS_I(dir)->root;
5022 	struct btrfs_root *sub_root = root;
5023 	struct btrfs_key location;
5024 	int index;
5025 	int ret = 0;
5026 
5027 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5028 		return ERR_PTR(-ENAMETOOLONG);
5029 
5030 	ret = btrfs_inode_by_name(dir, dentry, &location);
5031 	if (ret < 0)
5032 		return ERR_PTR(ret);
5033 
5034 	if (location.objectid == 0)
5035 		return NULL;
5036 
5037 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5038 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5039 		return inode;
5040 	}
5041 
5042 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5043 
5044 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
5045 	ret = fixup_tree_root_location(root, dir, dentry,
5046 				       &location, &sub_root);
5047 	if (ret < 0) {
5048 		if (ret != -ENOENT)
5049 			inode = ERR_PTR(ret);
5050 		else
5051 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5052 	} else {
5053 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5054 	}
5055 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5056 
5057 	if (!IS_ERR(inode) && root != sub_root) {
5058 		down_read(&root->fs_info->cleanup_work_sem);
5059 		if (!(inode->i_sb->s_flags & MS_RDONLY))
5060 			ret = btrfs_orphan_cleanup(sub_root);
5061 		up_read(&root->fs_info->cleanup_work_sem);
5062 		if (ret)
5063 			inode = ERR_PTR(ret);
5064 	}
5065 
5066 	return inode;
5067 }
5068 
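/*
 * Tell the dcache to throw a dentry away as soon as it is unused if it
 * points into a dead subvolume or at a dummy empty-subvolume directory.
 */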
5069 static int btrfs_dentry_delete(const struct dentry *dentry)
5070 {
5071 	struct btrfs_root *root;
5072 	struct inode *inode = dentry->d_inode;
5073 
5074 	if (!inode && !IS_ROOT(dentry))
5075 		inode = dentry->d_parent->d_inode;
5076 
5077 	if (inode) {
5078 		root = BTRFS_I(inode)->root;
5079 		if (btrfs_root_refs(&root->root_item) == 0)
5080 			return 1;
5081 
5082 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5083 			return 1;
5084 	}
5085 	return 0;
5086 }
5087 
5088 static void btrfs_dentry_release(struct dentry *dentry)
5089 {
5090 	/* kfree() of a NULL pointer is a no-op, no need to check first */
5091 	kfree(dentry->d_fsdata);
5092 }
5093 
5094 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5095 				   unsigned int flags)
5096 {
5097 	struct dentry *ret;
5098 
5099 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
5100 	return ret;
5101 }
5102 
5103 unsigned char btrfs_filetype_table[] = {
5104 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5105 };
5106 
5107 static int btrfs_real_readdir(struct file *filp, void *dirent,
5108 			      filldir_t filldir)
5109 {
5110 	struct inode *inode = file_inode(filp);
5111 	struct btrfs_root *root = BTRFS_I(inode)->root;
5112 	struct btrfs_item *item;
5113 	struct btrfs_dir_item *di;
5114 	struct btrfs_key key;
5115 	struct btrfs_key found_key;
5116 	struct btrfs_path *path;
5117 	struct list_head ins_list;
5118 	struct list_head del_list;
5119 	int ret;
5120 	struct extent_buffer *leaf;
5121 	int slot;
5122 	unsigned char d_type;
5123 	int over = 0;
5124 	u32 di_cur;
5125 	u32 di_total;
5126 	u32 di_len;
5127 	int key_type = BTRFS_DIR_INDEX_KEY;
5128 	char tmp_name[32];
5129 	char *name_ptr;
5130 	int name_len;
5131 	int is_curr = 0;	/* filp->f_pos points to the current index? */
5132 
5133 	/* FIXME, use a real flag for deciding about the key type */
5134 	if (root->fs_info->tree_root == root)
5135 		key_type = BTRFS_DIR_ITEM_KEY;
5136 
5137 	/* special case for "." */
5138 	if (filp->f_pos == 0) {
5139 		over = filldir(dirent, ".", 1,
5140 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
5141 		if (over)
5142 			return 0;
5143 		filp->f_pos = 1;
5144 	}
5145 	/* special case for .., just use the back ref */
5146 	if (filp->f_pos == 1) {
5147 		u64 pino = parent_ino(filp->f_path.dentry);
5148 		over = filldir(dirent, "..", 2,
5149 			       filp->f_pos, pino, DT_DIR);
5150 		if (over)
5151 			return 0;
5152 		filp->f_pos = 2;
5153 	}
5154 	path = btrfs_alloc_path();
5155 	if (!path)
5156 		return -ENOMEM;
5157 
5158 	path->reada = 1;
5159 
5160 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5161 		INIT_LIST_HEAD(&ins_list);
5162 		INIT_LIST_HEAD(&del_list);
5163 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
5164 	}
5165 
5166 	btrfs_set_key_type(&key, key_type);
5167 	key.offset = filp->f_pos;
5168 	key.objectid = btrfs_ino(inode);
5169 
5170 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5171 	if (ret < 0)
5172 		goto err;
5173 
5174 	while (1) {
5175 		leaf = path->nodes[0];
5176 		slot = path->slots[0];
5177 		if (slot >= btrfs_header_nritems(leaf)) {
5178 			ret = btrfs_next_leaf(root, path);
5179 			if (ret < 0)
5180 				goto err;
5181 			else if (ret > 0)
5182 				break;
5183 			continue;
5184 		}
5185 
5186 		item = btrfs_item_nr(leaf, slot);
5187 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5188 
5189 		if (found_key.objectid != key.objectid)
5190 			break;
5191 		if (btrfs_key_type(&found_key) != key_type)
5192 			break;
5193 		if (found_key.offset < filp->f_pos)
5194 			goto next;
5195 		if (key_type == BTRFS_DIR_INDEX_KEY &&
5196 		    btrfs_should_delete_dir_index(&del_list,
5197 						  found_key.offset))
5198 			goto next;
5199 
5200 		filp->f_pos = found_key.offset;
5201 		is_curr = 1;
5202 
5203 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5204 		di_cur = 0;
5205 		di_total = btrfs_item_size(leaf, item);
5206 
5207 		while (di_cur < di_total) {
5208 			struct btrfs_key location;
5209 
5210 			if (verify_dir_item(root, leaf, di))
5211 				break;
5212 
5213 			name_len = btrfs_dir_name_len(leaf, di);
5214 			if (name_len <= sizeof(tmp_name)) {
5215 				name_ptr = tmp_name;
5216 			} else {
5217 				name_ptr = kmalloc(name_len, GFP_NOFS);
5218 				if (!name_ptr) {
5219 					ret = -ENOMEM;
5220 					goto err;
5221 				}
5222 			}
5223 			read_extent_buffer(leaf, name_ptr,
5224 					   (unsigned long)(di + 1), name_len);
5225 
5226 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5227 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
5228 
5229 
5230 			/* is this a reference to our own snapshot? If so
5231 			 * skip it.
5232 			 *
5233 			 * In contrast to old kernels, we insert the snapshot's
5234 			 * dir item and dir index after it has been created, so
5235 			 * we won't find a reference to our own snapshot. We
5236 			 * still keep the following code for backward
5237 			 * compatibility.
5238 			 */
5239 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
5240 			    location.objectid == root->root_key.objectid) {
5241 				over = 0;
5242 				goto skip;
5243 			}
5244 			over = filldir(dirent, name_ptr, name_len,
5245 				       found_key.offset, location.objectid,
5246 				       d_type);
5247 
5248 skip:
5249 			if (name_ptr != tmp_name)
5250 				kfree(name_ptr);
5251 
5252 			if (over)
5253 				goto nopos;
5254 			di_len = btrfs_dir_name_len(leaf, di) +
5255 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5256 			di_cur += di_len;
5257 			di = (struct btrfs_dir_item *)((char *)di + di_len);
5258 		}
5259 next:
5260 		path->slots[0]++;
5261 	}
5262 
5263 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5264 		if (is_curr)
5265 			filp->f_pos++;
5266 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
5267 						      &ins_list);
5268 		if (ret)
5269 			goto nopos;
5270 	}
5271 
5272 	/* Reached end of directory/root. Bump pos past the last item. */
5273 	if (key_type == BTRFS_DIR_INDEX_KEY)
5274 		/*
5275 		 * 32-bit glibc will use getdents64, but then parse the offset
5276 		 * with strtol - so the last number we can serve is this.
5277 		 */
5278 		filp->f_pos = 0x7fffffff;
5279 	else
5280 		filp->f_pos++;
5281 nopos:
5282 	ret = 0;
5283 err:
5284 	if (key_type == BTRFS_DIR_INDEX_KEY)
5285 		btrfs_put_delayed_items(&ins_list, &del_list);
5286 	btrfs_free_path(path);
5287 	return ret;
5288 }
5289 
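/*
 * ->write_inode callback.  btrfs metadata is persisted through the
 * transaction machinery rather than per-inode writeback, so the only
 * useful thing we can do for a WB_SYNC_ALL request is commit the
 * running transaction.
 */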
5290 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5291 {
5292 	struct btrfs_root *root = BTRFS_I(inode)->root;
5293 	struct btrfs_trans_handle *trans;
5294 	int ret = 0;
5295 	bool nolock = false;
5296 
5297 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5298 		return 0;
5299 
5300 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5301 		nolock = true;
5302 
5303 	if (wbc->sync_mode == WB_SYNC_ALL) {
5304 		if (nolock)
5305 			trans = btrfs_join_transaction_nolock(root);
5306 		else
5307 			trans = btrfs_join_transaction(root);
5308 		if (IS_ERR(trans))
5309 			return PTR_ERR(trans);
5310 		ret = btrfs_commit_transaction(trans, root);
5311 	}
5312 	return ret;
5313 }
5314 
5315 /*
5316  * This is somewhat expensive, updating the tree every time the
5317  * inode changes.  But, it is most likely to find the inode in cache.
5318  * FIXME, needs more benchmarking...there are no reasons other than performance
5319  * to keep or drop this code.
5320  */
5321 int btrfs_dirty_inode(struct inode *inode)
5322 {
5323 	struct btrfs_root *root = BTRFS_I(inode)->root;
5324 	struct btrfs_trans_handle *trans;
5325 	int ret;
5326 
5327 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5328 		return 0;
5329 
5330 	trans = btrfs_join_transaction(root);
5331 	if (IS_ERR(trans))
5332 		return PTR_ERR(trans);
5333 
5334 	ret = btrfs_update_inode(trans, root, inode);
5335 	if (ret == -ENOSPC) {
5336 		/* whoops, lets try again with the full transaction */
5337 		btrfs_end_transaction(trans, root);
5338 		trans = btrfs_start_transaction(root, 1);
5339 		if (IS_ERR(trans))
5340 			return PTR_ERR(trans);
5341 
5342 		ret = btrfs_update_inode(trans, root, inode);
5343 	}
5344 	btrfs_end_transaction(trans, root);
5345 	if (BTRFS_I(inode)->delayed_node)
5346 		btrfs_balance_delayed_items(root);
5347 
5348 	return ret;
5349 }
5350 
5351 /*
5352  * This is a copy of file_update_time.  We need this so we can return error on
5353  * ENOSPC for updating the inode in the case of file write and mmap writes.
5354  */
5355 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5356 			     int flags)
5357 {
5358 	struct btrfs_root *root = BTRFS_I(inode)->root;
5359 
5360 	if (btrfs_root_readonly(root))
5361 		return -EROFS;
5362 
5363 	if (flags & S_VERSION)
5364 		inode_inc_iversion(inode);
5365 	if (flags & S_CTIME)
5366 		inode->i_ctime = *now;
5367 	if (flags & S_MTIME)
5368 		inode->i_mtime = *now;
5369 	if (flags & S_ATIME)
5370 		inode->i_atime = *now;
5371 	return btrfs_dirty_inode(inode);
5372 }
5373 
5374 /*
5375  * find the highest existing sequence number in a directory
5376  * and then set the in-memory index_cnt variable to reflect
5377  * free sequence numbers
5378  */
5379 static int btrfs_set_inode_index_count(struct inode *inode)
5380 {
5381 	struct btrfs_root *root = BTRFS_I(inode)->root;
5382 	struct btrfs_key key, found_key;
5383 	struct btrfs_path *path;
5384 	struct extent_buffer *leaf;
5385 	int ret;
5386 
5387 	key.objectid = btrfs_ino(inode);
5388 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5389 	key.offset = (u64)-1;
5390 
5391 	path = btrfs_alloc_path();
5392 	if (!path)
5393 		return -ENOMEM;
5394 
5395 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5396 	if (ret < 0)
5397 		goto out;
5398 	/* FIXME: we should be able to handle this */
5399 	if (ret == 0)
5400 		goto out;
5401 	ret = 0;
5402 
5403 	/*
5404 	 * MAGIC NUMBER EXPLANATION:
5405 	 * since we search a directory based on f_pos, and '.' and '..' have
5406 	 * f_pos of 0 and 1 respectively, everybody else has to start at
5407 	 * index 2
5408 	 */
5409 	if (path->slots[0] == 0) {
5410 		BTRFS_I(inode)->index_cnt = 2;
5411 		goto out;
5412 	}
5413 
5414 	path->slots[0]--;
5415 
5416 	leaf = path->nodes[0];
5417 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5418 
5419 	if (found_key.objectid != btrfs_ino(inode) ||
5420 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5421 		BTRFS_I(inode)->index_cnt = 2;
5422 		goto out;
5423 	}
5424 
5425 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5426 out:
5427 	btrfs_free_path(path);
5428 	return ret;
5429 }
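
/*
 * Worked example (illustrative only): for a directory whose entries sit
 * at dir-index offsets 2, 3 and 7, the fs tree holds the items
 *
 *	(dir_ino, BTRFS_DIR_INDEX_KEY, 2)
 *	(dir_ino, BTRFS_DIR_INDEX_KEY, 3)
 *	(dir_ino, BTRFS_DIR_INDEX_KEY, 7)
 *
 * Searching for (dir_ino, BTRFS_DIR_INDEX_KEY, (u64)-1) lands just past
 * the last item, so after path->slots[0]-- we read offset 7 and set
 * index_cnt to 8, the next free sequence number.
 */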
5430 
5431 /*
5432  * helper to find a free sequence number in a given directory.  The current
5433  * code is very simple; later versions will do smarter things in the btree
5434  */
5435 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5436 {
5437 	int ret = 0;
5438 
5439 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5440 		ret = btrfs_inode_delayed_dir_index_count(dir);
5441 		if (ret) {
5442 			ret = btrfs_set_inode_index_count(dir);
5443 			if (ret)
5444 				return ret;
5445 		}
5446 	}
5447 
5448 	*index = BTRFS_I(dir)->index_cnt;
5449 	BTRFS_I(dir)->index_cnt++;
5450 
5451 	return ret;
5452 }
5453 
5454 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5455 				     struct btrfs_root *root,
5456 				     struct inode *dir,
5457 				     const char *name, int name_len,
5458 				     u64 ref_objectid, u64 objectid,
5459 				     umode_t mode, u64 *index)
5460 {
5461 	struct inode *inode;
5462 	struct btrfs_inode_item *inode_item;
5463 	struct btrfs_key *location;
5464 	struct btrfs_path *path;
5465 	struct btrfs_inode_ref *ref;
5466 	struct btrfs_key key[2];
5467 	u32 sizes[2];
5468 	unsigned long ptr;
5469 	int ret;
5470 	int owner;
5471 
5472 	path = btrfs_alloc_path();
5473 	if (!path)
5474 		return ERR_PTR(-ENOMEM);
5475 
5476 	inode = new_inode(root->fs_info->sb);
5477 	if (!inode) {
5478 		btrfs_free_path(path);
5479 		return ERR_PTR(-ENOMEM);
5480 	}
5481 
5482 	/*
5483 	 * we have to initialize this early, so we can reclaim the inode
5484 	 * number if we fail afterwards in this function.
5485 	 */
5486 	inode->i_ino = objectid;
5487 
5488 	if (dir) {
5489 		trace_btrfs_inode_request(dir);
5490 
5491 		ret = btrfs_set_inode_index(dir, index);
5492 		if (ret) {
5493 			btrfs_free_path(path);
5494 			iput(inode);
5495 			return ERR_PTR(ret);
5496 		}
5497 	}
5498 	/*
5499 	 * index_cnt is ignored for everything but a dir;
5500 	 * btrfs_set_inode_index_count has an explanation for the magic
5501 	 * number
5502 	 */
5503 	BTRFS_I(inode)->index_cnt = 2;
5504 	BTRFS_I(inode)->root = root;
5505 	BTRFS_I(inode)->generation = trans->transid;
5506 	inode->i_generation = BTRFS_I(inode)->generation;
5507 
5508 	/*
5509 	 * We could have gotten an inode number from somebody who was fsynced
5510 	 * and then removed in this same transaction, so let's just set full
5511 	 * sync since it will be a full sync anyway and this will blow away the
5512 	 * old info in the log.
5513 	 */
5514 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
5515 
5516 	if (S_ISDIR(mode))
5517 		owner = 0;
5518 	else
5519 		owner = 1;
5520 
5521 	key[0].objectid = objectid;
5522 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
5523 	key[0].offset = 0;
5524 
5525 	/*
5526 	 * Start new inodes with an inode_ref. This is slightly more
5527 	 * efficient for small numbers of hard links since they will
5528 	 * be packed into one item. Extended refs will kick in if we
5529 	 * add more hard links than can fit in the ref item.
5530 	 */
5531 	key[1].objectid = objectid;
5532 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
5533 	key[1].offset = ref_objectid;
5534 
5535 	sizes[0] = sizeof(struct btrfs_inode_item);
5536 	sizes[1] = name_len + sizeof(*ref);
5537 
5538 	path->leave_spinning = 1;
5539 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
5540 	if (ret != 0)
5541 		goto fail;
5542 
5543 	inode_init_owner(inode, dir, mode);
5544 	inode_set_bytes(inode, 0);
5545 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5546 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5547 				  struct btrfs_inode_item);
5548 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
5549 			     sizeof(*inode_item));
5550 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
5551 
5552 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
5553 			     struct btrfs_inode_ref);
5554 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
5555 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
5556 	ptr = (unsigned long)(ref + 1);
5557 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
5558 
5559 	btrfs_mark_buffer_dirty(path->nodes[0]);
5560 	btrfs_free_path(path);
5561 
5562 	location = &BTRFS_I(inode)->location;
5563 	location->objectid = objectid;
5564 	location->offset = 0;
5565 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
5566 
5567 	btrfs_inherit_iflags(inode, dir);
5568 
5569 	if (S_ISREG(mode)) {
5570 		if (btrfs_test_opt(root, NODATASUM))
5571 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5572 		if (btrfs_test_opt(root, NODATACOW))
5573 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
5574 				BTRFS_INODE_NODATASUM;
5575 	}
5576 
5577 	insert_inode_hash(inode);
5578 	inode_tree_add(inode);
5579 
5580 	trace_btrfs_inode_new(inode);
5581 	btrfs_set_inode_last_trans(trans, inode);
5582 
5583 	btrfs_update_root_times(trans, root);
5584 
5585 	return inode;
5586 fail:
5587 	if (dir)
5588 		BTRFS_I(dir)->index_cnt--;
5589 	btrfs_free_path(path);
5590 	iput(inode);
5591 	return ERR_PTR(ret);
5592 }
5593 
5594 static inline u8 btrfs_inode_type(struct inode *inode)
5595 {
5596 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
5597 }
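
/*
 * Example (for exposition): a regular file has i_mode & S_IFMT == S_IFREG,
 * so the lookup above reads btrfs_type_by_mode[S_IFREG >> S_SHIFT] and
 * yields BTRFS_FT_REG_FILE; a directory maps to BTRFS_FT_DIR, and so on
 * for the other S_IF* types in the table at the top of this file.
 */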
5598 
5599 /*
5600  * utility function to add 'inode' into 'parent_inode' with
5601  * a given name and a given sequence number.
5602  * if 'add_backref' is true, also insert a backref from the
5603  * inode to the parent directory.
5604  */
5605 int btrfs_add_link(struct btrfs_trans_handle *trans,
5606 		   struct inode *parent_inode, struct inode *inode,
5607 		   const char *name, int name_len, int add_backref, u64 index)
5608 {
5609 	int ret = 0;
5610 	struct btrfs_key key;
5611 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
5612 	u64 ino = btrfs_ino(inode);
5613 	u64 parent_ino = btrfs_ino(parent_inode);
5614 
5615 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5616 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
5617 	} else {
5618 		key.objectid = ino;
5619 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
5620 		key.offset = 0;
5621 	}
5622 
5623 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5624 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
5625 					 key.objectid, root->root_key.objectid,
5626 					 parent_ino, index, name, name_len);
5627 	} else if (add_backref) {
5628 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5629 					     parent_ino, index);
5630 	}
5631 
5632 	/* Nothing to clean up yet */
5633 	if (ret)
5634 		return ret;
5635 
5636 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
5637 				    parent_inode, &key,
5638 				    btrfs_inode_type(inode), index);
5639 	if (ret == -EEXIST || ret == -EOVERFLOW)
5640 		goto fail_dir_item;
5641 	else if (ret) {
5642 		btrfs_abort_transaction(trans, root, ret);
5643 		return ret;
5644 	}
5645 
5646 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
5647 			   name_len * 2);
5648 	inode_inc_iversion(parent_inode);
5649 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
5650 	ret = btrfs_update_inode(trans, root, parent_inode);
5651 	if (ret)
5652 		btrfs_abort_transaction(trans, root, ret);
5653 	return ret;
5654 
5655 fail_dir_item:
5656 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5657 		u64 local_index;
5658 		int err;
5659 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
5660 				 key.objectid, root->root_key.objectid,
5661 				 parent_ino, &local_index, name, name_len);
5662 
5663 	} else if (add_backref) {
5664 		u64 local_index;
5665 		int err;
5666 
5667 		err = btrfs_del_inode_ref(trans, root, name, name_len,
5668 					  ino, parent_ino, &local_index);
5669 	}
5670 	return ret;
5671 }
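
/*
 * Note on the i_size update above (illustrative): each link adds two
 * directory items, a BTRFS_DIR_ITEM_KEY entry (hashed by name) and a
 * BTRFS_DIR_INDEX_KEY entry (ordered by sequence number), so directory
 * i_size is accounted as name_len * 2 per entry; e.g. linking "foo"
 * (name_len == 3) grows the parent directory's i_size by 6.
 */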
5672 
5673 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
5674 			    struct inode *dir, struct dentry *dentry,
5675 			    struct inode *inode, int backref, u64 index)
5676 {
5677 	int err = btrfs_add_link(trans, dir, inode,
5678 				 dentry->d_name.name, dentry->d_name.len,
5679 				 backref, index);
5680 	if (err > 0)
5681 		err = -EEXIST;
5682 	return err;
5683 }
5684 
5685 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
5686 			umode_t mode, dev_t rdev)
5687 {
5688 	struct btrfs_trans_handle *trans;
5689 	struct btrfs_root *root = BTRFS_I(dir)->root;
5690 	struct inode *inode = NULL;
5691 	int err;
5692 	int drop_inode = 0;
5693 	u64 objectid;
5694 	u64 index = 0;
5695 
5696 	if (!new_valid_dev(rdev))
5697 		return -EINVAL;
5698 
5699 	/*
5700 	 * 2 for inode item and ref
5701 	 * 2 for dir items
5702 	 * 1 for xattr if selinux is on
5703 	 */
5704 	trans = btrfs_start_transaction(root, 5);
5705 	if (IS_ERR(trans))
5706 		return PTR_ERR(trans);
5707 
5708 	err = btrfs_find_free_ino(root, &objectid);
5709 	if (err)
5710 		goto out_unlock;
5711 
5712 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5713 				dentry->d_name.len, btrfs_ino(dir), objectid,
5714 				mode, &index);
5715 	if (IS_ERR(inode)) {
5716 		err = PTR_ERR(inode);
5717 		goto out_unlock;
5718 	}
5719 
5720 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5721 	if (err) {
5722 		drop_inode = 1;
5723 		goto out_unlock;
5724 	}
5725 
5726 	/*
5727 	 * If the active LSM wants to access the inode during
5728 	 * d_instantiate it needs these. Smack checks to see
5729 	 * if the filesystem supports xattrs by looking at the
5730 	 * ops vector.
5731 	 */
5732 
5733 	inode->i_op = &btrfs_special_inode_operations;
5734 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5735 	if (err)
5736 		drop_inode = 1;
5737 	else {
5738 		init_special_inode(inode, inode->i_mode, rdev);
5739 		btrfs_update_inode(trans, root, inode);
5740 		d_instantiate(dentry, inode);
5741 	}
5742 out_unlock:
5743 	btrfs_end_transaction(trans, root);
5744 	btrfs_btree_balance_dirty(root);
5745 	if (drop_inode) {
5746 		inode_dec_link_count(inode);
5747 		iput(inode);
5748 	}
5749 	return err;
5750 }
5751 
5752 static int btrfs_create(struct inode *dir, struct dentry *dentry,
5753 			umode_t mode, bool excl)
5754 {
5755 	struct btrfs_trans_handle *trans;
5756 	struct btrfs_root *root = BTRFS_I(dir)->root;
5757 	struct inode *inode = NULL;
5758 	int drop_inode_on_err = 0;
5759 	int err;
5760 	u64 objectid;
5761 	u64 index = 0;
5762 
5763 	/*
5764 	 * 2 for inode item and ref
5765 	 * 2 for dir items
5766 	 * 1 for xattr if selinux is on
5767 	 */
5768 	trans = btrfs_start_transaction(root, 5);
5769 	if (IS_ERR(trans))
5770 		return PTR_ERR(trans);
5771 
5772 	err = btrfs_find_free_ino(root, &objectid);
5773 	if (err)
5774 		goto out_unlock;
5775 
5776 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5777 				dentry->d_name.len, btrfs_ino(dir), objectid,
5778 				mode, &index);
5779 	if (IS_ERR(inode)) {
5780 		err = PTR_ERR(inode);
5781 		goto out_unlock;
5782 	}
5783 	drop_inode_on_err = 1;
5784 
5785 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5786 	if (err)
5787 		goto out_unlock;
5788 
5789 	err = btrfs_update_inode(trans, root, inode);
5790 	if (err)
5791 		goto out_unlock;
5792 
5793 	/*
5794 	 * If the active LSM wants to access the inode during
5795 	 * d_instantiate it needs these. Smack checks to see
5796 	 * if the filesystem supports xattrs by looking at the
5797 	 * ops vector.
5798 	 */
5799 	inode->i_fop = &btrfs_file_operations;
5800 	inode->i_op = &btrfs_file_inode_operations;
5801 
5802 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5803 	if (err)
5804 		goto out_unlock;
5805 
5806 	inode->i_mapping->a_ops = &btrfs_aops;
5807 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5808 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5809 	d_instantiate(dentry, inode);
5810 
5811 out_unlock:
5812 	btrfs_end_transaction(trans, root);
5813 	if (err && drop_inode_on_err) {
5814 		inode_dec_link_count(inode);
5815 		iput(inode);
5816 	}
5817 	btrfs_btree_balance_dirty(root);
5818 	return err;
5819 }
5820 
5821 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5822 		      struct dentry *dentry)
5823 {
5824 	struct btrfs_trans_handle *trans;
5825 	struct btrfs_root *root = BTRFS_I(dir)->root;
5826 	struct inode *inode = old_dentry->d_inode;
5827 	u64 index;
5828 	int err;
5829 	int drop_inode = 0;
5830 
5831 	/* do not allow sys_link()s between different subvols of the same device */
5832 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5833 		return -EXDEV;
5834 
5835 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5836 		return -EMLINK;
5837 
5838 	err = btrfs_set_inode_index(dir, &index);
5839 	if (err)
5840 		goto fail;
5841 
5842 	/*
5843 	 * 2 items for inode and inode ref
5844 	 * 2 items for dir items
5845 	 * 1 item for parent inode
5846 	 */
5847 	trans = btrfs_start_transaction(root, 5);
5848 	if (IS_ERR(trans)) {
5849 		err = PTR_ERR(trans);
5850 		goto fail;
5851 	}
5852 
5853 	btrfs_inc_nlink(inode);
5854 	inode_inc_iversion(inode);
5855 	inode->i_ctime = CURRENT_TIME;
5856 	ihold(inode);
5857 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
5858 
5859 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5860 
5861 	if (err) {
5862 		drop_inode = 1;
5863 	} else {
5864 		struct dentry *parent = dentry->d_parent;
5865 		err = btrfs_update_inode(trans, root, inode);
5866 		if (err)
5867 			goto fail;
5868 		d_instantiate(dentry, inode);
5869 		btrfs_log_new_name(trans, inode, NULL, parent);
5870 	}
5871 
5872 	btrfs_end_transaction(trans, root);
5873 fail:
5874 	if (drop_inode) {
5875 		inode_dec_link_count(inode);
5876 		iput(inode);
5877 	}
5878 	btrfs_btree_balance_dirty(root);
5879 	return err;
5880 }
5881 
5882 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5883 {
5884 	struct inode *inode = NULL;
5885 	struct btrfs_trans_handle *trans;
5886 	struct btrfs_root *root = BTRFS_I(dir)->root;
5887 	int err = 0;
5888 	int drop_on_err = 0;
5889 	u64 objectid = 0;
5890 	u64 index = 0;
5891 
5892 	/*
5893 	 * 2 items for inode and ref
5894 	 * 2 items for dir items
5895 	 * 1 for xattr if selinux is on
5896 	 */
5897 	trans = btrfs_start_transaction(root, 5);
5898 	if (IS_ERR(trans))
5899 		return PTR_ERR(trans);
5900 
5901 	err = btrfs_find_free_ino(root, &objectid);
5902 	if (err)
5903 		goto out_fail;
5904 
5905 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5906 				dentry->d_name.len, btrfs_ino(dir), objectid,
5907 				S_IFDIR | mode, &index);
5908 	if (IS_ERR(inode)) {
5909 		err = PTR_ERR(inode);
5910 		goto out_fail;
5911 	}
5912 
5913 	drop_on_err = 1;
5914 
5915 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5916 	if (err)
5917 		goto out_fail;
5918 
5919 	inode->i_op = &btrfs_dir_inode_operations;
5920 	inode->i_fop = &btrfs_dir_file_operations;
5921 
5922 	btrfs_i_size_write(inode, 0);
5923 	err = btrfs_update_inode(trans, root, inode);
5924 	if (err)
5925 		goto out_fail;
5926 
5927 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5928 			     dentry->d_name.len, 0, index);
5929 	if (err)
5930 		goto out_fail;
5931 
5932 	d_instantiate(dentry, inode);
5933 	drop_on_err = 0;
5934 
5935 out_fail:
5936 	btrfs_end_transaction(trans, root);
5937 	if (drop_on_err)
5938 		iput(inode);
5939 	btrfs_btree_balance_dirty(root);
5940 	return err;
5941 }
5942 
5943 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5944  * and an extent that you want to insert, deal with overlap and insert
5945  * the new extent into the tree.
5946  */
5947 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5948 				struct extent_map *existing,
5949 				struct extent_map *em,
5950 				u64 map_start, u64 map_len)
5951 {
5952 	u64 start_diff;
5953 
5954 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5955 	start_diff = map_start - em->start;
5956 	em->start = map_start;
5957 	em->len = map_len;
5958 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5959 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5960 		em->block_start += start_diff;
5961 		em->block_len -= start_diff;
5962 	}
5963 	return add_extent_mapping(em_tree, em);
5964 }
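
/*
 * Worked example (illustrative): suppose em describes file range
 * [0, 16k) at block_start 100M, but [0, 4k) already exists in the tree.
 * Called with map_start == 4k and map_len == 12k, start_diff is 4k, so
 * em is trimmed to [4k, 16k) and, for an uncompressed extent, its
 * block_start is advanced to 100M + 4k before the insert is retried.
 */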
5965 
5966 static noinline int uncompress_inline(struct btrfs_path *path,
5967 				      struct inode *inode, struct page *page,
5968 				      size_t pg_offset, u64 extent_offset,
5969 				      struct btrfs_file_extent_item *item)
5970 {
5971 	int ret;
5972 	struct extent_buffer *leaf = path->nodes[0];
5973 	char *tmp;
5974 	size_t max_size;
5975 	unsigned long inline_size;
5976 	unsigned long ptr;
5977 	int compress_type;
5978 
5979 	WARN_ON(pg_offset != 0);
5980 	compress_type = btrfs_file_extent_compression(leaf, item);
5981 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5982 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5983 					btrfs_item_nr(leaf, path->slots[0]));
5984 	tmp = kmalloc(inline_size, GFP_NOFS);
5985 	if (!tmp)
5986 		return -ENOMEM;
5987 	ptr = btrfs_file_extent_inline_start(item);
5988 
5989 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5990 
5991 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5992 	ret = btrfs_decompress(compress_type, tmp, page,
5993 			       extent_offset, inline_size, max_size);
5994 	if (ret) {
5995 		char *kaddr = kmap_atomic(page);
5996 		unsigned long copy_size = min_t(u64,
5997 				  PAGE_CACHE_SIZE - pg_offset,
5998 				  max_size - extent_offset);
5999 		memset(kaddr + pg_offset, 0, copy_size);
6000 		kunmap_atomic(kaddr);
6001 	}
6002 	kfree(tmp);
6003 	return 0;
6004 }
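
/*
 * Size bookkeeping above, by example (not from the original source): an
 * inline extent holding 4000 bytes of data compressed down to 700 bytes
 * has ram_bytes == 4000 (max_size) and an inline item length of 700
 * (inline_size); btrfs_decompress() expands the 700 on-disk bytes back
 * into the page, and any tail the decompressor failed to fill is zeroed.
 */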
6005 
6006 /*
6007  * a bit scary, this does extent mapping from logical file offset to the disk.
6008  * the ugly parts come from merging extents from the disk with the in-ram
6009  * representation.  This gets more complex because of the data=ordered code,
6010  * where the in-ram extents might be locked pending data=ordered completion.
6011  *
6012  * This also copies inline extents directly into the page.
6013  */
6014 
6015 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6016 				    size_t pg_offset, u64 start, u64 len,
6017 				    int create)
6018 {
6019 	int ret;
6020 	int err = 0;
6021 	u64 bytenr;
6022 	u64 extent_start = 0;
6023 	u64 extent_end = 0;
6024 	u64 objectid = btrfs_ino(inode);
6025 	u32 found_type;
6026 	struct btrfs_path *path = NULL;
6027 	struct btrfs_root *root = BTRFS_I(inode)->root;
6028 	struct btrfs_file_extent_item *item;
6029 	struct extent_buffer *leaf;
6030 	struct btrfs_key found_key;
6031 	struct extent_map *em = NULL;
6032 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6033 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6034 	struct btrfs_trans_handle *trans = NULL;
6035 	int compress_type;
6036 
6037 again:
6038 	read_lock(&em_tree->lock);
6039 	em = lookup_extent_mapping(em_tree, start, len);
6040 	if (em)
6041 		em->bdev = root->fs_info->fs_devices->latest_bdev;
6042 	read_unlock(&em_tree->lock);
6043 
6044 	if (em) {
6045 		if (em->start > start || em->start + em->len <= start)
6046 			free_extent_map(em);
6047 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6048 			free_extent_map(em);
6049 		else
6050 			goto out;
6051 	}
6052 	em = alloc_extent_map();
6053 	if (!em) {
6054 		err = -ENOMEM;
6055 		goto out;
6056 	}
6057 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6058 	em->start = EXTENT_MAP_HOLE;
6059 	em->orig_start = EXTENT_MAP_HOLE;
6060 	em->len = (u64)-1;
6061 	em->block_len = (u64)-1;
6062 
6063 	if (!path) {
6064 		path = btrfs_alloc_path();
6065 		if (!path) {
6066 			err = -ENOMEM;
6067 			goto out;
6068 		}
6069 		/*
6070 		 * Chances are we'll be called again, so go ahead and do
6071 		 * readahead
6072 		 */
6073 		path->reada = 1;
6074 	}
6075 
6076 	ret = btrfs_lookup_file_extent(trans, root, path,
6077 				       objectid, start, trans != NULL);
6078 	if (ret < 0) {
6079 		err = ret;
6080 		goto out;
6081 	}
6082 
6083 	if (ret != 0) {
6084 		if (path->slots[0] == 0)
6085 			goto not_found;
6086 		path->slots[0]--;
6087 	}
6088 
6089 	leaf = path->nodes[0];
6090 	item = btrfs_item_ptr(leaf, path->slots[0],
6091 			      struct btrfs_file_extent_item);
6092 	/* are we inside the extent that was found? */
6093 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6094 	found_type = btrfs_key_type(&found_key);
6095 	if (found_key.objectid != objectid ||
6096 	    found_type != BTRFS_EXTENT_DATA_KEY) {
6097 		goto not_found;
6098 	}
6099 
6100 	found_type = btrfs_file_extent_type(leaf, item);
6101 	extent_start = found_key.offset;
6102 	compress_type = btrfs_file_extent_compression(leaf, item);
6103 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6104 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6105 		extent_end = extent_start +
6106 		       btrfs_file_extent_num_bytes(leaf, item);
6107 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6108 		size_t size;
6109 		size = btrfs_file_extent_inline_len(leaf, item);
6110 		extent_end = ALIGN(extent_start + size, root->sectorsize);
6111 	}
6112 
6113 	if (start >= extent_end) {
6114 		path->slots[0]++;
6115 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6116 			ret = btrfs_next_leaf(root, path);
6117 			if (ret < 0) {
6118 				err = ret;
6119 				goto out;
6120 			}
6121 			if (ret > 0)
6122 				goto not_found;
6123 			leaf = path->nodes[0];
6124 		}
6125 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6126 		if (found_key.objectid != objectid ||
6127 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6128 			goto not_found;
6129 		if (start + len <= found_key.offset)
6130 			goto not_found;
6131 		em->start = start;
6132 		em->orig_start = start;
6133 		em->len = found_key.offset - start;
6134 		goto not_found_em;
6135 	}
6136 
6137 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6138 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6139 		em->start = extent_start;
6140 		em->len = extent_end - extent_start;
6141 		em->orig_start = extent_start -
6142 				 btrfs_file_extent_offset(leaf, item);
6143 		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
6144 								      item);
6145 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
6146 		if (bytenr == 0) {
6147 			em->block_start = EXTENT_MAP_HOLE;
6148 			goto insert;
6149 		}
6150 		if (compress_type != BTRFS_COMPRESS_NONE) {
6151 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6152 			em->compress_type = compress_type;
6153 			em->block_start = bytenr;
6154 			em->block_len = em->orig_block_len;
6155 		} else {
6156 			bytenr += btrfs_file_extent_offset(leaf, item);
6157 			em->block_start = bytenr;
6158 			em->block_len = em->len;
6159 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
6160 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6161 		}
6162 		goto insert;
6163 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6164 		unsigned long ptr;
6165 		char *map;
6166 		size_t size;
6167 		size_t extent_offset;
6168 		size_t copy_size;
6169 
6170 		em->block_start = EXTENT_MAP_INLINE;
6171 		if (!page || create) {
6172 			em->start = extent_start;
6173 			em->len = extent_end - extent_start;
6174 			goto out;
6175 		}
6176 
6177 		size = btrfs_file_extent_inline_len(leaf, item);
6178 		extent_offset = page_offset(page) + pg_offset - extent_start;
6179 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6180 				size - extent_offset);
6181 		em->start = extent_start + extent_offset;
6182 		em->len = ALIGN(copy_size, root->sectorsize);
6183 		em->orig_block_len = em->len;
6184 		em->orig_start = em->start;
6185 		if (compress_type) {
6186 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6187 			em->compress_type = compress_type;
6188 		}
6189 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6190 		if (create == 0 && !PageUptodate(page)) {
6191 			if (btrfs_file_extent_compression(leaf, item) !=
6192 			    BTRFS_COMPRESS_NONE) {
6193 				ret = uncompress_inline(path, inode, page,
6194 							pg_offset,
6195 							extent_offset, item);
6196 				BUG_ON(ret); /* -ENOMEM */
6197 			} else {
6198 				map = kmap(page);
6199 				read_extent_buffer(leaf, map + pg_offset, ptr,
6200 						   copy_size);
6201 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6202 					memset(map + pg_offset + copy_size, 0,
6203 					       PAGE_CACHE_SIZE - pg_offset -
6204 					       copy_size);
6205 				}
6206 				kunmap(page);
6207 			}
6208 			flush_dcache_page(page);
6209 		} else if (create && PageUptodate(page)) {
6210 			BUG();
6211 			if (!trans) {
6212 				kunmap(page);
6213 				free_extent_map(em);
6214 				em = NULL;
6215 
6216 				btrfs_release_path(path);
6217 				trans = btrfs_join_transaction(root);
6218 
6219 				if (IS_ERR(trans))
6220 					return ERR_CAST(trans);
6221 				goto again;
6222 			}
6223 			map = kmap(page);
6224 			write_extent_buffer(leaf, map + pg_offset, ptr,
6225 					    copy_size);
6226 			kunmap(page);
6227 			btrfs_mark_buffer_dirty(leaf);
6228 		}
6229 		set_extent_uptodate(io_tree, em->start,
6230 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6231 		goto insert;
6232 	} else {
6233 		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
6234 	}
6235 not_found:
6236 	em->start = start;
6237 	em->orig_start = start;
6238 	em->len = len;
6239 not_found_em:
6240 	em->block_start = EXTENT_MAP_HOLE;
6241 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6242 insert:
6243 	btrfs_release_path(path);
6244 	if (em->start > start || extent_map_end(em) <= start) {
6245 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
6246 		       "[%llu %llu]\n", (unsigned long long)em->start,
6247 		       (unsigned long long)em->len,
6248 		       (unsigned long long)start,
6249 		       (unsigned long long)len);
6250 		err = -EIO;
6251 		goto out;
6252 	}
6253 
6254 	err = 0;
6255 	write_lock(&em_tree->lock);
6256 	ret = add_extent_mapping(em_tree, em);
6257 	/* it is possible that someone inserted the extent into the tree
6258 	 * while we had the lock dropped.  It is also possible that
6259 	 * an overlapping map exists in the tree
6260 	 */
6261 	if (ret == -EEXIST) {
6262 		struct extent_map *existing;
6263 
6264 		ret = 0;
6265 
6266 		existing = lookup_extent_mapping(em_tree, start, len);
6267 		if (existing && (existing->start > start ||
6268 		    existing->start + existing->len <= start)) {
6269 			free_extent_map(existing);
6270 			existing = NULL;
6271 		}
6272 		if (!existing) {
6273 			existing = lookup_extent_mapping(em_tree, em->start,
6274 							 em->len);
6275 			if (existing) {
6276 				err = merge_extent_mapping(em_tree, existing,
6277 							   em, start,
6278 							   root->sectorsize);
6279 				free_extent_map(existing);
6280 				if (err) {
6281 					free_extent_map(em);
6282 					em = NULL;
6283 				}
6284 			} else {
6285 				err = -EIO;
6286 				free_extent_map(em);
6287 				em = NULL;
6288 			}
6289 		} else {
6290 			free_extent_map(em);
6291 			em = existing;
6292 			err = 0;
6293 		}
6294 	}
6295 	write_unlock(&em_tree->lock);
6296 out:
6297 
6298 	if (em)
6299 		trace_btrfs_get_extent(root, em);
6300 
6301 	if (path)
6302 		btrfs_free_path(path);
6303 	if (trans) {
6304 		ret = btrfs_end_transaction(trans, root);
6305 		if (!err)
6306 			err = ret;
6307 	}
6308 	if (err) {
6309 		free_extent_map(em);
6310 		return ERR_PTR(err);
6311 	}
6312 	BUG_ON(!em); /* Error is always set */
6313 	return em;
6314 }
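
/*
 * Typical read-side usage (a hedged sketch, not a verbatim caller):
 *
 *	em = btrfs_get_extent(inode, page, 0, page_offset(page),
 *			      PAGE_CACHE_SIZE, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	... use em: a real extent, a hole, or inline data ...
 *	free_extent_map(em);
 *
 * With create == 0 no transaction is ever joined, so this is safe from
 * the page reading context.
 */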
6315 
6316 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6317 					   size_t pg_offset, u64 start, u64 len,
6318 					   int create)
6319 {
6320 	struct extent_map *em;
6321 	struct extent_map *hole_em = NULL;
6322 	u64 range_start = start;
6323 	u64 end;
6324 	u64 found;
6325 	u64 found_end;
6326 	int err = 0;
6327 
6328 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6329 	if (IS_ERR(em))
6330 		return em;
6331 	if (em) {
6332 		/*
6333 		 * if our em maps to
6334 		 * -  a hole or
6335 		 * -  a pre-alloc extent,
6336 		 * there might actually be delalloc bytes behind it.
6337 		 */
6338 		if (em->block_start != EXTENT_MAP_HOLE &&
6339 		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6340 			return em;
6341 		else
6342 			hole_em = em;
6343 	}
6344 
6345 	/* check to see if we've wrapped (len == -1 or similar) */
6346 	end = start + len;
6347 	if (end < start)
6348 		end = (u64)-1;
6349 	else
6350 		end -= 1;
6351 
6352 	em = NULL;
6353 
6354 	/* ok, we didn't find anything, lets look for delalloc */
6355 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
6356 				 end, len, EXTENT_DELALLOC, 1);
6357 	found_end = range_start + found;
6358 	if (found_end < range_start)
6359 		found_end = (u64)-1;
6360 
6361 	/*
6362 	 * we didn't find anything useful, return
6363 	 * the original results from get_extent()
6364 	 */
6365 	if (range_start > end || found_end <= start) {
6366 		em = hole_em;
6367 		hole_em = NULL;
6368 		goto out;
6369 	}
6370 
6371 	/* adjust the range_start to make sure it doesn't
6372 	 * go backwards from the start they passed in
6373 	 */
6374 	range_start = max(start, range_start);
6375 	found = found_end - range_start;
6376 
6377 	if (found > 0) {
6378 		u64 hole_start = start;
6379 		u64 hole_len = len;
6380 
6381 		em = alloc_extent_map();
6382 		if (!em) {
6383 			err = -ENOMEM;
6384 			goto out;
6385 		}
6386 		/*
6387 		 * when btrfs_get_extent can't find anything it
6388 		 * returns one huge hole
6389 		 *
6390 		 * make sure what it found really fits our range, and
6391 		 * adjust to make sure it is based on the start from
6392 		 * the caller
6393 		 */
6394 		if (hole_em) {
6395 			u64 calc_end = extent_map_end(hole_em);
6396 
6397 			if (calc_end <= start || (hole_em->start > end)) {
6398 				free_extent_map(hole_em);
6399 				hole_em = NULL;
6400 			} else {
6401 				hole_start = max(hole_em->start, start);
6402 				hole_len = calc_end - hole_start;
6403 			}
6404 		}
6405 		em->bdev = NULL;
6406 		if (hole_em && range_start > hole_start) {
6407 			/* our hole starts before our delalloc, so we
6408 			 * have to return just the parts of the hole
6409 			 * that go until the delalloc starts
6410 			 */
6411 			em->len = min(hole_len,
6412 				      range_start - hole_start);
6413 			em->start = hole_start;
6414 			em->orig_start = hole_start;
6415 			/*
6416 			 * don't adjust block start at all,
6417 			 * it is fixed at EXTENT_MAP_HOLE
6418 			 */
6419 			em->block_start = hole_em->block_start;
6420 			em->block_len = hole_len;
6421 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
6422 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6423 		} else {
6424 			em->start = range_start;
6425 			em->len = found;
6426 			em->orig_start = range_start;
6427 			em->block_start = EXTENT_MAP_DELALLOC;
6428 			em->block_len = found;
6429 		}
6430 	} else if (hole_em) {
6431 		return hole_em;
6432 	}
6433 out:
6434 
6435 	free_extent_map(hole_em);
6436 	if (err) {
6437 		free_extent_map(em);
6438 		return ERR_PTR(err);
6439 	}
6440 	return em;
6441 }
6442 
6443 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6444 						  u64 start, u64 len)
6445 {
6446 	struct btrfs_root *root = BTRFS_I(inode)->root;
6447 	struct btrfs_trans_handle *trans;
6448 	struct extent_map *em;
6449 	struct btrfs_key ins;
6450 	u64 alloc_hint;
6451 	int ret;
6452 
6453 	trans = btrfs_join_transaction(root);
6454 	if (IS_ERR(trans))
6455 		return ERR_CAST(trans);
6456 
6457 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
6458 
6459 	alloc_hint = get_extent_allocation_hint(inode, start, len);
6460 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
6461 				   alloc_hint, &ins, 1);
6462 	if (ret) {
6463 		em = ERR_PTR(ret);
6464 		goto out;
6465 	}
6466 
6467 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
6468 			      ins.offset, ins.offset, 0);
6469 	if (IS_ERR(em))
6470 		goto out;
6471 
6472 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6473 					   ins.offset, ins.offset, 0);
6474 	if (ret) {
6475 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6476 		em = ERR_PTR(ret);
6477 	}
6478 out:
6479 	btrfs_end_transaction(trans, root);
6480 	return em;
6481 }
6482 
6483 /*
6484  * returns 1 when the nocow is safe, < 0 on error, 0 if the
6485  * block must be cow'd
6486  */
6487 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
6488 				      struct inode *inode, u64 offset, u64 len)
6489 {
6490 	struct btrfs_path *path;
6491 	int ret;
6492 	struct extent_buffer *leaf;
6493 	struct btrfs_root *root = BTRFS_I(inode)->root;
6494 	struct btrfs_file_extent_item *fi;
6495 	struct btrfs_key key;
6496 	u64 disk_bytenr;
6497 	u64 backref_offset;
6498 	u64 extent_end;
6499 	u64 num_bytes;
6500 	int slot;
6501 	int found_type;
6502 
6503 	path = btrfs_alloc_path();
6504 	if (!path)
6505 		return -ENOMEM;
6506 
6507 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
6508 				       offset, 0);
6509 	if (ret < 0)
6510 		goto out;
6511 
6512 	slot = path->slots[0];
6513 	if (ret == 1) {
6514 		if (slot == 0) {
6515 			/* can't find the item, must cow */
6516 			ret = 0;
6517 			goto out;
6518 		}
6519 		slot--;
6520 	}
6521 	ret = 0;
6522 	leaf = path->nodes[0];
6523 	btrfs_item_key_to_cpu(leaf, &key, slot);
6524 	if (key.objectid != btrfs_ino(inode) ||
6525 	    key.type != BTRFS_EXTENT_DATA_KEY) {
6526 		/* not our file or wrong item type, must cow */
6527 		goto out;
6528 	}
6529 
6530 	if (key.offset > offset) {
6531 		/* Wrong offset, must cow */
6532 		goto out;
6533 	}
6534 
6535 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6536 	found_type = btrfs_file_extent_type(leaf, fi);
6537 	if (found_type != BTRFS_FILE_EXTENT_REG &&
6538 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
6539 		/* not a regular extent, must cow */
6540 		goto out;
6541 	}
6542 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6543 	backref_offset = btrfs_file_extent_offset(leaf, fi);
6544 
6545 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
6546 	if (extent_end < offset + len) {
6547 		/* extent doesn't include our full range, must cow */
6548 		goto out;
6549 	}
6550 
6551 	if (btrfs_extent_readonly(root, disk_bytenr))
6552 		goto out;
6553 
6554 	/*
6555 	 * look for other files referencing this extent; if we
6556 	 * find any we must cow
6557 	 */
6558 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
6559 				  key.offset - backref_offset, disk_bytenr))
6560 		goto out;
6561 
6562 	/*
6563 	 * adjust disk_bytenr and num_bytes to cover just the bytes
6564 	 * in this extent we are about to write.  If there
6565 	 * are any csums in that range we have to cow in order
6566 	 * to keep the csums correct
6567 	 */
6568 	disk_bytenr += backref_offset;
6569 	disk_bytenr += offset - key.offset;
6570 	num_bytes = min(offset + len, extent_end) - offset;
6571 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
6572 		goto out;
6573 	/*
6574 	 * all of the above have passed, it is safe to overwrite this extent
6575 	 * without cow
6576 	 */
6577 	ret = 1;
6578 out:
6579 	btrfs_free_path(path);
6580 	return ret;
6581 }
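
/*
 * To summarize the checks above (exposition only): a direct overwrite
 * without COW requires a plain REG/PREALLOC extent that covers all of
 * [offset, offset + len), whose data is not shared with another file or
 * snapshot (no cross refs), does not sit in a readonly block group, and
 * carries no checksums that the overwrite would invalidate.
 */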
6582 
6583 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
6584 			      struct extent_state **cached_state, int writing)
6585 {
6586 	struct btrfs_ordered_extent *ordered;
6587 	int ret = 0;
6588 
6589 	while (1) {
6590 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6591 				 0, cached_state);
6592 		/*
6593 		 * We're concerned with the entire range that we're going to be
6594 		 * doing DIO to, so we need to make sure there are no ordered
6595 		 * extents in this range.
6596 		 */
6597 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6598 						     lockend - lockstart + 1);
6599 
6600 		/*
6601 		 * We need to make sure there are no buffered pages in this
6602 		 * range either; we could have raced between the invalidate in
6603 		 * generic_file_direct_write and locking the extent.  The
6604 		 * invalidate needs to happen so that reads after a write do not
6605 		 * get stale data.
6606 		 */
6607 		if (!ordered && (!writing ||
6608 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
6609 				    lockstart, lockend, EXTENT_UPTODATE, 0,
6610 				    *cached_state)))
6611 			break;
6612 
6613 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6614 				     cached_state, GFP_NOFS);
6615 
6616 		if (ordered) {
6617 			btrfs_start_ordered_extent(inode, ordered, 1);
6618 			btrfs_put_ordered_extent(ordered);
6619 		} else {
6620 			/* Screw you mmap */
6621 			ret = filemap_write_and_wait_range(inode->i_mapping,
6622 							   lockstart,
6623 							   lockend);
6624 			if (ret)
6625 				break;
6626 
6627 			/*
6628 			 * If we found a page that couldn't be invalidated just
6629 			 * fall back to buffered.
6630 			 */
6631 			ret = invalidate_inode_pages2_range(inode->i_mapping,
6632 					lockstart >> PAGE_CACHE_SHIFT,
6633 					lockend >> PAGE_CACHE_SHIFT);
6634 			if (ret)
6635 				break;
6636 		}
6637 
6638 		cond_resched();
6639 	}
6640 
6641 	return ret;
6642 }
6643 
6644 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
6645 					   u64 len, u64 orig_start,
6646 					   u64 block_start, u64 block_len,
6647 					   u64 orig_block_len, int type)
6648 {
6649 	struct extent_map_tree *em_tree;
6650 	struct extent_map *em;
6651 	struct btrfs_root *root = BTRFS_I(inode)->root;
6652 	int ret;
6653 
6654 	em_tree = &BTRFS_I(inode)->extent_tree;
6655 	em = alloc_extent_map();
6656 	if (!em)
6657 		return ERR_PTR(-ENOMEM);
6658 
6659 	em->start = start;
6660 	em->orig_start = orig_start;
6661 	em->mod_start = start;
6662 	em->mod_len = len;
6663 	em->len = len;
6664 	em->block_len = block_len;
6665 	em->block_start = block_start;
6666 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6667 	em->orig_block_len = orig_block_len;
6668 	em->generation = -1;
6669 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6670 	if (type == BTRFS_ORDERED_PREALLOC)
6671 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
6672 
6673 	do {
6674 		btrfs_drop_extent_cache(inode, em->start,
6675 				em->start + em->len - 1, 0);
6676 		write_lock(&em_tree->lock);
6677 		ret = add_extent_mapping(em_tree, em);
6678 		if (!ret)
6679 			list_move(&em->list,
6680 				  &em_tree->modified_extents);
6681 		write_unlock(&em_tree->lock);
6682 	} while (ret == -EEXIST);
6683 
6684 	if (ret) {
6685 		free_extent_map(em);
6686 		return ERR_PTR(ret);
6687 	}
6688 
6689 	return em;
6690 }
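
/*
 * Usage note (illustrative): the PINNED flag set above keeps the
 * mapping from being dropped while the ordered extent it describes is
 * in flight; the direct IO path below creates one for a prealloc
 * overwrite roughly like
 *
 *	em = create_pinned_em(inode, start, len, orig_start,
 *			      block_start, len, orig_block_len,
 *			      BTRFS_ORDERED_PREALLOC);
 */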
6691 
6692 
6693 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
6694 				   struct buffer_head *bh_result, int create)
6695 {
6696 	struct extent_map *em;
6697 	struct btrfs_root *root = BTRFS_I(inode)->root;
6698 	struct extent_state *cached_state = NULL;
6699 	u64 start = iblock << inode->i_blkbits;
6700 	u64 lockstart, lockend;
6701 	u64 len = bh_result->b_size;
6702 	struct btrfs_trans_handle *trans;
6703 	int unlock_bits = EXTENT_LOCKED;
6704 	int ret = 0;
6705 
6706 	if (create)
6707 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
6708 	else
6709 		len = min_t(u64, len, root->sectorsize);
6710 
6711 	lockstart = start;
6712 	lockend = start + len - 1;
6713 
6714 	/*
6715 	 * If this errors out it's because we couldn't invalidate pagecache for
6716 	 * this range and we need to fallback to buffered.
6717 	 */
6718 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6719 		return -ENOTBLK;
6720 
6721 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
6722 	if (IS_ERR(em)) {
6723 		ret = PTR_ERR(em);
6724 		goto unlock_err;
6725 	}
6726 
6727 	/*
6728 	 * Ok, for INLINE and COMPRESSED extents we need to fall back to buffered
6729 	 * io.  INLINE is special, and we could probably kludge it in here, but
6730 	 * it's still buffered so for safety let's just fall back to the generic
6731 	 * buffered path.
6732 	 *
6733 	 * For COMPRESSED we _have_ to read the entire extent in so we can
6734 	 * decompress it, so there will be buffering required no matter what we
6735 	 * do, so go ahead and fallback to buffered.
6736 	 *
6737 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
6738 	 * to buffered IO.  Don't blame me, this is the price we pay for using
6739 	 * the generic code.
6740 	 */
6741 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6742 	    em->block_start == EXTENT_MAP_INLINE) {
6743 		free_extent_map(em);
6744 		ret = -ENOTBLK;
6745 		goto unlock_err;
6746 	}
6747 
6748 	/* Just a good old fashioned hole, return */
6749 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6750 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6751 		free_extent_map(em);
6752 		goto unlock_err;
6753 	}
6754 
6755 	/*
6756 	 * We don't allocate a new extent in the following cases
6757 	 *
6758 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6759 	 * existing extent.
6760 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6761 	 * just use the extent.
6762 	 *
6763 	 */
6764 	if (!create) {
6765 		len = min(len, em->len - (start - em->start));
6766 		lockstart = start + len;
6767 		goto unlock;
6768 	}
6769 
6770 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6771 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6772 	     em->block_start != EXTENT_MAP_HOLE)) {
6773 		int type;
6774 		int ret;
6775 		u64 block_start;
6776 
6777 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6778 			type = BTRFS_ORDERED_PREALLOC;
6779 		else
6780 			type = BTRFS_ORDERED_NOCOW;
6781 		len = min(len, em->len - (start - em->start));
6782 		block_start = em->block_start + (start - em->start);
6783 
6784 		/*
6785 		 * we're not going to log anything, but we do need
6786 		 * to make sure the current transaction stays open
6787 		 * while we look for nocow cross refs
6788 		 */
6789 		trans = btrfs_join_transaction(root);
6790 		if (IS_ERR(trans))
6791 			goto must_cow;
6792 
6793 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
6794 			u64 orig_start = em->orig_start;
6795 			u64 orig_block_len = em->orig_block_len;
6796 
6797 			if (type == BTRFS_ORDERED_PREALLOC) {
6798 				free_extent_map(em);
6799 				em = create_pinned_em(inode, start, len,
6800 						       orig_start,
6801 						       block_start, len,
6802 						       orig_block_len, type);
6803 				if (IS_ERR(em)) {
6804 					btrfs_end_transaction(trans, root);
6805 					goto unlock_err;
6806 				}
6807 			}
6808 
6809 			ret = btrfs_add_ordered_extent_dio(inode, start,
6810 					   block_start, len, len, type);
6811 			btrfs_end_transaction(trans, root);
6812 			if (ret) {
6813 				free_extent_map(em);
6814 				goto unlock_err;
6815 			}
6816 			goto unlock;
6817 		}
6818 		btrfs_end_transaction(trans, root);
6819 	}
6820 must_cow:
6821 	/*
6822 	 * this will cow the extent, reset the len in case we changed
6823 	 * it above
6824 	 */
6825 	len = bh_result->b_size;
6826 	free_extent_map(em);
6827 	em = btrfs_new_extent_direct(inode, start, len);
6828 	if (IS_ERR(em)) {
6829 		ret = PTR_ERR(em);
6830 		goto unlock_err;
6831 	}
6832 	len = min(len, em->len - (start - em->start));
6833 unlock:
6834 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6835 		inode->i_blkbits;
6836 	bh_result->b_size = len;
6837 	bh_result->b_bdev = em->bdev;
6838 	set_buffer_mapped(bh_result);
6839 	if (create) {
6840 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6841 			set_buffer_new(bh_result);
6842 
6843 		/*
6844 		 * Need to update the i_size under the extent lock so buffered
6845 		 * readers will get the updated i_size when we unlock.
6846 		 */
6847 		if (start + len > i_size_read(inode))
6848 			i_size_write(inode, start + len);
6849 
6850 		spin_lock(&BTRFS_I(inode)->lock);
6851 		BTRFS_I(inode)->outstanding_extents++;
6852 		spin_unlock(&BTRFS_I(inode)->lock);
6853 
6854 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6855 				     lockstart + len - 1, EXTENT_DELALLOC, NULL,
6856 				     &cached_state, GFP_NOFS);
6857 		BUG_ON(ret);
6858 	}
6859 
6860 	/*
6861 	 * In the case of write we need to clear and unlock the entire range,
6862 	 * in the case of read we need to unlock only the end area that we
6863 	 * aren't using if there is any left over space.
6864 	 */
6865 	if (lockstart < lockend) {
6866 		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6867 				 lockend, unlock_bits, 1, 0,
6868 				 &cached_state, GFP_NOFS);
6869 	} else {
6870 		free_extent_state(cached_state);
6871 	}
6872 
6873 	free_extent_map(em);
6874 
6875 	return 0;
6876 
6877 unlock_err:
6878 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6879 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6880 	return ret;
6881 }
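
/*
 * Mapping arithmetic above, by example (assuming 4k blocks, so
 * i_blkbits == 12): for an extent with em->start == 1M and
 * em->block_start == 100M, a request at start == 1M + 8k yields
 * b_blocknr == (100M + 8k) >> 12, i.e. the third 4k block of the
 * on-disk extent.
 */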
6882 
6883 struct btrfs_dio_private {
6884 	struct inode *inode;
6885 	u64 logical_offset;
6886 	u64 disk_bytenr;
6887 	u64 bytes;
6888 	void *private;
6889 
6890 	/* number of bios pending for this dio */
6891 	atomic_t pending_bios;
6892 
6893 	/* IO errors */
6894 	int errors;
6895 
6896 	struct bio *orig_bio;
6897 };
6898 
6899 static void btrfs_endio_direct_read(struct bio *bio, int err)
6900 {
6901 	struct btrfs_dio_private *dip = bio->bi_private;
6902 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6903 	struct bio_vec *bvec = bio->bi_io_vec;
6904 	struct inode *inode = dip->inode;
6905 	struct btrfs_root *root = BTRFS_I(inode)->root;
6906 	u64 start;
6907 
6908 	start = dip->logical_offset;
6909 	do {
6910 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6911 			struct page *page = bvec->bv_page;
6912 			char *kaddr;
6913 			u32 csum = ~(u32)0;
6914 			u64 private = ~(u32)0;
6915 			unsigned long flags;
6916 
6917 			if (get_state_private(&BTRFS_I(inode)->io_tree,
6918 					      start, &private))
6919 				goto failed;
6920 			local_irq_save(flags);
6921 			kaddr = kmap_atomic(page);
6922 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
6923 					       csum, bvec->bv_len);
6924 			btrfs_csum_final(csum, (char *)&csum);
6925 			kunmap_atomic(kaddr);
6926 			local_irq_restore(flags);
6927 
6928 			flush_dcache_page(bvec->bv_page);
6929 			if (csum != private) {
6930 failed:
6931 				printk(KERN_ERR "btrfs csum failed ino %llu off"
6932 				      " %llu csum %u private %u\n",
6933 				      (unsigned long long)btrfs_ino(inode),
6934 				      (unsigned long long)start,
6935 				      csum, (unsigned)private);
6936 				err = -EIO;
6937 			}
6938 		}
6939 
6940 		start += bvec->bv_len;
6941 		bvec++;
6942 	} while (bvec <= bvec_end);
6943 
6944 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6945 		      dip->logical_offset + dip->bytes - 1);
6946 	bio->bi_private = dip->private;
6947 
6948 	kfree(dip);
6949 
6950 	/* If we had a csum failure make sure to clear the uptodate flag */
6951 	if (err)
6952 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6953 	dio_end_io(bio, err);
6954 }
6955 
6956 static void btrfs_endio_direct_write(struct bio *bio, int err)
6957 {
6958 	struct btrfs_dio_private *dip = bio->bi_private;
6959 	struct inode *inode = dip->inode;
6960 	struct btrfs_root *root = BTRFS_I(inode)->root;
6961 	struct btrfs_ordered_extent *ordered = NULL;
6962 	u64 ordered_offset = dip->logical_offset;
6963 	u64 ordered_bytes = dip->bytes;
6964 	int ret;
6965 
6966 	if (err)
6967 		goto out_done;
6968 again:
6969 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6970 						   &ordered_offset,
6971 						   ordered_bytes, !err);
6972 	if (!ret)
6973 		goto out_test;
6974 
6975 	ordered->work.func = finish_ordered_fn;
6976 	ordered->work.flags = 0;
6977 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6978 			   &ordered->work);
6979 out_test:
6980 	/*
6981 	 * our bio might span multiple ordered extents.  If we haven't
6982 	 * completed the accounting for the whole dio, go back and try again
6983 	 */
6984 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6985 		ordered_bytes = dip->logical_offset + dip->bytes -
6986 			ordered_offset;
6987 		ordered = NULL;
6988 		goto again;
6989 	}
6990 out_done:
6991 	bio->bi_private = dip->private;
6992 
6993 	kfree(dip);
6994 
6995 	/* If we had an error make sure to clear the uptodate flag */
6996 	if (err)
6997 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6998 	dio_end_io(bio, err);
6999 }
7000 
7001 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7002 				    struct bio *bio, int mirror_num,
7003 				    unsigned long bio_flags, u64 offset)
7004 {
7005 	int ret;
7006 	struct btrfs_root *root = BTRFS_I(inode)->root;
7007 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
7008 	BUG_ON(ret); /* -ENOMEM */
7009 	return 0;
7010 }
7011 
7012 static void btrfs_end_dio_bio(struct bio *bio, int err)
7013 {
7014 	struct btrfs_dio_private *dip = bio->bi_private;
7015 
7016 	if (err) {
7017 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
7018 		      "sector %#Lx len %u err no %d\n",
7019 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
7020 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
7021 		dip->errors = 1;
7022 
7023 		/*
7024 		 * before the atomic variable goes to zero, we must make sure
7025 		 * dip->errors is perceived to be set.
7026 		 */
7027 		smp_mb__before_atomic_dec();
7028 	}
7029 
7030 	/* if there are more bios still pending for this dio, just exit */
7031 	if (!atomic_dec_and_test(&dip->pending_bios))
7032 		goto out;
7033 
7034 	if (dip->errors)
7035 		bio_io_error(dip->orig_bio);
7036 	else {
7037 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
7038 		bio_endio(dip->orig_bio, 0);
7039 	}
7040 out:
7041 	bio_put(bio);
7042 }
7043 
7044 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
7045 				       u64 first_sector, gfp_t gfp_flags)
7046 {
7047 	int nr_vecs = bio_get_nr_vecs(bdev);
7048 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
7049 }
7050 
7051 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
7052 					 int rw, u64 file_offset, int skip_sum,
7053 					 int async_submit)
7054 {
7055 	int write = rw & REQ_WRITE;
7056 	struct btrfs_root *root = BTRFS_I(inode)->root;
7057 	int ret;
7058 
7059 	if (async_submit)
7060 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7061 
7062 	bio_get(bio);
7063 
7064 	if (!write) {
7065 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
7066 		if (ret)
7067 			goto err;
7068 	}
7069 
7070 	if (skip_sum)
7071 		goto map;
7072 
7073 	if (write && async_submit) {
7074 		ret = btrfs_wq_submit_bio(root->fs_info,
7075 				   inode, rw, bio, 0, 0,
7076 				   file_offset,
7077 				   __btrfs_submit_bio_start_direct_io,
7078 				   __btrfs_submit_bio_done);
7079 		goto err;
7080 	} else if (write) {
7081 		/*
7082 		 * If we aren't doing async submit, calculate the csum of the
7083 		 * bio now.
7084 		 */
7085 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
7086 		if (ret)
7087 			goto err;
7088 	} else if (!skip_sum) {
7089 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
7090 		if (ret)
7091 			goto err;
7092 	}
7093 
7094 map:
7095 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
7096 err:
7097 	bio_put(bio);
7098 	return ret;
7099 }
7100 
7101 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7102 				    int skip_sum)
7103 {
7104 	struct inode *inode = dip->inode;
7105 	struct btrfs_root *root = BTRFS_I(inode)->root;
7106 	struct bio *bio;
7107 	struct bio *orig_bio = dip->orig_bio;
7108 	struct bio_vec *bvec = orig_bio->bi_io_vec;
7109 	u64 start_sector = orig_bio->bi_sector;
7110 	u64 file_offset = dip->logical_offset;
7111 	u64 submit_len = 0;
7112 	u64 map_length;
7113 	int nr_pages = 0;
7114 	int ret = 0;
7115 	int async_submit = 0;
7116 
7117 	map_length = orig_bio->bi_size;
7118 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
7119 			      &map_length, NULL, 0);
7120 	if (ret) {
7121 		bio_put(orig_bio);
7122 		return -EIO;
7123 	}
7124 	if (map_length >= orig_bio->bi_size) {
7125 		bio = orig_bio;
7126 		goto submit;
7127 	}
7128 
7129 	/* async crcs make it difficult to collect full stripe writes. */
7130 	if (btrfs_get_alloc_profile(root, 1) &
7131 	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7132 		async_submit = 0;
7133 	else
7134 		async_submit = 1;
7135 
7136 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
7137 	if (!bio)
7138 		return -ENOMEM;
7139 	bio->bi_private = dip;
7140 	bio->bi_end_io = btrfs_end_dio_bio;
7141 	atomic_inc(&dip->pending_bios);
7142 
7143 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
7144 		if (unlikely(map_length < submit_len + bvec->bv_len ||
7145 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
7146 				 bvec->bv_offset) < bvec->bv_len)) {
7147 			/*
7148 			 * inc the count before we submit the bio so
7149 			 * the end IO handler can't run before the count
7150 			 * is incremented. Otherwise, the dip might get freed
7151 			 * before we're done setting it up
7152 			 */
7153 			atomic_inc(&dip->pending_bios);
7154 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
7155 						     file_offset, skip_sum,
7156 						     async_submit);
7157 			if (ret) {
7158 				bio_put(bio);
7159 				atomic_dec(&dip->pending_bios);
7160 				goto out_err;
7161 			}
7162 
7163 			start_sector += submit_len >> 9;
7164 			file_offset += submit_len;
7165 
7166 			submit_len = 0;
7167 			nr_pages = 0;
7168 
7169 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
7170 						  start_sector, GFP_NOFS);
7171 			if (!bio)
7172 				goto out_err;
7173 			bio->bi_private = dip;
7174 			bio->bi_end_io = btrfs_end_dio_bio;
7175 
7176 			map_length = orig_bio->bi_size;
7177 			ret = btrfs_map_block(root->fs_info, rw,
7178 					      start_sector << 9,
7179 					      &map_length, NULL, 0);
7180 			if (ret) {
7181 				bio_put(bio);
7182 				goto out_err;
7183 			}
7184 		} else {
7185 			submit_len += bvec->bv_len;
7186 			nr_pages++;
7187 			bvec++;
7188 		}
7189 	}
7190 
7191 submit:
7192 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
7193 				     async_submit);
7194 	if (!ret)
7195 		return 0;
7196 
7197 	bio_put(bio);
7198 out_err:
7199 	dip->errors = 1;
7200 	/*
7201 	 * before the atomic variable goes to zero, we must
7202 	 * make sure dip->errors is perceived to be set.
7203 	 */
7204 	smp_mb__before_atomic_dec();
7205 	if (atomic_dec_and_test(&dip->pending_bios))
7206 		bio_io_error(dip->orig_bio);
7207 
7208 	/* bio_end_io() will handle error, so we needn't return it */
7209 	return 0;
7210 }
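
/*
 * Splitting sketch (for exposition): if the original 1M dio bio crosses
 * a stripe boundary after 384k, btrfs_map_block() returns map_length ==
 * 384k, the loop above fills a new bio with pages up to that limit,
 * submits it, then re-maps from the advanced start_sector to build the
 * next bio; pending_bios keeps the dip alive until the last completion.
 */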
7211 
7212 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
7213 				loff_t file_offset)
7214 {
7215 	struct btrfs_root *root = BTRFS_I(inode)->root;
7216 	struct btrfs_dio_private *dip;
7217 	struct bio_vec *bvec = bio->bi_io_vec;
7218 	int skip_sum;
7219 	int write = rw & REQ_WRITE;
7220 	int ret = 0;
7221 
7222 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7223 
7224 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
7225 	if (!dip) {
7226 		ret = -ENOMEM;
7227 		goto free_ordered;
7228 	}
7229 
7230 	dip->private = bio->bi_private;
7231 	dip->inode = inode;
7232 	dip->logical_offset = file_offset;
7233 
7234 	dip->bytes = 0;
7235 	do {
7236 		dip->bytes += bvec->bv_len;
7237 		bvec++;
7238 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
7239 
7240 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
7241 	bio->bi_private = dip;
7242 	dip->errors = 0;
7243 	dip->orig_bio = bio;
7244 	atomic_set(&dip->pending_bios, 0);
7245 
7246 	if (write)
7247 		bio->bi_end_io = btrfs_endio_direct_write;
7248 	else
7249 		bio->bi_end_io = btrfs_endio_direct_read;
7250 
7251 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7252 	if (!ret)
7253 		return;
7254 free_ordered:
7255 	/*
7256 	 * If this is a write, we need to clean up the reserved space and kill
7257 	 * the ordered extent.
7258 	 */
7259 	if (write) {
7260 		struct btrfs_ordered_extent *ordered;
7261 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
7262 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7263 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7264 			btrfs_free_reserved_extent(root, ordered->start,
7265 						   ordered->disk_len);
7266 		btrfs_put_ordered_extent(ordered);
7267 		btrfs_put_ordered_extent(ordered);
7268 	}
7269 	bio_endio(bio, ret);
7270 }
7271 
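/*
 * Direct IO requires the file offset and every iovec segment to be
 * aligned to the fs sector size.  For reads we also reject duplicate
 * iov_base pointers, since reading into the same buffer twice would
 * trip the csum verification.  A non-zero return makes the caller
 * fall back to buffered IO.
 */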
7272 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7273 			const struct iovec *iov, loff_t offset,
7274 			unsigned long nr_segs)
7275 {
7276 	int seg;
7277 	int i;
7278 	size_t size;
7279 	unsigned long addr;
7280 	unsigned blocksize_mask = root->sectorsize - 1;
7281 	ssize_t retval = -EINVAL;
7282 	loff_t end = offset;
7283 
7284 	if (offset & blocksize_mask)
7285 		goto out;
7286 
7287 	/* Check the memory alignment.  Blocks cannot straddle pages */
7288 	for (seg = 0; seg < nr_segs; seg++) {
7289 		addr = (unsigned long)iov[seg].iov_base;
7290 		size = iov[seg].iov_len;
7291 		end += size;
7292 		if ((addr & blocksize_mask) || (size & blocksize_mask))
7293 			goto out;
7294 
7295 		/* If this is a write we don't need any further checks */
7296 		if (rw & WRITE)
7297 			continue;
7298 
7299 		/*
7300 		 * Check to make sure we don't have duplicate iov_base's in this
7301 		 * iovec; if we do, return -EINVAL, otherwise we'll get csum
7302 		 * errors when reading back.
7303 		 */
7304 		for (i = seg + 1; i < nr_segs; i++) {
7305 			if (iov[seg].iov_base == iov[i].iov_base)
7306 				goto out;
7307 		}
7308 	}
7309 	retval = 0;
7310 out:
7311 	return retval;
7312 }
7313 
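/*
 * address_space direct_IO hook.  Writes reserve the full delalloc
 * space up front and release whatever __blockdev_direct_IO didn't
 * consume; the i_mutex is dropped for writes that stay inside i_size
 * and retaken before we return.
 */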
7314 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
7315 			const struct iovec *iov, loff_t offset,
7316 			unsigned long nr_segs)
7317 {
7318 	struct file *file = iocb->ki_filp;
7319 	struct inode *inode = file->f_mapping->host;
7320 	size_t count = 0;
7321 	int flags = 0;
7322 	bool wakeup = true;
7323 	bool relock = false;
7324 	ssize_t ret;
7325 
7326 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
7327 			    offset, nr_segs))
7328 		return 0;
7329 
7330 	atomic_inc(&inode->i_dio_count);
7331 	smp_mb__after_atomic_inc();
7332 
7333 	if (rw & WRITE) {
7334 		count = iov_length(iov, nr_segs);
7335 		/*
7336 		 * If the write DIO is beyond the EOF, we need to update
7337 		 * the isize, but it is protected by i_mutex, so we cannot
7338 		 * unlock the i_mutex in this case.
7339 		 */
7340 		if (offset + count <= inode->i_size) {
7341 			mutex_unlock(&inode->i_mutex);
7342 			relock = true;
7343 		}
7344 		ret = btrfs_delalloc_reserve_space(inode, count);
7345 		if (ret)
7346 			goto out;
7347 	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
7348 				     &BTRFS_I(inode)->runtime_flags))) {
7349 		inode_dio_done(inode);
7350 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
7351 		wakeup = false;
7352 	}
7353 
7354 	ret = __blockdev_direct_IO(rw, iocb, inode,
7355 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
7356 			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
7357 			btrfs_submit_direct, flags);
7358 	if (rw & WRITE) {
7359 		if (ret < 0 && ret != -EIOCBQUEUED)
7360 			btrfs_delalloc_release_space(inode, count);
7361 		else if (ret >= 0 && (size_t)ret < count)
7362 			btrfs_delalloc_release_space(inode,
7363 						     count - (size_t)ret);
7364 		else
7365 			btrfs_delalloc_release_metadata(inode, 0);
7366 	}
7367 out:
7368 	if (wakeup)
7369 		inode_dio_done(inode);
7370 	if (relock)
7371 		mutex_lock(&inode->i_mutex);
7372 
7373 	return ret;
7374 }
7375 
7376 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
7377 
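/* fiemap is serviced via btrfs_get_extent_fiemap and the extent map cache */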
7378 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7379 		__u64 start, __u64 len)
7380 {
7381 	int	ret;
7382 
7383 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7384 	if (ret)
7385 		return ret;
7386 
7387 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
7388 }
7389 
7390 int btrfs_readpage(struct file *file, struct page *page)
7391 {
7392 	struct extent_io_tree *tree;
7393 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7394 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
7395 }
7396 
7397 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
7398 {
7399 	struct extent_io_tree *tree;
7400 
7401 
7402 	if (current->flags & PF_MEMALLOC) {
7403 		redirty_page_for_writepage(wbc, page);
7404 		unlock_page(page);
7405 		return 0;
7406 	}
7407 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7408 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
7409 }
7410 
7411 int btrfs_writepages(struct address_space *mapping,
7412 		     struct writeback_control *wbc)
7413 {
7414 	struct extent_io_tree *tree;
7415 
7416 	tree = &BTRFS_I(mapping->host)->io_tree;
7417 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7418 }
7419 
7420 static int
7421 btrfs_readpages(struct file *file, struct address_space *mapping,
7422 		struct list_head *pages, unsigned nr_pages)
7423 {
7424 	struct extent_io_tree *tree;
7425 	tree = &BTRFS_I(mapping->host)->io_tree;
7426 	return extent_readpages(tree, mapping, pages, nr_pages,
7427 				btrfs_get_extent);
7428 }
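
/*
 * Drop the extent state and any cached extent mapping attached to a
 * page so it can be released; on success, detach the page's private
 * pointer and drop the reference it held.
 */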
7429 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7430 {
7431 	struct extent_io_tree *tree;
7432 	struct extent_map_tree *map;
7433 	int ret;
7434 
7435 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7436 	map = &BTRFS_I(page->mapping->host)->extent_tree;
7437 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
7438 	if (ret == 1) {
7439 		ClearPagePrivate(page);
7440 		set_page_private(page, 0);
7441 		page_cache_release(page);
7442 	}
7443 	return ret;
7444 }
7445 
7446 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7447 {
7448 	if (PageWriteback(page) || PageDirty(page))
7449 		return 0;
7450 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
7451 }
7452 
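/*
 * Called when a page is being removed from the page cache.  The IO
 * that would normally complete any ordered extent covering this page
 * will never be started, so the ordered accounting has to be done
 * here instead.
 */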
7453 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
7454 {
7455 	struct inode *inode = page->mapping->host;
7456 	struct extent_io_tree *tree;
7457 	struct btrfs_ordered_extent *ordered;
7458 	struct extent_state *cached_state = NULL;
7459 	u64 page_start = page_offset(page);
7460 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
7461 
7462 	/*
7463 	 * we have the page locked, so new writeback can't start,
7464 	 * and the dirty bit won't be cleared while we are here.
7465 	 *
7466 	 * Wait for IO on this page so that we can safely clear
7467 	 * the PagePrivate2 bit and do ordered accounting
7468 	 */
7469 	wait_on_page_writeback(page);
7470 
7471 	tree = &BTRFS_I(inode)->io_tree;
7472 	if (offset) {
7473 		btrfs_releasepage(page, GFP_NOFS);
7474 		return;
7475 	}
7476 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7477 	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
7478 	if (ordered) {
7479 		/*
7480 		 * IO on this page will never be started, so we need
7481 		 * to account for any ordered extents now
7482 		 */
7483 		clear_extent_bit(tree, page_start, page_end,
7484 				 EXTENT_DIRTY | EXTENT_DELALLOC |
7485 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7486 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
7487 		/*
7488 		 * whoever cleared the private bit is responsible
7489 		 * for the finish_ordered_io
7490 		 */
7491 		if (TestClearPagePrivate2(page) &&
7492 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
7493 						   PAGE_CACHE_SIZE, 1)) {
7494 			btrfs_finish_ordered_io(ordered);
7495 		}
7496 		btrfs_put_ordered_extent(ordered);
7497 		cached_state = NULL;
7498 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7499 	}
7500 	clear_extent_bit(tree, page_start, page_end,
7501 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
7502 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
7503 		 &cached_state, GFP_NOFS);
7504 	__btrfs_releasepage(page, GFP_NOFS);
7505 
7506 	ClearPageChecked(page);
7507 	if (PagePrivate(page)) {
7508 		ClearPagePrivate(page);
7509 		set_page_private(page, 0);
7510 		page_cache_release(page);
7511 	}
7512 }
7513 
7514 /*
7515  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7516  * called from a page fault handler when a page is first dirtied. Hence we must
7517  * be careful to check for EOF conditions here. We set the page up correctly
7518  * for a written page which means we get ENOSPC checking when writing into
7519  * holes and correct delalloc and unwritten extent mapping on filesystems that
7520  * support these features.
7521  *
7522  * We are not allowed to take the i_mutex here so we have to play games to
7523  * protect against truncate races as the page could now be beyond EOF.  Because
7524  * vmtruncate() writes the inode size before removing pages, once we have the
7525  * page lock we can determine safely if the page is beyond EOF. If it is not
7526  * beyond EOF, then the page is guaranteed safe against truncation until we
7527  * unlock the page.
7528  */
7529 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
7530 {
7531 	struct page *page = vmf->page;
7532 	struct inode *inode = file_inode(vma->vm_file);
7533 	struct btrfs_root *root = BTRFS_I(inode)->root;
7534 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7535 	struct btrfs_ordered_extent *ordered;
7536 	struct extent_state *cached_state = NULL;
7537 	char *kaddr;
7538 	unsigned long zero_start;
7539 	loff_t size;
7540 	int ret;
7541 	int reserved = 0;
7542 	u64 page_start;
7543 	u64 page_end;
7544 
7545 	sb_start_pagefault(inode->i_sb);
7546 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
7547 	if (!ret) {
7548 		ret = file_update_time(vma->vm_file);
7549 		reserved = 1;
7550 	}
7551 	if (ret) {
7552 		if (ret == -ENOMEM)
7553 			ret = VM_FAULT_OOM;
7554 		else /* -ENOSPC, -EIO, etc */
7555 			ret = VM_FAULT_SIGBUS;
7556 		if (reserved)
7557 			goto out;
7558 		goto out_noreserve;
7559 	}
7560 
7561 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
7562 again:
7563 	lock_page(page);
7564 	size = i_size_read(inode);
7565 	page_start = page_offset(page);
7566 	page_end = page_start + PAGE_CACHE_SIZE - 1;
7567 
7568 	if ((page->mapping != inode->i_mapping) ||
7569 	    (page_start >= size)) {
7570 		/* page got truncated out from underneath us */
7571 		goto out_unlock;
7572 	}
7573 	wait_on_page_writeback(page);
7574 
7575 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
7576 	set_page_extent_mapped(page);
7577 
7578 	/*
7579 	 * we can't set the delalloc bits if there are pending ordered
7580 	 * extents.  Drop our locks and wait for them to finish
7581 	 */
7582 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
7583 	if (ordered) {
7584 		unlock_extent_cached(io_tree, page_start, page_end,
7585 				     &cached_state, GFP_NOFS);
7586 		unlock_page(page);
7587 		btrfs_start_ordered_extent(inode, ordered, 1);
7588 		btrfs_put_ordered_extent(ordered);
7589 		goto again;
7590 	}
7591 
7592 	/*
7593 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
7594 	 * if it was already dirty, so for space accounting reasons we need to
7595 	 * clear any delalloc bits for the range we are fixing to save.  There
7596 	 * is probably a better way to do this, but for now keep consistent with
7597 	 * prepare_pages in the normal write path.
7598 	 */
7599 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
7600 			  EXTENT_DIRTY | EXTENT_DELALLOC |
7601 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
7602 			  0, 0, &cached_state, GFP_NOFS);
7603 
7604 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7605 					&cached_state);
7606 	if (ret) {
7607 		unlock_extent_cached(io_tree, page_start, page_end,
7608 				     &cached_state, GFP_NOFS);
7609 		ret = VM_FAULT_SIGBUS;
7610 		goto out_unlock;
7611 	}
7612 	ret = 0;
7613 
7614 	/* page is wholly or partially inside EOF */
7615 	if (page_start + PAGE_CACHE_SIZE > size)
7616 		zero_start = size & ~PAGE_CACHE_MASK;
7617 	else
7618 		zero_start = PAGE_CACHE_SIZE;
7619 
7620 	if (zero_start != PAGE_CACHE_SIZE) {
7621 		kaddr = kmap(page);
7622 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7623 		flush_dcache_page(page);
7624 		kunmap(page);
7625 	}
7626 	ClearPageChecked(page);
7627 	set_page_dirty(page);
7628 	SetPageUptodate(page);
7629 
7630 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
7631 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
7632 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
7633 
7634 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
7635 
7636 out_unlock:
7637 	if (!ret) {
7638 		sb_end_pagefault(inode->i_sb);
7639 		return VM_FAULT_LOCKED;
7640 	}
7641 	unlock_page(page);
7642 out:
7643 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
7644 out_noreserve:
7645 	sb_end_pagefault(inode->i_sb);
7646 	return ret;
7647 }
7648 
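/*
 * Shrink an inode after i_size has been updated: zero the tail of the
 * last block, wait for ordered IO past the new size, then drop the
 * file extent items in ENOSPC-safe chunks, restarting the transaction
 * as needed.
 */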
7649 static int btrfs_truncate(struct inode *inode)
7650 {
7651 	struct btrfs_root *root = BTRFS_I(inode)->root;
7652 	struct btrfs_block_rsv *rsv;
7653 	int ret;
7654 	int err = 0;
7655 	struct btrfs_trans_handle *trans;
7656 	u64 mask = root->sectorsize - 1;
7657 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
7658 
7659 	ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
7660 	if (ret)
7661 		return ret;
7662 
7663 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
7664 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
7665 
7666 	/*
7667 	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
7668 	 * 3 things going on here
7669 	 *
7670 	 * 1) We need to reserve space for our orphan item and the space to
7671 	 * delete our orphan item.  Lord knows we don't want to have a dangling
7672 	 * orphan item because we didn't reserve space to remove it.
7673 	 *
7674 	 * 2) We need to reserve space to update our inode.
7675 	 *
7676 	 * 3) We need to have something to cache all the space that is going to
7677 	 * be freed up by the truncate operation, but also have some slack
7678 	 * space reserved in case it uses space during the truncate (thank you
7679 	 * very much snapshotting).
7680 	 *
7681 	 * And we need these to all be separate.  The fact is we can use a lot
7682 	 * of space doing the truncate, and we have no earthly idea how much
7683 	 * space we will use, so we need the truncate reservation to be
7684 	 * separate so it doesn't end up using space reserved for updating the
7685 	 * inode or removing the orphan item.  We also need to be able to stop
7686 	 * the transaction and start a new one, which means we need to be able
7687 	 * to update the inode several times, and we have no way of knowing
7688 	 * how many times that will be, so we can't just reserve 1 item for
7689 	 * the entirety of the operation, so that has to be done separately as
7690 	 * well.  Then there is the orphan item, which does indeed need to be
7691 	 * held on to for the whole operation, and we need nobody to touch
7692 	 * this reserved space except the orphan code.
7693 	 *
7694 	 * So that leaves us with
7695 	 *
7696 	 * 1) root->orphan_block_rsv - for the orphan deletion.
7697 	 * 2) rsv - for the truncate reservation, which we will steal from the
7698 	 * transaction reservation.
7699 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left
7700 	 * for updating the inode.
7701 	 */
7702 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
7703 	if (!rsv)
7704 		return -ENOMEM;
7705 	rsv->size = min_size;
7706 	rsv->failfast = 1;
7707 
7708 	/*
7709 	 * 1 for the truncate slack space
7710 	 * 1 for updating the inode.
7711 	 */
7712 	trans = btrfs_start_transaction(root, 2);
7713 	if (IS_ERR(trans)) {
7714 		err = PTR_ERR(trans);
7715 		goto out;
7716 	}
7717 
7718 	/* Migrate the slack space for the truncate to our reserve */
7719 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7720 				      min_size);
7721 	BUG_ON(ret);
7722 
7723 	/*
7724 	 * setattr is responsible for setting the ordered_data_close flag,
7725 	 * but that is only tested during the last file release.  That
7726 	 * could happen well after the next commit, leaving a great big
7727 	 * window where new writes may get lost if someone chooses to write
7728 	 * to this file after truncating to zero
7729 	 *
7730 	 * The inode doesn't have any dirty data here, and so if we commit
7731 	 * this is a noop.  If someone immediately starts writing to the inode
7732 	 * it is very likely we'll catch some of their writes in this
7733 	 * transaction, and the commit will find this file on the ordered
7734 	 * data list with good things to send down.
7735 	 *
7736 	 * This is a best effort solution, there is still a window where
7737 	 * using truncate to replace the contents of the file will
7738 	 * end up with a zero length file after a crash.
7739 	 */
7740 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7741 					   &BTRFS_I(inode)->runtime_flags))
7742 		btrfs_add_ordered_operation(trans, root, inode);
7743 
7744 	/*
7745 	 * So if we truncate and then write and fsync we normally would just
7746 	 * write the extents that changed, which is a problem if we need to
7747 	 * first truncate that entire inode.  So set this flag so we write out
7748 	 * all of the extents in the inode to the sync log so we're completely
7749 	 * safe.
7750 	 */
7751 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
7752 	trans->block_rsv = rsv;
7753 
7754 	while (1) {
7755 		ret = btrfs_truncate_inode_items(trans, root, inode,
7756 						 inode->i_size,
7757 						 BTRFS_EXTENT_DATA_KEY);
7758 		if (ret != -ENOSPC) {
7759 			err = ret;
7760 			break;
7761 		}
7762 
7763 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7764 		ret = btrfs_update_inode(trans, root, inode);
7765 		if (ret) {
7766 			err = ret;
7767 			break;
7768 		}
7769 
7770 		btrfs_end_transaction(trans, root);
7771 		btrfs_btree_balance_dirty(root);
7772 
7773 		trans = btrfs_start_transaction(root, 2);
7774 		if (IS_ERR(trans)) {
7775 			ret = err = PTR_ERR(trans);
7776 			trans = NULL;
7777 			break;
7778 		}
7779 
7780 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7781 					      rsv, min_size);
7782 		BUG_ON(ret);	/* shouldn't happen */
7783 		trans->block_rsv = rsv;
7784 	}
7785 
7786 	if (ret == 0 && inode->i_nlink > 0) {
7787 		trans->block_rsv = root->orphan_block_rsv;
7788 		ret = btrfs_orphan_del(trans, inode);
7789 		if (ret)
7790 			err = ret;
7791 	}
7792 
7793 	if (trans) {
7794 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7795 		ret = btrfs_update_inode(trans, root, inode);
7796 		if (ret && !err)
7797 			err = ret;
7798 
7799 		ret = btrfs_end_transaction(trans, root);
7800 		btrfs_btree_balance_dirty(root);
7801 	}
7802 
7803 out:
7804 	btrfs_free_block_rsv(root, rsv);
7805 
7806 	if (ret && !err)
7807 		err = ret;
7808 
7809 	return err;
7810 }
7811 
7812 /*
7813  * create a new subvolume directory/inode (helper for the ioctl).
7814  */
7815 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7816 			     struct btrfs_root *new_root, u64 new_dirid)
7817 {
7818 	struct inode *inode;
7819 	int err;
7820 	u64 index = 0;
7821 
7822 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7823 				new_dirid, new_dirid,
7824 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7825 				&index);
7826 	if (IS_ERR(inode))
7827 		return PTR_ERR(inode);
7828 	inode->i_op = &btrfs_dir_inode_operations;
7829 	inode->i_fop = &btrfs_dir_file_operations;
7830 
7831 	set_nlink(inode, 1);
7832 	btrfs_i_size_write(inode, 0);
7833 
7834 	err = btrfs_update_inode(trans, new_root, inode);
7835 
7836 	iput(inode);
7837 	return err;
7838 }
7839 
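/*
 * Allocate a new in-memory btrfs inode from the slab cache and reset
 * every field that isn't covered by init_once().
 */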
7840 struct inode *btrfs_alloc_inode(struct super_block *sb)
7841 {
7842 	struct btrfs_inode *ei;
7843 	struct inode *inode;
7844 
7845 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7846 	if (!ei)
7847 		return NULL;
7848 
7849 	ei->root = NULL;
7850 	ei->generation = 0;
7851 	ei->last_trans = 0;
7852 	ei->last_sub_trans = 0;
7853 	ei->logged_trans = 0;
7854 	ei->delalloc_bytes = 0;
7855 	ei->disk_i_size = 0;
7856 	ei->flags = 0;
7857 	ei->csum_bytes = 0;
7858 	ei->index_cnt = (u64)-1;
7859 	ei->last_unlink_trans = 0;
7860 	ei->last_log_commit = 0;
7861 
7862 	spin_lock_init(&ei->lock);
7863 	ei->outstanding_extents = 0;
7864 	ei->reserved_extents = 0;
7865 
7866 	ei->runtime_flags = 0;
7867 	ei->force_compress = BTRFS_COMPRESS_NONE;
7868 
7869 	ei->delayed_node = NULL;
7870 
7871 	inode = &ei->vfs_inode;
7872 	extent_map_tree_init(&ei->extent_tree);
7873 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7874 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7875 	ei->io_tree.track_uptodate = 1;
7876 	ei->io_failure_tree.track_uptodate = 1;
7877 	atomic_set(&ei->sync_writers, 0);
7878 	mutex_init(&ei->log_mutex);
7879 	mutex_init(&ei->delalloc_mutex);
7880 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7881 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7882 	INIT_LIST_HEAD(&ei->ordered_operations);
7883 	RB_CLEAR_NODE(&ei->rb_node);
7884 
7885 	return inode;
7886 }
7887 
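/* RCU callback that actually frees the in-memory inode */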
7888 static void btrfs_i_callback(struct rcu_head *head)
7889 {
7890 	struct inode *inode = container_of(head, struct inode, i_rcu);
7891 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7892 }
7893 
7894 void btrfs_destroy_inode(struct inode *inode)
7895 {
7896 	struct btrfs_ordered_extent *ordered;
7897 	struct btrfs_root *root = BTRFS_I(inode)->root;
7898 
7899 	WARN_ON(!hlist_empty(&inode->i_dentry));
7900 	WARN_ON(inode->i_data.nrpages);
7901 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7902 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7903 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7904 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7905 
7906 	/*
7907 	 * This can happen when we create an inode, but somebody else also
7908 	 * created the same inode and we need to destroy the one we already
7909 	 * created.
7910 	 */
7911 	if (!root)
7912 		goto free;
7913 
7914 	/*
7915 	 * Make sure we're properly removed from the ordered operation
7916 	 * lists.
7917 	 */
7918 	smp_mb();
7919 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7920 		spin_lock(&root->fs_info->ordered_extent_lock);
7921 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7922 		spin_unlock(&root->fs_info->ordered_extent_lock);
7923 	}
7924 
7925 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7926 		     &BTRFS_I(inode)->runtime_flags)) {
7927 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
7928 		       (unsigned long long)btrfs_ino(inode));
7929 		atomic_dec(&root->orphan_inodes);
7930 	}
7931 
7932 	while (1) {
7933 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7934 		if (!ordered)
7935 			break;
7936 		else {
7937 			printk(KERN_ERR "btrfs found ordered "
7938 			       "extent %llu %llu on inode cleanup\n",
7939 			       (unsigned long long)ordered->file_offset,
7940 			       (unsigned long long)ordered->len);
7941 			btrfs_remove_ordered_extent(inode, ordered);
7942 			btrfs_put_ordered_extent(ordered);
7943 			btrfs_put_ordered_extent(ordered);
7944 		}
7945 	}
7946 	inode_tree_del(inode);
7947 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7948 free:
7949 	btrfs_remove_delayed_node(inode);
7950 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7951 }
7952 
7953 int btrfs_drop_inode(struct inode *inode)
7954 {
7955 	struct btrfs_root *root = BTRFS_I(inode)->root;
7956 
7957 	/* the snap/subvol tree is being deleted */
7958 	if (btrfs_root_refs(&root->root_item) == 0 &&
7959 	    root != root->fs_info->tree_root)
7960 		return 1;
7961 	else
7962 		return generic_drop_inode(inode);
7963 }
7964 
7965 static void init_once(void *foo)
7966 {
7967 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7968 
7969 	inode_init_once(&ei->vfs_inode);
7970 }
7971 
7972 void btrfs_destroy_cachep(void)
7973 {
7974 	/*
7975 	 * Make sure all delayed rcu free inodes are flushed before we
7976 	 * destroy cache.
7977 	 */
7978 	rcu_barrier();
7979 	if (btrfs_inode_cachep)
7980 		kmem_cache_destroy(btrfs_inode_cachep);
7981 	if (btrfs_trans_handle_cachep)
7982 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7983 	if (btrfs_transaction_cachep)
7984 		kmem_cache_destroy(btrfs_transaction_cachep);
7985 	if (btrfs_path_cachep)
7986 		kmem_cache_destroy(btrfs_path_cachep);
7987 	if (btrfs_free_space_cachep)
7988 		kmem_cache_destroy(btrfs_free_space_cachep);
7989 	if (btrfs_delalloc_work_cachep)
7990 		kmem_cache_destroy(btrfs_delalloc_work_cachep);
7991 }
7992 
7993 int btrfs_init_cachep(void)
7994 {
7995 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7996 			sizeof(struct btrfs_inode), 0,
7997 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7998 	if (!btrfs_inode_cachep)
7999 		goto fail;
8000 
8001 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
8002 			sizeof(struct btrfs_trans_handle), 0,
8003 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8004 	if (!btrfs_trans_handle_cachep)
8005 		goto fail;
8006 
8007 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
8008 			sizeof(struct btrfs_transaction), 0,
8009 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8010 	if (!btrfs_transaction_cachep)
8011 		goto fail;
8012 
8013 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
8014 			sizeof(struct btrfs_path), 0,
8015 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8016 	if (!btrfs_path_cachep)
8017 		goto fail;
8018 
8019 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
8020 			sizeof(struct btrfs_free_space), 0,
8021 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8022 	if (!btrfs_free_space_cachep)
8023 		goto fail;
8024 
8025 	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
8026 			sizeof(struct btrfs_delalloc_work), 0,
8027 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
8028 			NULL);
8029 	if (!btrfs_delalloc_work_cachep)
8030 		goto fail;
8031 
8032 	return 0;
8033 fail:
8034 	btrfs_destroy_cachep();
8035 	return -ENOMEM;
8036 }
8037 
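/*
 * stat() for btrfs: report the per-subvolume anonymous device and fold
 * outstanding delalloc bytes into st_blocks so dirty data that hasn't
 * been written out yet is still accounted for.
 */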
8038 static int btrfs_getattr(struct vfsmount *mnt,
8039 			 struct dentry *dentry, struct kstat *stat)
8040 {
8041 	u64 delalloc_bytes;
8042 	struct inode *inode = dentry->d_inode;
8043 	u32 blocksize = inode->i_sb->s_blocksize;
8044 
8045 	generic_fillattr(inode, stat);
8046 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8047 	stat->blksize = PAGE_CACHE_SIZE;
8048 
8049 	spin_lock(&BTRFS_I(inode)->lock);
8050 	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
8051 	spin_unlock(&BTRFS_I(inode)->lock);
8052 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
8053 			ALIGN(delalloc_bytes, blocksize)) >> 9;
8054 	return 0;
8055 }
8056 
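/*
 * Rename unlinks the old name (and the target inode, if one exists)
 * and links the inode back in under the new name, all in one
 * transaction.  Cross-subvolume renames are rejected with -EXDEV, and
 * renaming a subvolume root forces a full log commit.
 */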
8057 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
8058 			   struct inode *new_dir, struct dentry *new_dentry)
8059 {
8060 	struct btrfs_trans_handle *trans;
8061 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8062 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8063 	struct inode *new_inode = new_dentry->d_inode;
8064 	struct inode *old_inode = old_dentry->d_inode;
8065 	struct timespec ctime = CURRENT_TIME;
8066 	u64 index = 0;
8067 	u64 root_objectid;
8068 	int ret;
8069 	u64 old_ino = btrfs_ino(old_inode);
8070 
8071 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8072 		return -EPERM;
8073 
8074 	/* we only allow rename subvolume link between subvolumes */
8075 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8076 		return -EXDEV;
8077 
8078 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8079 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
8080 		return -ENOTEMPTY;
8081 
8082 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8083 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8084 		return -ENOTEMPTY;
8085 
8086 
8087 	/* check for collisions, even if the name isn't there */
8088 	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
8089 			     new_dentry->d_name.name,
8090 			     new_dentry->d_name.len);
8091 
8092 	if (ret) {
8093 		if (ret == -EEXIST) {
8094 			/* we shouldn't get -EEXIST
8095 			 * without a new_inode */
8096 			if (!new_inode) {
8097 				WARN_ON(1);
8098 				return ret;
8099 			}
8100 		} else {
8101 			/* maybe -EOVERFLOW */
8102 			return ret;
8103 		}
8104 	}
8105 	ret = 0;
8106 
8107 	/*
8108 	 * we're using rename to replace one file with another,
8109 	 * and the replacement file is large.  Start IO on it now so
8110 	 * we don't add too much work to the end of the transaction.
8111 	 */
8112 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
8113 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
8114 		filemap_flush(old_inode->i_mapping);
8115 
8116 	/* close the racy window with snapshot create/destroy ioctl */
8117 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8118 		down_read(&root->fs_info->subvol_sem);
8119 	/*
8120 	 * We want to reserve the absolute worst case amount of items.  So if
8121 	 * both inodes are subvols and we need to unlink them then that would
8122 	 * require 4 item modifications, but if they are both normal inodes it
8123 	 * would require 5 item modifications, so we'll assume they're normal
8124 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
8125 	 * should cover the worst case number of items we'll modify.
8126 	 */
8127 	trans = btrfs_start_transaction(root, 20);
8128 	if (IS_ERR(trans)) {
8129 		ret = PTR_ERR(trans);
8130 		goto out_notrans;
8131 	}
8132 
8133 	if (dest != root)
8134 		btrfs_record_root_in_trans(trans, dest);
8135 
8136 	ret = btrfs_set_inode_index(new_dir, &index);
8137 	if (ret)
8138 		goto out_fail;
8139 
8140 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8141 		/* force full log commit if subvolume involved. */
8142 		root->fs_info->last_trans_log_full_commit = trans->transid;
8143 	} else {
8144 		ret = btrfs_insert_inode_ref(trans, dest,
8145 					     new_dentry->d_name.name,
8146 					     new_dentry->d_name.len,
8147 					     old_ino,
8148 					     btrfs_ino(new_dir), index);
8149 		if (ret)
8150 			goto out_fail;
8151 		/*
8152 		 * this is an ugly little race, but the rename is required
8153 		 * to make sure that if we crash, the inode is either at the
8154 		 * old name or the new one.  pinning the log transaction lets
8155 		 * us make sure we don't allow a log commit to come in after
8156 		 * we unlink the name but before we add the new name back in.
8157 		 */
8158 		btrfs_pin_log_trans(root);
8159 	}
8160 	/*
8161 	 * make sure the inode gets flushed if it is replacing
8162 	 * something.
8163 	 */
8164 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
8165 		btrfs_add_ordered_operation(trans, root, old_inode);
8166 
8167 	inode_inc_iversion(old_dir);
8168 	inode_inc_iversion(new_dir);
8169 	inode_inc_iversion(old_inode);
8170 	old_dir->i_ctime = old_dir->i_mtime = ctime;
8171 	new_dir->i_ctime = new_dir->i_mtime = ctime;
8172 	old_inode->i_ctime = ctime;
8173 
8174 	if (old_dentry->d_parent != new_dentry->d_parent)
8175 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
8176 
8177 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8178 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
8179 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
8180 					old_dentry->d_name.name,
8181 					old_dentry->d_name.len);
8182 	} else {
8183 		ret = __btrfs_unlink_inode(trans, root, old_dir,
8184 					old_dentry->d_inode,
8185 					old_dentry->d_name.name,
8186 					old_dentry->d_name.len);
8187 		if (!ret)
8188 			ret = btrfs_update_inode(trans, root, old_inode);
8189 	}
8190 	if (ret) {
8191 		btrfs_abort_transaction(trans, root, ret);
8192 		goto out_fail;
8193 	}
8194 
8195 	if (new_inode) {
8196 		inode_inc_iversion(new_inode);
8197 		new_inode->i_ctime = CURRENT_TIME;
8198 		if (unlikely(btrfs_ino(new_inode) ==
8199 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8200 			root_objectid = BTRFS_I(new_inode)->location.objectid;
8201 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
8202 						root_objectid,
8203 						new_dentry->d_name.name,
8204 						new_dentry->d_name.len);
8205 			BUG_ON(new_inode->i_nlink == 0);
8206 		} else {
8207 			ret = btrfs_unlink_inode(trans, dest, new_dir,
8208 						 new_dentry->d_inode,
8209 						 new_dentry->d_name.name,
8210 						 new_dentry->d_name.len);
8211 		}
8212 		if (!ret && new_inode->i_nlink == 0) {
8213 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
8214 			BUG_ON(ret);
8215 		}
8216 		if (ret) {
8217 			btrfs_abort_transaction(trans, root, ret);
8218 			goto out_fail;
8219 		}
8220 	}
8221 
8222 	ret = btrfs_add_link(trans, new_dir, old_inode,
8223 			     new_dentry->d_name.name,
8224 			     new_dentry->d_name.len, 0, index);
8225 	if (ret) {
8226 		btrfs_abort_transaction(trans, root, ret);
8227 		goto out_fail;
8228 	}
8229 
8230 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8231 		struct dentry *parent = new_dentry->d_parent;
8232 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
8233 		btrfs_end_log_trans(root);
8234 	}
8235 out_fail:
8236 	btrfs_end_transaction(trans, root);
8237 out_notrans:
8238 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8239 		up_read(&root->fs_info->subvol_sem);
8240 
8241 	return ret;
8242 }
8243 
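/*
 * Worker body for async delalloc flushing: flush (or flush and wait
 * on) one inode's dirty pages, then drop the inode reference the work
 * item holds.
 */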
8244 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8245 {
8246 	struct btrfs_delalloc_work *delalloc_work;
8247 
8248 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8249 				     work);
8250 	if (delalloc_work->wait)
8251 		btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
8252 	else
8253 		filemap_flush(delalloc_work->inode->i_mapping);
8254 
8255 	if (delalloc_work->delay_iput)
8256 		btrfs_add_delayed_iput(delalloc_work->inode);
8257 	else
8258 		iput(delalloc_work->inode);
8259 	complete(&delalloc_work->completion);
8260 }
8261 
8262 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8263 						    int wait, int delay_iput)
8264 {
8265 	struct btrfs_delalloc_work *work;
8266 
8267 	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8268 	if (!work)
8269 		return NULL;
8270 
8271 	init_completion(&work->completion);
8272 	INIT_LIST_HEAD(&work->list);
8273 	work->inode = inode;
8274 	work->wait = wait;
8275 	work->delay_iput = delay_iput;
8276 	work->work.func = btrfs_run_delalloc_work;
8277 
8278 	return work;
8279 }
8280 
8281 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
8282 {
8283 	wait_for_completion(&work->completion);
8284 	kmem_cache_free(btrfs_delalloc_work_cachep, work);
8285 }
8286 
8287 /*
8288  * some fairly slow code that needs optimization. This walks the list
8289  * of all the inodes with pending delalloc and forces them to disk.
8290  */
8291 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8292 {
8293 	struct btrfs_inode *binode;
8294 	struct inode *inode;
8295 	struct btrfs_delalloc_work *work, *next;
8296 	struct list_head works;
8297 	struct list_head splice;
8298 	int ret = 0;
8299 
8300 	if (root->fs_info->sb->s_flags & MS_RDONLY)
8301 		return -EROFS;
8302 
8303 	INIT_LIST_HEAD(&works);
8304 	INIT_LIST_HEAD(&splice);
8305 
8306 	spin_lock(&root->fs_info->delalloc_lock);
8307 	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
8308 	while (!list_empty(&splice)) {
8309 		binode = list_entry(splice.next, struct btrfs_inode,
8310 				    delalloc_inodes);
8311 
8312 		list_del_init(&binode->delalloc_inodes);
8313 
8314 		inode = igrab(&binode->vfs_inode);
8315 		if (!inode) {
8316 			clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
8317 				  &binode->runtime_flags);
8318 			continue;
8319 		}
8320 
8321 		list_add_tail(&binode->delalloc_inodes,
8322 			      &root->fs_info->delalloc_inodes);
8323 		spin_unlock(&root->fs_info->delalloc_lock);
8324 
8325 		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8326 		if (unlikely(!work)) {
8327 			ret = -ENOMEM;
8328 			goto out;
8329 		}
8330 		list_add_tail(&work->list, &works);
8331 		btrfs_queue_worker(&root->fs_info->flush_workers,
8332 				   &work->work);
8333 
8334 		cond_resched();
8335 		spin_lock(&root->fs_info->delalloc_lock);
8336 	}
8337 	spin_unlock(&root->fs_info->delalloc_lock);
8338 
8339 	list_for_each_entry_safe(work, next, &works, list) {
8340 		list_del_init(&work->list);
8341 		btrfs_wait_and_free_delalloc_work(work);
8342 	}
8343 
8344 	/* the filemap_flush will queue IO into the worker threads, but
8345 	 * we have to make sure the IO is actually started and that
8346 	 * ordered extents get created before we return
8347 	 */
8348 	atomic_inc(&root->fs_info->async_submit_draining);
8349 	while (atomic_read(&root->fs_info->nr_async_submits) ||
8350 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
8351 		wait_event(root->fs_info->async_submit_wait,
8352 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
8353 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
8354 	}
8355 	atomic_dec(&root->fs_info->async_submit_draining);
8356 	return 0;
8357 out:
8358 	list_for_each_entry_safe(work, next, &works, list) {
8359 		list_del_init(&work->list);
8360 		btrfs_wait_and_free_delalloc_work(work);
8361 	}
8362 
8363 	if (!list_empty_careful(&splice)) {
8364 		spin_lock(&root->fs_info->delalloc_lock);
8365 		list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
8366 		spin_unlock(&root->fs_info->delalloc_lock);
8367 	}
8368 	return ret;
8369 }
8370 
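/*
 * The symlink target is stored as a single inline file extent, so it
 * is limited to what fits in one leaf (BTRFS_MAX_INLINE_DATA_SIZE).
 */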
8371 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8372 			 const char *symname)
8373 {
8374 	struct btrfs_trans_handle *trans;
8375 	struct btrfs_root *root = BTRFS_I(dir)->root;
8376 	struct btrfs_path *path;
8377 	struct btrfs_key key;
8378 	struct inode *inode = NULL;
8379 	int err;
8380 	int drop_inode = 0;
8381 	u64 objectid;
8382 	u64 index = 0;
8383 	int name_len;
8384 	int datasize;
8385 	unsigned long ptr;
8386 	struct btrfs_file_extent_item *ei;
8387 	struct extent_buffer *leaf;
8388 
8389 	name_len = strlen(symname) + 1;
8390 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
8391 		return -ENAMETOOLONG;
8392 
8393 	/*
8394 	 * 2 items for inode item and ref
8395 	 * 2 items for dir items
8396 	 * 1 item for xattr if selinux is on
8397 	 */
8398 	trans = btrfs_start_transaction(root, 5);
8399 	if (IS_ERR(trans))
8400 		return PTR_ERR(trans);
8401 
8402 	err = btrfs_find_free_ino(root, &objectid);
8403 	if (err)
8404 		goto out_unlock;
8405 
8406 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
8407 				dentry->d_name.len, btrfs_ino(dir), objectid,
8408 				S_IFLNK|S_IRWXUGO, &index);
8409 	if (IS_ERR(inode)) {
8410 		err = PTR_ERR(inode);
8411 		goto out_unlock;
8412 	}
8413 
8414 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
8415 	if (err) {
8416 		drop_inode = 1;
8417 		goto out_unlock;
8418 	}
8419 
8420 	/*
8421 	 * If the active LSM wants to access the inode during
8422 	 * d_instantiate it needs these. Smack checks to see
8423 	 * if the filesystem supports xattrs by looking at the
8424 	 * ops vector.
8425 	 */
8426 	inode->i_fop = &btrfs_file_operations;
8427 	inode->i_op = &btrfs_file_inode_operations;
8428 
8429 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
8430 	if (err)
8431 		drop_inode = 1;
8432 	else {
8433 		inode->i_mapping->a_ops = &btrfs_aops;
8434 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8435 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
8436 	}
8437 	if (drop_inode)
8438 		goto out_unlock;
8439 
8440 	path = btrfs_alloc_path();
8441 	if (!path) {
8442 		err = -ENOMEM;
8443 		drop_inode = 1;
8444 		goto out_unlock;
8445 	}
8446 	key.objectid = btrfs_ino(inode);
8447 	key.offset = 0;
8448 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
8449 	datasize = btrfs_file_extent_calc_inline_size(name_len);
8450 	err = btrfs_insert_empty_item(trans, root, path, &key,
8451 				      datasize);
8452 	if (err) {
8453 		drop_inode = 1;
8454 		btrfs_free_path(path);
8455 		goto out_unlock;
8456 	}
8457 	leaf = path->nodes[0];
8458 	ei = btrfs_item_ptr(leaf, path->slots[0],
8459 			    struct btrfs_file_extent_item);
8460 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8461 	btrfs_set_file_extent_type(leaf, ei,
8462 				   BTRFS_FILE_EXTENT_INLINE);
8463 	btrfs_set_file_extent_encryption(leaf, ei, 0);
8464 	btrfs_set_file_extent_compression(leaf, ei, 0);
8465 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8466 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8467 
8468 	ptr = btrfs_file_extent_inline_start(ei);
8469 	write_extent_buffer(leaf, symname, ptr, name_len);
8470 	btrfs_mark_buffer_dirty(leaf);
8471 	btrfs_free_path(path);
8472 
8473 	inode->i_op = &btrfs_symlink_inode_operations;
8474 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
8475 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8476 	inode_set_bytes(inode, name_len);
8477 	btrfs_i_size_write(inode, name_len - 1);
8478 	err = btrfs_update_inode(trans, root, inode);
8479 	if (err)
8480 		drop_inode = 1;
8481 
8482 out_unlock:
8483 	if (!err)
8484 		d_instantiate(dentry, inode);
8485 	btrfs_end_transaction(trans, root);
8486 	if (drop_inode) {
8487 		inode_dec_link_count(inode);
8488 		iput(inode);
8489 	}
8490 	btrfs_btree_balance_dirty(root);
8491 	return err;
8492 }
8493 
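/*
 * Worker for fallocate(): reserve extents for the range in chunks of
 * at most 256MB, inserting a PREALLOC file extent item and a matching
 * extent map for each chunk.  If the caller didn't pass a transaction
 * handle, a fresh transaction is started and ended per iteration.
 */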
8494 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8495 				       u64 start, u64 num_bytes, u64 min_size,
8496 				       loff_t actual_len, u64 *alloc_hint,
8497 				       struct btrfs_trans_handle *trans)
8498 {
8499 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8500 	struct extent_map *em;
8501 	struct btrfs_root *root = BTRFS_I(inode)->root;
8502 	struct btrfs_key ins;
8503 	u64 cur_offset = start;
8504 	u64 i_size;
8505 	int ret = 0;
8506 	bool own_trans = true;
8507 
8508 	if (trans)
8509 		own_trans = false;
8510 	while (num_bytes > 0) {
8511 		if (own_trans) {
8512 			trans = btrfs_start_transaction(root, 3);
8513 			if (IS_ERR(trans)) {
8514 				ret = PTR_ERR(trans);
8515 				break;
8516 			}
8517 		}
8518 
8519 		ret = btrfs_reserve_extent(trans, root,
8520 					   min(num_bytes, 256ULL * 1024 * 1024),
8521 					   min_size, 0, *alloc_hint, &ins, 1);
8522 		if (ret) {
8523 			if (own_trans)
8524 				btrfs_end_transaction(trans, root);
8525 			break;
8526 		}
8527 
8528 		ret = insert_reserved_file_extent(trans, inode,
8529 						  cur_offset, ins.objectid,
8530 						  ins.offset, ins.offset,
8531 						  ins.offset, 0, 0, 0,
8532 						  BTRFS_FILE_EXTENT_PREALLOC);
8533 		if (ret) {
8534 			btrfs_abort_transaction(trans, root, ret);
8535 			if (own_trans)
8536 				btrfs_end_transaction(trans, root);
8537 			break;
8538 		}
8539 		btrfs_drop_extent_cache(inode, cur_offset,
8540 					cur_offset + ins.offset - 1, 0);
8541 
8542 		em = alloc_extent_map();
8543 		if (!em) {
8544 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
8545 				&BTRFS_I(inode)->runtime_flags);
8546 			goto next;
8547 		}
8548 
8549 		em->start = cur_offset;
8550 		em->orig_start = cur_offset;
8551 		em->len = ins.offset;
8552 		em->block_start = ins.objectid;
8553 		em->block_len = ins.offset;
8554 		em->orig_block_len = ins.offset;
8555 		em->bdev = root->fs_info->fs_devices->latest_bdev;
8556 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
8557 		em->generation = trans->transid;
8558 
8559 		while (1) {
8560 			write_lock(&em_tree->lock);
8561 			ret = add_extent_mapping(em_tree, em);
8562 			if (!ret)
8563 				list_move(&em->list,
8564 					  &em_tree->modified_extents);
8565 			write_unlock(&em_tree->lock);
8566 			if (ret != -EEXIST)
8567 				break;
8568 			btrfs_drop_extent_cache(inode, cur_offset,
8569 						cur_offset + ins.offset - 1,
8570 						0);
8571 		}
8572 		free_extent_map(em);
8573 next:
8574 		num_bytes -= ins.offset;
8575 		cur_offset += ins.offset;
8576 		*alloc_hint = ins.objectid + ins.offset;
8577 
8578 		inode_inc_iversion(inode);
8579 		inode->i_ctime = CURRENT_TIME;
8580 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8581 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8582 		    (actual_len > inode->i_size) &&
8583 		    (cur_offset > inode->i_size)) {
8584 			if (cur_offset > actual_len)
8585 				i_size = actual_len;
8586 			else
8587 				i_size = cur_offset;
8588 			i_size_write(inode, i_size);
8589 			btrfs_ordered_update_i_size(inode, i_size, NULL);
8590 		}
8591 
8592 		ret = btrfs_update_inode(trans, root, inode);
8593 
8594 		if (ret) {
8595 			btrfs_abort_transaction(trans, root, ret);
8596 			if (own_trans)
8597 				btrfs_end_transaction(trans, root);
8598 			break;
8599 		}
8600 
8601 		if (own_trans)
8602 			btrfs_end_transaction(trans, root);
8603 	}
8604 	return ret;
8605 }
8606 
8607 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8608 			      u64 start, u64 num_bytes, u64 min_size,
8609 			      loff_t actual_len, u64 *alloc_hint)
8610 {
8611 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8612 					   min_size, actual_len, alloc_hint,
8613 					   NULL);
8614 }
8615 
8616 int btrfs_prealloc_file_range_trans(struct inode *inode,
8617 				    struct btrfs_trans_handle *trans, int mode,
8618 				    u64 start, u64 num_bytes, u64 min_size,
8619 				    loff_t actual_len, u64 *alloc_hint)
8620 {
8621 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8622 					   min_size, actual_len, alloc_hint, trans);
8623 }
8624 
8625 static int btrfs_set_page_dirty(struct page *page)
8626 {
8627 	return __set_page_dirty_nobuffers(page);
8628 }
8629 
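/*
 * Deny write access to read-only subvolumes and to inodes carrying the
 * read-only flag before falling back to the generic permission checks.
 */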
8630 static int btrfs_permission(struct inode *inode, int mask)
8631 {
8632 	struct btrfs_root *root = BTRFS_I(inode)->root;
8633 	umode_t mode = inode->i_mode;
8634 
8635 	if (mask & MAY_WRITE &&
8636 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8637 		if (btrfs_root_readonly(root))
8638 			return -EROFS;
8639 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8640 			return -EACCES;
8641 	}
8642 	return generic_permission(inode, mask);
8643 }
8644 
8645 static const struct inode_operations btrfs_dir_inode_operations = {
8646 	.getattr	= btrfs_getattr,
8647 	.lookup		= btrfs_lookup,
8648 	.create		= btrfs_create,
8649 	.unlink		= btrfs_unlink,
8650 	.link		= btrfs_link,
8651 	.mkdir		= btrfs_mkdir,
8652 	.rmdir		= btrfs_rmdir,
8653 	.rename		= btrfs_rename,
8654 	.symlink	= btrfs_symlink,
8655 	.setattr	= btrfs_setattr,
8656 	.mknod		= btrfs_mknod,
8657 	.setxattr	= btrfs_setxattr,
8658 	.getxattr	= btrfs_getxattr,
8659 	.listxattr	= btrfs_listxattr,
8660 	.removexattr	= btrfs_removexattr,
8661 	.permission	= btrfs_permission,
8662 	.get_acl	= btrfs_get_acl,
8663 };
8664 static const struct inode_operations btrfs_dir_ro_inode_operations = {
8665 	.lookup		= btrfs_lookup,
8666 	.permission	= btrfs_permission,
8667 	.get_acl	= btrfs_get_acl,
8668 };
8669 
8670 static const struct file_operations btrfs_dir_file_operations = {
8671 	.llseek		= generic_file_llseek,
8672 	.read		= generic_read_dir,
8673 	.readdir	= btrfs_real_readdir,
8674 	.unlocked_ioctl	= btrfs_ioctl,
8675 #ifdef CONFIG_COMPAT
8676 	.compat_ioctl	= btrfs_ioctl,
8677 #endif
8678 	.release        = btrfs_release_file,
8679 	.fsync		= btrfs_sync_file,
8680 };
8681 
8682 static struct extent_io_ops btrfs_extent_io_ops = {
8683 	.fill_delalloc = run_delalloc_range,
8684 	.submit_bio_hook = btrfs_submit_bio_hook,
8685 	.merge_bio_hook = btrfs_merge_bio_hook,
8686 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
8687 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
8688 	.writepage_start_hook = btrfs_writepage_start_hook,
8689 	.set_bit_hook = btrfs_set_bit_hook,
8690 	.clear_bit_hook = btrfs_clear_bit_hook,
8691 	.merge_extent_hook = btrfs_merge_extent_hook,
8692 	.split_extent_hook = btrfs_split_extent_hook,
8693 };
8694 
8695 /*
8696  * btrfs doesn't support the bmap operation because swapfiles
8697  * use bmap to make a mapping of extents in the file.  They assume
8698  * these extents won't change over the life of the file and they
8699  * use the bmap result to do IO directly to the drive.
8700  *
8701  * the btrfs bmap call would return logical addresses that aren't
8702  * suitable for IO and they also will change frequently as COW
8703  * operations happen.  So, swapfile + btrfs == corruption.
8704  *
8705  * For now we're avoiding this by dropping bmap.
8706  */
8707 static const struct address_space_operations btrfs_aops = {
8708 	.readpage	= btrfs_readpage,
8709 	.writepage	= btrfs_writepage,
8710 	.writepages	= btrfs_writepages,
8711 	.readpages	= btrfs_readpages,
8712 	.direct_IO	= btrfs_direct_IO,
8713 	.invalidatepage = btrfs_invalidatepage,
8714 	.releasepage	= btrfs_releasepage,
8715 	.set_page_dirty	= btrfs_set_page_dirty,
8716 	.error_remove_page = generic_error_remove_page,
8717 };
8718 
8719 static const struct address_space_operations btrfs_symlink_aops = {
8720 	.readpage	= btrfs_readpage,
8721 	.writepage	= btrfs_writepage,
8722 	.invalidatepage = btrfs_invalidatepage,
8723 	.releasepage	= btrfs_releasepage,
8724 };
8725 
8726 static const struct inode_operations btrfs_file_inode_operations = {
8727 	.getattr	= btrfs_getattr,
8728 	.setattr	= btrfs_setattr,
8729 	.setxattr	= btrfs_setxattr,
8730 	.getxattr	= btrfs_getxattr,
8731 	.listxattr      = btrfs_listxattr,
8732 	.removexattr	= btrfs_removexattr,
8733 	.permission	= btrfs_permission,
8734 	.fiemap		= btrfs_fiemap,
8735 	.get_acl	= btrfs_get_acl,
8736 	.update_time	= btrfs_update_time,
8737 };
8738 static const struct inode_operations btrfs_special_inode_operations = {
8739 	.getattr	= btrfs_getattr,
8740 	.setattr	= btrfs_setattr,
8741 	.permission	= btrfs_permission,
8742 	.setxattr	= btrfs_setxattr,
8743 	.getxattr	= btrfs_getxattr,
8744 	.listxattr	= btrfs_listxattr,
8745 	.removexattr	= btrfs_removexattr,
8746 	.get_acl	= btrfs_get_acl,
8747 	.update_time	= btrfs_update_time,
8748 };
8749 static const struct inode_operations btrfs_symlink_inode_operations = {
8750 	.readlink	= generic_readlink,
8751 	.follow_link	= page_follow_link_light,
8752 	.put_link	= page_put_link,
8753 	.getattr	= btrfs_getattr,
8754 	.setattr	= btrfs_setattr,
8755 	.permission	= btrfs_permission,
8756 	.setxattr	= btrfs_setxattr,
8757 	.getxattr	= btrfs_getxattr,
8758 	.listxattr	= btrfs_listxattr,
8759 	.removexattr	= btrfs_removexattr,
8760 	.get_acl	= btrfs_get_acl,
8761 	.update_time	= btrfs_update_time,
8762 };
8763 
8764 const struct dentry_operations btrfs_dentry_operations = {
8765 	.d_delete	= btrfs_dentry_delete,
8766 	.d_release	= btrfs_dentry_release,
8767 };
8768