xref: /openbmc/linux/fs/btrfs/inode.c (revision b6bec26c)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include <linux/ratelimit.h>
41 #include <linux/mount.h>
42 #include "compat.h"
43 #include "ctree.h"
44 #include "disk-io.h"
45 #include "transaction.h"
46 #include "btrfs_inode.h"
47 #include "ioctl.h"
48 #include "print-tree.h"
49 #include "ordered-data.h"
50 #include "xattr.h"
51 #include "tree-log.h"
52 #include "volumes.h"
53 #include "compression.h"
54 #include "locking.h"
55 #include "free-space-cache.h"
56 #include "inode-map.h"
57 
58 struct btrfs_iget_args {
59 	u64 ino;
60 	struct btrfs_root *root;
61 };
62 
63 static const struct inode_operations btrfs_dir_inode_operations;
64 static const struct inode_operations btrfs_symlink_inode_operations;
65 static const struct inode_operations btrfs_dir_ro_inode_operations;
66 static const struct inode_operations btrfs_special_inode_operations;
67 static const struct inode_operations btrfs_file_inode_operations;
68 static const struct address_space_operations btrfs_aops;
69 static const struct address_space_operations btrfs_symlink_aops;
70 static const struct file_operations btrfs_dir_file_operations;
71 static struct extent_io_ops btrfs_extent_io_ops;
72 
73 static struct kmem_cache *btrfs_inode_cachep;
74 static struct kmem_cache *btrfs_delalloc_work_cachep;
75 struct kmem_cache *btrfs_trans_handle_cachep;
76 struct kmem_cache *btrfs_transaction_cachep;
77 struct kmem_cache *btrfs_path_cachep;
78 struct kmem_cache *btrfs_free_space_cachep;
79 
80 #define S_SHIFT 12
81 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
82 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
83 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
84 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
85 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
86 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
87 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
88 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
89 };
90 
91 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
92 static int btrfs_truncate(struct inode *inode);
93 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
94 static noinline int cow_file_range(struct inode *inode,
95 				   struct page *locked_page,
96 				   u64 start, u64 end, int *page_started,
97 				   unsigned long *nr_written, int unlock);
98 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
99 					   u64 len, u64 orig_start,
100 					   u64 block_start, u64 block_len,
101 					   u64 orig_block_len, int type);
102 
103 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
104 				     struct inode *inode,  struct inode *dir,
105 				     const struct qstr *qstr)
106 {
107 	int err;
108 
109 	err = btrfs_init_acl(trans, inode, dir);
110 	if (!err)
111 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
112 	return err;
113 }
114 
115 /*
116  * this does all the hard work for inserting an inline extent into
117  * the btree.  The caller should have done a btrfs_drop_extents so that
118  * no overlapping inline items exist in the btree
119  */
120 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
121 				struct btrfs_root *root, struct inode *inode,
122 				u64 start, size_t size, size_t compressed_size,
123 				int compress_type,
124 				struct page **compressed_pages)
125 {
126 	struct btrfs_key key;
127 	struct btrfs_path *path;
128 	struct extent_buffer *leaf;
129 	struct page *page = NULL;
130 	char *kaddr;
131 	unsigned long ptr;
132 	struct btrfs_file_extent_item *ei;
133 	int err = 0;
134 	int ret;
135 	size_t cur_size = size;
136 	size_t datasize;
137 	unsigned long offset;
138 
139 	if (compressed_size && compressed_pages)
140 		cur_size = compressed_size;
141 
142 	path = btrfs_alloc_path();
143 	if (!path)
144 		return -ENOMEM;
145 
146 	path->leave_spinning = 1;
147 
148 	key.objectid = btrfs_ino(inode);
149 	key.offset = start;
150 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
151 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
152 
153 	inode_add_bytes(inode, size);
154 	ret = btrfs_insert_empty_item(trans, root, path, &key,
155 				      datasize);
156 	if (ret) {
157 		err = ret;
158 		goto fail;
159 	}
160 	leaf = path->nodes[0];
161 	ei = btrfs_item_ptr(leaf, path->slots[0],
162 			    struct btrfs_file_extent_item);
163 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
164 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
165 	btrfs_set_file_extent_encryption(leaf, ei, 0);
166 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
167 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
168 	ptr = btrfs_file_extent_inline_start(ei);
169 
170 	if (compress_type != BTRFS_COMPRESS_NONE) {
171 		struct page *cpage;
172 		int i = 0;
173 		while (compressed_size > 0) {
174 			cpage = compressed_pages[i];
175 			cur_size = min_t(unsigned long, compressed_size,
176 				       PAGE_CACHE_SIZE);
177 
178 			kaddr = kmap_atomic(cpage);
179 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
180 			kunmap_atomic(kaddr);
181 
182 			i++;
183 			ptr += cur_size;
184 			compressed_size -= cur_size;
185 		}
186 		btrfs_set_file_extent_compression(leaf, ei,
187 						  compress_type);
188 	} else {
189 		page = find_get_page(inode->i_mapping,
190 				     start >> PAGE_CACHE_SHIFT);
191 		btrfs_set_file_extent_compression(leaf, ei, 0);
192 		kaddr = kmap_atomic(page);
193 		offset = start & (PAGE_CACHE_SIZE - 1);
194 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
195 		kunmap_atomic(kaddr);
196 		page_cache_release(page);
197 	}
198 	btrfs_mark_buffer_dirty(leaf);
199 	btrfs_free_path(path);
200 
201 	/*
202 	 * we're an inline extent, so nobody can
203 	 * extend the file past i_size without locking
204 	 * a page we already have locked.
205 	 *
206 	 * We must do any isize and inode updates
207 	 * before we unlock the pages.  Otherwise we
208 	 * could end up racing with unlink.
209 	 */
210 	BTRFS_I(inode)->disk_i_size = inode->i_size;
211 	ret = btrfs_update_inode(trans, root, inode);
212 
213 	return ret;
214 fail:
215 	btrfs_free_path(path);
216 	return err;
217 }
218 
219 
220 /*
221  * conditionally insert an inline extent into the file.  This
222  * does the checks required to make sure the data is small enough
223  * to fit as an inline extent.
224  */
225 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
226 				 struct btrfs_root *root,
227 				 struct inode *inode, u64 start, u64 end,
228 				 size_t compressed_size, int compress_type,
229 				 struct page **compressed_pages)
230 {
231 	u64 isize = i_size_read(inode);
232 	u64 actual_end = min(end + 1, isize);
233 	u64 inline_len = actual_end - start;
234 	u64 aligned_end = (end + root->sectorsize - 1) &
235 			~((u64)root->sectorsize - 1);
236 	u64 data_len = inline_len;
237 	int ret;
238 
239 	if (compressed_size)
240 		data_len = compressed_size;
241 
242 	if (start > 0 ||
243 	    actual_end >= PAGE_CACHE_SIZE ||
244 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
245 	    (!compressed_size &&
246 	    (actual_end & (root->sectorsize - 1)) == 0) ||
247 	    end + 1 < isize ||
248 	    data_len > root->fs_info->max_inline) {
249 		return 1;
250 	}
251 
252 	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
253 	if (ret)
254 		return ret;
255 
256 	if (isize > actual_end)
257 		inline_len = min_t(u64, isize, actual_end);
258 	ret = insert_inline_extent(trans, root, inode, start,
259 				   inline_len, compressed_size,
260 				   compress_type, compressed_pages);
261 	if (ret && ret != -ENOSPC) {
262 		btrfs_abort_transaction(trans, root, ret);
263 		return ret;
264 	} else if (ret == -ENOSPC) {
265 		return 1;
266 	}
267 
268 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
269 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
270 	return 0;
271 }
272 
273 struct async_extent {
274 	u64 start;
275 	u64 ram_size;
276 	u64 compressed_size;
277 	struct page **pages;
278 	unsigned long nr_pages;
279 	int compress_type;
280 	struct list_head list;
281 };
282 
283 struct async_cow {
284 	struct inode *inode;
285 	struct btrfs_root *root;
286 	struct page *locked_page;
287 	u64 start;
288 	u64 end;
289 	struct list_head extents;
290 	struct btrfs_work work;
291 };
292 
293 static noinline int add_async_extent(struct async_cow *cow,
294 				     u64 start, u64 ram_size,
295 				     u64 compressed_size,
296 				     struct page **pages,
297 				     unsigned long nr_pages,
298 				     int compress_type)
299 {
300 	struct async_extent *async_extent;
301 
302 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
303 	BUG_ON(!async_extent); /* -ENOMEM */
304 	async_extent->start = start;
305 	async_extent->ram_size = ram_size;
306 	async_extent->compressed_size = compressed_size;
307 	async_extent->pages = pages;
308 	async_extent->nr_pages = nr_pages;
309 	async_extent->compress_type = compress_type;
310 	list_add_tail(&async_extent->list, &cow->extents);
311 	return 0;
312 }
313 
314 /*
315  * we create compressed extents in two phases.  The first
316  * phase compresses a range of pages that have already been
317  * locked (both pages and state bits are locked).
318  *
319  * This is done inside an ordered work queue, and the compression
320  * is spread across many cpus.  The actual IO submission is step
321  * two, and the ordered work queue takes care of making sure that
322  * happens in the same order things were put onto the queue by
323  * writepages and friends.
324  *
325  * If this code finds it can't get good compression, it puts an
326  * entry onto the work queue to write the uncompressed bytes.  This
327  * makes sure that both compressed inodes and uncompressed inodes
328  * are written in the same order that the flusher thread sent them
329  * down.
330  */
331 static noinline int compress_file_range(struct inode *inode,
332 					struct page *locked_page,
333 					u64 start, u64 end,
334 					struct async_cow *async_cow,
335 					int *num_added)
336 {
337 	struct btrfs_root *root = BTRFS_I(inode)->root;
338 	struct btrfs_trans_handle *trans;
339 	u64 num_bytes;
340 	u64 blocksize = root->sectorsize;
341 	u64 actual_end;
342 	u64 isize = i_size_read(inode);
343 	int ret = 0;
344 	struct page **pages = NULL;
345 	unsigned long nr_pages;
346 	unsigned long nr_pages_ret = 0;
347 	unsigned long total_compressed = 0;
348 	unsigned long total_in = 0;
349 	unsigned long max_compressed = 128 * 1024;
350 	unsigned long max_uncompressed = 128 * 1024;
351 	int i;
352 	int will_compress;
353 	int compress_type = root->fs_info->compress_type;
354 
355 	/* if this is a small write inside eof, kick off a defrag */
356 	if ((end - start + 1) < 16 * 1024 &&
357 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
358 		btrfs_add_inode_defrag(NULL, inode);
359 
360 	actual_end = min_t(u64, isize, end + 1);
361 again:
362 	will_compress = 0;
363 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
364 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
365 
366 	/*
367 	 * we don't want to send crud past the end of i_size through
368 	 * compression, that's just a waste of CPU time.  So, if the
369 	 * end of the file is before the start of our current
370 	 * requested range of bytes, we bail out to the uncompressed
371 	 * cleanup code that can deal with all of this.
372 	 *
373 	 * It isn't really the fastest way to fix things, but this is a
374 	 * very uncommon corner.
375 	 */
376 	if (actual_end <= start)
377 		goto cleanup_and_bail_uncompressed;
378 
379 	total_compressed = actual_end - start;
380 
381 	/* we want to make sure that amount of ram required to uncompress
382 	 * an extent is reasonable, so we limit the total size in ram
383 	 * of a compressed extent to 128k.  This is a crucial number
384 	 * because it also controls how easily we can spread reads across
385 	 * cpus for decompression.
386 	 *
387 	 * We also want to make sure the amount of IO required to do
388 	 * a random read is reasonably small, so we limit the size of
389 	 * a compressed extent to 128k.
390 	 */
391 	total_compressed = min(total_compressed, max_uncompressed);
392 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
393 	num_bytes = max(blocksize,  num_bytes);
394 	total_in = 0;
395 	ret = 0;
396 
397 	/*
398 	 * we do compression for mount -o compress and when the
399 	 * inode has not been flagged as nocompress.  This flag can
400 	 * change at any time if we discover bad compression ratios.
401 	 */
402 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
403 	    (btrfs_test_opt(root, COMPRESS) ||
404 	     (BTRFS_I(inode)->force_compress) ||
405 	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
406 		WARN_ON(pages);
407 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
408 		if (!pages) {
409 			/* just bail out to the uncompressed code */
410 			goto cont;
411 		}
412 
413 		if (BTRFS_I(inode)->force_compress)
414 			compress_type = BTRFS_I(inode)->force_compress;
415 
416 		ret = btrfs_compress_pages(compress_type,
417 					   inode->i_mapping, start,
418 					   total_compressed, pages,
419 					   nr_pages, &nr_pages_ret,
420 					   &total_in,
421 					   &total_compressed,
422 					   max_compressed);
423 
424 		if (!ret) {
425 			unsigned long offset = total_compressed &
426 				(PAGE_CACHE_SIZE - 1);
427 			struct page *page = pages[nr_pages_ret - 1];
428 			char *kaddr;
429 
430 			/* zero the tail end of the last page, we might be
431 			 * sending it down to disk
432 			 */
433 			if (offset) {
434 				kaddr = kmap_atomic(page);
435 				memset(kaddr + offset, 0,
436 				       PAGE_CACHE_SIZE - offset);
437 				kunmap_atomic(kaddr);
438 			}
439 			will_compress = 1;
440 		}
441 	}
442 cont:
443 	if (start == 0) {
444 		trans = btrfs_join_transaction(root);
445 		if (IS_ERR(trans)) {
446 			ret = PTR_ERR(trans);
447 			trans = NULL;
448 			goto cleanup_and_out;
449 		}
450 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
451 
452 		/* lets try to make an inline extent */
453 		if (ret || total_in < (actual_end - start)) {
454 			/* we didn't compress the entire range, try
455 			 * to make an uncompressed inline extent.
456 			 */
457 			ret = cow_file_range_inline(trans, root, inode,
458 						    start, end, 0, 0, NULL);
459 		} else {
460 			/* try making a compressed inline extent */
461 			ret = cow_file_range_inline(trans, root, inode,
462 						    start, end,
463 						    total_compressed,
464 						    compress_type, pages);
465 		}
466 		if (ret <= 0) {
467 			/*
468 			 * inline extent creation worked or returned error,
469 			 * we don't need to create any more async work items.
470 			 * Unlock and free up our temp pages.
471 			 */
472 			extent_clear_unlock_delalloc(inode,
473 			     &BTRFS_I(inode)->io_tree,
474 			     start, end, NULL,
475 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
476 			     EXTENT_CLEAR_DELALLOC |
477 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
478 
479 			btrfs_end_transaction(trans, root);
480 			goto free_pages_out;
481 		}
482 		btrfs_end_transaction(trans, root);
483 	}
484 
485 	if (will_compress) {
486 		/*
487 		 * we aren't doing an inline extent round the compressed size
488 		 * up to a block size boundary so the allocator does sane
489 		 * things
490 		 */
491 		total_compressed = (total_compressed + blocksize - 1) &
492 			~(blocksize - 1);
493 
494 		/*
495 		 * one last check to make sure the compression is really a
496 		 * win, compare the page count read with the blocks on disk
497 		 */
498 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
499 			~(PAGE_CACHE_SIZE - 1);
500 		if (total_compressed >= total_in) {
501 			will_compress = 0;
502 		} else {
503 			num_bytes = total_in;
504 		}
505 	}
506 	if (!will_compress && pages) {
507 		/*
508 		 * the compression code ran but failed to make things smaller,
509 		 * free any pages it allocated and our page pointer array
510 		 */
511 		for (i = 0; i < nr_pages_ret; i++) {
512 			WARN_ON(pages[i]->mapping);
513 			page_cache_release(pages[i]);
514 		}
515 		kfree(pages);
516 		pages = NULL;
517 		total_compressed = 0;
518 		nr_pages_ret = 0;
519 
520 		/* flag the file so we don't compress in the future */
521 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
522 		    !(BTRFS_I(inode)->force_compress)) {
523 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
524 		}
525 	}
526 	if (will_compress) {
527 		*num_added += 1;
528 
529 		/* the async work queues will take care of doing actual
530 		 * allocation on disk for these compressed pages,
531 		 * and will submit them to the elevator.
532 		 */
533 		add_async_extent(async_cow, start, num_bytes,
534 				 total_compressed, pages, nr_pages_ret,
535 				 compress_type);
536 
537 		if (start + num_bytes < end) {
538 			start += num_bytes;
539 			pages = NULL;
540 			cond_resched();
541 			goto again;
542 		}
543 	} else {
544 cleanup_and_bail_uncompressed:
545 		/*
546 		 * No compression, but we still need to write the pages in
547 		 * the file we've been given so far.  redirty the locked
548 		 * page if it corresponds to our extent and set things up
549 		 * for the async work queue to run cow_file_range to do
550 		 * the normal delalloc dance
551 		 */
552 		if (page_offset(locked_page) >= start &&
553 		    page_offset(locked_page) <= end) {
554 			__set_page_dirty_nobuffers(locked_page);
555 			/* unlocked later on in the async handlers */
556 		}
557 		add_async_extent(async_cow, start, end - start + 1,
558 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
559 		*num_added += 1;
560 	}
561 
562 out:
563 	return ret;
564 
565 free_pages_out:
566 	for (i = 0; i < nr_pages_ret; i++) {
567 		WARN_ON(pages[i]->mapping);
568 		page_cache_release(pages[i]);
569 	}
570 	kfree(pages);
571 
572 	goto out;
573 
574 cleanup_and_out:
575 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
576 				     start, end, NULL,
577 				     EXTENT_CLEAR_UNLOCK_PAGE |
578 				     EXTENT_CLEAR_DIRTY |
579 				     EXTENT_CLEAR_DELALLOC |
580 				     EXTENT_SET_WRITEBACK |
581 				     EXTENT_END_WRITEBACK);
582 	if (!trans || IS_ERR(trans))
583 		btrfs_error(root->fs_info, ret, "Failed to join transaction");
584 	else
585 		btrfs_abort_transaction(trans, root, ret);
586 	goto free_pages_out;
587 }
588 
589 /*
590  * phase two of compressed writeback.  This is the ordered portion
591  * of the code, which only gets called in the order the work was
592  * queued.  We walk all the async extents created by compress_file_range
593  * and send them down to the disk.
594  */
595 static noinline int submit_compressed_extents(struct inode *inode,
596 					      struct async_cow *async_cow)
597 {
598 	struct async_extent *async_extent;
599 	u64 alloc_hint = 0;
600 	struct btrfs_trans_handle *trans;
601 	struct btrfs_key ins;
602 	struct extent_map *em;
603 	struct btrfs_root *root = BTRFS_I(inode)->root;
604 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
605 	struct extent_io_tree *io_tree;
606 	int ret = 0;
607 
608 	if (list_empty(&async_cow->extents))
609 		return 0;
610 
611 
612 	while (!list_empty(&async_cow->extents)) {
613 		async_extent = list_entry(async_cow->extents.next,
614 					  struct async_extent, list);
615 		list_del(&async_extent->list);
616 
617 		io_tree = &BTRFS_I(inode)->io_tree;
618 
619 retry:
620 		/* did the compression code fall back to uncompressed IO? */
621 		if (!async_extent->pages) {
622 			int page_started = 0;
623 			unsigned long nr_written = 0;
624 
625 			lock_extent(io_tree, async_extent->start,
626 					 async_extent->start +
627 					 async_extent->ram_size - 1);
628 
629 			/* allocate blocks */
630 			ret = cow_file_range(inode, async_cow->locked_page,
631 					     async_extent->start,
632 					     async_extent->start +
633 					     async_extent->ram_size - 1,
634 					     &page_started, &nr_written, 0);
635 
636 			/* JDM XXX */
637 
638 			/*
639 			 * if page_started, cow_file_range inserted an
640 			 * inline extent and took care of all the unlocking
641 			 * and IO for us.  Otherwise, we need to submit
642 			 * all those pages down to the drive.
643 			 */
644 			if (!page_started && !ret)
645 				extent_write_locked_range(io_tree,
646 						  inode, async_extent->start,
647 						  async_extent->start +
648 						  async_extent->ram_size - 1,
649 						  btrfs_get_extent,
650 						  WB_SYNC_ALL);
651 			kfree(async_extent);
652 			cond_resched();
653 			continue;
654 		}
655 
656 		lock_extent(io_tree, async_extent->start,
657 			    async_extent->start + async_extent->ram_size - 1);
658 
659 		trans = btrfs_join_transaction(root);
660 		if (IS_ERR(trans)) {
661 			ret = PTR_ERR(trans);
662 		} else {
663 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
664 			ret = btrfs_reserve_extent(trans, root,
665 					   async_extent->compressed_size,
666 					   async_extent->compressed_size,
667 					   0, alloc_hint, &ins, 1);
668 			if (ret && ret != -ENOSPC)
669 				btrfs_abort_transaction(trans, root, ret);
670 			btrfs_end_transaction(trans, root);
671 		}
672 
673 		if (ret) {
674 			int i;
675 			for (i = 0; i < async_extent->nr_pages; i++) {
676 				WARN_ON(async_extent->pages[i]->mapping);
677 				page_cache_release(async_extent->pages[i]);
678 			}
679 			kfree(async_extent->pages);
680 			async_extent->nr_pages = 0;
681 			async_extent->pages = NULL;
682 			unlock_extent(io_tree, async_extent->start,
683 				      async_extent->start +
684 				      async_extent->ram_size - 1);
685 			if (ret == -ENOSPC)
686 				goto retry;
687 			goto out_free; /* JDM: Requeue? */
688 		}
689 
690 		/*
691 		 * here we're doing allocation and writeback of the
692 		 * compressed pages
693 		 */
694 		btrfs_drop_extent_cache(inode, async_extent->start,
695 					async_extent->start +
696 					async_extent->ram_size - 1, 0);
697 
698 		em = alloc_extent_map();
699 		BUG_ON(!em); /* -ENOMEM */
700 		em->start = async_extent->start;
701 		em->len = async_extent->ram_size;
702 		em->orig_start = em->start;
703 
704 		em->block_start = ins.objectid;
705 		em->block_len = ins.offset;
706 		em->orig_block_len = ins.offset;
707 		em->bdev = root->fs_info->fs_devices->latest_bdev;
708 		em->compress_type = async_extent->compress_type;
709 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
710 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
711 		em->generation = -1;
712 
713 		while (1) {
714 			write_lock(&em_tree->lock);
715 			ret = add_extent_mapping(em_tree, em);
716 			if (!ret)
717 				list_move(&em->list,
718 					  &em_tree->modified_extents);
719 			write_unlock(&em_tree->lock);
720 			if (ret != -EEXIST) {
721 				free_extent_map(em);
722 				break;
723 			}
724 			btrfs_drop_extent_cache(inode, async_extent->start,
725 						async_extent->start +
726 						async_extent->ram_size - 1, 0);
727 		}
728 
729 		ret = btrfs_add_ordered_extent_compress(inode,
730 						async_extent->start,
731 						ins.objectid,
732 						async_extent->ram_size,
733 						ins.offset,
734 						BTRFS_ORDERED_COMPRESSED,
735 						async_extent->compress_type);
736 		BUG_ON(ret); /* -ENOMEM */
737 
738 		/*
739 		 * clear dirty, set writeback and unlock the pages.
740 		 */
741 		extent_clear_unlock_delalloc(inode,
742 				&BTRFS_I(inode)->io_tree,
743 				async_extent->start,
744 				async_extent->start +
745 				async_extent->ram_size - 1,
746 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
747 				EXTENT_CLEAR_UNLOCK |
748 				EXTENT_CLEAR_DELALLOC |
749 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
750 
751 		ret = btrfs_submit_compressed_write(inode,
752 				    async_extent->start,
753 				    async_extent->ram_size,
754 				    ins.objectid,
755 				    ins.offset, async_extent->pages,
756 				    async_extent->nr_pages);
757 
758 		BUG_ON(ret); /* -ENOMEM */
759 		alloc_hint = ins.objectid + ins.offset;
760 		kfree(async_extent);
761 		cond_resched();
762 	}
763 	ret = 0;
764 out:
765 	return ret;
766 out_free:
767 	kfree(async_extent);
768 	goto out;
769 }
770 
771 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
772 				      u64 num_bytes)
773 {
774 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
775 	struct extent_map *em;
776 	u64 alloc_hint = 0;
777 
778 	read_lock(&em_tree->lock);
779 	em = search_extent_mapping(em_tree, start, num_bytes);
780 	if (em) {
781 		/*
782 		 * if block start isn't an actual block number then find the
783 		 * first block in this inode and use that as a hint.  If that
784 		 * block is also bogus then just don't worry about it.
785 		 */
786 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
787 			free_extent_map(em);
788 			em = search_extent_mapping(em_tree, 0, 0);
789 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
790 				alloc_hint = em->block_start;
791 			if (em)
792 				free_extent_map(em);
793 		} else {
794 			alloc_hint = em->block_start;
795 			free_extent_map(em);
796 		}
797 	}
798 	read_unlock(&em_tree->lock);
799 
800 	return alloc_hint;
801 }
802 
803 /*
804  * when extent_io.c finds a delayed allocation range in the file,
805  * the call backs end up in this code.  The basic idea is to
806  * allocate extents on disk for the range, and create ordered data structs
807  * in ram to track those extents.
808  *
809  * locked_page is the page that writepage had locked already.  We use
810  * it to make sure we don't do extra locks or unlocks.
811  *
812  * *page_started is set to one if we unlock locked_page and do everything
813  * required to start IO on it.  It may be clean and already done with
814  * IO when we return.
815  */
816 static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
817 				     struct inode *inode,
818 				     struct btrfs_root *root,
819 				     struct page *locked_page,
820 				     u64 start, u64 end, int *page_started,
821 				     unsigned long *nr_written,
822 				     int unlock)
823 {
824 	u64 alloc_hint = 0;
825 	u64 num_bytes;
826 	unsigned long ram_size;
827 	u64 disk_num_bytes;
828 	u64 cur_alloc_size;
829 	u64 blocksize = root->sectorsize;
830 	struct btrfs_key ins;
831 	struct extent_map *em;
832 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
833 	int ret = 0;
834 
835 	BUG_ON(btrfs_is_free_space_inode(inode));
836 
837 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
838 	num_bytes = max(blocksize,  num_bytes);
839 	disk_num_bytes = num_bytes;
840 
841 	/* if this is a small write inside eof, kick off defrag */
842 	if (num_bytes < 64 * 1024 &&
843 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
844 		btrfs_add_inode_defrag(trans, inode);
845 
846 	if (start == 0) {
847 		/* lets try to make an inline extent */
848 		ret = cow_file_range_inline(trans, root, inode,
849 					    start, end, 0, 0, NULL);
850 		if (ret == 0) {
851 			extent_clear_unlock_delalloc(inode,
852 				     &BTRFS_I(inode)->io_tree,
853 				     start, end, NULL,
854 				     EXTENT_CLEAR_UNLOCK_PAGE |
855 				     EXTENT_CLEAR_UNLOCK |
856 				     EXTENT_CLEAR_DELALLOC |
857 				     EXTENT_CLEAR_DIRTY |
858 				     EXTENT_SET_WRITEBACK |
859 				     EXTENT_END_WRITEBACK);
860 
861 			*nr_written = *nr_written +
862 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
863 			*page_started = 1;
864 			goto out;
865 		} else if (ret < 0) {
866 			btrfs_abort_transaction(trans, root, ret);
867 			goto out_unlock;
868 		}
869 	}
870 
871 	BUG_ON(disk_num_bytes >
872 	       btrfs_super_total_bytes(root->fs_info->super_copy));
873 
874 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
875 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
876 
877 	while (disk_num_bytes > 0) {
878 		unsigned long op;
879 
880 		cur_alloc_size = disk_num_bytes;
881 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
882 					   root->sectorsize, 0, alloc_hint,
883 					   &ins, 1);
884 		if (ret < 0) {
885 			btrfs_abort_transaction(trans, root, ret);
886 			goto out_unlock;
887 		}
888 
889 		em = alloc_extent_map();
890 		BUG_ON(!em); /* -ENOMEM */
891 		em->start = start;
892 		em->orig_start = em->start;
893 		ram_size = ins.offset;
894 		em->len = ins.offset;
895 
896 		em->block_start = ins.objectid;
897 		em->block_len = ins.offset;
898 		em->orig_block_len = ins.offset;
899 		em->bdev = root->fs_info->fs_devices->latest_bdev;
900 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
901 		em->generation = -1;
902 
903 		while (1) {
904 			write_lock(&em_tree->lock);
905 			ret = add_extent_mapping(em_tree, em);
906 			if (!ret)
907 				list_move(&em->list,
908 					  &em_tree->modified_extents);
909 			write_unlock(&em_tree->lock);
910 			if (ret != -EEXIST) {
911 				free_extent_map(em);
912 				break;
913 			}
914 			btrfs_drop_extent_cache(inode, start,
915 						start + ram_size - 1, 0);
916 		}
917 
918 		cur_alloc_size = ins.offset;
919 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
920 					       ram_size, cur_alloc_size, 0);
921 		BUG_ON(ret); /* -ENOMEM */
922 
923 		if (root->root_key.objectid ==
924 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
925 			ret = btrfs_reloc_clone_csums(inode, start,
926 						      cur_alloc_size);
927 			if (ret) {
928 				btrfs_abort_transaction(trans, root, ret);
929 				goto out_unlock;
930 			}
931 		}
932 
933 		if (disk_num_bytes < cur_alloc_size)
934 			break;
935 
936 		/* we're not doing compressed IO, don't unlock the first
937 		 * page (which the caller expects to stay locked), don't
938 		 * clear any dirty bits and don't set any writeback bits
939 		 *
940 		 * Do set the Private2 bit so we know this page was properly
941 		 * setup for writepage
942 		 */
943 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
944 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
945 			EXTENT_SET_PRIVATE2;
946 
947 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
948 					     start, start + ram_size - 1,
949 					     locked_page, op);
950 		disk_num_bytes -= cur_alloc_size;
951 		num_bytes -= cur_alloc_size;
952 		alloc_hint = ins.objectid + ins.offset;
953 		start += cur_alloc_size;
954 	}
955 out:
956 	return ret;
957 
958 out_unlock:
959 	extent_clear_unlock_delalloc(inode,
960 		     &BTRFS_I(inode)->io_tree,
961 		     start, end, locked_page,
962 		     EXTENT_CLEAR_UNLOCK_PAGE |
963 		     EXTENT_CLEAR_UNLOCK |
964 		     EXTENT_CLEAR_DELALLOC |
965 		     EXTENT_CLEAR_DIRTY |
966 		     EXTENT_SET_WRITEBACK |
967 		     EXTENT_END_WRITEBACK);
968 
969 	goto out;
970 }
971 
972 static noinline int cow_file_range(struct inode *inode,
973 				   struct page *locked_page,
974 				   u64 start, u64 end, int *page_started,
975 				   unsigned long *nr_written,
976 				   int unlock)
977 {
978 	struct btrfs_trans_handle *trans;
979 	struct btrfs_root *root = BTRFS_I(inode)->root;
980 	int ret;
981 
982 	trans = btrfs_join_transaction(root);
983 	if (IS_ERR(trans)) {
984 		extent_clear_unlock_delalloc(inode,
985 			     &BTRFS_I(inode)->io_tree,
986 			     start, end, locked_page,
987 			     EXTENT_CLEAR_UNLOCK_PAGE |
988 			     EXTENT_CLEAR_UNLOCK |
989 			     EXTENT_CLEAR_DELALLOC |
990 			     EXTENT_CLEAR_DIRTY |
991 			     EXTENT_SET_WRITEBACK |
992 			     EXTENT_END_WRITEBACK);
993 		return PTR_ERR(trans);
994 	}
995 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
996 
997 	ret = __cow_file_range(trans, inode, root, locked_page, start, end,
998 			       page_started, nr_written, unlock);
999 
1000 	btrfs_end_transaction(trans, root);
1001 
1002 	return ret;
1003 }
1004 
1005 /*
1006  * work queue call back to started compression on a file and pages
1007  */
1008 static noinline void async_cow_start(struct btrfs_work *work)
1009 {
1010 	struct async_cow *async_cow;
1011 	int num_added = 0;
1012 	async_cow = container_of(work, struct async_cow, work);
1013 
1014 	compress_file_range(async_cow->inode, async_cow->locked_page,
1015 			    async_cow->start, async_cow->end, async_cow,
1016 			    &num_added);
1017 	if (num_added == 0) {
1018 		btrfs_add_delayed_iput(async_cow->inode);
1019 		async_cow->inode = NULL;
1020 	}
1021 }
1022 
1023 /*
1024  * work queue call back to submit previously compressed pages
1025  */
1026 static noinline void async_cow_submit(struct btrfs_work *work)
1027 {
1028 	struct async_cow *async_cow;
1029 	struct btrfs_root *root;
1030 	unsigned long nr_pages;
1031 
1032 	async_cow = container_of(work, struct async_cow, work);
1033 
1034 	root = async_cow->root;
1035 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1036 		PAGE_CACHE_SHIFT;
1037 
1038 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1039 	    5 * 1024 * 1024 &&
1040 	    waitqueue_active(&root->fs_info->async_submit_wait))
1041 		wake_up(&root->fs_info->async_submit_wait);
1042 
1043 	if (async_cow->inode)
1044 		submit_compressed_extents(async_cow->inode, async_cow);
1045 }
1046 
1047 static noinline void async_cow_free(struct btrfs_work *work)
1048 {
1049 	struct async_cow *async_cow;
1050 	async_cow = container_of(work, struct async_cow, work);
1051 	if (async_cow->inode)
1052 		btrfs_add_delayed_iput(async_cow->inode);
1053 	kfree(async_cow);
1054 }
1055 
1056 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1057 				u64 start, u64 end, int *page_started,
1058 				unsigned long *nr_written)
1059 {
1060 	struct async_cow *async_cow;
1061 	struct btrfs_root *root = BTRFS_I(inode)->root;
1062 	unsigned long nr_pages;
1063 	u64 cur_end;
1064 	int limit = 10 * 1024 * 1024;
1065 
1066 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1067 			 1, 0, NULL, GFP_NOFS);
1068 	while (start < end) {
1069 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1070 		BUG_ON(!async_cow); /* -ENOMEM */
1071 		async_cow->inode = igrab(inode);
1072 		async_cow->root = root;
1073 		async_cow->locked_page = locked_page;
1074 		async_cow->start = start;
1075 
1076 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
1077 			cur_end = end;
1078 		else
1079 			cur_end = min(end, start + 512 * 1024 - 1);
1080 
1081 		async_cow->end = cur_end;
1082 		INIT_LIST_HEAD(&async_cow->extents);
1083 
1084 		async_cow->work.func = async_cow_start;
1085 		async_cow->work.ordered_func = async_cow_submit;
1086 		async_cow->work.ordered_free = async_cow_free;
1087 		async_cow->work.flags = 0;
1088 
1089 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
1090 			PAGE_CACHE_SHIFT;
1091 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1092 
1093 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
1094 				   &async_cow->work);
1095 
1096 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
1097 			wait_event(root->fs_info->async_submit_wait,
1098 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
1099 			    limit));
1100 		}
1101 
1102 		while (atomic_read(&root->fs_info->async_submit_draining) &&
1103 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
1104 			wait_event(root->fs_info->async_submit_wait,
1105 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
1106 			   0));
1107 		}
1108 
1109 		*nr_written += nr_pages;
1110 		start = cur_end + 1;
1111 	}
1112 	*page_started = 1;
1113 	return 0;
1114 }
1115 
1116 static noinline int csum_exist_in_range(struct btrfs_root *root,
1117 					u64 bytenr, u64 num_bytes)
1118 {
1119 	int ret;
1120 	struct btrfs_ordered_sum *sums;
1121 	LIST_HEAD(list);
1122 
1123 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1124 				       bytenr + num_bytes - 1, &list, 0);
1125 	if (ret == 0 && list_empty(&list))
1126 		return 0;
1127 
1128 	while (!list_empty(&list)) {
1129 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1130 		list_del(&sums->list);
1131 		kfree(sums);
1132 	}
1133 	return 1;
1134 }
1135 
1136 /*
1137  * when nowcow writeback call back.  This checks for snapshots or COW copies
1138  * of the extents that exist in the file, and COWs the file as required.
1139  *
1140  * If no cow copies or snapshots exist, we write directly to the existing
1141  * blocks on disk
1142  */
1143 static noinline int run_delalloc_nocow(struct inode *inode,
1144 				       struct page *locked_page,
1145 			      u64 start, u64 end, int *page_started, int force,
1146 			      unsigned long *nr_written)
1147 {
1148 	struct btrfs_root *root = BTRFS_I(inode)->root;
1149 	struct btrfs_trans_handle *trans;
1150 	struct extent_buffer *leaf;
1151 	struct btrfs_path *path;
1152 	struct btrfs_file_extent_item *fi;
1153 	struct btrfs_key found_key;
1154 	u64 cow_start;
1155 	u64 cur_offset;
1156 	u64 extent_end;
1157 	u64 extent_offset;
1158 	u64 disk_bytenr;
1159 	u64 num_bytes;
1160 	u64 disk_num_bytes;
1161 	int extent_type;
1162 	int ret, err;
1163 	int type;
1164 	int nocow;
1165 	int check_prev = 1;
1166 	bool nolock;
1167 	u64 ino = btrfs_ino(inode);
1168 
1169 	path = btrfs_alloc_path();
1170 	if (!path) {
1171 		extent_clear_unlock_delalloc(inode,
1172 			     &BTRFS_I(inode)->io_tree,
1173 			     start, end, locked_page,
1174 			     EXTENT_CLEAR_UNLOCK_PAGE |
1175 			     EXTENT_CLEAR_UNLOCK |
1176 			     EXTENT_CLEAR_DELALLOC |
1177 			     EXTENT_CLEAR_DIRTY |
1178 			     EXTENT_SET_WRITEBACK |
1179 			     EXTENT_END_WRITEBACK);
1180 		return -ENOMEM;
1181 	}
1182 
1183 	nolock = btrfs_is_free_space_inode(inode);
1184 
1185 	if (nolock)
1186 		trans = btrfs_join_transaction_nolock(root);
1187 	else
1188 		trans = btrfs_join_transaction(root);
1189 
1190 	if (IS_ERR(trans)) {
1191 		extent_clear_unlock_delalloc(inode,
1192 			     &BTRFS_I(inode)->io_tree,
1193 			     start, end, locked_page,
1194 			     EXTENT_CLEAR_UNLOCK_PAGE |
1195 			     EXTENT_CLEAR_UNLOCK |
1196 			     EXTENT_CLEAR_DELALLOC |
1197 			     EXTENT_CLEAR_DIRTY |
1198 			     EXTENT_SET_WRITEBACK |
1199 			     EXTENT_END_WRITEBACK);
1200 		btrfs_free_path(path);
1201 		return PTR_ERR(trans);
1202 	}
1203 
1204 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1205 
1206 	cow_start = (u64)-1;
1207 	cur_offset = start;
1208 	while (1) {
1209 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
1210 					       cur_offset, 0);
1211 		if (ret < 0) {
1212 			btrfs_abort_transaction(trans, root, ret);
1213 			goto error;
1214 		}
1215 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1216 			leaf = path->nodes[0];
1217 			btrfs_item_key_to_cpu(leaf, &found_key,
1218 					      path->slots[0] - 1);
1219 			if (found_key.objectid == ino &&
1220 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1221 				path->slots[0]--;
1222 		}
1223 		check_prev = 0;
1224 next_slot:
1225 		leaf = path->nodes[0];
1226 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1227 			ret = btrfs_next_leaf(root, path);
1228 			if (ret < 0) {
1229 				btrfs_abort_transaction(trans, root, ret);
1230 				goto error;
1231 			}
1232 			if (ret > 0)
1233 				break;
1234 			leaf = path->nodes[0];
1235 		}
1236 
1237 		nocow = 0;
1238 		disk_bytenr = 0;
1239 		num_bytes = 0;
1240 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1241 
1242 		if (found_key.objectid > ino ||
1243 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1244 		    found_key.offset > end)
1245 			break;
1246 
1247 		if (found_key.offset > cur_offset) {
1248 			extent_end = found_key.offset;
1249 			extent_type = 0;
1250 			goto out_check;
1251 		}
1252 
1253 		fi = btrfs_item_ptr(leaf, path->slots[0],
1254 				    struct btrfs_file_extent_item);
1255 		extent_type = btrfs_file_extent_type(leaf, fi);
1256 
1257 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1258 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1259 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1260 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1261 			extent_end = found_key.offset +
1262 				btrfs_file_extent_num_bytes(leaf, fi);
1263 			disk_num_bytes =
1264 				btrfs_file_extent_disk_num_bytes(leaf, fi);
1265 			if (extent_end <= start) {
1266 				path->slots[0]++;
1267 				goto next_slot;
1268 			}
1269 			if (disk_bytenr == 0)
1270 				goto out_check;
1271 			if (btrfs_file_extent_compression(leaf, fi) ||
1272 			    btrfs_file_extent_encryption(leaf, fi) ||
1273 			    btrfs_file_extent_other_encoding(leaf, fi))
1274 				goto out_check;
1275 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1276 				goto out_check;
1277 			if (btrfs_extent_readonly(root, disk_bytenr))
1278 				goto out_check;
1279 			if (btrfs_cross_ref_exist(trans, root, ino,
1280 						  found_key.offset -
1281 						  extent_offset, disk_bytenr))
1282 				goto out_check;
1283 			disk_bytenr += extent_offset;
1284 			disk_bytenr += cur_offset - found_key.offset;
1285 			num_bytes = min(end + 1, extent_end) - cur_offset;
1286 			/*
1287 			 * force cow if csum exists in the range.
1288 			 * this ensure that csum for a given extent are
1289 			 * either valid or do not exist.
1290 			 */
1291 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1292 				goto out_check;
1293 			nocow = 1;
1294 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1295 			extent_end = found_key.offset +
1296 				btrfs_file_extent_inline_len(leaf, fi);
1297 			extent_end = ALIGN(extent_end, root->sectorsize);
1298 		} else {
1299 			BUG_ON(1);
1300 		}
1301 out_check:
1302 		if (extent_end <= start) {
1303 			path->slots[0]++;
1304 			goto next_slot;
1305 		}
1306 		if (!nocow) {
1307 			if (cow_start == (u64)-1)
1308 				cow_start = cur_offset;
1309 			cur_offset = extent_end;
1310 			if (cur_offset > end)
1311 				break;
1312 			path->slots[0]++;
1313 			goto next_slot;
1314 		}
1315 
1316 		btrfs_release_path(path);
1317 		if (cow_start != (u64)-1) {
1318 			ret = __cow_file_range(trans, inode, root, locked_page,
1319 					       cow_start, found_key.offset - 1,
1320 					       page_started, nr_written, 1);
1321 			if (ret) {
1322 				btrfs_abort_transaction(trans, root, ret);
1323 				goto error;
1324 			}
1325 			cow_start = (u64)-1;
1326 		}
1327 
1328 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1329 			struct extent_map *em;
1330 			struct extent_map_tree *em_tree;
1331 			em_tree = &BTRFS_I(inode)->extent_tree;
1332 			em = alloc_extent_map();
1333 			BUG_ON(!em); /* -ENOMEM */
1334 			em->start = cur_offset;
1335 			em->orig_start = found_key.offset - extent_offset;
1336 			em->len = num_bytes;
1337 			em->block_len = num_bytes;
1338 			em->block_start = disk_bytenr;
1339 			em->orig_block_len = disk_num_bytes;
1340 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1341 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1342 			set_bit(EXTENT_FLAG_FILLING, &em->flags);
1343 			em->generation = -1;
1344 			while (1) {
1345 				write_lock(&em_tree->lock);
1346 				ret = add_extent_mapping(em_tree, em);
1347 				if (!ret)
1348 					list_move(&em->list,
1349 						  &em_tree->modified_extents);
1350 				write_unlock(&em_tree->lock);
1351 				if (ret != -EEXIST) {
1352 					free_extent_map(em);
1353 					break;
1354 				}
1355 				btrfs_drop_extent_cache(inode, em->start,
1356 						em->start + em->len - 1, 0);
1357 			}
1358 			type = BTRFS_ORDERED_PREALLOC;
1359 		} else {
1360 			type = BTRFS_ORDERED_NOCOW;
1361 		}
1362 
1363 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1364 					       num_bytes, num_bytes, type);
1365 		BUG_ON(ret); /* -ENOMEM */
1366 
1367 		if (root->root_key.objectid ==
1368 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1369 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1370 						      num_bytes);
1371 			if (ret) {
1372 				btrfs_abort_transaction(trans, root, ret);
1373 				goto error;
1374 			}
1375 		}
1376 
1377 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1378 				cur_offset, cur_offset + num_bytes - 1,
1379 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1380 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1381 				EXTENT_SET_PRIVATE2);
1382 		cur_offset = extent_end;
1383 		if (cur_offset > end)
1384 			break;
1385 	}
1386 	btrfs_release_path(path);
1387 
1388 	if (cur_offset <= end && cow_start == (u64)-1) {
1389 		cow_start = cur_offset;
1390 		cur_offset = end;
1391 	}
1392 
1393 	if (cow_start != (u64)-1) {
1394 		ret = __cow_file_range(trans, inode, root, locked_page,
1395 				       cow_start, end,
1396 				       page_started, nr_written, 1);
1397 		if (ret) {
1398 			btrfs_abort_transaction(trans, root, ret);
1399 			goto error;
1400 		}
1401 	}
1402 
1403 error:
1404 	err = btrfs_end_transaction(trans, root);
1405 	if (!ret)
1406 		ret = err;
1407 
1408 	if (ret && cur_offset < end)
1409 		extent_clear_unlock_delalloc(inode,
1410 			     &BTRFS_I(inode)->io_tree,
1411 			     cur_offset, end, locked_page,
1412 			     EXTENT_CLEAR_UNLOCK_PAGE |
1413 			     EXTENT_CLEAR_UNLOCK |
1414 			     EXTENT_CLEAR_DELALLOC |
1415 			     EXTENT_CLEAR_DIRTY |
1416 			     EXTENT_SET_WRITEBACK |
1417 			     EXTENT_END_WRITEBACK);
1418 
1419 	btrfs_free_path(path);
1420 	return ret;
1421 }
1422 
1423 /*
1424  * extent_io.c call back to do delayed allocation processing
1425  */
1426 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1427 			      u64 start, u64 end, int *page_started,
1428 			      unsigned long *nr_written)
1429 {
1430 	int ret;
1431 	struct btrfs_root *root = BTRFS_I(inode)->root;
1432 
1433 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
1434 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1435 					 page_started, 1, nr_written);
1436 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
1437 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1438 					 page_started, 0, nr_written);
1439 	} else if (!btrfs_test_opt(root, COMPRESS) &&
1440 		   !(BTRFS_I(inode)->force_compress) &&
1441 		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
1442 		ret = cow_file_range(inode, locked_page, start, end,
1443 				      page_started, nr_written, 1);
1444 	} else {
1445 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1446 			&BTRFS_I(inode)->runtime_flags);
1447 		ret = cow_file_range_async(inode, locked_page, start, end,
1448 					   page_started, nr_written);
1449 	}
1450 	return ret;
1451 }
1452 
1453 static void btrfs_split_extent_hook(struct inode *inode,
1454 				    struct extent_state *orig, u64 split)
1455 {
1456 	/* not delalloc, ignore it */
1457 	if (!(orig->state & EXTENT_DELALLOC))
1458 		return;
1459 
1460 	spin_lock(&BTRFS_I(inode)->lock);
1461 	BTRFS_I(inode)->outstanding_extents++;
1462 	spin_unlock(&BTRFS_I(inode)->lock);
1463 }
1464 
1465 /*
1466  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1467  * extents so we can keep track of new extents that are just merged onto old
1468  * extents, such as when we are doing sequential writes, so we can properly
1469  * account for the metadata space we'll need.
1470  */
1471 static void btrfs_merge_extent_hook(struct inode *inode,
1472 				    struct extent_state *new,
1473 				    struct extent_state *other)
1474 {
1475 	/* not delalloc, ignore it */
1476 	if (!(other->state & EXTENT_DELALLOC))
1477 		return;
1478 
1479 	spin_lock(&BTRFS_I(inode)->lock);
1480 	BTRFS_I(inode)->outstanding_extents--;
1481 	spin_unlock(&BTRFS_I(inode)->lock);
1482 }
1483 
1484 /*
1485  * extent_io.c set_bit_hook, used to track delayed allocation
1486  * bytes in this file, and to maintain the list of inodes that
1487  * have pending delalloc work to be done.
1488  */
1489 static void btrfs_set_bit_hook(struct inode *inode,
1490 			       struct extent_state *state, int *bits)
1491 {
1492 
1493 	/*
1494 	 * set_bit and clear bit hooks normally require _irqsave/restore
1495 	 * but in this case, we are only testing for the DELALLOC
1496 	 * bit, which is only set or cleared with irqs on
1497 	 */
1498 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1499 		struct btrfs_root *root = BTRFS_I(inode)->root;
1500 		u64 len = state->end + 1 - state->start;
1501 		bool do_list = !btrfs_is_free_space_inode(inode);
1502 
1503 		if (*bits & EXTENT_FIRST_DELALLOC) {
1504 			*bits &= ~EXTENT_FIRST_DELALLOC;
1505 		} else {
1506 			spin_lock(&BTRFS_I(inode)->lock);
1507 			BTRFS_I(inode)->outstanding_extents++;
1508 			spin_unlock(&BTRFS_I(inode)->lock);
1509 		}
1510 
1511 		spin_lock(&root->fs_info->delalloc_lock);
1512 		BTRFS_I(inode)->delalloc_bytes += len;
1513 		root->fs_info->delalloc_bytes += len;
1514 		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1515 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1516 				      &root->fs_info->delalloc_inodes);
1517 		}
1518 		spin_unlock(&root->fs_info->delalloc_lock);
1519 	}
1520 }
1521 
1522 /*
1523  * extent_io.c clear_bit_hook, see set_bit_hook for why
1524  */
1525 static void btrfs_clear_bit_hook(struct inode *inode,
1526 				 struct extent_state *state, int *bits)
1527 {
1528 	/*
1529 	 * set_bit and clear bit hooks normally require _irqsave/restore
1530 	 * but in this case, we are only testing for the DELALLOC
1531 	 * bit, which is only set or cleared with irqs on
1532 	 */
1533 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1534 		struct btrfs_root *root = BTRFS_I(inode)->root;
1535 		u64 len = state->end + 1 - state->start;
1536 		bool do_list = !btrfs_is_free_space_inode(inode);
1537 
1538 		if (*bits & EXTENT_FIRST_DELALLOC) {
1539 			*bits &= ~EXTENT_FIRST_DELALLOC;
1540 		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1541 			spin_lock(&BTRFS_I(inode)->lock);
1542 			BTRFS_I(inode)->outstanding_extents--;
1543 			spin_unlock(&BTRFS_I(inode)->lock);
1544 		}
1545 
1546 		if (*bits & EXTENT_DO_ACCOUNTING)
1547 			btrfs_delalloc_release_metadata(inode, len);
1548 
1549 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1550 		    && do_list)
1551 			btrfs_free_reserved_data_space(inode, len);
1552 
1553 		spin_lock(&root->fs_info->delalloc_lock);
1554 		root->fs_info->delalloc_bytes -= len;
1555 		BTRFS_I(inode)->delalloc_bytes -= len;
1556 
1557 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1558 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1559 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1560 		}
1561 		spin_unlock(&root->fs_info->delalloc_lock);
1562 	}
1563 }
1564 
1565 /*
1566  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1567  * we don't create bios that span stripes or chunks
1568  */
1569 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1570 			 size_t size, struct bio *bio,
1571 			 unsigned long bio_flags)
1572 {
1573 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1574 	u64 logical = (u64)bio->bi_sector << 9;
1575 	u64 length = 0;
1576 	u64 map_length;
1577 	int ret;
1578 
1579 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1580 		return 0;
1581 
1582 	length = bio->bi_size;
1583 	map_length = length;
1584 	ret = btrfs_map_block(root->fs_info, READ, logical,
1585 			      &map_length, NULL, 0);
1586 	/* Will always return 0 with map_multi == NULL */
1587 	BUG_ON(ret < 0);
1588 	if (map_length < length + size)
1589 		return 1;
1590 	return 0;
1591 }
1592 
1593 /*
1594  * in order to insert checksums into the metadata in large chunks,
1595  * we wait until bio submission time.   All the pages in the bio are
1596  * checksummed and sums are attached onto the ordered extent record.
1597  *
1598  * At IO completion time the cums attached on the ordered extent record
1599  * are inserted into the btree
1600  */
1601 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1602 				    struct bio *bio, int mirror_num,
1603 				    unsigned long bio_flags,
1604 				    u64 bio_offset)
1605 {
1606 	struct btrfs_root *root = BTRFS_I(inode)->root;
1607 	int ret = 0;
1608 
1609 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1610 	BUG_ON(ret); /* -ENOMEM */
1611 	return 0;
1612 }
1613 
1614 /*
1615  * in order to insert checksums into the metadata in large chunks,
1616  * we wait until bio submission time.   All the pages in the bio are
1617  * checksummed and sums are attached onto the ordered extent record.
1618  *
1619  * At IO completion time the cums attached on the ordered extent record
1620  * are inserted into the btree
1621  */
1622 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1623 			  int mirror_num, unsigned long bio_flags,
1624 			  u64 bio_offset)
1625 {
1626 	struct btrfs_root *root = BTRFS_I(inode)->root;
1627 	int ret;
1628 
1629 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1630 	if (ret)
1631 		bio_endio(bio, ret);
1632 	return ret;
1633 }
1634 
1635 /*
1636  * extent_io.c submission hook. This does the right thing for csum calculation
1637  * on write, or reading the csums from the tree before a read
1638  */
1639 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1640 			  int mirror_num, unsigned long bio_flags,
1641 			  u64 bio_offset)
1642 {
1643 	struct btrfs_root *root = BTRFS_I(inode)->root;
1644 	int ret = 0;
1645 	int skip_sum;
1646 	int metadata = 0;
1647 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1648 
1649 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1650 
1651 	if (btrfs_is_free_space_inode(inode))
1652 		metadata = 2;
1653 
1654 	if (!(rw & REQ_WRITE)) {
1655 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1656 		if (ret)
1657 			goto out;
1658 
1659 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1660 			ret = btrfs_submit_compressed_read(inode, bio,
1661 							   mirror_num,
1662 							   bio_flags);
1663 			goto out;
1664 		} else if (!skip_sum) {
1665 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1666 			if (ret)
1667 				goto out;
1668 		}
1669 		goto mapit;
1670 	} else if (async && !skip_sum) {
1671 		/* csum items have already been cloned */
1672 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1673 			goto mapit;
1674 		/* we're doing a write, do the async checksumming */
1675 		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1676 				   inode, rw, bio, mirror_num,
1677 				   bio_flags, bio_offset,
1678 				   __btrfs_submit_bio_start,
1679 				   __btrfs_submit_bio_done);
1680 		goto out;
1681 	} else if (!skip_sum) {
1682 		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1683 		if (ret)
1684 			goto out;
1685 	}
1686 
1687 mapit:
1688 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1689 
1690 out:
1691 	if (ret < 0)
1692 		bio_endio(bio, ret);
1693 	return ret;
1694 }
1695 
1696 /*
1697  * given a list of ordered sums record them in the inode.  This happens
1698  * at IO completion time based on sums calculated at bio submission time.
1699  */
1700 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1701 			     struct inode *inode, u64 file_offset,
1702 			     struct list_head *list)
1703 {
1704 	struct btrfs_ordered_sum *sum;
1705 
1706 	list_for_each_entry(sum, list, list) {
1707 		btrfs_csum_file_blocks(trans,
1708 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1709 	}
1710 	return 0;
1711 }
1712 
1713 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1714 			      struct extent_state **cached_state)
1715 {
1716 	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1717 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1718 				   cached_state, GFP_NOFS);
1719 }
1720 
1721 /* see btrfs_writepage_start_hook for details on why this is required */
1722 struct btrfs_writepage_fixup {
1723 	struct page *page;
1724 	struct btrfs_work work;
1725 };
1726 
1727 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1728 {
1729 	struct btrfs_writepage_fixup *fixup;
1730 	struct btrfs_ordered_extent *ordered;
1731 	struct extent_state *cached_state = NULL;
1732 	struct page *page;
1733 	struct inode *inode;
1734 	u64 page_start;
1735 	u64 page_end;
1736 	int ret;
1737 
1738 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1739 	page = fixup->page;
1740 again:
1741 	lock_page(page);
1742 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1743 		ClearPageChecked(page);
1744 		goto out_page;
1745 	}
1746 
1747 	inode = page->mapping->host;
1748 	page_start = page_offset(page);
1749 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1750 
1751 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1752 			 &cached_state);
1753 
1754 	/* already ordered? We're done */
1755 	if (PagePrivate2(page))
1756 		goto out;
1757 
1758 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1759 	if (ordered) {
1760 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1761 				     page_end, &cached_state, GFP_NOFS);
1762 		unlock_page(page);
1763 		btrfs_start_ordered_extent(inode, ordered, 1);
1764 		btrfs_put_ordered_extent(ordered);
1765 		goto again;
1766 	}
1767 
1768 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1769 	if (ret) {
1770 		mapping_set_error(page->mapping, ret);
1771 		end_extent_writepage(page, ret, page_start, page_end);
1772 		ClearPageChecked(page);
1773 		goto out;
1774 	 }
1775 
1776 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1777 	ClearPageChecked(page);
1778 	set_page_dirty(page);
1779 out:
1780 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1781 			     &cached_state, GFP_NOFS);
1782 out_page:
1783 	unlock_page(page);
1784 	page_cache_release(page);
1785 	kfree(fixup);
1786 }
1787 
1788 /*
1789  * There are a few paths in the higher layers of the kernel that directly
1790  * set the page dirty bit without asking the filesystem if it is a
1791  * good idea.  This causes problems because we want to make sure COW
1792  * properly happens and the data=ordered rules are followed.
1793  *
1794  * In our case any range that doesn't have the ORDERED bit set
1795  * hasn't been properly setup for IO.  We kick off an async process
1796  * to fix it up.  The async helper will wait for ordered extents, set
1797  * the delalloc bit and make it safe to write the page.
1798  */
1799 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1800 {
1801 	struct inode *inode = page->mapping->host;
1802 	struct btrfs_writepage_fixup *fixup;
1803 	struct btrfs_root *root = BTRFS_I(inode)->root;
1804 
1805 	/* this page is properly in the ordered list */
1806 	if (TestClearPagePrivate2(page))
1807 		return 0;
1808 
1809 	if (PageChecked(page))
1810 		return -EAGAIN;
1811 
1812 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1813 	if (!fixup)
1814 		return -EAGAIN;
1815 
1816 	SetPageChecked(page);
1817 	page_cache_get(page);
1818 	fixup->work.func = btrfs_writepage_fixup_worker;
1819 	fixup->page = page;
1820 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1821 	return -EBUSY;
1822 }
1823 
1824 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1825 				       struct inode *inode, u64 file_pos,
1826 				       u64 disk_bytenr, u64 disk_num_bytes,
1827 				       u64 num_bytes, u64 ram_bytes,
1828 				       u8 compression, u8 encryption,
1829 				       u16 other_encoding, int extent_type)
1830 {
1831 	struct btrfs_root *root = BTRFS_I(inode)->root;
1832 	struct btrfs_file_extent_item *fi;
1833 	struct btrfs_path *path;
1834 	struct extent_buffer *leaf;
1835 	struct btrfs_key ins;
1836 	int ret;
1837 
1838 	path = btrfs_alloc_path();
1839 	if (!path)
1840 		return -ENOMEM;
1841 
1842 	path->leave_spinning = 1;
1843 
1844 	/*
1845 	 * we may be replacing one extent in the tree with another.
1846 	 * The new extent is pinned in the extent map, and we don't want
1847 	 * to drop it from the cache until it is completely in the btree.
1848 	 *
1849 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1850 	 * the caller is expected to unpin it and allow it to be merged
1851 	 * with the others.
1852 	 */
1853 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1854 				 file_pos + num_bytes, 0);
1855 	if (ret)
1856 		goto out;
1857 
1858 	ins.objectid = btrfs_ino(inode);
1859 	ins.offset = file_pos;
1860 	ins.type = BTRFS_EXTENT_DATA_KEY;
1861 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1862 	if (ret)
1863 		goto out;
1864 	leaf = path->nodes[0];
1865 	fi = btrfs_item_ptr(leaf, path->slots[0],
1866 			    struct btrfs_file_extent_item);
1867 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1868 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1869 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1870 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1871 	btrfs_set_file_extent_offset(leaf, fi, 0);
1872 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1873 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1874 	btrfs_set_file_extent_compression(leaf, fi, compression);
1875 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1876 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1877 
1878 	btrfs_mark_buffer_dirty(leaf);
1879 	btrfs_release_path(path);
1880 
1881 	inode_add_bytes(inode, num_bytes);
1882 
1883 	ins.objectid = disk_bytenr;
1884 	ins.offset = disk_num_bytes;
1885 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1886 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1887 					root->root_key.objectid,
1888 					btrfs_ino(inode), file_pos, &ins);
1889 out:
1890 	btrfs_free_path(path);
1891 
1892 	return ret;
1893 }
1894 
1895 /*
1896  * helper function for btrfs_finish_ordered_io, this
1897  * just reads in some of the csum leaves to prime them into ram
1898  * before we start the transaction.  It limits the amount of btree
1899  * reads required while inside the transaction.
1900  */
1901 /* as ordered data IO finishes, this gets called so we can finish
1902  * an ordered extent if the range of bytes in the file it covers are
1903  * fully written.
1904  */
1905 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1906 {
1907 	struct inode *inode = ordered_extent->inode;
1908 	struct btrfs_root *root = BTRFS_I(inode)->root;
1909 	struct btrfs_trans_handle *trans = NULL;
1910 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1911 	struct extent_state *cached_state = NULL;
1912 	int compress_type = 0;
1913 	int ret;
1914 	bool nolock;
1915 
1916 	nolock = btrfs_is_free_space_inode(inode);
1917 
1918 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
1919 		ret = -EIO;
1920 		goto out;
1921 	}
1922 
1923 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1924 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1925 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1926 		if (nolock)
1927 			trans = btrfs_join_transaction_nolock(root);
1928 		else
1929 			trans = btrfs_join_transaction(root);
1930 		if (IS_ERR(trans)) {
1931 			ret = PTR_ERR(trans);
1932 			trans = NULL;
1933 			goto out;
1934 		}
1935 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1936 		ret = btrfs_update_inode_fallback(trans, root, inode);
1937 		if (ret) /* -ENOMEM or corruption */
1938 			btrfs_abort_transaction(trans, root, ret);
1939 		goto out;
1940 	}
1941 
1942 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1943 			 ordered_extent->file_offset + ordered_extent->len - 1,
1944 			 0, &cached_state);
1945 
1946 	if (nolock)
1947 		trans = btrfs_join_transaction_nolock(root);
1948 	else
1949 		trans = btrfs_join_transaction(root);
1950 	if (IS_ERR(trans)) {
1951 		ret = PTR_ERR(trans);
1952 		trans = NULL;
1953 		goto out_unlock;
1954 	}
1955 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1956 
1957 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1958 		compress_type = ordered_extent->compress_type;
1959 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1960 		BUG_ON(compress_type);
1961 		ret = btrfs_mark_extent_written(trans, inode,
1962 						ordered_extent->file_offset,
1963 						ordered_extent->file_offset +
1964 						ordered_extent->len);
1965 	} else {
1966 		BUG_ON(root == root->fs_info->tree_root);
1967 		ret = insert_reserved_file_extent(trans, inode,
1968 						ordered_extent->file_offset,
1969 						ordered_extent->start,
1970 						ordered_extent->disk_len,
1971 						ordered_extent->len,
1972 						ordered_extent->len,
1973 						compress_type, 0, 0,
1974 						BTRFS_FILE_EXTENT_REG);
1975 	}
1976 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1977 			   ordered_extent->file_offset, ordered_extent->len,
1978 			   trans->transid);
1979 	if (ret < 0) {
1980 		btrfs_abort_transaction(trans, root, ret);
1981 		goto out_unlock;
1982 	}
1983 
1984 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1985 			  &ordered_extent->list);
1986 
1987 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1988 	ret = btrfs_update_inode_fallback(trans, root, inode);
1989 	if (ret) { /* -ENOMEM or corruption */
1990 		btrfs_abort_transaction(trans, root, ret);
1991 		goto out_unlock;
1992 	}
1993 	ret = 0;
1994 out_unlock:
1995 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1996 			     ordered_extent->file_offset +
1997 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1998 out:
1999 	if (root != root->fs_info->tree_root)
2000 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2001 	if (trans)
2002 		btrfs_end_transaction(trans, root);
2003 
2004 	if (ret)
2005 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
2006 				      ordered_extent->file_offset +
2007 				      ordered_extent->len - 1, NULL, GFP_NOFS);
2008 
2009 	/*
2010 	 * This needs to be done to make sure anybody waiting knows we are done
2011 	 * updating everything for this ordered extent.
2012 	 */
2013 	btrfs_remove_ordered_extent(inode, ordered_extent);
2014 
2015 	/* once for us */
2016 	btrfs_put_ordered_extent(ordered_extent);
2017 	/* once for the tree */
2018 	btrfs_put_ordered_extent(ordered_extent);
2019 
2020 	return ret;
2021 }
2022 
2023 static void finish_ordered_fn(struct btrfs_work *work)
2024 {
2025 	struct btrfs_ordered_extent *ordered_extent;
2026 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2027 	btrfs_finish_ordered_io(ordered_extent);
2028 }
2029 
2030 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2031 				struct extent_state *state, int uptodate)
2032 {
2033 	struct inode *inode = page->mapping->host;
2034 	struct btrfs_root *root = BTRFS_I(inode)->root;
2035 	struct btrfs_ordered_extent *ordered_extent = NULL;
2036 	struct btrfs_workers *workers;
2037 
2038 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2039 
2040 	ClearPagePrivate2(page);
2041 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2042 					    end - start + 1, uptodate))
2043 		return 0;
2044 
2045 	ordered_extent->work.func = finish_ordered_fn;
2046 	ordered_extent->work.flags = 0;
2047 
2048 	if (btrfs_is_free_space_inode(inode))
2049 		workers = &root->fs_info->endio_freespace_worker;
2050 	else
2051 		workers = &root->fs_info->endio_write_workers;
2052 	btrfs_queue_worker(workers, &ordered_extent->work);
2053 
2054 	return 0;
2055 }
2056 
2057 /*
2058  * when reads are done, we need to check csums to verify the data is correct
2059  * if there's a match, we allow the bio to finish.  If not, the code in
2060  * extent_io.c will try to find good copies for us.
2061  */
2062 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2063 			       struct extent_state *state, int mirror)
2064 {
2065 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
2066 	struct inode *inode = page->mapping->host;
2067 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2068 	char *kaddr;
2069 	u64 private = ~(u32)0;
2070 	int ret;
2071 	struct btrfs_root *root = BTRFS_I(inode)->root;
2072 	u32 csum = ~(u32)0;
2073 
2074 	if (PageChecked(page)) {
2075 		ClearPageChecked(page);
2076 		goto good;
2077 	}
2078 
2079 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2080 		goto good;
2081 
2082 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2083 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2084 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2085 				  GFP_NOFS);
2086 		return 0;
2087 	}
2088 
2089 	if (state && state->start == start) {
2090 		private = state->private;
2091 		ret = 0;
2092 	} else {
2093 		ret = get_state_private(io_tree, start, &private);
2094 	}
2095 	kaddr = kmap_atomic(page);
2096 	if (ret)
2097 		goto zeroit;
2098 
2099 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
2100 	btrfs_csum_final(csum, (char *)&csum);
2101 	if (csum != private)
2102 		goto zeroit;
2103 
2104 	kunmap_atomic(kaddr);
2105 good:
2106 	return 0;
2107 
2108 zeroit:
2109 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2110 		       "private %llu\n",
2111 		       (unsigned long long)btrfs_ino(page->mapping->host),
2112 		       (unsigned long long)start, csum,
2113 		       (unsigned long long)private);
2114 	memset(kaddr + offset, 1, end - start + 1);
2115 	flush_dcache_page(page);
2116 	kunmap_atomic(kaddr);
2117 	if (private == 0)
2118 		return 0;
2119 	return -EIO;
2120 }
2121 
2122 struct delayed_iput {
2123 	struct list_head list;
2124 	struct inode *inode;
2125 };
2126 
2127 /* JDM: If this is fs-wide, why can't we add a pointer to
2128  * btrfs_inode instead and avoid the allocation? */
2129 void btrfs_add_delayed_iput(struct inode *inode)
2130 {
2131 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2132 	struct delayed_iput *delayed;
2133 
2134 	if (atomic_add_unless(&inode->i_count, -1, 1))
2135 		return;
2136 
2137 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2138 	delayed->inode = inode;
2139 
2140 	spin_lock(&fs_info->delayed_iput_lock);
2141 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2142 	spin_unlock(&fs_info->delayed_iput_lock);
2143 }
2144 
2145 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2146 {
2147 	LIST_HEAD(list);
2148 	struct btrfs_fs_info *fs_info = root->fs_info;
2149 	struct delayed_iput *delayed;
2150 	int empty;
2151 
2152 	spin_lock(&fs_info->delayed_iput_lock);
2153 	empty = list_empty(&fs_info->delayed_iputs);
2154 	spin_unlock(&fs_info->delayed_iput_lock);
2155 	if (empty)
2156 		return;
2157 
2158 	spin_lock(&fs_info->delayed_iput_lock);
2159 	list_splice_init(&fs_info->delayed_iputs, &list);
2160 	spin_unlock(&fs_info->delayed_iput_lock);
2161 
2162 	while (!list_empty(&list)) {
2163 		delayed = list_entry(list.next, struct delayed_iput, list);
2164 		list_del(&delayed->list);
2165 		iput(delayed->inode);
2166 		kfree(delayed);
2167 	}
2168 }
2169 
2170 enum btrfs_orphan_cleanup_state {
2171 	ORPHAN_CLEANUP_STARTED	= 1,
2172 	ORPHAN_CLEANUP_DONE	= 2,
2173 };
2174 
2175 /*
2176  * This is called in transaction commit time. If there are no orphan
2177  * files in the subvolume, it removes orphan item and frees block_rsv
2178  * structure.
2179  */
2180 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2181 			      struct btrfs_root *root)
2182 {
2183 	struct btrfs_block_rsv *block_rsv;
2184 	int ret;
2185 
2186 	if (atomic_read(&root->orphan_inodes) ||
2187 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2188 		return;
2189 
2190 	spin_lock(&root->orphan_lock);
2191 	if (atomic_read(&root->orphan_inodes)) {
2192 		spin_unlock(&root->orphan_lock);
2193 		return;
2194 	}
2195 
2196 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2197 		spin_unlock(&root->orphan_lock);
2198 		return;
2199 	}
2200 
2201 	block_rsv = root->orphan_block_rsv;
2202 	root->orphan_block_rsv = NULL;
2203 	spin_unlock(&root->orphan_lock);
2204 
2205 	if (root->orphan_item_inserted &&
2206 	    btrfs_root_refs(&root->root_item) > 0) {
2207 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2208 					    root->root_key.objectid);
2209 		BUG_ON(ret);
2210 		root->orphan_item_inserted = 0;
2211 	}
2212 
2213 	if (block_rsv) {
2214 		WARN_ON(block_rsv->size > 0);
2215 		btrfs_free_block_rsv(root, block_rsv);
2216 	}
2217 }
2218 
2219 /*
2220  * This creates an orphan entry for the given inode in case something goes
2221  * wrong in the middle of an unlink/truncate.
2222  *
2223  * NOTE: caller of this function should reserve 5 units of metadata for
2224  *	 this function.
2225  */
2226 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2227 {
2228 	struct btrfs_root *root = BTRFS_I(inode)->root;
2229 	struct btrfs_block_rsv *block_rsv = NULL;
2230 	int reserve = 0;
2231 	int insert = 0;
2232 	int ret;
2233 
2234 	if (!root->orphan_block_rsv) {
2235 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2236 		if (!block_rsv)
2237 			return -ENOMEM;
2238 	}
2239 
2240 	spin_lock(&root->orphan_lock);
2241 	if (!root->orphan_block_rsv) {
2242 		root->orphan_block_rsv = block_rsv;
2243 	} else if (block_rsv) {
2244 		btrfs_free_block_rsv(root, block_rsv);
2245 		block_rsv = NULL;
2246 	}
2247 
2248 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2249 			      &BTRFS_I(inode)->runtime_flags)) {
2250 #if 0
2251 		/*
2252 		 * For proper ENOSPC handling, we should do orphan
2253 		 * cleanup when mounting. But this introduces backward
2254 		 * compatibility issue.
2255 		 */
2256 		if (!xchg(&root->orphan_item_inserted, 1))
2257 			insert = 2;
2258 		else
2259 			insert = 1;
2260 #endif
2261 		insert = 1;
2262 		atomic_inc(&root->orphan_inodes);
2263 	}
2264 
2265 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2266 			      &BTRFS_I(inode)->runtime_flags))
2267 		reserve = 1;
2268 	spin_unlock(&root->orphan_lock);
2269 
2270 	/* grab metadata reservation from transaction handle */
2271 	if (reserve) {
2272 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2273 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2274 	}
2275 
2276 	/* insert an orphan item to track this unlinked/truncated file */
2277 	if (insert >= 1) {
2278 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2279 		if (ret && ret != -EEXIST) {
2280 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2281 				  &BTRFS_I(inode)->runtime_flags);
2282 			btrfs_abort_transaction(trans, root, ret);
2283 			return ret;
2284 		}
2285 		ret = 0;
2286 	}
2287 
2288 	/* insert an orphan item to track subvolume contains orphan files */
2289 	if (insert >= 2) {
2290 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2291 					       root->root_key.objectid);
2292 		if (ret && ret != -EEXIST) {
2293 			btrfs_abort_transaction(trans, root, ret);
2294 			return ret;
2295 		}
2296 	}
2297 	return 0;
2298 }
2299 
2300 /*
2301  * We have done the truncate/delete so we can go ahead and remove the orphan
2302  * item for this particular inode.
2303  */
2304 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2305 {
2306 	struct btrfs_root *root = BTRFS_I(inode)->root;
2307 	int delete_item = 0;
2308 	int release_rsv = 0;
2309 	int ret = 0;
2310 
2311 	spin_lock(&root->orphan_lock);
2312 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2313 			       &BTRFS_I(inode)->runtime_flags))
2314 		delete_item = 1;
2315 
2316 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2317 			       &BTRFS_I(inode)->runtime_flags))
2318 		release_rsv = 1;
2319 	spin_unlock(&root->orphan_lock);
2320 
2321 	if (trans && delete_item) {
2322 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2323 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2324 	}
2325 
2326 	if (release_rsv) {
2327 		btrfs_orphan_release_metadata(inode);
2328 		atomic_dec(&root->orphan_inodes);
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 /*
2335  * this cleans up any orphans that may be left on the list from the last use
2336  * of this root.
2337  */
2338 int btrfs_orphan_cleanup(struct btrfs_root *root)
2339 {
2340 	struct btrfs_path *path;
2341 	struct extent_buffer *leaf;
2342 	struct btrfs_key key, found_key;
2343 	struct btrfs_trans_handle *trans;
2344 	struct inode *inode;
2345 	u64 last_objectid = 0;
2346 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2347 
2348 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2349 		return 0;
2350 
2351 	path = btrfs_alloc_path();
2352 	if (!path) {
2353 		ret = -ENOMEM;
2354 		goto out;
2355 	}
2356 	path->reada = -1;
2357 
2358 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2359 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2360 	key.offset = (u64)-1;
2361 
2362 	while (1) {
2363 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2364 		if (ret < 0)
2365 			goto out;
2366 
2367 		/*
2368 		 * if ret == 0 means we found what we were searching for, which
2369 		 * is weird, but possible, so only screw with path if we didn't
2370 		 * find the key and see if we have stuff that matches
2371 		 */
2372 		if (ret > 0) {
2373 			ret = 0;
2374 			if (path->slots[0] == 0)
2375 				break;
2376 			path->slots[0]--;
2377 		}
2378 
2379 		/* pull out the item */
2380 		leaf = path->nodes[0];
2381 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2382 
2383 		/* make sure the item matches what we want */
2384 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2385 			break;
2386 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2387 			break;
2388 
2389 		/* release the path since we're done with it */
2390 		btrfs_release_path(path);
2391 
2392 		/*
2393 		 * this is where we are basically btrfs_lookup, without the
2394 		 * crossing root thing.  we store the inode number in the
2395 		 * offset of the orphan item.
2396 		 */
2397 
2398 		if (found_key.offset == last_objectid) {
2399 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
2400 			       "stopping orphan cleanup\n");
2401 			ret = -EINVAL;
2402 			goto out;
2403 		}
2404 
2405 		last_objectid = found_key.offset;
2406 
2407 		found_key.objectid = found_key.offset;
2408 		found_key.type = BTRFS_INODE_ITEM_KEY;
2409 		found_key.offset = 0;
2410 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2411 		ret = PTR_RET(inode);
2412 		if (ret && ret != -ESTALE)
2413 			goto out;
2414 
2415 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
2416 			struct btrfs_root *dead_root;
2417 			struct btrfs_fs_info *fs_info = root->fs_info;
2418 			int is_dead_root = 0;
2419 
2420 			/*
2421 			 * this is an orphan in the tree root. Currently these
2422 			 * could come from 2 sources:
2423 			 *  a) a snapshot deletion in progress
2424 			 *  b) a free space cache inode
2425 			 * We need to distinguish those two, as the snapshot
2426 			 * orphan must not get deleted.
2427 			 * find_dead_roots already ran before us, so if this
2428 			 * is a snapshot deletion, we should find the root
2429 			 * in the dead_roots list
2430 			 */
2431 			spin_lock(&fs_info->trans_lock);
2432 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2433 					    root_list) {
2434 				if (dead_root->root_key.objectid ==
2435 				    found_key.objectid) {
2436 					is_dead_root = 1;
2437 					break;
2438 				}
2439 			}
2440 			spin_unlock(&fs_info->trans_lock);
2441 			if (is_dead_root) {
2442 				/* prevent this orphan from being found again */
2443 				key.offset = found_key.objectid - 1;
2444 				continue;
2445 			}
2446 		}
2447 		/*
2448 		 * Inode is already gone but the orphan item is still there,
2449 		 * kill the orphan item.
2450 		 */
2451 		if (ret == -ESTALE) {
2452 			trans = btrfs_start_transaction(root, 1);
2453 			if (IS_ERR(trans)) {
2454 				ret = PTR_ERR(trans);
2455 				goto out;
2456 			}
2457 			printk(KERN_ERR "auto deleting %Lu\n",
2458 			       found_key.objectid);
2459 			ret = btrfs_del_orphan_item(trans, root,
2460 						    found_key.objectid);
2461 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2462 			btrfs_end_transaction(trans, root);
2463 			continue;
2464 		}
2465 
2466 		/*
2467 		 * add this inode to the orphan list so btrfs_orphan_del does
2468 		 * the proper thing when we hit it
2469 		 */
2470 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2471 			&BTRFS_I(inode)->runtime_flags);
2472 
2473 		/* if we have links, this was a truncate, lets do that */
2474 		if (inode->i_nlink) {
2475 			if (!S_ISREG(inode->i_mode)) {
2476 				WARN_ON(1);
2477 				iput(inode);
2478 				continue;
2479 			}
2480 			nr_truncate++;
2481 
2482 			/* 1 for the orphan item deletion. */
2483 			trans = btrfs_start_transaction(root, 1);
2484 			if (IS_ERR(trans)) {
2485 				ret = PTR_ERR(trans);
2486 				goto out;
2487 			}
2488 			ret = btrfs_orphan_add(trans, inode);
2489 			btrfs_end_transaction(trans, root);
2490 			if (ret)
2491 				goto out;
2492 
2493 			ret = btrfs_truncate(inode);
2494 		} else {
2495 			nr_unlink++;
2496 		}
2497 
2498 		/* this will do delete_inode and everything for us */
2499 		iput(inode);
2500 		if (ret)
2501 			goto out;
2502 	}
2503 	/* release the path since we're done with it */
2504 	btrfs_release_path(path);
2505 
2506 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2507 
2508 	if (root->orphan_block_rsv)
2509 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2510 					(u64)-1);
2511 
2512 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2513 		trans = btrfs_join_transaction(root);
2514 		if (!IS_ERR(trans))
2515 			btrfs_end_transaction(trans, root);
2516 	}
2517 
2518 	if (nr_unlink)
2519 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2520 	if (nr_truncate)
2521 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2522 
2523 out:
2524 	if (ret)
2525 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2526 	btrfs_free_path(path);
2527 	return ret;
2528 }
2529 
2530 /*
2531  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2532  * don't find any xattrs, we know there can't be any acls.
2533  *
2534  * slot is the slot the inode is in, objectid is the objectid of the inode
2535  */
2536 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2537 					  int slot, u64 objectid)
2538 {
2539 	u32 nritems = btrfs_header_nritems(leaf);
2540 	struct btrfs_key found_key;
2541 	int scanned = 0;
2542 
2543 	slot++;
2544 	while (slot < nritems) {
2545 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2546 
2547 		/* we found a different objectid, there must not be acls */
2548 		if (found_key.objectid != objectid)
2549 			return 0;
2550 
2551 		/* we found an xattr, assume we've got an acl */
2552 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2553 			return 1;
2554 
2555 		/*
2556 		 * we found a key greater than an xattr key, there can't
2557 		 * be any acls later on
2558 		 */
2559 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2560 			return 0;
2561 
2562 		slot++;
2563 		scanned++;
2564 
2565 		/*
2566 		 * it goes inode, inode backrefs, xattrs, extents,
2567 		 * so if there are a ton of hard links to an inode there can
2568 		 * be a lot of backrefs.  Don't waste time searching too hard,
2569 		 * this is just an optimization
2570 		 */
2571 		if (scanned >= 8)
2572 			break;
2573 	}
2574 	/* we hit the end of the leaf before we found an xattr or
2575 	 * something larger than an xattr.  We have to assume the inode
2576 	 * has acls
2577 	 */
2578 	return 1;
2579 }
2580 
2581 /*
2582  * read an inode from the btree into the in-memory inode
2583  */
2584 static void btrfs_read_locked_inode(struct inode *inode)
2585 {
2586 	struct btrfs_path *path;
2587 	struct extent_buffer *leaf;
2588 	struct btrfs_inode_item *inode_item;
2589 	struct btrfs_timespec *tspec;
2590 	struct btrfs_root *root = BTRFS_I(inode)->root;
2591 	struct btrfs_key location;
2592 	int maybe_acls;
2593 	u32 rdev;
2594 	int ret;
2595 	bool filled = false;
2596 
2597 	ret = btrfs_fill_inode(inode, &rdev);
2598 	if (!ret)
2599 		filled = true;
2600 
2601 	path = btrfs_alloc_path();
2602 	if (!path)
2603 		goto make_bad;
2604 
2605 	path->leave_spinning = 1;
2606 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2607 
2608 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2609 	if (ret)
2610 		goto make_bad;
2611 
2612 	leaf = path->nodes[0];
2613 
2614 	if (filled)
2615 		goto cache_acl;
2616 
2617 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2618 				    struct btrfs_inode_item);
2619 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2620 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2621 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
2622 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
2623 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2624 
2625 	tspec = btrfs_inode_atime(inode_item);
2626 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2627 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2628 
2629 	tspec = btrfs_inode_mtime(inode_item);
2630 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2631 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2632 
2633 	tspec = btrfs_inode_ctime(inode_item);
2634 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2635 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2636 
2637 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2638 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2639 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
2640 
2641 	/*
2642 	 * If we were modified in the current generation and evicted from memory
2643 	 * and then re-read we need to do a full sync since we don't have any
2644 	 * idea about which extents were modified before we were evicted from
2645 	 * cache.
2646 	 */
2647 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
2648 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2649 			&BTRFS_I(inode)->runtime_flags);
2650 
2651 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
2652 	inode->i_generation = BTRFS_I(inode)->generation;
2653 	inode->i_rdev = 0;
2654 	rdev = btrfs_inode_rdev(leaf, inode_item);
2655 
2656 	BTRFS_I(inode)->index_cnt = (u64)-1;
2657 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2658 cache_acl:
2659 	/*
2660 	 * try to precache a NULL acl entry for files that don't have
2661 	 * any xattrs or acls
2662 	 */
2663 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2664 					   btrfs_ino(inode));
2665 	if (!maybe_acls)
2666 		cache_no_acl(inode);
2667 
2668 	btrfs_free_path(path);
2669 
2670 	switch (inode->i_mode & S_IFMT) {
2671 	case S_IFREG:
2672 		inode->i_mapping->a_ops = &btrfs_aops;
2673 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2674 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2675 		inode->i_fop = &btrfs_file_operations;
2676 		inode->i_op = &btrfs_file_inode_operations;
2677 		break;
2678 	case S_IFDIR:
2679 		inode->i_fop = &btrfs_dir_file_operations;
2680 		if (root == root->fs_info->tree_root)
2681 			inode->i_op = &btrfs_dir_ro_inode_operations;
2682 		else
2683 			inode->i_op = &btrfs_dir_inode_operations;
2684 		break;
2685 	case S_IFLNK:
2686 		inode->i_op = &btrfs_symlink_inode_operations;
2687 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2688 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2689 		break;
2690 	default:
2691 		inode->i_op = &btrfs_special_inode_operations;
2692 		init_special_inode(inode, inode->i_mode, rdev);
2693 		break;
2694 	}
2695 
2696 	btrfs_update_iflags(inode);
2697 	return;
2698 
2699 make_bad:
2700 	btrfs_free_path(path);
2701 	make_bad_inode(inode);
2702 }
2703 
2704 /*
2705  * given a leaf and an inode, copy the inode fields into the leaf
2706  */
2707 static void fill_inode_item(struct btrfs_trans_handle *trans,
2708 			    struct extent_buffer *leaf,
2709 			    struct btrfs_inode_item *item,
2710 			    struct inode *inode)
2711 {
2712 	btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
2713 	btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
2714 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2715 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2716 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2717 
2718 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2719 			       inode->i_atime.tv_sec);
2720 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2721 				inode->i_atime.tv_nsec);
2722 
2723 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2724 			       inode->i_mtime.tv_sec);
2725 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2726 				inode->i_mtime.tv_nsec);
2727 
2728 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2729 			       inode->i_ctime.tv_sec);
2730 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2731 				inode->i_ctime.tv_nsec);
2732 
2733 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2734 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2735 	btrfs_set_inode_sequence(leaf, item, inode->i_version);
2736 	btrfs_set_inode_transid(leaf, item, trans->transid);
2737 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2738 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2739 	btrfs_set_inode_block_group(leaf, item, 0);
2740 }
2741 
2742 /*
2743  * copy everything in the in-memory inode into the btree.
2744  */
2745 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2746 				struct btrfs_root *root, struct inode *inode)
2747 {
2748 	struct btrfs_inode_item *inode_item;
2749 	struct btrfs_path *path;
2750 	struct extent_buffer *leaf;
2751 	int ret;
2752 
2753 	path = btrfs_alloc_path();
2754 	if (!path)
2755 		return -ENOMEM;
2756 
2757 	path->leave_spinning = 1;
2758 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2759 				 1);
2760 	if (ret) {
2761 		if (ret > 0)
2762 			ret = -ENOENT;
2763 		goto failed;
2764 	}
2765 
2766 	btrfs_unlock_up_safe(path, 1);
2767 	leaf = path->nodes[0];
2768 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2769 				    struct btrfs_inode_item);
2770 
2771 	fill_inode_item(trans, leaf, inode_item, inode);
2772 	btrfs_mark_buffer_dirty(leaf);
2773 	btrfs_set_inode_last_trans(trans, inode);
2774 	ret = 0;
2775 failed:
2776 	btrfs_free_path(path);
2777 	return ret;
2778 }
2779 
2780 /*
2781  * copy everything in the in-memory inode into the btree.
2782  */
2783 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2784 				struct btrfs_root *root, struct inode *inode)
2785 {
2786 	int ret;
2787 
2788 	/*
2789 	 * If the inode is a free space inode, we can deadlock during commit
2790 	 * if we put it into the delayed code.
2791 	 *
2792 	 * The data relocation inode should also be directly updated
2793 	 * without delay
2794 	 */
2795 	if (!btrfs_is_free_space_inode(inode)
2796 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2797 		btrfs_update_root_times(trans, root);
2798 
2799 		ret = btrfs_delayed_update_inode(trans, root, inode);
2800 		if (!ret)
2801 			btrfs_set_inode_last_trans(trans, inode);
2802 		return ret;
2803 	}
2804 
2805 	return btrfs_update_inode_item(trans, root, inode);
2806 }
2807 
2808 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2809 					 struct btrfs_root *root,
2810 					 struct inode *inode)
2811 {
2812 	int ret;
2813 
2814 	ret = btrfs_update_inode(trans, root, inode);
2815 	if (ret == -ENOSPC)
2816 		return btrfs_update_inode_item(trans, root, inode);
2817 	return ret;
2818 }
2819 
2820 /*
2821  * unlink helper that gets used here in inode.c and in the tree logging
2822  * recovery code.  It remove a link in a directory with a given name, and
2823  * also drops the back refs in the inode to the directory
2824  */
2825 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2826 				struct btrfs_root *root,
2827 				struct inode *dir, struct inode *inode,
2828 				const char *name, int name_len)
2829 {
2830 	struct btrfs_path *path;
2831 	int ret = 0;
2832 	struct extent_buffer *leaf;
2833 	struct btrfs_dir_item *di;
2834 	struct btrfs_key key;
2835 	u64 index;
2836 	u64 ino = btrfs_ino(inode);
2837 	u64 dir_ino = btrfs_ino(dir);
2838 
2839 	path = btrfs_alloc_path();
2840 	if (!path) {
2841 		ret = -ENOMEM;
2842 		goto out;
2843 	}
2844 
2845 	path->leave_spinning = 1;
2846 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2847 				    name, name_len, -1);
2848 	if (IS_ERR(di)) {
2849 		ret = PTR_ERR(di);
2850 		goto err;
2851 	}
2852 	if (!di) {
2853 		ret = -ENOENT;
2854 		goto err;
2855 	}
2856 	leaf = path->nodes[0];
2857 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2858 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2859 	if (ret)
2860 		goto err;
2861 	btrfs_release_path(path);
2862 
2863 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2864 				  dir_ino, &index);
2865 	if (ret) {
2866 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2867 		       "inode %llu parent %llu\n", name_len, name,
2868 		       (unsigned long long)ino, (unsigned long long)dir_ino);
2869 		btrfs_abort_transaction(trans, root, ret);
2870 		goto err;
2871 	}
2872 
2873 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2874 	if (ret) {
2875 		btrfs_abort_transaction(trans, root, ret);
2876 		goto err;
2877 	}
2878 
2879 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2880 					 inode, dir_ino);
2881 	if (ret != 0 && ret != -ENOENT) {
2882 		btrfs_abort_transaction(trans, root, ret);
2883 		goto err;
2884 	}
2885 
2886 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2887 					   dir, index);
2888 	if (ret == -ENOENT)
2889 		ret = 0;
2890 err:
2891 	btrfs_free_path(path);
2892 	if (ret)
2893 		goto out;
2894 
2895 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2896 	inode_inc_iversion(inode);
2897 	inode_inc_iversion(dir);
2898 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2899 	ret = btrfs_update_inode(trans, root, dir);
2900 out:
2901 	return ret;
2902 }
2903 
2904 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2905 		       struct btrfs_root *root,
2906 		       struct inode *dir, struct inode *inode,
2907 		       const char *name, int name_len)
2908 {
2909 	int ret;
2910 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2911 	if (!ret) {
2912 		btrfs_drop_nlink(inode);
2913 		ret = btrfs_update_inode(trans, root, inode);
2914 	}
2915 	return ret;
2916 }
2917 
2918 
2919 /* helper to check if there is any shared block in the path */
2920 static int check_path_shared(struct btrfs_root *root,
2921 			     struct btrfs_path *path)
2922 {
2923 	struct extent_buffer *eb;
2924 	int level;
2925 	u64 refs = 1;
2926 
2927 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2928 		int ret;
2929 
2930 		if (!path->nodes[level])
2931 			break;
2932 		eb = path->nodes[level];
2933 		if (!btrfs_block_can_be_shared(root, eb))
2934 			continue;
2935 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2936 					       &refs, NULL);
2937 		if (refs > 1)
2938 			return 1;
2939 	}
2940 	return 0;
2941 }
2942 
2943 /*
2944  * helper to start transaction for unlink and rmdir.
2945  *
2946  * unlink and rmdir are special in btrfs, they do not always free space.
2947  * so in enospc case, we should make sure they will free space before
2948  * allowing them to use the global metadata reservation.
2949  */
2950 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2951 						       struct dentry *dentry)
2952 {
2953 	struct btrfs_trans_handle *trans;
2954 	struct btrfs_root *root = BTRFS_I(dir)->root;
2955 	struct btrfs_path *path;
2956 	struct btrfs_dir_item *di;
2957 	struct inode *inode = dentry->d_inode;
2958 	u64 index;
2959 	int check_link = 1;
2960 	int err = -ENOSPC;
2961 	int ret;
2962 	u64 ino = btrfs_ino(inode);
2963 	u64 dir_ino = btrfs_ino(dir);
2964 
2965 	/*
2966 	 * 1 for the possible orphan item
2967 	 * 1 for the dir item
2968 	 * 1 for the dir index
2969 	 * 1 for the inode ref
2970 	 * 1 for the inode ref in the tree log
2971 	 * 2 for the dir entries in the log
2972 	 * 1 for the inode
2973 	 */
2974 	trans = btrfs_start_transaction(root, 8);
2975 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2976 		return trans;
2977 
2978 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2979 		return ERR_PTR(-ENOSPC);
2980 
2981 	/* check if there is someone else holds reference */
2982 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2983 		return ERR_PTR(-ENOSPC);
2984 
2985 	if (atomic_read(&inode->i_count) > 2)
2986 		return ERR_PTR(-ENOSPC);
2987 
2988 	if (xchg(&root->fs_info->enospc_unlink, 1))
2989 		return ERR_PTR(-ENOSPC);
2990 
2991 	path = btrfs_alloc_path();
2992 	if (!path) {
2993 		root->fs_info->enospc_unlink = 0;
2994 		return ERR_PTR(-ENOMEM);
2995 	}
2996 
2997 	/* 1 for the orphan item */
2998 	trans = btrfs_start_transaction(root, 1);
2999 	if (IS_ERR(trans)) {
3000 		btrfs_free_path(path);
3001 		root->fs_info->enospc_unlink = 0;
3002 		return trans;
3003 	}
3004 
3005 	path->skip_locking = 1;
3006 	path->search_commit_root = 1;
3007 
3008 	ret = btrfs_lookup_inode(trans, root, path,
3009 				&BTRFS_I(dir)->location, 0);
3010 	if (ret < 0) {
3011 		err = ret;
3012 		goto out;
3013 	}
3014 	if (ret == 0) {
3015 		if (check_path_shared(root, path))
3016 			goto out;
3017 	} else {
3018 		check_link = 0;
3019 	}
3020 	btrfs_release_path(path);
3021 
3022 	ret = btrfs_lookup_inode(trans, root, path,
3023 				&BTRFS_I(inode)->location, 0);
3024 	if (ret < 0) {
3025 		err = ret;
3026 		goto out;
3027 	}
3028 	if (ret == 0) {
3029 		if (check_path_shared(root, path))
3030 			goto out;
3031 	} else {
3032 		check_link = 0;
3033 	}
3034 	btrfs_release_path(path);
3035 
3036 	if (ret == 0 && S_ISREG(inode->i_mode)) {
3037 		ret = btrfs_lookup_file_extent(trans, root, path,
3038 					       ino, (u64)-1, 0);
3039 		if (ret < 0) {
3040 			err = ret;
3041 			goto out;
3042 		}
3043 		BUG_ON(ret == 0); /* Corruption */
3044 		if (check_path_shared(root, path))
3045 			goto out;
3046 		btrfs_release_path(path);
3047 	}
3048 
3049 	if (!check_link) {
3050 		err = 0;
3051 		goto out;
3052 	}
3053 
3054 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3055 				dentry->d_name.name, dentry->d_name.len, 0);
3056 	if (IS_ERR(di)) {
3057 		err = PTR_ERR(di);
3058 		goto out;
3059 	}
3060 	if (di) {
3061 		if (check_path_shared(root, path))
3062 			goto out;
3063 	} else {
3064 		err = 0;
3065 		goto out;
3066 	}
3067 	btrfs_release_path(path);
3068 
3069 	ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
3070 					dentry->d_name.len, ino, dir_ino, 0,
3071 					&index);
3072 	if (ret) {
3073 		err = ret;
3074 		goto out;
3075 	}
3076 
3077 	if (check_path_shared(root, path))
3078 		goto out;
3079 
3080 	btrfs_release_path(path);
3081 
3082 	/*
3083 	 * This is a commit root search, if we can lookup inode item and other
3084 	 * relative items in the commit root, it means the transaction of
3085 	 * dir/file creation has been committed, and the dir index item that we
3086 	 * delay to insert has also been inserted into the commit root. So
3087 	 * we needn't worry about the delayed insertion of the dir index item
3088 	 * here.
3089 	 */
3090 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
3091 				dentry->d_name.name, dentry->d_name.len, 0);
3092 	if (IS_ERR(di)) {
3093 		err = PTR_ERR(di);
3094 		goto out;
3095 	}
3096 	BUG_ON(ret == -ENOENT);
3097 	if (check_path_shared(root, path))
3098 		goto out;
3099 
3100 	err = 0;
3101 out:
3102 	btrfs_free_path(path);
3103 	/* Migrate the orphan reservation over */
3104 	if (!err)
3105 		err = btrfs_block_rsv_migrate(trans->block_rsv,
3106 				&root->fs_info->global_block_rsv,
3107 				trans->bytes_reserved);
3108 
3109 	if (err) {
3110 		btrfs_end_transaction(trans, root);
3111 		root->fs_info->enospc_unlink = 0;
3112 		return ERR_PTR(err);
3113 	}
3114 
3115 	trans->block_rsv = &root->fs_info->global_block_rsv;
3116 	return trans;
3117 }
3118 
3119 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3120 			       struct btrfs_root *root)
3121 {
3122 	if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
3123 		btrfs_block_rsv_release(root, trans->block_rsv,
3124 					trans->bytes_reserved);
3125 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3126 		BUG_ON(!root->fs_info->enospc_unlink);
3127 		root->fs_info->enospc_unlink = 0;
3128 	}
3129 	btrfs_end_transaction(trans, root);
3130 }
3131 
3132 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3133 {
3134 	struct btrfs_root *root = BTRFS_I(dir)->root;
3135 	struct btrfs_trans_handle *trans;
3136 	struct inode *inode = dentry->d_inode;
3137 	int ret;
3138 
3139 	trans = __unlink_start_trans(dir, dentry);
3140 	if (IS_ERR(trans))
3141 		return PTR_ERR(trans);
3142 
3143 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3144 
3145 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3146 				 dentry->d_name.name, dentry->d_name.len);
3147 	if (ret)
3148 		goto out;
3149 
3150 	if (inode->i_nlink == 0) {
3151 		ret = btrfs_orphan_add(trans, inode);
3152 		if (ret)
3153 			goto out;
3154 	}
3155 
3156 out:
3157 	__unlink_end_trans(trans, root);
3158 	btrfs_btree_balance_dirty(root);
3159 	return ret;
3160 }
3161 
3162 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3163 			struct btrfs_root *root,
3164 			struct inode *dir, u64 objectid,
3165 			const char *name, int name_len)
3166 {
3167 	struct btrfs_path *path;
3168 	struct extent_buffer *leaf;
3169 	struct btrfs_dir_item *di;
3170 	struct btrfs_key key;
3171 	u64 index;
3172 	int ret;
3173 	u64 dir_ino = btrfs_ino(dir);
3174 
3175 	path = btrfs_alloc_path();
3176 	if (!path)
3177 		return -ENOMEM;
3178 
3179 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3180 				   name, name_len, -1);
3181 	if (IS_ERR_OR_NULL(di)) {
3182 		if (!di)
3183 			ret = -ENOENT;
3184 		else
3185 			ret = PTR_ERR(di);
3186 		goto out;
3187 	}
3188 
3189 	leaf = path->nodes[0];
3190 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3191 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3192 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3193 	if (ret) {
3194 		btrfs_abort_transaction(trans, root, ret);
3195 		goto out;
3196 	}
3197 	btrfs_release_path(path);
3198 
3199 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3200 				 objectid, root->root_key.objectid,
3201 				 dir_ino, &index, name, name_len);
3202 	if (ret < 0) {
3203 		if (ret != -ENOENT) {
3204 			btrfs_abort_transaction(trans, root, ret);
3205 			goto out;
3206 		}
3207 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3208 						 name, name_len);
3209 		if (IS_ERR_OR_NULL(di)) {
3210 			if (!di)
3211 				ret = -ENOENT;
3212 			else
3213 				ret = PTR_ERR(di);
3214 			btrfs_abort_transaction(trans, root, ret);
3215 			goto out;
3216 		}
3217 
3218 		leaf = path->nodes[0];
3219 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3220 		btrfs_release_path(path);
3221 		index = key.offset;
3222 	}
3223 	btrfs_release_path(path);
3224 
3225 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3226 	if (ret) {
3227 		btrfs_abort_transaction(trans, root, ret);
3228 		goto out;
3229 	}
3230 
3231 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3232 	inode_inc_iversion(dir);
3233 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3234 	ret = btrfs_update_inode_fallback(trans, root, dir);
3235 	if (ret)
3236 		btrfs_abort_transaction(trans, root, ret);
3237 out:
3238 	btrfs_free_path(path);
3239 	return ret;
3240 }
3241 
3242 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3243 {
3244 	struct inode *inode = dentry->d_inode;
3245 	int err = 0;
3246 	struct btrfs_root *root = BTRFS_I(dir)->root;
3247 	struct btrfs_trans_handle *trans;
3248 
3249 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3250 		return -ENOTEMPTY;
3251 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3252 		return -EPERM;
3253 
3254 	trans = __unlink_start_trans(dir, dentry);
3255 	if (IS_ERR(trans))
3256 		return PTR_ERR(trans);
3257 
3258 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3259 		err = btrfs_unlink_subvol(trans, root, dir,
3260 					  BTRFS_I(inode)->location.objectid,
3261 					  dentry->d_name.name,
3262 					  dentry->d_name.len);
3263 		goto out;
3264 	}
3265 
3266 	err = btrfs_orphan_add(trans, inode);
3267 	if (err)
3268 		goto out;
3269 
3270 	/* now the directory is empty */
3271 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3272 				 dentry->d_name.name, dentry->d_name.len);
3273 	if (!err)
3274 		btrfs_i_size_write(inode, 0);
3275 out:
3276 	__unlink_end_trans(trans, root);
3277 	btrfs_btree_balance_dirty(root);
3278 
3279 	return err;
3280 }
3281 
3282 /*
3283  * this can truncate away extent items, csum items and directory items.
3284  * It starts at a high offset and removes keys until it can't find
3285  * any higher than new_size
3286  *
3287  * csum items that cross the new i_size are truncated to the new size
3288  * as well.
3289  *
3290  * min_type is the minimum key type to truncate down to.  If set to 0, this
3291  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3292  */
3293 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3294 			       struct btrfs_root *root,
3295 			       struct inode *inode,
3296 			       u64 new_size, u32 min_type)
3297 {
3298 	struct btrfs_path *path;
3299 	struct extent_buffer *leaf;
3300 	struct btrfs_file_extent_item *fi;
3301 	struct btrfs_key key;
3302 	struct btrfs_key found_key;
3303 	u64 extent_start = 0;
3304 	u64 extent_num_bytes = 0;
3305 	u64 extent_offset = 0;
3306 	u64 item_end = 0;
3307 	u64 mask = root->sectorsize - 1;
3308 	u32 found_type = (u8)-1;
3309 	int found_extent;
3310 	int del_item;
3311 	int pending_del_nr = 0;
3312 	int pending_del_slot = 0;
3313 	int extent_type = -1;
3314 	int ret;
3315 	int err = 0;
3316 	u64 ino = btrfs_ino(inode);
3317 
3318 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3319 
3320 	path = btrfs_alloc_path();
3321 	if (!path)
3322 		return -ENOMEM;
3323 	path->reada = -1;
3324 
3325 	/*
3326 	 * We want to drop from the next block forward in case this new size is
3327 	 * not block aligned since we will be keeping the last block of the
3328 	 * extent just the way it is.
3329 	 */
3330 	if (root->ref_cows || root == root->fs_info->tree_root)
3331 		btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
3332 
3333 	/*
3334 	 * This function is also used to drop the items in the log tree before
3335 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3336 	 * it is used to drop the loged items. So we shouldn't kill the delayed
3337 	 * items.
3338 	 */
3339 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3340 		btrfs_kill_delayed_inode_items(inode);
3341 
3342 	key.objectid = ino;
3343 	key.offset = (u64)-1;
3344 	key.type = (u8)-1;
3345 
3346 search_again:
3347 	path->leave_spinning = 1;
3348 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3349 	if (ret < 0) {
3350 		err = ret;
3351 		goto out;
3352 	}
3353 
3354 	if (ret > 0) {
3355 		/* there are no items in the tree for us to truncate, we're
3356 		 * done
3357 		 */
3358 		if (path->slots[0] == 0)
3359 			goto out;
3360 		path->slots[0]--;
3361 	}
3362 
3363 	while (1) {
3364 		fi = NULL;
3365 		leaf = path->nodes[0];
3366 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3367 		found_type = btrfs_key_type(&found_key);
3368 
3369 		if (found_key.objectid != ino)
3370 			break;
3371 
3372 		if (found_type < min_type)
3373 			break;
3374 
3375 		item_end = found_key.offset;
3376 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3377 			fi = btrfs_item_ptr(leaf, path->slots[0],
3378 					    struct btrfs_file_extent_item);
3379 			extent_type = btrfs_file_extent_type(leaf, fi);
3380 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3381 				item_end +=
3382 				    btrfs_file_extent_num_bytes(leaf, fi);
3383 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3384 				item_end += btrfs_file_extent_inline_len(leaf,
3385 									 fi);
3386 			}
3387 			item_end--;
3388 		}
3389 		if (found_type > min_type) {
3390 			del_item = 1;
3391 		} else {
3392 			if (item_end < new_size)
3393 				break;
3394 			if (found_key.offset >= new_size)
3395 				del_item = 1;
3396 			else
3397 				del_item = 0;
3398 		}
3399 		found_extent = 0;
3400 		/* FIXME, shrink the extent if the ref count is only 1 */
3401 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3402 			goto delete;
3403 
3404 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3405 			u64 num_dec;
3406 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3407 			if (!del_item) {
3408 				u64 orig_num_bytes =
3409 					btrfs_file_extent_num_bytes(leaf, fi);
3410 				extent_num_bytes = new_size -
3411 					found_key.offset + root->sectorsize - 1;
3412 				extent_num_bytes = extent_num_bytes &
3413 					~((u64)root->sectorsize - 1);
3414 				btrfs_set_file_extent_num_bytes(leaf, fi,
3415 							 extent_num_bytes);
3416 				num_dec = (orig_num_bytes -
3417 					   extent_num_bytes);
3418 				if (root->ref_cows && extent_start != 0)
3419 					inode_sub_bytes(inode, num_dec);
3420 				btrfs_mark_buffer_dirty(leaf);
3421 			} else {
3422 				extent_num_bytes =
3423 					btrfs_file_extent_disk_num_bytes(leaf,
3424 									 fi);
3425 				extent_offset = found_key.offset -
3426 					btrfs_file_extent_offset(leaf, fi);
3427 
3428 				/* FIXME blocksize != 4096 */
3429 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3430 				if (extent_start != 0) {
3431 					found_extent = 1;
3432 					if (root->ref_cows)
3433 						inode_sub_bytes(inode, num_dec);
3434 				}
3435 			}
3436 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3437 			/*
3438 			 * we can't truncate inline items that have had
3439 			 * special encodings
3440 			 */
3441 			if (!del_item &&
3442 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3443 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3444 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3445 				u32 size = new_size - found_key.offset;
3446 
3447 				if (root->ref_cows) {
3448 					inode_sub_bytes(inode, item_end + 1 -
3449 							new_size);
3450 				}
3451 				size =
3452 				    btrfs_file_extent_calc_inline_size(size);
3453 				btrfs_truncate_item(trans, root, path,
3454 						    size, 1);
3455 			} else if (root->ref_cows) {
3456 				inode_sub_bytes(inode, item_end + 1 -
3457 						found_key.offset);
3458 			}
3459 		}
3460 delete:
3461 		if (del_item) {
3462 			if (!pending_del_nr) {
3463 				/* no pending yet, add ourselves */
3464 				pending_del_slot = path->slots[0];
3465 				pending_del_nr = 1;
3466 			} else if (pending_del_nr &&
3467 				   path->slots[0] + 1 == pending_del_slot) {
3468 				/* hop on the pending chunk */
3469 				pending_del_nr++;
3470 				pending_del_slot = path->slots[0];
3471 			} else {
3472 				BUG();
3473 			}
3474 		} else {
3475 			break;
3476 		}
3477 		if (found_extent && (root->ref_cows ||
3478 				     root == root->fs_info->tree_root)) {
3479 			btrfs_set_path_blocking(path);
3480 			ret = btrfs_free_extent(trans, root, extent_start,
3481 						extent_num_bytes, 0,
3482 						btrfs_header_owner(leaf),
3483 						ino, extent_offset, 0);
3484 			BUG_ON(ret);
3485 		}
3486 
3487 		if (found_type == BTRFS_INODE_ITEM_KEY)
3488 			break;
3489 
3490 		if (path->slots[0] == 0 ||
3491 		    path->slots[0] != pending_del_slot) {
3492 			if (pending_del_nr) {
3493 				ret = btrfs_del_items(trans, root, path,
3494 						pending_del_slot,
3495 						pending_del_nr);
3496 				if (ret) {
3497 					btrfs_abort_transaction(trans,
3498 								root, ret);
3499 					goto error;
3500 				}
3501 				pending_del_nr = 0;
3502 			}
3503 			btrfs_release_path(path);
3504 			goto search_again;
3505 		} else {
3506 			path->slots[0]--;
3507 		}
3508 	}
3509 out:
3510 	if (pending_del_nr) {
3511 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3512 				      pending_del_nr);
3513 		if (ret)
3514 			btrfs_abort_transaction(trans, root, ret);
3515 	}
3516 error:
3517 	btrfs_free_path(path);
3518 	return err;
3519 }
3520 
3521 /*
3522  * btrfs_truncate_page - read, zero a chunk and write a page
3523  * @inode - inode that we're zeroing
3524  * @from - the offset to start zeroing
3525  * @len - the length to zero, 0 to zero the entire range respective to the
3526  *	offset
3527  * @front - zero up to the offset instead of from the offset on
3528  *
3529  * This will find the page for the "from" offset and cow the page and zero the
3530  * part we want to zero.  This is used with truncate and hole punching.
3531  */
3532 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
3533 			int front)
3534 {
3535 	struct address_space *mapping = inode->i_mapping;
3536 	struct btrfs_root *root = BTRFS_I(inode)->root;
3537 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3538 	struct btrfs_ordered_extent *ordered;
3539 	struct extent_state *cached_state = NULL;
3540 	char *kaddr;
3541 	u32 blocksize = root->sectorsize;
3542 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3543 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3544 	struct page *page;
3545 	gfp_t mask = btrfs_alloc_write_mask(mapping);
3546 	int ret = 0;
3547 	u64 page_start;
3548 	u64 page_end;
3549 
3550 	if ((offset & (blocksize - 1)) == 0 &&
3551 	    (!len || ((len & (blocksize - 1)) == 0)))
3552 		goto out;
3553 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3554 	if (ret)
3555 		goto out;
3556 
3557 again:
3558 	page = find_or_create_page(mapping, index, mask);
3559 	if (!page) {
3560 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3561 		ret = -ENOMEM;
3562 		goto out;
3563 	}
3564 
3565 	page_start = page_offset(page);
3566 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3567 
3568 	if (!PageUptodate(page)) {
3569 		ret = btrfs_readpage(NULL, page);
3570 		lock_page(page);
3571 		if (page->mapping != mapping) {
3572 			unlock_page(page);
3573 			page_cache_release(page);
3574 			goto again;
3575 		}
3576 		if (!PageUptodate(page)) {
3577 			ret = -EIO;
3578 			goto out_unlock;
3579 		}
3580 	}
3581 	wait_on_page_writeback(page);
3582 
3583 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3584 	set_page_extent_mapped(page);
3585 
3586 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3587 	if (ordered) {
3588 		unlock_extent_cached(io_tree, page_start, page_end,
3589 				     &cached_state, GFP_NOFS);
3590 		unlock_page(page);
3591 		page_cache_release(page);
3592 		btrfs_start_ordered_extent(inode, ordered, 1);
3593 		btrfs_put_ordered_extent(ordered);
3594 		goto again;
3595 	}
3596 
3597 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3598 			  EXTENT_DIRTY | EXTENT_DELALLOC |
3599 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
3600 			  0, 0, &cached_state, GFP_NOFS);
3601 
3602 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3603 					&cached_state);
3604 	if (ret) {
3605 		unlock_extent_cached(io_tree, page_start, page_end,
3606 				     &cached_state, GFP_NOFS);
3607 		goto out_unlock;
3608 	}
3609 
3610 	if (offset != PAGE_CACHE_SIZE) {
3611 		if (!len)
3612 			len = PAGE_CACHE_SIZE - offset;
3613 		kaddr = kmap(page);
3614 		if (front)
3615 			memset(kaddr, 0, offset);
3616 		else
3617 			memset(kaddr + offset, 0, len);
3618 		flush_dcache_page(page);
3619 		kunmap(page);
3620 	}
3621 	ClearPageChecked(page);
3622 	set_page_dirty(page);
3623 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3624 			     GFP_NOFS);
3625 
3626 out_unlock:
3627 	if (ret)
3628 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3629 	unlock_page(page);
3630 	page_cache_release(page);
3631 out:
3632 	return ret;
3633 }
3634 
3635 /*
3636  * This function puts in dummy file extents for the area we're creating a hole
3637  * for.  So if we are truncating this file to a larger size we need to insert
3638  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
3639  * the range between oldsize and size
3640  */
3641 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3642 {
3643 	struct btrfs_trans_handle *trans;
3644 	struct btrfs_root *root = BTRFS_I(inode)->root;
3645 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3646 	struct extent_map *em = NULL;
3647 	struct extent_state *cached_state = NULL;
3648 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3649 	u64 mask = root->sectorsize - 1;
3650 	u64 hole_start = (oldsize + mask) & ~mask;
3651 	u64 block_end = (size + mask) & ~mask;
3652 	u64 last_byte;
3653 	u64 cur_offset;
3654 	u64 hole_size;
3655 	int err = 0;
3656 
3657 	if (size <= hole_start)
3658 		return 0;
3659 
3660 	while (1) {
3661 		struct btrfs_ordered_extent *ordered;
3662 		btrfs_wait_ordered_range(inode, hole_start,
3663 					 block_end - hole_start);
3664 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3665 				 &cached_state);
3666 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3667 		if (!ordered)
3668 			break;
3669 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3670 				     &cached_state, GFP_NOFS);
3671 		btrfs_put_ordered_extent(ordered);
3672 	}
3673 
3674 	cur_offset = hole_start;
3675 	while (1) {
3676 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3677 				block_end - cur_offset, 0);
3678 		if (IS_ERR(em)) {
3679 			err = PTR_ERR(em);
3680 			em = NULL;
3681 			break;
3682 		}
3683 		last_byte = min(extent_map_end(em), block_end);
3684 		last_byte = (last_byte + mask) & ~mask;
3685 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3686 			struct extent_map *hole_em;
3687 			hole_size = last_byte - cur_offset;
3688 
3689 			trans = btrfs_start_transaction(root, 3);
3690 			if (IS_ERR(trans)) {
3691 				err = PTR_ERR(trans);
3692 				break;
3693 			}
3694 
3695 			err = btrfs_drop_extents(trans, root, inode,
3696 						 cur_offset,
3697 						 cur_offset + hole_size, 1);
3698 			if (err) {
3699 				btrfs_abort_transaction(trans, root, err);
3700 				btrfs_end_transaction(trans, root);
3701 				break;
3702 			}
3703 
3704 			err = btrfs_insert_file_extent(trans, root,
3705 					btrfs_ino(inode), cur_offset, 0,
3706 					0, hole_size, 0, hole_size,
3707 					0, 0, 0);
3708 			if (err) {
3709 				btrfs_abort_transaction(trans, root, err);
3710 				btrfs_end_transaction(trans, root);
3711 				break;
3712 			}
3713 
3714 			btrfs_drop_extent_cache(inode, cur_offset,
3715 						cur_offset + hole_size - 1, 0);
3716 			hole_em = alloc_extent_map();
3717 			if (!hole_em) {
3718 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3719 					&BTRFS_I(inode)->runtime_flags);
3720 				goto next;
3721 			}
3722 			hole_em->start = cur_offset;
3723 			hole_em->len = hole_size;
3724 			hole_em->orig_start = cur_offset;
3725 
3726 			hole_em->block_start = EXTENT_MAP_HOLE;
3727 			hole_em->block_len = 0;
3728 			hole_em->orig_block_len = 0;
3729 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
3730 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
3731 			hole_em->generation = trans->transid;
3732 
3733 			while (1) {
3734 				write_lock(&em_tree->lock);
3735 				err = add_extent_mapping(em_tree, hole_em);
3736 				if (!err)
3737 					list_move(&hole_em->list,
3738 						  &em_tree->modified_extents);
3739 				write_unlock(&em_tree->lock);
3740 				if (err != -EEXIST)
3741 					break;
3742 				btrfs_drop_extent_cache(inode, cur_offset,
3743 							cur_offset +
3744 							hole_size - 1, 0);
3745 			}
3746 			free_extent_map(hole_em);
3747 next:
3748 			btrfs_update_inode(trans, root, inode);
3749 			btrfs_end_transaction(trans, root);
3750 		}
3751 		free_extent_map(em);
3752 		em = NULL;
3753 		cur_offset = last_byte;
3754 		if (cur_offset >= block_end)
3755 			break;
3756 	}
3757 
3758 	free_extent_map(em);
3759 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3760 			     GFP_NOFS);
3761 	return err;
3762 }
3763 
3764 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
3765 {
3766 	struct btrfs_root *root = BTRFS_I(inode)->root;
3767 	struct btrfs_trans_handle *trans;
3768 	loff_t oldsize = i_size_read(inode);
3769 	loff_t newsize = attr->ia_size;
3770 	int mask = attr->ia_valid;
3771 	int ret;
3772 
3773 	if (newsize == oldsize)
3774 		return 0;
3775 
3776 	/*
3777 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
3778 	 * special case where we need to update the times despite not having
3779 	 * these flags set.  For all other operations the VFS set these flags
3780 	 * explicitly if it wants a timestamp update.
3781 	 */
3782 	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
3783 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
3784 
3785 	if (newsize > oldsize) {
3786 		truncate_pagecache(inode, oldsize, newsize);
3787 		ret = btrfs_cont_expand(inode, oldsize, newsize);
3788 		if (ret)
3789 			return ret;
3790 
3791 		trans = btrfs_start_transaction(root, 1);
3792 		if (IS_ERR(trans))
3793 			return PTR_ERR(trans);
3794 
3795 		i_size_write(inode, newsize);
3796 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3797 		ret = btrfs_update_inode(trans, root, inode);
3798 		btrfs_end_transaction(trans, root);
3799 	} else {
3800 
3801 		/*
3802 		 * We're truncating a file that used to have good data down to
3803 		 * zero. Make sure it gets into the ordered flush list so that
3804 		 * any new writes get down to disk quickly.
3805 		 */
3806 		if (newsize == 0)
3807 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3808 				&BTRFS_I(inode)->runtime_flags);
3809 
3810 		/*
3811 		 * 1 for the orphan item we're going to add
3812 		 * 1 for the orphan item deletion.
3813 		 */
3814 		trans = btrfs_start_transaction(root, 2);
3815 		if (IS_ERR(trans))
3816 			return PTR_ERR(trans);
3817 
3818 		/*
3819 		 * We need to do this in case we fail at _any_ point during the
3820 		 * actual truncate.  Once we do the truncate_setsize we could
3821 		 * invalidate pages which forces any outstanding ordered io to
3822 		 * be instantly completed which will give us extents that need
3823 		 * to be truncated.  If we fail to get an orphan inode down we
3824 		 * could have left over extents that were never meant to live,
3825 		 * so we need to garuntee from this point on that everything
3826 		 * will be consistent.
3827 		 */
3828 		ret = btrfs_orphan_add(trans, inode);
3829 		btrfs_end_transaction(trans, root);
3830 		if (ret)
3831 			return ret;
3832 
3833 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
3834 		truncate_setsize(inode, newsize);
3835 		ret = btrfs_truncate(inode);
3836 		if (ret && inode->i_nlink)
3837 			btrfs_orphan_del(NULL, inode);
3838 	}
3839 
3840 	return ret;
3841 }
3842 
3843 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3844 {
3845 	struct inode *inode = dentry->d_inode;
3846 	struct btrfs_root *root = BTRFS_I(inode)->root;
3847 	int err;
3848 
3849 	if (btrfs_root_readonly(root))
3850 		return -EROFS;
3851 
3852 	err = inode_change_ok(inode, attr);
3853 	if (err)
3854 		return err;
3855 
3856 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3857 		err = btrfs_setsize(inode, attr);
3858 		if (err)
3859 			return err;
3860 	}
3861 
3862 	if (attr->ia_valid) {
3863 		setattr_copy(inode, attr);
3864 		inode_inc_iversion(inode);
3865 		err = btrfs_dirty_inode(inode);
3866 
3867 		if (!err && attr->ia_valid & ATTR_MODE)
3868 			err = btrfs_acl_chmod(inode);
3869 	}
3870 
3871 	return err;
3872 }
3873 
3874 void btrfs_evict_inode(struct inode *inode)
3875 {
3876 	struct btrfs_trans_handle *trans;
3877 	struct btrfs_root *root = BTRFS_I(inode)->root;
3878 	struct btrfs_block_rsv *rsv, *global_rsv;
3879 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3880 	int ret;
3881 
3882 	trace_btrfs_inode_evict(inode);
3883 
3884 	truncate_inode_pages(&inode->i_data, 0);
3885 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3886 			       btrfs_is_free_space_inode(inode)))
3887 		goto no_delete;
3888 
3889 	if (is_bad_inode(inode)) {
3890 		btrfs_orphan_del(NULL, inode);
3891 		goto no_delete;
3892 	}
3893 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3894 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3895 
3896 	if (root->fs_info->log_root_recovering) {
3897 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3898 				 &BTRFS_I(inode)->runtime_flags));
3899 		goto no_delete;
3900 	}
3901 
3902 	if (inode->i_nlink > 0) {
3903 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3904 		goto no_delete;
3905 	}
3906 
3907 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3908 	if (!rsv) {
3909 		btrfs_orphan_del(NULL, inode);
3910 		goto no_delete;
3911 	}
3912 	rsv->size = min_size;
3913 	rsv->failfast = 1;
3914 	global_rsv = &root->fs_info->global_block_rsv;
3915 
3916 	btrfs_i_size_write(inode, 0);
3917 
3918 	/*
3919 	 * This is a bit simpler than btrfs_truncate since we've already
3920 	 * reserved our space for our orphan item in the unlink, so we just
3921 	 * need to reserve some slack space in case we add bytes and update
3922 	 * inode item when doing the truncate.
3923 	 */
3924 	while (1) {
3925 		ret = btrfs_block_rsv_refill(root, rsv, min_size,
3926 					     BTRFS_RESERVE_FLUSH_LIMIT);
3927 
3928 		/*
3929 		 * Try and steal from the global reserve since we will
3930 		 * likely not use this space anyway, we want to try as
3931 		 * hard as possible to get this to work.
3932 		 */
3933 		if (ret)
3934 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
3935 
3936 		if (ret) {
3937 			printk(KERN_WARNING "Could not get space for a "
3938 			       "delete, will truncate on mount %d\n", ret);
3939 			btrfs_orphan_del(NULL, inode);
3940 			btrfs_free_block_rsv(root, rsv);
3941 			goto no_delete;
3942 		}
3943 
3944 		trans = btrfs_start_transaction_lflush(root, 1);
3945 		if (IS_ERR(trans)) {
3946 			btrfs_orphan_del(NULL, inode);
3947 			btrfs_free_block_rsv(root, rsv);
3948 			goto no_delete;
3949 		}
3950 
3951 		trans->block_rsv = rsv;
3952 
3953 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3954 		if (ret != -ENOSPC)
3955 			break;
3956 
3957 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3958 		ret = btrfs_update_inode(trans, root, inode);
3959 		BUG_ON(ret);
3960 
3961 		btrfs_end_transaction(trans, root);
3962 		trans = NULL;
3963 		btrfs_btree_balance_dirty(root);
3964 	}
3965 
3966 	btrfs_free_block_rsv(root, rsv);
3967 
3968 	if (ret == 0) {
3969 		trans->block_rsv = root->orphan_block_rsv;
3970 		ret = btrfs_orphan_del(trans, inode);
3971 		BUG_ON(ret);
3972 	}
3973 
3974 	trans->block_rsv = &root->fs_info->trans_block_rsv;
3975 	if (!(root == root->fs_info->tree_root ||
3976 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3977 		btrfs_return_ino(root, btrfs_ino(inode));
3978 
3979 	btrfs_end_transaction(trans, root);
3980 	btrfs_btree_balance_dirty(root);
3981 no_delete:
3982 	clear_inode(inode);
3983 	return;
3984 }
3985 
3986 /*
3987  * this returns the key found in the dir entry in the location pointer.
3988  * If no dir entries were found, location->objectid is 0.
3989  */
3990 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3991 			       struct btrfs_key *location)
3992 {
3993 	const char *name = dentry->d_name.name;
3994 	int namelen = dentry->d_name.len;
3995 	struct btrfs_dir_item *di;
3996 	struct btrfs_path *path;
3997 	struct btrfs_root *root = BTRFS_I(dir)->root;
3998 	int ret = 0;
3999 
4000 	path = btrfs_alloc_path();
4001 	if (!path)
4002 		return -ENOMEM;
4003 
4004 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4005 				    namelen, 0);
4006 	if (IS_ERR(di))
4007 		ret = PTR_ERR(di);
4008 
4009 	if (IS_ERR_OR_NULL(di))
4010 		goto out_err;
4011 
4012 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4013 out:
4014 	btrfs_free_path(path);
4015 	return ret;
4016 out_err:
4017 	location->objectid = 0;
4018 	goto out;
4019 }
4020 
4021 /*
4022  * when we hit a tree root in a directory, the btrfs part of the inode
4023  * needs to be changed to reflect the root directory of the tree root.  This
4024  * is kind of like crossing a mount point.
4025  */
4026 static int fixup_tree_root_location(struct btrfs_root *root,
4027 				    struct inode *dir,
4028 				    struct dentry *dentry,
4029 				    struct btrfs_key *location,
4030 				    struct btrfs_root **sub_root)
4031 {
4032 	struct btrfs_path *path;
4033 	struct btrfs_root *new_root;
4034 	struct btrfs_root_ref *ref;
4035 	struct extent_buffer *leaf;
4036 	int ret;
4037 	int err = 0;
4038 
4039 	path = btrfs_alloc_path();
4040 	if (!path) {
4041 		err = -ENOMEM;
4042 		goto out;
4043 	}
4044 
4045 	err = -ENOENT;
4046 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
4047 				  BTRFS_I(dir)->root->root_key.objectid,
4048 				  location->objectid);
4049 	if (ret) {
4050 		if (ret < 0)
4051 			err = ret;
4052 		goto out;
4053 	}
4054 
4055 	leaf = path->nodes[0];
4056 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4057 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4058 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4059 		goto out;
4060 
4061 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4062 				   (unsigned long)(ref + 1),
4063 				   dentry->d_name.len);
4064 	if (ret)
4065 		goto out;
4066 
4067 	btrfs_release_path(path);
4068 
4069 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4070 	if (IS_ERR(new_root)) {
4071 		err = PTR_ERR(new_root);
4072 		goto out;
4073 	}
4074 
4075 	if (btrfs_root_refs(&new_root->root_item) == 0) {
4076 		err = -ENOENT;
4077 		goto out;
4078 	}
4079 
4080 	*sub_root = new_root;
4081 	location->objectid = btrfs_root_dirid(&new_root->root_item);
4082 	location->type = BTRFS_INODE_ITEM_KEY;
4083 	location->offset = 0;
4084 	err = 0;
4085 out:
4086 	btrfs_free_path(path);
4087 	return err;
4088 }
4089 
4090 static void inode_tree_add(struct inode *inode)
4091 {
4092 	struct btrfs_root *root = BTRFS_I(inode)->root;
4093 	struct btrfs_inode *entry;
4094 	struct rb_node **p;
4095 	struct rb_node *parent;
4096 	u64 ino = btrfs_ino(inode);
4097 again:
4098 	p = &root->inode_tree.rb_node;
4099 	parent = NULL;
4100 
4101 	if (inode_unhashed(inode))
4102 		return;
4103 
4104 	spin_lock(&root->inode_lock);
4105 	while (*p) {
4106 		parent = *p;
4107 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4108 
4109 		if (ino < btrfs_ino(&entry->vfs_inode))
4110 			p = &parent->rb_left;
4111 		else if (ino > btrfs_ino(&entry->vfs_inode))
4112 			p = &parent->rb_right;
4113 		else {
4114 			WARN_ON(!(entry->vfs_inode.i_state &
4115 				  (I_WILL_FREE | I_FREEING)));
4116 			rb_erase(parent, &root->inode_tree);
4117 			RB_CLEAR_NODE(parent);
4118 			spin_unlock(&root->inode_lock);
4119 			goto again;
4120 		}
4121 	}
4122 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
4123 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4124 	spin_unlock(&root->inode_lock);
4125 }
4126 
4127 static void inode_tree_del(struct inode *inode)
4128 {
4129 	struct btrfs_root *root = BTRFS_I(inode)->root;
4130 	int empty = 0;
4131 
4132 	spin_lock(&root->inode_lock);
4133 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4134 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4135 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4136 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4137 	}
4138 	spin_unlock(&root->inode_lock);
4139 
4140 	/*
4141 	 * Free space cache has inodes in the tree root, but the tree root has a
4142 	 * root_refs of 0, so this could end up dropping the tree root as a
4143 	 * snapshot, so we need the extra !root->fs_info->tree_root check to
4144 	 * make sure we don't drop it.
4145 	 */
4146 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4147 	    root != root->fs_info->tree_root) {
4148 		synchronize_srcu(&root->fs_info->subvol_srcu);
4149 		spin_lock(&root->inode_lock);
4150 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4151 		spin_unlock(&root->inode_lock);
4152 		if (empty)
4153 			btrfs_add_dead_root(root);
4154 	}
4155 }
4156 
4157 void btrfs_invalidate_inodes(struct btrfs_root *root)
4158 {
4159 	struct rb_node *node;
4160 	struct rb_node *prev;
4161 	struct btrfs_inode *entry;
4162 	struct inode *inode;
4163 	u64 objectid = 0;
4164 
4165 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4166 
4167 	spin_lock(&root->inode_lock);
4168 again:
4169 	node = root->inode_tree.rb_node;
4170 	prev = NULL;
4171 	while (node) {
4172 		prev = node;
4173 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4174 
4175 		if (objectid < btrfs_ino(&entry->vfs_inode))
4176 			node = node->rb_left;
4177 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4178 			node = node->rb_right;
4179 		else
4180 			break;
4181 	}
4182 	if (!node) {
4183 		while (prev) {
4184 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4185 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4186 				node = prev;
4187 				break;
4188 			}
4189 			prev = rb_next(prev);
4190 		}
4191 	}
4192 	while (node) {
4193 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4194 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4195 		inode = igrab(&entry->vfs_inode);
4196 		if (inode) {
4197 			spin_unlock(&root->inode_lock);
4198 			if (atomic_read(&inode->i_count) > 1)
4199 				d_prune_aliases(inode);
4200 			/*
4201 			 * btrfs_drop_inode will have it removed from
4202 			 * the inode cache when its usage count
4203 			 * hits zero.
4204 			 */
4205 			iput(inode);
4206 			cond_resched();
4207 			spin_lock(&root->inode_lock);
4208 			goto again;
4209 		}
4210 
4211 		if (cond_resched_lock(&root->inode_lock))
4212 			goto again;
4213 
4214 		node = rb_next(node);
4215 	}
4216 	spin_unlock(&root->inode_lock);
4217 }
4218 
4219 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4220 {
4221 	struct btrfs_iget_args *args = p;
4222 	inode->i_ino = args->ino;
4223 	BTRFS_I(inode)->root = args->root;
4224 	return 0;
4225 }
4226 
4227 static int btrfs_find_actor(struct inode *inode, void *opaque)
4228 {
4229 	struct btrfs_iget_args *args = opaque;
4230 	return args->ino == btrfs_ino(inode) &&
4231 		args->root == BTRFS_I(inode)->root;
4232 }
4233 
4234 static struct inode *btrfs_iget_locked(struct super_block *s,
4235 				       u64 objectid,
4236 				       struct btrfs_root *root)
4237 {
4238 	struct inode *inode;
4239 	struct btrfs_iget_args args;
4240 	args.ino = objectid;
4241 	args.root = root;
4242 
4243 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4244 			     btrfs_init_locked_inode,
4245 			     (void *)&args);
4246 	return inode;
4247 }
4248 
4249 /* Get an inode object given its location and corresponding root.
4250  * Returns in *is_new if the inode was read from disk
4251  */
4252 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4253 			 struct btrfs_root *root, int *new)
4254 {
4255 	struct inode *inode;
4256 
4257 	inode = btrfs_iget_locked(s, location->objectid, root);
4258 	if (!inode)
4259 		return ERR_PTR(-ENOMEM);
4260 
4261 	if (inode->i_state & I_NEW) {
4262 		BTRFS_I(inode)->root = root;
4263 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4264 		btrfs_read_locked_inode(inode);
4265 		if (!is_bad_inode(inode)) {
4266 			inode_tree_add(inode);
4267 			unlock_new_inode(inode);
4268 			if (new)
4269 				*new = 1;
4270 		} else {
4271 			unlock_new_inode(inode);
4272 			iput(inode);
4273 			inode = ERR_PTR(-ESTALE);
4274 		}
4275 	}
4276 
4277 	return inode;
4278 }
4279 
4280 static struct inode *new_simple_dir(struct super_block *s,
4281 				    struct btrfs_key *key,
4282 				    struct btrfs_root *root)
4283 {
4284 	struct inode *inode = new_inode(s);
4285 
4286 	if (!inode)
4287 		return ERR_PTR(-ENOMEM);
4288 
4289 	BTRFS_I(inode)->root = root;
4290 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4291 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4292 
4293 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4294 	inode->i_op = &btrfs_dir_ro_inode_operations;
4295 	inode->i_fop = &simple_dir_operations;
4296 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4297 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4298 
4299 	return inode;
4300 }
4301 
4302 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4303 {
4304 	struct inode *inode;
4305 	struct btrfs_root *root = BTRFS_I(dir)->root;
4306 	struct btrfs_root *sub_root = root;
4307 	struct btrfs_key location;
4308 	int index;
4309 	int ret = 0;
4310 
4311 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4312 		return ERR_PTR(-ENAMETOOLONG);
4313 
4314 	ret = btrfs_inode_by_name(dir, dentry, &location);
4315 	if (ret < 0)
4316 		return ERR_PTR(ret);
4317 
4318 	if (location.objectid == 0)
4319 		return NULL;
4320 
4321 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4322 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4323 		return inode;
4324 	}
4325 
4326 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4327 
4328 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4329 	ret = fixup_tree_root_location(root, dir, dentry,
4330 				       &location, &sub_root);
4331 	if (ret < 0) {
4332 		if (ret != -ENOENT)
4333 			inode = ERR_PTR(ret);
4334 		else
4335 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4336 	} else {
4337 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4338 	}
4339 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4340 
4341 	if (!IS_ERR(inode) && root != sub_root) {
4342 		down_read(&root->fs_info->cleanup_work_sem);
4343 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4344 			ret = btrfs_orphan_cleanup(sub_root);
4345 		up_read(&root->fs_info->cleanup_work_sem);
4346 		if (ret)
4347 			inode = ERR_PTR(ret);
4348 	}
4349 
4350 	return inode;
4351 }
4352 
4353 static int btrfs_dentry_delete(const struct dentry *dentry)
4354 {
4355 	struct btrfs_root *root;
4356 	struct inode *inode = dentry->d_inode;
4357 
4358 	if (!inode && !IS_ROOT(dentry))
4359 		inode = dentry->d_parent->d_inode;
4360 
4361 	if (inode) {
4362 		root = BTRFS_I(inode)->root;
4363 		if (btrfs_root_refs(&root->root_item) == 0)
4364 			return 1;
4365 
4366 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4367 			return 1;
4368 	}
4369 	return 0;
4370 }
4371 
4372 static void btrfs_dentry_release(struct dentry *dentry)
4373 {
4374 	if (dentry->d_fsdata)
4375 		kfree(dentry->d_fsdata);
4376 }
4377 
4378 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4379 				   unsigned int flags)
4380 {
4381 	struct dentry *ret;
4382 
4383 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4384 	return ret;
4385 }
4386 
4387 unsigned char btrfs_filetype_table[] = {
4388 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4389 };
4390 
4391 static int btrfs_real_readdir(struct file *filp, void *dirent,
4392 			      filldir_t filldir)
4393 {
4394 	struct inode *inode = filp->f_dentry->d_inode;
4395 	struct btrfs_root *root = BTRFS_I(inode)->root;
4396 	struct btrfs_item *item;
4397 	struct btrfs_dir_item *di;
4398 	struct btrfs_key key;
4399 	struct btrfs_key found_key;
4400 	struct btrfs_path *path;
4401 	struct list_head ins_list;
4402 	struct list_head del_list;
4403 	int ret;
4404 	struct extent_buffer *leaf;
4405 	int slot;
4406 	unsigned char d_type;
4407 	int over = 0;
4408 	u32 di_cur;
4409 	u32 di_total;
4410 	u32 di_len;
4411 	int key_type = BTRFS_DIR_INDEX_KEY;
4412 	char tmp_name[32];
4413 	char *name_ptr;
4414 	int name_len;
4415 	int is_curr = 0;	/* filp->f_pos points to the current index? */
4416 
4417 	/* FIXME, use a real flag for deciding about the key type */
4418 	if (root->fs_info->tree_root == root)
4419 		key_type = BTRFS_DIR_ITEM_KEY;
4420 
4421 	/* special case for "." */
4422 	if (filp->f_pos == 0) {
4423 		over = filldir(dirent, ".", 1,
4424 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
4425 		if (over)
4426 			return 0;
4427 		filp->f_pos = 1;
4428 	}
4429 	/* special case for .., just use the back ref */
4430 	if (filp->f_pos == 1) {
4431 		u64 pino = parent_ino(filp->f_path.dentry);
4432 		over = filldir(dirent, "..", 2,
4433 			       filp->f_pos, pino, DT_DIR);
4434 		if (over)
4435 			return 0;
4436 		filp->f_pos = 2;
4437 	}
4438 	path = btrfs_alloc_path();
4439 	if (!path)
4440 		return -ENOMEM;
4441 
4442 	path->reada = 1;
4443 
4444 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4445 		INIT_LIST_HEAD(&ins_list);
4446 		INIT_LIST_HEAD(&del_list);
4447 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
4448 	}
4449 
4450 	btrfs_set_key_type(&key, key_type);
4451 	key.offset = filp->f_pos;
4452 	key.objectid = btrfs_ino(inode);
4453 
4454 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4455 	if (ret < 0)
4456 		goto err;
4457 
4458 	while (1) {
4459 		leaf = path->nodes[0];
4460 		slot = path->slots[0];
4461 		if (slot >= btrfs_header_nritems(leaf)) {
4462 			ret = btrfs_next_leaf(root, path);
4463 			if (ret < 0)
4464 				goto err;
4465 			else if (ret > 0)
4466 				break;
4467 			continue;
4468 		}
4469 
4470 		item = btrfs_item_nr(leaf, slot);
4471 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4472 
4473 		if (found_key.objectid != key.objectid)
4474 			break;
4475 		if (btrfs_key_type(&found_key) != key_type)
4476 			break;
4477 		if (found_key.offset < filp->f_pos)
4478 			goto next;
4479 		if (key_type == BTRFS_DIR_INDEX_KEY &&
4480 		    btrfs_should_delete_dir_index(&del_list,
4481 						  found_key.offset))
4482 			goto next;
4483 
4484 		filp->f_pos = found_key.offset;
4485 		is_curr = 1;
4486 
4487 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4488 		di_cur = 0;
4489 		di_total = btrfs_item_size(leaf, item);
4490 
4491 		while (di_cur < di_total) {
4492 			struct btrfs_key location;
4493 
4494 			if (verify_dir_item(root, leaf, di))
4495 				break;
4496 
4497 			name_len = btrfs_dir_name_len(leaf, di);
4498 			if (name_len <= sizeof(tmp_name)) {
4499 				name_ptr = tmp_name;
4500 			} else {
4501 				name_ptr = kmalloc(name_len, GFP_NOFS);
4502 				if (!name_ptr) {
4503 					ret = -ENOMEM;
4504 					goto err;
4505 				}
4506 			}
4507 			read_extent_buffer(leaf, name_ptr,
4508 					   (unsigned long)(di + 1), name_len);
4509 
4510 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4511 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4512 
4513 
4514 			/* is this a reference to our own snapshot? If so
4515 			 * skip it.
4516 			 *
4517 			 * In contrast to old kernels, we insert the snapshot's
4518 			 * dir item and dir index after it has been created, so
4519 			 * we won't find a reference to our own snapshot. We
4520 			 * still keep the following code for backward
4521 			 * compatibility.
4522 			 */
4523 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4524 			    location.objectid == root->root_key.objectid) {
4525 				over = 0;
4526 				goto skip;
4527 			}
4528 			over = filldir(dirent, name_ptr, name_len,
4529 				       found_key.offset, location.objectid,
4530 				       d_type);
4531 
4532 skip:
4533 			if (name_ptr != tmp_name)
4534 				kfree(name_ptr);
4535 
4536 			if (over)
4537 				goto nopos;
4538 			di_len = btrfs_dir_name_len(leaf, di) +
4539 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4540 			di_cur += di_len;
4541 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4542 		}
4543 next:
4544 		path->slots[0]++;
4545 	}
4546 
4547 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4548 		if (is_curr)
4549 			filp->f_pos++;
4550 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4551 						      &ins_list);
4552 		if (ret)
4553 			goto nopos;
4554 	}
4555 
4556 	/* Reached end of directory/root. Bump pos past the last item. */
4557 	if (key_type == BTRFS_DIR_INDEX_KEY)
4558 		/*
4559 		 * 32-bit glibc will use getdents64, but then strtol -
4560 		 * so the last number we can serve is this.
4561 		 */
4562 		filp->f_pos = 0x7fffffff;
4563 	else
4564 		filp->f_pos++;
4565 nopos:
4566 	ret = 0;
4567 err:
4568 	if (key_type == BTRFS_DIR_INDEX_KEY)
4569 		btrfs_put_delayed_items(&ins_list, &del_list);
4570 	btrfs_free_path(path);
4571 	return ret;
4572 }
4573 
4574 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4575 {
4576 	struct btrfs_root *root = BTRFS_I(inode)->root;
4577 	struct btrfs_trans_handle *trans;
4578 	int ret = 0;
4579 	bool nolock = false;
4580 
4581 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4582 		return 0;
4583 
4584 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
4585 		nolock = true;
4586 
4587 	if (wbc->sync_mode == WB_SYNC_ALL) {
4588 		if (nolock)
4589 			trans = btrfs_join_transaction_nolock(root);
4590 		else
4591 			trans = btrfs_join_transaction(root);
4592 		if (IS_ERR(trans))
4593 			return PTR_ERR(trans);
4594 		ret = btrfs_commit_transaction(trans, root);
4595 	}
4596 	return ret;
4597 }
4598 
4599 /*
4600  * This is somewhat expensive, updating the tree every time the
4601  * inode changes.  But, it is most likely to find the inode in cache.
4602  * FIXME, needs more benchmarking...there are no reasons other than performance
4603  * to keep or drop this code.
4604  */
4605 int btrfs_dirty_inode(struct inode *inode)
4606 {
4607 	struct btrfs_root *root = BTRFS_I(inode)->root;
4608 	struct btrfs_trans_handle *trans;
4609 	int ret;
4610 
4611 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4612 		return 0;
4613 
4614 	trans = btrfs_join_transaction(root);
4615 	if (IS_ERR(trans))
4616 		return PTR_ERR(trans);
4617 
4618 	ret = btrfs_update_inode(trans, root, inode);
4619 	if (ret && ret == -ENOSPC) {
4620 		/* whoops, lets try again with the full transaction */
4621 		btrfs_end_transaction(trans, root);
4622 		trans = btrfs_start_transaction(root, 1);
4623 		if (IS_ERR(trans))
4624 			return PTR_ERR(trans);
4625 
4626 		ret = btrfs_update_inode(trans, root, inode);
4627 	}
4628 	btrfs_end_transaction(trans, root);
4629 	if (BTRFS_I(inode)->delayed_node)
4630 		btrfs_balance_delayed_items(root);
4631 
4632 	return ret;
4633 }
4634 
4635 /*
4636  * This is a copy of file_update_time.  We need this so we can return error on
4637  * ENOSPC for updating the inode in the case of file write and mmap writes.
4638  */
4639 static int btrfs_update_time(struct inode *inode, struct timespec *now,
4640 			     int flags)
4641 {
4642 	struct btrfs_root *root = BTRFS_I(inode)->root;
4643 
4644 	if (btrfs_root_readonly(root))
4645 		return -EROFS;
4646 
4647 	if (flags & S_VERSION)
4648 		inode_inc_iversion(inode);
4649 	if (flags & S_CTIME)
4650 		inode->i_ctime = *now;
4651 	if (flags & S_MTIME)
4652 		inode->i_mtime = *now;
4653 	if (flags & S_ATIME)
4654 		inode->i_atime = *now;
4655 	return btrfs_dirty_inode(inode);
4656 }
4657 
4658 /*
4659  * find the highest existing sequence number in a directory
4660  * and then set the in-memory index_cnt variable to reflect
4661  * free sequence numbers
4662  */
4663 static int btrfs_set_inode_index_count(struct inode *inode)
4664 {
4665 	struct btrfs_root *root = BTRFS_I(inode)->root;
4666 	struct btrfs_key key, found_key;
4667 	struct btrfs_path *path;
4668 	struct extent_buffer *leaf;
4669 	int ret;
4670 
4671 	key.objectid = btrfs_ino(inode);
4672 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4673 	key.offset = (u64)-1;
4674 
4675 	path = btrfs_alloc_path();
4676 	if (!path)
4677 		return -ENOMEM;
4678 
4679 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4680 	if (ret < 0)
4681 		goto out;
4682 	/* FIXME: we should be able to handle this */
4683 	if (ret == 0)
4684 		goto out;
4685 	ret = 0;
4686 
4687 	/*
4688 	 * MAGIC NUMBER EXPLANATION:
4689 	 * since we search a directory based on f_pos we have to start at 2
4690 	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4691 	 * else has to start at 2
4692 	 */
4693 	if (path->slots[0] == 0) {
4694 		BTRFS_I(inode)->index_cnt = 2;
4695 		goto out;
4696 	}
4697 
4698 	path->slots[0]--;
4699 
4700 	leaf = path->nodes[0];
4701 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4702 
4703 	if (found_key.objectid != btrfs_ino(inode) ||
4704 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4705 		BTRFS_I(inode)->index_cnt = 2;
4706 		goto out;
4707 	}
4708 
4709 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4710 out:
4711 	btrfs_free_path(path);
4712 	return ret;
4713 }
4714 
4715 /*
4716  * helper to find a free sequence number in a given directory.  This current
4717  * code is very simple, later versions will do smarter things in the btree
4718  */
4719 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4720 {
4721 	int ret = 0;
4722 
4723 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4724 		ret = btrfs_inode_delayed_dir_index_count(dir);
4725 		if (ret) {
4726 			ret = btrfs_set_inode_index_count(dir);
4727 			if (ret)
4728 				return ret;
4729 		}
4730 	}
4731 
4732 	*index = BTRFS_I(dir)->index_cnt;
4733 	BTRFS_I(dir)->index_cnt++;
4734 
4735 	return ret;
4736 }
4737 
4738 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4739 				     struct btrfs_root *root,
4740 				     struct inode *dir,
4741 				     const char *name, int name_len,
4742 				     u64 ref_objectid, u64 objectid,
4743 				     umode_t mode, u64 *index)
4744 {
4745 	struct inode *inode;
4746 	struct btrfs_inode_item *inode_item;
4747 	struct btrfs_key *location;
4748 	struct btrfs_path *path;
4749 	struct btrfs_inode_ref *ref;
4750 	struct btrfs_key key[2];
4751 	u32 sizes[2];
4752 	unsigned long ptr;
4753 	int ret;
4754 	int owner;
4755 
4756 	path = btrfs_alloc_path();
4757 	if (!path)
4758 		return ERR_PTR(-ENOMEM);
4759 
4760 	inode = new_inode(root->fs_info->sb);
4761 	if (!inode) {
4762 		btrfs_free_path(path);
4763 		return ERR_PTR(-ENOMEM);
4764 	}
4765 
4766 	/*
4767 	 * we have to initialize this early, so we can reclaim the inode
4768 	 * number if we fail afterwards in this function.
4769 	 */
4770 	inode->i_ino = objectid;
4771 
4772 	if (dir) {
4773 		trace_btrfs_inode_request(dir);
4774 
4775 		ret = btrfs_set_inode_index(dir, index);
4776 		if (ret) {
4777 			btrfs_free_path(path);
4778 			iput(inode);
4779 			return ERR_PTR(ret);
4780 		}
4781 	}
4782 	/*
4783 	 * index_cnt is ignored for everything but a dir,
4784 	 * btrfs_get_inode_index_count has an explanation for the magic
4785 	 * number
4786 	 */
4787 	BTRFS_I(inode)->index_cnt = 2;
4788 	BTRFS_I(inode)->root = root;
4789 	BTRFS_I(inode)->generation = trans->transid;
4790 	inode->i_generation = BTRFS_I(inode)->generation;
4791 
4792 	/*
4793 	 * We could have gotten an inode number from somebody who was fsynced
4794 	 * and then removed in this same transaction, so let's just set full
4795 	 * sync since it will be a full sync anyway and this will blow away the
4796 	 * old info in the log.
4797 	 */
4798 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
4799 
4800 	if (S_ISDIR(mode))
4801 		owner = 0;
4802 	else
4803 		owner = 1;
4804 
4805 	key[0].objectid = objectid;
4806 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4807 	key[0].offset = 0;
4808 
4809 	/*
4810 	 * Start new inodes with an inode_ref. This is slightly more
4811 	 * efficient for small numbers of hard links since they will
4812 	 * be packed into one item. Extended refs will kick in if we
4813 	 * add more hard links than can fit in the ref item.
4814 	 */
4815 	key[1].objectid = objectid;
4816 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4817 	key[1].offset = ref_objectid;
4818 
4819 	sizes[0] = sizeof(struct btrfs_inode_item);
4820 	sizes[1] = name_len + sizeof(*ref);
4821 
4822 	path->leave_spinning = 1;
4823 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4824 	if (ret != 0)
4825 		goto fail;
4826 
4827 	inode_init_owner(inode, dir, mode);
4828 	inode_set_bytes(inode, 0);
4829 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4830 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4831 				  struct btrfs_inode_item);
4832 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
4833 			     sizeof(*inode_item));
4834 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4835 
4836 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4837 			     struct btrfs_inode_ref);
4838 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4839 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4840 	ptr = (unsigned long)(ref + 1);
4841 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4842 
4843 	btrfs_mark_buffer_dirty(path->nodes[0]);
4844 	btrfs_free_path(path);
4845 
4846 	location = &BTRFS_I(inode)->location;
4847 	location->objectid = objectid;
4848 	location->offset = 0;
4849 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4850 
4851 	btrfs_inherit_iflags(inode, dir);
4852 
4853 	if (S_ISREG(mode)) {
4854 		if (btrfs_test_opt(root, NODATASUM))
4855 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4856 		if (btrfs_test_opt(root, NODATACOW))
4857 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4858 	}
4859 
4860 	insert_inode_hash(inode);
4861 	inode_tree_add(inode);
4862 
4863 	trace_btrfs_inode_new(inode);
4864 	btrfs_set_inode_last_trans(trans, inode);
4865 
4866 	btrfs_update_root_times(trans, root);
4867 
4868 	return inode;
4869 fail:
4870 	if (dir)
4871 		BTRFS_I(dir)->index_cnt--;
4872 	btrfs_free_path(path);
4873 	iput(inode);
4874 	return ERR_PTR(ret);
4875 }
4876 
4877 static inline u8 btrfs_inode_type(struct inode *inode)
4878 {
4879 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4880 }
4881 
4882 /*
4883  * utility function to add 'inode' into 'parent_inode' with
4884  * a give name and a given sequence number.
4885  * if 'add_backref' is true, also insert a backref from the
4886  * inode to the parent directory.
4887  */
4888 int btrfs_add_link(struct btrfs_trans_handle *trans,
4889 		   struct inode *parent_inode, struct inode *inode,
4890 		   const char *name, int name_len, int add_backref, u64 index)
4891 {
4892 	int ret = 0;
4893 	struct btrfs_key key;
4894 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4895 	u64 ino = btrfs_ino(inode);
4896 	u64 parent_ino = btrfs_ino(parent_inode);
4897 
4898 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4899 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4900 	} else {
4901 		key.objectid = ino;
4902 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4903 		key.offset = 0;
4904 	}
4905 
4906 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4907 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4908 					 key.objectid, root->root_key.objectid,
4909 					 parent_ino, index, name, name_len);
4910 	} else if (add_backref) {
4911 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4912 					     parent_ino, index);
4913 	}
4914 
4915 	/* Nothing to clean up yet */
4916 	if (ret)
4917 		return ret;
4918 
4919 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
4920 				    parent_inode, &key,
4921 				    btrfs_inode_type(inode), index);
4922 	if (ret == -EEXIST || ret == -EOVERFLOW)
4923 		goto fail_dir_item;
4924 	else if (ret) {
4925 		btrfs_abort_transaction(trans, root, ret);
4926 		return ret;
4927 	}
4928 
4929 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
4930 			   name_len * 2);
4931 	inode_inc_iversion(parent_inode);
4932 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4933 	ret = btrfs_update_inode(trans, root, parent_inode);
4934 	if (ret)
4935 		btrfs_abort_transaction(trans, root, ret);
4936 	return ret;
4937 
4938 fail_dir_item:
4939 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4940 		u64 local_index;
4941 		int err;
4942 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4943 				 key.objectid, root->root_key.objectid,
4944 				 parent_ino, &local_index, name, name_len);
4945 
4946 	} else if (add_backref) {
4947 		u64 local_index;
4948 		int err;
4949 
4950 		err = btrfs_del_inode_ref(trans, root, name, name_len,
4951 					  ino, parent_ino, &local_index);
4952 	}
4953 	return ret;
4954 }
4955 
4956 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4957 			    struct inode *dir, struct dentry *dentry,
4958 			    struct inode *inode, int backref, u64 index)
4959 {
4960 	int err = btrfs_add_link(trans, dir, inode,
4961 				 dentry->d_name.name, dentry->d_name.len,
4962 				 backref, index);
4963 	if (err > 0)
4964 		err = -EEXIST;
4965 	return err;
4966 }
4967 
4968 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4969 			umode_t mode, dev_t rdev)
4970 {
4971 	struct btrfs_trans_handle *trans;
4972 	struct btrfs_root *root = BTRFS_I(dir)->root;
4973 	struct inode *inode = NULL;
4974 	int err;
4975 	int drop_inode = 0;
4976 	u64 objectid;
4977 	u64 index = 0;
4978 
4979 	if (!new_valid_dev(rdev))
4980 		return -EINVAL;
4981 
4982 	/*
4983 	 * 2 for inode item and ref
4984 	 * 2 for dir items
4985 	 * 1 for xattr if selinux is on
4986 	 */
4987 	trans = btrfs_start_transaction(root, 5);
4988 	if (IS_ERR(trans))
4989 		return PTR_ERR(trans);
4990 
4991 	err = btrfs_find_free_ino(root, &objectid);
4992 	if (err)
4993 		goto out_unlock;
4994 
4995 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4996 				dentry->d_name.len, btrfs_ino(dir), objectid,
4997 				mode, &index);
4998 	if (IS_ERR(inode)) {
4999 		err = PTR_ERR(inode);
5000 		goto out_unlock;
5001 	}
5002 
5003 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5004 	if (err) {
5005 		drop_inode = 1;
5006 		goto out_unlock;
5007 	}
5008 
5009 	err = btrfs_update_inode(trans, root, inode);
5010 	if (err) {
5011 		drop_inode = 1;
5012 		goto out_unlock;
5013 	}
5014 
5015 	/*
5016 	* If the active LSM wants to access the inode during
5017 	* d_instantiate it needs these. Smack checks to see
5018 	* if the filesystem supports xattrs by looking at the
5019 	* ops vector.
5020 	*/
5021 
5022 	inode->i_op = &btrfs_special_inode_operations;
5023 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5024 	if (err)
5025 		drop_inode = 1;
5026 	else {
5027 		init_special_inode(inode, inode->i_mode, rdev);
5028 		btrfs_update_inode(trans, root, inode);
5029 		d_instantiate(dentry, inode);
5030 	}
5031 out_unlock:
5032 	btrfs_end_transaction(trans, root);
5033 	btrfs_btree_balance_dirty(root);
5034 	if (drop_inode) {
5035 		inode_dec_link_count(inode);
5036 		iput(inode);
5037 	}
5038 	return err;
5039 }
5040 
5041 static int btrfs_create(struct inode *dir, struct dentry *dentry,
5042 			umode_t mode, bool excl)
5043 {
5044 	struct btrfs_trans_handle *trans;
5045 	struct btrfs_root *root = BTRFS_I(dir)->root;
5046 	struct inode *inode = NULL;
5047 	int drop_inode_on_err = 0;
5048 	int err;
5049 	u64 objectid;
5050 	u64 index = 0;
5051 
5052 	/*
5053 	 * 2 for inode item and ref
5054 	 * 2 for dir items
5055 	 * 1 for xattr if selinux is on
5056 	 */
5057 	trans = btrfs_start_transaction(root, 5);
5058 	if (IS_ERR(trans))
5059 		return PTR_ERR(trans);
5060 
5061 	err = btrfs_find_free_ino(root, &objectid);
5062 	if (err)
5063 		goto out_unlock;
5064 
5065 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5066 				dentry->d_name.len, btrfs_ino(dir), objectid,
5067 				mode, &index);
5068 	if (IS_ERR(inode)) {
5069 		err = PTR_ERR(inode);
5070 		goto out_unlock;
5071 	}
5072 	drop_inode_on_err = 1;
5073 
5074 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5075 	if (err)
5076 		goto out_unlock;
5077 
5078 	err = btrfs_update_inode(trans, root, inode);
5079 	if (err)
5080 		goto out_unlock;
5081 
5082 	/*
5083 	* If the active LSM wants to access the inode during
5084 	* d_instantiate it needs these. Smack checks to see
5085 	* if the filesystem supports xattrs by looking at the
5086 	* ops vector.
5087 	*/
5088 	inode->i_fop = &btrfs_file_operations;
5089 	inode->i_op = &btrfs_file_inode_operations;
5090 
5091 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5092 	if (err)
5093 		goto out_unlock;
5094 
5095 	inode->i_mapping->a_ops = &btrfs_aops;
5096 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5097 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5098 	d_instantiate(dentry, inode);
5099 
5100 out_unlock:
5101 	btrfs_end_transaction(trans, root);
5102 	if (err && drop_inode_on_err) {
5103 		inode_dec_link_count(inode);
5104 		iput(inode);
5105 	}
5106 	btrfs_btree_balance_dirty(root);
5107 	return err;
5108 }
5109 
5110 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5111 		      struct dentry *dentry)
5112 {
5113 	struct btrfs_trans_handle *trans;
5114 	struct btrfs_root *root = BTRFS_I(dir)->root;
5115 	struct inode *inode = old_dentry->d_inode;
5116 	u64 index;
5117 	int err;
5118 	int drop_inode = 0;
5119 
5120 	/* do not allow sys_link's with other subvols of the same device */
5121 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5122 		return -EXDEV;
5123 
5124 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5125 		return -EMLINK;
5126 
5127 	err = btrfs_set_inode_index(dir, &index);
5128 	if (err)
5129 		goto fail;
5130 
5131 	/*
5132 	 * 2 items for inode and inode ref
5133 	 * 2 items for dir items
5134 	 * 1 item for parent inode
5135 	 */
5136 	trans = btrfs_start_transaction(root, 5);
5137 	if (IS_ERR(trans)) {
5138 		err = PTR_ERR(trans);
5139 		goto fail;
5140 	}
5141 
5142 	btrfs_inc_nlink(inode);
5143 	inode_inc_iversion(inode);
5144 	inode->i_ctime = CURRENT_TIME;
5145 	ihold(inode);
5146 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
5147 
5148 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5149 
5150 	if (err) {
5151 		drop_inode = 1;
5152 	} else {
5153 		struct dentry *parent = dentry->d_parent;
5154 		err = btrfs_update_inode(trans, root, inode);
5155 		if (err)
5156 			goto fail;
5157 		d_instantiate(dentry, inode);
5158 		btrfs_log_new_name(trans, inode, NULL, parent);
5159 	}
5160 
5161 	btrfs_end_transaction(trans, root);
5162 fail:
5163 	if (drop_inode) {
5164 		inode_dec_link_count(inode);
5165 		iput(inode);
5166 	}
5167 	btrfs_btree_balance_dirty(root);
5168 	return err;
5169 }
5170 
5171 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5172 {
5173 	struct inode *inode = NULL;
5174 	struct btrfs_trans_handle *trans;
5175 	struct btrfs_root *root = BTRFS_I(dir)->root;
5176 	int err = 0;
5177 	int drop_on_err = 0;
5178 	u64 objectid = 0;
5179 	u64 index = 0;
5180 
5181 	/*
5182 	 * 2 items for inode and ref
5183 	 * 2 items for dir items
5184 	 * 1 for xattr if selinux is on
5185 	 */
5186 	trans = btrfs_start_transaction(root, 5);
5187 	if (IS_ERR(trans))
5188 		return PTR_ERR(trans);
5189 
5190 	err = btrfs_find_free_ino(root, &objectid);
5191 	if (err)
5192 		goto out_fail;
5193 
5194 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5195 				dentry->d_name.len, btrfs_ino(dir), objectid,
5196 				S_IFDIR | mode, &index);
5197 	if (IS_ERR(inode)) {
5198 		err = PTR_ERR(inode);
5199 		goto out_fail;
5200 	}
5201 
5202 	drop_on_err = 1;
5203 
5204 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5205 	if (err)
5206 		goto out_fail;
5207 
5208 	inode->i_op = &btrfs_dir_inode_operations;
5209 	inode->i_fop = &btrfs_dir_file_operations;
5210 
5211 	btrfs_i_size_write(inode, 0);
5212 	err = btrfs_update_inode(trans, root, inode);
5213 	if (err)
5214 		goto out_fail;
5215 
5216 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5217 			     dentry->d_name.len, 0, index);
5218 	if (err)
5219 		goto out_fail;
5220 
5221 	d_instantiate(dentry, inode);
5222 	drop_on_err = 0;
5223 
5224 out_fail:
5225 	btrfs_end_transaction(trans, root);
5226 	if (drop_on_err)
5227 		iput(inode);
5228 	btrfs_btree_balance_dirty(root);
5229 	return err;
5230 }
5231 
5232 /* helper for btfs_get_extent.  Given an existing extent in the tree,
5233  * and an extent that you want to insert, deal with overlap and insert
5234  * the new extent into the tree.
5235  */
5236 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5237 				struct extent_map *existing,
5238 				struct extent_map *em,
5239 				u64 map_start, u64 map_len)
5240 {
5241 	u64 start_diff;
5242 
5243 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5244 	start_diff = map_start - em->start;
5245 	em->start = map_start;
5246 	em->len = map_len;
5247 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5248 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5249 		em->block_start += start_diff;
5250 		em->block_len -= start_diff;
5251 	}
5252 	return add_extent_mapping(em_tree, em);
5253 }
5254 
5255 static noinline int uncompress_inline(struct btrfs_path *path,
5256 				      struct inode *inode, struct page *page,
5257 				      size_t pg_offset, u64 extent_offset,
5258 				      struct btrfs_file_extent_item *item)
5259 {
5260 	int ret;
5261 	struct extent_buffer *leaf = path->nodes[0];
5262 	char *tmp;
5263 	size_t max_size;
5264 	unsigned long inline_size;
5265 	unsigned long ptr;
5266 	int compress_type;
5267 
5268 	WARN_ON(pg_offset != 0);
5269 	compress_type = btrfs_file_extent_compression(leaf, item);
5270 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5271 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5272 					btrfs_item_nr(leaf, path->slots[0]));
5273 	tmp = kmalloc(inline_size, GFP_NOFS);
5274 	if (!tmp)
5275 		return -ENOMEM;
5276 	ptr = btrfs_file_extent_inline_start(item);
5277 
5278 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5279 
5280 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5281 	ret = btrfs_decompress(compress_type, tmp, page,
5282 			       extent_offset, inline_size, max_size);
5283 	if (ret) {
5284 		char *kaddr = kmap_atomic(page);
5285 		unsigned long copy_size = min_t(u64,
5286 				  PAGE_CACHE_SIZE - pg_offset,
5287 				  max_size - extent_offset);
5288 		memset(kaddr + pg_offset, 0, copy_size);
5289 		kunmap_atomic(kaddr);
5290 	}
5291 	kfree(tmp);
5292 	return 0;
5293 }
5294 
5295 /*
5296  * a bit scary, this does extent mapping from logical file offset to the disk.
5297  * the ugly parts come from merging extents from the disk with the in-ram
5298  * representation.  This gets more complex because of the data=ordered code,
5299  * where the in-ram extents might be locked pending data=ordered completion.
5300  *
5301  * This also copies inline extents directly into the page.
5302  */
5303 
5304 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5305 				    size_t pg_offset, u64 start, u64 len,
5306 				    int create)
5307 {
5308 	int ret;
5309 	int err = 0;
5310 	u64 bytenr;
5311 	u64 extent_start = 0;
5312 	u64 extent_end = 0;
5313 	u64 objectid = btrfs_ino(inode);
5314 	u32 found_type;
5315 	struct btrfs_path *path = NULL;
5316 	struct btrfs_root *root = BTRFS_I(inode)->root;
5317 	struct btrfs_file_extent_item *item;
5318 	struct extent_buffer *leaf;
5319 	struct btrfs_key found_key;
5320 	struct extent_map *em = NULL;
5321 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5322 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5323 	struct btrfs_trans_handle *trans = NULL;
5324 	int compress_type;
5325 
5326 again:
5327 	read_lock(&em_tree->lock);
5328 	em = lookup_extent_mapping(em_tree, start, len);
5329 	if (em)
5330 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5331 	read_unlock(&em_tree->lock);
5332 
5333 	if (em) {
5334 		if (em->start > start || em->start + em->len <= start)
5335 			free_extent_map(em);
5336 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5337 			free_extent_map(em);
5338 		else
5339 			goto out;
5340 	}
5341 	em = alloc_extent_map();
5342 	if (!em) {
5343 		err = -ENOMEM;
5344 		goto out;
5345 	}
5346 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5347 	em->start = EXTENT_MAP_HOLE;
5348 	em->orig_start = EXTENT_MAP_HOLE;
5349 	em->len = (u64)-1;
5350 	em->block_len = (u64)-1;
5351 
5352 	if (!path) {
5353 		path = btrfs_alloc_path();
5354 		if (!path) {
5355 			err = -ENOMEM;
5356 			goto out;
5357 		}
5358 		/*
5359 		 * Chances are we'll be called again, so go ahead and do
5360 		 * readahead
5361 		 */
5362 		path->reada = 1;
5363 	}
5364 
5365 	ret = btrfs_lookup_file_extent(trans, root, path,
5366 				       objectid, start, trans != NULL);
5367 	if (ret < 0) {
5368 		err = ret;
5369 		goto out;
5370 	}
5371 
5372 	if (ret != 0) {
5373 		if (path->slots[0] == 0)
5374 			goto not_found;
5375 		path->slots[0]--;
5376 	}
5377 
5378 	leaf = path->nodes[0];
5379 	item = btrfs_item_ptr(leaf, path->slots[0],
5380 			      struct btrfs_file_extent_item);
5381 	/* are we inside the extent that was found? */
5382 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5383 	found_type = btrfs_key_type(&found_key);
5384 	if (found_key.objectid != objectid ||
5385 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5386 		goto not_found;
5387 	}
5388 
5389 	found_type = btrfs_file_extent_type(leaf, item);
5390 	extent_start = found_key.offset;
5391 	compress_type = btrfs_file_extent_compression(leaf, item);
5392 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5393 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5394 		extent_end = extent_start +
5395 		       btrfs_file_extent_num_bytes(leaf, item);
5396 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5397 		size_t size;
5398 		size = btrfs_file_extent_inline_len(leaf, item);
5399 		extent_end = (extent_start + size + root->sectorsize - 1) &
5400 			~((u64)root->sectorsize - 1);
5401 	}
5402 
5403 	if (start >= extent_end) {
5404 		path->slots[0]++;
5405 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5406 			ret = btrfs_next_leaf(root, path);
5407 			if (ret < 0) {
5408 				err = ret;
5409 				goto out;
5410 			}
5411 			if (ret > 0)
5412 				goto not_found;
5413 			leaf = path->nodes[0];
5414 		}
5415 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5416 		if (found_key.objectid != objectid ||
5417 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5418 			goto not_found;
5419 		if (start + len <= found_key.offset)
5420 			goto not_found;
5421 		em->start = start;
5422 		em->orig_start = start;
5423 		em->len = found_key.offset - start;
5424 		goto not_found_em;
5425 	}
5426 
5427 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5428 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5429 		em->start = extent_start;
5430 		em->len = extent_end - extent_start;
5431 		em->orig_start = extent_start -
5432 				 btrfs_file_extent_offset(leaf, item);
5433 		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
5434 								      item);
5435 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5436 		if (bytenr == 0) {
5437 			em->block_start = EXTENT_MAP_HOLE;
5438 			goto insert;
5439 		}
5440 		if (compress_type != BTRFS_COMPRESS_NONE) {
5441 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5442 			em->compress_type = compress_type;
5443 			em->block_start = bytenr;
5444 			em->block_len = em->orig_block_len;
5445 		} else {
5446 			bytenr += btrfs_file_extent_offset(leaf, item);
5447 			em->block_start = bytenr;
5448 			em->block_len = em->len;
5449 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5450 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5451 		}
5452 		goto insert;
5453 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5454 		unsigned long ptr;
5455 		char *map;
5456 		size_t size;
5457 		size_t extent_offset;
5458 		size_t copy_size;
5459 
5460 		em->block_start = EXTENT_MAP_INLINE;
5461 		if (!page || create) {
5462 			em->start = extent_start;
5463 			em->len = extent_end - extent_start;
5464 			goto out;
5465 		}
5466 
5467 		size = btrfs_file_extent_inline_len(leaf, item);
5468 		extent_offset = page_offset(page) + pg_offset - extent_start;
5469 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5470 				size - extent_offset);
5471 		em->start = extent_start + extent_offset;
5472 		em->len = (copy_size + root->sectorsize - 1) &
5473 			~((u64)root->sectorsize - 1);
5474 		em->orig_block_len = em->len;
5475 		em->orig_start = em->start;
5476 		if (compress_type) {
5477 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5478 			em->compress_type = compress_type;
5479 		}
5480 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5481 		if (create == 0 && !PageUptodate(page)) {
5482 			if (btrfs_file_extent_compression(leaf, item) !=
5483 			    BTRFS_COMPRESS_NONE) {
5484 				ret = uncompress_inline(path, inode, page,
5485 							pg_offset,
5486 							extent_offset, item);
5487 				BUG_ON(ret); /* -ENOMEM */
5488 			} else {
5489 				map = kmap(page);
5490 				read_extent_buffer(leaf, map + pg_offset, ptr,
5491 						   copy_size);
5492 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5493 					memset(map + pg_offset + copy_size, 0,
5494 					       PAGE_CACHE_SIZE - pg_offset -
5495 					       copy_size);
5496 				}
5497 				kunmap(page);
5498 			}
5499 			flush_dcache_page(page);
5500 		} else if (create && PageUptodate(page)) {
5501 			BUG();
5502 			if (!trans) {
5503 				kunmap(page);
5504 				free_extent_map(em);
5505 				em = NULL;
5506 
5507 				btrfs_release_path(path);
5508 				trans = btrfs_join_transaction(root);
5509 
5510 				if (IS_ERR(trans))
5511 					return ERR_CAST(trans);
5512 				goto again;
5513 			}
5514 			map = kmap(page);
5515 			write_extent_buffer(leaf, map + pg_offset, ptr,
5516 					    copy_size);
5517 			kunmap(page);
5518 			btrfs_mark_buffer_dirty(leaf);
5519 		}
5520 		set_extent_uptodate(io_tree, em->start,
5521 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5522 		goto insert;
5523 	} else {
5524 		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
5525 	}
5526 not_found:
5527 	em->start = start;
5528 	em->orig_start = start;
5529 	em->len = len;
5530 not_found_em:
5531 	em->block_start = EXTENT_MAP_HOLE;
5532 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5533 insert:
5534 	btrfs_release_path(path);
5535 	if (em->start > start || extent_map_end(em) <= start) {
5536 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5537 		       "[%llu %llu]\n", (unsigned long long)em->start,
5538 		       (unsigned long long)em->len,
5539 		       (unsigned long long)start,
5540 		       (unsigned long long)len);
5541 		err = -EIO;
5542 		goto out;
5543 	}
5544 
5545 	err = 0;
5546 	write_lock(&em_tree->lock);
5547 	ret = add_extent_mapping(em_tree, em);
5548 	/* it is possible that someone inserted the extent into the tree
5549 	 * while we had the lock dropped.  It is also possible that
5550 	 * an overlapping map exists in the tree
5551 	 */
5552 	if (ret == -EEXIST) {
5553 		struct extent_map *existing;
5554 
5555 		ret = 0;
5556 
5557 		existing = lookup_extent_mapping(em_tree, start, len);
5558 		if (existing && (existing->start > start ||
5559 		    existing->start + existing->len <= start)) {
5560 			free_extent_map(existing);
5561 			existing = NULL;
5562 		}
5563 		if (!existing) {
5564 			existing = lookup_extent_mapping(em_tree, em->start,
5565 							 em->len);
5566 			if (existing) {
5567 				err = merge_extent_mapping(em_tree, existing,
5568 							   em, start,
5569 							   root->sectorsize);
5570 				free_extent_map(existing);
5571 				if (err) {
5572 					free_extent_map(em);
5573 					em = NULL;
5574 				}
5575 			} else {
5576 				err = -EIO;
5577 				free_extent_map(em);
5578 				em = NULL;
5579 			}
5580 		} else {
5581 			free_extent_map(em);
5582 			em = existing;
5583 			err = 0;
5584 		}
5585 	}
5586 	write_unlock(&em_tree->lock);
5587 out:
5588 
5589 	if (em)
5590 		trace_btrfs_get_extent(root, em);
5591 
5592 	if (path)
5593 		btrfs_free_path(path);
5594 	if (trans) {
5595 		ret = btrfs_end_transaction(trans, root);
5596 		if (!err)
5597 			err = ret;
5598 	}
5599 	if (err) {
5600 		free_extent_map(em);
5601 		return ERR_PTR(err);
5602 	}
5603 	BUG_ON(!em); /* Error is always set */
5604 	return em;
5605 }
5606 
5607 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5608 					   size_t pg_offset, u64 start, u64 len,
5609 					   int create)
5610 {
5611 	struct extent_map *em;
5612 	struct extent_map *hole_em = NULL;
5613 	u64 range_start = start;
5614 	u64 end;
5615 	u64 found;
5616 	u64 found_end;
5617 	int err = 0;
5618 
5619 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5620 	if (IS_ERR(em))
5621 		return em;
5622 	if (em) {
5623 		/*
5624 		 * if our em maps to
5625 		 * -  a hole or
5626 		 * -  a pre-alloc extent,
5627 		 * there might actually be delalloc bytes behind it.
5628 		 */
5629 		if (em->block_start != EXTENT_MAP_HOLE &&
5630 		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5631 			return em;
5632 		else
5633 			hole_em = em;
5634 	}
5635 
5636 	/* check to see if we've wrapped (len == -1 or similar) */
5637 	end = start + len;
5638 	if (end < start)
5639 		end = (u64)-1;
5640 	else
5641 		end -= 1;
5642 
5643 	em = NULL;
5644 
5645 	/* ok, we didn't find anything, lets look for delalloc */
5646 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5647 				 end, len, EXTENT_DELALLOC, 1);
5648 	found_end = range_start + found;
5649 	if (found_end < range_start)
5650 		found_end = (u64)-1;
5651 
5652 	/*
5653 	 * we didn't find anything useful, return
5654 	 * the original results from get_extent()
5655 	 */
5656 	if (range_start > end || found_end <= start) {
5657 		em = hole_em;
5658 		hole_em = NULL;
5659 		goto out;
5660 	}
5661 
5662 	/* adjust the range_start to make sure it doesn't
5663 	 * go backwards from the start they passed in
5664 	 */
5665 	range_start = max(start,range_start);
5666 	found = found_end - range_start;
5667 
5668 	if (found > 0) {
5669 		u64 hole_start = start;
5670 		u64 hole_len = len;
5671 
5672 		em = alloc_extent_map();
5673 		if (!em) {
5674 			err = -ENOMEM;
5675 			goto out;
5676 		}
5677 		/*
5678 		 * when btrfs_get_extent can't find anything it
5679 		 * returns one huge hole
5680 		 *
5681 		 * make sure what it found really fits our range, and
5682 		 * adjust to make sure it is based on the start from
5683 		 * the caller
5684 		 */
5685 		if (hole_em) {
5686 			u64 calc_end = extent_map_end(hole_em);
5687 
5688 			if (calc_end <= start || (hole_em->start > end)) {
5689 				free_extent_map(hole_em);
5690 				hole_em = NULL;
5691 			} else {
5692 				hole_start = max(hole_em->start, start);
5693 				hole_len = calc_end - hole_start;
5694 			}
5695 		}
5696 		em->bdev = NULL;
5697 		if (hole_em && range_start > hole_start) {
5698 			/* our hole starts before our delalloc, so we
5699 			 * have to return just the parts of the hole
5700 			 * that go until  the delalloc starts
5701 			 */
5702 			em->len = min(hole_len,
5703 				      range_start - hole_start);
5704 			em->start = hole_start;
5705 			em->orig_start = hole_start;
5706 			/*
5707 			 * don't adjust block start at all,
5708 			 * it is fixed at EXTENT_MAP_HOLE
5709 			 */
5710 			em->block_start = hole_em->block_start;
5711 			em->block_len = hole_len;
5712 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
5713 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5714 		} else {
5715 			em->start = range_start;
5716 			em->len = found;
5717 			em->orig_start = range_start;
5718 			em->block_start = EXTENT_MAP_DELALLOC;
5719 			em->block_len = found;
5720 		}
5721 	} else if (hole_em) {
5722 		return hole_em;
5723 	}
5724 out:
5725 
5726 	free_extent_map(hole_em);
5727 	if (err) {
5728 		free_extent_map(em);
5729 		return ERR_PTR(err);
5730 	}
5731 	return em;
5732 }
5733 
5734 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5735 						  u64 start, u64 len)
5736 {
5737 	struct btrfs_root *root = BTRFS_I(inode)->root;
5738 	struct btrfs_trans_handle *trans;
5739 	struct extent_map *em;
5740 	struct btrfs_key ins;
5741 	u64 alloc_hint;
5742 	int ret;
5743 
5744 	trans = btrfs_join_transaction(root);
5745 	if (IS_ERR(trans))
5746 		return ERR_CAST(trans);
5747 
5748 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5749 
5750 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5751 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5752 				   alloc_hint, &ins, 1);
5753 	if (ret) {
5754 		em = ERR_PTR(ret);
5755 		goto out;
5756 	}
5757 
5758 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
5759 			      ins.offset, ins.offset, 0);
5760 	if (IS_ERR(em))
5761 		goto out;
5762 
5763 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5764 					   ins.offset, ins.offset, 0);
5765 	if (ret) {
5766 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5767 		em = ERR_PTR(ret);
5768 	}
5769 out:
5770 	btrfs_end_transaction(trans, root);
5771 	return em;
5772 }
5773 
5774 /*
5775  * returns 1 when the nocow is safe, < 1 on error, 0 if the
5776  * block must be cow'd
5777  */
5778 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5779 				      struct inode *inode, u64 offset, u64 len)
5780 {
5781 	struct btrfs_path *path;
5782 	int ret;
5783 	struct extent_buffer *leaf;
5784 	struct btrfs_root *root = BTRFS_I(inode)->root;
5785 	struct btrfs_file_extent_item *fi;
5786 	struct btrfs_key key;
5787 	u64 disk_bytenr;
5788 	u64 backref_offset;
5789 	u64 extent_end;
5790 	u64 num_bytes;
5791 	int slot;
5792 	int found_type;
5793 
5794 	path = btrfs_alloc_path();
5795 	if (!path)
5796 		return -ENOMEM;
5797 
5798 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5799 				       offset, 0);
5800 	if (ret < 0)
5801 		goto out;
5802 
5803 	slot = path->slots[0];
5804 	if (ret == 1) {
5805 		if (slot == 0) {
5806 			/* can't find the item, must cow */
5807 			ret = 0;
5808 			goto out;
5809 		}
5810 		slot--;
5811 	}
5812 	ret = 0;
5813 	leaf = path->nodes[0];
5814 	btrfs_item_key_to_cpu(leaf, &key, slot);
5815 	if (key.objectid != btrfs_ino(inode) ||
5816 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5817 		/* not our file or wrong item type, must cow */
5818 		goto out;
5819 	}
5820 
5821 	if (key.offset > offset) {
5822 		/* Wrong offset, must cow */
5823 		goto out;
5824 	}
5825 
5826 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5827 	found_type = btrfs_file_extent_type(leaf, fi);
5828 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5829 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5830 		/* not a regular extent, must cow */
5831 		goto out;
5832 	}
5833 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5834 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5835 
5836 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5837 	if (extent_end < offset + len) {
5838 		/* extent doesn't include our full range, must cow */
5839 		goto out;
5840 	}
5841 
5842 	if (btrfs_extent_readonly(root, disk_bytenr))
5843 		goto out;
5844 
5845 	/*
5846 	 * look for other files referencing this extent, if we
5847 	 * find any we must cow
5848 	 */
5849 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5850 				  key.offset - backref_offset, disk_bytenr))
5851 		goto out;
5852 
5853 	/*
5854 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5855 	 * in this extent we are about to write.  If there
5856 	 * are any csums in that range we have to cow in order
5857 	 * to keep the csums correct
5858 	 */
5859 	disk_bytenr += backref_offset;
5860 	disk_bytenr += offset - key.offset;
5861 	num_bytes = min(offset + len, extent_end) - offset;
5862 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5863 				goto out;
5864 	/*
5865 	 * all of the above have passed, it is safe to overwrite this extent
5866 	 * without cow
5867 	 */
5868 	ret = 1;
5869 out:
5870 	btrfs_free_path(path);
5871 	return ret;
5872 }
5873 
5874 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
5875 			      struct extent_state **cached_state, int writing)
5876 {
5877 	struct btrfs_ordered_extent *ordered;
5878 	int ret = 0;
5879 
5880 	while (1) {
5881 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5882 				 0, cached_state);
5883 		/*
5884 		 * We're concerned with the entire range that we're going to be
5885 		 * doing DIO to, so we need to make sure theres no ordered
5886 		 * extents in this range.
5887 		 */
5888 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
5889 						     lockend - lockstart + 1);
5890 
5891 		/*
5892 		 * We need to make sure there are no buffered pages in this
5893 		 * range either, we could have raced between the invalidate in
5894 		 * generic_file_direct_write and locking the extent.  The
5895 		 * invalidate needs to happen so that reads after a write do not
5896 		 * get stale data.
5897 		 */
5898 		if (!ordered && (!writing ||
5899 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
5900 				    lockstart, lockend, EXTENT_UPTODATE, 0,
5901 				    *cached_state)))
5902 			break;
5903 
5904 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5905 				     cached_state, GFP_NOFS);
5906 
5907 		if (ordered) {
5908 			btrfs_start_ordered_extent(inode, ordered, 1);
5909 			btrfs_put_ordered_extent(ordered);
5910 		} else {
5911 			/* Screw you mmap */
5912 			ret = filemap_write_and_wait_range(inode->i_mapping,
5913 							   lockstart,
5914 							   lockend);
5915 			if (ret)
5916 				break;
5917 
5918 			/*
5919 			 * If we found a page that couldn't be invalidated just
5920 			 * fall back to buffered.
5921 			 */
5922 			ret = invalidate_inode_pages2_range(inode->i_mapping,
5923 					lockstart >> PAGE_CACHE_SHIFT,
5924 					lockend >> PAGE_CACHE_SHIFT);
5925 			if (ret)
5926 				break;
5927 		}
5928 
5929 		cond_resched();
5930 	}
5931 
5932 	return ret;
5933 }
5934 
5935 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
5936 					   u64 len, u64 orig_start,
5937 					   u64 block_start, u64 block_len,
5938 					   u64 orig_block_len, int type)
5939 {
5940 	struct extent_map_tree *em_tree;
5941 	struct extent_map *em;
5942 	struct btrfs_root *root = BTRFS_I(inode)->root;
5943 	int ret;
5944 
5945 	em_tree = &BTRFS_I(inode)->extent_tree;
5946 	em = alloc_extent_map();
5947 	if (!em)
5948 		return ERR_PTR(-ENOMEM);
5949 
5950 	em->start = start;
5951 	em->orig_start = orig_start;
5952 	em->len = len;
5953 	em->block_len = block_len;
5954 	em->block_start = block_start;
5955 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5956 	em->orig_block_len = orig_block_len;
5957 	em->generation = -1;
5958 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5959 	if (type == BTRFS_ORDERED_PREALLOC)
5960 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
5961 
5962 	do {
5963 		btrfs_drop_extent_cache(inode, em->start,
5964 				em->start + em->len - 1, 0);
5965 		write_lock(&em_tree->lock);
5966 		ret = add_extent_mapping(em_tree, em);
5967 		if (!ret)
5968 			list_move(&em->list,
5969 				  &em_tree->modified_extents);
5970 		write_unlock(&em_tree->lock);
5971 	} while (ret == -EEXIST);
5972 
5973 	if (ret) {
5974 		free_extent_map(em);
5975 		return ERR_PTR(ret);
5976 	}
5977 
5978 	return em;
5979 }
5980 
5981 
5982 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5983 				   struct buffer_head *bh_result, int create)
5984 {
5985 	struct extent_map *em;
5986 	struct btrfs_root *root = BTRFS_I(inode)->root;
5987 	struct extent_state *cached_state = NULL;
5988 	u64 start = iblock << inode->i_blkbits;
5989 	u64 lockstart, lockend;
5990 	u64 len = bh_result->b_size;
5991 	struct btrfs_trans_handle *trans;
5992 	int unlock_bits = EXTENT_LOCKED;
5993 	int ret;
5994 
5995 	if (create) {
5996 		ret = btrfs_delalloc_reserve_space(inode, len);
5997 		if (ret)
5998 			return ret;
5999 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
6000 	} else {
6001 		len = min_t(u64, len, root->sectorsize);
6002 	}
6003 
6004 	lockstart = start;
6005 	lockend = start + len - 1;
6006 
6007 	/*
6008 	 * If this errors out it's because we couldn't invalidate pagecache for
6009 	 * this range and we need to fallback to buffered.
6010 	 */
6011 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6012 		return -ENOTBLK;
6013 
6014 	if (create) {
6015 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6016 				     lockend, EXTENT_DELALLOC, NULL,
6017 				     &cached_state, GFP_NOFS);
6018 		if (ret)
6019 			goto unlock_err;
6020 	}
6021 
6022 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
6023 	if (IS_ERR(em)) {
6024 		ret = PTR_ERR(em);
6025 		goto unlock_err;
6026 	}
6027 
6028 	/*
6029 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
6030 	 * io.  INLINE is special, and we could probably kludge it in here, but
6031 	 * it's still buffered so for safety lets just fall back to the generic
6032 	 * buffered path.
6033 	 *
6034 	 * For COMPRESSED we _have_ to read the entire extent in so we can
6035 	 * decompress it, so there will be buffering required no matter what we
6036 	 * do, so go ahead and fallback to buffered.
6037 	 *
6038 	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
6039 	 * to buffered IO.  Don't blame me, this is the price we pay for using
6040 	 * the generic code.
6041 	 */
6042 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6043 	    em->block_start == EXTENT_MAP_INLINE) {
6044 		free_extent_map(em);
6045 		ret = -ENOTBLK;
6046 		goto unlock_err;
6047 	}
6048 
6049 	/* Just a good old fashioned hole, return */
6050 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6051 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6052 		free_extent_map(em);
6053 		ret = 0;
6054 		goto unlock_err;
6055 	}
6056 
6057 	/*
6058 	 * We don't allocate a new extent in the following cases
6059 	 *
6060 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6061 	 * existing extent.
6062 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6063 	 * just use the extent.
6064 	 *
6065 	 */
6066 	if (!create) {
6067 		len = min(len, em->len - (start - em->start));
6068 		lockstart = start + len;
6069 		goto unlock;
6070 	}
6071 
6072 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6073 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6074 	     em->block_start != EXTENT_MAP_HOLE)) {
6075 		int type;
6076 		int ret;
6077 		u64 block_start;
6078 
6079 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6080 			type = BTRFS_ORDERED_PREALLOC;
6081 		else
6082 			type = BTRFS_ORDERED_NOCOW;
6083 		len = min(len, em->len - (start - em->start));
6084 		block_start = em->block_start + (start - em->start);
6085 
6086 		/*
6087 		 * we're not going to log anything, but we do need
6088 		 * to make sure the current transaction stays open
6089 		 * while we look for nocow cross refs
6090 		 */
6091 		trans = btrfs_join_transaction(root);
6092 		if (IS_ERR(trans))
6093 			goto must_cow;
6094 
6095 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
6096 			u64 orig_start = em->orig_start;
6097 			u64 orig_block_len = em->orig_block_len;
6098 
6099 			if (type == BTRFS_ORDERED_PREALLOC) {
6100 				free_extent_map(em);
6101 				em = create_pinned_em(inode, start, len,
6102 						       orig_start,
6103 						       block_start, len,
6104 						       orig_block_len, type);
6105 				if (IS_ERR(em)) {
6106 					btrfs_end_transaction(trans, root);
6107 					goto unlock_err;
6108 				}
6109 			}
6110 
6111 			ret = btrfs_add_ordered_extent_dio(inode, start,
6112 					   block_start, len, len, type);
6113 			btrfs_end_transaction(trans, root);
6114 			if (ret) {
6115 				free_extent_map(em);
6116 				goto unlock_err;
6117 			}
6118 			goto unlock;
6119 		}
6120 		btrfs_end_transaction(trans, root);
6121 	}
6122 must_cow:
6123 	/*
6124 	 * this will cow the extent, reset the len in case we changed
6125 	 * it above
6126 	 */
6127 	len = bh_result->b_size;
6128 	free_extent_map(em);
6129 	em = btrfs_new_extent_direct(inode, start, len);
6130 	if (IS_ERR(em)) {
6131 		ret = PTR_ERR(em);
6132 		goto unlock_err;
6133 	}
6134 	len = min(len, em->len - (start - em->start));
6135 unlock:
6136 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6137 		inode->i_blkbits;
6138 	bh_result->b_size = len;
6139 	bh_result->b_bdev = em->bdev;
6140 	set_buffer_mapped(bh_result);
6141 	if (create) {
6142 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6143 			set_buffer_new(bh_result);
6144 
6145 		/*
6146 		 * Need to update the i_size under the extent lock so buffered
6147 		 * readers will get the updated i_size when we unlock.
6148 		 */
6149 		if (start + len > i_size_read(inode))
6150 			i_size_write(inode, start + len);
6151 	}
6152 
6153 	/*
6154 	 * In the case of write we need to clear and unlock the entire range,
6155 	 * in the case of read we need to unlock only the end area that we
6156 	 * aren't using if there is any left over space.
6157 	 */
6158 	if (lockstart < lockend) {
6159 		if (create && len < lockend - lockstart) {
6160 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6161 					 lockstart + len - 1,
6162 					 unlock_bits | EXTENT_DEFRAG, 1, 0,
6163 					 &cached_state, GFP_NOFS);
6164 			/*
6165 			 * Beside unlock, we also need to cleanup reserved space
6166 			 * for the left range by attaching EXTENT_DO_ACCOUNTING.
6167 			 */
6168 			clear_extent_bit(&BTRFS_I(inode)->io_tree,
6169 					 lockstart + len, lockend,
6170 					 unlock_bits | EXTENT_DO_ACCOUNTING |
6171 					 EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
6172 		} else {
6173 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6174 					 lockend, unlock_bits, 1, 0,
6175 					 &cached_state, GFP_NOFS);
6176 		}
6177 	} else {
6178 		free_extent_state(cached_state);
6179 	}
6180 
6181 	free_extent_map(em);
6182 
6183 	return 0;
6184 
6185 unlock_err:
6186 	if (create)
6187 		unlock_bits |= EXTENT_DO_ACCOUNTING;
6188 
6189 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6190 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6191 	return ret;
6192 }
6193 
6194 struct btrfs_dio_private {
6195 	struct inode *inode;
6196 	u64 logical_offset;
6197 	u64 disk_bytenr;
6198 	u64 bytes;
6199 	void *private;
6200 
6201 	/* number of bios pending for this dio */
6202 	atomic_t pending_bios;
6203 
6204 	/* IO errors */
6205 	int errors;
6206 
6207 	struct bio *orig_bio;
6208 };
6209 
6210 static void btrfs_endio_direct_read(struct bio *bio, int err)
6211 {
6212 	struct btrfs_dio_private *dip = bio->bi_private;
6213 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6214 	struct bio_vec *bvec = bio->bi_io_vec;
6215 	struct inode *inode = dip->inode;
6216 	struct btrfs_root *root = BTRFS_I(inode)->root;
6217 	u64 start;
6218 
6219 	start = dip->logical_offset;
6220 	do {
6221 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6222 			struct page *page = bvec->bv_page;
6223 			char *kaddr;
6224 			u32 csum = ~(u32)0;
6225 			u64 private = ~(u32)0;
6226 			unsigned long flags;
6227 
6228 			if (get_state_private(&BTRFS_I(inode)->io_tree,
6229 					      start, &private))
6230 				goto failed;
6231 			local_irq_save(flags);
6232 			kaddr = kmap_atomic(page);
6233 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
6234 					       csum, bvec->bv_len);
6235 			btrfs_csum_final(csum, (char *)&csum);
6236 			kunmap_atomic(kaddr);
6237 			local_irq_restore(flags);
6238 
6239 			flush_dcache_page(bvec->bv_page);
6240 			if (csum != private) {
6241 failed:
6242 				printk(KERN_ERR "btrfs csum failed ino %llu off"
6243 				      " %llu csum %u private %u\n",
6244 				      (unsigned long long)btrfs_ino(inode),
6245 				      (unsigned long long)start,
6246 				      csum, (unsigned)private);
6247 				err = -EIO;
6248 			}
6249 		}
6250 
6251 		start += bvec->bv_len;
6252 		bvec++;
6253 	} while (bvec <= bvec_end);
6254 
6255 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6256 		      dip->logical_offset + dip->bytes - 1);
6257 	bio->bi_private = dip->private;
6258 
6259 	kfree(dip);
6260 
6261 	/* If we had a csum failure make sure to clear the uptodate flag */
6262 	if (err)
6263 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6264 	dio_end_io(bio, err);
6265 }
6266 
6267 static void btrfs_endio_direct_write(struct bio *bio, int err)
6268 {
6269 	struct btrfs_dio_private *dip = bio->bi_private;
6270 	struct inode *inode = dip->inode;
6271 	struct btrfs_root *root = BTRFS_I(inode)->root;
6272 	struct btrfs_ordered_extent *ordered = NULL;
6273 	u64 ordered_offset = dip->logical_offset;
6274 	u64 ordered_bytes = dip->bytes;
6275 	int ret;
6276 
6277 	if (err)
6278 		goto out_done;
6279 again:
6280 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6281 						   &ordered_offset,
6282 						   ordered_bytes, !err);
6283 	if (!ret)
6284 		goto out_test;
6285 
6286 	ordered->work.func = finish_ordered_fn;
6287 	ordered->work.flags = 0;
6288 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6289 			   &ordered->work);
6290 out_test:
6291 	/*
6292 	 * our bio might span multiple ordered extents.  If we haven't
6293 	 * completed the accounting for the whole dio, go back and try again
6294 	 */
6295 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6296 		ordered_bytes = dip->logical_offset + dip->bytes -
6297 			ordered_offset;
6298 		ordered = NULL;
6299 		goto again;
6300 	}
6301 out_done:
6302 	bio->bi_private = dip->private;
6303 
6304 	kfree(dip);
6305 
6306 	/* If we had an error make sure to clear the uptodate flag */
6307 	if (err)
6308 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6309 	dio_end_io(bio, err);
6310 }
6311 
6312 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6313 				    struct bio *bio, int mirror_num,
6314 				    unsigned long bio_flags, u64 offset)
6315 {
6316 	int ret;
6317 	struct btrfs_root *root = BTRFS_I(inode)->root;
6318 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6319 	BUG_ON(ret); /* -ENOMEM */
6320 	return 0;
6321 }
6322 
6323 static void btrfs_end_dio_bio(struct bio *bio, int err)
6324 {
6325 	struct btrfs_dio_private *dip = bio->bi_private;
6326 
6327 	if (err) {
6328 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6329 		      "sector %#Lx len %u err no %d\n",
6330 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
6331 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6332 		dip->errors = 1;
6333 
6334 		/*
6335 		 * before atomic variable goto zero, we must make sure
6336 		 * dip->errors is perceived to be set.
6337 		 */
6338 		smp_mb__before_atomic_dec();
6339 	}
6340 
6341 	/* if there are more bios still pending for this dio, just exit */
6342 	if (!atomic_dec_and_test(&dip->pending_bios))
6343 		goto out;
6344 
6345 	if (dip->errors)
6346 		bio_io_error(dip->orig_bio);
6347 	else {
6348 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
6349 		bio_endio(dip->orig_bio, 0);
6350 	}
6351 out:
6352 	bio_put(bio);
6353 }
6354 
6355 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6356 				       u64 first_sector, gfp_t gfp_flags)
6357 {
6358 	int nr_vecs = bio_get_nr_vecs(bdev);
6359 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6360 }
6361 
6362 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6363 					 int rw, u64 file_offset, int skip_sum,
6364 					 int async_submit)
6365 {
6366 	int write = rw & REQ_WRITE;
6367 	struct btrfs_root *root = BTRFS_I(inode)->root;
6368 	int ret;
6369 
6370 	if (async_submit)
6371 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
6372 
6373 	bio_get(bio);
6374 
6375 	if (!write) {
6376 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6377 		if (ret)
6378 			goto err;
6379 	}
6380 
6381 	if (skip_sum)
6382 		goto map;
6383 
6384 	if (write && async_submit) {
6385 		ret = btrfs_wq_submit_bio(root->fs_info,
6386 				   inode, rw, bio, 0, 0,
6387 				   file_offset,
6388 				   __btrfs_submit_bio_start_direct_io,
6389 				   __btrfs_submit_bio_done);
6390 		goto err;
6391 	} else if (write) {
6392 		/*
6393 		 * If we aren't doing async submit, calculate the csum of the
6394 		 * bio now.
6395 		 */
6396 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6397 		if (ret)
6398 			goto err;
6399 	} else if (!skip_sum) {
6400 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
6401 		if (ret)
6402 			goto err;
6403 	}
6404 
6405 map:
6406 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
6407 err:
6408 	bio_put(bio);
6409 	return ret;
6410 }
6411 
6412 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6413 				    int skip_sum)
6414 {
6415 	struct inode *inode = dip->inode;
6416 	struct btrfs_root *root = BTRFS_I(inode)->root;
6417 	struct bio *bio;
6418 	struct bio *orig_bio = dip->orig_bio;
6419 	struct bio_vec *bvec = orig_bio->bi_io_vec;
6420 	u64 start_sector = orig_bio->bi_sector;
6421 	u64 file_offset = dip->logical_offset;
6422 	u64 submit_len = 0;
6423 	u64 map_length;
6424 	int nr_pages = 0;
6425 	int ret = 0;
6426 	int async_submit = 0;
6427 
6428 	map_length = orig_bio->bi_size;
6429 	ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
6430 			      &map_length, NULL, 0);
6431 	if (ret) {
6432 		bio_put(orig_bio);
6433 		return -EIO;
6434 	}
6435 
6436 	if (map_length >= orig_bio->bi_size) {
6437 		bio = orig_bio;
6438 		goto submit;
6439 	}
6440 
6441 	async_submit = 1;
6442 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6443 	if (!bio)
6444 		return -ENOMEM;
6445 	bio->bi_private = dip;
6446 	bio->bi_end_io = btrfs_end_dio_bio;
6447 	atomic_inc(&dip->pending_bios);
6448 
6449 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6450 		if (unlikely(map_length < submit_len + bvec->bv_len ||
6451 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6452 				 bvec->bv_offset) < bvec->bv_len)) {
6453 			/*
6454 			 * inc the count before we submit the bio so
6455 			 * we know the end IO handler won't happen before
6456 			 * we inc the count. Otherwise, the dip might get freed
6457 			 * before we're done setting it up
6458 			 */
6459 			atomic_inc(&dip->pending_bios);
6460 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
6461 						     file_offset, skip_sum,
6462 						     async_submit);
6463 			if (ret) {
6464 				bio_put(bio);
6465 				atomic_dec(&dip->pending_bios);
6466 				goto out_err;
6467 			}
6468 
6469 			start_sector += submit_len >> 9;
6470 			file_offset += submit_len;
6471 
6472 			submit_len = 0;
6473 			nr_pages = 0;
6474 
6475 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6476 						  start_sector, GFP_NOFS);
6477 			if (!bio)
6478 				goto out_err;
6479 			bio->bi_private = dip;
6480 			bio->bi_end_io = btrfs_end_dio_bio;
6481 
6482 			map_length = orig_bio->bi_size;
6483 			ret = btrfs_map_block(root->fs_info, READ,
6484 					      start_sector << 9,
6485 					      &map_length, NULL, 0);
6486 			if (ret) {
6487 				bio_put(bio);
6488 				goto out_err;
6489 			}
6490 		} else {
6491 			submit_len += bvec->bv_len;
6492 			nr_pages ++;
6493 			bvec++;
6494 		}
6495 	}
6496 
6497 submit:
6498 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6499 				     async_submit);
6500 	if (!ret)
6501 		return 0;
6502 
6503 	bio_put(bio);
6504 out_err:
6505 	dip->errors = 1;
6506 	/*
6507 	 * before atomic variable goto zero, we must
6508 	 * make sure dip->errors is perceived to be set.
6509 	 */
6510 	smp_mb__before_atomic_dec();
6511 	if (atomic_dec_and_test(&dip->pending_bios))
6512 		bio_io_error(dip->orig_bio);
6513 
6514 	/* bio_end_io() will handle error, so we needn't return it */
6515 	return 0;
6516 }
6517 
6518 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6519 				loff_t file_offset)
6520 {
6521 	struct btrfs_root *root = BTRFS_I(inode)->root;
6522 	struct btrfs_dio_private *dip;
6523 	struct bio_vec *bvec = bio->bi_io_vec;
6524 	int skip_sum;
6525 	int write = rw & REQ_WRITE;
6526 	int ret = 0;
6527 
6528 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6529 
6530 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6531 	if (!dip) {
6532 		ret = -ENOMEM;
6533 		goto free_ordered;
6534 	}
6535 
6536 	dip->private = bio->bi_private;
6537 	dip->inode = inode;
6538 	dip->logical_offset = file_offset;
6539 
6540 	dip->bytes = 0;
6541 	do {
6542 		dip->bytes += bvec->bv_len;
6543 		bvec++;
6544 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6545 
6546 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6547 	bio->bi_private = dip;
6548 	dip->errors = 0;
6549 	dip->orig_bio = bio;
6550 	atomic_set(&dip->pending_bios, 0);
6551 
6552 	if (write)
6553 		bio->bi_end_io = btrfs_endio_direct_write;
6554 	else
6555 		bio->bi_end_io = btrfs_endio_direct_read;
6556 
6557 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6558 	if (!ret)
6559 		return;
6560 free_ordered:
6561 	/*
6562 	 * If this is a write, we need to clean up the reserved space and kill
6563 	 * the ordered extent.
6564 	 */
6565 	if (write) {
6566 		struct btrfs_ordered_extent *ordered;
6567 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6568 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6569 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6570 			btrfs_free_reserved_extent(root, ordered->start,
6571 						   ordered->disk_len);
6572 		btrfs_put_ordered_extent(ordered);
6573 		btrfs_put_ordered_extent(ordered);
6574 	}
6575 	bio_endio(bio, ret);
6576 }
6577 
6578 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6579 			const struct iovec *iov, loff_t offset,
6580 			unsigned long nr_segs)
6581 {
6582 	int seg;
6583 	int i;
6584 	size_t size;
6585 	unsigned long addr;
6586 	unsigned blocksize_mask = root->sectorsize - 1;
6587 	ssize_t retval = -EINVAL;
6588 	loff_t end = offset;
6589 
6590 	if (offset & blocksize_mask)
6591 		goto out;
6592 
6593 	/* Check the memory alignment.  Blocks cannot straddle pages */
6594 	for (seg = 0; seg < nr_segs; seg++) {
6595 		addr = (unsigned long)iov[seg].iov_base;
6596 		size = iov[seg].iov_len;
6597 		end += size;
6598 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6599 			goto out;
6600 
6601 		/* If this is a write we don't need to check anymore */
6602 		if (rw & WRITE)
6603 			continue;
6604 
6605 		/*
6606 		 * Check to make sure we don't have duplicate iov_base's in this
6607 		 * iovec, if so return EINVAL, otherwise we'll get csum errors
6608 		 * when reading back.
6609 		 */
6610 		for (i = seg + 1; i < nr_segs; i++) {
6611 			if (iov[seg].iov_base == iov[i].iov_base)
6612 				goto out;
6613 		}
6614 	}
6615 	retval = 0;
6616 out:
6617 	return retval;
6618 }
6619 
6620 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6621 			const struct iovec *iov, loff_t offset,
6622 			unsigned long nr_segs)
6623 {
6624 	struct file *file = iocb->ki_filp;
6625 	struct inode *inode = file->f_mapping->host;
6626 
6627 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6628 			    offset, nr_segs))
6629 		return 0;
6630 
6631 	return __blockdev_direct_IO(rw, iocb, inode,
6632 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6633 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6634 		   btrfs_submit_direct, 0);
6635 }
6636 
6637 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
6638 
6639 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6640 		__u64 start, __u64 len)
6641 {
6642 	int	ret;
6643 
6644 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
6645 	if (ret)
6646 		return ret;
6647 
6648 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6649 }
6650 
6651 int btrfs_readpage(struct file *file, struct page *page)
6652 {
6653 	struct extent_io_tree *tree;
6654 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6655 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6656 }
6657 
6658 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6659 {
6660 	struct extent_io_tree *tree;
6661 
6662 
6663 	if (current->flags & PF_MEMALLOC) {
6664 		redirty_page_for_writepage(wbc, page);
6665 		unlock_page(page);
6666 		return 0;
6667 	}
6668 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6669 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6670 }
6671 
6672 int btrfs_writepages(struct address_space *mapping,
6673 		     struct writeback_control *wbc)
6674 {
6675 	struct extent_io_tree *tree;
6676 
6677 	tree = &BTRFS_I(mapping->host)->io_tree;
6678 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6679 }
6680 
6681 static int
6682 btrfs_readpages(struct file *file, struct address_space *mapping,
6683 		struct list_head *pages, unsigned nr_pages)
6684 {
6685 	struct extent_io_tree *tree;
6686 	tree = &BTRFS_I(mapping->host)->io_tree;
6687 	return extent_readpages(tree, mapping, pages, nr_pages,
6688 				btrfs_get_extent);
6689 }
6690 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6691 {
6692 	struct extent_io_tree *tree;
6693 	struct extent_map_tree *map;
6694 	int ret;
6695 
6696 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6697 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6698 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6699 	if (ret == 1) {
6700 		ClearPagePrivate(page);
6701 		set_page_private(page, 0);
6702 		page_cache_release(page);
6703 	}
6704 	return ret;
6705 }
6706 
6707 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6708 {
6709 	if (PageWriteback(page) || PageDirty(page))
6710 		return 0;
6711 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6712 }
6713 
6714 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6715 {
6716 	struct inode *inode = page->mapping->host;
6717 	struct extent_io_tree *tree;
6718 	struct btrfs_ordered_extent *ordered;
6719 	struct extent_state *cached_state = NULL;
6720 	u64 page_start = page_offset(page);
6721 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6722 
6723 	/*
6724 	 * we have the page locked, so new writeback can't start,
6725 	 * and the dirty bit won't be cleared while we are here.
6726 	 *
6727 	 * Wait for IO on this page so that we can safely clear
6728 	 * the PagePrivate2 bit and do ordered accounting
6729 	 */
6730 	wait_on_page_writeback(page);
6731 
6732 	tree = &BTRFS_I(inode)->io_tree;
6733 	if (offset) {
6734 		btrfs_releasepage(page, GFP_NOFS);
6735 		return;
6736 	}
6737 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6738 	ordered = btrfs_lookup_ordered_extent(inode,
6739 					   page_offset(page));
6740 	if (ordered) {
6741 		/*
6742 		 * IO on this page will never be started, so we need
6743 		 * to account for any ordered extents now
6744 		 */
6745 		clear_extent_bit(tree, page_start, page_end,
6746 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6747 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
6748 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
6749 		/*
6750 		 * whoever cleared the private bit is responsible
6751 		 * for the finish_ordered_io
6752 		 */
6753 		if (TestClearPagePrivate2(page) &&
6754 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
6755 						   PAGE_CACHE_SIZE, 1)) {
6756 			btrfs_finish_ordered_io(ordered);
6757 		}
6758 		btrfs_put_ordered_extent(ordered);
6759 		cached_state = NULL;
6760 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6761 	}
6762 	clear_extent_bit(tree, page_start, page_end,
6763 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6764 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
6765 		 &cached_state, GFP_NOFS);
6766 	__btrfs_releasepage(page, GFP_NOFS);
6767 
6768 	ClearPageChecked(page);
6769 	if (PagePrivate(page)) {
6770 		ClearPagePrivate(page);
6771 		set_page_private(page, 0);
6772 		page_cache_release(page);
6773 	}
6774 }
6775 
6776 /*
6777  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6778  * called from a page fault handler when a page is first dirtied. Hence we must
6779  * be careful to check for EOF conditions here. We set the page up correctly
6780  * for a written page which means we get ENOSPC checking when writing into
6781  * holes and correct delalloc and unwritten extent mapping on filesystems that
6782  * support these features.
6783  *
6784  * We are not allowed to take the i_mutex here so we have to play games to
6785  * protect against truncate races as the page could now be beyond EOF.  Because
6786  * vmtruncate() writes the inode size before removing pages, once we have the
6787  * page lock we can determine safely if the page is beyond EOF. If it is not
6788  * beyond EOF, then the page is guaranteed safe against truncation until we
6789  * unlock the page.
6790  */
6791 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6792 {
6793 	struct page *page = vmf->page;
6794 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6795 	struct btrfs_root *root = BTRFS_I(inode)->root;
6796 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6797 	struct btrfs_ordered_extent *ordered;
6798 	struct extent_state *cached_state = NULL;
6799 	char *kaddr;
6800 	unsigned long zero_start;
6801 	loff_t size;
6802 	int ret;
6803 	int reserved = 0;
6804 	u64 page_start;
6805 	u64 page_end;
6806 
6807 	sb_start_pagefault(inode->i_sb);
6808 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6809 	if (!ret) {
6810 		ret = file_update_time(vma->vm_file);
6811 		reserved = 1;
6812 	}
6813 	if (ret) {
6814 		if (ret == -ENOMEM)
6815 			ret = VM_FAULT_OOM;
6816 		else /* -ENOSPC, -EIO, etc */
6817 			ret = VM_FAULT_SIGBUS;
6818 		if (reserved)
6819 			goto out;
6820 		goto out_noreserve;
6821 	}
6822 
6823 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6824 again:
6825 	lock_page(page);
6826 	size = i_size_read(inode);
6827 	page_start = page_offset(page);
6828 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6829 
6830 	if ((page->mapping != inode->i_mapping) ||
6831 	    (page_start >= size)) {
6832 		/* page got truncated out from underneath us */
6833 		goto out_unlock;
6834 	}
6835 	wait_on_page_writeback(page);
6836 
6837 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6838 	set_page_extent_mapped(page);
6839 
6840 	/*
6841 	 * we can't set the delalloc bits if there are pending ordered
6842 	 * extents.  Drop our locks and wait for them to finish
6843 	 */
6844 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6845 	if (ordered) {
6846 		unlock_extent_cached(io_tree, page_start, page_end,
6847 				     &cached_state, GFP_NOFS);
6848 		unlock_page(page);
6849 		btrfs_start_ordered_extent(inode, ordered, 1);
6850 		btrfs_put_ordered_extent(ordered);
6851 		goto again;
6852 	}
6853 
6854 	/*
6855 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6856 	 * if it was already dirty, so for space accounting reasons we need to
6857 	 * clear any delalloc bits for the range we are fixing to save.  There
6858 	 * is probably a better way to do this, but for now keep consistent with
6859 	 * prepare_pages in the normal write path.
6860 	 */
6861 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6862 			  EXTENT_DIRTY | EXTENT_DELALLOC |
6863 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
6864 			  0, 0, &cached_state, GFP_NOFS);
6865 
6866 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6867 					&cached_state);
6868 	if (ret) {
6869 		unlock_extent_cached(io_tree, page_start, page_end,
6870 				     &cached_state, GFP_NOFS);
6871 		ret = VM_FAULT_SIGBUS;
6872 		goto out_unlock;
6873 	}
6874 	ret = 0;
6875 
6876 	/* page is wholly or partially inside EOF */
6877 	if (page_start + PAGE_CACHE_SIZE > size)
6878 		zero_start = size & ~PAGE_CACHE_MASK;
6879 	else
6880 		zero_start = PAGE_CACHE_SIZE;
6881 
6882 	if (zero_start != PAGE_CACHE_SIZE) {
6883 		kaddr = kmap(page);
6884 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6885 		flush_dcache_page(page);
6886 		kunmap(page);
6887 	}
6888 	ClearPageChecked(page);
6889 	set_page_dirty(page);
6890 	SetPageUptodate(page);
6891 
6892 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6893 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6894 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
6895 
6896 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6897 
6898 out_unlock:
6899 	if (!ret) {
6900 		sb_end_pagefault(inode->i_sb);
6901 		return VM_FAULT_LOCKED;
6902 	}
6903 	unlock_page(page);
6904 out:
6905 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6906 out_noreserve:
6907 	sb_end_pagefault(inode->i_sb);
6908 	return ret;
6909 }
6910 
6911 static int btrfs_truncate(struct inode *inode)
6912 {
6913 	struct btrfs_root *root = BTRFS_I(inode)->root;
6914 	struct btrfs_block_rsv *rsv;
6915 	int ret;
6916 	int err = 0;
6917 	struct btrfs_trans_handle *trans;
6918 	u64 mask = root->sectorsize - 1;
6919 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
6920 
6921 	ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
6922 	if (ret)
6923 		return ret;
6924 
6925 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6926 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6927 
6928 	/*
6929 	 * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
6930 	 * 3 things going on here
6931 	 *
6932 	 * 1) We need to reserve space for our orphan item and the space to
6933 	 * delete our orphan item.  Lord knows we don't want to have a dangling
6934 	 * orphan item because we didn't reserve space to remove it.
6935 	 *
6936 	 * 2) We need to reserve space to update our inode.
6937 	 *
6938 	 * 3) We need to have something to cache all the space that is going to
6939 	 * be free'd up by the truncate operation, but also have some slack
6940 	 * space reserved in case it uses space during the truncate (thank you
6941 	 * very much snapshotting).
6942 	 *
6943 	 * And we need these to all be seperate.  The fact is we can use alot of
6944 	 * space doing the truncate, and we have no earthly idea how much space
6945 	 * we will use, so we need the truncate reservation to be seperate so it
6946 	 * doesn't end up using space reserved for updating the inode or
6947 	 * removing the orphan item.  We also need to be able to stop the
6948 	 * transaction and start a new one, which means we need to be able to
6949 	 * update the inode several times, and we have no idea of knowing how
6950 	 * many times that will be, so we can't just reserve 1 item for the
6951 	 * entirety of the opration, so that has to be done seperately as well.
6952 	 * Then there is the orphan item, which does indeed need to be held on
6953 	 * to for the whole operation, and we need nobody to touch this reserved
6954 	 * space except the orphan code.
6955 	 *
6956 	 * So that leaves us with
6957 	 *
6958 	 * 1) root->orphan_block_rsv - for the orphan deletion.
6959 	 * 2) rsv - for the truncate reservation, which we will steal from the
6960 	 * transaction reservation.
6961 	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6962 	 * updating the inode.
6963 	 */
6964 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
6965 	if (!rsv)
6966 		return -ENOMEM;
6967 	rsv->size = min_size;
6968 	rsv->failfast = 1;
6969 
6970 	/*
6971 	 * 1 for the truncate slack space
6972 	 * 1 for updating the inode.
6973 	 */
6974 	trans = btrfs_start_transaction(root, 2);
6975 	if (IS_ERR(trans)) {
6976 		err = PTR_ERR(trans);
6977 		goto out;
6978 	}
6979 
6980 	/* Migrate the slack space for the truncate to our reserve */
6981 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6982 				      min_size);
6983 	BUG_ON(ret);
6984 
6985 	/*
6986 	 * setattr is responsible for setting the ordered_data_close flag,
6987 	 * but that is only tested during the last file release.  That
6988 	 * could happen well after the next commit, leaving a great big
6989 	 * window where new writes may get lost if someone chooses to write
6990 	 * to this file after truncating to zero
6991 	 *
6992 	 * The inode doesn't have any dirty data here, and so if we commit
6993 	 * this is a noop.  If someone immediately starts writing to the inode
6994 	 * it is very likely we'll catch some of their writes in this
6995 	 * transaction, and the commit will find this file on the ordered
6996 	 * data list with good things to send down.
6997 	 *
6998 	 * This is a best effort solution, there is still a window where
6999 	 * using truncate to replace the contents of the file will
7000 	 * end up with a zero length file after a crash.
7001 	 */
7002 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7003 					   &BTRFS_I(inode)->runtime_flags))
7004 		btrfs_add_ordered_operation(trans, root, inode);
7005 
7006 	/*
7007 	 * So if we truncate and then write and fsync we normally would just
7008 	 * write the extents that changed, which is a problem if we need to
7009 	 * first truncate that entire inode.  So set this flag so we write out
7010 	 * all of the extents in the inode to the sync log so we're completely
7011 	 * safe.
7012 	 */
7013 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
7014 	trans->block_rsv = rsv;
7015 
7016 	while (1) {
7017 		ret = btrfs_truncate_inode_items(trans, root, inode,
7018 						 inode->i_size,
7019 						 BTRFS_EXTENT_DATA_KEY);
7020 		if (ret != -ENOSPC) {
7021 			err = ret;
7022 			break;
7023 		}
7024 
7025 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7026 		ret = btrfs_update_inode(trans, root, inode);
7027 		if (ret) {
7028 			err = ret;
7029 			break;
7030 		}
7031 
7032 		btrfs_end_transaction(trans, root);
7033 		btrfs_btree_balance_dirty(root);
7034 
7035 		trans = btrfs_start_transaction(root, 2);
7036 		if (IS_ERR(trans)) {
7037 			ret = err = PTR_ERR(trans);
7038 			trans = NULL;
7039 			break;
7040 		}
7041 
7042 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7043 					      rsv, min_size);
7044 		BUG_ON(ret);	/* shouldn't happen */
7045 		trans->block_rsv = rsv;
7046 	}
7047 
7048 	if (ret == 0 && inode->i_nlink > 0) {
7049 		trans->block_rsv = root->orphan_block_rsv;
7050 		ret = btrfs_orphan_del(trans, inode);
7051 		if (ret)
7052 			err = ret;
7053 	}
7054 
7055 	if (trans) {
7056 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7057 		ret = btrfs_update_inode(trans, root, inode);
7058 		if (ret && !err)
7059 			err = ret;
7060 
7061 		ret = btrfs_end_transaction(trans, root);
7062 		btrfs_btree_balance_dirty(root);
7063 	}
7064 
7065 out:
7066 	btrfs_free_block_rsv(root, rsv);
7067 
7068 	if (ret && !err)
7069 		err = ret;
7070 
7071 	return err;
7072 }
7073 
7074 /*
7075  * create a new subvolume directory/inode (helper for the ioctl).
7076  */
7077 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7078 			     struct btrfs_root *new_root, u64 new_dirid)
7079 {
7080 	struct inode *inode;
7081 	int err;
7082 	u64 index = 0;
7083 
7084 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7085 				new_dirid, new_dirid,
7086 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7087 				&index);
7088 	if (IS_ERR(inode))
7089 		return PTR_ERR(inode);
7090 	inode->i_op = &btrfs_dir_inode_operations;
7091 	inode->i_fop = &btrfs_dir_file_operations;
7092 
7093 	set_nlink(inode, 1);
7094 	btrfs_i_size_write(inode, 0);
7095 
7096 	err = btrfs_update_inode(trans, new_root, inode);
7097 
7098 	iput(inode);
7099 	return err;
7100 }
7101 
7102 struct inode *btrfs_alloc_inode(struct super_block *sb)
7103 {
7104 	struct btrfs_inode *ei;
7105 	struct inode *inode;
7106 
7107 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7108 	if (!ei)
7109 		return NULL;
7110 
7111 	ei->root = NULL;
7112 	ei->generation = 0;
7113 	ei->last_trans = 0;
7114 	ei->last_sub_trans = 0;
7115 	ei->logged_trans = 0;
7116 	ei->delalloc_bytes = 0;
7117 	ei->disk_i_size = 0;
7118 	ei->flags = 0;
7119 	ei->csum_bytes = 0;
7120 	ei->index_cnt = (u64)-1;
7121 	ei->last_unlink_trans = 0;
7122 	ei->last_log_commit = 0;
7123 
7124 	spin_lock_init(&ei->lock);
7125 	ei->outstanding_extents = 0;
7126 	ei->reserved_extents = 0;
7127 
7128 	ei->runtime_flags = 0;
7129 	ei->force_compress = BTRFS_COMPRESS_NONE;
7130 
7131 	ei->delayed_node = NULL;
7132 
7133 	inode = &ei->vfs_inode;
7134 	extent_map_tree_init(&ei->extent_tree);
7135 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7136 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7137 	ei->io_tree.track_uptodate = 1;
7138 	ei->io_failure_tree.track_uptodate = 1;
7139 	atomic_set(&ei->sync_writers, 0);
7140 	mutex_init(&ei->log_mutex);
7141 	mutex_init(&ei->delalloc_mutex);
7142 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7143 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7144 	INIT_LIST_HEAD(&ei->ordered_operations);
7145 	RB_CLEAR_NODE(&ei->rb_node);
7146 
7147 	return inode;
7148 }
7149 
7150 static void btrfs_i_callback(struct rcu_head *head)
7151 {
7152 	struct inode *inode = container_of(head, struct inode, i_rcu);
7153 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7154 }
7155 
7156 void btrfs_destroy_inode(struct inode *inode)
7157 {
7158 	struct btrfs_ordered_extent *ordered;
7159 	struct btrfs_root *root = BTRFS_I(inode)->root;
7160 
7161 	WARN_ON(!hlist_empty(&inode->i_dentry));
7162 	WARN_ON(inode->i_data.nrpages);
7163 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7164 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7165 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7166 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7167 
7168 	/*
7169 	 * This can happen where we create an inode, but somebody else also
7170 	 * created the same inode and we need to destroy the one we already
7171 	 * created.
7172 	 */
7173 	if (!root)
7174 		goto free;
7175 
7176 	/*
7177 	 * Make sure we're properly removed from the ordered operation
7178 	 * lists.
7179 	 */
7180 	smp_mb();
7181 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7182 		spin_lock(&root->fs_info->ordered_extent_lock);
7183 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7184 		spin_unlock(&root->fs_info->ordered_extent_lock);
7185 	}
7186 
7187 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7188 		     &BTRFS_I(inode)->runtime_flags)) {
7189 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
7190 		       (unsigned long long)btrfs_ino(inode));
7191 		atomic_dec(&root->orphan_inodes);
7192 	}
7193 
7194 	while (1) {
7195 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7196 		if (!ordered)
7197 			break;
7198 		else {
7199 			printk(KERN_ERR "btrfs found ordered "
7200 			       "extent %llu %llu on inode cleanup\n",
7201 			       (unsigned long long)ordered->file_offset,
7202 			       (unsigned long long)ordered->len);
7203 			btrfs_remove_ordered_extent(inode, ordered);
7204 			btrfs_put_ordered_extent(ordered);
7205 			btrfs_put_ordered_extent(ordered);
7206 		}
7207 	}
7208 	inode_tree_del(inode);
7209 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7210 free:
7211 	btrfs_remove_delayed_node(inode);
7212 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7213 }
7214 
7215 int btrfs_drop_inode(struct inode *inode)
7216 {
7217 	struct btrfs_root *root = BTRFS_I(inode)->root;
7218 
7219 	if (btrfs_root_refs(&root->root_item) == 0 &&
7220 	    !btrfs_is_free_space_inode(inode))
7221 		return 1;
7222 	else
7223 		return generic_drop_inode(inode);
7224 }
7225 
7226 static void init_once(void *foo)
7227 {
7228 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7229 
7230 	inode_init_once(&ei->vfs_inode);
7231 }
7232 
7233 void btrfs_destroy_cachep(void)
7234 {
7235 	/*
7236 	 * Make sure all delayed rcu free inodes are flushed before we
7237 	 * destroy cache.
7238 	 */
7239 	rcu_barrier();
7240 	if (btrfs_inode_cachep)
7241 		kmem_cache_destroy(btrfs_inode_cachep);
7242 	if (btrfs_trans_handle_cachep)
7243 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7244 	if (btrfs_transaction_cachep)
7245 		kmem_cache_destroy(btrfs_transaction_cachep);
7246 	if (btrfs_path_cachep)
7247 		kmem_cache_destroy(btrfs_path_cachep);
7248 	if (btrfs_free_space_cachep)
7249 		kmem_cache_destroy(btrfs_free_space_cachep);
7250 	if (btrfs_delalloc_work_cachep)
7251 		kmem_cache_destroy(btrfs_delalloc_work_cachep);
7252 }
7253 
7254 int btrfs_init_cachep(void)
7255 {
7256 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7257 			sizeof(struct btrfs_inode), 0,
7258 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7259 	if (!btrfs_inode_cachep)
7260 		goto fail;
7261 
7262 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
7263 			sizeof(struct btrfs_trans_handle), 0,
7264 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7265 	if (!btrfs_trans_handle_cachep)
7266 		goto fail;
7267 
7268 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
7269 			sizeof(struct btrfs_transaction), 0,
7270 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7271 	if (!btrfs_transaction_cachep)
7272 		goto fail;
7273 
7274 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
7275 			sizeof(struct btrfs_path), 0,
7276 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7277 	if (!btrfs_path_cachep)
7278 		goto fail;
7279 
7280 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
7281 			sizeof(struct btrfs_free_space), 0,
7282 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7283 	if (!btrfs_free_space_cachep)
7284 		goto fail;
7285 
7286 	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
7287 			sizeof(struct btrfs_delalloc_work), 0,
7288 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
7289 			NULL);
7290 	if (!btrfs_delalloc_work_cachep)
7291 		goto fail;
7292 
7293 	return 0;
7294 fail:
7295 	btrfs_destroy_cachep();
7296 	return -ENOMEM;
7297 }
7298 
7299 static int btrfs_getattr(struct vfsmount *mnt,
7300 			 struct dentry *dentry, struct kstat *stat)
7301 {
7302 	struct inode *inode = dentry->d_inode;
7303 	u32 blocksize = inode->i_sb->s_blocksize;
7304 
7305 	generic_fillattr(inode, stat);
7306 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7307 	stat->blksize = PAGE_CACHE_SIZE;
7308 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7309 		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
7310 	return 0;
7311 }
7312 
7313 /*
7314  * If a file is moved, it will inherit the cow and compression flags of the new
7315  * directory.
7316  */
7317 static void fixup_inode_flags(struct inode *dir, struct inode *inode)
7318 {
7319 	struct btrfs_inode *b_dir = BTRFS_I(dir);
7320 	struct btrfs_inode *b_inode = BTRFS_I(inode);
7321 
7322 	if (b_dir->flags & BTRFS_INODE_NODATACOW)
7323 		b_inode->flags |= BTRFS_INODE_NODATACOW;
7324 	else
7325 		b_inode->flags &= ~BTRFS_INODE_NODATACOW;
7326 
7327 	if (b_dir->flags & BTRFS_INODE_COMPRESS) {
7328 		b_inode->flags |= BTRFS_INODE_COMPRESS;
7329 		b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
7330 	} else {
7331 		b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
7332 				    BTRFS_INODE_NOCOMPRESS);
7333 	}
7334 }
7335 
7336 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7337 			   struct inode *new_dir, struct dentry *new_dentry)
7338 {
7339 	struct btrfs_trans_handle *trans;
7340 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7341 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7342 	struct inode *new_inode = new_dentry->d_inode;
7343 	struct inode *old_inode = old_dentry->d_inode;
7344 	struct timespec ctime = CURRENT_TIME;
7345 	u64 index = 0;
7346 	u64 root_objectid;
7347 	int ret;
7348 	u64 old_ino = btrfs_ino(old_inode);
7349 
7350 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
7351 		return -EPERM;
7352 
7353 	/* we only allow rename subvolume link between subvolumes */
7354 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
7355 		return -EXDEV;
7356 
7357 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
7358 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
7359 		return -ENOTEMPTY;
7360 
7361 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
7362 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
7363 		return -ENOTEMPTY;
7364 
7365 
7366 	/* check for collisions, even if the  name isn't there */
7367 	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
7368 			     new_dentry->d_name.name,
7369 			     new_dentry->d_name.len);
7370 
7371 	if (ret) {
7372 		if (ret == -EEXIST) {
7373 			/* we shouldn't get
7374 			 * eexist without a new_inode */
7375 			if (!new_inode) {
7376 				WARN_ON(1);
7377 				return ret;
7378 			}
7379 		} else {
7380 			/* maybe -EOVERFLOW */
7381 			return ret;
7382 		}
7383 	}
7384 	ret = 0;
7385 
7386 	/*
7387 	 * we're using rename to replace one file with another.
7388 	 * and the replacement file is large.  Start IO on it now so
7389 	 * we don't add too much work to the end of the transaction
7390 	 */
7391 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
7392 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
7393 		filemap_flush(old_inode->i_mapping);
7394 
7395 	/* close the racy window with snapshot create/destroy ioctl */
7396 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7397 		down_read(&root->fs_info->subvol_sem);
7398 	/*
7399 	 * We want to reserve the absolute worst case amount of items.  So if
7400 	 * both inodes are subvols and we need to unlink them then that would
7401 	 * require 4 item modifications, but if they are both normal inodes it
7402 	 * would require 5 item modifications, so we'll assume their normal
7403 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
7404 	 * should cover the worst case number of items we'll modify.
7405 	 */
7406 	trans = btrfs_start_transaction(root, 20);
7407 	if (IS_ERR(trans)) {
7408                 ret = PTR_ERR(trans);
7409                 goto out_notrans;
7410         }
7411 
7412 	if (dest != root)
7413 		btrfs_record_root_in_trans(trans, dest);
7414 
7415 	ret = btrfs_set_inode_index(new_dir, &index);
7416 	if (ret)
7417 		goto out_fail;
7418 
7419 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7420 		/* force full log commit if subvolume involved. */
7421 		root->fs_info->last_trans_log_full_commit = trans->transid;
7422 	} else {
7423 		ret = btrfs_insert_inode_ref(trans, dest,
7424 					     new_dentry->d_name.name,
7425 					     new_dentry->d_name.len,
7426 					     old_ino,
7427 					     btrfs_ino(new_dir), index);
7428 		if (ret)
7429 			goto out_fail;
7430 		/*
7431 		 * this is an ugly little race, but the rename is required
7432 		 * to make sure that if we crash, the inode is either at the
7433 		 * old name or the new one.  pinning the log transaction lets
7434 		 * us make sure we don't allow a log commit to come in after
7435 		 * we unlink the name but before we add the new name back in.
7436 		 */
7437 		btrfs_pin_log_trans(root);
7438 	}
7439 	/*
7440 	 * make sure the inode gets flushed if it is replacing
7441 	 * something.
7442 	 */
7443 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7444 		btrfs_add_ordered_operation(trans, root, old_inode);
7445 
7446 	inode_inc_iversion(old_dir);
7447 	inode_inc_iversion(new_dir);
7448 	inode_inc_iversion(old_inode);
7449 	old_dir->i_ctime = old_dir->i_mtime = ctime;
7450 	new_dir->i_ctime = new_dir->i_mtime = ctime;
7451 	old_inode->i_ctime = ctime;
7452 
7453 	if (old_dentry->d_parent != new_dentry->d_parent)
7454 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7455 
7456 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7457 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7458 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7459 					old_dentry->d_name.name,
7460 					old_dentry->d_name.len);
7461 	} else {
7462 		ret = __btrfs_unlink_inode(trans, root, old_dir,
7463 					old_dentry->d_inode,
7464 					old_dentry->d_name.name,
7465 					old_dentry->d_name.len);
7466 		if (!ret)
7467 			ret = btrfs_update_inode(trans, root, old_inode);
7468 	}
7469 	if (ret) {
7470 		btrfs_abort_transaction(trans, root, ret);
7471 		goto out_fail;
7472 	}
7473 
7474 	if (new_inode) {
7475 		inode_inc_iversion(new_inode);
7476 		new_inode->i_ctime = CURRENT_TIME;
7477 		if (unlikely(btrfs_ino(new_inode) ==
7478 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7479 			root_objectid = BTRFS_I(new_inode)->location.objectid;
7480 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
7481 						root_objectid,
7482 						new_dentry->d_name.name,
7483 						new_dentry->d_name.len);
7484 			BUG_ON(new_inode->i_nlink == 0);
7485 		} else {
7486 			ret = btrfs_unlink_inode(trans, dest, new_dir,
7487 						 new_dentry->d_inode,
7488 						 new_dentry->d_name.name,
7489 						 new_dentry->d_name.len);
7490 		}
7491 		if (!ret && new_inode->i_nlink == 0) {
7492 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7493 			BUG_ON(ret);
7494 		}
7495 		if (ret) {
7496 			btrfs_abort_transaction(trans, root, ret);
7497 			goto out_fail;
7498 		}
7499 	}
7500 
7501 	fixup_inode_flags(new_dir, old_inode);
7502 
7503 	ret = btrfs_add_link(trans, new_dir, old_inode,
7504 			     new_dentry->d_name.name,
7505 			     new_dentry->d_name.len, 0, index);
7506 	if (ret) {
7507 		btrfs_abort_transaction(trans, root, ret);
7508 		goto out_fail;
7509 	}
7510 
7511 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7512 		struct dentry *parent = new_dentry->d_parent;
7513 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
7514 		btrfs_end_log_trans(root);
7515 	}
7516 out_fail:
7517 	btrfs_end_transaction(trans, root);
7518 out_notrans:
7519 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7520 		up_read(&root->fs_info->subvol_sem);
7521 
7522 	return ret;
7523 }
7524 
7525 static void btrfs_run_delalloc_work(struct btrfs_work *work)
7526 {
7527 	struct btrfs_delalloc_work *delalloc_work;
7528 
7529 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
7530 				     work);
7531 	if (delalloc_work->wait)
7532 		btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
7533 	else
7534 		filemap_flush(delalloc_work->inode->i_mapping);
7535 
7536 	if (delalloc_work->delay_iput)
7537 		btrfs_add_delayed_iput(delalloc_work->inode);
7538 	else
7539 		iput(delalloc_work->inode);
7540 	complete(&delalloc_work->completion);
7541 }
7542 
7543 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
7544 						    int wait, int delay_iput)
7545 {
7546 	struct btrfs_delalloc_work *work;
7547 
7548 	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
7549 	if (!work)
7550 		return NULL;
7551 
7552 	init_completion(&work->completion);
7553 	INIT_LIST_HEAD(&work->list);
7554 	work->inode = inode;
7555 	work->wait = wait;
7556 	work->delay_iput = delay_iput;
7557 	work->work.func = btrfs_run_delalloc_work;
7558 
7559 	return work;
7560 }
7561 
7562 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
7563 {
7564 	wait_for_completion(&work->completion);
7565 	kmem_cache_free(btrfs_delalloc_work_cachep, work);
7566 }
7567 
7568 /*
7569  * some fairly slow code that needs optimization. This walks the list
7570  * of all the inodes with pending delalloc and forces them to disk.
7571  */
7572 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7573 {
7574 	struct btrfs_inode *binode;
7575 	struct inode *inode;
7576 	struct btrfs_delalloc_work *work, *next;
7577 	struct list_head works;
7578 	struct list_head splice;
7579 	int ret = 0;
7580 
7581 	if (root->fs_info->sb->s_flags & MS_RDONLY)
7582 		return -EROFS;
7583 
7584 	INIT_LIST_HEAD(&works);
7585 	INIT_LIST_HEAD(&splice);
7586 again:
7587 	spin_lock(&root->fs_info->delalloc_lock);
7588 	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
7589 	while (!list_empty(&splice)) {
7590 		binode = list_entry(splice.next, struct btrfs_inode,
7591 				    delalloc_inodes);
7592 
7593 		list_del_init(&binode->delalloc_inodes);
7594 
7595 		inode = igrab(&binode->vfs_inode);
7596 		if (!inode)
7597 			continue;
7598 
7599 		list_add_tail(&binode->delalloc_inodes,
7600 			      &root->fs_info->delalloc_inodes);
7601 		spin_unlock(&root->fs_info->delalloc_lock);
7602 
7603 		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
7604 		if (unlikely(!work)) {
7605 			ret = -ENOMEM;
7606 			goto out;
7607 		}
7608 		list_add_tail(&work->list, &works);
7609 		btrfs_queue_worker(&root->fs_info->flush_workers,
7610 				   &work->work);
7611 
7612 		cond_resched();
7613 		spin_lock(&root->fs_info->delalloc_lock);
7614 	}
7615 	spin_unlock(&root->fs_info->delalloc_lock);
7616 
7617 	list_for_each_entry_safe(work, next, &works, list) {
7618 		list_del_init(&work->list);
7619 		btrfs_wait_and_free_delalloc_work(work);
7620 	}
7621 
7622 	spin_lock(&root->fs_info->delalloc_lock);
7623 	if (!list_empty(&root->fs_info->delalloc_inodes)) {
7624 		spin_unlock(&root->fs_info->delalloc_lock);
7625 		goto again;
7626 	}
7627 	spin_unlock(&root->fs_info->delalloc_lock);
7628 
7629 	/* the filemap_flush will queue IO into the worker threads, but
7630 	 * we have to make sure the IO is actually started and that
7631 	 * ordered extents get created before we return
7632 	 */
7633 	atomic_inc(&root->fs_info->async_submit_draining);
7634 	while (atomic_read(&root->fs_info->nr_async_submits) ||
7635 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7636 		wait_event(root->fs_info->async_submit_wait,
7637 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
7638 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7639 	}
7640 	atomic_dec(&root->fs_info->async_submit_draining);
7641 	return 0;
7642 out:
7643 	list_for_each_entry_safe(work, next, &works, list) {
7644 		list_del_init(&work->list);
7645 		btrfs_wait_and_free_delalloc_work(work);
7646 	}
7647 
7648 	if (!list_empty_careful(&splice)) {
7649 		spin_lock(&root->fs_info->delalloc_lock);
7650 		list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
7651 		spin_unlock(&root->fs_info->delalloc_lock);
7652 	}
7653 	return ret;
7654 }
7655 
7656 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7657 			 const char *symname)
7658 {
7659 	struct btrfs_trans_handle *trans;
7660 	struct btrfs_root *root = BTRFS_I(dir)->root;
7661 	struct btrfs_path *path;
7662 	struct btrfs_key key;
7663 	struct inode *inode = NULL;
7664 	int err;
7665 	int drop_inode = 0;
7666 	u64 objectid;
7667 	u64 index = 0 ;
7668 	int name_len;
7669 	int datasize;
7670 	unsigned long ptr;
7671 	struct btrfs_file_extent_item *ei;
7672 	struct extent_buffer *leaf;
7673 
7674 	name_len = strlen(symname) + 1;
7675 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7676 		return -ENAMETOOLONG;
7677 
7678 	/*
7679 	 * 2 items for inode item and ref
7680 	 * 2 items for dir items
7681 	 * 1 item for xattr if selinux is on
7682 	 */
7683 	trans = btrfs_start_transaction(root, 5);
7684 	if (IS_ERR(trans))
7685 		return PTR_ERR(trans);
7686 
7687 	err = btrfs_find_free_ino(root, &objectid);
7688 	if (err)
7689 		goto out_unlock;
7690 
7691 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7692 				dentry->d_name.len, btrfs_ino(dir), objectid,
7693 				S_IFLNK|S_IRWXUGO, &index);
7694 	if (IS_ERR(inode)) {
7695 		err = PTR_ERR(inode);
7696 		goto out_unlock;
7697 	}
7698 
7699 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7700 	if (err) {
7701 		drop_inode = 1;
7702 		goto out_unlock;
7703 	}
7704 
7705 	/*
7706 	* If the active LSM wants to access the inode during
7707 	* d_instantiate it needs these. Smack checks to see
7708 	* if the filesystem supports xattrs by looking at the
7709 	* ops vector.
7710 	*/
7711 	inode->i_fop = &btrfs_file_operations;
7712 	inode->i_op = &btrfs_file_inode_operations;
7713 
7714 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7715 	if (err)
7716 		drop_inode = 1;
7717 	else {
7718 		inode->i_mapping->a_ops = &btrfs_aops;
7719 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7720 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7721 	}
7722 	if (drop_inode)
7723 		goto out_unlock;
7724 
7725 	path = btrfs_alloc_path();
7726 	if (!path) {
7727 		err = -ENOMEM;
7728 		drop_inode = 1;
7729 		goto out_unlock;
7730 	}
7731 	key.objectid = btrfs_ino(inode);
7732 	key.offset = 0;
7733 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7734 	datasize = btrfs_file_extent_calc_inline_size(name_len);
7735 	err = btrfs_insert_empty_item(trans, root, path, &key,
7736 				      datasize);
7737 	if (err) {
7738 		drop_inode = 1;
7739 		btrfs_free_path(path);
7740 		goto out_unlock;
7741 	}
7742 	leaf = path->nodes[0];
7743 	ei = btrfs_item_ptr(leaf, path->slots[0],
7744 			    struct btrfs_file_extent_item);
7745 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
7746 	btrfs_set_file_extent_type(leaf, ei,
7747 				   BTRFS_FILE_EXTENT_INLINE);
7748 	btrfs_set_file_extent_encryption(leaf, ei, 0);
7749 	btrfs_set_file_extent_compression(leaf, ei, 0);
7750 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
7751 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
7752 
7753 	ptr = btrfs_file_extent_inline_start(ei);
7754 	write_extent_buffer(leaf, symname, ptr, name_len);
7755 	btrfs_mark_buffer_dirty(leaf);
7756 	btrfs_free_path(path);
7757 
7758 	inode->i_op = &btrfs_symlink_inode_operations;
7759 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7760 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7761 	inode_set_bytes(inode, name_len);
7762 	btrfs_i_size_write(inode, name_len - 1);
7763 	err = btrfs_update_inode(trans, root, inode);
7764 	if (err)
7765 		drop_inode = 1;
7766 
7767 out_unlock:
7768 	if (!err)
7769 		d_instantiate(dentry, inode);
7770 	btrfs_end_transaction(trans, root);
7771 	if (drop_inode) {
7772 		inode_dec_link_count(inode);
7773 		iput(inode);
7774 	}
7775 	btrfs_btree_balance_dirty(root);
7776 	return err;
7777 }
7778 
7779 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7780 				       u64 start, u64 num_bytes, u64 min_size,
7781 				       loff_t actual_len, u64 *alloc_hint,
7782 				       struct btrfs_trans_handle *trans)
7783 {
7784 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
7785 	struct extent_map *em;
7786 	struct btrfs_root *root = BTRFS_I(inode)->root;
7787 	struct btrfs_key ins;
7788 	u64 cur_offset = start;
7789 	u64 i_size;
7790 	int ret = 0;
7791 	bool own_trans = true;
7792 
7793 	if (trans)
7794 		own_trans = false;
7795 	while (num_bytes > 0) {
7796 		if (own_trans) {
7797 			trans = btrfs_start_transaction(root, 3);
7798 			if (IS_ERR(trans)) {
7799 				ret = PTR_ERR(trans);
7800 				break;
7801 			}
7802 		}
7803 
7804 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7805 					   0, *alloc_hint, &ins, 1);
7806 		if (ret) {
7807 			if (own_trans)
7808 				btrfs_end_transaction(trans, root);
7809 			break;
7810 		}
7811 
7812 		ret = insert_reserved_file_extent(trans, inode,
7813 						  cur_offset, ins.objectid,
7814 						  ins.offset, ins.offset,
7815 						  ins.offset, 0, 0, 0,
7816 						  BTRFS_FILE_EXTENT_PREALLOC);
7817 		if (ret) {
7818 			btrfs_abort_transaction(trans, root, ret);
7819 			if (own_trans)
7820 				btrfs_end_transaction(trans, root);
7821 			break;
7822 		}
7823 		btrfs_drop_extent_cache(inode, cur_offset,
7824 					cur_offset + ins.offset -1, 0);
7825 
7826 		em = alloc_extent_map();
7827 		if (!em) {
7828 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
7829 				&BTRFS_I(inode)->runtime_flags);
7830 			goto next;
7831 		}
7832 
7833 		em->start = cur_offset;
7834 		em->orig_start = cur_offset;
7835 		em->len = ins.offset;
7836 		em->block_start = ins.objectid;
7837 		em->block_len = ins.offset;
7838 		em->orig_block_len = ins.offset;
7839 		em->bdev = root->fs_info->fs_devices->latest_bdev;
7840 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7841 		em->generation = trans->transid;
7842 
7843 		while (1) {
7844 			write_lock(&em_tree->lock);
7845 			ret = add_extent_mapping(em_tree, em);
7846 			if (!ret)
7847 				list_move(&em->list,
7848 					  &em_tree->modified_extents);
7849 			write_unlock(&em_tree->lock);
7850 			if (ret != -EEXIST)
7851 				break;
7852 			btrfs_drop_extent_cache(inode, cur_offset,
7853 						cur_offset + ins.offset - 1,
7854 						0);
7855 		}
7856 		free_extent_map(em);
7857 next:
7858 		num_bytes -= ins.offset;
7859 		cur_offset += ins.offset;
7860 		*alloc_hint = ins.objectid + ins.offset;
7861 
7862 		inode_inc_iversion(inode);
7863 		inode->i_ctime = CURRENT_TIME;
7864 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7865 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7866 		    (actual_len > inode->i_size) &&
7867 		    (cur_offset > inode->i_size)) {
7868 			if (cur_offset > actual_len)
7869 				i_size = actual_len;
7870 			else
7871 				i_size = cur_offset;
7872 			i_size_write(inode, i_size);
7873 			btrfs_ordered_update_i_size(inode, i_size, NULL);
7874 		}
7875 
7876 		ret = btrfs_update_inode(trans, root, inode);
7877 
7878 		if (ret) {
7879 			btrfs_abort_transaction(trans, root, ret);
7880 			if (own_trans)
7881 				btrfs_end_transaction(trans, root);
7882 			break;
7883 		}
7884 
7885 		if (own_trans)
7886 			btrfs_end_transaction(trans, root);
7887 	}
7888 	return ret;
7889 }
7890 
7891 int btrfs_prealloc_file_range(struct inode *inode, int mode,
7892 			      u64 start, u64 num_bytes, u64 min_size,
7893 			      loff_t actual_len, u64 *alloc_hint)
7894 {
7895 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7896 					   min_size, actual_len, alloc_hint,
7897 					   NULL);
7898 }
7899 
7900 int btrfs_prealloc_file_range_trans(struct inode *inode,
7901 				    struct btrfs_trans_handle *trans, int mode,
7902 				    u64 start, u64 num_bytes, u64 min_size,
7903 				    loff_t actual_len, u64 *alloc_hint)
7904 {
7905 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7906 					   min_size, actual_len, alloc_hint, trans);
7907 }
7908 
7909 static int btrfs_set_page_dirty(struct page *page)
7910 {
7911 	return __set_page_dirty_nobuffers(page);
7912 }
7913 
7914 static int btrfs_permission(struct inode *inode, int mask)
7915 {
7916 	struct btrfs_root *root = BTRFS_I(inode)->root;
7917 	umode_t mode = inode->i_mode;
7918 
7919 	if (mask & MAY_WRITE &&
7920 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7921 		if (btrfs_root_readonly(root))
7922 			return -EROFS;
7923 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7924 			return -EACCES;
7925 	}
7926 	return generic_permission(inode, mask);
7927 }
7928 
7929 static const struct inode_operations btrfs_dir_inode_operations = {
7930 	.getattr	= btrfs_getattr,
7931 	.lookup		= btrfs_lookup,
7932 	.create		= btrfs_create,
7933 	.unlink		= btrfs_unlink,
7934 	.link		= btrfs_link,
7935 	.mkdir		= btrfs_mkdir,
7936 	.rmdir		= btrfs_rmdir,
7937 	.rename		= btrfs_rename,
7938 	.symlink	= btrfs_symlink,
7939 	.setattr	= btrfs_setattr,
7940 	.mknod		= btrfs_mknod,
7941 	.setxattr	= btrfs_setxattr,
7942 	.getxattr	= btrfs_getxattr,
7943 	.listxattr	= btrfs_listxattr,
7944 	.removexattr	= btrfs_removexattr,
7945 	.permission	= btrfs_permission,
7946 	.get_acl	= btrfs_get_acl,
7947 };
7948 static const struct inode_operations btrfs_dir_ro_inode_operations = {
7949 	.lookup		= btrfs_lookup,
7950 	.permission	= btrfs_permission,
7951 	.get_acl	= btrfs_get_acl,
7952 };
7953 
7954 static const struct file_operations btrfs_dir_file_operations = {
7955 	.llseek		= generic_file_llseek,
7956 	.read		= generic_read_dir,
7957 	.readdir	= btrfs_real_readdir,
7958 	.unlocked_ioctl	= btrfs_ioctl,
7959 #ifdef CONFIG_COMPAT
7960 	.compat_ioctl	= btrfs_ioctl,
7961 #endif
7962 	.release        = btrfs_release_file,
7963 	.fsync		= btrfs_sync_file,
7964 };
7965 
7966 static struct extent_io_ops btrfs_extent_io_ops = {
7967 	.fill_delalloc = run_delalloc_range,
7968 	.submit_bio_hook = btrfs_submit_bio_hook,
7969 	.merge_bio_hook = btrfs_merge_bio_hook,
7970 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7971 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7972 	.writepage_start_hook = btrfs_writepage_start_hook,
7973 	.set_bit_hook = btrfs_set_bit_hook,
7974 	.clear_bit_hook = btrfs_clear_bit_hook,
7975 	.merge_extent_hook = btrfs_merge_extent_hook,
7976 	.split_extent_hook = btrfs_split_extent_hook,
7977 };
7978 
7979 /*
7980  * btrfs doesn't support the bmap operation because swapfiles
7981  * use bmap to make a mapping of extents in the file.  They assume
7982  * these extents won't change over the life of the file and they
7983  * use the bmap result to do IO directly to the drive.
7984  *
7985  * the btrfs bmap call would return logical addresses that aren't
7986  * suitable for IO and they also will change frequently as COW
7987  * operations happen.  So, swapfile + btrfs == corruption.
7988  *
7989  * For now we're avoiding this by dropping bmap.
7990  */
7991 static const struct address_space_operations btrfs_aops = {
7992 	.readpage	= btrfs_readpage,
7993 	.writepage	= btrfs_writepage,
7994 	.writepages	= btrfs_writepages,
7995 	.readpages	= btrfs_readpages,
7996 	.direct_IO	= btrfs_direct_IO,
7997 	.invalidatepage = btrfs_invalidatepage,
7998 	.releasepage	= btrfs_releasepage,
7999 	.set_page_dirty	= btrfs_set_page_dirty,
8000 	.error_remove_page = generic_error_remove_page,
8001 };
8002 
8003 static const struct address_space_operations btrfs_symlink_aops = {
8004 	.readpage	= btrfs_readpage,
8005 	.writepage	= btrfs_writepage,
8006 	.invalidatepage = btrfs_invalidatepage,
8007 	.releasepage	= btrfs_releasepage,
8008 };
8009 
8010 static const struct inode_operations btrfs_file_inode_operations = {
8011 	.getattr	= btrfs_getattr,
8012 	.setattr	= btrfs_setattr,
8013 	.setxattr	= btrfs_setxattr,
8014 	.getxattr	= btrfs_getxattr,
8015 	.listxattr      = btrfs_listxattr,
8016 	.removexattr	= btrfs_removexattr,
8017 	.permission	= btrfs_permission,
8018 	.fiemap		= btrfs_fiemap,
8019 	.get_acl	= btrfs_get_acl,
8020 	.update_time	= btrfs_update_time,
8021 };
8022 static const struct inode_operations btrfs_special_inode_operations = {
8023 	.getattr	= btrfs_getattr,
8024 	.setattr	= btrfs_setattr,
8025 	.permission	= btrfs_permission,
8026 	.setxattr	= btrfs_setxattr,
8027 	.getxattr	= btrfs_getxattr,
8028 	.listxattr	= btrfs_listxattr,
8029 	.removexattr	= btrfs_removexattr,
8030 	.get_acl	= btrfs_get_acl,
8031 	.update_time	= btrfs_update_time,
8032 };
8033 static const struct inode_operations btrfs_symlink_inode_operations = {
8034 	.readlink	= generic_readlink,
8035 	.follow_link	= page_follow_link_light,
8036 	.put_link	= page_put_link,
8037 	.getattr	= btrfs_getattr,
8038 	.setattr	= btrfs_setattr,
8039 	.permission	= btrfs_permission,
8040 	.setxattr	= btrfs_setxattr,
8041 	.getxattr	= btrfs_getxattr,
8042 	.listxattr	= btrfs_listxattr,
8043 	.removexattr	= btrfs_removexattr,
8044 	.get_acl	= btrfs_get_acl,
8045 	.update_time	= btrfs_update_time,
8046 };
8047 
8048 const struct dentry_operations btrfs_dentry_operations = {
8049 	.d_delete	= btrfs_dentry_delete,
8050 	.d_release	= btrfs_dentry_release,
8051 };
8052