xref: /openbmc/linux/fs/btrfs/inode.c (revision 05bcf503)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
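
/*
 * The table above maps the S_IFMT bits of an inode's i_mode to the
 * BTRFS_FT_* type stored in directory entries.  Illustrative lookup
 * (the actual lookups are done by helpers elsewhere in this file):
 *
 *	u8 ftype = btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
 */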

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
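
/*
 * btrfs_init_inode_security() runs at inode creation time: ACLs are
 * inherited from the parent directory first, then the security xattr
 * (e.g. an SELinux label) is initialized, all inside the same
 * transaction so a new inode is never visible without its security
 * metadata.
 */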

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
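
/*
 * For reference, an inline extent stores the file data directly in
 * the leaf, right after the header fields of the file extent item,
 * so the item size computed by btrfs_file_extent_calc_inline_size()
 * above is (roughly, see ctree.h for the authoritative definition):
 *
 *	offsetof(struct btrfs_file_extent_item, disk_bytenr) + data_len
 *
 * which is also why btrfs_file_extent_inline_start() points just past
 * those header fields.
 */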

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
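
/*
 * Taken together, the checks above mean an inline extent is only
 * attempted for a range that starts at file offset 0, ends before the
 * first page boundary, reaches the current EOF, doesn't end exactly on
 * a sector boundary when uncompressed, and fits under both
 * BTRFS_MAX_INLINE_DATA_SIZE() and the max_inline mount option.  For
 * example, with 4K pages a 3000 byte file written from offset 0
 * typically qualifies, while the same bytes written at offset 4096
 * never will.
 */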

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * we want to make sure that the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);
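		/*
		 * e.g. with a 4K blocksize, 5000 compressed bytes round
		 * up to 8192: (5000 + 4095) & ~4095 == 8192.
		 */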

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret && ret != -ENOSPC)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
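			/*
			 * the compressed pages were freed above, so when
			 * we retry after -ENOSPC, async_extent->pages is
			 * NULL and the extent is rewritten through the
			 * uncompressed cow_file_range() path instead.
			 */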
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
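		/*
		 * btrfs_reserve_extent() may hand back less than we asked
		 * for (ins.offset can be as small as one sectorsize); the
		 * remainder of the range is covered by later iterations of
		 * this loop.
		 */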

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, locked_page,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;
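		/*
		 * work.func (compression) can run on any CPU in parallel,
		 * but the worker queue calls work.ordered_func (IO
		 * submission) strictly in queueing order, so writeback
		 * stays ordered even though compression is concurrent.
		 */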

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called for nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     cur_offset, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!btrfs_test_opt(root, COMPRESS) &&
		   !(BTRFS_I(inode)->force_compress) &&
		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

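/*
 * extent_io.c split_extent_hook: a delalloc extent splitting in two
 * means one more outstanding extent to reserve metadata for.
 */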
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
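	/* bi_sector counts 512 byte hardware sectors, hence the << 9 */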
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * second phase of the async checksumming: the csums were generated
 * by __btrfs_submit_bio_start, so all that is left to do here is map
 * and submit the bio
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			return ret;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
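	/* end is an inclusive offset, so it should never be page aligned */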
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}
1732 
1733 /*
1734  * There are a few paths in the higher layers of the kernel that directly
1735  * set the page dirty bit without asking the filesystem if it is a
1736  * good idea.  This causes problems because we want to make sure COW
1737  * properly happens and the data=ordered rules are followed.
1738  *
1739  * In our case any range that doesn't have the ORDERED bit set
1740  * hasn't been properly set up for IO.  We kick off an async process
1741  * to fix it up.  The async helper will wait for ordered extents, set
1742  * the delalloc bit and make it safe to write the page.
1743  */
1744 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1745 {
1746 	struct inode *inode = page->mapping->host;
1747 	struct btrfs_writepage_fixup *fixup;
1748 	struct btrfs_root *root = BTRFS_I(inode)->root;
1749 
1750 	/* this page is properly in the ordered list */
1751 	if (TestClearPagePrivate2(page))
1752 		return 0;
1753 
1754 	if (PageChecked(page))
1755 		return -EAGAIN;
1756 
1757 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1758 	if (!fixup)
1759 		return -EAGAIN;
1760 
1761 	SetPageChecked(page);
1762 	page_cache_get(page);
1763 	fixup->work.func = btrfs_writepage_fixup_worker;
1764 	fixup->page = page;
1765 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1766 	return -EBUSY;
1767 }
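
/*
 * Roughly how a writepage path is expected to react to the hook above
 * (a sketch with a hypothetical wrapper, not the extent_io.c caller
 * verbatim): 0 means the range is properly ordered and writeback may
 * proceed; an error means the async fixup worker now owns the page, so
 * it is redirtied and skipped for this pass.
 */
#if 0
static int example_run_start_hook(struct page *page,
				  struct writeback_control *wbc,
				  u64 start, u64 end)
{
	int ret = btrfs_writepage_start_hook(page, start, end);

	if (ret) {
		/* -EBUSY or -EAGAIN: leave the page dirty for the worker */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	}
	return ret;
}
#endif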
1768 
1769 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1770 				       struct inode *inode, u64 file_pos,
1771 				       u64 disk_bytenr, u64 disk_num_bytes,
1772 				       u64 num_bytes, u64 ram_bytes,
1773 				       u8 compression, u8 encryption,
1774 				       u16 other_encoding, int extent_type)
1775 {
1776 	struct btrfs_root *root = BTRFS_I(inode)->root;
1777 	struct btrfs_file_extent_item *fi;
1778 	struct btrfs_path *path;
1779 	struct extent_buffer *leaf;
1780 	struct btrfs_key ins;
1781 	int ret;
1782 
1783 	path = btrfs_alloc_path();
1784 	if (!path)
1785 		return -ENOMEM;
1786 
1787 	path->leave_spinning = 1;
1788 
1789 	/*
1790 	 * we may be replacing one extent in the tree with another.
1791 	 * The new extent is pinned in the extent map, and we don't want
1792 	 * to drop it from the cache until it is completely in the btree.
1793 	 *
1794 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1795 	 * The caller is expected to unpin it and allow it to be merged
1796 	 * with the others.
1797 	 */
1798 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1799 				 file_pos + num_bytes, 0);
1800 	if (ret)
1801 		goto out;
1802 
1803 	ins.objectid = btrfs_ino(inode);
1804 	ins.offset = file_pos;
1805 	ins.type = BTRFS_EXTENT_DATA_KEY;
1806 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1807 	if (ret)
1808 		goto out;
1809 	leaf = path->nodes[0];
1810 	fi = btrfs_item_ptr(leaf, path->slots[0],
1811 			    struct btrfs_file_extent_item);
1812 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1813 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1814 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1815 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1816 	btrfs_set_file_extent_offset(leaf, fi, 0);
1817 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1818 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1819 	btrfs_set_file_extent_compression(leaf, fi, compression);
1820 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1821 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1822 
1823 	btrfs_mark_buffer_dirty(leaf);
1824 	btrfs_release_path(path);
1825 
1826 	inode_add_bytes(inode, num_bytes);
1827 
1828 	ins.objectid = disk_bytenr;
1829 	ins.offset = disk_num_bytes;
1830 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1831 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1832 					root->root_key.objectid,
1833 					btrfs_ino(inode), file_pos, &ins);
1834 out:
1835 	btrfs_free_path(path);
1836 
1837 	return ret;
1838 }
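
/*
 * For reference, a sketch of the two keys the helper above writes (the
 * function is hypothetical and only restates the assignments made
 * above): a file extent item indexed by inode and file offset, and the
 * extent item indexed by disk location.
 */
#if 0
static void example_reserved_extent_keys(struct inode *inode, u64 file_pos,
					 u64 disk_bytenr, u64 disk_num_bytes)
{
	struct btrfs_key file_key = {
		.objectid = btrfs_ino(inode),	/* inode number */
		.type = BTRFS_EXTENT_DATA_KEY,
		.offset = file_pos,		/* logical file offset */
	};
	struct btrfs_key extent_key = {
		.objectid = disk_bytenr,	/* start of the disk extent */
		.type = BTRFS_EXTENT_ITEM_KEY,
		.offset = disk_num_bytes,	/* length on disk */
	};

	(void)file_key;
	(void)extent_key;
}
#endif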
1839 
1840 /*
1841  * as ordered data IO finishes, this gets called so we can finish
1842  * an ordered extent if the range of bytes in the file it covers has
1843  * been fully written.
1844  */
1850 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1851 {
1852 	struct inode *inode = ordered_extent->inode;
1853 	struct btrfs_root *root = BTRFS_I(inode)->root;
1854 	struct btrfs_trans_handle *trans = NULL;
1855 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1856 	struct extent_state *cached_state = NULL;
1857 	int compress_type = 0;
1858 	int ret;
1859 	bool nolock;
1860 
1861 	nolock = btrfs_is_free_space_inode(inode);
1862 
1863 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
1864 		ret = -EIO;
1865 		goto out;
1866 	}
1867 
1868 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1869 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1870 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1871 		if (!ret) {
1872 			if (nolock)
1873 				trans = btrfs_join_transaction_nolock(root);
1874 			else
1875 				trans = btrfs_join_transaction(root);
1876 			if (IS_ERR(trans)) {
1877 				ret = PTR_ERR(trans);
1878 				trans = NULL;
1879 				goto out;
1880 			}
1881 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1882 			ret = btrfs_update_inode_fallback(trans, root, inode);
1883 			if (ret) /* -ENOMEM or corruption */
1884 				btrfs_abort_transaction(trans, root, ret);
1885 		}
1886 		goto out;
1887 	}
1888 
1889 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1890 			 ordered_extent->file_offset + ordered_extent->len - 1,
1891 			 0, &cached_state);
1892 
1893 	if (nolock)
1894 		trans = btrfs_join_transaction_nolock(root);
1895 	else
1896 		trans = btrfs_join_transaction(root);
1897 	if (IS_ERR(trans)) {
1898 		ret = PTR_ERR(trans);
1899 		trans = NULL;
1900 		goto out_unlock;
1901 	}
1902 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1903 
1904 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1905 		compress_type = ordered_extent->compress_type;
1906 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1907 		BUG_ON(compress_type);
1908 		ret = btrfs_mark_extent_written(trans, inode,
1909 						ordered_extent->file_offset,
1910 						ordered_extent->file_offset +
1911 						ordered_extent->len);
1912 	} else {
1913 		BUG_ON(root == root->fs_info->tree_root);
1914 		ret = insert_reserved_file_extent(trans, inode,
1915 						ordered_extent->file_offset,
1916 						ordered_extent->start,
1917 						ordered_extent->disk_len,
1918 						ordered_extent->len,
1919 						ordered_extent->len,
1920 						compress_type, 0, 0,
1921 						BTRFS_FILE_EXTENT_REG);
1922 	}
1923 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1924 			   ordered_extent->file_offset, ordered_extent->len,
1925 			   trans->transid);
1926 	if (ret < 0) {
1927 		btrfs_abort_transaction(trans, root, ret);
1928 		goto out_unlock;
1929 	}
1930 
1931 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1932 			  &ordered_extent->list);
1933 
1934 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1935 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1936 		ret = btrfs_update_inode_fallback(trans, root, inode);
1937 		if (ret) { /* -ENOMEM or corruption */
1938 			btrfs_abort_transaction(trans, root, ret);
1939 			goto out_unlock;
1940 		}
1941 	} else {
1942 		btrfs_set_inode_last_trans(trans, inode);
1943 	}
1944 	ret = 0;
1945 out_unlock:
1946 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1947 			     ordered_extent->file_offset +
1948 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1949 out:
1950 	if (root != root->fs_info->tree_root)
1951 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1952 	if (trans)
1953 		btrfs_end_transaction(trans, root);
1954 
1955 	if (ret)
1956 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
1957 				      ordered_extent->file_offset +
1958 				      ordered_extent->len - 1, NULL, GFP_NOFS);
1959 
1960 	/*
1961 	 * This needs to be done to make sure anybody waiting knows we are done
1962 	 * updating everything for this ordered extent.
1963 	 */
1964 	btrfs_remove_ordered_extent(inode, ordered_extent);
1965 
1966 	/* once for us */
1967 	btrfs_put_ordered_extent(ordered_extent);
1968 	/* once for the tree */
1969 	btrfs_put_ordered_extent(ordered_extent);
1970 
1971 	return ret;
1972 }
1973 
1974 static void finish_ordered_fn(struct btrfs_work *work)
1975 {
1976 	struct btrfs_ordered_extent *ordered_extent;
1977 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
1978 	btrfs_finish_ordered_io(ordered_extent);
1979 }
1980 
1981 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1982 				struct extent_state *state, int uptodate)
1983 {
1984 	struct inode *inode = page->mapping->host;
1985 	struct btrfs_root *root = BTRFS_I(inode)->root;
1986 	struct btrfs_ordered_extent *ordered_extent = NULL;
1987 	struct btrfs_workers *workers;
1988 
1989 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1990 
1991 	ClearPagePrivate2(page);
1992 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1993 					    end - start + 1, uptodate))
1994 		return 0;
1995 
1996 	ordered_extent->work.func = finish_ordered_fn;
1997 	ordered_extent->work.flags = 0;
1998 
1999 	if (btrfs_is_free_space_inode(inode))
2000 		workers = &root->fs_info->endio_freespace_worker;
2001 	else
2002 		workers = &root->fs_info->endio_write_workers;
2003 	btrfs_queue_worker(workers, &ordered_extent->work);
2004 
2005 	return 0;
2006 }
2007 
2008 /*
2009  * when reads are done, we need to check csums to verify the data is correct.
2010  * If there's a match, we allow the bio to finish.  If not, the code in
2011  * extent_io.c will try to find good copies for us.
2012  */
2013 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2014 			       struct extent_state *state, int mirror)
2015 {
2016 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
2017 	struct inode *inode = page->mapping->host;
2018 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2019 	char *kaddr;
2020 	u64 private = ~(u32)0;
2021 	int ret;
2022 	struct btrfs_root *root = BTRFS_I(inode)->root;
2023 	u32 csum = ~(u32)0;
2024 
2025 	if (PageChecked(page)) {
2026 		ClearPageChecked(page);
2027 		goto good;
2028 	}
2029 
2030 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2031 		goto good;
2032 
2033 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2034 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2035 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2036 				  GFP_NOFS);
2037 		return 0;
2038 	}
2039 
2040 	if (state && state->start == start) {
2041 		private = state->private;
2042 		ret = 0;
2043 	} else {
2044 		ret = get_state_private(io_tree, start, &private);
2045 	}
2046 	kaddr = kmap_atomic(page);
2047 	if (ret)
2048 		goto zeroit;
2049 
2050 	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
2051 	btrfs_csum_final(csum, (char *)&csum);
2052 	if (csum != private)
2053 		goto zeroit;
2054 
2055 	kunmap_atomic(kaddr);
2056 good:
2057 	return 0;
2058 
2059 zeroit:
2060 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2061 		       "private %llu\n",
2062 		       (unsigned long long)btrfs_ino(page->mapping->host),
2063 		       (unsigned long long)start, csum,
2064 		       (unsigned long long)private);
2065 	memset(kaddr + offset, 1, end - start + 1);
2066 	flush_dcache_page(page);
2067 	kunmap_atomic(kaddr);
2068 	if (private == 0)
2069 		return 0;
2070 	return -EIO;
2071 }
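
/*
 * The verification above boils down to this (a sketch; the helper is
 * hypothetical): checksum the range with an all-ones seed, finalize,
 * and compare against the 32-bit value stashed in the io_tree private
 * field at bio submission time.
 */
#if 0
static int example_verify_range(struct btrfs_root *root, char *kaddr,
				size_t offset, u64 start, u64 end,
				u64 private)
{
	u32 csum = ~(u32)0;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	return csum == (u32)private ? 0 : -EIO;
}
#endif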
2072 
2073 struct delayed_iput {
2074 	struct list_head list;
2075 	struct inode *inode;
2076 };
2077 
2078 /* JDM: If this is fs-wide, why can't we add a pointer to
2079  * btrfs_inode instead and avoid the allocation? */
2080 void btrfs_add_delayed_iput(struct inode *inode)
2081 {
2082 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2083 	struct delayed_iput *delayed;
2084 
2085 	if (atomic_add_unless(&inode->i_count, -1, 1))
2086 		return;
2087 
2088 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2089 	delayed->inode = inode;
2090 
2091 	spin_lock(&fs_info->delayed_iput_lock);
2092 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2093 	spin_unlock(&fs_info->delayed_iput_lock);
2094 }
2095 
2096 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2097 {
2098 	LIST_HEAD(list);
2099 	struct btrfs_fs_info *fs_info = root->fs_info;
2100 	struct delayed_iput *delayed;
2101 	int empty;
2102 
2103 	spin_lock(&fs_info->delayed_iput_lock);
2104 	empty = list_empty(&fs_info->delayed_iputs);
2105 	spin_unlock(&fs_info->delayed_iput_lock);
2106 	if (empty)
2107 		return;
2108 
2109 	spin_lock(&fs_info->delayed_iput_lock);
2110 	list_splice_init(&fs_info->delayed_iputs, &list);
2111 	spin_unlock(&fs_info->delayed_iput_lock);
2112 
2113 	while (!list_empty(&list)) {
2114 		delayed = list_entry(list.next, struct delayed_iput, list);
2115 		list_del(&delayed->list);
2116 		iput(delayed->inode);
2117 		kfree(delayed);
2118 	}
2119 }
2120 
2121 enum btrfs_orphan_cleanup_state {
2122 	ORPHAN_CLEANUP_STARTED	= 1,
2123 	ORPHAN_CLEANUP_DONE	= 2,
2124 };
2125 
2126 /*
2127  * This is called at transaction commit time. If there are no orphan
2128  * files in the subvolume, it removes the orphan item and frees the
2129  * block_rsv structure.
2130  */
2131 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2132 			      struct btrfs_root *root)
2133 {
2134 	struct btrfs_block_rsv *block_rsv;
2135 	int ret;
2136 
2137 	if (atomic_read(&root->orphan_inodes) ||
2138 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2139 		return;
2140 
2141 	spin_lock(&root->orphan_lock);
2142 	if (atomic_read(&root->orphan_inodes)) {
2143 		spin_unlock(&root->orphan_lock);
2144 		return;
2145 	}
2146 
2147 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2148 		spin_unlock(&root->orphan_lock);
2149 		return;
2150 	}
2151 
2152 	block_rsv = root->orphan_block_rsv;
2153 	root->orphan_block_rsv = NULL;
2154 	spin_unlock(&root->orphan_lock);
2155 
2156 	if (root->orphan_item_inserted &&
2157 	    btrfs_root_refs(&root->root_item) > 0) {
2158 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2159 					    root->root_key.objectid);
2160 		BUG_ON(ret);
2161 		root->orphan_item_inserted = 0;
2162 	}
2163 
2164 	if (block_rsv) {
2165 		WARN_ON(block_rsv->size > 0);
2166 		btrfs_free_block_rsv(root, block_rsv);
2167 	}
2168 }
2169 
2170 /*
2171  * This creates an orphan entry for the given inode in case something goes
2172  * wrong in the middle of an unlink/truncate.
2173  *
2174  * NOTE: the caller of this function should reserve 5 units of metadata
2175  *	 before calling it.
2176  */
2177 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2178 {
2179 	struct btrfs_root *root = BTRFS_I(inode)->root;
2180 	struct btrfs_block_rsv *block_rsv = NULL;
2181 	int reserve = 0;
2182 	int insert = 0;
2183 	int ret;
2184 
2185 	if (!root->orphan_block_rsv) {
2186 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2187 		if (!block_rsv)
2188 			return -ENOMEM;
2189 	}
2190 
2191 	spin_lock(&root->orphan_lock);
2192 	if (!root->orphan_block_rsv) {
2193 		root->orphan_block_rsv = block_rsv;
2194 	} else if (block_rsv) {
2195 		btrfs_free_block_rsv(root, block_rsv);
2196 		block_rsv = NULL;
2197 	}
2198 
2199 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2200 			      &BTRFS_I(inode)->runtime_flags)) {
2201 #if 0
2202 		/*
2203 		 * For proper ENOSPC handling, we should do orphan
2204 		 * cleanup when mounting. But this introduces backward
2205 		 * compatibility issue.
2206 		 */
2207 		if (!xchg(&root->orphan_item_inserted, 1))
2208 			insert = 2;
2209 		else
2210 			insert = 1;
2211 #endif
2212 		insert = 1;
2213 		atomic_inc(&root->orphan_inodes);
2214 	}
2215 
2216 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2217 			      &BTRFS_I(inode)->runtime_flags))
2218 		reserve = 1;
2219 	spin_unlock(&root->orphan_lock);
2220 
2221 	/* grab metadata reservation from transaction handle */
2222 	if (reserve) {
2223 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2224 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2225 	}
2226 
2227 	/* insert an orphan item to track this unlinked/truncated file */
2228 	if (insert >= 1) {
2229 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2230 		if (ret && ret != -EEXIST) {
2231 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2232 				  &BTRFS_I(inode)->runtime_flags);
2233 			btrfs_abort_transaction(trans, root, ret);
2234 			return ret;
2235 		}
2236 		ret = 0;
2237 	}
2238 
2239 	/* insert an orphan item to track that the subvolume contains orphan files */
2240 	if (insert >= 2) {
2241 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2242 					       root->root_key.objectid);
2243 		if (ret && ret != -EEXIST) {
2244 			btrfs_abort_transaction(trans, root, ret);
2245 			return ret;
2246 		}
2247 	}
2248 	return 0;
2249 }
2250 
2251 /*
2252  * We have done the truncate/delete so we can go ahead and remove the orphan
2253  * item for this particular inode.
2254  */
2255 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2256 {
2257 	struct btrfs_root *root = BTRFS_I(inode)->root;
2258 	int delete_item = 0;
2259 	int release_rsv = 0;
2260 	int ret = 0;
2261 
2262 	spin_lock(&root->orphan_lock);
2263 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2264 			       &BTRFS_I(inode)->runtime_flags))
2265 		delete_item = 1;
2266 
2267 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2268 			       &BTRFS_I(inode)->runtime_flags))
2269 		release_rsv = 1;
2270 	spin_unlock(&root->orphan_lock);
2271 
2272 	if (trans && delete_item) {
2273 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2274 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2275 	}
2276 
2277 	if (release_rsv) {
2278 		btrfs_orphan_release_metadata(inode);
2279 		atomic_dec(&root->orphan_inodes);
2280 	}
2281 
2282 	return 0;
2283 }
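
/*
 * Typical pairing of the two helpers above (a sketch; the wrapper is
 * hypothetical): the orphan item guards the operation so that a crash
 * mid-truncate gets cleaned up by btrfs_orphan_cleanup on the next
 * mount, and the item is removed once the truncate has completed.
 */
#if 0
static int example_guarded_truncate(struct btrfs_trans_handle *trans,
				    struct inode *inode)
{
	int ret;

	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		return ret;

	ret = btrfs_truncate(inode);	/* may span several transactions */
	if (!ret)
		ret = btrfs_orphan_del(trans, inode);
	return ret;
}
#endif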
2284 
2285 /*
2286  * this cleans up any orphans that may be left on the list from the last use
2287  * of this root.
2288  */
2289 int btrfs_orphan_cleanup(struct btrfs_root *root)
2290 {
2291 	struct btrfs_path *path;
2292 	struct extent_buffer *leaf;
2293 	struct btrfs_key key, found_key;
2294 	struct btrfs_trans_handle *trans;
2295 	struct inode *inode;
2296 	u64 last_objectid = 0;
2297 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2298 
2299 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2300 		return 0;
2301 
2302 	path = btrfs_alloc_path();
2303 	if (!path) {
2304 		ret = -ENOMEM;
2305 		goto out;
2306 	}
2307 	path->reada = -1;
2308 
2309 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2310 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2311 	key.offset = (u64)-1;
2312 
2313 	while (1) {
2314 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2315 		if (ret < 0)
2316 			goto out;
2317 
2318 		/*
2319 		 * ret == 0 means we found what we were searching for, which
2320 		 * is weird, but possible, so only adjust the path if we
2321 		 * didn't find the key, and see if we have stuff that matches
2322 		 */
2323 		if (ret > 0) {
2324 			ret = 0;
2325 			if (path->slots[0] == 0)
2326 				break;
2327 			path->slots[0]--;
2328 		}
2329 
2330 		/* pull out the item */
2331 		leaf = path->nodes[0];
2332 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2333 
2334 		/* make sure the item matches what we want */
2335 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2336 			break;
2337 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2338 			break;
2339 
2340 		/* release the path since we're done with it */
2341 		btrfs_release_path(path);
2342 
2343 		/*
2344 		 * this is basically btrfs_lookup, without the root-crossing
2345 		 * logic.  We store the inode number in the
2346 		 * offset of the orphan item.
2347 		 */
2348 
2349 		if (found_key.offset == last_objectid) {
2350 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
2351 			       "stopping orphan cleanup\n");
2352 			ret = -EINVAL;
2353 			goto out;
2354 		}
2355 
2356 		last_objectid = found_key.offset;
2357 
2358 		found_key.objectid = found_key.offset;
2359 		found_key.type = BTRFS_INODE_ITEM_KEY;
2360 		found_key.offset = 0;
2361 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2362 		ret = PTR_RET(inode);
2363 		if (ret && ret != -ESTALE)
2364 			goto out;
2365 
2366 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
2367 			struct btrfs_root *dead_root;
2368 			struct btrfs_fs_info *fs_info = root->fs_info;
2369 			int is_dead_root = 0;
2370 
2371 			/*
2372 			 * this is an orphan in the tree root. Currently these
2373 			 * could come from 2 sources:
2374 			 *  a) a snapshot deletion in progress
2375 			 *  b) a free space cache inode
2376 			 * We need to distinguish those two, as the snapshot
2377 			 * orphan must not get deleted.
2378 			 * find_dead_roots already ran before us, so if this
2379 			 * is a snapshot deletion, we should find the root
2380 			 * in the dead_roots list
2381 			 */
2382 			spin_lock(&fs_info->trans_lock);
2383 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2384 					    root_list) {
2385 				if (dead_root->root_key.objectid ==
2386 				    found_key.objectid) {
2387 					is_dead_root = 1;
2388 					break;
2389 				}
2390 			}
2391 			spin_unlock(&fs_info->trans_lock);
2392 			if (is_dead_root) {
2393 				/* prevent this orphan from being found again */
2394 				key.offset = found_key.objectid - 1;
2395 				continue;
2396 			}
2397 		}
2398 		/*
2399 		 * Inode is already gone but the orphan item is still there,
2400 		 * kill the orphan item.
2401 		 */
2402 		if (ret == -ESTALE) {
2403 			trans = btrfs_start_transaction(root, 1);
2404 			if (IS_ERR(trans)) {
2405 				ret = PTR_ERR(trans);
2406 				goto out;
2407 			}
2408 			printk(KERN_ERR "auto deleting %Lu\n",
2409 			       found_key.objectid);
2410 			ret = btrfs_del_orphan_item(trans, root,
2411 						    found_key.objectid);
2412 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2413 			btrfs_end_transaction(trans, root);
2414 			continue;
2415 		}
2416 
2417 		/*
2418 		 * add this inode to the orphan list so btrfs_orphan_del does
2419 		 * the proper thing when we hit it
2420 		 */
2421 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2422 			&BTRFS_I(inode)->runtime_flags);
2423 
2424 		/* if we have links, this was a truncate, let's do that */
2425 		if (inode->i_nlink) {
2426 			if (!S_ISREG(inode->i_mode)) {
2427 				WARN_ON(1);
2428 				iput(inode);
2429 				continue;
2430 			}
2431 			nr_truncate++;
2432 			ret = btrfs_truncate(inode);
2433 		} else {
2434 			nr_unlink++;
2435 		}
2436 
2437 		/* this will do delete_inode and everything for us */
2438 		iput(inode);
2439 		if (ret)
2440 			goto out;
2441 	}
2442 	/* release the path since we're done with it */
2443 	btrfs_release_path(path);
2444 
2445 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2446 
2447 	if (root->orphan_block_rsv)
2448 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2449 					(u64)-1);
2450 
2451 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2452 		trans = btrfs_join_transaction(root);
2453 		if (!IS_ERR(trans))
2454 			btrfs_end_transaction(trans, root);
2455 	}
2456 
2457 	if (nr_unlink)
2458 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2459 	if (nr_truncate)
2460 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2461 
2462 out:
2463 	if (ret)
2464 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2465 	btrfs_free_path(path);
2466 	return ret;
2467 }
2468 
2469 /*
2470  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2471  * don't find any xattrs, we know there can't be any acls.
2472  *
2473  * slot is the slot the inode is in, objectid is the objectid of the inode
2474  */
2475 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2476 					  int slot, u64 objectid)
2477 {
2478 	u32 nritems = btrfs_header_nritems(leaf);
2479 	struct btrfs_key found_key;
2480 	int scanned = 0;
2481 
2482 	slot++;
2483 	while (slot < nritems) {
2484 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2485 
2486 		/* we found a different objectid, there must not be acls */
2487 		if (found_key.objectid != objectid)
2488 			return 0;
2489 
2490 		/* we found an xattr, assume we've got an acl */
2491 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2492 			return 1;
2493 
2494 		/*
2495 		 * we found a key greater than an xattr key, there can't
2496 		 * be any acls later on
2497 		 */
2498 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2499 			return 0;
2500 
2501 		slot++;
2502 		scanned++;
2503 
2504 		/*
2505 		 * it goes inode, inode backrefs, xattrs, extents,
2506 		 * so if there are a ton of hard links to an inode there can
2507 		 * be a lot of backrefs.  Don't waste time searching too hard,
2508 		 * this is just an optimization
2509 		 */
2510 		if (scanned >= 8)
2511 			break;
2512 	}
2513 	/* we hit the end of the leaf before we found an xattr or
2514 	 * something larger than an xattr.  We have to assume the inode
2515 	 * has acls
2516 	 */
2517 	return 1;
2518 }
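
/*
 * The scan above relies on the key-type ordering of an inode's items
 * within a leaf.  A sketch of the invariant it exploits (the function
 * is hypothetical):
 */
#if 0
static void example_inode_item_key_order(void)
{
	/* items for one objectid sort by type, so xattrs form one band */
	BUILD_BUG_ON(!(BTRFS_INODE_ITEM_KEY < BTRFS_XATTR_ITEM_KEY));
	BUILD_BUG_ON(!(BTRFS_XATTR_ITEM_KEY < BTRFS_EXTENT_DATA_KEY));
}
#endif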
2519 
2520 /*
2521  * read an inode from the btree into the in-memory inode
2522  */
2523 static void btrfs_read_locked_inode(struct inode *inode)
2524 {
2525 	struct btrfs_path *path;
2526 	struct extent_buffer *leaf;
2527 	struct btrfs_inode_item *inode_item;
2528 	struct btrfs_timespec *tspec;
2529 	struct btrfs_root *root = BTRFS_I(inode)->root;
2530 	struct btrfs_key location;
2531 	int maybe_acls;
2532 	u32 rdev;
2533 	int ret;
2534 	bool filled = false;
2535 
2536 	ret = btrfs_fill_inode(inode, &rdev);
2537 	if (!ret)
2538 		filled = true;
2539 
2540 	path = btrfs_alloc_path();
2541 	if (!path)
2542 		goto make_bad;
2543 
2544 	path->leave_spinning = 1;
2545 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2546 
2547 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2548 	if (ret)
2549 		goto make_bad;
2550 
2551 	leaf = path->nodes[0];
2552 
2553 	if (filled)
2554 		goto cache_acl;
2555 
2556 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2557 				    struct btrfs_inode_item);
2558 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2559 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2560 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
2561 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
2562 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2563 
2564 	tspec = btrfs_inode_atime(inode_item);
2565 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2566 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2567 
2568 	tspec = btrfs_inode_mtime(inode_item);
2569 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2570 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2571 
2572 	tspec = btrfs_inode_ctime(inode_item);
2573 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2574 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2575 
2576 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2577 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2578 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
2579 
2580 	/*
2581 	 * If we were modified in the current generation and evicted from memory
2582 	 * and then re-read we need to do a full sync since we don't have any
2583 	 * idea about which extents were modified before we were evicted from
2584 	 * cache.
2585 	 */
2586 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
2587 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2588 			&BTRFS_I(inode)->runtime_flags);
2589 
2590 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
2591 	inode->i_generation = BTRFS_I(inode)->generation;
2592 	inode->i_rdev = 0;
2593 	rdev = btrfs_inode_rdev(leaf, inode_item);
2594 
2595 	BTRFS_I(inode)->index_cnt = (u64)-1;
2596 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2597 cache_acl:
2598 	/*
2599 	 * try to precache a NULL acl entry for files that don't have
2600 	 * any xattrs or acls
2601 	 */
2602 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2603 					   btrfs_ino(inode));
2604 	if (!maybe_acls)
2605 		cache_no_acl(inode);
2606 
2607 	btrfs_free_path(path);
2608 
2609 	switch (inode->i_mode & S_IFMT) {
2610 	case S_IFREG:
2611 		inode->i_mapping->a_ops = &btrfs_aops;
2612 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2613 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2614 		inode->i_fop = &btrfs_file_operations;
2615 		inode->i_op = &btrfs_file_inode_operations;
2616 		break;
2617 	case S_IFDIR:
2618 		inode->i_fop = &btrfs_dir_file_operations;
2619 		if (root == root->fs_info->tree_root)
2620 			inode->i_op = &btrfs_dir_ro_inode_operations;
2621 		else
2622 			inode->i_op = &btrfs_dir_inode_operations;
2623 		break;
2624 	case S_IFLNK:
2625 		inode->i_op = &btrfs_symlink_inode_operations;
2626 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2627 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2628 		break;
2629 	default:
2630 		inode->i_op = &btrfs_special_inode_operations;
2631 		init_special_inode(inode, inode->i_mode, rdev);
2632 		break;
2633 	}
2634 
2635 	btrfs_update_iflags(inode);
2636 	return;
2637 
2638 make_bad:
2639 	btrfs_free_path(path);
2640 	make_bad_inode(inode);
2641 }
2642 
2643 /*
2644  * given a leaf and an inode, copy the inode fields into the leaf
2645  */
2646 static void fill_inode_item(struct btrfs_trans_handle *trans,
2647 			    struct extent_buffer *leaf,
2648 			    struct btrfs_inode_item *item,
2649 			    struct inode *inode)
2650 {
2651 	btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
2652 	btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
2653 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2654 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2655 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2656 
2657 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2658 			       inode->i_atime.tv_sec);
2659 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2660 				inode->i_atime.tv_nsec);
2661 
2662 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2663 			       inode->i_mtime.tv_sec);
2664 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2665 				inode->i_mtime.tv_nsec);
2666 
2667 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2668 			       inode->i_ctime.tv_sec);
2669 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2670 				inode->i_ctime.tv_nsec);
2671 
2672 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2673 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2674 	btrfs_set_inode_sequence(leaf, item, inode->i_version);
2675 	btrfs_set_inode_transid(leaf, item, trans->transid);
2676 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2677 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2678 	btrfs_set_inode_block_group(leaf, item, 0);
2679 }
2680 
2681 /*
2682  * copy everything in the in-memory inode into the btree.
2683  */
2684 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2685 				struct btrfs_root *root, struct inode *inode)
2686 {
2687 	struct btrfs_inode_item *inode_item;
2688 	struct btrfs_path *path;
2689 	struct extent_buffer *leaf;
2690 	int ret;
2691 
2692 	path = btrfs_alloc_path();
2693 	if (!path)
2694 		return -ENOMEM;
2695 
2696 	path->leave_spinning = 1;
2697 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2698 				 1);
2699 	if (ret) {
2700 		if (ret > 0)
2701 			ret = -ENOENT;
2702 		goto failed;
2703 	}
2704 
2705 	btrfs_unlock_up_safe(path, 1);
2706 	leaf = path->nodes[0];
2707 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2708 				    struct btrfs_inode_item);
2709 
2710 	fill_inode_item(trans, leaf, inode_item, inode);
2711 	btrfs_mark_buffer_dirty(leaf);
2712 	btrfs_set_inode_last_trans(trans, inode);
2713 	ret = 0;
2714 failed:
2715 	btrfs_free_path(path);
2716 	return ret;
2717 }
2718 
2719 /*
2720  * copy everything in the in-memory inode into the btree.
2721  */
2722 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2723 				struct btrfs_root *root, struct inode *inode)
2724 {
2725 	int ret;
2726 
2727 	/*
2728 	 * If the inode is a free space inode, we can deadlock during commit
2729 	 * if we put it into the delayed code.
2730 	 *
2731 	 * The data relocation inode should also be directly updated
2732 	 * without delay
2733 	 */
2734 	if (!btrfs_is_free_space_inode(inode)
2735 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2736 		btrfs_update_root_times(trans, root);
2737 
2738 		ret = btrfs_delayed_update_inode(trans, root, inode);
2739 		if (!ret)
2740 			btrfs_set_inode_last_trans(trans, inode);
2741 		return ret;
2742 	}
2743 
2744 	return btrfs_update_inode_item(trans, root, inode);
2745 }
2746 
2747 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2748 					 struct btrfs_root *root,
2749 					 struct inode *inode)
2750 {
2751 	int ret;
2752 
2753 	ret = btrfs_update_inode(trans, root, inode);
2754 	if (ret == -ENOSPC)
2755 		return btrfs_update_inode_item(trans, root, inode);
2756 	return ret;
2757 }
2758 
2759 /*
2760  * unlink helper that gets used here in inode.c and in the tree logging
2761  * recovery code.  It removes a link in a directory with a given name, and
2762  * also drops the back refs in the inode to the directory
2763  */
2764 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2765 				struct btrfs_root *root,
2766 				struct inode *dir, struct inode *inode,
2767 				const char *name, int name_len)
2768 {
2769 	struct btrfs_path *path;
2770 	int ret = 0;
2771 	struct extent_buffer *leaf;
2772 	struct btrfs_dir_item *di;
2773 	struct btrfs_key key;
2774 	u64 index;
2775 	u64 ino = btrfs_ino(inode);
2776 	u64 dir_ino = btrfs_ino(dir);
2777 
2778 	path = btrfs_alloc_path();
2779 	if (!path) {
2780 		ret = -ENOMEM;
2781 		goto out;
2782 	}
2783 
2784 	path->leave_spinning = 1;
2785 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2786 				    name, name_len, -1);
2787 	if (IS_ERR(di)) {
2788 		ret = PTR_ERR(di);
2789 		goto err;
2790 	}
2791 	if (!di) {
2792 		ret = -ENOENT;
2793 		goto err;
2794 	}
2795 	leaf = path->nodes[0];
2796 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2797 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2798 	if (ret)
2799 		goto err;
2800 	btrfs_release_path(path);
2801 
2802 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2803 				  dir_ino, &index);
2804 	if (ret) {
2805 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2806 		       "inode %llu parent %llu\n", name_len, name,
2807 		       (unsigned long long)ino, (unsigned long long)dir_ino);
2808 		btrfs_abort_transaction(trans, root, ret);
2809 		goto err;
2810 	}
2811 
2812 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2813 	if (ret) {
2814 		btrfs_abort_transaction(trans, root, ret);
2815 		goto err;
2816 	}
2817 
2818 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2819 					 inode, dir_ino);
2820 	if (ret != 0 && ret != -ENOENT) {
2821 		btrfs_abort_transaction(trans, root, ret);
2822 		goto err;
2823 	}
2824 
2825 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2826 					   dir, index);
2827 	if (ret == -ENOENT)
2828 		ret = 0;
2829 err:
2830 	btrfs_free_path(path);
2831 	if (ret)
2832 		goto out;
2833 
2834 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2835 	inode_inc_iversion(inode);
2836 	inode_inc_iversion(dir);
2837 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2838 	ret = btrfs_update_inode(trans, root, dir);
2839 out:
2840 	return ret;
2841 }
2842 
2843 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2844 		       struct btrfs_root *root,
2845 		       struct inode *dir, struct inode *inode,
2846 		       const char *name, int name_len)
2847 {
2848 	int ret;
2849 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2850 	if (!ret) {
2851 		btrfs_drop_nlink(inode);
2852 		ret = btrfs_update_inode(trans, root, inode);
2853 	}
2854 	return ret;
2855 }
2856 
2857 
2858 /* helper to check if there is any shared block in the path */
2859 static int check_path_shared(struct btrfs_root *root,
2860 			     struct btrfs_path *path)
2861 {
2862 	struct extent_buffer *eb;
2863 	int level;
2864 	u64 refs = 1;
2865 
2866 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2867 		int ret;
2868 
2869 		if (!path->nodes[level])
2870 			break;
2871 		eb = path->nodes[level];
2872 		if (!btrfs_block_can_be_shared(root, eb))
2873 			continue;
2874 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2875 					       &refs, NULL);
2876 		if (refs > 1)
2877 			return 1;
2878 	}
2879 	return 0;
2880 }
2881 
2882 /*
2883  * helper to start transaction for unlink and rmdir.
2884  *
2885  * unlink and rmdir are special in btrfs, they do not always free space.
2886  * So in the enospc case, we should make sure they will free space before
2887  * allowing them to use the global metadata reservation.
2888  */
2889 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2890 						       struct dentry *dentry)
2891 {
2892 	struct btrfs_trans_handle *trans;
2893 	struct btrfs_root *root = BTRFS_I(dir)->root;
2894 	struct btrfs_path *path;
2895 	struct btrfs_dir_item *di;
2896 	struct inode *inode = dentry->d_inode;
2897 	u64 index;
2898 	int check_link = 1;
2899 	int err = -ENOSPC;
2900 	int ret;
2901 	u64 ino = btrfs_ino(inode);
2902 	u64 dir_ino = btrfs_ino(dir);
2903 
2904 	/*
2905 	 * 1 for the possible orphan item
2906 	 * 1 for the dir item
2907 	 * 1 for the dir index
2908 	 * 1 for the inode ref
2909 	 * 1 for the inode ref in the tree log
2910 	 * 2 for the dir entries in the log
2911 	 * 1 for the inode
2912 	 */
2913 	trans = btrfs_start_transaction(root, 8);
2914 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2915 		return trans;
2916 
2917 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2918 		return ERR_PTR(-ENOSPC);
2919 
2920 	/* check if someone else holds a reference */
2921 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2922 		return ERR_PTR(-ENOSPC);
2923 
2924 	if (atomic_read(&inode->i_count) > 2)
2925 		return ERR_PTR(-ENOSPC);
2926 
2927 	if (xchg(&root->fs_info->enospc_unlink, 1))
2928 		return ERR_PTR(-ENOSPC);
2929 
2930 	path = btrfs_alloc_path();
2931 	if (!path) {
2932 		root->fs_info->enospc_unlink = 0;
2933 		return ERR_PTR(-ENOMEM);
2934 	}
2935 
2936 	/* 1 for the orphan item */
2937 	trans = btrfs_start_transaction(root, 1);
2938 	if (IS_ERR(trans)) {
2939 		btrfs_free_path(path);
2940 		root->fs_info->enospc_unlink = 0;
2941 		return trans;
2942 	}
2943 
2944 	path->skip_locking = 1;
2945 	path->search_commit_root = 1;
2946 
2947 	ret = btrfs_lookup_inode(trans, root, path,
2948 				&BTRFS_I(dir)->location, 0);
2949 	if (ret < 0) {
2950 		err = ret;
2951 		goto out;
2952 	}
2953 	if (ret == 0) {
2954 		if (check_path_shared(root, path))
2955 			goto out;
2956 	} else {
2957 		check_link = 0;
2958 	}
2959 	btrfs_release_path(path);
2960 
2961 	ret = btrfs_lookup_inode(trans, root, path,
2962 				&BTRFS_I(inode)->location, 0);
2963 	if (ret < 0) {
2964 		err = ret;
2965 		goto out;
2966 	}
2967 	if (ret == 0) {
2968 		if (check_path_shared(root, path))
2969 			goto out;
2970 	} else {
2971 		check_link = 0;
2972 	}
2973 	btrfs_release_path(path);
2974 
2975 	if (ret == 0 && S_ISREG(inode->i_mode)) {
2976 		ret = btrfs_lookup_file_extent(trans, root, path,
2977 					       ino, (u64)-1, 0);
2978 		if (ret < 0) {
2979 			err = ret;
2980 			goto out;
2981 		}
2982 		BUG_ON(ret == 0); /* Corruption */
2983 		if (check_path_shared(root, path))
2984 			goto out;
2985 		btrfs_release_path(path);
2986 	}
2987 
2988 	if (!check_link) {
2989 		err = 0;
2990 		goto out;
2991 	}
2992 
2993 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2994 				dentry->d_name.name, dentry->d_name.len, 0);
2995 	if (IS_ERR(di)) {
2996 		err = PTR_ERR(di);
2997 		goto out;
2998 	}
2999 	if (di) {
3000 		if (check_path_shared(root, path))
3001 			goto out;
3002 	} else {
3003 		err = 0;
3004 		goto out;
3005 	}
3006 	btrfs_release_path(path);
3007 
3008 	ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
3009 					dentry->d_name.len, ino, dir_ino, 0,
3010 					&index);
3011 	if (ret) {
3012 		err = ret;
3013 		goto out;
3014 	}
3015 
3016 	if (check_path_shared(root, path))
3017 		goto out;
3018 
3019 	btrfs_release_path(path);
3020 
3021 	/*
3022 	 * This is a commit root search; if we can look up the inode item and
3023 	 * other related items in the commit root, it means the transaction of
3024 	 * dir/file creation has been committed, and the dir index item that we
3025 	 * delay to insert has also been inserted into the commit root. So
3026 	 * we needn't worry about the delayed insertion of the dir index item
3027 	 * here.
3028 	 */
3029 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
3030 				dentry->d_name.name, dentry->d_name.len, 0);
3031 	if (IS_ERR(di)) {
3032 		err = PTR_ERR(di);
3033 		goto out;
3034 	}
3035 	BUG_ON(ret == -ENOENT);
3036 	if (check_path_shared(root, path))
3037 		goto out;
3038 
3039 	err = 0;
3040 out:
3041 	btrfs_free_path(path);
3042 	/* Migrate the orphan reservation over */
3043 	if (!err)
3044 		err = btrfs_block_rsv_migrate(trans->block_rsv,
3045 				&root->fs_info->global_block_rsv,
3046 				trans->bytes_reserved);
3047 
3048 	if (err) {
3049 		btrfs_end_transaction(trans, root);
3050 		root->fs_info->enospc_unlink = 0;
3051 		return ERR_PTR(err);
3052 	}
3053 
3054 	trans->block_rsv = &root->fs_info->global_block_rsv;
3055 	return trans;
3056 }
3057 
3058 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3059 			       struct btrfs_root *root)
3060 {
3061 	if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
3062 		btrfs_block_rsv_release(root, trans->block_rsv,
3063 					trans->bytes_reserved);
3064 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3065 		BUG_ON(!root->fs_info->enospc_unlink);
3066 		root->fs_info->enospc_unlink = 0;
3067 	}
3068 	btrfs_end_transaction(trans, root);
3069 }
3070 
3071 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3072 {
3073 	struct btrfs_root *root = BTRFS_I(dir)->root;
3074 	struct btrfs_trans_handle *trans;
3075 	struct inode *inode = dentry->d_inode;
3076 	int ret;
3077 	unsigned long nr = 0;
3078 
3079 	trans = __unlink_start_trans(dir, dentry);
3080 	if (IS_ERR(trans))
3081 		return PTR_ERR(trans);
3082 
3083 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3084 
3085 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3086 				 dentry->d_name.name, dentry->d_name.len);
3087 	if (ret)
3088 		goto out;
3089 
3090 	if (inode->i_nlink == 0) {
3091 		ret = btrfs_orphan_add(trans, inode);
3092 		if (ret)
3093 			goto out;
3094 	}
3095 
3096 out:
3097 	nr = trans->blocks_used;
3098 	__unlink_end_trans(trans, root);
3099 	btrfs_btree_balance_dirty(root, nr);
3100 	return ret;
3101 }
3102 
3103 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3104 			struct btrfs_root *root,
3105 			struct inode *dir, u64 objectid,
3106 			const char *name, int name_len)
3107 {
3108 	struct btrfs_path *path;
3109 	struct extent_buffer *leaf;
3110 	struct btrfs_dir_item *di;
3111 	struct btrfs_key key;
3112 	u64 index;
3113 	int ret;
3114 	u64 dir_ino = btrfs_ino(dir);
3115 
3116 	path = btrfs_alloc_path();
3117 	if (!path)
3118 		return -ENOMEM;
3119 
3120 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3121 				   name, name_len, -1);
3122 	if (IS_ERR_OR_NULL(di)) {
3123 		if (!di)
3124 			ret = -ENOENT;
3125 		else
3126 			ret = PTR_ERR(di);
3127 		goto out;
3128 	}
3129 
3130 	leaf = path->nodes[0];
3131 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3132 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3133 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3134 	if (ret) {
3135 		btrfs_abort_transaction(trans, root, ret);
3136 		goto out;
3137 	}
3138 	btrfs_release_path(path);
3139 
3140 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3141 				 objectid, root->root_key.objectid,
3142 				 dir_ino, &index, name, name_len);
3143 	if (ret < 0) {
3144 		if (ret != -ENOENT) {
3145 			btrfs_abort_transaction(trans, root, ret);
3146 			goto out;
3147 		}
3148 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3149 						 name, name_len);
3150 		if (IS_ERR_OR_NULL(di)) {
3151 			if (!di)
3152 				ret = -ENOENT;
3153 			else
3154 				ret = PTR_ERR(di);
3155 			btrfs_abort_transaction(trans, root, ret);
3156 			goto out;
3157 		}
3158 
3159 		leaf = path->nodes[0];
3160 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3161 		btrfs_release_path(path);
3162 		index = key.offset;
3163 	}
3164 	btrfs_release_path(path);
3165 
3166 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3167 	if (ret) {
3168 		btrfs_abort_transaction(trans, root, ret);
3169 		goto out;
3170 	}
3171 
3172 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3173 	inode_inc_iversion(dir);
3174 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3175 	ret = btrfs_update_inode_fallback(trans, root, dir);
3176 	if (ret)
3177 		btrfs_abort_transaction(trans, root, ret);
3178 out:
3179 	btrfs_free_path(path);
3180 	return ret;
3181 }
3182 
3183 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3184 {
3185 	struct inode *inode = dentry->d_inode;
3186 	int err = 0;
3187 	struct btrfs_root *root = BTRFS_I(dir)->root;
3188 	struct btrfs_trans_handle *trans;
3189 	unsigned long nr = 0;
3190 
3191 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3192 		return -ENOTEMPTY;
3193 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3194 		return -EPERM;
3195 
3196 	trans = __unlink_start_trans(dir, dentry);
3197 	if (IS_ERR(trans))
3198 		return PTR_ERR(trans);
3199 
3200 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3201 		err = btrfs_unlink_subvol(trans, root, dir,
3202 					  BTRFS_I(inode)->location.objectid,
3203 					  dentry->d_name.name,
3204 					  dentry->d_name.len);
3205 		goto out;
3206 	}
3207 
3208 	err = btrfs_orphan_add(trans, inode);
3209 	if (err)
3210 		goto out;
3211 
3212 	/* now the directory is empty */
3213 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3214 				 dentry->d_name.name, dentry->d_name.len);
3215 	if (!err)
3216 		btrfs_i_size_write(inode, 0);
3217 out:
3218 	nr = trans->blocks_used;
3219 	__unlink_end_trans(trans, root);
3220 	btrfs_btree_balance_dirty(root, nr);
3221 
3222 	return err;
3223 }
3224 
3225 /*
3226  * this can truncate away extent items, csum items and directory items.
3227  * It starts at a high offset and removes keys until it can't find
3228  * any higher than new_size
3229  *
3230  * csum items that cross the new i_size are truncated to the new size
3231  * as well.
3232  *
3233  * min_type is the minimum key type to truncate down to.  If set to 0, this
3234  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3235  */
3236 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3237 			       struct btrfs_root *root,
3238 			       struct inode *inode,
3239 			       u64 new_size, u32 min_type)
3240 {
3241 	struct btrfs_path *path;
3242 	struct extent_buffer *leaf;
3243 	struct btrfs_file_extent_item *fi;
3244 	struct btrfs_key key;
3245 	struct btrfs_key found_key;
3246 	u64 extent_start = 0;
3247 	u64 extent_num_bytes = 0;
3248 	u64 extent_offset = 0;
3249 	u64 item_end = 0;
3250 	u64 mask = root->sectorsize - 1;
3251 	u32 found_type = (u8)-1;
3252 	int found_extent;
3253 	int del_item;
3254 	int pending_del_nr = 0;
3255 	int pending_del_slot = 0;
3256 	int extent_type = -1;
3257 	int ret;
3258 	int err = 0;
3259 	u64 ino = btrfs_ino(inode);
3260 
3261 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3262 
3263 	path = btrfs_alloc_path();
3264 	if (!path)
3265 		return -ENOMEM;
3266 	path->reada = -1;
3267 
3268 	/*
3269 	 * We want to drop from the next block forward in case this new size is
3270 	 * not block aligned since we will be keeping the last block of the
3271 	 * extent just the way it is.
3272 	 */
3273 	if (root->ref_cows || root == root->fs_info->tree_root)
3274 		btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
3275 
3276 	/*
3277 	 * This function is also used to drop the items in the log tree before
3278 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3279 	 * it is used to drop the logged items. So we shouldn't kill the delayed
3280 	 * items.
3281 	 */
3282 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3283 		btrfs_kill_delayed_inode_items(inode);
3284 
3285 	key.objectid = ino;
3286 	key.offset = (u64)-1;
3287 	key.type = (u8)-1;
3288 
3289 search_again:
3290 	path->leave_spinning = 1;
3291 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3292 	if (ret < 0) {
3293 		err = ret;
3294 		goto out;
3295 	}
3296 
3297 	if (ret > 0) {
3298 		/* there are no items in the tree for us to truncate, we're
3299 		 * done
3300 		 */
3301 		if (path->slots[0] == 0)
3302 			goto out;
3303 		path->slots[0]--;
3304 	}
3305 
3306 	while (1) {
3307 		fi = NULL;
3308 		leaf = path->nodes[0];
3309 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3310 		found_type = btrfs_key_type(&found_key);
3311 
3312 		if (found_key.objectid != ino)
3313 			break;
3314 
3315 		if (found_type < min_type)
3316 			break;
3317 
3318 		item_end = found_key.offset;
3319 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3320 			fi = btrfs_item_ptr(leaf, path->slots[0],
3321 					    struct btrfs_file_extent_item);
3322 			extent_type = btrfs_file_extent_type(leaf, fi);
3323 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3324 				item_end +=
3325 				    btrfs_file_extent_num_bytes(leaf, fi);
3326 			} else {
3327 				item_end += btrfs_file_extent_inline_len(leaf,
3328 									 fi);
3329 			}
3330 			item_end--;
3331 		}
3332 		if (found_type > min_type) {
3333 			del_item = 1;
3334 		} else {
3335 			if (item_end < new_size)
3336 				break;
3337 			if (found_key.offset >= new_size)
3338 				del_item = 1;
3339 			else
3340 				del_item = 0;
3341 		}
3342 		found_extent = 0;
3343 		/* FIXME, shrink the extent if the ref count is only 1 */
3344 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3345 			goto delete;
3346 
3347 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3348 			u64 num_dec;
3349 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3350 			if (!del_item) {
3351 				u64 orig_num_bytes =
3352 					btrfs_file_extent_num_bytes(leaf, fi);
3353 				extent_num_bytes = new_size -
3354 					found_key.offset + root->sectorsize - 1;
3355 				extent_num_bytes = extent_num_bytes &
3356 					~((u64)root->sectorsize - 1);
3357 				btrfs_set_file_extent_num_bytes(leaf, fi,
3358 							 extent_num_bytes);
3359 				num_dec = (orig_num_bytes -
3360 					   extent_num_bytes);
3361 				if (root->ref_cows && extent_start != 0)
3362 					inode_sub_bytes(inode, num_dec);
3363 				btrfs_mark_buffer_dirty(leaf);
3364 			} else {
3365 				extent_num_bytes =
3366 					btrfs_file_extent_disk_num_bytes(leaf,
3367 									 fi);
3368 				extent_offset = found_key.offset -
3369 					btrfs_file_extent_offset(leaf, fi);
3370 
3371 				/* FIXME blocksize != 4096 */
3372 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3373 				if (extent_start != 0) {
3374 					found_extent = 1;
3375 					if (root->ref_cows)
3376 						inode_sub_bytes(inode, num_dec);
3377 				}
3378 			}
3379 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3380 			/*
3381 			 * we can't truncate inline items that have had
3382 			 * special encodings
3383 			 */
3384 			if (!del_item &&
3385 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3386 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3387 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3388 				u32 size = new_size - found_key.offset;
3389 
3390 				if (root->ref_cows) {
3391 					inode_sub_bytes(inode, item_end + 1 -
3392 							new_size);
3393 				}
3394 				size =
3395 				    btrfs_file_extent_calc_inline_size(size);
3396 				btrfs_truncate_item(trans, root, path,
3397 						    size, 1);
3398 			} else if (root->ref_cows) {
3399 				inode_sub_bytes(inode, item_end + 1 -
3400 						found_key.offset);
3401 			}
3402 		}
3403 delete:
3404 		if (del_item) {
3405 			if (!pending_del_nr) {
3406 				/* no pending yet, add ourselves */
3407 				pending_del_slot = path->slots[0];
3408 				pending_del_nr = 1;
3409 			} else if (pending_del_nr &&
3410 				   path->slots[0] + 1 == pending_del_slot) {
3411 				/* hop on the pending chunk */
3412 				pending_del_nr++;
3413 				pending_del_slot = path->slots[0];
3414 			} else {
3415 				BUG();
3416 			}
3417 		} else {
3418 			break;
3419 		}
3420 		if (found_extent && (root->ref_cows ||
3421 				     root == root->fs_info->tree_root)) {
3422 			btrfs_set_path_blocking(path);
3423 			ret = btrfs_free_extent(trans, root, extent_start,
3424 						extent_num_bytes, 0,
3425 						btrfs_header_owner(leaf),
3426 						ino, extent_offset, 0);
3427 			BUG_ON(ret);
3428 		}
3429 
3430 		if (found_type == BTRFS_INODE_ITEM_KEY)
3431 			break;
3432 
3433 		if (path->slots[0] == 0 ||
3434 		    path->slots[0] != pending_del_slot) {
3435 			if (pending_del_nr) {
3436 				ret = btrfs_del_items(trans, root, path,
3437 						pending_del_slot,
3438 						pending_del_nr);
3439 				if (ret) {
3440 					btrfs_abort_transaction(trans,
3441 								root, ret);
3442 					goto error;
3443 				}
3444 				pending_del_nr = 0;
3445 			}
3446 			btrfs_release_path(path);
3447 			goto search_again;
3448 		} else {
3449 			path->slots[0]--;
3450 		}
3451 	}
3452 out:
3453 	if (pending_del_nr) {
3454 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3455 				      pending_del_nr);
3456 		if (ret)
3457 			btrfs_abort_transaction(trans, root, ret);
3458 	}
3459 error:
3460 	btrfs_free_path(path);
3461 	return err;
3462 }
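
/*
 * Typical use of the function above (a sketch; the wrapper is
 * hypothetical): drop everything beyond the new i_size but keep the
 * inode item itself, i.e. truncate only file extents and csums.
 */
#if 0
static int example_truncate_data_items(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct inode *inode, u64 new_size)
{
	return btrfs_truncate_inode_items(trans, root, inode, new_size,
					  BTRFS_EXTENT_DATA_KEY);
}
#endif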
3463 
3464 /*
3465  * btrfs_truncate_page - read, zero a chunk and write a page
3466  * @inode - inode that we're zeroing
3467  * @from - the offset to start zeroing
3468  * @len - the length to zero, 0 to zero the entire range relative to the
3469  *	offset
3470  * @front - zero up to the offset instead of from the offset on
3471  *
3472  * This will find the page for the "from" offset, cow the page and zero the
3473  * part we want to zero.  This is used with truncate and hole punching.
3474  */
3475 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
3476 			int front)
3477 {
3478 	struct address_space *mapping = inode->i_mapping;
3479 	struct btrfs_root *root = BTRFS_I(inode)->root;
3480 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3481 	struct btrfs_ordered_extent *ordered;
3482 	struct extent_state *cached_state = NULL;
3483 	char *kaddr;
3484 	u32 blocksize = root->sectorsize;
3485 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3486 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3487 	struct page *page;
3488 	gfp_t mask = btrfs_alloc_write_mask(mapping);
3489 	int ret = 0;
3490 	u64 page_start;
3491 	u64 page_end;
3492 
3493 	if ((offset & (blocksize - 1)) == 0 &&
3494 	    (!len || ((len & (blocksize - 1)) == 0)))
3495 		goto out;
3496 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3497 	if (ret)
3498 		goto out;
3499 
3500 	ret = -ENOMEM;
3501 again:
3502 	page = find_or_create_page(mapping, index, mask);
3503 	if (!page) {
3504 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3505 		goto out;
3506 	}
3507 
3508 	page_start = page_offset(page);
3509 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3510 
3511 	if (!PageUptodate(page)) {
3512 		ret = btrfs_readpage(NULL, page);
3513 		lock_page(page);
3514 		if (page->mapping != mapping) {
3515 			unlock_page(page);
3516 			page_cache_release(page);
3517 			goto again;
3518 		}
3519 		if (!PageUptodate(page)) {
3520 			ret = -EIO;
3521 			goto out_unlock;
3522 		}
3523 	}
3524 	wait_on_page_writeback(page);
3525 
3526 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3527 	set_page_extent_mapped(page);
3528 
3529 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3530 	if (ordered) {
3531 		unlock_extent_cached(io_tree, page_start, page_end,
3532 				     &cached_state, GFP_NOFS);
3533 		unlock_page(page);
3534 		page_cache_release(page);
3535 		btrfs_start_ordered_extent(inode, ordered, 1);
3536 		btrfs_put_ordered_extent(ordered);
3537 		goto again;
3538 	}
3539 
3540 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3541 			  EXTENT_DIRTY | EXTENT_DELALLOC |
3542 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
3543 			  0, 0, &cached_state, GFP_NOFS);
3544 
3545 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3546 					&cached_state);
3547 	if (ret) {
3548 		unlock_extent_cached(io_tree, page_start, page_end,
3549 				     &cached_state, GFP_NOFS);
3550 		goto out_unlock;
3551 	}
3552 
3553 	ret = 0;
3554 	if (offset != PAGE_CACHE_SIZE) {
3555 		if (!len)
3556 			len = PAGE_CACHE_SIZE - offset;
3557 		kaddr = kmap(page);
3558 		if (front)
3559 			memset(kaddr, 0, offset);
3560 		else
3561 			memset(kaddr + offset, 0, len);
3562 		flush_dcache_page(page);
3563 		kunmap(page);
3564 	}
3565 	ClearPageChecked(page);
3566 	set_page_dirty(page);
3567 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3568 			     GFP_NOFS);
3569 
3570 out_unlock:
3571 	if (ret)
3572 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3573 	unlock_page(page);
3574 	page_cache_release(page);
3575 out:
3576 	return ret;
3577 }
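
/*
 * Illustrative sketch (not part of the original file): the early exit at
 * the top of btrfs_truncate_page() relies on blocksize being a power of
 * two, so "x & (blocksize - 1)" is x modulo blocksize.  A hypothetical
 * predicate making the test explicit:
 */
static inline int example_needs_block_zeroing(u64 offset, u64 len,
					      u32 blocksize)
{
	/* block-aligned start and an aligned (or unspecified) length */
	if ((offset & (blocksize - 1)) == 0 &&
	    (len == 0 || (len & (blocksize - 1)) == 0))
		return 0;
	return 1;
}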
3578 
3579 /*
3580  * This function puts in dummy file extents for the area we're creating a hole
3581  * for.  So if we are truncating this file to a larger size we need to insert
3582  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
3583  * for the range between oldsize and size.
3584  */
3585 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3586 {
3587 	struct btrfs_trans_handle *trans;
3588 	struct btrfs_root *root = BTRFS_I(inode)->root;
3589 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3590 	struct extent_map *em = NULL;
3591 	struct extent_state *cached_state = NULL;
3592 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3593 	u64 mask = root->sectorsize - 1;
3594 	u64 hole_start = (oldsize + mask) & ~mask;
3595 	u64 block_end = (size + mask) & ~mask;
3596 	u64 last_byte;
3597 	u64 cur_offset;
3598 	u64 hole_size;
3599 	int err = 0;
3600 
3601 	if (size <= hole_start)
3602 		return 0;
3603 
3604 	while (1) {
3605 		struct btrfs_ordered_extent *ordered;
3606 		btrfs_wait_ordered_range(inode, hole_start,
3607 					 block_end - hole_start);
3608 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3609 				 &cached_state);
3610 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3611 		if (!ordered)
3612 			break;
3613 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3614 				     &cached_state, GFP_NOFS);
3615 		btrfs_put_ordered_extent(ordered);
3616 	}
3617 
3618 	cur_offset = hole_start;
3619 	while (1) {
3620 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3621 				block_end - cur_offset, 0);
3622 		if (IS_ERR(em)) {
3623 			err = PTR_ERR(em);
3624 			break;
3625 		}
3626 		last_byte = min(extent_map_end(em), block_end);
3627 		last_byte = (last_byte + mask) & ~mask;
3628 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3629 			struct extent_map *hole_em;
3630 			hole_size = last_byte - cur_offset;
3631 
3632 			trans = btrfs_start_transaction(root, 3);
3633 			if (IS_ERR(trans)) {
3634 				err = PTR_ERR(trans);
3635 				break;
3636 			}
3637 
3638 			err = btrfs_drop_extents(trans, root, inode,
3639 						 cur_offset,
3640 						 cur_offset + hole_size, 1);
3641 			if (err) {
3642 				btrfs_abort_transaction(trans, root, err);
3643 				btrfs_end_transaction(trans, root);
3644 				break;
3645 			}
3646 
3647 			err = btrfs_insert_file_extent(trans, root,
3648 					btrfs_ino(inode), cur_offset, 0,
3649 					0, hole_size, 0, hole_size,
3650 					0, 0, 0);
3651 			if (err) {
3652 				btrfs_abort_transaction(trans, root, err);
3653 				btrfs_end_transaction(trans, root);
3654 				break;
3655 			}
3656 
3657 			btrfs_drop_extent_cache(inode, cur_offset,
3658 						cur_offset + hole_size - 1, 0);
3659 			hole_em = alloc_extent_map();
3660 			if (!hole_em) {
3661 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3662 					&BTRFS_I(inode)->runtime_flags);
3663 				goto next;
3664 			}
3665 			hole_em->start = cur_offset;
3666 			hole_em->len = hole_size;
3667 			hole_em->orig_start = cur_offset;
3668 
3669 			hole_em->block_start = EXTENT_MAP_HOLE;
3670 			hole_em->block_len = 0;
3671 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
3672 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
3673 			hole_em->generation = trans->transid;
3674 
3675 			while (1) {
3676 				write_lock(&em_tree->lock);
3677 				err = add_extent_mapping(em_tree, hole_em);
3678 				if (!err)
3679 					list_move(&hole_em->list,
3680 						  &em_tree->modified_extents);
3681 				write_unlock(&em_tree->lock);
3682 				if (err != -EEXIST)
3683 					break;
3684 				btrfs_drop_extent_cache(inode, cur_offset,
3685 							cur_offset +
3686 							hole_size - 1, 0);
3687 			}
3688 			free_extent_map(hole_em);
3689 next:
3690 			btrfs_update_inode(trans, root, inode);
3691 			btrfs_end_transaction(trans, root);
3692 		}
3693 		free_extent_map(em);
3694 		em = NULL;
3695 		cur_offset = last_byte;
3696 		if (cur_offset >= block_end)
3697 			break;
3698 	}
3699 
3700 	free_extent_map(em);
3701 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3702 			     GFP_NOFS);
3703 	return err;
3704 }
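
/*
 * Illustrative sketch (not part of the original file): the hole range in
 * btrfs_cont_expand() comes from rounding both sizes up to the sector
 * boundary, again assuming a power-of-two sectorsize:
 */
static inline u64 example_round_up_sector(u64 pos, u64 sectorsize)
{
	u64 mask = sectorsize - 1;

	/* e.g. 4097 rounds up to 8192 with 4K sectors */
	return (pos + mask) & ~mask;
}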
3705 
3706 static int btrfs_setsize(struct inode *inode, loff_t newsize)
3707 {
3708 	struct btrfs_root *root = BTRFS_I(inode)->root;
3709 	struct btrfs_trans_handle *trans;
3710 	loff_t oldsize = i_size_read(inode);
3711 	int ret;
3712 
3713 	if (newsize == oldsize)
3714 		return 0;
3715 
3716 	if (newsize > oldsize) {
3717 		truncate_pagecache(inode, oldsize, newsize);
3718 		ret = btrfs_cont_expand(inode, oldsize, newsize);
3719 		if (ret)
3720 			return ret;
3721 
3722 		trans = btrfs_start_transaction(root, 1);
3723 		if (IS_ERR(trans))
3724 			return PTR_ERR(trans);
3725 
3726 		i_size_write(inode, newsize);
3727 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3728 		ret = btrfs_update_inode(trans, root, inode);
3729 		btrfs_end_transaction(trans, root);
3730 	} else {
3731 
3732 		/*
3733 		 * We're truncating a file that used to have good data down to
3734 		 * zero. Make sure it gets into the ordered flush list so that
3735 		 * any new writes get down to disk quickly.
3736 		 */
3737 		if (newsize == 0)
3738 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3739 				&BTRFS_I(inode)->runtime_flags);
3740 
3741 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
3742 		truncate_setsize(inode, newsize);
3743 		ret = btrfs_truncate(inode);
3744 	}
3745 
3746 	return ret;
3747 }
3748 
3749 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3750 {
3751 	struct inode *inode = dentry->d_inode;
3752 	struct btrfs_root *root = BTRFS_I(inode)->root;
3753 	int err;
3754 
3755 	if (btrfs_root_readonly(root))
3756 		return -EROFS;
3757 
3758 	err = inode_change_ok(inode, attr);
3759 	if (err)
3760 		return err;
3761 
3762 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3763 		err = btrfs_setsize(inode, attr->ia_size);
3764 		if (err)
3765 			return err;
3766 	}
3767 
3768 	if (attr->ia_valid) {
3769 		setattr_copy(inode, attr);
3770 		inode_inc_iversion(inode);
3771 		err = btrfs_dirty_inode(inode);
3772 
3773 		if (!err && attr->ia_valid & ATTR_MODE)
3774 			err = btrfs_acl_chmod(inode);
3775 	}
3776 
3777 	return err;
3778 }
3779 
3780 void btrfs_evict_inode(struct inode *inode)
3781 {
3782 	struct btrfs_trans_handle *trans;
3783 	struct btrfs_root *root = BTRFS_I(inode)->root;
3784 	struct btrfs_block_rsv *rsv, *global_rsv;
3785 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3786 	unsigned long nr;
3787 	int ret;
3788 
3789 	trace_btrfs_inode_evict(inode);
3790 
3791 	truncate_inode_pages(&inode->i_data, 0);
3792 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3793 			       btrfs_is_free_space_inode(inode)))
3794 		goto no_delete;
3795 
3796 	if (is_bad_inode(inode)) {
3797 		btrfs_orphan_del(NULL, inode);
3798 		goto no_delete;
3799 	}
3800 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3801 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3802 
3803 	if (root->fs_info->log_root_recovering) {
3804 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3805 				 &BTRFS_I(inode)->runtime_flags));
3806 		goto no_delete;
3807 	}
3808 
3809 	if (inode->i_nlink > 0) {
3810 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3811 		goto no_delete;
3812 	}
3813 
3814 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3815 	if (!rsv) {
3816 		btrfs_orphan_del(NULL, inode);
3817 		goto no_delete;
3818 	}
3819 	rsv->size = min_size;
3820 	rsv->failfast = 1;
3821 	global_rsv = &root->fs_info->global_block_rsv;
3822 
3823 	btrfs_i_size_write(inode, 0);
3824 
3825 	/*
3826 	 * This is a bit simpler than btrfs_truncate since we've already
3827 	 * reserved our space for our orphan item in the unlink, so we just
3828 	 * need to reserve some slack space in case we add bytes and update
3829 	 * inode item when doing the truncate.
3830 	 */
3831 	while (1) {
3832 		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3833 
3834 		/*
3835 		 * Try and steal from the global reserve since we will
3836 		 * likely not use this space anyway, we want to try as
3837 		 * hard as possible to get this to work.
3838 		 */
3839 		if (ret)
3840 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
3841 
3842 		if (ret) {
3843 			printk(KERN_WARNING "Could not get space for a "
3844 			       "delete, will truncate on mount %d\n", ret);
3845 			btrfs_orphan_del(NULL, inode);
3846 			btrfs_free_block_rsv(root, rsv);
3847 			goto no_delete;
3848 		}
3849 
3850 		trans = btrfs_start_transaction_noflush(root, 1);
3851 		if (IS_ERR(trans)) {
3852 			btrfs_orphan_del(NULL, inode);
3853 			btrfs_free_block_rsv(root, rsv);
3854 			goto no_delete;
3855 		}
3856 
3857 		trans->block_rsv = rsv;
3858 
3859 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3860 		if (ret != -ENOSPC)
3861 			break;
3862 
3863 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3864 		ret = btrfs_update_inode(trans, root, inode);
3865 		BUG_ON(ret);
3866 
3867 		nr = trans->blocks_used;
3868 		btrfs_end_transaction(trans, root);
3869 		trans = NULL;
3870 		btrfs_btree_balance_dirty(root, nr);
3871 	}
3872 
3873 	btrfs_free_block_rsv(root, rsv);
3874 
3875 	if (ret == 0) {
3876 		trans->block_rsv = root->orphan_block_rsv;
3877 		ret = btrfs_orphan_del(trans, inode);
3878 		BUG_ON(ret);
3879 	}
3880 
3881 	trans->block_rsv = &root->fs_info->trans_block_rsv;
3882 	if (!(root == root->fs_info->tree_root ||
3883 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3884 		btrfs_return_ino(root, btrfs_ino(inode));
3885 
3886 	nr = trans->blocks_used;
3887 	btrfs_end_transaction(trans, root);
3888 	btrfs_btree_balance_dirty(root, nr);
3889 no_delete:
3890 	clear_inode(inode);
3891 	return;
3892 }
3893 
3894 /*
3895  * this returns the key found in the dir entry via the location pointer.
3896  * If no dir entries were found, location->objectid is 0.
3897  */
3898 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3899 			       struct btrfs_key *location)
3900 {
3901 	const char *name = dentry->d_name.name;
3902 	int namelen = dentry->d_name.len;
3903 	struct btrfs_dir_item *di;
3904 	struct btrfs_path *path;
3905 	struct btrfs_root *root = BTRFS_I(dir)->root;
3906 	int ret = 0;
3907 
3908 	path = btrfs_alloc_path();
3909 	if (!path)
3910 		return -ENOMEM;
3911 
3912 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3913 				    namelen, 0);
3914 	if (IS_ERR(di))
3915 		ret = PTR_ERR(di);
3916 
3917 	if (IS_ERR_OR_NULL(di))
3918 		goto out_err;
3919 
3920 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3921 out:
3922 	btrfs_free_path(path);
3923 	return ret;
3924 out_err:
3925 	location->objectid = 0;
3926 	goto out;
3927 }
3928 
3929 /*
3930  * when we hit a tree root in a directory, the btrfs part of the inode
3931  * needs to be changed to reflect the root directory of the tree root.  This
3932  * is kind of like crossing a mount point.
3933  */
3934 static int fixup_tree_root_location(struct btrfs_root *root,
3935 				    struct inode *dir,
3936 				    struct dentry *dentry,
3937 				    struct btrfs_key *location,
3938 				    struct btrfs_root **sub_root)
3939 {
3940 	struct btrfs_path *path;
3941 	struct btrfs_root *new_root;
3942 	struct btrfs_root_ref *ref;
3943 	struct extent_buffer *leaf;
3944 	int ret;
3945 	int err = 0;
3946 
3947 	path = btrfs_alloc_path();
3948 	if (!path) {
3949 		err = -ENOMEM;
3950 		goto out;
3951 	}
3952 
3953 	err = -ENOENT;
3954 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3955 				  BTRFS_I(dir)->root->root_key.objectid,
3956 				  location->objectid);
3957 	if (ret) {
3958 		if (ret < 0)
3959 			err = ret;
3960 		goto out;
3961 	}
3962 
3963 	leaf = path->nodes[0];
3964 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3965 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3966 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3967 		goto out;
3968 
3969 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3970 				   (unsigned long)(ref + 1),
3971 				   dentry->d_name.len);
3972 	if (ret)
3973 		goto out;
3974 
3975 	btrfs_release_path(path);
3976 
3977 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3978 	if (IS_ERR(new_root)) {
3979 		err = PTR_ERR(new_root);
3980 		goto out;
3981 	}
3982 
3983 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3984 		err = -ENOENT;
3985 		goto out;
3986 	}
3987 
3988 	*sub_root = new_root;
3989 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3990 	location->type = BTRFS_INODE_ITEM_KEY;
3991 	location->offset = 0;
3992 	err = 0;
3993 out:
3994 	btrfs_free_path(path);
3995 	return err;
3996 }
3997 
3998 static void inode_tree_add(struct inode *inode)
3999 {
4000 	struct btrfs_root *root = BTRFS_I(inode)->root;
4001 	struct btrfs_inode *entry;
4002 	struct rb_node **p;
4003 	struct rb_node *parent;
4004 	u64 ino = btrfs_ino(inode);
4005 again:
4006 	p = &root->inode_tree.rb_node;
4007 	parent = NULL;
4008 
4009 	if (inode_unhashed(inode))
4010 		return;
4011 
4012 	spin_lock(&root->inode_lock);
4013 	while (*p) {
4014 		parent = *p;
4015 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4016 
4017 		if (ino < btrfs_ino(&entry->vfs_inode))
4018 			p = &parent->rb_left;
4019 		else if (ino > btrfs_ino(&entry->vfs_inode))
4020 			p = &parent->rb_right;
4021 		else {
4022 			WARN_ON(!(entry->vfs_inode.i_state &
4023 				  (I_WILL_FREE | I_FREEING)));
4024 			rb_erase(parent, &root->inode_tree);
4025 			RB_CLEAR_NODE(parent);
4026 			spin_unlock(&root->inode_lock);
4027 			goto again;
4028 		}
4029 	}
4030 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
4031 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4032 	spin_unlock(&root->inode_lock);
4033 }
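
/*
 * Illustrative note (not part of the original file): inode_tree_add()
 * is the standard rbtree insertion idiom keyed by inode number.  The
 * one twist is the collision case: an entry with the same ino can only
 * be a dying inode (I_WILL_FREE/I_FREEING) that has not yet left the
 * tree, so it is erased and the descent restarts:
 *
 *	if (ino < entry_ino)       p = &parent->rb_left;
 *	else if (ino > entry_ino)  p = &parent->rb_right;
 *	else                       { rb_erase(parent, tree); goto again; }
 */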
4034 
4035 static void inode_tree_del(struct inode *inode)
4036 {
4037 	struct btrfs_root *root = BTRFS_I(inode)->root;
4038 	int empty = 0;
4039 
4040 	spin_lock(&root->inode_lock);
4041 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4042 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4043 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4044 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4045 	}
4046 	spin_unlock(&root->inode_lock);
4047 
4048 	/*
4049 	 * Free space cache has inodes in the tree root, but the tree root has a
4050 	 * root_refs of 0, so this could end up dropping the tree root as a
4051 	 * snapshot, so we need the extra root != root->fs_info->tree_root
4052 	 * check below to make sure we don't drop it.
4053 	 */
4054 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4055 	    root != root->fs_info->tree_root) {
4056 		synchronize_srcu(&root->fs_info->subvol_srcu);
4057 		spin_lock(&root->inode_lock);
4058 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4059 		spin_unlock(&root->inode_lock);
4060 		if (empty)
4061 			btrfs_add_dead_root(root);
4062 	}
4063 }
4064 
4065 void btrfs_invalidate_inodes(struct btrfs_root *root)
4066 {
4067 	struct rb_node *node;
4068 	struct rb_node *prev;
4069 	struct btrfs_inode *entry;
4070 	struct inode *inode;
4071 	u64 objectid = 0;
4072 
4073 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4074 
4075 	spin_lock(&root->inode_lock);
4076 again:
4077 	node = root->inode_tree.rb_node;
4078 	prev = NULL;
4079 	while (node) {
4080 		prev = node;
4081 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4082 
4083 		if (objectid < btrfs_ino(&entry->vfs_inode))
4084 			node = node->rb_left;
4085 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4086 			node = node->rb_right;
4087 		else
4088 			break;
4089 	}
4090 	if (!node) {
4091 		while (prev) {
4092 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4093 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4094 				node = prev;
4095 				break;
4096 			}
4097 			prev = rb_next(prev);
4098 		}
4099 	}
4100 	while (node) {
4101 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4102 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4103 		inode = igrab(&entry->vfs_inode);
4104 		if (inode) {
4105 			spin_unlock(&root->inode_lock);
4106 			if (atomic_read(&inode->i_count) > 1)
4107 				d_prune_aliases(inode);
4108 			/*
4109 			 * btrfs_drop_inode will have it removed from
4110 			 * the inode cache when its usage count
4111 			 * hits zero.
4112 			 */
4113 			iput(inode);
4114 			cond_resched();
4115 			spin_lock(&root->inode_lock);
4116 			goto again;
4117 		}
4118 
4119 		if (cond_resched_lock(&root->inode_lock))
4120 			goto again;
4121 
4122 		node = rb_next(node);
4123 	}
4124 	spin_unlock(&root->inode_lock);
4125 }
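
/*
 * Illustrative sketch (not part of the original file): because the lock
 * is dropped around iput(), btrfs_invalidate_inodes() cannot keep
 * rb_node pointers across iterations.  It instead keeps a plain u64
 * cursor and re-finds the first entry >= cursor on every restart,
 * conceptually:
 *
 *	objectid = 0;
 *	while ((entry = first_entry_ge(tree, objectid)) != NULL) {
 *		objectid = entry_ino(entry) + 1;  // advance past this entry
 *		process_outside_lock(entry);      // may trigger a restart
 *	}
 *
 * first_entry_ge() and entry_ino() are hypothetical names for the
 * open-coded rbtree search and the btrfs_ino() lookup above.
 */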
4126 
4127 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4128 {
4129 	struct btrfs_iget_args *args = p;
4130 	inode->i_ino = args->ino;
4131 	BTRFS_I(inode)->root = args->root;
4132 	return 0;
4133 }
4134 
4135 static int btrfs_find_actor(struct inode *inode, void *opaque)
4136 {
4137 	struct btrfs_iget_args *args = opaque;
4138 	return args->ino == btrfs_ino(inode) &&
4139 		args->root == BTRFS_I(inode)->root;
4140 }
4141 
4142 static struct inode *btrfs_iget_locked(struct super_block *s,
4143 				       u64 objectid,
4144 				       struct btrfs_root *root)
4145 {
4146 	struct inode *inode;
4147 	struct btrfs_iget_args args;
4148 	args.ino = objectid;
4149 	args.root = root;
4150 
4151 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4152 			     btrfs_init_locked_inode,
4153 			     (void *)&args);
4154 	return inode;
4155 }
4156 
4157 /* Get an inode object given its location and corresponding root.
4158  * Sets *new to 1 if the inode had to be read from disk.
4159  */
4160 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4161 			 struct btrfs_root *root, int *new)
4162 {
4163 	struct inode *inode;
4164 
4165 	inode = btrfs_iget_locked(s, location->objectid, root);
4166 	if (!inode)
4167 		return ERR_PTR(-ENOMEM);
4168 
4169 	if (inode->i_state & I_NEW) {
4170 		BTRFS_I(inode)->root = root;
4171 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4172 		btrfs_read_locked_inode(inode);
4173 		if (!is_bad_inode(inode)) {
4174 			inode_tree_add(inode);
4175 			unlock_new_inode(inode);
4176 			if (new)
4177 				*new = 1;
4178 		} else {
4179 			unlock_new_inode(inode);
4180 			iput(inode);
4181 			inode = ERR_PTR(-ESTALE);
4182 		}
4183 	}
4184 
4185 	return inode;
4186 }
4187 
4188 static struct inode *new_simple_dir(struct super_block *s,
4189 				    struct btrfs_key *key,
4190 				    struct btrfs_root *root)
4191 {
4192 	struct inode *inode = new_inode(s);
4193 
4194 	if (!inode)
4195 		return ERR_PTR(-ENOMEM);
4196 
4197 	BTRFS_I(inode)->root = root;
4198 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4199 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4200 
4201 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4202 	inode->i_op = &btrfs_dir_ro_inode_operations;
4203 	inode->i_fop = &simple_dir_operations;
4204 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4205 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4206 
4207 	return inode;
4208 }
4209 
4210 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4211 {
4212 	struct inode *inode;
4213 	struct btrfs_root *root = BTRFS_I(dir)->root;
4214 	struct btrfs_root *sub_root = root;
4215 	struct btrfs_key location;
4216 	int index;
4217 	int ret = 0;
4218 
4219 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4220 		return ERR_PTR(-ENAMETOOLONG);
4221 
4222 	if (unlikely(d_need_lookup(dentry))) {
4223 		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4224 		kfree(dentry->d_fsdata);
4225 		dentry->d_fsdata = NULL;
4226 		/* This thing is hashed, drop it for now */
4227 		d_drop(dentry);
4228 	} else {
4229 		ret = btrfs_inode_by_name(dir, dentry, &location);
4230 	}
4231 
4232 	if (ret < 0)
4233 		return ERR_PTR(ret);
4234 
4235 	if (location.objectid == 0)
4236 		return NULL;
4237 
4238 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4239 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4240 		return inode;
4241 	}
4242 
4243 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4244 
4245 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4246 	ret = fixup_tree_root_location(root, dir, dentry,
4247 				       &location, &sub_root);
4248 	if (ret < 0) {
4249 		if (ret != -ENOENT)
4250 			inode = ERR_PTR(ret);
4251 		else
4252 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4253 	} else {
4254 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4255 	}
4256 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4257 
4258 	if (!IS_ERR(inode) && root != sub_root) {
4259 		down_read(&root->fs_info->cleanup_work_sem);
4260 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4261 			ret = btrfs_orphan_cleanup(sub_root);
4262 		up_read(&root->fs_info->cleanup_work_sem);
4263 		if (ret)
4264 			inode = ERR_PTR(ret);
4265 	}
4266 
4267 	return inode;
4268 }
4269 
4270 static int btrfs_dentry_delete(const struct dentry *dentry)
4271 {
4272 	struct btrfs_root *root;
4273 	struct inode *inode = dentry->d_inode;
4274 
4275 	if (!inode && !IS_ROOT(dentry))
4276 		inode = dentry->d_parent->d_inode;
4277 
4278 	if (inode) {
4279 		root = BTRFS_I(inode)->root;
4280 		if (btrfs_root_refs(&root->root_item) == 0)
4281 			return 1;
4282 
4283 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4284 			return 1;
4285 	}
4286 	return 0;
4287 }
4288 
4289 static void btrfs_dentry_release(struct dentry *dentry)
4290 {
4291 	kfree(dentry->d_fsdata);
4293 }
4294 
4295 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4296 				   unsigned int flags)
4297 {
4298 	struct dentry *ret;
4299 
4300 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4301 	if (unlikely(d_need_lookup(dentry))) {
4302 		spin_lock(&dentry->d_lock);
4303 		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
4304 		spin_unlock(&dentry->d_lock);
4305 	}
4306 	return ret;
4307 }
4308 
4309 unsigned char btrfs_filetype_table[] = {
4310 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4311 };
4312 
4313 static int btrfs_real_readdir(struct file *filp, void *dirent,
4314 			      filldir_t filldir)
4315 {
4316 	struct inode *inode = filp->f_dentry->d_inode;
4317 	struct btrfs_root *root = BTRFS_I(inode)->root;
4318 	struct btrfs_item *item;
4319 	struct btrfs_dir_item *di;
4320 	struct btrfs_key key;
4321 	struct btrfs_key found_key;
4322 	struct btrfs_path *path;
4323 	struct list_head ins_list;
4324 	struct list_head del_list;
4325 	int ret;
4326 	struct extent_buffer *leaf;
4327 	int slot;
4328 	unsigned char d_type;
4329 	int over = 0;
4330 	u32 di_cur;
4331 	u32 di_total;
4332 	u32 di_len;
4333 	int key_type = BTRFS_DIR_INDEX_KEY;
4334 	char tmp_name[32];
4335 	char *name_ptr;
4336 	int name_len;
4337 	int is_curr = 0;	/* filp->f_pos points to the current index? */
4338 
4339 	/* FIXME, use a real flag for deciding about the key type */
4340 	if (root->fs_info->tree_root == root)
4341 		key_type = BTRFS_DIR_ITEM_KEY;
4342 
4343 	/* special case for "." */
4344 	if (filp->f_pos == 0) {
4345 		over = filldir(dirent, ".", 1,
4346 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
4347 		if (over)
4348 			return 0;
4349 		filp->f_pos = 1;
4350 	}
4351 	/* special case for .., just use the back ref */
4352 	if (filp->f_pos == 1) {
4353 		u64 pino = parent_ino(filp->f_path.dentry);
4354 		over = filldir(dirent, "..", 2,
4355 			       filp->f_pos, pino, DT_DIR);
4356 		if (over)
4357 			return 0;
4358 		filp->f_pos = 2;
4359 	}
4360 	path = btrfs_alloc_path();
4361 	if (!path)
4362 		return -ENOMEM;
4363 
4364 	path->reada = 1;
4365 
4366 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4367 		INIT_LIST_HEAD(&ins_list);
4368 		INIT_LIST_HEAD(&del_list);
4369 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
4370 	}
4371 
4372 	btrfs_set_key_type(&key, key_type);
4373 	key.offset = filp->f_pos;
4374 	key.objectid = btrfs_ino(inode);
4375 
4376 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4377 	if (ret < 0)
4378 		goto err;
4379 
4380 	while (1) {
4381 		leaf = path->nodes[0];
4382 		slot = path->slots[0];
4383 		if (slot >= btrfs_header_nritems(leaf)) {
4384 			ret = btrfs_next_leaf(root, path);
4385 			if (ret < 0)
4386 				goto err;
4387 			else if (ret > 0)
4388 				break;
4389 			continue;
4390 		}
4391 
4392 		item = btrfs_item_nr(leaf, slot);
4393 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4394 
4395 		if (found_key.objectid != key.objectid)
4396 			break;
4397 		if (btrfs_key_type(&found_key) != key_type)
4398 			break;
4399 		if (found_key.offset < filp->f_pos)
4400 			goto next;
4401 		if (key_type == BTRFS_DIR_INDEX_KEY &&
4402 		    btrfs_should_delete_dir_index(&del_list,
4403 						  found_key.offset))
4404 			goto next;
4405 
4406 		filp->f_pos = found_key.offset;
4407 		is_curr = 1;
4408 
4409 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4410 		di_cur = 0;
4411 		di_total = btrfs_item_size(leaf, item);
4412 
4413 		while (di_cur < di_total) {
4414 			struct btrfs_key location;
4415 
4416 			if (verify_dir_item(root, leaf, di))
4417 				break;
4418 
4419 			name_len = btrfs_dir_name_len(leaf, di);
4420 			if (name_len <= sizeof(tmp_name)) {
4421 				name_ptr = tmp_name;
4422 			} else {
4423 				name_ptr = kmalloc(name_len, GFP_NOFS);
4424 				if (!name_ptr) {
4425 					ret = -ENOMEM;
4426 					goto err;
4427 				}
4428 			}
4429 			read_extent_buffer(leaf, name_ptr,
4430 					   (unsigned long)(di + 1), name_len);
4431 
4432 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4433 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4434 
4435 
4436 			/* is this a reference to our own snapshot? If so
4437 			 * skip it.
4438 			 *
4439 			 * In contrast to old kernels, we insert the snapshot's
4440 			 * dir item and dir index after it has been created, so
4441 			 * we won't find a reference to our own snapshot. We
4442 			 * still keep the following code for backward
4443 			 * compatibility.
4444 			 */
4445 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4446 			    location.objectid == root->root_key.objectid) {
4447 				over = 0;
4448 				goto skip;
4449 			}
4450 			over = filldir(dirent, name_ptr, name_len,
4451 				       found_key.offset, location.objectid,
4452 				       d_type);
4453 
4454 skip:
4455 			if (name_ptr != tmp_name)
4456 				kfree(name_ptr);
4457 
4458 			if (over)
4459 				goto nopos;
4460 			di_len = btrfs_dir_name_len(leaf, di) +
4461 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4462 			di_cur += di_len;
4463 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4464 		}
4465 next:
4466 		path->slots[0]++;
4467 	}
4468 
4469 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4470 		if (is_curr)
4471 			filp->f_pos++;
4472 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4473 						      &ins_list);
4474 		if (ret)
4475 			goto nopos;
4476 	}
4477 
4478 	/* Reached end of directory/root. Bump pos past the last item. */
4479 	if (key_type == BTRFS_DIR_INDEX_KEY)
4480 		/*
4481 		 * 32-bit glibc will use getdents64, but then parse the offset
4482 		 * with strtol, so the last offset we can serve is 0x7fffffff.
4483 		 */
4484 		filp->f_pos = 0x7fffffff;
4485 	else
4486 		filp->f_pos++;
4487 nopos:
4488 	ret = 0;
4489 err:
4490 	if (key_type == BTRFS_DIR_INDEX_KEY)
4491 		btrfs_put_delayed_items(&ins_list, &del_list);
4492 	btrfs_free_path(path);
4493 	return ret;
4494 }
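
/*
 * Illustrative note (not part of the original file): the f_pos layout
 * used by btrfs_real_readdir() is 0 for ".", 1 for "..", and the
 * DIR_INDEX key offsets (which themselves start at 2, see the magic
 * number comment in btrfs_set_inode_index_count()) for real entries.
 * 0x7fffffff is only an end-of-directory marker, chosen because 32-bit
 * glibc cannot represent larger offsets.
 */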
4495 
4496 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4497 {
4498 	struct btrfs_root *root = BTRFS_I(inode)->root;
4499 	struct btrfs_trans_handle *trans;
4500 	int ret = 0;
4501 	bool nolock = false;
4502 
4503 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4504 		return 0;
4505 
4506 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
4507 		nolock = true;
4508 
4509 	if (wbc->sync_mode == WB_SYNC_ALL) {
4510 		if (nolock)
4511 			trans = btrfs_join_transaction_nolock(root);
4512 		else
4513 			trans = btrfs_join_transaction(root);
4514 		if (IS_ERR(trans))
4515 			return PTR_ERR(trans);
4516 		ret = btrfs_commit_transaction(trans, root);
4517 	}
4518 	return ret;
4519 }
4520 
4521 /*
4522  * This is somewhat expensive, updating the tree every time the
4523  * inode changes.  But, it is most likely to find the inode in cache.
4524  * FIXME, needs more benchmarking... there are no reasons other than performance
4525  * to keep or drop this code.
4526  */
4527 int btrfs_dirty_inode(struct inode *inode)
4528 {
4529 	struct btrfs_root *root = BTRFS_I(inode)->root;
4530 	struct btrfs_trans_handle *trans;
4531 	int ret;
4532 
4533 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4534 		return 0;
4535 
4536 	trans = btrfs_join_transaction(root);
4537 	if (IS_ERR(trans))
4538 		return PTR_ERR(trans);
4539 
4540 	ret = btrfs_update_inode(trans, root, inode);
4541 	if (ret == -ENOSPC) {
4542 		/* whoops, lets try again with the full transaction */
4543 		btrfs_end_transaction(trans, root);
4544 		trans = btrfs_start_transaction(root, 1);
4545 		if (IS_ERR(trans))
4546 			return PTR_ERR(trans);
4547 
4548 		ret = btrfs_update_inode(trans, root, inode);
4549 	}
4550 	btrfs_end_transaction(trans, root);
4551 	if (BTRFS_I(inode)->delayed_node)
4552 		btrfs_balance_delayed_items(root);
4553 
4554 	return ret;
4555 }
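
/*
 * Illustrative sketch (not part of the original file): the pattern in
 * btrfs_dirty_inode() is "piggy-back on a running transaction and only
 * pay for a reserved one when the cheap path hits -ENOSPC":
 *
 *	trans = btrfs_join_transaction(root);      // no new reservation
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret == -ENOSPC) {
 *		btrfs_end_transaction(trans, root);
 *		trans = btrfs_start_transaction(root, 1);  // reserves space
 *		ret = btrfs_update_inode(trans, root, inode);
 *	}
 */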
4556 
4557 /*
4558  * This is a copy of file_update_time.  We need it so we can return an error
4559  * on ENOSPC when updating the inode for file writes and mmap writes.
4560  */
4561 static int btrfs_update_time(struct inode *inode, struct timespec *now,
4562 			     int flags)
4563 {
4564 	struct btrfs_root *root = BTRFS_I(inode)->root;
4565 
4566 	if (btrfs_root_readonly(root))
4567 		return -EROFS;
4568 
4569 	if (flags & S_VERSION)
4570 		inode_inc_iversion(inode);
4571 	if (flags & S_CTIME)
4572 		inode->i_ctime = *now;
4573 	if (flags & S_MTIME)
4574 		inode->i_mtime = *now;
4575 	if (flags & S_ATIME)
4576 		inode->i_atime = *now;
4577 	return btrfs_dirty_inode(inode);
4578 }
4579 
4580 /*
4581  * find the highest existing sequence number in a directory
4582  * and then set the in-memory index_cnt variable to the next
4583  * free sequence number
4584  */
4585 static int btrfs_set_inode_index_count(struct inode *inode)
4586 {
4587 	struct btrfs_root *root = BTRFS_I(inode)->root;
4588 	struct btrfs_key key, found_key;
4589 	struct btrfs_path *path;
4590 	struct extent_buffer *leaf;
4591 	int ret;
4592 
4593 	key.objectid = btrfs_ino(inode);
4594 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4595 	key.offset = (u64)-1;
4596 
4597 	path = btrfs_alloc_path();
4598 	if (!path)
4599 		return -ENOMEM;
4600 
4601 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4602 	if (ret < 0)
4603 		goto out;
4604 	/* FIXME: we should be able to handle this */
4605 	if (ret == 0)
4606 		goto out;
4607 	ret = 0;
4608 
4609 	/*
4610 	 * MAGIC NUMBER EXPLANATION:
4611 	 * since we search a directory based on f_pos, and '.' and '..' have
4612 	 * f_pos of 0 and 1 respectively, every real entry has to start at 2
4614 	 */
4615 	if (path->slots[0] == 0) {
4616 		BTRFS_I(inode)->index_cnt = 2;
4617 		goto out;
4618 	}
4619 
4620 	path->slots[0]--;
4621 
4622 	leaf = path->nodes[0];
4623 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4624 
4625 	if (found_key.objectid != btrfs_ino(inode) ||
4626 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4627 		BTRFS_I(inode)->index_cnt = 2;
4628 		goto out;
4629 	}
4630 
4631 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4632 out:
4633 	btrfs_free_path(path);
4634 	return ret;
4635 }
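
/*
 * Illustrative sketch (not part of the original file): the search above
 * reduces to "next index = highest existing DIR_INDEX offset + 1, or 2
 * for a directory with no index items yet":
 */
static inline u64 example_next_dir_index(u64 highest_existing, int dir_empty)
{
	return dir_empty ? 2 : highest_existing + 1;
}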
4636 
4637 /*
4638  * helper to find a free sequence number in a given directory.  The current
4639  * code is very simple; later versions will do smarter things in the btree.
4640  */
4641 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4642 {
4643 	int ret = 0;
4644 
4645 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4646 		ret = btrfs_inode_delayed_dir_index_count(dir);
4647 		if (ret) {
4648 			ret = btrfs_set_inode_index_count(dir);
4649 			if (ret)
4650 				return ret;
4651 		}
4652 	}
4653 
4654 	*index = BTRFS_I(dir)->index_cnt;
4655 	BTRFS_I(dir)->index_cnt++;
4656 
4657 	return ret;
4658 }
4659 
4660 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4661 				     struct btrfs_root *root,
4662 				     struct inode *dir,
4663 				     const char *name, int name_len,
4664 				     u64 ref_objectid, u64 objectid,
4665 				     umode_t mode, u64 *index)
4666 {
4667 	struct inode *inode;
4668 	struct btrfs_inode_item *inode_item;
4669 	struct btrfs_key *location;
4670 	struct btrfs_path *path;
4671 	struct btrfs_inode_ref *ref;
4672 	struct btrfs_key key[2];
4673 	u32 sizes[2];
4674 	unsigned long ptr;
4675 	int ret;
4676 	int owner;
4677 
4678 	path = btrfs_alloc_path();
4679 	if (!path)
4680 		return ERR_PTR(-ENOMEM);
4681 
4682 	inode = new_inode(root->fs_info->sb);
4683 	if (!inode) {
4684 		btrfs_free_path(path);
4685 		return ERR_PTR(-ENOMEM);
4686 	}
4687 
4688 	/*
4689 	 * we have to initialize this early, so we can reclaim the inode
4690 	 * number if we fail afterwards in this function.
4691 	 */
4692 	inode->i_ino = objectid;
4693 
4694 	if (dir) {
4695 		trace_btrfs_inode_request(dir);
4696 
4697 		ret = btrfs_set_inode_index(dir, index);
4698 		if (ret) {
4699 			btrfs_free_path(path);
4700 			iput(inode);
4701 			return ERR_PTR(ret);
4702 		}
4703 	}
4704 	/*
4705 	 * index_cnt is ignored for everything but a dir,
4706 	 * btrfs_set_inode_index_count has an explanation for the magic
4707 	 * number
4708 	 */
4709 	BTRFS_I(inode)->index_cnt = 2;
4710 	BTRFS_I(inode)->root = root;
4711 	BTRFS_I(inode)->generation = trans->transid;
4712 	inode->i_generation = BTRFS_I(inode)->generation;
4713 
4714 	/*
4715 	 * We could have gotten an inode number from somebody who was fsynced
4716 	 * and then removed in this same transaction, so let's just set full
4717 	 * sync since it will be a full sync anyway and this will blow away the
4718 	 * old info in the log.
4719 	 */
4720 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
4721 
4722 	if (S_ISDIR(mode))
4723 		owner = 0;
4724 	else
4725 		owner = 1;
4726 
4727 	key[0].objectid = objectid;
4728 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4729 	key[0].offset = 0;
4730 
4731 	/*
4732 	 * Start new inodes with an inode_ref. This is slightly more
4733 	 * efficient for small numbers of hard links since they will
4734 	 * be packed into one item. Extended refs will kick in if we
4735 	 * add more hard links than can fit in the ref item.
4736 	 */
4737 	key[1].objectid = objectid;
4738 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4739 	key[1].offset = ref_objectid;
4740 
4741 	sizes[0] = sizeof(struct btrfs_inode_item);
4742 	sizes[1] = name_len + sizeof(*ref);
4743 
4744 	path->leave_spinning = 1;
4745 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4746 	if (ret != 0)
4747 		goto fail;
4748 
4749 	inode_init_owner(inode, dir, mode);
4750 	inode_set_bytes(inode, 0);
4751 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4752 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4753 				  struct btrfs_inode_item);
4754 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
4755 			     sizeof(*inode_item));
4756 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4757 
4758 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4759 			     struct btrfs_inode_ref);
4760 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4761 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4762 	ptr = (unsigned long)(ref + 1);
4763 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4764 
4765 	btrfs_mark_buffer_dirty(path->nodes[0]);
4766 	btrfs_free_path(path);
4767 
4768 	location = &BTRFS_I(inode)->location;
4769 	location->objectid = objectid;
4770 	location->offset = 0;
4771 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4772 
4773 	btrfs_inherit_iflags(inode, dir);
4774 
4775 	if (S_ISREG(mode)) {
4776 		if (btrfs_test_opt(root, NODATASUM))
4777 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4778 		if (btrfs_test_opt(root, NODATACOW) ||
4779 		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4780 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4781 	}
4782 
4783 	insert_inode_hash(inode);
4784 	inode_tree_add(inode);
4785 
4786 	trace_btrfs_inode_new(inode);
4787 	btrfs_set_inode_last_trans(trans, inode);
4788 
4789 	btrfs_update_root_times(trans, root);
4790 
4791 	return inode;
4792 fail:
4793 	if (dir)
4794 		BTRFS_I(dir)->index_cnt--;
4795 	btrfs_free_path(path);
4796 	iput(inode);
4797 	return ERR_PTR(ret);
4798 }
4799 
4800 static inline u8 btrfs_inode_type(struct inode *inode)
4801 {
4802 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4803 }
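
/*
 * Illustrative note (not part of the original file): S_IFMT occupies the
 * top bits of i_mode, so shifting right by S_SHIFT (12) turns the format
 * bits into a small index into btrfs_type_by_mode, e.g. for a regular
 * file:
 *
 *	S_IFREG == 0100000 (octal), 0100000 >> 12 == 8
 *	btrfs_type_by_mode[8] == BTRFS_FT_REG_FILE
 */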
4804 
4805 /*
4806  * utility function to add 'inode' into 'parent_inode' with
4807  * a given name and a given sequence number.
4808  * If 'add_backref' is true, also insert a backref from the
4809  * inode to the parent directory.
4810  */
4811 int btrfs_add_link(struct btrfs_trans_handle *trans,
4812 		   struct inode *parent_inode, struct inode *inode,
4813 		   const char *name, int name_len, int add_backref, u64 index)
4814 {
4815 	int ret = 0;
4816 	struct btrfs_key key;
4817 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4818 	u64 ino = btrfs_ino(inode);
4819 	u64 parent_ino = btrfs_ino(parent_inode);
4820 
4821 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4822 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4823 	} else {
4824 		key.objectid = ino;
4825 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4826 		key.offset = 0;
4827 	}
4828 
4829 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4830 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4831 					 key.objectid, root->root_key.objectid,
4832 					 parent_ino, index, name, name_len);
4833 	} else if (add_backref) {
4834 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4835 					     parent_ino, index);
4836 	}
4837 
4838 	/* Nothing to clean up yet */
4839 	if (ret)
4840 		return ret;
4841 
4842 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
4843 				    parent_inode, &key,
4844 				    btrfs_inode_type(inode), index);
4845 	if (ret == -EEXIST)
4846 		goto fail_dir_item;
4847 	else if (ret) {
4848 		btrfs_abort_transaction(trans, root, ret);
4849 		return ret;
4850 	}
4851 
4852 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
4853 			   name_len * 2);
4854 	inode_inc_iversion(parent_inode);
4855 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4856 	ret = btrfs_update_inode(trans, root, parent_inode);
4857 	if (ret)
4858 		btrfs_abort_transaction(trans, root, ret);
4859 	return ret;
4860 
4861 fail_dir_item:
4862 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4863 		u64 local_index;
4864 		int err;
4865 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4866 				 key.objectid, root->root_key.objectid,
4867 				 parent_ino, &local_index, name, name_len);
4868 
4869 	} else if (add_backref) {
4870 		u64 local_index;
4871 		int err;
4872 
4873 		err = btrfs_del_inode_ref(trans, root, name, name_len,
4874 					  ino, parent_ino, &local_index);
4875 	}
4876 	return ret;
4877 }
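
/*
 * Illustrative note (not part of the original file): the name_len * 2
 * in btrfs_add_link() reflects that every directory entry stores its
 * name twice, once in the DIR_ITEM (hashed by name) and once in the
 * DIR_INDEX item (ordered by sequence number); a btrfs directory's
 * i_size is the sum of the name lengths of both item types.
 */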
4878 
4879 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4880 			    struct inode *dir, struct dentry *dentry,
4881 			    struct inode *inode, int backref, u64 index)
4882 {
4883 	int err = btrfs_add_link(trans, dir, inode,
4884 				 dentry->d_name.name, dentry->d_name.len,
4885 				 backref, index);
4886 	if (err > 0)
4887 		err = -EEXIST;
4888 	return err;
4889 }
4890 
4891 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4892 			umode_t mode, dev_t rdev)
4893 {
4894 	struct btrfs_trans_handle *trans;
4895 	struct btrfs_root *root = BTRFS_I(dir)->root;
4896 	struct inode *inode = NULL;
4897 	int err;
4898 	int drop_inode = 0;
4899 	u64 objectid;
4900 	unsigned long nr = 0;
4901 	u64 index = 0;
4902 
4903 	if (!new_valid_dev(rdev))
4904 		return -EINVAL;
4905 
4906 	/*
4907 	 * 2 for inode item and ref
4908 	 * 2 for dir items
4909 	 * 1 for xattr if selinux is on
4910 	 */
4911 	trans = btrfs_start_transaction(root, 5);
4912 	if (IS_ERR(trans))
4913 		return PTR_ERR(trans);
4914 
4915 	err = btrfs_find_free_ino(root, &objectid);
4916 	if (err)
4917 		goto out_unlock;
4918 
4919 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4920 				dentry->d_name.len, btrfs_ino(dir), objectid,
4921 				mode, &index);
4922 	if (IS_ERR(inode)) {
4923 		err = PTR_ERR(inode);
4924 		goto out_unlock;
4925 	}
4926 
4927 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4928 	if (err) {
4929 		drop_inode = 1;
4930 		goto out_unlock;
4931 	}
4932 
4933 	/*
4934 	 * If the active LSM wants to access the inode during
4935 	 * d_instantiate it needs these. Smack checks to see
4936 	 * if the filesystem supports xattrs by looking at the
4937 	 * ops vector.
4938 	 */
4939 
4940 	inode->i_op = &btrfs_special_inode_operations;
4941 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4942 	if (err)
4943 		drop_inode = 1;
4944 	else {
4945 		init_special_inode(inode, inode->i_mode, rdev);
4946 		btrfs_update_inode(trans, root, inode);
4947 		d_instantiate(dentry, inode);
4948 	}
4949 out_unlock:
4950 	nr = trans->blocks_used;
4951 	btrfs_end_transaction(trans, root);
4952 	btrfs_btree_balance_dirty(root, nr);
4953 	if (drop_inode) {
4954 		inode_dec_link_count(inode);
4955 		iput(inode);
4956 	}
4957 	return err;
4958 }
4959 
4960 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4961 			umode_t mode, bool excl)
4962 {
4963 	struct btrfs_trans_handle *trans;
4964 	struct btrfs_root *root = BTRFS_I(dir)->root;
4965 	struct inode *inode = NULL;
4966 	int drop_inode = 0;
4967 	int err;
4968 	unsigned long nr = 0;
4969 	u64 objectid;
4970 	u64 index = 0;
4971 
4972 	/*
4973 	 * 2 for inode item and ref
4974 	 * 2 for dir items
4975 	 * 1 for xattr if selinux is on
4976 	 */
4977 	trans = btrfs_start_transaction(root, 5);
4978 	if (IS_ERR(trans))
4979 		return PTR_ERR(trans);
4980 
4981 	err = btrfs_find_free_ino(root, &objectid);
4982 	if (err)
4983 		goto out_unlock;
4984 
4985 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4986 				dentry->d_name.len, btrfs_ino(dir), objectid,
4987 				mode, &index);
4988 	if (IS_ERR(inode)) {
4989 		err = PTR_ERR(inode);
4990 		goto out_unlock;
4991 	}
4992 
4993 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4994 	if (err) {
4995 		drop_inode = 1;
4996 		goto out_unlock;
4997 	}
4998 
4999 	/*
5000 	* If the active LSM wants to access the inode during
5001 	* d_instantiate it needs these. Smack checks to see
5002 	* if the filesystem supports xattrs by looking at the
5003 	* ops vector.
5004 	*/
5005 	inode->i_fop = &btrfs_file_operations;
5006 	inode->i_op = &btrfs_file_inode_operations;
5007 
5008 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5009 	if (err)
5010 		drop_inode = 1;
5011 	else {
5012 		inode->i_mapping->a_ops = &btrfs_aops;
5013 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5014 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5015 		d_instantiate(dentry, inode);
5016 	}
5017 out_unlock:
5018 	nr = trans->blocks_used;
5019 	btrfs_end_transaction(trans, root);
5020 	if (drop_inode) {
5021 		inode_dec_link_count(inode);
5022 		iput(inode);
5023 	}
5024 	btrfs_btree_balance_dirty(root, nr);
5025 	return err;
5026 }
5027 
5028 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5029 		      struct dentry *dentry)
5030 {
5031 	struct btrfs_trans_handle *trans;
5032 	struct btrfs_root *root = BTRFS_I(dir)->root;
5033 	struct inode *inode = old_dentry->d_inode;
5034 	u64 index;
5035 	unsigned long nr = 0;
5036 	int err;
5037 	int drop_inode = 0;
5038 
5039 	/* do not allow sys_link across subvolumes of the same device */
5040 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5041 		return -EXDEV;
5042 
5043 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5044 		return -EMLINK;
5045 
5046 	err = btrfs_set_inode_index(dir, &index);
5047 	if (err)
5048 		goto fail;
5049 
5050 	/*
5051 	 * 2 items for inode and inode ref
5052 	 * 2 items for dir items
5053 	 * 1 item for parent inode
5054 	 */
5055 	trans = btrfs_start_transaction(root, 5);
5056 	if (IS_ERR(trans)) {
5057 		err = PTR_ERR(trans);
5058 		goto fail;
5059 	}
5060 
5061 	btrfs_inc_nlink(inode);
5062 	inode_inc_iversion(inode);
5063 	inode->i_ctime = CURRENT_TIME;
5064 	ihold(inode);
5065 
5066 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5067 
5068 	if (err) {
5069 		drop_inode = 1;
5070 	} else {
5071 		struct dentry *parent = dentry->d_parent;
5072 		err = btrfs_update_inode(trans, root, inode);
5073 		if (err)
5074 			goto fail;
5075 		d_instantiate(dentry, inode);
5076 		btrfs_log_new_name(trans, inode, NULL, parent);
5077 	}
5078 
5079 	nr = trans->blocks_used;
5080 	btrfs_end_transaction(trans, root);
5081 fail:
5082 	if (drop_inode) {
5083 		inode_dec_link_count(inode);
5084 		iput(inode);
5085 	}
5086 	btrfs_btree_balance_dirty(root, nr);
5087 	return err;
5088 }
5089 
5090 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5091 {
5092 	struct inode *inode = NULL;
5093 	struct btrfs_trans_handle *trans;
5094 	struct btrfs_root *root = BTRFS_I(dir)->root;
5095 	int err = 0;
5096 	int drop_on_err = 0;
5097 	u64 objectid = 0;
5098 	u64 index = 0;
5099 	unsigned long nr = 1;
5100 
5101 	/*
5102 	 * 2 items for inode and ref
5103 	 * 2 items for dir items
5104 	 * 1 for xattr if selinux is on
5105 	 */
5106 	trans = btrfs_start_transaction(root, 5);
5107 	if (IS_ERR(trans))
5108 		return PTR_ERR(trans);
5109 
5110 	err = btrfs_find_free_ino(root, &objectid);
5111 	if (err)
5112 		goto out_fail;
5113 
5114 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5115 				dentry->d_name.len, btrfs_ino(dir), objectid,
5116 				S_IFDIR | mode, &index);
5117 	if (IS_ERR(inode)) {
5118 		err = PTR_ERR(inode);
5119 		goto out_fail;
5120 	}
5121 
5122 	drop_on_err = 1;
5123 
5124 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5125 	if (err)
5126 		goto out_fail;
5127 
5128 	inode->i_op = &btrfs_dir_inode_operations;
5129 	inode->i_fop = &btrfs_dir_file_operations;
5130 
5131 	btrfs_i_size_write(inode, 0);
5132 	err = btrfs_update_inode(trans, root, inode);
5133 	if (err)
5134 		goto out_fail;
5135 
5136 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5137 			     dentry->d_name.len, 0, index);
5138 	if (err)
5139 		goto out_fail;
5140 
5141 	d_instantiate(dentry, inode);
5142 	drop_on_err = 0;
5143 
5144 out_fail:
5145 	nr = trans->blocks_used;
5146 	btrfs_end_transaction(trans, root);
5147 	if (drop_on_err)
5148 		iput(inode);
5149 	btrfs_btree_balance_dirty(root, nr);
5150 	return err;
5151 }
5152 
5153 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5154  * and an extent that you want to insert, deal with overlap and insert
5155  * the new extent into the tree.
5156  */
5157 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5158 				struct extent_map *existing,
5159 				struct extent_map *em,
5160 				u64 map_start, u64 map_len)
5161 {
5162 	u64 start_diff;
5163 
5164 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5165 	start_diff = map_start - em->start;
5166 	em->start = map_start;
5167 	em->len = map_len;
5168 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5169 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5170 		em->block_start += start_diff;
5171 		em->block_len -= start_diff;
5172 	}
5173 	return add_extent_mapping(em_tree, em);
5174 }
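
/*
 * Illustrative sketch (not part of the original file): the adjustment in
 * merge_extent_mapping() moves the front of 'em' forward to map_start
 * and shifts the on-disk range by the same delta (for uncompressed,
 * real extents only):
 */
static inline void example_trim_front(u64 *start, u64 *len,
				      u64 *block_start, u64 *block_len,
				      u64 map_start, u64 map_len)
{
	u64 start_diff = map_start - *start;	/* bytes dropped at the front */

	*start = map_start;
	*len = map_len;
	*block_start += start_diff;
	*block_len -= start_diff;
}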
5175 
5176 static noinline int uncompress_inline(struct btrfs_path *path,
5177 				      struct inode *inode, struct page *page,
5178 				      size_t pg_offset, u64 extent_offset,
5179 				      struct btrfs_file_extent_item *item)
5180 {
5181 	int ret;
5182 	struct extent_buffer *leaf = path->nodes[0];
5183 	char *tmp;
5184 	size_t max_size;
5185 	unsigned long inline_size;
5186 	unsigned long ptr;
5187 	int compress_type;
5188 
5189 	WARN_ON(pg_offset != 0);
5190 	compress_type = btrfs_file_extent_compression(leaf, item);
5191 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5192 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5193 					btrfs_item_nr(leaf, path->slots[0]));
5194 	tmp = kmalloc(inline_size, GFP_NOFS);
5195 	if (!tmp)
5196 		return -ENOMEM;
5197 	ptr = btrfs_file_extent_inline_start(item);
5198 
5199 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5200 
5201 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5202 	ret = btrfs_decompress(compress_type, tmp, page,
5203 			       extent_offset, inline_size, max_size);
5204 	if (ret) {
5205 		char *kaddr = kmap_atomic(page);
5206 		unsigned long copy_size = min_t(u64,
5207 				  PAGE_CACHE_SIZE - pg_offset,
5208 				  max_size - extent_offset);
5209 		memset(kaddr + pg_offset, 0, copy_size);
5210 		kunmap_atomic(kaddr);
5211 	}
5212 	kfree(tmp);
5213 	return 0;
5214 }
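
/*
 * Illustrative note (not part of the original file): on decompression
 * failure uncompress_inline() zeroes the part of the page it could not
 * fill rather than propagating the error, clamping the length to both
 * the page and the logical extent:
 *
 *	copy_size = min(PAGE_CACHE_SIZE - pg_offset,
 *			max_size - extent_offset);
 *	memset(kaddr + pg_offset, 0, copy_size);
 *
 * and then returns 0 regardless.
 */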
5215 
5216 /*
5217  * A bit scary: this does extent mapping from logical file offset to the disk.
5218  * The ugly parts come from merging extents from the disk with the in-ram
5219  * representation.  This gets more complex because of the data=ordered code,
5220  * where the in-ram extents might be locked pending data=ordered completion.
5221  *
5222  * This also copies inline extents directly into the page.
5223  */
5224 
5225 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5226 				    size_t pg_offset, u64 start, u64 len,
5227 				    int create)
5228 {
5229 	int ret;
5230 	int err = 0;
5231 	u64 bytenr;
5232 	u64 extent_start = 0;
5233 	u64 extent_end = 0;
5234 	u64 objectid = btrfs_ino(inode);
5235 	u32 found_type;
5236 	struct btrfs_path *path = NULL;
5237 	struct btrfs_root *root = BTRFS_I(inode)->root;
5238 	struct btrfs_file_extent_item *item;
5239 	struct extent_buffer *leaf;
5240 	struct btrfs_key found_key;
5241 	struct extent_map *em = NULL;
5242 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5243 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5244 	struct btrfs_trans_handle *trans = NULL;
5245 	int compress_type;
5246 
5247 again:
5248 	read_lock(&em_tree->lock);
5249 	em = lookup_extent_mapping(em_tree, start, len);
5250 	if (em)
5251 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5252 	read_unlock(&em_tree->lock);
5253 
5254 	if (em) {
5255 		if (em->start > start || em->start + em->len <= start)
5256 			free_extent_map(em);
5257 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5258 			free_extent_map(em);
5259 		else
5260 			goto out;
5261 	}
5262 	em = alloc_extent_map();
5263 	if (!em) {
5264 		err = -ENOMEM;
5265 		goto out;
5266 	}
5267 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5268 	em->start = EXTENT_MAP_HOLE;
5269 	em->orig_start = EXTENT_MAP_HOLE;
5270 	em->len = (u64)-1;
5271 	em->block_len = (u64)-1;
5272 
5273 	if (!path) {
5274 		path = btrfs_alloc_path();
5275 		if (!path) {
5276 			err = -ENOMEM;
5277 			goto out;
5278 		}
5279 		/*
5280 		 * Chances are we'll be called again, so go ahead and do
5281 		 * readahead
5282 		 */
5283 		path->reada = 1;
5284 	}
5285 
5286 	ret = btrfs_lookup_file_extent(trans, root, path,
5287 				       objectid, start, trans != NULL);
5288 	if (ret < 0) {
5289 		err = ret;
5290 		goto out;
5291 	}
5292 
5293 	if (ret != 0) {
5294 		if (path->slots[0] == 0)
5295 			goto not_found;
5296 		path->slots[0]--;
5297 	}
5298 
5299 	leaf = path->nodes[0];
5300 	item = btrfs_item_ptr(leaf, path->slots[0],
5301 			      struct btrfs_file_extent_item);
5302 	/* are we inside the extent that was found? */
5303 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5304 	found_type = btrfs_key_type(&found_key);
5305 	if (found_key.objectid != objectid ||
5306 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5307 		goto not_found;
5308 	}
5309 
5310 	found_type = btrfs_file_extent_type(leaf, item);
5311 	extent_start = found_key.offset;
5312 	compress_type = btrfs_file_extent_compression(leaf, item);
5313 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5314 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5315 		extent_end = extent_start +
5316 		       btrfs_file_extent_num_bytes(leaf, item);
5317 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5318 		size_t size;
5319 		size = btrfs_file_extent_inline_len(leaf, item);
5320 		extent_end = (extent_start + size + root->sectorsize - 1) &
5321 			~((u64)root->sectorsize - 1);
5322 	}
5323 
5324 	if (start >= extent_end) {
5325 		path->slots[0]++;
5326 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5327 			ret = btrfs_next_leaf(root, path);
5328 			if (ret < 0) {
5329 				err = ret;
5330 				goto out;
5331 			}
5332 			if (ret > 0)
5333 				goto not_found;
5334 			leaf = path->nodes[0];
5335 		}
5336 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5337 		if (found_key.objectid != objectid ||
5338 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5339 			goto not_found;
5340 		if (start + len <= found_key.offset)
5341 			goto not_found;
5342 		em->start = start;
5343 		em->len = found_key.offset - start;
5344 		goto not_found_em;
5345 	}
5346 
5347 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5348 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5349 		em->start = extent_start;
5350 		em->len = extent_end - extent_start;
5351 		em->orig_start = extent_start -
5352 				 btrfs_file_extent_offset(leaf, item);
5353 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5354 		if (bytenr == 0) {
5355 			em->block_start = EXTENT_MAP_HOLE;
5356 			goto insert;
5357 		}
5358 		if (compress_type != BTRFS_COMPRESS_NONE) {
5359 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5360 			em->compress_type = compress_type;
5361 			em->block_start = bytenr;
5362 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
5363 									 item);
5364 		} else {
5365 			bytenr += btrfs_file_extent_offset(leaf, item);
5366 			em->block_start = bytenr;
5367 			em->block_len = em->len;
5368 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5369 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5370 		}
5371 		goto insert;
5372 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5373 		unsigned long ptr;
5374 		char *map;
5375 		size_t size;
5376 		size_t extent_offset;
5377 		size_t copy_size;
5378 
5379 		em->block_start = EXTENT_MAP_INLINE;
5380 		if (!page || create) {
5381 			em->start = extent_start;
5382 			em->len = extent_end - extent_start;
5383 			goto out;
5384 		}
5385 
5386 		size = btrfs_file_extent_inline_len(leaf, item);
5387 		extent_offset = page_offset(page) + pg_offset - extent_start;
5388 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5389 				size - extent_offset);
5390 		em->start = extent_start + extent_offset;
5391 		em->len = (copy_size + root->sectorsize - 1) &
5392 			~((u64)root->sectorsize - 1);
5393 		em->orig_start = EXTENT_MAP_INLINE;
5394 		if (compress_type) {
5395 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5396 			em->compress_type = compress_type;
5397 		}
5398 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5399 		if (create == 0 && !PageUptodate(page)) {
5400 			if (btrfs_file_extent_compression(leaf, item) !=
5401 			    BTRFS_COMPRESS_NONE) {
5402 				ret = uncompress_inline(path, inode, page,
5403 							pg_offset,
5404 							extent_offset, item);
5405 				BUG_ON(ret); /* -ENOMEM */
5406 			} else {
5407 				map = kmap(page);
5408 				read_extent_buffer(leaf, map + pg_offset, ptr,
5409 						   copy_size);
5410 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5411 					memset(map + pg_offset + copy_size, 0,
5412 					       PAGE_CACHE_SIZE - pg_offset -
5413 					       copy_size);
5414 				}
5415 				kunmap(page);
5416 			}
5417 			flush_dcache_page(page);
5418 		} else if (create && PageUptodate(page)) {
5419 			BUG();
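			/* never reached: the BUG() above makes everything below dead code */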
5420 			if (!trans) {
5421 				kunmap(page);
5422 				free_extent_map(em);
5423 				em = NULL;
5424 
5425 				btrfs_release_path(path);
5426 				trans = btrfs_join_transaction(root);
5427 
5428 				if (IS_ERR(trans))
5429 					return ERR_CAST(trans);
5430 				goto again;
5431 			}
5432 			map = kmap(page);
5433 			write_extent_buffer(leaf, map + pg_offset, ptr,
5434 					    copy_size);
5435 			kunmap(page);
5436 			btrfs_mark_buffer_dirty(leaf);
5437 		}
5438 		set_extent_uptodate(io_tree, em->start,
5439 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5440 		goto insert;
5441 	} else {
5442 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5443 		WARN_ON(1);
5444 	}
5445 not_found:
5446 	em->start = start;
5447 	em->len = len;
5448 not_found_em:
5449 	em->block_start = EXTENT_MAP_HOLE;
5450 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5451 insert:
5452 	btrfs_release_path(path);
5453 	if (em->start > start || extent_map_end(em) <= start) {
5454 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5455 		       "[%llu %llu]\n", (unsigned long long)em->start,
5456 		       (unsigned long long)em->len,
5457 		       (unsigned long long)start,
5458 		       (unsigned long long)len);
5459 		err = -EIO;
5460 		goto out;
5461 	}
5462 
5463 	err = 0;
5464 	write_lock(&em_tree->lock);
5465 	ret = add_extent_mapping(em_tree, em);
5466 	/* it is possible that someone inserted the extent into the tree
5467 	 * while we had the lock dropped.  It is also possible that
5468 	 * an overlapping map exists in the tree
5469 	 */
5470 	if (ret == -EEXIST) {
5471 		struct extent_map *existing;
5472 
5473 		ret = 0;
5474 
5475 		existing = lookup_extent_mapping(em_tree, start, len);
5476 		if (existing && (existing->start > start ||
5477 		    existing->start + existing->len <= start)) {
5478 			free_extent_map(existing);
5479 			existing = NULL;
5480 		}
5481 		if (!existing) {
5482 			existing = lookup_extent_mapping(em_tree, em->start,
5483 							 em->len);
5484 			if (existing) {
5485 				err = merge_extent_mapping(em_tree, existing,
5486 							   em, start,
5487 							   root->sectorsize);
5488 				free_extent_map(existing);
5489 				if (err) {
5490 					free_extent_map(em);
5491 					em = NULL;
5492 				}
5493 			} else {
5494 				err = -EIO;
5495 				free_extent_map(em);
5496 				em = NULL;
5497 			}
5498 		} else {
5499 			free_extent_map(em);
5500 			em = existing;
5501 			err = 0;
5502 		}
5503 	}
5504 	write_unlock(&em_tree->lock);
5505 out:
5506 
5507 	if (em)
5508 		trace_btrfs_get_extent(root, em);
5509 
5510 	if (path)
5511 		btrfs_free_path(path);
5512 	if (trans) {
5513 		ret = btrfs_end_transaction(trans, root);
5514 		if (!err)
5515 			err = ret;
5516 	}
5517 	if (err) {
5518 		free_extent_map(em);
5519 		return ERR_PTR(err);
5520 	}
5521 	BUG_ON(!em); /* Error is always set */
5522 	return em;
5523 }
5524 
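/*
 * Fiemap variant of btrfs_get_extent(): if the mapping we get back is a
 * hole, check the io_tree for delalloc bytes hiding behind it and report
 * those as an EXTENT_MAP_DELALLOC mapping instead.
 */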
5525 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5526 					   size_t pg_offset, u64 start, u64 len,
5527 					   int create)
5528 {
5529 	struct extent_map *em;
5530 	struct extent_map *hole_em = NULL;
5531 	u64 range_start = start;
5532 	u64 end;
5533 	u64 found;
5534 	u64 found_end;
5535 	int err = 0;
5536 
5537 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5538 	if (IS_ERR(em))
5539 		return em;
5540 	if (em) {
5541 		/*
5542 		 * if our em maps to a hole, there might
5543 		 * actually be delalloc bytes behind it
5544 		 */
5545 		if (em->block_start != EXTENT_MAP_HOLE)
5546 			return em;
5547 		else
5548 			hole_em = em;
5549 	}
5550 
5551 	/* check to see if we've wrapped (len == -1 or similar) */
5552 	end = start + len;
5553 	if (end < start)
5554 		end = (u64)-1;
5555 	else
5556 		end -= 1;
5557 
5558 	em = NULL;
5559 
5560 	/* ok, we didn't find anything, let's look for delalloc */
5561 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5562 				 end, len, EXTENT_DELALLOC, 1);
5563 	found_end = range_start + found;
5564 	if (found_end < range_start)
5565 		found_end = (u64)-1;
5566 
5567 	/*
5568 	 * we didn't find anything useful, return
5569 	 * the original results from get_extent()
5570 	 */
5571 	if (range_start > end || found_end <= start) {
5572 		em = hole_em;
5573 		hole_em = NULL;
5574 		goto out;
5575 	}
5576 
5577 	/* adjust the range_start to make sure it doesn't
5578 	 * go backwards from the start they passed in
5579 	 */
5580 	range_start = max(start, range_start);
5581 	found = found_end - range_start;
5582 
5583 	if (found > 0) {
5584 		u64 hole_start = start;
5585 		u64 hole_len = len;
5586 
5587 		em = alloc_extent_map();
5588 		if (!em) {
5589 			err = -ENOMEM;
5590 			goto out;
5591 		}
5592 		/*
5593 		 * when btrfs_get_extent can't find anything it
5594 		 * returns one huge hole
5595 		 *
5596 		 * make sure what it found really fits our range, and
5597 		 * adjust to make sure it is based on the start from
5598 		 * the caller
5599 		 */
5600 		if (hole_em) {
5601 			u64 calc_end = extent_map_end(hole_em);
5602 
5603 			if (calc_end <= start || (hole_em->start > end)) {
5604 				free_extent_map(hole_em);
5605 				hole_em = NULL;
5606 			} else {
5607 				hole_start = max(hole_em->start, start);
5608 				hole_len = calc_end - hole_start;
5609 			}
5610 		}
5611 		em->bdev = NULL;
5612 		if (hole_em && range_start > hole_start) {
5613 			/* our hole starts before our delalloc, so we
5614 			 * have to return just the parts of the hole
5615 			 * that go until the delalloc starts
5616 			 */
5617 			em->len = min(hole_len,
5618 				      range_start - hole_start);
5619 			em->start = hole_start;
5620 			em->orig_start = hole_start;
5621 			/*
5622 			 * don't adjust block start at all,
5623 			 * it is fixed at EXTENT_MAP_HOLE
5624 			 */
5625 			em->block_start = hole_em->block_start;
5626 			em->block_len = hole_len;
5627 		} else {
5628 			em->start = range_start;
5629 			em->len = found;
5630 			em->orig_start = range_start;
5631 			em->block_start = EXTENT_MAP_DELALLOC;
5632 			em->block_len = found;
5633 		}
5634 	} else if (hole_em) {
5635 		return hole_em;
5636 	}
5637 out:
5638 
5639 	free_extent_map(hole_em);
5640 	if (err) {
5641 		free_extent_map(em);
5642 		return ERR_PTR(err);
5643 	}
5644 	return em;
5645 }
5646 
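/*
 * Allocate a fresh extent for a direct IO write: reserve the space, set
 * up a pinned extent map for the range and queue a dio ordered extent so
 * the write is accounted when it completes.
 */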
5647 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5648 						  struct extent_map *em,
5649 						  u64 start, u64 len)
5650 {
5651 	struct btrfs_root *root = BTRFS_I(inode)->root;
5652 	struct btrfs_trans_handle *trans;
5653 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5654 	struct btrfs_key ins;
5655 	u64 alloc_hint;
5656 	int ret;
5657 	bool insert = false;
5658 
5659 	/*
5660 	 * Ok, if the extent map we looked up is a hole for the exact range we
5661 	 * want, there is no reason to allocate a new one.  However, if it is
5662 	 * not right, we need to free this one and drop the cache for our
5663 	 * range.
5664 	 */
5665 	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5666 	    em->len != len) {
5667 		free_extent_map(em);
5668 		em = NULL;
5669 		insert = true;
5670 		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5671 	}
5672 
5673 	trans = btrfs_join_transaction(root);
5674 	if (IS_ERR(trans))
5675 		return ERR_CAST(trans);
5676 
5677 	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
5678 		btrfs_add_inode_defrag(trans, inode);
5679 
5680 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5681 
5682 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5683 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5684 				   alloc_hint, &ins, 1);
5685 	if (ret) {
5686 		em = ERR_PTR(ret);
5687 		goto out;
5688 	}
5689 
5690 	if (!em) {
5691 		em = alloc_extent_map();
5692 		if (!em) {
5693 			em = ERR_PTR(-ENOMEM);
5694 			goto out;
5695 		}
5696 	}
5697 
5698 	em->start = start;
5699 	em->orig_start = em->start;
5700 	em->len = ins.offset;
5701 
5702 	em->block_start = ins.objectid;
5703 	em->block_len = ins.offset;
5704 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5705 
5706 	/*
5707 	 * We need to do this because if we're using the original em we searched
5708 	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5709 	 */
5710 	em->flags = 0;
5711 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5712 
5713 	while (insert) {
5714 		write_lock(&em_tree->lock);
5715 		ret = add_extent_mapping(em_tree, em);
5716 		write_unlock(&em_tree->lock);
5717 		if (ret != -EEXIST)
5718 			break;
5719 		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5720 	}
5721 
5722 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5723 					   ins.offset, ins.offset, 0);
5724 	if (ret) {
5725 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5726 		em = ERR_PTR(ret);
5727 	}
5728 out:
5729 	btrfs_end_transaction(trans, root);
5730 	return em;
5731 }
5732 
5733 /*
5734  * returns 1 when the nocow is safe, < 0 on error, 0 if the
5735  * block must be cow'd
5736  */
5737 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5738 				      struct inode *inode, u64 offset, u64 len)
5739 {
5740 	struct btrfs_path *path;
5741 	int ret;
5742 	struct extent_buffer *leaf;
5743 	struct btrfs_root *root = BTRFS_I(inode)->root;
5744 	struct btrfs_file_extent_item *fi;
5745 	struct btrfs_key key;
5746 	u64 disk_bytenr;
5747 	u64 backref_offset;
5748 	u64 extent_end;
5749 	u64 num_bytes;
5750 	int slot;
5751 	int found_type;
5752 
5753 	path = btrfs_alloc_path();
5754 	if (!path)
5755 		return -ENOMEM;
5756 
5757 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5758 				       offset, 0);
5759 	if (ret < 0)
5760 		goto out;
5761 
5762 	slot = path->slots[0];
5763 	if (ret == 1) {
5764 		if (slot == 0) {
5765 			/* can't find the item, must cow */
5766 			ret = 0;
5767 			goto out;
5768 		}
5769 		slot--;
5770 	}
5771 	ret = 0;
5772 	leaf = path->nodes[0];
5773 	btrfs_item_key_to_cpu(leaf, &key, slot);
5774 	if (key.objectid != btrfs_ino(inode) ||
5775 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5776 		/* not our file or wrong item type, must cow */
5777 		goto out;
5778 	}
5779 
5780 	if (key.offset > offset) {
5781 		/* Wrong offset, must cow */
5782 		goto out;
5783 	}
5784 
5785 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5786 	found_type = btrfs_file_extent_type(leaf, fi);
5787 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5788 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5789 		/* not a regular extent, must cow */
5790 		goto out;
5791 	}
5792 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5793 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5794 
5795 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5796 	if (extent_end < offset + len) {
5797 		/* extent doesn't include our full range, must cow */
5798 		goto out;
5799 	}
5800 
5801 	if (btrfs_extent_readonly(root, disk_bytenr))
5802 		goto out;
5803 
5804 	/*
5805 	 * look for other files referencing this extent, if we
5806 	 * find any we must cow
5807 	 */
5808 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5809 				  key.offset - backref_offset, disk_bytenr))
5810 		goto out;
5811 
5812 	/*
5813 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5814 	 * in this extent we are about to write.  If there
5815 	 * are any csums in that range we have to cow in order
5816 	 * to keep the csums correct
5817 	 */
5818 	disk_bytenr += backref_offset;
5819 	disk_bytenr += offset - key.offset;
5820 	num_bytes = min(offset + len, extent_end) - offset;
5821 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5822 				goto out;
5823 	/*
5824 	 * all of the above have passed, it is safe to overwrite this extent
5825 	 * without cow
5826 	 */
5827 	ret = 1;
5828 out:
5829 	btrfs_free_path(path);
5830 	return ret;
5831 }
5832 
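/*
 * Lock the extent range for direct IO, waiting for any ordered extents
 * and, for writes, flushing and invalidating overlapping buffered pages.
 * A non-zero return means the page cache couldn't be invalidated and the
 * caller must fall back to buffered IO.
 */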
5833 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
5834 			      struct extent_state **cached_state, int writing)
5835 {
5836 	struct btrfs_ordered_extent *ordered;
5837 	int ret = 0;
5838 
5839 	while (1) {
5840 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5841 				 0, cached_state);
5842 		/*
5843 		 * We're concerned with the entire range that we're going to be
5844 		 * doing DIO to, so we need to make sure there are no ordered
5845 		 * extents in this range.
5846 		 */
5847 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
5848 						     lockend - lockstart + 1);
5849 
5850 		/*
5851 		 * We need to make sure there are no buffered pages in this
5852 		 * range either; we could have raced between the invalidate in
5853 		 * generic_file_direct_write and locking the extent.  The
5854 		 * invalidate needs to happen so that reads after a write do not
5855 		 * get stale data.
5856 		 */
5857 		if (!ordered && (!writing ||
5858 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
5859 				    lockstart, lockend, EXTENT_UPTODATE, 0,
5860 				    *cached_state)))
5861 			break;
5862 
5863 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5864 				     cached_state, GFP_NOFS);
5865 
5866 		if (ordered) {
5867 			btrfs_start_ordered_extent(inode, ordered, 1);
5868 			btrfs_put_ordered_extent(ordered);
5869 		} else {
5870 			/* Screw you mmap */
5871 			ret = filemap_write_and_wait_range(inode->i_mapping,
5872 							   lockstart,
5873 							   lockend);
5874 			if (ret)
5875 				break;
5876 
5877 			/*
5878 			 * If we found a page that couldn't be invalidated just
5879 			 * fall back to buffered.
5880 			 */
5881 			ret = invalidate_inode_pages2_range(inode->i_mapping,
5882 					lockstart >> PAGE_CACHE_SHIFT,
5883 					lockend >> PAGE_CACHE_SHIFT);
5884 			if (ret)
5885 				break;
5886 		}
5887 
5888 		cond_resched();
5889 	}
5890 
5891 	return ret;
5892 }
5893 
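/*
 * Build a pinned extent map for a dio write and insert it into the
 * inode's extent tree, dropping any cached mappings that overlap the
 * range first.
 */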
5894 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
5895 					   u64 len, u64 orig_start,
5896 					   u64 block_start, u64 block_len,
5897 					   int type)
5898 {
5899 	struct extent_map_tree *em_tree;
5900 	struct extent_map *em;
5901 	struct btrfs_root *root = BTRFS_I(inode)->root;
5902 	int ret;
5903 
5904 	em_tree = &BTRFS_I(inode)->extent_tree;
5905 	em = alloc_extent_map();
5906 	if (!em)
5907 		return ERR_PTR(-ENOMEM);
5908 
5909 	em->start = start;
5910 	em->orig_start = orig_start;
5911 	em->len = len;
5912 	em->block_len = block_len;
5913 	em->block_start = block_start;
5914 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5915 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5916 	if (type == BTRFS_ORDERED_PREALLOC)
5917 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5918 
5919 	do {
5920 		btrfs_drop_extent_cache(inode, em->start,
5921 				em->start + em->len - 1, 0);
5922 		write_lock(&em_tree->lock);
5923 		ret = add_extent_mapping(em_tree, em);
5924 		write_unlock(&em_tree->lock);
5925 	} while (ret == -EEXIST);
5926 
5927 	if (ret) {
5928 		free_extent_map(em);
5929 		return ERR_PTR(ret);
5930 	}
5931 
5932 	return em;
5933 }
5934 
5935 
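/*
 * Our get_block_t callback for __blockdev_direct_IO(): map the requested
 * range to an existing extent when nocow/prealloc makes that safe,
 * otherwise allocate a new one, and fill in the buffer_head the generic
 * dio code uses to drive the IO.
 */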
5936 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5937 				   struct buffer_head *bh_result, int create)
5938 {
5939 	struct extent_map *em;
5940 	struct btrfs_root *root = BTRFS_I(inode)->root;
5941 	struct extent_state *cached_state = NULL;
5942 	u64 start = iblock << inode->i_blkbits;
5943 	u64 lockstart, lockend;
5944 	u64 len = bh_result->b_size;
5945 	struct btrfs_trans_handle *trans;
5946 	int unlock_bits = EXTENT_LOCKED;
5947 	int ret;
5948 
5949 	if (create) {
5950 		ret = btrfs_delalloc_reserve_space(inode, len);
5951 		if (ret)
5952 			return ret;
5953 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
5954 	} else {
5955 		len = min_t(u64, len, root->sectorsize);
5956 	}
5957 
5958 	lockstart = start;
5959 	lockend = start + len - 1;
5960 
5961 	/*
5962 	 * If this errors out it's because we couldn't invalidate pagecache for
5963 	 * this range and we need to fall back to buffered.
5964 	 */
5965 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
5966 		return -ENOTBLK;
5967 
5968 	if (create) {
5969 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
5970 				     lockend, EXTENT_DELALLOC, NULL,
5971 				     &cached_state, GFP_NOFS);
5972 		if (ret)
5973 			goto unlock_err;
5974 	}
5975 
5976 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5977 	if (IS_ERR(em)) {
5978 		ret = PTR_ERR(em);
5979 		goto unlock_err;
5980 	}
5981 
5982 	/*
5983 	 * Ok, for INLINE and COMPRESSED extents we need to fall back on
5984 	 * buffered io.  INLINE is special, and we could probably kludge it in
5985 	 * here, but it's still buffered so for safety let's just fall back to
5986 	 * the generic buffered path.
5987 	 *
5988 	 * For COMPRESSED we _have_ to read the entire extent in so we can
5989 	 * decompress it, so there will be buffering required no matter what we
5990 	 * do, so go ahead and fallback to buffered.
5991 	 *
5992 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
5993 	 * to buffered IO.  Don't blame me, this is the price we pay for using
5994 	 * the generic code.
5995 	 */
5996 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5997 	    em->block_start == EXTENT_MAP_INLINE) {
5998 		free_extent_map(em);
5999 		ret = -ENOTBLK;
6000 		goto unlock_err;
6001 	}
6002 
6003 	/* Just a good old fashioned hole, return */
6004 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6005 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6006 		free_extent_map(em);
6007 		ret = 0;
6008 		goto unlock_err;
6009 	}
6010 
6011 	/*
6012 	 * We don't allocate a new extent in the following cases
6013 	 *
6014 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6015 	 * existing extent.
6016 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6017 	 * just use the extent.
6018 	 *
6019 	 */
6020 	if (!create) {
6021 		len = min(len, em->len - (start - em->start));
6022 		lockstart = start + len;
6023 		goto unlock;
6024 	}
6025 
6026 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6027 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6028 	     em->block_start != EXTENT_MAP_HOLE)) {
6029 		int type;
6030 		int ret;
6031 		u64 block_start;
6032 
6033 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6034 			type = BTRFS_ORDERED_PREALLOC;
6035 		else
6036 			type = BTRFS_ORDERED_NOCOW;
6037 		len = min(len, em->len - (start - em->start));
6038 		block_start = em->block_start + (start - em->start);
6039 
6040 		/*
6041 		 * we're not going to log anything, but we do need
6042 		 * to make sure the current transaction stays open
6043 		 * while we look for nocow cross refs
6044 		 */
6045 		trans = btrfs_join_transaction(root);
6046 		if (IS_ERR(trans))
6047 			goto must_cow;
6048 
6049 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
6050 			u64 orig_start = em->start;
6051 
6052 			if (type == BTRFS_ORDERED_PREALLOC) {
6053 				free_extent_map(em);
6054 				em = create_pinned_em(inode, start, len,
6055 						       orig_start,
6056 						       block_start, len, type);
6057 				if (IS_ERR(em)) {
6058 					btrfs_end_transaction(trans, root);
6059 					goto unlock_err;
6060 				}
6061 			}
6062 
6063 			ret = btrfs_add_ordered_extent_dio(inode, start,
6064 					   block_start, len, len, type);
6065 			btrfs_end_transaction(trans, root);
6066 			if (ret) {
6067 				free_extent_map(em);
6068 				goto unlock_err;
6069 			}
6070 			goto unlock;
6071 		}
6072 		btrfs_end_transaction(trans, root);
6073 	}
6074 must_cow:
6075 	/*
6076 	 * this will cow the extent; reset the len in case we changed
6077 	 * it above
6078 	 */
6079 	len = bh_result->b_size;
6080 	em = btrfs_new_extent_direct(inode, em, start, len);
6081 	if (IS_ERR(em)) {
6082 		ret = PTR_ERR(em);
6083 		goto unlock_err;
6084 	}
6085 	len = min(len, em->len - (start - em->start));
6086 unlock:
6087 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6088 		inode->i_blkbits;
6089 	bh_result->b_size = len;
6090 	bh_result->b_bdev = em->bdev;
6091 	set_buffer_mapped(bh_result);
6092 	if (create) {
6093 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6094 			set_buffer_new(bh_result);
6095 
6096 		/*
6097 		 * Need to update the i_size under the extent lock so buffered
6098 		 * readers will get the updated i_size when we unlock.
6099 		 */
6100 		if (start + len > i_size_read(inode))
6101 			i_size_write(inode, start + len);
6102 	}
6103 
6104 	/*
6105 	 * In the case of write we need to clear and unlock the entire range;
6106 	 * in the case of read we need to unlock only the end area that we
6107 	 * aren't using if there is any leftover space.
6108 	 */
6109 	if (lockstart < lockend) {
6110 		if (create && len < lockend - lockstart) {
6111 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6112 					 lockstart + len - 1,
6113 					 unlock_bits | EXTENT_DEFRAG, 1, 0,
6114 					 &cached_state, GFP_NOFS);
6115 			/*
6116 			 * Besides unlocking, we also need to clean up the reserved
6117 			 * space for the remaining range by attaching EXTENT_DO_ACCOUNTING.
6118 			 */
6119 			clear_extent_bit(&BTRFS_I(inode)->io_tree,
6120 					 lockstart + len, lockend,
6121 					 unlock_bits | EXTENT_DO_ACCOUNTING |
6122 					 EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
6123 		} else {
6124 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6125 					 lockend, unlock_bits, 1, 0,
6126 					 &cached_state, GFP_NOFS);
6127 		}
6128 	} else {
6129 		free_extent_state(cached_state);
6130 	}
6131 
6132 	free_extent_map(em);
6133 
6134 	return 0;
6135 
6136 unlock_err:
6137 	if (create)
6138 		unlock_bits |= EXTENT_DO_ACCOUNTING;
6139 
6140 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6141 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6142 	return ret;
6143 }
6144 
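/*
 * Private state for one direct IO request; shared by every bio the dio is
 * split into and freed by the endio handler once all of them complete.
 */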
6145 struct btrfs_dio_private {
6146 	struct inode *inode;
6147 	u64 logical_offset;
6148 	u64 disk_bytenr;
6149 	u64 bytes;
6150 	void *private;
6151 
6152 	/* number of bios pending for this dio */
6153 	atomic_t pending_bios;
6154 
6155 	/* IO errors */
6156 	int errors;
6157 
6158 	struct bio *orig_bio;
6159 };
6160 
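/*
 * Bio completion for direct reads: verify each segment's csum against the
 * value stashed in the io_tree, unlock the extent range and complete the
 * original dio bio.
 */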
6161 static void btrfs_endio_direct_read(struct bio *bio, int err)
6162 {
6163 	struct btrfs_dio_private *dip = bio->bi_private;
6164 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6165 	struct bio_vec *bvec = bio->bi_io_vec;
6166 	struct inode *inode = dip->inode;
6167 	struct btrfs_root *root = BTRFS_I(inode)->root;
6168 	u64 start;
6169 
6170 	start = dip->logical_offset;
6171 	do {
6172 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6173 			struct page *page = bvec->bv_page;
6174 			char *kaddr;
6175 			u32 csum = ~(u32)0;
6176 			u64 private = ~(u32)0;
6177 			unsigned long flags;
6178 
6179 			if (get_state_private(&BTRFS_I(inode)->io_tree,
6180 					      start, &private))
6181 				goto failed;
6182 			local_irq_save(flags);
6183 			kaddr = kmap_atomic(page);
6184 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
6185 					       csum, bvec->bv_len);
6186 			btrfs_csum_final(csum, (char *)&csum);
6187 			kunmap_atomic(kaddr);
6188 			local_irq_restore(flags);
6189 
6190 			flush_dcache_page(bvec->bv_page);
6191 			if (csum != private) {
6192 failed:
6193 				printk(KERN_ERR "btrfs csum failed ino %llu off"
6194 				      " %llu csum %u private %u\n",
6195 				      (unsigned long long)btrfs_ino(inode),
6196 				      (unsigned long long)start,
6197 				      csum, (unsigned)private);
6198 				err = -EIO;
6199 			}
6200 		}
6201 
6202 		start += bvec->bv_len;
6203 		bvec++;
6204 	} while (bvec <= bvec_end);
6205 
6206 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6207 		      dip->logical_offset + dip->bytes - 1);
6208 	bio->bi_private = dip->private;
6209 
6210 	kfree(dip);
6211 
6212 	/* If we had a csum failure make sure to clear the uptodate flag */
6213 	if (err)
6214 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6215 	dio_end_io(bio, err);
6216 }
6217 
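/*
 * Bio completion for direct writes: mark the ordered extent(s) covering
 * the range complete, queue the finishing work to the endio write
 * workers, then complete the original dio bio.
 */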
6218 static void btrfs_endio_direct_write(struct bio *bio, int err)
6219 {
6220 	struct btrfs_dio_private *dip = bio->bi_private;
6221 	struct inode *inode = dip->inode;
6222 	struct btrfs_root *root = BTRFS_I(inode)->root;
6223 	struct btrfs_ordered_extent *ordered = NULL;
6224 	u64 ordered_offset = dip->logical_offset;
6225 	u64 ordered_bytes = dip->bytes;
6226 	int ret;
6227 
6228 	if (err)
6229 		goto out_done;
6230 again:
6231 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6232 						   &ordered_offset,
6233 						   ordered_bytes, !err);
6234 	if (!ret)
6235 		goto out_test;
6236 
6237 	ordered->work.func = finish_ordered_fn;
6238 	ordered->work.flags = 0;
6239 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6240 			   &ordered->work);
6241 out_test:
6242 	/*
6243 	 * our bio might span multiple ordered extents.  If we haven't
6244 	 * completed the accounting for the whole dio, go back and try again
6245 	 */
6246 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6247 		ordered_bytes = dip->logical_offset + dip->bytes -
6248 			ordered_offset;
6249 		ordered = NULL;
6250 		goto again;
6251 	}
6252 out_done:
6253 	bio->bi_private = dip->private;
6254 
6255 	kfree(dip);
6256 
6257 	/* If we had an error make sure to clear the uptodate flag */
6258 	if (err)
6259 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6260 	dio_end_io(bio, err);
6261 }
6262 
6263 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6264 				    struct bio *bio, int mirror_num,
6265 				    unsigned long bio_flags, u64 offset)
6266 {
6267 	int ret;
6268 	struct btrfs_root *root = BTRFS_I(inode)->root;
6269 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6270 	BUG_ON(ret); /* -ENOMEM */
6271 	return 0;
6272 }
6273 
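/*
 * Completion handler for the split dio bios: record any error and, when
 * the last pending bio finishes, complete or fail the original bio.
 */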
6274 static void btrfs_end_dio_bio(struct bio *bio, int err)
6275 {
6276 	struct btrfs_dio_private *dip = bio->bi_private;
6277 
6278 	if (err) {
6279 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6280 		      "sector %#Lx len %u err no %d\n",
6281 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
6282 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6283 		dip->errors = 1;
6284 
6285 		/*
6286 		 * before the atomic variable goes to zero, we must make sure
6287 		 * dip->errors is perceived to be set.
6288 		 */
6289 		smp_mb__before_atomic_dec();
6290 	}
6291 
6292 	/* if there are more bios still pending for this dio, just exit */
6293 	if (!atomic_dec_and_test(&dip->pending_bios))
6294 		goto out;
6295 
6296 	if (dip->errors)
6297 		bio_io_error(dip->orig_bio);
6298 	else {
6299 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
6300 		bio_endio(dip->orig_bio, 0);
6301 	}
6302 out:
6303 	bio_put(bio);
6304 }
6305 
6306 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6307 				       u64 first_sector, gfp_t gfp_flags)
6308 {
6309 	int nr_vecs = bio_get_nr_vecs(bdev);
6310 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6311 }
6312 
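/*
 * Submit one piece of a dio bio: route read completions through the
 * endio workqueue, csum writes (inline or via the async submit workers),
 * look up csums for reads, then map and submit the bio.
 */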
6313 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6314 					 int rw, u64 file_offset, int skip_sum,
6315 					 int async_submit)
6316 {
6317 	int write = rw & REQ_WRITE;
6318 	struct btrfs_root *root = BTRFS_I(inode)->root;
6319 	int ret;
6320 
6321 	bio_get(bio);
6322 
6323 	if (!write) {
6324 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6325 		if (ret)
6326 			goto err;
6327 	}
6328 
6329 	if (skip_sum)
6330 		goto map;
6331 
6332 	if (write && async_submit) {
6333 		ret = btrfs_wq_submit_bio(root->fs_info,
6334 				   inode, rw, bio, 0, 0,
6335 				   file_offset,
6336 				   __btrfs_submit_bio_start_direct_io,
6337 				   __btrfs_submit_bio_done);
6338 		goto err;
6339 	} else if (write) {
6340 		/*
6341 		 * If we aren't doing async submit, calculate the csum of the
6342 		 * bio now.
6343 		 */
6344 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6345 		if (ret)
6346 			goto err;
6347 	} else if (!skip_sum) {
6348 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
6349 		if (ret)
6350 			goto err;
6351 	}
6352 
6353 map:
6354 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
6355 err:
6356 	bio_put(bio);
6357 	return ret;
6358 }
6359 
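/*
 * Split the dio bio wherever it crosses a chunk mapping boundary
 * (btrfs_map_block() tells us how far each mapping extends) and submit
 * the pieces; pending_bios keeps the dip alive until the last one
 * completes.
 */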
6360 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6361 				    int skip_sum)
6362 {
6363 	struct inode *inode = dip->inode;
6364 	struct btrfs_root *root = BTRFS_I(inode)->root;
6365 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6366 	struct bio *bio;
6367 	struct bio *orig_bio = dip->orig_bio;
6368 	struct bio_vec *bvec = orig_bio->bi_io_vec;
6369 	u64 start_sector = orig_bio->bi_sector;
6370 	u64 file_offset = dip->logical_offset;
6371 	u64 submit_len = 0;
6372 	u64 map_length;
6373 	int nr_pages = 0;
6374 	int ret = 0;
6375 	int async_submit = 0;
6376 
6377 	map_length = orig_bio->bi_size;
6378 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6379 			      &map_length, NULL, 0);
6380 	if (ret) {
6381 		bio_put(orig_bio);
6382 		return -EIO;
6383 	}
6384 
6385 	if (map_length >= orig_bio->bi_size) {
6386 		bio = orig_bio;
6387 		goto submit;
6388 	}
6389 
6390 	async_submit = 1;
6391 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6392 	if (!bio)
6393 		return -ENOMEM;
6394 	bio->bi_private = dip;
6395 	bio->bi_end_io = btrfs_end_dio_bio;
6396 	atomic_inc(&dip->pending_bios);
6397 
6398 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6399 		if (unlikely(map_length < submit_len + bvec->bv_len ||
6400 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6401 				 bvec->bv_offset) < bvec->bv_len)) {
6402 			/*
6403 			 * inc the count before we submit the bio so the end
6404 			 * IO handler can't run before the count is bumped.
6405 			 * Otherwise, the dip might get freed
6406 			 * before we're done setting it up
6407 			 */
6408 			atomic_inc(&dip->pending_bios);
6409 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
6410 						     file_offset, skip_sum,
6411 						     async_submit);
6412 			if (ret) {
6413 				bio_put(bio);
6414 				atomic_dec(&dip->pending_bios);
6415 				goto out_err;
6416 			}
6417 
6418 			start_sector += submit_len >> 9;
6419 			file_offset += submit_len;
6420 
6421 			submit_len = 0;
6422 			nr_pages = 0;
6423 
6424 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6425 						  start_sector, GFP_NOFS);
6426 			if (!bio)
6427 				goto out_err;
6428 			bio->bi_private = dip;
6429 			bio->bi_end_io = btrfs_end_dio_bio;
6430 
6431 			map_length = orig_bio->bi_size;
6432 			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6433 					      &map_length, NULL, 0);
6434 			if (ret) {
6435 				bio_put(bio);
6436 				goto out_err;
6437 			}
6438 		} else {
6439 			submit_len += bvec->bv_len;
6440 			nr_pages++;
6441 			bvec++;
6442 		}
6443 	}
6444 
6445 submit:
6446 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6447 				     async_submit);
6448 	if (!ret)
6449 		return 0;
6450 
6451 	bio_put(bio);
6452 out_err:
6453 	dip->errors = 1;
6454 	/*
6455 	 * before the atomic variable goes to zero, we must
6456 	 * make sure dip->errors is perceived to be set.
6457 	 */
6458 	smp_mb__before_atomic_dec();
6459 	if (atomic_dec_and_test(&dip->pending_bios))
6460 		bio_io_error(dip->orig_bio);
6461 
6462 	/* bio_end_io() will handle the error, so we needn't return it */
6463 	return 0;
6464 }
6465 
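/*
 * Entry point handed to __blockdev_direct_IO(): set up the dio private
 * structure, pick the read or write completion handler and pass the bio
 * to btrfs_submit_direct_hook().
 */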
6466 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6467 				loff_t file_offset)
6468 {
6469 	struct btrfs_root *root = BTRFS_I(inode)->root;
6470 	struct btrfs_dio_private *dip;
6471 	struct bio_vec *bvec = bio->bi_io_vec;
6472 	int skip_sum;
6473 	int write = rw & REQ_WRITE;
6474 	int ret = 0;
6475 
6476 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6477 
6478 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6479 	if (!dip) {
6480 		ret = -ENOMEM;
6481 		goto free_ordered;
6482 	}
6483 
6484 	dip->private = bio->bi_private;
6485 	dip->inode = inode;
6486 	dip->logical_offset = file_offset;
6487 
6488 	dip->bytes = 0;
6489 	do {
6490 		dip->bytes += bvec->bv_len;
6491 		bvec++;
6492 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6493 
6494 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6495 	bio->bi_private = dip;
6496 	dip->errors = 0;
6497 	dip->orig_bio = bio;
6498 	atomic_set(&dip->pending_bios, 0);
6499 
6500 	if (write)
6501 		bio->bi_end_io = btrfs_endio_direct_write;
6502 	else
6503 		bio->bi_end_io = btrfs_endio_direct_read;
6504 
6505 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6506 	if (!ret)
6507 		return;
6508 free_ordered:
6509 	/*
6510 	 * If this is a write, we need to clean up the reserved space and kill
6511 	 * the ordered extent.
6512 	 */
6513 	if (write) {
6514 		struct btrfs_ordered_extent *ordered;
6515 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6516 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6517 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6518 			btrfs_free_reserved_extent(root, ordered->start,
6519 						   ordered->disk_len);
6520 		btrfs_put_ordered_extent(ordered);
6521 		btrfs_put_ordered_extent(ordered);
6522 	}
6523 	bio_endio(bio, ret);
6524 }
6525 
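/*
 * Validate dio alignment: the file offset and every iovec must be
 * sector-aligned, and for reads we also reject duplicate iov_base
 * addresses, which would otherwise trip csum verification on read back.
 */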
6526 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6527 			const struct iovec *iov, loff_t offset,
6528 			unsigned long nr_segs)
6529 {
6530 	int seg;
6531 	int i;
6532 	size_t size;
6533 	unsigned long addr;
6534 	unsigned blocksize_mask = root->sectorsize - 1;
6535 	ssize_t retval = -EINVAL;
6536 	loff_t end = offset;
6537 
6538 	if (offset & blocksize_mask)
6539 		goto out;
6540 
6541 	/* Check the memory alignment.  Blocks cannot straddle pages */
6542 	for (seg = 0; seg < nr_segs; seg++) {
6543 		addr = (unsigned long)iov[seg].iov_base;
6544 		size = iov[seg].iov_len;
6545 		end += size;
6546 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6547 			goto out;
6548 
6549 		/* If this is a write we don't need to check anymore */
6550 		if (rw & WRITE)
6551 			continue;
6552 
6553 		/*
6554 		 * Check to make sure we don't have duplicate iov_base's in this
6555 		 * iovec; if so return -EINVAL, otherwise we'll get csum errors
6556 		 * when reading back.
6557 		 */
6558 		for (i = seg + 1; i < nr_segs; i++) {
6559 			if (iov[seg].iov_base == iov[i].iov_base)
6560 				goto out;
6561 		}
6562 	}
6563 	retval = 0;
6564 out:
6565 	return retval;
6566 }
6567 
6568 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6569 			const struct iovec *iov, loff_t offset,
6570 			unsigned long nr_segs)
6571 {
6572 	struct file *file = iocb->ki_filp;
6573 	struct inode *inode = file->f_mapping->host;
6574 
6575 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6576 			    offset, nr_segs))
6577 		return 0;
6578 
6579 	return __blockdev_direct_IO(rw, iocb, inode,
6580 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6581 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6582 		   btrfs_submit_direct, 0);
6583 }
6584 
6585 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6586 		__u64 start, __u64 len)
6587 {
6588 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6589 }
6590 
6591 int btrfs_readpage(struct file *file, struct page *page)
6592 {
6593 	struct extent_io_tree *tree;
6594 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6595 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6596 }
6597 
6598 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6599 {
6600 	struct extent_io_tree *tree;
6601 
6602 
6603 	if (current->flags & PF_MEMALLOC) {
6604 		redirty_page_for_writepage(wbc, page);
6605 		unlock_page(page);
6606 		return 0;
6607 	}
6608 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6609 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6610 }
6611 
6612 int btrfs_writepages(struct address_space *mapping,
6613 		     struct writeback_control *wbc)
6614 {
6615 	struct extent_io_tree *tree;
6616 
6617 	tree = &BTRFS_I(mapping->host)->io_tree;
6618 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6619 }
6620 
6621 static int
6622 btrfs_readpages(struct file *file, struct address_space *mapping,
6623 		struct list_head *pages, unsigned nr_pages)
6624 {
6625 	struct extent_io_tree *tree;
6626 	tree = &BTRFS_I(mapping->host)->io_tree;
6627 	return extent_readpages(tree, mapping, pages, nr_pages,
6628 				btrfs_get_extent);
6629 }
6630 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6631 {
6632 	struct extent_io_tree *tree;
6633 	struct extent_map_tree *map;
6634 	int ret;
6635 
6636 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6637 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6638 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6639 	if (ret == 1) {
6640 		ClearPagePrivate(page);
6641 		set_page_private(page, 0);
6642 		page_cache_release(page);
6643 	}
6644 	return ret;
6645 }
6646 
6647 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6648 {
6649 	if (PageWriteback(page) || PageDirty(page))
6650 		return 0;
6651 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6652 }
6653 
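/*
 * Called when a page is being dropped from the page cache.  Wait for any
 * writeback, account for ordered extents that will never see IO, clear
 * our extent state for the range and release the page's private state.
 */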
6654 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6655 {
6656 	struct inode *inode = page->mapping->host;
6657 	struct extent_io_tree *tree;
6658 	struct btrfs_ordered_extent *ordered;
6659 	struct extent_state *cached_state = NULL;
6660 	u64 page_start = page_offset(page);
6661 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6662 
6663 	/*
6664 	 * we have the page locked, so new writeback can't start,
6665 	 * and the dirty bit won't be cleared while we are here.
6666 	 *
6667 	 * Wait for IO on this page so that we can safely clear
6668 	 * the PagePrivate2 bit and do ordered accounting
6669 	 */
6670 	wait_on_page_writeback(page);
6671 
6672 	tree = &BTRFS_I(inode)->io_tree;
6673 	if (offset) {
6674 		btrfs_releasepage(page, GFP_NOFS);
6675 		return;
6676 	}
6677 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6678 	ordered = btrfs_lookup_ordered_extent(inode,
6679 					   page_offset(page));
6680 	if (ordered) {
6681 		/*
6682 		 * IO on this page will never be started, so we need
6683 		 * to account for any ordered extents now
6684 		 */
6685 		clear_extent_bit(tree, page_start, page_end,
6686 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6687 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
6688 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
6689 		/*
6690 		 * whoever cleared the private bit is responsible
6691 		 * for the finish_ordered_io
6692 		 */
6693 		if (TestClearPagePrivate2(page) &&
6694 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
6695 						   PAGE_CACHE_SIZE, 1)) {
6696 			btrfs_finish_ordered_io(ordered);
6697 		}
6698 		btrfs_put_ordered_extent(ordered);
6699 		cached_state = NULL;
6700 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6701 	}
6702 	clear_extent_bit(tree, page_start, page_end,
6703 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6704 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
6705 		 &cached_state, GFP_NOFS);
6706 	__btrfs_releasepage(page, GFP_NOFS);
6707 
6708 	ClearPageChecked(page);
6709 	if (PagePrivate(page)) {
6710 		ClearPagePrivate(page);
6711 		set_page_private(page, 0);
6712 		page_cache_release(page);
6713 	}
6714 }
6715 
6716 /*
6717  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6718  * called from a page fault handler when a page is first dirtied. Hence we must
6719  * be careful to check for EOF conditions here. We set the page up correctly
6720  * for a written page which means we get ENOSPC checking when writing into
6721  * holes and correct delalloc and unwritten extent mapping on filesystems that
6722  * support these features.
6723  *
6724  * We are not allowed to take the i_mutex here so we have to play games to
6725  * protect against truncate races as the page could now be beyond EOF.  Because
6726  * vmtruncate() writes the inode size before removing pages, once we have the
6727  * page lock we can determine safely if the page is beyond EOF. If it is not
6728  * beyond EOF, then the page is guaranteed safe against truncation until we
6729  * unlock the page.
6730  */
6731 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6732 {
6733 	struct page *page = vmf->page;
6734 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6735 	struct btrfs_root *root = BTRFS_I(inode)->root;
6736 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6737 	struct btrfs_ordered_extent *ordered;
6738 	struct extent_state *cached_state = NULL;
6739 	char *kaddr;
6740 	unsigned long zero_start;
6741 	loff_t size;
6742 	int ret;
6743 	int reserved = 0;
6744 	u64 page_start;
6745 	u64 page_end;
6746 
6747 	sb_start_pagefault(inode->i_sb);
6748 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6749 	if (!ret) {
6750 		ret = file_update_time(vma->vm_file);
6751 		reserved = 1;
6752 	}
6753 	if (ret) {
6754 		if (ret == -ENOMEM)
6755 			ret = VM_FAULT_OOM;
6756 		else /* -ENOSPC, -EIO, etc */
6757 			ret = VM_FAULT_SIGBUS;
6758 		if (reserved)
6759 			goto out;
6760 		goto out_noreserve;
6761 	}
6762 
6763 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6764 again:
6765 	lock_page(page);
6766 	size = i_size_read(inode);
6767 	page_start = page_offset(page);
6768 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6769 
6770 	if ((page->mapping != inode->i_mapping) ||
6771 	    (page_start >= size)) {
6772 		/* page got truncated out from underneath us */
6773 		goto out_unlock;
6774 	}
6775 	wait_on_page_writeback(page);
6776 
6777 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6778 	set_page_extent_mapped(page);
6779 
6780 	/*
6781 	 * we can't set the delalloc bits if there are pending ordered
6782 	 * extents.  Drop our locks and wait for them to finish
6783 	 */
6784 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6785 	if (ordered) {
6786 		unlock_extent_cached(io_tree, page_start, page_end,
6787 				     &cached_state, GFP_NOFS);
6788 		unlock_page(page);
6789 		btrfs_start_ordered_extent(inode, ordered, 1);
6790 		btrfs_put_ordered_extent(ordered);
6791 		goto again;
6792 	}
6793 
6794 	/*
6795 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6796 	 * if it was already dirty, so for space accounting reasons we need to
6797 	 * clear any delalloc bits for the range we are fixing to save.  There
6798 	 * is probably a better way to do this, but for now keep consistent with
6799 	 * prepare_pages in the normal write path.
6800 	 */
6801 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6802 			  EXTENT_DIRTY | EXTENT_DELALLOC |
6803 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
6804 			  0, 0, &cached_state, GFP_NOFS);
6805 
6806 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6807 					&cached_state);
6808 	if (ret) {
6809 		unlock_extent_cached(io_tree, page_start, page_end,
6810 				     &cached_state, GFP_NOFS);
6811 		ret = VM_FAULT_SIGBUS;
6812 		goto out_unlock;
6813 	}
6814 	ret = 0;
6815 
6816 	/* page is wholly or partially inside EOF */
6817 	if (page_start + PAGE_CACHE_SIZE > size)
6818 		zero_start = size & ~PAGE_CACHE_MASK;
6819 	else
6820 		zero_start = PAGE_CACHE_SIZE;
6821 
6822 	if (zero_start != PAGE_CACHE_SIZE) {
6823 		kaddr = kmap(page);
6824 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6825 		flush_dcache_page(page);
6826 		kunmap(page);
6827 	}
6828 	ClearPageChecked(page);
6829 	set_page_dirty(page);
6830 	SetPageUptodate(page);
6831 
6832 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6833 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6834 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
6835 
6836 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6837 
6838 out_unlock:
6839 	if (!ret) {
6840 		sb_end_pagefault(inode->i_sb);
6841 		return VM_FAULT_LOCKED;
6842 	}
6843 	unlock_page(page);
6844 out:
6845 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6846 out_noreserve:
6847 	sb_end_pagefault(inode->i_sb);
6848 	return ret;
6849 }
6850 
6851 static int btrfs_truncate(struct inode *inode)
6852 {
6853 	struct btrfs_root *root = BTRFS_I(inode)->root;
6854 	struct btrfs_block_rsv *rsv;
6855 	int ret;
6856 	int err = 0;
6857 	struct btrfs_trans_handle *trans;
6858 	unsigned long nr;
6859 	u64 mask = root->sectorsize - 1;
6860 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
6861 
6862 	ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
6863 	if (ret)
6864 		return ret;
6865 
6866 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6867 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6868 
6869 	/*
6870 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
6871 	 * 3 things going on here
6872 	 *
6873 	 * 1) We need to reserve space for our orphan item and the space to
6874 	 * delete our orphan item.  Lord knows we don't want to have a dangling
6875 	 * orphan item because we didn't reserve space to remove it.
6876 	 *
6877 	 * 2) We need to reserve space to update our inode.
6878 	 *
6879 	 * 3) We need to have something to cache all the space that is going to
6880 	 * be free'd up by the truncate operation, but also have some slack
6881 	 * be freed up by the truncate operation, but also have some slack
6882 	 * very much snapshotting).
6883 	 *
6884 	 * And we need these to all be separate.  The fact is we can use a lot
6885 	 * of space doing the truncate, and we have no earthly idea how much
6886 	 * space we will use, so we need the truncate reservation to be separate
6887 	 * so it doesn't end up using space reserved for updating the inode or
6888 	 * removing the orphan item.  We also need to be able to stop the
6889 	 * transaction and start a new one, which means we need to be able to
6890 	 * update the inode several times, and we have no way of knowing how
6891 	 * many times that will be, so we can't just reserve 1 item for the
6892 	 * entirety of the operation, so that has to be done separately as well.
6893 	 * Then there is the orphan item, which does indeed need to be held on
6894 	 * to for the whole operation, and we need nobody to touch this reserved
6895 	 * space except the orphan code.
6896 	 *
6897 	 * So that leaves us with
6898 	 *
6899 	 * 1) root->orphan_block_rsv - for the orphan deletion.
6900 	 * 2) rsv - for the truncate reservation, which we will steal from the
6901 	 * transaction reservation.
6902 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
6903 	 * updating the inode.
6904 	 */
6905 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
6906 	if (!rsv)
6907 		return -ENOMEM;
6908 	rsv->size = min_size;
6909 	rsv->failfast = 1;
6910 
6911 	/*
6912 	 * 1 for the truncate slack space
6913 	 * 1 for the orphan item we're going to add
6914 	 * 1 for the orphan item deletion
6915 	 * 1 for updating the inode.
6916 	 */
6917 	trans = btrfs_start_transaction(root, 4);
6918 	if (IS_ERR(trans)) {
6919 		err = PTR_ERR(trans);
6920 		goto out;
6921 	}
6922 
6923 	/* Migrate the slack space for the truncate to our reserve */
6924 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6925 				      min_size);
6926 	BUG_ON(ret);
6927 
6928 	ret = btrfs_orphan_add(trans, inode);
6929 	if (ret) {
6930 		btrfs_end_transaction(trans, root);
6931 		goto out;
6932 	}
6933 
6934 	/*
6935 	 * setattr is responsible for setting the ordered_data_close flag,
6936 	 * but that is only tested during the last file release.  That
6937 	 * could happen well after the next commit, leaving a great big
6938 	 * window where new writes may get lost if someone chooses to write
6939 	 * to this file after truncating to zero
6940 	 *
6941 	 * The inode doesn't have any dirty data here, and so if we commit
6942 	 * this is a noop.  If someone immediately starts writing to the inode
6943 	 * it is very likely we'll catch some of their writes in this
6944 	 * transaction, and the commit will find this file on the ordered
6945 	 * data list with good things to send down.
6946 	 *
6947 	 * This is a best effort solution, there is still a window where
6948 	 * using truncate to replace the contents of the file will
6949 	 * end up with a zero length file after a crash.
6950 	 */
6951 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
6952 					   &BTRFS_I(inode)->runtime_flags))
6953 		btrfs_add_ordered_operation(trans, root, inode);
6954 
6955 	/*
6956 	 * So if we truncate and then write and fsync we normally would just
6957 	 * write the extents that changed, which is a problem if we need to
6958 	 * first truncate that entire inode.  So set this flag so we write out
6959 	 * all of the extents in the inode to the sync log so we're completely
6960 	 * safe.
6961 	 */
6962 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6963 	trans->block_rsv = rsv;
6964 
6965 	while (1) {
6966 		ret = btrfs_truncate_inode_items(trans, root, inode,
6967 						 inode->i_size,
6968 						 BTRFS_EXTENT_DATA_KEY);
6969 		if (ret != -ENOSPC) {
6970 			err = ret;
6971 			break;
6972 		}
6973 
6974 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6975 		ret = btrfs_update_inode(trans, root, inode);
6976 		if (ret) {
6977 			err = ret;
6978 			break;
6979 		}
6980 
6981 		nr = trans->blocks_used;
6982 		btrfs_end_transaction(trans, root);
6983 		btrfs_btree_balance_dirty(root, nr);
6984 
6985 		trans = btrfs_start_transaction(root, 2);
6986 		if (IS_ERR(trans)) {
6987 			ret = err = PTR_ERR(trans);
6988 			trans = NULL;
6989 			break;
6990 		}
6991 
6992 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
6993 					      rsv, min_size);
6994 		BUG_ON(ret);	/* shouldn't happen */
6995 		trans->block_rsv = rsv;
6996 	}
6997 
6998 	if (ret == 0 && inode->i_nlink > 0) {
6999 		trans->block_rsv = root->orphan_block_rsv;
7000 		ret = btrfs_orphan_del(trans, inode);
7001 		if (ret)
7002 			err = ret;
7003 	} else if (ret && inode->i_nlink > 0) {
7004 		/*
7005 		 * Failed to do the truncate, remove us from the in-memory
7006 		 * orphan list.
7007 		 */
7008 		ret = btrfs_orphan_del(NULL, inode);
7009 	}
7010 
7011 	if (trans) {
7012 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7013 		ret = btrfs_update_inode(trans, root, inode);
7014 		if (ret && !err)
7015 			err = ret;
7016 
7017 		nr = trans->blocks_used;
7018 		ret = btrfs_end_transaction(trans, root);
7019 		btrfs_btree_balance_dirty(root, nr);
7020 	}
7021 
7022 out:
7023 	btrfs_free_block_rsv(root, rsv);
7024 
7025 	if (ret && !err)
7026 		err = ret;
7027 
7028 	return err;
7029 }
7030 
7031 /*
7032  * create a new subvolume directory/inode (helper for the ioctl).
7033  */
7034 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7035 			     struct btrfs_root *new_root, u64 new_dirid)
7036 {
7037 	struct inode *inode;
7038 	int err;
7039 	u64 index = 0;
7040 
7041 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7042 				new_dirid, new_dirid,
7043 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7044 				&index);
7045 	if (IS_ERR(inode))
7046 		return PTR_ERR(inode);
7047 	inode->i_op = &btrfs_dir_inode_operations;
7048 	inode->i_fop = &btrfs_dir_file_operations;
7049 
7050 	set_nlink(inode, 1);
7051 	btrfs_i_size_write(inode, 0);
7052 
7053 	err = btrfs_update_inode(trans, new_root, inode);
7054 
7055 	iput(inode);
7056 	return err;
7057 }
7058 
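/*
 * Allocate and initialize the in-memory representation of a btrfs inode;
 * this is our ->alloc_inode super operation.
 */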
7059 struct inode *btrfs_alloc_inode(struct super_block *sb)
7060 {
7061 	struct btrfs_inode *ei;
7062 	struct inode *inode;
7063 
7064 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7065 	if (!ei)
7066 		return NULL;
7067 
7068 	ei->root = NULL;
7069 	ei->generation = 0;
7070 	ei->last_trans = 0;
7071 	ei->last_sub_trans = 0;
7072 	ei->logged_trans = 0;
7073 	ei->delalloc_bytes = 0;
7074 	ei->disk_i_size = 0;
7075 	ei->flags = 0;
7076 	ei->csum_bytes = 0;
7077 	ei->index_cnt = (u64)-1;
7078 	ei->last_unlink_trans = 0;
7079 	ei->last_log_commit = 0;
7080 
7081 	spin_lock_init(&ei->lock);
7082 	ei->outstanding_extents = 0;
7083 	ei->reserved_extents = 0;
7084 
7085 	ei->runtime_flags = 0;
7086 	ei->force_compress = BTRFS_COMPRESS_NONE;
7087 
7088 	ei->delayed_node = NULL;
7089 
7090 	inode = &ei->vfs_inode;
7091 	extent_map_tree_init(&ei->extent_tree);
7092 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7093 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7094 	ei->io_tree.track_uptodate = 1;
7095 	ei->io_failure_tree.track_uptodate = 1;
7096 	mutex_init(&ei->log_mutex);
7097 	mutex_init(&ei->delalloc_mutex);
7098 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7099 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7100 	INIT_LIST_HEAD(&ei->ordered_operations);
7101 	RB_CLEAR_NODE(&ei->rb_node);
7102 
7103 	return inode;
7104 }
7105 
7106 static void btrfs_i_callback(struct rcu_head *head)
7107 {
7108 	struct inode *inode = container_of(head, struct inode, i_rcu);
7109 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7110 }
7111 
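/*
 * Tear down an in-memory inode: warn about leaked reservations, drop it
 * from the ordered operations list, complain about (and drop) any ordered
 * extents left at cleanup time, then free the inode via RCU.
 */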
7112 void btrfs_destroy_inode(struct inode *inode)
7113 {
7114 	struct btrfs_ordered_extent *ordered;
7115 	struct btrfs_root *root = BTRFS_I(inode)->root;
7116 
7117 	WARN_ON(!hlist_empty(&inode->i_dentry));
7118 	WARN_ON(inode->i_data.nrpages);
7119 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7120 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7121 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7122 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7123 
7124 	/*
7125 	 * This can happen when we create an inode, but somebody else also
7126 	 * created the same inode and we need to destroy the one we already
7127 	 * created.
7128 	 */
7129 	if (!root)
7130 		goto free;
7131 
7132 	/*
7133 	 * Make sure we're properly removed from the ordered operation
7134 	 * lists.
7135 	 */
7136 	smp_mb();
7137 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7138 		spin_lock(&root->fs_info->ordered_extent_lock);
7139 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7140 		spin_unlock(&root->fs_info->ordered_extent_lock);
7141 	}
7142 
7143 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7144 		     &BTRFS_I(inode)->runtime_flags)) {
7145 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
7146 		       (unsigned long long)btrfs_ino(inode));
7147 		atomic_dec(&root->orphan_inodes);
7148 	}
7149 
7150 	while (1) {
7151 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7152 		if (!ordered)
7153 			break;
7154 		else {
7155 			printk(KERN_ERR "btrfs found ordered "
7156 			       "extent %llu %llu on inode cleanup\n",
7157 			       (unsigned long long)ordered->file_offset,
7158 			       (unsigned long long)ordered->len);
7159 			btrfs_remove_ordered_extent(inode, ordered);
7160 			btrfs_put_ordered_extent(ordered);
7161 			btrfs_put_ordered_extent(ordered);
7162 		}
7163 	}
7164 	inode_tree_del(inode);
7165 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7166 free:
7167 	btrfs_remove_delayed_node(inode);
7168 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7169 }
7170 
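/*
 * Tell the VFS to drop the inode immediately when its root is slated for
 * deletion (zero root refs), except for free space inodes, which are
 * still needed while the root is cleaned up.
 */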
7171 int btrfs_drop_inode(struct inode *inode)
7172 {
7173 	struct btrfs_root *root = BTRFS_I(inode)->root;
7174 
7175 	if (btrfs_root_refs(&root->root_item) == 0 &&
7176 	    !btrfs_is_free_space_inode(inode))
7177 		return 1;
7178 	else
7179 		return generic_drop_inode(inode);
7180 }
7181 
7182 static void init_once(void *foo)
7183 {
7184 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7185 
7186 	inode_init_once(&ei->vfs_inode);
7187 }
7188 
7189 void btrfs_destroy_cachep(void)
7190 {
7191 	/*
7192 	 * Make sure all delayed RCU-freed inodes are flushed before we
7193 	 * destroy the caches.
7194 	 */
7195 	rcu_barrier();
7196 	if (btrfs_inode_cachep)
7197 		kmem_cache_destroy(btrfs_inode_cachep);
7198 	if (btrfs_trans_handle_cachep)
7199 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7200 	if (btrfs_transaction_cachep)
7201 		kmem_cache_destroy(btrfs_transaction_cachep);
7202 	if (btrfs_path_cachep)
7203 		kmem_cache_destroy(btrfs_path_cachep);
7204 	if (btrfs_free_space_cachep)
7205 		kmem_cache_destroy(btrfs_free_space_cachep);
7206 }
7207 
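/*
 * Create every slab cache btrfs needs.  On any failure the caches that
 * were already set up are torn down again and -ENOMEM is returned.
 */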
7208 int btrfs_init_cachep(void)
7209 {
7210 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7211 			sizeof(struct btrfs_inode), 0,
7212 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7213 	if (!btrfs_inode_cachep)
7214 		goto fail;
7215 
7216 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
7217 			sizeof(struct btrfs_trans_handle), 0,
7218 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7219 	if (!btrfs_trans_handle_cachep)
7220 		goto fail;
7221 
7222 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
7223 			sizeof(struct btrfs_transaction), 0,
7224 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7225 	if (!btrfs_transaction_cachep)
7226 		goto fail;
7227 
7228 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
7229 			sizeof(struct btrfs_path), 0,
7230 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7231 	if (!btrfs_path_cachep)
7232 		goto fail;
7233 
7234 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
7235 			sizeof(struct btrfs_free_space), 0,
7236 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7237 	if (!btrfs_free_space_cachep)
7238 		goto fail;
7239 
7240 	return 0;
7241 fail:
7242 	btrfs_destroy_cachep();
7243 	return -ENOMEM;
7244 }
7245 
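/*
 * stat(2) for btrfs: report the per-subvolume anonymous device so each
 * subvolume shows up with its own st_dev, and fold outstanding delalloc
 * bytes into st_blocks (in 512-byte units) so buffered writes are
 * accounted before their extents are allocated on disk.
 */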
7246 static int btrfs_getattr(struct vfsmount *mnt,
7247 			 struct dentry *dentry, struct kstat *stat)
7248 {
7249 	struct inode *inode = dentry->d_inode;
7250 	u32 blocksize = inode->i_sb->s_blocksize;
7251 
7252 	generic_fillattr(inode, stat);
7253 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7254 	stat->blksize = PAGE_CACHE_SIZE;
7255 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7256 		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
7257 	return 0;
7258 }
7259 
7260 /*
7261  * If a file is moved, it will inherit the cow and compression flags of the new
7262  * directory.
7263  */
7264 static void fixup_inode_flags(struct inode *dir, struct inode *inode)
7265 {
7266 	struct btrfs_inode *b_dir = BTRFS_I(dir);
7267 	struct btrfs_inode *b_inode = BTRFS_I(inode);
7268 
7269 	if (b_dir->flags & BTRFS_INODE_NODATACOW)
7270 		b_inode->flags |= BTRFS_INODE_NODATACOW;
7271 	else
7272 		b_inode->flags &= ~BTRFS_INODE_NODATACOW;
7273 
7274 	if (b_dir->flags & BTRFS_INODE_COMPRESS) {
7275 		b_inode->flags |= BTRFS_INODE_COMPRESS;
7276 		b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
7277 	} else {
7278 		b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
7279 				    BTRFS_INODE_NOCOMPRESS);
7280 	}
7281 }
7282 
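/*
 * Rename one directory entry to another.  Ordinary inodes can only be
 * renamed within a single subvolume (anything else returns -EXDEV);
 * renaming a subvolume link forces a full log commit, while normal
 * renames pin the tree log so a crash leaves the inode reachable under
 * either the old name or the new one.
 */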
7283 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7284 			   struct inode *new_dir, struct dentry *new_dentry)
7285 {
7286 	struct btrfs_trans_handle *trans;
7287 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7288 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7289 	struct inode *new_inode = new_dentry->d_inode;
7290 	struct inode *old_inode = old_dentry->d_inode;
7291 	struct timespec ctime = CURRENT_TIME;
7292 	u64 index = 0;
7293 	u64 root_objectid;
7294 	int ret;
7295 	u64 old_ino = btrfs_ino(old_inode);
7296 
7297 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
7298 		return -EPERM;
7299 
7300 	/* we only allow rename subvolume link between subvolumes */
7301 	/* we only allow renaming subvolume links between subvolumes */
7302 		return -EXDEV;
7303 
7304 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
7305 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
7306 		return -ENOTEMPTY;
7307 
7308 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
7309 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
7310 		return -ENOTEMPTY;
7311 	/*
7312 	 * We're using rename to replace one file with another, and the
7313 	 * replacement file is large.  Start IO on it now so we don't add
7314 	 * too much work to the end of the transaction.
7315 	 */
7316 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
7317 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
7318 		filemap_flush(old_inode->i_mapping);
7319 
7320 	/* close the racy window with snapshot create/destroy ioctl */
7321 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7322 		down_read(&root->fs_info->subvol_sem);
7323 	/*
7324 	 * We want to reserve the absolute worst case amount of items.  So if
7325 	 * both inodes are subvols and we need to unlink them then that would
7326 	 * require 4 item modifications, but if they are both normal inodes it
7327 	 * would require 5 item modifications, so we'll assume their normal
7328 	 * would require 5 item modifications, so we'll assume they're normal
7329 	 * should cover the worst case number of items we'll modify.
7330 	 */
7331 	trans = btrfs_start_transaction(root, 20);
7332 	if (IS_ERR(trans)) {
7333 		ret = PTR_ERR(trans);
7334 		goto out_notrans;
7335 	}
7336 
7337 	if (dest != root)
7338 		btrfs_record_root_in_trans(trans, dest);
7339 
7340 	ret = btrfs_set_inode_index(new_dir, &index);
7341 	if (ret)
7342 		goto out_fail;
7343 
7344 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7345 		/* force full log commit if subvolume involved. */
7346 		root->fs_info->last_trans_log_full_commit = trans->transid;
7347 	} else {
7348 		ret = btrfs_insert_inode_ref(trans, dest,
7349 					     new_dentry->d_name.name,
7350 					     new_dentry->d_name.len,
7351 					     old_ino,
7352 					     btrfs_ino(new_dir), index);
7353 		if (ret)
7354 			goto out_fail;
7355 		/*
7356 		 * This is an ugly little race, but the rename is required
7357 		 * to make sure that if we crash, the inode is either at the
7358 		 * old name or the new one.  Pinning the log transaction lets
7359 		 * us make sure we don't allow a log commit to come in after
7360 		 * we unlink the name but before we add the new name back in.
7361 		 */
7362 		btrfs_pin_log_trans(root);
7363 	}
7364 	/*
7365 	 * make sure the inode gets flushed if it is replacing
7366 	 * something.
7367 	 */
7368 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7369 		btrfs_add_ordered_operation(trans, root, old_inode);
7370 
7371 	inode_inc_iversion(old_dir);
7372 	inode_inc_iversion(new_dir);
7373 	inode_inc_iversion(old_inode);
7374 	old_dir->i_ctime = old_dir->i_mtime = ctime;
7375 	new_dir->i_ctime = new_dir->i_mtime = ctime;
7376 	old_inode->i_ctime = ctime;
7377 
7378 	if (old_dentry->d_parent != new_dentry->d_parent)
7379 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7380 
7381 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7382 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7383 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7384 					old_dentry->d_name.name,
7385 					old_dentry->d_name.len);
7386 	} else {
7387 		ret = __btrfs_unlink_inode(trans, root, old_dir,
7388 					old_dentry->d_inode,
7389 					old_dentry->d_name.name,
7390 					old_dentry->d_name.len);
7391 		if (!ret)
7392 			ret = btrfs_update_inode(trans, root, old_inode);
7393 	}
7394 	if (ret) {
7395 		btrfs_abort_transaction(trans, root, ret);
7396 		goto out_fail;
7397 	}
7398 
7399 	if (new_inode) {
7400 		inode_inc_iversion(new_inode);
7401 		new_inode->i_ctime = CURRENT_TIME;
7402 		if (unlikely(btrfs_ino(new_inode) ==
7403 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7404 			root_objectid = BTRFS_I(new_inode)->location.objectid;
7405 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
7406 						root_objectid,
7407 						new_dentry->d_name.name,
7408 						new_dentry->d_name.len);
7409 			BUG_ON(new_inode->i_nlink == 0);
7410 		} else {
7411 			ret = btrfs_unlink_inode(trans, dest, new_dir,
7412 						 new_dentry->d_inode,
7413 						 new_dentry->d_name.name,
7414 						 new_dentry->d_name.len);
7415 		}
7416 		if (!ret && new_inode->i_nlink == 0) {
7417 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7418 			BUG_ON(ret);
7419 		}
7420 		if (ret) {
7421 			btrfs_abort_transaction(trans, root, ret);
7422 			goto out_fail;
7423 		}
7424 	}
7425 
7426 	fixup_inode_flags(new_dir, old_inode);
7427 
7428 	ret = btrfs_add_link(trans, new_dir, old_inode,
7429 			     new_dentry->d_name.name,
7430 			     new_dentry->d_name.len, 0, index);
7431 	if (ret) {
7432 		btrfs_abort_transaction(trans, root, ret);
7433 		goto out_fail;
7434 	}
7435 
7436 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7437 		struct dentry *parent = new_dentry->d_parent;
7438 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
7439 		btrfs_end_log_trans(root);
7440 	}
7441 out_fail:
7442 	btrfs_end_transaction(trans, root);
7443 out_notrans:
7444 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7445 		up_read(&root->fs_info->subvol_sem);
7446 
7447 	return ret;
7448 }
7449 
7450 /*
7451  * Some fairly slow code that needs optimization. This walks the list
7452  * of all the inodes with pending delalloc and forces them to disk.
7453  */
7454 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7455 {
7456 	struct list_head *head = &root->fs_info->delalloc_inodes;
7457 	struct btrfs_inode *binode;
7458 	struct inode *inode;
7459 
7460 	if (root->fs_info->sb->s_flags & MS_RDONLY)
7461 		return -EROFS;
7462 
7463 	spin_lock(&root->fs_info->delalloc_lock);
7464 	while (!list_empty(head)) {
7465 		binode = list_entry(head->next, struct btrfs_inode,
7466 				    delalloc_inodes);
7467 		inode = igrab(&binode->vfs_inode);
7468 		if (!inode)
7469 			list_del_init(&binode->delalloc_inodes);
7470 		spin_unlock(&root->fs_info->delalloc_lock);
7471 		if (inode) {
7472 			filemap_flush(inode->i_mapping);
7473 			if (delay_iput)
7474 				btrfs_add_delayed_iput(inode);
7475 			else
7476 				iput(inode);
7477 		}
7478 		cond_resched();
7479 		spin_lock(&root->fs_info->delalloc_lock);
7480 	}
7481 	spin_unlock(&root->fs_info->delalloc_lock);
7482 
7483 	/* the filemap_flush will queue IO into the worker threads, but
7484 	 * we have to make sure the IO is actually started and that
7485 	 * ordered extents get created before we return
7486 	 */
7487 	atomic_inc(&root->fs_info->async_submit_draining);
7488 	while (atomic_read(&root->fs_info->nr_async_submits) ||
7489 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7490 		wait_event(root->fs_info->async_submit_wait,
7491 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
7492 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7493 	}
7494 	atomic_dec(&root->fs_info->async_submit_draining);
7495 	return 0;
7496 }
7497 
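/*
 * Create a symlink.  The target string is stored as an inline file
 * extent in the fs tree, which is why its length is limited to
 * BTRFS_MAX_INLINE_DATA_SIZE() rather than PATH_MAX.
 */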
7498 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7499 			 const char *symname)
7500 {
7501 	struct btrfs_trans_handle *trans;
7502 	struct btrfs_root *root = BTRFS_I(dir)->root;
7503 	struct btrfs_path *path;
7504 	struct btrfs_key key;
7505 	struct inode *inode = NULL;
7506 	int err;
7507 	int drop_inode = 0;
7508 	u64 objectid;
7509 	u64 index = 0;
7510 	int name_len;
7511 	int datasize;
7512 	unsigned long ptr;
7513 	struct btrfs_file_extent_item *ei;
7514 	struct extent_buffer *leaf;
7515 	unsigned long nr = 0;
7516 
7517 	name_len = strlen(symname) + 1;
7518 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7519 		return -ENAMETOOLONG;
7520 
7521 	/*
7522 	 * 2 items for inode item and ref
7523 	 * 2 items for dir items
7524 	 * 1 item for xattr if selinux is on
7525 	 */
7526 	trans = btrfs_start_transaction(root, 5);
7527 	if (IS_ERR(trans))
7528 		return PTR_ERR(trans);
7529 
7530 	err = btrfs_find_free_ino(root, &objectid);
7531 	if (err)
7532 		goto out_unlock;
7533 
7534 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7535 				dentry->d_name.len, btrfs_ino(dir), objectid,
7536 				S_IFLNK|S_IRWXUGO, &index);
7537 	if (IS_ERR(inode)) {
7538 		err = PTR_ERR(inode);
7539 		goto out_unlock;
7540 	}
7541 
7542 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7543 	if (err) {
7544 		drop_inode = 1;
7545 		goto out_unlock;
7546 	}
7547 
7548 	/*
7549 	 * If the active LSM wants to access the inode during
7550 	 * d_instantiate it needs these. Smack checks to see
7551 	 * if the filesystem supports xattrs by looking at the
7552 	 * ops vector.
7553 	 */
7554 	inode->i_fop = &btrfs_file_operations;
7555 	inode->i_op = &btrfs_file_inode_operations;
7556 
7557 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7558 	if (err) {
7559 		drop_inode = 1;
7560 	} else {
7561 		inode->i_mapping->a_ops = &btrfs_aops;
7562 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7563 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7564 	}
7565 	if (drop_inode)
7566 		goto out_unlock;
7567 
7568 	path = btrfs_alloc_path();
7569 	if (!path) {
7570 		err = -ENOMEM;
7571 		drop_inode = 1;
7572 		goto out_unlock;
7573 	}
7574 	key.objectid = btrfs_ino(inode);
7575 	key.offset = 0;
7576 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7577 	datasize = btrfs_file_extent_calc_inline_size(name_len);
7578 	err = btrfs_insert_empty_item(trans, root, path, &key,
7579 				      datasize);
7580 	if (err) {
7581 		drop_inode = 1;
7582 		btrfs_free_path(path);
7583 		goto out_unlock;
7584 	}
7585 	leaf = path->nodes[0];
7586 	ei = btrfs_item_ptr(leaf, path->slots[0],
7587 			    struct btrfs_file_extent_item);
7588 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
7589 	btrfs_set_file_extent_type(leaf, ei,
7590 				   BTRFS_FILE_EXTENT_INLINE);
7591 	btrfs_set_file_extent_encryption(leaf, ei, 0);
7592 	btrfs_set_file_extent_compression(leaf, ei, 0);
7593 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
7594 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
7595 
7596 	ptr = btrfs_file_extent_inline_start(ei);
7597 	write_extent_buffer(leaf, symname, ptr, name_len);
7598 	btrfs_mark_buffer_dirty(leaf);
7599 	btrfs_free_path(path);
7600 
7601 	inode->i_op = &btrfs_symlink_inode_operations;
7602 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7603 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7604 	inode_set_bytes(inode, name_len);
7605 	btrfs_i_size_write(inode, name_len - 1);
7606 	err = btrfs_update_inode(trans, root, inode);
7607 	if (err)
7608 		drop_inode = 1;
7609 
7610 out_unlock:
7611 	if (!err)
7612 		d_instantiate(dentry, inode);
7613 	nr = trans->blocks_used;
7614 	btrfs_end_transaction(trans, root);
7615 	if (drop_inode) {
7616 		inode_dec_link_count(inode);
7617 		iput(inode);
7618 	}
7619 	btrfs_btree_balance_dirty(root, nr);
7620 	return err;
7621 }
7622 
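/*
 * Worker for fallocate-style preallocation: walk the range reserving
 * extents of at least min_size, insert each one as a PREALLOC file
 * extent, cache a matching extent_map, and update i_size as we go
 * unless FALLOC_FL_KEEP_SIZE was given.  When no transaction handle is
 * passed in, a small transaction is started and ended per extent.
 */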
7623 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7624 				       u64 start, u64 num_bytes, u64 min_size,
7625 				       loff_t actual_len, u64 *alloc_hint,
7626 				       struct btrfs_trans_handle *trans)
7627 {
7628 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
7629 	struct extent_map *em;
7630 	struct btrfs_root *root = BTRFS_I(inode)->root;
7631 	struct btrfs_key ins;
7632 	u64 cur_offset = start;
7633 	u64 i_size;
7634 	int ret = 0;
7635 	bool own_trans = true;
7636 
7637 	if (trans)
7638 		own_trans = false;
7639 	while (num_bytes > 0) {
7640 		if (own_trans) {
7641 			trans = btrfs_start_transaction(root, 3);
7642 			if (IS_ERR(trans)) {
7643 				ret = PTR_ERR(trans);
7644 				break;
7645 			}
7646 		}
7647 
7648 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7649 					   0, *alloc_hint, &ins, 1);
7650 		if (ret) {
7651 			if (own_trans)
7652 				btrfs_end_transaction(trans, root);
7653 			break;
7654 		}
7655 
7656 		ret = insert_reserved_file_extent(trans, inode,
7657 						  cur_offset, ins.objectid,
7658 						  ins.offset, ins.offset,
7659 						  ins.offset, 0, 0, 0,
7660 						  BTRFS_FILE_EXTENT_PREALLOC);
7661 		if (ret) {
7662 			btrfs_abort_transaction(trans, root, ret);
7663 			if (own_trans)
7664 				btrfs_end_transaction(trans, root);
7665 			break;
7666 		}
7667 		btrfs_drop_extent_cache(inode, cur_offset,
7668 					cur_offset + ins.offset - 1, 0);
7669 
7670 		em = alloc_extent_map();
7671 		if (!em) {
7672 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
7673 				&BTRFS_I(inode)->runtime_flags);
7674 			goto next;
7675 		}
7676 
7677 		em->start = cur_offset;
7678 		em->orig_start = cur_offset;
7679 		em->len = ins.offset;
7680 		em->block_start = ins.objectid;
7681 		em->block_len = ins.offset;
7682 		em->bdev = root->fs_info->fs_devices->latest_bdev;
7683 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7684 		em->generation = trans->transid;
7685 
7686 		while (1) {
7687 			write_lock(&em_tree->lock);
7688 			ret = add_extent_mapping(em_tree, em);
7689 			if (!ret)
7690 				list_move(&em->list,
7691 					  &em_tree->modified_extents);
7692 			write_unlock(&em_tree->lock);
7693 			if (ret != -EEXIST)
7694 				break;
7695 			btrfs_drop_extent_cache(inode, cur_offset,
7696 						cur_offset + ins.offset - 1,
7697 						0);
7698 		}
7699 		free_extent_map(em);
7700 next:
7701 		num_bytes -= ins.offset;
7702 		cur_offset += ins.offset;
7703 		*alloc_hint = ins.objectid + ins.offset;
7704 
7705 		inode_inc_iversion(inode);
7706 		inode->i_ctime = CURRENT_TIME;
7707 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7708 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7709 		    (actual_len > inode->i_size) &&
7710 		    (cur_offset > inode->i_size)) {
7711 			if (cur_offset > actual_len)
7712 				i_size = actual_len;
7713 			else
7714 				i_size = cur_offset;
7715 			i_size_write(inode, i_size);
7716 			btrfs_ordered_update_i_size(inode, i_size, NULL);
7717 		}
7718 
7719 		ret = btrfs_update_inode(trans, root, inode);
7720 
7721 		if (ret) {
7722 			btrfs_abort_transaction(trans, root, ret);
7723 			if (own_trans)
7724 				btrfs_end_transaction(trans, root);
7725 			break;
7726 		}
7727 
7728 		if (own_trans)
7729 			btrfs_end_transaction(trans, root);
7730 	}
7731 	return ret;
7732 }
7733 
7734 int btrfs_prealloc_file_range(struct inode *inode, int mode,
7735 			      u64 start, u64 num_bytes, u64 min_size,
7736 			      loff_t actual_len, u64 *alloc_hint)
7737 {
7738 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7739 					   min_size, actual_len, alloc_hint,
7740 					   NULL);
7741 }
7742 
7743 int btrfs_prealloc_file_range_trans(struct inode *inode,
7744 				    struct btrfs_trans_handle *trans, int mode,
7745 				    u64 start, u64 num_bytes, u64 min_size,
7746 				    loff_t actual_len, u64 *alloc_hint)
7747 {
7748 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7749 					   min_size, actual_len, alloc_hint, trans);
7750 }
7751 
7752 static int btrfs_set_page_dirty(struct page *page)
7753 {
7754 	return __set_page_dirty_nobuffers(page);
7755 }
7756 
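/*
 * Deny write access to regular files, directories and symlinks when the
 * subvolume is read-only or the inode carries the btrfs read-only flag;
 * special files are skipped since writing them doesn't modify the
 * filesystem.  Everything else is left to generic_permission().
 */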
7757 static int btrfs_permission(struct inode *inode, int mask)
7758 {
7759 	struct btrfs_root *root = BTRFS_I(inode)->root;
7760 	umode_t mode = inode->i_mode;
7761 
7762 	if (mask & MAY_WRITE &&
7763 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7764 		if (btrfs_root_readonly(root))
7765 			return -EROFS;
7766 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7767 			return -EACCES;
7768 	}
7769 	return generic_permission(inode, mask);
7770 }
7771 
7772 static const struct inode_operations btrfs_dir_inode_operations = {
7773 	.getattr	= btrfs_getattr,
7774 	.lookup		= btrfs_lookup,
7775 	.create		= btrfs_create,
7776 	.unlink		= btrfs_unlink,
7777 	.link		= btrfs_link,
7778 	.mkdir		= btrfs_mkdir,
7779 	.rmdir		= btrfs_rmdir,
7780 	.rename		= btrfs_rename,
7781 	.symlink	= btrfs_symlink,
7782 	.setattr	= btrfs_setattr,
7783 	.mknod		= btrfs_mknod,
7784 	.setxattr	= btrfs_setxattr,
7785 	.getxattr	= btrfs_getxattr,
7786 	.listxattr	= btrfs_listxattr,
7787 	.removexattr	= btrfs_removexattr,
7788 	.permission	= btrfs_permission,
7789 	.get_acl	= btrfs_get_acl,
7790 };
7791 static const struct inode_operations btrfs_dir_ro_inode_operations = {
7792 	.lookup		= btrfs_lookup,
7793 	.permission	= btrfs_permission,
7794 	.get_acl	= btrfs_get_acl,
7795 };
7796 
7797 static const struct file_operations btrfs_dir_file_operations = {
7798 	.llseek		= generic_file_llseek,
7799 	.read		= generic_read_dir,
7800 	.readdir	= btrfs_real_readdir,
7801 	.unlocked_ioctl	= btrfs_ioctl,
7802 #ifdef CONFIG_COMPAT
7803 	.compat_ioctl	= btrfs_ioctl,
7804 #endif
7805 	.release        = btrfs_release_file,
7806 	.fsync		= btrfs_sync_file,
7807 };
7808 
7809 static struct extent_io_ops btrfs_extent_io_ops = {
7810 	.fill_delalloc = run_delalloc_range,
7811 	.submit_bio_hook = btrfs_submit_bio_hook,
7812 	.merge_bio_hook = btrfs_merge_bio_hook,
7813 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7814 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7815 	.writepage_start_hook = btrfs_writepage_start_hook,
7816 	.set_bit_hook = btrfs_set_bit_hook,
7817 	.clear_bit_hook = btrfs_clear_bit_hook,
7818 	.merge_extent_hook = btrfs_merge_extent_hook,
7819 	.split_extent_hook = btrfs_split_extent_hook,
7820 };
7821 
7822 /*
7823  * btrfs doesn't support the bmap operation because swapfiles
7824  * use bmap to make a mapping of extents in the file.  They assume
7825  * these extents won't change over the life of the file and they
7826  * use the bmap result to do IO directly to the drive.
7827  *
7828  * The btrfs bmap call would return logical addresses that aren't
7829  * suitable for IO and they also will change frequently as COW
7830  * operations happen.  So, swapfile + btrfs == corruption.
7831  *
7832  * For now we're avoiding this by dropping bmap.
7833  */
7834 static const struct address_space_operations btrfs_aops = {
7835 	.readpage	= btrfs_readpage,
7836 	.writepage	= btrfs_writepage,
7837 	.writepages	= btrfs_writepages,
7838 	.readpages	= btrfs_readpages,
7839 	.direct_IO	= btrfs_direct_IO,
7840 	.invalidatepage = btrfs_invalidatepage,
7841 	.releasepage	= btrfs_releasepage,
7842 	.set_page_dirty	= btrfs_set_page_dirty,
7843 	.error_remove_page = generic_error_remove_page,
7844 };
7845 
7846 static const struct address_space_operations btrfs_symlink_aops = {
7847 	.readpage	= btrfs_readpage,
7848 	.writepage	= btrfs_writepage,
7849 	.invalidatepage = btrfs_invalidatepage,
7850 	.releasepage	= btrfs_releasepage,
7851 };
7852 
7853 static const struct inode_operations btrfs_file_inode_operations = {
7854 	.getattr	= btrfs_getattr,
7855 	.setattr	= btrfs_setattr,
7856 	.setxattr	= btrfs_setxattr,
7857 	.getxattr	= btrfs_getxattr,
7858 	.listxattr      = btrfs_listxattr,
7859 	.removexattr	= btrfs_removexattr,
7860 	.permission	= btrfs_permission,
7861 	.fiemap		= btrfs_fiemap,
7862 	.get_acl	= btrfs_get_acl,
7863 	.update_time	= btrfs_update_time,
7864 };
7865 static const struct inode_operations btrfs_special_inode_operations = {
7866 	.getattr	= btrfs_getattr,
7867 	.setattr	= btrfs_setattr,
7868 	.permission	= btrfs_permission,
7869 	.setxattr	= btrfs_setxattr,
7870 	.getxattr	= btrfs_getxattr,
7871 	.listxattr	= btrfs_listxattr,
7872 	.removexattr	= btrfs_removexattr,
7873 	.get_acl	= btrfs_get_acl,
7874 	.update_time	= btrfs_update_time,
7875 };
7876 static const struct inode_operations btrfs_symlink_inode_operations = {
7877 	.readlink	= generic_readlink,
7878 	.follow_link	= page_follow_link_light,
7879 	.put_link	= page_put_link,
7880 	.getattr	= btrfs_getattr,
7881 	.setattr	= btrfs_setattr,
7882 	.permission	= btrfs_permission,
7883 	.setxattr	= btrfs_setxattr,
7884 	.getxattr	= btrfs_getxattr,
7885 	.listxattr	= btrfs_listxattr,
7886 	.removexattr	= btrfs_removexattr,
7887 	.get_acl	= btrfs_get_acl,
7888 	.update_time	= btrfs_update_time,
7889 };
7890 
7891 const struct dentry_operations btrfs_dentry_operations = {
7892 	.d_delete	= btrfs_dentry_delete,
7893 	.d_release	= btrfs_dentry_release,
7894 };
7895