xref: /openbmc/linux/fs/btrfs/inode.c (revision b6dcefde)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include "compat.h"
40 #include "ctree.h"
41 #include "disk-io.h"
42 #include "transaction.h"
43 #include "btrfs_inode.h"
44 #include "ioctl.h"
45 #include "print-tree.h"
46 #include "volumes.h"
47 #include "ordered-data.h"
48 #include "xattr.h"
49 #include "tree-log.h"
50 #include "compression.h"
51 #include "locking.h"
52 
53 struct btrfs_iget_args {
54 	u64 ino;
55 	struct btrfs_root *root;
56 };
57 
58 static const struct inode_operations btrfs_dir_inode_operations;
59 static const struct inode_operations btrfs_symlink_inode_operations;
60 static const struct inode_operations btrfs_dir_ro_inode_operations;
61 static const struct inode_operations btrfs_special_inode_operations;
62 static const struct inode_operations btrfs_file_inode_operations;
63 static const struct address_space_operations btrfs_aops;
64 static const struct address_space_operations btrfs_symlink_aops;
65 static const struct file_operations btrfs_dir_file_operations;
66 static struct extent_io_ops btrfs_extent_io_ops;
67 
68 static struct kmem_cache *btrfs_inode_cachep;
69 struct kmem_cache *btrfs_trans_handle_cachep;
70 struct kmem_cache *btrfs_transaction_cachep;
71 struct kmem_cache *btrfs_path_cachep;
72 
73 #define S_SHIFT 12
74 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
75 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
76 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
77 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
78 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
79 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
80 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
81 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
82 };
83 
84 static void btrfs_truncate(struct inode *inode);
85 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
86 static noinline int cow_file_range(struct inode *inode,
87 				   struct page *locked_page,
88 				   u64 start, u64 end, int *page_started,
89 				   unsigned long *nr_written, int unlock);
90 
91 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
92 				     struct inode *inode,  struct inode *dir)
93 {
94 	int err;
95 
96 	err = btrfs_init_acl(trans, inode, dir);
97 	if (!err)
98 		err = btrfs_xattr_security_init(trans, inode, dir);
99 	return err;
100 }
101 
102 /*
103  * this does all the hard work for inserting an inline extent into
104  * the btree.  The caller should have done a btrfs_drop_extents so that
105  * no overlapping inline items exist in the btree
106  */
107 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
108 				struct btrfs_root *root, struct inode *inode,
109 				u64 start, size_t size, size_t compressed_size,
110 				struct page **compressed_pages)
111 {
112 	struct btrfs_key key;
113 	struct btrfs_path *path;
114 	struct extent_buffer *leaf;
115 	struct page *page = NULL;
116 	char *kaddr;
117 	unsigned long ptr;
118 	struct btrfs_file_extent_item *ei;
119 	int err = 0;
120 	int ret;
121 	size_t cur_size = size;
122 	size_t datasize;
123 	unsigned long offset;
124 	int use_compress = 0;
125 
126 	if (compressed_size && compressed_pages) {
127 		use_compress = 1;
128 		cur_size = compressed_size;
129 	}
130 
131 	path = btrfs_alloc_path();
132 	if (!path)
133 		return -ENOMEM;
134 
135 	path->leave_spinning = 1;
136 	btrfs_set_trans_block_group(trans, inode);
137 
138 	key.objectid = inode->i_ino;
139 	key.offset = start;
140 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
141 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
142 
143 	inode_add_bytes(inode, size);
144 	ret = btrfs_insert_empty_item(trans, root, path, &key,
145 				      datasize);
146 	BUG_ON(ret);
147 	if (ret) {
148 		err = ret;
149 		goto fail;
150 	}
151 	leaf = path->nodes[0];
152 	ei = btrfs_item_ptr(leaf, path->slots[0],
153 			    struct btrfs_file_extent_item);
154 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
155 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
156 	btrfs_set_file_extent_encryption(leaf, ei, 0);
157 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
158 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
159 	ptr = btrfs_file_extent_inline_start(ei);
160 
161 	if (use_compress) {
162 		struct page *cpage;
163 		int i = 0;
164 		while (compressed_size > 0) {
165 			cpage = compressed_pages[i];
166 			cur_size = min_t(unsigned long, compressed_size,
167 				       PAGE_CACHE_SIZE);
168 
169 			kaddr = kmap_atomic(cpage, KM_USER0);
170 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
171 			kunmap_atomic(kaddr, KM_USER0);
172 
173 			i++;
174 			ptr += cur_size;
175 			compressed_size -= cur_size;
176 		}
177 		btrfs_set_file_extent_compression(leaf, ei,
178 						  BTRFS_COMPRESS_ZLIB);
179 	} else {
180 		page = find_get_page(inode->i_mapping,
181 				     start >> PAGE_CACHE_SHIFT);
182 		btrfs_set_file_extent_compression(leaf, ei, 0);
183 		kaddr = kmap_atomic(page, KM_USER0);
184 		offset = start & (PAGE_CACHE_SIZE - 1);
185 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
186 		kunmap_atomic(kaddr, KM_USER0);
187 		page_cache_release(page);
188 	}
189 	btrfs_mark_buffer_dirty(leaf);
190 	btrfs_free_path(path);
191 
192 	/*
193 	 * we're an inline extent, so nobody can
194 	 * extend the file past i_size without locking
195 	 * a page we already have locked.
196 	 *
197 	 * We must do any isize and inode updates
198 	 * before we unlock the pages.  Otherwise we
199 	 * could end up racing with unlink.
200 	 */
201 	BTRFS_I(inode)->disk_i_size = inode->i_size;
202 	btrfs_update_inode(trans, root, inode);
203 
204 	return 0;
205 fail:
206 	btrfs_free_path(path);
207 	return err;
208 }
209 
210 
211 /*
212  * conditionally insert an inline extent into the file.  This
213  * does the checks required to make sure the data is small enough
214  * to fit as an inline extent.
215  */
216 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
217 				 struct btrfs_root *root,
218 				 struct inode *inode, u64 start, u64 end,
219 				 size_t compressed_size,
220 				 struct page **compressed_pages)
221 {
222 	u64 isize = i_size_read(inode);
223 	u64 actual_end = min(end + 1, isize);
224 	u64 inline_len = actual_end - start;
225 	u64 aligned_end = (end + root->sectorsize - 1) &
226 			~((u64)root->sectorsize - 1);
227 	u64 hint_byte;
228 	u64 data_len = inline_len;
229 	int ret;
230 
231 	if (compressed_size)
232 		data_len = compressed_size;
233 
234 	if (start > 0 ||
235 	    actual_end >= PAGE_CACHE_SIZE ||
236 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
237 	    (!compressed_size &&
238 	    (actual_end & (root->sectorsize - 1)) == 0) ||
239 	    end + 1 < isize ||
240 	    data_len > root->fs_info->max_inline) {
241 		return 1;
242 	}
243 
244 	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
245 				 &hint_byte, 1);
246 	BUG_ON(ret);
247 
248 	if (isize > actual_end)
249 		inline_len = min_t(u64, isize, actual_end);
250 	ret = insert_inline_extent(trans, root, inode, start,
251 				   inline_len, compressed_size,
252 				   compressed_pages);
253 	BUG_ON(ret);
254 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
255 	return 0;
256 }
257 
258 struct async_extent {
259 	u64 start;
260 	u64 ram_size;
261 	u64 compressed_size;
262 	struct page **pages;
263 	unsigned long nr_pages;
264 	struct list_head list;
265 };
266 
267 struct async_cow {
268 	struct inode *inode;
269 	struct btrfs_root *root;
270 	struct page *locked_page;
271 	u64 start;
272 	u64 end;
273 	struct list_head extents;
274 	struct btrfs_work work;
275 };
276 
277 static noinline int add_async_extent(struct async_cow *cow,
278 				     u64 start, u64 ram_size,
279 				     u64 compressed_size,
280 				     struct page **pages,
281 				     unsigned long nr_pages)
282 {
283 	struct async_extent *async_extent;
284 
285 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
286 	async_extent->start = start;
287 	async_extent->ram_size = ram_size;
288 	async_extent->compressed_size = compressed_size;
289 	async_extent->pages = pages;
290 	async_extent->nr_pages = nr_pages;
291 	list_add_tail(&async_extent->list, &cow->extents);
292 	return 0;
293 }
294 
295 /*
296  * we create compressed extents in two phases.  The first
297  * phase compresses a range of pages that have already been
298  * locked (both pages and state bits are locked).
299  *
300  * This is done inside an ordered work queue, and the compression
301  * is spread across many cpus.  The actual IO submission is step
302  * two, and the ordered work queue takes care of making sure that
303  * happens in the same order things were put onto the queue by
304  * writepages and friends.
305  *
306  * If this code finds it can't get good compression, it puts an
307  * entry onto the work queue to write the uncompressed bytes.  This
308  * makes sure that both compressed inodes and uncompressed inodes
309  * are written in the same order that pdflush sent them down.
310  */
311 static noinline int compress_file_range(struct inode *inode,
312 					struct page *locked_page,
313 					u64 start, u64 end,
314 					struct async_cow *async_cow,
315 					int *num_added)
316 {
317 	struct btrfs_root *root = BTRFS_I(inode)->root;
318 	struct btrfs_trans_handle *trans;
319 	u64 num_bytes;
320 	u64 orig_start;
321 	u64 disk_num_bytes;
322 	u64 blocksize = root->sectorsize;
323 	u64 actual_end;
324 	u64 isize = i_size_read(inode);
325 	int ret = 0;
326 	struct page **pages = NULL;
327 	unsigned long nr_pages;
328 	unsigned long nr_pages_ret = 0;
329 	unsigned long total_compressed = 0;
330 	unsigned long total_in = 0;
331 	unsigned long max_compressed = 128 * 1024;
332 	unsigned long max_uncompressed = 128 * 1024;
333 	int i;
334 	int will_compress;
335 
336 	orig_start = start;
337 
338 	actual_end = min_t(u64, isize, end + 1);
339 again:
340 	will_compress = 0;
341 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
342 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
343 
344 	/*
345 	 * we don't want to send crud past the end of i_size through
346 	 * compression, that's just a waste of CPU time.  So, if the
347 	 * end of the file is before the start of our current
348 	 * requested range of bytes, we bail out to the uncompressed
349 	 * cleanup code that can deal with all of this.
350 	 *
351 	 * It isn't really the fastest way to fix things, but this is a
352 	 * very uncommon corner.
353 	 */
354 	if (actual_end <= start)
355 		goto cleanup_and_bail_uncompressed;
356 
357 	total_compressed = actual_end - start;
358 
359 	/* we want to make sure that amount of ram required to uncompress
360 	 * an extent is reasonable, so we limit the total size in ram
361 	 * of a compressed extent to 128k.  This is a crucial number
362 	 * because it also controls how easily we can spread reads across
363 	 * cpus for decompression.
364 	 *
365 	 * We also want to make sure the amount of IO required to do
366 	 * a random read is reasonably small, so we limit the size of
367 	 * a compressed extent to 128k.
368 	 */
369 	total_compressed = min(total_compressed, max_uncompressed);
370 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
371 	num_bytes = max(blocksize,  num_bytes);
372 	disk_num_bytes = num_bytes;
373 	total_in = 0;
374 	ret = 0;
375 
376 	/*
377 	 * we do compression for mount -o compress and when the
378 	 * inode has not been flagged as nocompress.  This flag can
379 	 * change at any time if we discover bad compression ratios.
380 	 */
381 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
382 	    btrfs_test_opt(root, COMPRESS)) {
383 		WARN_ON(pages);
384 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
385 
386 		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
387 						total_compressed, pages,
388 						nr_pages, &nr_pages_ret,
389 						&total_in,
390 						&total_compressed,
391 						max_compressed);
392 
393 		if (!ret) {
394 			unsigned long offset = total_compressed &
395 				(PAGE_CACHE_SIZE - 1);
396 			struct page *page = pages[nr_pages_ret - 1];
397 			char *kaddr;
398 
399 			/* zero the tail end of the last page, we might be
400 			 * sending it down to disk
401 			 */
402 			if (offset) {
403 				kaddr = kmap_atomic(page, KM_USER0);
404 				memset(kaddr + offset, 0,
405 				       PAGE_CACHE_SIZE - offset);
406 				kunmap_atomic(kaddr, KM_USER0);
407 			}
408 			will_compress = 1;
409 		}
410 	}
411 	if (start == 0) {
412 		trans = btrfs_join_transaction(root, 1);
413 		BUG_ON(!trans);
414 		btrfs_set_trans_block_group(trans, inode);
415 
416 		/* lets try to make an inline extent */
417 		if (ret || total_in < (actual_end - start)) {
418 			/* we didn't compress the entire range, try
419 			 * to make an uncompressed inline extent.
420 			 */
421 			ret = cow_file_range_inline(trans, root, inode,
422 						    start, end, 0, NULL);
423 		} else {
424 			/* try making a compressed inline extent */
425 			ret = cow_file_range_inline(trans, root, inode,
426 						    start, end,
427 						    total_compressed, pages);
428 		}
429 		if (ret == 0) {
430 			/*
431 			 * inline extent creation worked, we don't need
432 			 * to create any more async work items.  Unlock
433 			 * and free up our temp pages.
434 			 */
435 			extent_clear_unlock_delalloc(inode,
436 			     &BTRFS_I(inode)->io_tree,
437 			     start, end, NULL,
438 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
439 			     EXTENT_CLEAR_DELALLOC |
440 			     EXTENT_CLEAR_ACCOUNTING |
441 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
442 
443 			btrfs_end_transaction(trans, root);
444 			goto free_pages_out;
445 		}
446 		btrfs_end_transaction(trans, root);
447 	}
448 
449 	if (will_compress) {
450 		/*
451 		 * we aren't doing an inline extent round the compressed size
452 		 * up to a block size boundary so the allocator does sane
453 		 * things
454 		 */
455 		total_compressed = (total_compressed + blocksize - 1) &
456 			~(blocksize - 1);
457 
458 		/*
459 		 * one last check to make sure the compression is really a
460 		 * win, compare the page count read with the blocks on disk
461 		 */
462 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
463 			~(PAGE_CACHE_SIZE - 1);
464 		if (total_compressed >= total_in) {
465 			will_compress = 0;
466 		} else {
467 			disk_num_bytes = total_compressed;
468 			num_bytes = total_in;
469 		}
470 	}
471 	if (!will_compress && pages) {
472 		/*
473 		 * the compression code ran but failed to make things smaller,
474 		 * free any pages it allocated and our page pointer array
475 		 */
476 		for (i = 0; i < nr_pages_ret; i++) {
477 			WARN_ON(pages[i]->mapping);
478 			page_cache_release(pages[i]);
479 		}
480 		kfree(pages);
481 		pages = NULL;
482 		total_compressed = 0;
483 		nr_pages_ret = 0;
484 
485 		/* flag the file so we don't compress in the future */
486 		if (!btrfs_test_opt(root, FORCE_COMPRESS))
487 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
488 	}
489 	if (will_compress) {
490 		*num_added += 1;
491 
492 		/* the async work queues will take care of doing actual
493 		 * allocation on disk for these compressed pages,
494 		 * and will submit them to the elevator.
495 		 */
496 		add_async_extent(async_cow, start, num_bytes,
497 				 total_compressed, pages, nr_pages_ret);
498 
499 		if (start + num_bytes < end && start + num_bytes < actual_end) {
500 			start += num_bytes;
501 			pages = NULL;
502 			cond_resched();
503 			goto again;
504 		}
505 	} else {
506 cleanup_and_bail_uncompressed:
507 		/*
508 		 * No compression, but we still need to write the pages in
509 		 * the file we've been given so far.  redirty the locked
510 		 * page if it corresponds to our extent and set things up
511 		 * for the async work queue to run cow_file_range to do
512 		 * the normal delalloc dance
513 		 */
514 		if (page_offset(locked_page) >= start &&
515 		    page_offset(locked_page) <= end) {
516 			__set_page_dirty_nobuffers(locked_page);
517 			/* unlocked later on in the async handlers */
518 		}
519 		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
520 		*num_added += 1;
521 	}
522 
523 out:
524 	return 0;
525 
526 free_pages_out:
527 	for (i = 0; i < nr_pages_ret; i++) {
528 		WARN_ON(pages[i]->mapping);
529 		page_cache_release(pages[i]);
530 	}
531 	kfree(pages);
532 
533 	goto out;
534 }
535 
536 /*
537  * phase two of compressed writeback.  This is the ordered portion
538  * of the code, which only gets called in the order the work was
539  * queued.  We walk all the async extents created by compress_file_range
540  * and send them down to the disk.
541  */
542 static noinline int submit_compressed_extents(struct inode *inode,
543 					      struct async_cow *async_cow)
544 {
545 	struct async_extent *async_extent;
546 	u64 alloc_hint = 0;
547 	struct btrfs_trans_handle *trans;
548 	struct btrfs_key ins;
549 	struct extent_map *em;
550 	struct btrfs_root *root = BTRFS_I(inode)->root;
551 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
552 	struct extent_io_tree *io_tree;
553 	int ret = 0;
554 
555 	if (list_empty(&async_cow->extents))
556 		return 0;
557 
558 
559 	while (!list_empty(&async_cow->extents)) {
560 		async_extent = list_entry(async_cow->extents.next,
561 					  struct async_extent, list);
562 		list_del(&async_extent->list);
563 
564 		io_tree = &BTRFS_I(inode)->io_tree;
565 
566 retry:
567 		/* did the compression code fall back to uncompressed IO? */
568 		if (!async_extent->pages) {
569 			int page_started = 0;
570 			unsigned long nr_written = 0;
571 
572 			lock_extent(io_tree, async_extent->start,
573 				    async_extent->start +
574 				    async_extent->ram_size - 1, GFP_NOFS);
575 
576 			/* allocate blocks */
577 			ret = cow_file_range(inode, async_cow->locked_page,
578 					     async_extent->start,
579 					     async_extent->start +
580 					     async_extent->ram_size - 1,
581 					     &page_started, &nr_written, 0);
582 
583 			/*
584 			 * if page_started, cow_file_range inserted an
585 			 * inline extent and took care of all the unlocking
586 			 * and IO for us.  Otherwise, we need to submit
587 			 * all those pages down to the drive.
588 			 */
589 			if (!page_started && !ret)
590 				extent_write_locked_range(io_tree,
591 						  inode, async_extent->start,
592 						  async_extent->start +
593 						  async_extent->ram_size - 1,
594 						  btrfs_get_extent,
595 						  WB_SYNC_ALL);
596 			kfree(async_extent);
597 			cond_resched();
598 			continue;
599 		}
600 
601 		lock_extent(io_tree, async_extent->start,
602 			    async_extent->start + async_extent->ram_size - 1,
603 			    GFP_NOFS);
604 
605 		trans = btrfs_join_transaction(root, 1);
606 		ret = btrfs_reserve_extent(trans, root,
607 					   async_extent->compressed_size,
608 					   async_extent->compressed_size,
609 					   0, alloc_hint,
610 					   (u64)-1, &ins, 1);
611 		btrfs_end_transaction(trans, root);
612 
613 		if (ret) {
614 			int i;
615 			for (i = 0; i < async_extent->nr_pages; i++) {
616 				WARN_ON(async_extent->pages[i]->mapping);
617 				page_cache_release(async_extent->pages[i]);
618 			}
619 			kfree(async_extent->pages);
620 			async_extent->nr_pages = 0;
621 			async_extent->pages = NULL;
622 			unlock_extent(io_tree, async_extent->start,
623 				      async_extent->start +
624 				      async_extent->ram_size - 1, GFP_NOFS);
625 			goto retry;
626 		}
627 
628 		/*
629 		 * here we're doing allocation and writeback of the
630 		 * compressed pages
631 		 */
632 		btrfs_drop_extent_cache(inode, async_extent->start,
633 					async_extent->start +
634 					async_extent->ram_size - 1, 0);
635 
636 		em = alloc_extent_map(GFP_NOFS);
637 		em->start = async_extent->start;
638 		em->len = async_extent->ram_size;
639 		em->orig_start = em->start;
640 
641 		em->block_start = ins.objectid;
642 		em->block_len = ins.offset;
643 		em->bdev = root->fs_info->fs_devices->latest_bdev;
644 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
645 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
646 
647 		while (1) {
648 			write_lock(&em_tree->lock);
649 			ret = add_extent_mapping(em_tree, em);
650 			write_unlock(&em_tree->lock);
651 			if (ret != -EEXIST) {
652 				free_extent_map(em);
653 				break;
654 			}
655 			btrfs_drop_extent_cache(inode, async_extent->start,
656 						async_extent->start +
657 						async_extent->ram_size - 1, 0);
658 		}
659 
660 		ret = btrfs_add_ordered_extent(inode, async_extent->start,
661 					       ins.objectid,
662 					       async_extent->ram_size,
663 					       ins.offset,
664 					       BTRFS_ORDERED_COMPRESSED);
665 		BUG_ON(ret);
666 
667 		/*
668 		 * clear dirty, set writeback and unlock the pages.
669 		 */
670 		extent_clear_unlock_delalloc(inode,
671 				&BTRFS_I(inode)->io_tree,
672 				async_extent->start,
673 				async_extent->start +
674 				async_extent->ram_size - 1,
675 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
676 				EXTENT_CLEAR_UNLOCK |
677 				EXTENT_CLEAR_DELALLOC |
678 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
679 
680 		ret = btrfs_submit_compressed_write(inode,
681 				    async_extent->start,
682 				    async_extent->ram_size,
683 				    ins.objectid,
684 				    ins.offset, async_extent->pages,
685 				    async_extent->nr_pages);
686 
687 		BUG_ON(ret);
688 		alloc_hint = ins.objectid + ins.offset;
689 		kfree(async_extent);
690 		cond_resched();
691 	}
692 
693 	return 0;
694 }
695 
696 /*
697  * when extent_io.c finds a delayed allocation range in the file,
698  * the call backs end up in this code.  The basic idea is to
699  * allocate extents on disk for the range, and create ordered data structs
700  * in ram to track those extents.
701  *
702  * locked_page is the page that writepage had locked already.  We use
703  * it to make sure we don't do extra locks or unlocks.
704  *
705  * *page_started is set to one if we unlock locked_page and do everything
706  * required to start IO on it.  It may be clean and already done with
707  * IO when we return.
708  */
709 static noinline int cow_file_range(struct inode *inode,
710 				   struct page *locked_page,
711 				   u64 start, u64 end, int *page_started,
712 				   unsigned long *nr_written,
713 				   int unlock)
714 {
715 	struct btrfs_root *root = BTRFS_I(inode)->root;
716 	struct btrfs_trans_handle *trans;
717 	u64 alloc_hint = 0;
718 	u64 num_bytes;
719 	unsigned long ram_size;
720 	u64 disk_num_bytes;
721 	u64 cur_alloc_size;
722 	u64 blocksize = root->sectorsize;
723 	u64 actual_end;
724 	u64 isize = i_size_read(inode);
725 	struct btrfs_key ins;
726 	struct extent_map *em;
727 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
728 	int ret = 0;
729 
730 	trans = btrfs_join_transaction(root, 1);
731 	BUG_ON(!trans);
732 	btrfs_set_trans_block_group(trans, inode);
733 
734 	actual_end = min_t(u64, isize, end + 1);
735 
736 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
737 	num_bytes = max(blocksize,  num_bytes);
738 	disk_num_bytes = num_bytes;
739 	ret = 0;
740 
741 	if (start == 0) {
742 		/* lets try to make an inline extent */
743 		ret = cow_file_range_inline(trans, root, inode,
744 					    start, end, 0, NULL);
745 		if (ret == 0) {
746 			extent_clear_unlock_delalloc(inode,
747 				     &BTRFS_I(inode)->io_tree,
748 				     start, end, NULL,
749 				     EXTENT_CLEAR_UNLOCK_PAGE |
750 				     EXTENT_CLEAR_UNLOCK |
751 				     EXTENT_CLEAR_DELALLOC |
752 				     EXTENT_CLEAR_ACCOUNTING |
753 				     EXTENT_CLEAR_DIRTY |
754 				     EXTENT_SET_WRITEBACK |
755 				     EXTENT_END_WRITEBACK);
756 
757 			*nr_written = *nr_written +
758 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
759 			*page_started = 1;
760 			ret = 0;
761 			goto out;
762 		}
763 	}
764 
765 	BUG_ON(disk_num_bytes >
766 	       btrfs_super_total_bytes(&root->fs_info->super_copy));
767 
768 
769 	read_lock(&BTRFS_I(inode)->extent_tree.lock);
770 	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
771 				   start, num_bytes);
772 	if (em) {
773 		/*
774 		 * if block start isn't an actual block number then find the
775 		 * first block in this inode and use that as a hint.  If that
776 		 * block is also bogus then just don't worry about it.
777 		 */
778 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
779 			free_extent_map(em);
780 			em = search_extent_mapping(em_tree, 0, 0);
781 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
782 				alloc_hint = em->block_start;
783 			if (em)
784 				free_extent_map(em);
785 		} else {
786 			alloc_hint = em->block_start;
787 			free_extent_map(em);
788 		}
789 	}
790 	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
791 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
792 
793 	while (disk_num_bytes > 0) {
794 		unsigned long op;
795 
796 		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
797 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
798 					   root->sectorsize, 0, alloc_hint,
799 					   (u64)-1, &ins, 1);
800 		BUG_ON(ret);
801 
802 		em = alloc_extent_map(GFP_NOFS);
803 		em->start = start;
804 		em->orig_start = em->start;
805 		ram_size = ins.offset;
806 		em->len = ins.offset;
807 
808 		em->block_start = ins.objectid;
809 		em->block_len = ins.offset;
810 		em->bdev = root->fs_info->fs_devices->latest_bdev;
811 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
812 
813 		while (1) {
814 			write_lock(&em_tree->lock);
815 			ret = add_extent_mapping(em_tree, em);
816 			write_unlock(&em_tree->lock);
817 			if (ret != -EEXIST) {
818 				free_extent_map(em);
819 				break;
820 			}
821 			btrfs_drop_extent_cache(inode, start,
822 						start + ram_size - 1, 0);
823 		}
824 
825 		cur_alloc_size = ins.offset;
826 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
827 					       ram_size, cur_alloc_size, 0);
828 		BUG_ON(ret);
829 
830 		if (root->root_key.objectid ==
831 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
832 			ret = btrfs_reloc_clone_csums(inode, start,
833 						      cur_alloc_size);
834 			BUG_ON(ret);
835 		}
836 
837 		if (disk_num_bytes < cur_alloc_size)
838 			break;
839 
840 		/* we're not doing compressed IO, don't unlock the first
841 		 * page (which the caller expects to stay locked), don't
842 		 * clear any dirty bits and don't set any writeback bits
843 		 *
844 		 * Do set the Private2 bit so we know this page was properly
845 		 * setup for writepage
846 		 */
847 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
848 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
849 			EXTENT_SET_PRIVATE2;
850 
851 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
852 					     start, start + ram_size - 1,
853 					     locked_page, op);
854 		disk_num_bytes -= cur_alloc_size;
855 		num_bytes -= cur_alloc_size;
856 		alloc_hint = ins.objectid + ins.offset;
857 		start += cur_alloc_size;
858 	}
859 out:
860 	ret = 0;
861 	btrfs_end_transaction(trans, root);
862 
863 	return ret;
864 }
865 
866 /*
867  * work queue call back to started compression on a file and pages
868  */
869 static noinline void async_cow_start(struct btrfs_work *work)
870 {
871 	struct async_cow *async_cow;
872 	int num_added = 0;
873 	async_cow = container_of(work, struct async_cow, work);
874 
875 	compress_file_range(async_cow->inode, async_cow->locked_page,
876 			    async_cow->start, async_cow->end, async_cow,
877 			    &num_added);
878 	if (num_added == 0)
879 		async_cow->inode = NULL;
880 }
881 
882 /*
883  * work queue call back to submit previously compressed pages
884  */
885 static noinline void async_cow_submit(struct btrfs_work *work)
886 {
887 	struct async_cow *async_cow;
888 	struct btrfs_root *root;
889 	unsigned long nr_pages;
890 
891 	async_cow = container_of(work, struct async_cow, work);
892 
893 	root = async_cow->root;
894 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
895 		PAGE_CACHE_SHIFT;
896 
897 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
898 
899 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
900 	    5 * 1042 * 1024 &&
901 	    waitqueue_active(&root->fs_info->async_submit_wait))
902 		wake_up(&root->fs_info->async_submit_wait);
903 
904 	if (async_cow->inode)
905 		submit_compressed_extents(async_cow->inode, async_cow);
906 }
907 
908 static noinline void async_cow_free(struct btrfs_work *work)
909 {
910 	struct async_cow *async_cow;
911 	async_cow = container_of(work, struct async_cow, work);
912 	kfree(async_cow);
913 }
914 
915 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
916 				u64 start, u64 end, int *page_started,
917 				unsigned long *nr_written)
918 {
919 	struct async_cow *async_cow;
920 	struct btrfs_root *root = BTRFS_I(inode)->root;
921 	unsigned long nr_pages;
922 	u64 cur_end;
923 	int limit = 10 * 1024 * 1042;
924 
925 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
926 			 1, 0, NULL, GFP_NOFS);
927 	while (start < end) {
928 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
929 		async_cow->inode = inode;
930 		async_cow->root = root;
931 		async_cow->locked_page = locked_page;
932 		async_cow->start = start;
933 
934 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
935 			cur_end = end;
936 		else
937 			cur_end = min(end, start + 512 * 1024 - 1);
938 
939 		async_cow->end = cur_end;
940 		INIT_LIST_HEAD(&async_cow->extents);
941 
942 		async_cow->work.func = async_cow_start;
943 		async_cow->work.ordered_func = async_cow_submit;
944 		async_cow->work.ordered_free = async_cow_free;
945 		async_cow->work.flags = 0;
946 
947 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
948 			PAGE_CACHE_SHIFT;
949 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
950 
951 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
952 				   &async_cow->work);
953 
954 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
955 			wait_event(root->fs_info->async_submit_wait,
956 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
957 			    limit));
958 		}
959 
960 		while (atomic_read(&root->fs_info->async_submit_draining) &&
961 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
962 			wait_event(root->fs_info->async_submit_wait,
963 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
964 			   0));
965 		}
966 
967 		*nr_written += nr_pages;
968 		start = cur_end + 1;
969 	}
970 	*page_started = 1;
971 	return 0;
972 }
973 
974 static noinline int csum_exist_in_range(struct btrfs_root *root,
975 					u64 bytenr, u64 num_bytes)
976 {
977 	int ret;
978 	struct btrfs_ordered_sum *sums;
979 	LIST_HEAD(list);
980 
981 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
982 				       bytenr + num_bytes - 1, &list);
983 	if (ret == 0 && list_empty(&list))
984 		return 0;
985 
986 	while (!list_empty(&list)) {
987 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
988 		list_del(&sums->list);
989 		kfree(sums);
990 	}
991 	return 1;
992 }
993 
994 /*
995  * when nowcow writeback call back.  This checks for snapshots or COW copies
996  * of the extents that exist in the file, and COWs the file as required.
997  *
998  * If no cow copies or snapshots exist, we write directly to the existing
999  * blocks on disk
1000  */
1001 static noinline int run_delalloc_nocow(struct inode *inode,
1002 				       struct page *locked_page,
1003 			      u64 start, u64 end, int *page_started, int force,
1004 			      unsigned long *nr_written)
1005 {
1006 	struct btrfs_root *root = BTRFS_I(inode)->root;
1007 	struct btrfs_trans_handle *trans;
1008 	struct extent_buffer *leaf;
1009 	struct btrfs_path *path;
1010 	struct btrfs_file_extent_item *fi;
1011 	struct btrfs_key found_key;
1012 	u64 cow_start;
1013 	u64 cur_offset;
1014 	u64 extent_end;
1015 	u64 extent_offset;
1016 	u64 disk_bytenr;
1017 	u64 num_bytes;
1018 	int extent_type;
1019 	int ret;
1020 	int type;
1021 	int nocow;
1022 	int check_prev = 1;
1023 
1024 	path = btrfs_alloc_path();
1025 	BUG_ON(!path);
1026 	trans = btrfs_join_transaction(root, 1);
1027 	BUG_ON(!trans);
1028 
1029 	cow_start = (u64)-1;
1030 	cur_offset = start;
1031 	while (1) {
1032 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1033 					       cur_offset, 0);
1034 		BUG_ON(ret < 0);
1035 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1036 			leaf = path->nodes[0];
1037 			btrfs_item_key_to_cpu(leaf, &found_key,
1038 					      path->slots[0] - 1);
1039 			if (found_key.objectid == inode->i_ino &&
1040 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1041 				path->slots[0]--;
1042 		}
1043 		check_prev = 0;
1044 next_slot:
1045 		leaf = path->nodes[0];
1046 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1047 			ret = btrfs_next_leaf(root, path);
1048 			if (ret < 0)
1049 				BUG_ON(1);
1050 			if (ret > 0)
1051 				break;
1052 			leaf = path->nodes[0];
1053 		}
1054 
1055 		nocow = 0;
1056 		disk_bytenr = 0;
1057 		num_bytes = 0;
1058 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1059 
1060 		if (found_key.objectid > inode->i_ino ||
1061 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1062 		    found_key.offset > end)
1063 			break;
1064 
1065 		if (found_key.offset > cur_offset) {
1066 			extent_end = found_key.offset;
1067 			extent_type = 0;
1068 			goto out_check;
1069 		}
1070 
1071 		fi = btrfs_item_ptr(leaf, path->slots[0],
1072 				    struct btrfs_file_extent_item);
1073 		extent_type = btrfs_file_extent_type(leaf, fi);
1074 
1075 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1076 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1077 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1078 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1079 			extent_end = found_key.offset +
1080 				btrfs_file_extent_num_bytes(leaf, fi);
1081 			if (extent_end <= start) {
1082 				path->slots[0]++;
1083 				goto next_slot;
1084 			}
1085 			if (disk_bytenr == 0)
1086 				goto out_check;
1087 			if (btrfs_file_extent_compression(leaf, fi) ||
1088 			    btrfs_file_extent_encryption(leaf, fi) ||
1089 			    btrfs_file_extent_other_encoding(leaf, fi))
1090 				goto out_check;
1091 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1092 				goto out_check;
1093 			if (btrfs_extent_readonly(root, disk_bytenr))
1094 				goto out_check;
1095 			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1096 						  found_key.offset -
1097 						  extent_offset, disk_bytenr))
1098 				goto out_check;
1099 			disk_bytenr += extent_offset;
1100 			disk_bytenr += cur_offset - found_key.offset;
1101 			num_bytes = min(end + 1, extent_end) - cur_offset;
1102 			/*
1103 			 * force cow if csum exists in the range.
1104 			 * this ensure that csum for a given extent are
1105 			 * either valid or do not exist.
1106 			 */
1107 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1108 				goto out_check;
1109 			nocow = 1;
1110 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1111 			extent_end = found_key.offset +
1112 				btrfs_file_extent_inline_len(leaf, fi);
1113 			extent_end = ALIGN(extent_end, root->sectorsize);
1114 		} else {
1115 			BUG_ON(1);
1116 		}
1117 out_check:
1118 		if (extent_end <= start) {
1119 			path->slots[0]++;
1120 			goto next_slot;
1121 		}
1122 		if (!nocow) {
1123 			if (cow_start == (u64)-1)
1124 				cow_start = cur_offset;
1125 			cur_offset = extent_end;
1126 			if (cur_offset > end)
1127 				break;
1128 			path->slots[0]++;
1129 			goto next_slot;
1130 		}
1131 
1132 		btrfs_release_path(root, path);
1133 		if (cow_start != (u64)-1) {
1134 			ret = cow_file_range(inode, locked_page, cow_start,
1135 					found_key.offset - 1, page_started,
1136 					nr_written, 1);
1137 			BUG_ON(ret);
1138 			cow_start = (u64)-1;
1139 		}
1140 
1141 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1142 			struct extent_map *em;
1143 			struct extent_map_tree *em_tree;
1144 			em_tree = &BTRFS_I(inode)->extent_tree;
1145 			em = alloc_extent_map(GFP_NOFS);
1146 			em->start = cur_offset;
1147 			em->orig_start = em->start;
1148 			em->len = num_bytes;
1149 			em->block_len = num_bytes;
1150 			em->block_start = disk_bytenr;
1151 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1152 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1153 			while (1) {
1154 				write_lock(&em_tree->lock);
1155 				ret = add_extent_mapping(em_tree, em);
1156 				write_unlock(&em_tree->lock);
1157 				if (ret != -EEXIST) {
1158 					free_extent_map(em);
1159 					break;
1160 				}
1161 				btrfs_drop_extent_cache(inode, em->start,
1162 						em->start + em->len - 1, 0);
1163 			}
1164 			type = BTRFS_ORDERED_PREALLOC;
1165 		} else {
1166 			type = BTRFS_ORDERED_NOCOW;
1167 		}
1168 
1169 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1170 					       num_bytes, num_bytes, type);
1171 		BUG_ON(ret);
1172 
1173 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1174 				cur_offset, cur_offset + num_bytes - 1,
1175 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1176 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1177 				EXTENT_SET_PRIVATE2);
1178 		cur_offset = extent_end;
1179 		if (cur_offset > end)
1180 			break;
1181 	}
1182 	btrfs_release_path(root, path);
1183 
1184 	if (cur_offset <= end && cow_start == (u64)-1)
1185 		cow_start = cur_offset;
1186 	if (cow_start != (u64)-1) {
1187 		ret = cow_file_range(inode, locked_page, cow_start, end,
1188 				     page_started, nr_written, 1);
1189 		BUG_ON(ret);
1190 	}
1191 
1192 	ret = btrfs_end_transaction(trans, root);
1193 	BUG_ON(ret);
1194 	btrfs_free_path(path);
1195 	return 0;
1196 }
1197 
1198 /*
1199  * extent_io.c call back to do delayed allocation processing
1200  */
1201 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1202 			      u64 start, u64 end, int *page_started,
1203 			      unsigned long *nr_written)
1204 {
1205 	int ret;
1206 	struct btrfs_root *root = BTRFS_I(inode)->root;
1207 
1208 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1209 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1210 					 page_started, 1, nr_written);
1211 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1212 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1213 					 page_started, 0, nr_written);
1214 	else if (!btrfs_test_opt(root, COMPRESS))
1215 		ret = cow_file_range(inode, locked_page, start, end,
1216 				      page_started, nr_written, 1);
1217 	else
1218 		ret = cow_file_range_async(inode, locked_page, start, end,
1219 					   page_started, nr_written);
1220 	return ret;
1221 }
1222 
1223 static int btrfs_split_extent_hook(struct inode *inode,
1224 				    struct extent_state *orig, u64 split)
1225 {
1226 	struct btrfs_root *root = BTRFS_I(inode)->root;
1227 	u64 size;
1228 
1229 	if (!(orig->state & EXTENT_DELALLOC))
1230 		return 0;
1231 
1232 	size = orig->end - orig->start + 1;
1233 	if (size > root->fs_info->max_extent) {
1234 		u64 num_extents;
1235 		u64 new_size;
1236 
1237 		new_size = orig->end - split + 1;
1238 		num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1239 					root->fs_info->max_extent);
1240 
1241 		/*
1242 		 * if we break a large extent up then leave oustanding_extents
1243 		 * be, since we've already accounted for the large extent.
1244 		 */
1245 		if (div64_u64(new_size + root->fs_info->max_extent - 1,
1246 			      root->fs_info->max_extent) < num_extents)
1247 			return 0;
1248 	}
1249 
1250 	spin_lock(&BTRFS_I(inode)->accounting_lock);
1251 	BTRFS_I(inode)->outstanding_extents++;
1252 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
1253 
1254 	return 0;
1255 }
1256 
1257 /*
1258  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1259  * extents so we can keep track of new extents that are just merged onto old
1260  * extents, such as when we are doing sequential writes, so we can properly
1261  * account for the metadata space we'll need.
1262  */
1263 static int btrfs_merge_extent_hook(struct inode *inode,
1264 				   struct extent_state *new,
1265 				   struct extent_state *other)
1266 {
1267 	struct btrfs_root *root = BTRFS_I(inode)->root;
1268 	u64 new_size, old_size;
1269 	u64 num_extents;
1270 
1271 	/* not delalloc, ignore it */
1272 	if (!(other->state & EXTENT_DELALLOC))
1273 		return 0;
1274 
1275 	old_size = other->end - other->start + 1;
1276 	if (new->start < other->start)
1277 		new_size = other->end - new->start + 1;
1278 	else
1279 		new_size = new->end - other->start + 1;
1280 
1281 	/* we're not bigger than the max, unreserve the space and go */
1282 	if (new_size <= root->fs_info->max_extent) {
1283 		spin_lock(&BTRFS_I(inode)->accounting_lock);
1284 		BTRFS_I(inode)->outstanding_extents--;
1285 		spin_unlock(&BTRFS_I(inode)->accounting_lock);
1286 		return 0;
1287 	}
1288 
1289 	/*
1290 	 * If we grew by another max_extent, just return, we want to keep that
1291 	 * reserved amount.
1292 	 */
1293 	num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1294 				root->fs_info->max_extent);
1295 	if (div64_u64(new_size + root->fs_info->max_extent - 1,
1296 		      root->fs_info->max_extent) > num_extents)
1297 		return 0;
1298 
1299 	spin_lock(&BTRFS_I(inode)->accounting_lock);
1300 	BTRFS_I(inode)->outstanding_extents--;
1301 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
1302 
1303 	return 0;
1304 }
1305 
1306 /*
1307  * extent_io.c set_bit_hook, used to track delayed allocation
1308  * bytes in this file, and to maintain the list of inodes that
1309  * have pending delalloc work to be done.
1310  */
1311 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1312 		       unsigned long old, unsigned long bits)
1313 {
1314 
1315 	/*
1316 	 * set_bit and clear bit hooks normally require _irqsave/restore
1317 	 * but in this case, we are only testeing for the DELALLOC
1318 	 * bit, which is only set or cleared with irqs on
1319 	 */
1320 	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1321 		struct btrfs_root *root = BTRFS_I(inode)->root;
1322 
1323 		spin_lock(&BTRFS_I(inode)->accounting_lock);
1324 		BTRFS_I(inode)->outstanding_extents++;
1325 		spin_unlock(&BTRFS_I(inode)->accounting_lock);
1326 		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1327 		spin_lock(&root->fs_info->delalloc_lock);
1328 		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1329 		root->fs_info->delalloc_bytes += end - start + 1;
1330 		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1331 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1332 				      &root->fs_info->delalloc_inodes);
1333 		}
1334 		spin_unlock(&root->fs_info->delalloc_lock);
1335 	}
1336 	return 0;
1337 }
1338 
1339 /*
1340  * extent_io.c clear_bit_hook, see set_bit_hook for why
1341  */
1342 static int btrfs_clear_bit_hook(struct inode *inode,
1343 				struct extent_state *state, unsigned long bits)
1344 {
1345 	/*
1346 	 * set_bit and clear bit hooks normally require _irqsave/restore
1347 	 * but in this case, we are only testeing for the DELALLOC
1348 	 * bit, which is only set or cleared with irqs on
1349 	 */
1350 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1351 		struct btrfs_root *root = BTRFS_I(inode)->root;
1352 
1353 		if (bits & EXTENT_DO_ACCOUNTING) {
1354 			spin_lock(&BTRFS_I(inode)->accounting_lock);
1355 			BTRFS_I(inode)->outstanding_extents--;
1356 			spin_unlock(&BTRFS_I(inode)->accounting_lock);
1357 			btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1358 		}
1359 
1360 		spin_lock(&root->fs_info->delalloc_lock);
1361 		if (state->end - state->start + 1 >
1362 		    root->fs_info->delalloc_bytes) {
1363 			printk(KERN_INFO "btrfs warning: delalloc account "
1364 			       "%llu %llu\n",
1365 			       (unsigned long long)
1366 			       state->end - state->start + 1,
1367 			       (unsigned long long)
1368 			       root->fs_info->delalloc_bytes);
1369 			btrfs_delalloc_free_space(root, inode, (u64)-1);
1370 			root->fs_info->delalloc_bytes = 0;
1371 			BTRFS_I(inode)->delalloc_bytes = 0;
1372 		} else {
1373 			btrfs_delalloc_free_space(root, inode,
1374 						  state->end -
1375 						  state->start + 1);
1376 			root->fs_info->delalloc_bytes -= state->end -
1377 				state->start + 1;
1378 			BTRFS_I(inode)->delalloc_bytes -= state->end -
1379 				state->start + 1;
1380 		}
1381 		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1382 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1383 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1384 		}
1385 		spin_unlock(&root->fs_info->delalloc_lock);
1386 	}
1387 	return 0;
1388 }
1389 
1390 /*
1391  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1392  * we don't create bios that span stripes or chunks
1393  */
1394 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1395 			 size_t size, struct bio *bio,
1396 			 unsigned long bio_flags)
1397 {
1398 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1399 	struct btrfs_mapping_tree *map_tree;
1400 	u64 logical = (u64)bio->bi_sector << 9;
1401 	u64 length = 0;
1402 	u64 map_length;
1403 	int ret;
1404 
1405 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1406 		return 0;
1407 
1408 	length = bio->bi_size;
1409 	map_tree = &root->fs_info->mapping_tree;
1410 	map_length = length;
1411 	ret = btrfs_map_block(map_tree, READ, logical,
1412 			      &map_length, NULL, 0);
1413 
1414 	if (map_length < length + size)
1415 		return 1;
1416 	return 0;
1417 }
1418 
1419 /*
1420  * in order to insert checksums into the metadata in large chunks,
1421  * we wait until bio submission time.   All the pages in the bio are
1422  * checksummed and sums are attached onto the ordered extent record.
1423  *
1424  * At IO completion time the cums attached on the ordered extent record
1425  * are inserted into the btree
1426  */
1427 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1428 				    struct bio *bio, int mirror_num,
1429 				    unsigned long bio_flags)
1430 {
1431 	struct btrfs_root *root = BTRFS_I(inode)->root;
1432 	int ret = 0;
1433 
1434 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1435 	BUG_ON(ret);
1436 	return 0;
1437 }
1438 
1439 /*
1440  * in order to insert checksums into the metadata in large chunks,
1441  * we wait until bio submission time.   All the pages in the bio are
1442  * checksummed and sums are attached onto the ordered extent record.
1443  *
1444  * At IO completion time the cums attached on the ordered extent record
1445  * are inserted into the btree
1446  */
1447 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1448 			  int mirror_num, unsigned long bio_flags)
1449 {
1450 	struct btrfs_root *root = BTRFS_I(inode)->root;
1451 	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1452 }
1453 
1454 /*
1455  * extent_io.c submission hook. This does the right thing for csum calculation
1456  * on write, or reading the csums from the tree before a read
1457  */
1458 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1459 			  int mirror_num, unsigned long bio_flags)
1460 {
1461 	struct btrfs_root *root = BTRFS_I(inode)->root;
1462 	int ret = 0;
1463 	int skip_sum;
1464 
1465 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1466 
1467 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1468 	BUG_ON(ret);
1469 
1470 	if (!(rw & (1 << BIO_RW))) {
1471 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1472 			return btrfs_submit_compressed_read(inode, bio,
1473 						    mirror_num, bio_flags);
1474 		} else if (!skip_sum)
1475 			btrfs_lookup_bio_sums(root, inode, bio, NULL);
1476 		goto mapit;
1477 	} else if (!skip_sum) {
1478 		/* csum items have already been cloned */
1479 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1480 			goto mapit;
1481 		/* we're doing a write, do the async checksumming */
1482 		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1483 				   inode, rw, bio, mirror_num,
1484 				   bio_flags, __btrfs_submit_bio_start,
1485 				   __btrfs_submit_bio_done);
1486 	}
1487 
1488 mapit:
1489 	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1490 }
1491 
1492 /*
1493  * given a list of ordered sums record them in the inode.  This happens
1494  * at IO completion time based on sums calculated at bio submission time.
1495  */
1496 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1497 			     struct inode *inode, u64 file_offset,
1498 			     struct list_head *list)
1499 {
1500 	struct btrfs_ordered_sum *sum;
1501 
1502 	btrfs_set_trans_block_group(trans, inode);
1503 
1504 	list_for_each_entry(sum, list, list) {
1505 		btrfs_csum_file_blocks(trans,
1506 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1507 	}
1508 	return 0;
1509 }
1510 
1511 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1512 {
1513 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1514 		WARN_ON(1);
1515 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1516 				   GFP_NOFS);
1517 }
1518 
1519 /* see btrfs_writepage_start_hook for details on why this is required */
1520 struct btrfs_writepage_fixup {
1521 	struct page *page;
1522 	struct btrfs_work work;
1523 };
1524 
1525 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1526 {
1527 	struct btrfs_writepage_fixup *fixup;
1528 	struct btrfs_ordered_extent *ordered;
1529 	struct page *page;
1530 	struct inode *inode;
1531 	u64 page_start;
1532 	u64 page_end;
1533 
1534 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1535 	page = fixup->page;
1536 again:
1537 	lock_page(page);
1538 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1539 		ClearPageChecked(page);
1540 		goto out_page;
1541 	}
1542 
1543 	inode = page->mapping->host;
1544 	page_start = page_offset(page);
1545 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1546 
1547 	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1548 
1549 	/* already ordered? We're done */
1550 	if (PagePrivate2(page))
1551 		goto out;
1552 
1553 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1554 	if (ordered) {
1555 		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1556 			      page_end, GFP_NOFS);
1557 		unlock_page(page);
1558 		btrfs_start_ordered_extent(inode, ordered, 1);
1559 		goto again;
1560 	}
1561 
1562 	btrfs_set_extent_delalloc(inode, page_start, page_end);
1563 	ClearPageChecked(page);
1564 out:
1565 	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1566 out_page:
1567 	unlock_page(page);
1568 	page_cache_release(page);
1569 }
1570 
1571 /*
1572  * There are a few paths in the higher layers of the kernel that directly
1573  * set the page dirty bit without asking the filesystem if it is a
1574  * good idea.  This causes problems because we want to make sure COW
1575  * properly happens and the data=ordered rules are followed.
1576  *
1577  * In our case any range that doesn't have the ORDERED bit set
1578  * hasn't been properly setup for IO.  We kick off an async process
1579  * to fix it up.  The async helper will wait for ordered extents, set
1580  * the delalloc bit and make it safe to write the page.
1581  */
1582 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1583 {
1584 	struct inode *inode = page->mapping->host;
1585 	struct btrfs_writepage_fixup *fixup;
1586 	struct btrfs_root *root = BTRFS_I(inode)->root;
1587 
1588 	/* this page is properly in the ordered list */
1589 	if (TestClearPagePrivate2(page))
1590 		return 0;
1591 
1592 	if (PageChecked(page))
1593 		return -EAGAIN;
1594 
1595 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1596 	if (!fixup)
1597 		return -EAGAIN;
1598 
1599 	SetPageChecked(page);
1600 	page_cache_get(page);
1601 	fixup->work.func = btrfs_writepage_fixup_worker;
1602 	fixup->page = page;
1603 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1604 	return -EAGAIN;
1605 }
1606 
1607 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1608 				       struct inode *inode, u64 file_pos,
1609 				       u64 disk_bytenr, u64 disk_num_bytes,
1610 				       u64 num_bytes, u64 ram_bytes,
1611 				       u8 compression, u8 encryption,
1612 				       u16 other_encoding, int extent_type)
1613 {
1614 	struct btrfs_root *root = BTRFS_I(inode)->root;
1615 	struct btrfs_file_extent_item *fi;
1616 	struct btrfs_path *path;
1617 	struct extent_buffer *leaf;
1618 	struct btrfs_key ins;
1619 	u64 hint;
1620 	int ret;
1621 
1622 	path = btrfs_alloc_path();
1623 	BUG_ON(!path);
1624 
1625 	path->leave_spinning = 1;
1626 
1627 	/*
1628 	 * we may be replacing one extent in the tree with another.
1629 	 * The new extent is pinned in the extent map, and we don't want
1630 	 * to drop it from the cache until it is completely in the btree.
1631 	 *
1632 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1633 	 * the caller is expected to unpin it and allow it to be merged
1634 	 * with the others.
1635 	 */
1636 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1637 				 &hint, 0);
1638 	BUG_ON(ret);
1639 
1640 	ins.objectid = inode->i_ino;
1641 	ins.offset = file_pos;
1642 	ins.type = BTRFS_EXTENT_DATA_KEY;
1643 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1644 	BUG_ON(ret);
1645 	leaf = path->nodes[0];
1646 	fi = btrfs_item_ptr(leaf, path->slots[0],
1647 			    struct btrfs_file_extent_item);
1648 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1649 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1650 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1651 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1652 	btrfs_set_file_extent_offset(leaf, fi, 0);
1653 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1654 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1655 	btrfs_set_file_extent_compression(leaf, fi, compression);
1656 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1657 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1658 
1659 	btrfs_unlock_up_safe(path, 1);
1660 	btrfs_set_lock_blocking(leaf);
1661 
1662 	btrfs_mark_buffer_dirty(leaf);
1663 
1664 	inode_add_bytes(inode, num_bytes);
1665 
1666 	ins.objectid = disk_bytenr;
1667 	ins.offset = disk_num_bytes;
1668 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1669 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1670 					root->root_key.objectid,
1671 					inode->i_ino, file_pos, &ins);
1672 	BUG_ON(ret);
1673 	btrfs_free_path(path);
1674 
1675 	return 0;
1676 }
1677 
1678 /*
1679  * helper function for btrfs_finish_ordered_io, this
1680  * just reads in some of the csum leaves to prime them into ram
1681  * before we start the transaction.  It limits the amount of btree
1682  * reads required while inside the transaction.
1683  */
1684 /* as ordered data IO finishes, this gets called so we can finish
1685  * an ordered extent if the range of bytes in the file it covers are
1686  * fully written.
1687  */
1688 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1689 {
1690 	struct btrfs_root *root = BTRFS_I(inode)->root;
1691 	struct btrfs_trans_handle *trans;
1692 	struct btrfs_ordered_extent *ordered_extent = NULL;
1693 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1694 	int compressed = 0;
1695 	int ret;
1696 
1697 	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1698 	if (!ret)
1699 		return 0;
1700 
1701 	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1702 	BUG_ON(!ordered_extent);
1703 
1704 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1705 		BUG_ON(!list_empty(&ordered_extent->list));
1706 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1707 		if (!ret) {
1708 			trans = btrfs_join_transaction(root, 1);
1709 			ret = btrfs_update_inode(trans, root, inode);
1710 			BUG_ON(ret);
1711 			btrfs_end_transaction(trans, root);
1712 		}
1713 		goto out;
1714 	}
1715 
1716 	lock_extent(io_tree, ordered_extent->file_offset,
1717 		    ordered_extent->file_offset + ordered_extent->len - 1,
1718 		    GFP_NOFS);
1719 
1720 	trans = btrfs_join_transaction(root, 1);
1721 
1722 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1723 		compressed = 1;
1724 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1725 		BUG_ON(compressed);
1726 		ret = btrfs_mark_extent_written(trans, inode,
1727 						ordered_extent->file_offset,
1728 						ordered_extent->file_offset +
1729 						ordered_extent->len);
1730 		BUG_ON(ret);
1731 	} else {
1732 		ret = insert_reserved_file_extent(trans, inode,
1733 						ordered_extent->file_offset,
1734 						ordered_extent->start,
1735 						ordered_extent->disk_len,
1736 						ordered_extent->len,
1737 						ordered_extent->len,
1738 						compressed, 0, 0,
1739 						BTRFS_FILE_EXTENT_REG);
1740 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1741 				   ordered_extent->file_offset,
1742 				   ordered_extent->len);
1743 		BUG_ON(ret);
1744 	}
1745 	unlock_extent(io_tree, ordered_extent->file_offset,
1746 		    ordered_extent->file_offset + ordered_extent->len - 1,
1747 		    GFP_NOFS);
1748 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1749 			  &ordered_extent->list);
1750 
1751 	/* this also removes the ordered extent from the tree */
1752 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1753 	ret = btrfs_update_inode(trans, root, inode);
1754 	BUG_ON(ret);
1755 	btrfs_end_transaction(trans, root);
1756 out:
1757 	/* once for us */
1758 	btrfs_put_ordered_extent(ordered_extent);
1759 	/* once for the tree */
1760 	btrfs_put_ordered_extent(ordered_extent);
1761 
1762 	return 0;
1763 }
1764 
1765 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1766 				struct extent_state *state, int uptodate)
1767 {
1768 	ClearPagePrivate2(page);
1769 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1770 }
1771 
1772 /*
1773  * When IO fails, either with EIO or csum verification fails, we
1774  * try other mirrors that might have a good copy of the data.  This
1775  * io_failure_record is used to record state as we go through all the
1776  * mirrors.  If another mirror has good data, the page is set up to date
1777  * and things continue.  If a good mirror can't be found, the original
1778  * bio end_io callback is called to indicate things have failed.
1779  */
1780 struct io_failure_record {
1781 	struct page *page;
1782 	u64 start;
1783 	u64 len;
1784 	u64 logical;
1785 	unsigned long bio_flags;
1786 	int last_mirror;
1787 };
1788 
1789 static int btrfs_io_failed_hook(struct bio *failed_bio,
1790 			 struct page *page, u64 start, u64 end,
1791 			 struct extent_state *state)
1792 {
1793 	struct io_failure_record *failrec = NULL;
1794 	u64 private;
1795 	struct extent_map *em;
1796 	struct inode *inode = page->mapping->host;
1797 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1798 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1799 	struct bio *bio;
1800 	int num_copies;
1801 	int ret;
1802 	int rw;
1803 	u64 logical;
1804 
1805 	ret = get_state_private(failure_tree, start, &private);
1806 	if (ret) {
1807 		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1808 		if (!failrec)
1809 			return -ENOMEM;
1810 		failrec->start = start;
1811 		failrec->len = end - start + 1;
1812 		failrec->last_mirror = 0;
1813 		failrec->bio_flags = 0;
1814 
1815 		read_lock(&em_tree->lock);
1816 		em = lookup_extent_mapping(em_tree, start, failrec->len);
1817 		if (em->start > start || em->start + em->len < start) {
1818 			free_extent_map(em);
1819 			em = NULL;
1820 		}
1821 		read_unlock(&em_tree->lock);
1822 
1823 		if (!em || IS_ERR(em)) {
1824 			kfree(failrec);
1825 			return -EIO;
1826 		}
1827 		logical = start - em->start;
1828 		logical = em->block_start + logical;
1829 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1830 			logical = em->block_start;
1831 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1832 		}
1833 		failrec->logical = logical;
1834 		free_extent_map(em);
1835 		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1836 				EXTENT_DIRTY, GFP_NOFS);
1837 		set_state_private(failure_tree, start,
1838 				 (u64)(unsigned long)failrec);
1839 	} else {
1840 		failrec = (struct io_failure_record *)(unsigned long)private;
1841 	}
1842 	num_copies = btrfs_num_copies(
1843 			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
1844 			      failrec->logical, failrec->len);
1845 	failrec->last_mirror++;
1846 	if (!state) {
1847 		spin_lock(&BTRFS_I(inode)->io_tree.lock);
1848 		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1849 						    failrec->start,
1850 						    EXTENT_LOCKED);
1851 		if (state && state->start != failrec->start)
1852 			state = NULL;
1853 		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1854 	}
1855 	if (!state || failrec->last_mirror > num_copies) {
1856 		set_state_private(failure_tree, failrec->start, 0);
1857 		clear_extent_bits(failure_tree, failrec->start,
1858 				  failrec->start + failrec->len - 1,
1859 				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1860 		kfree(failrec);
1861 		return -EIO;
1862 	}
1863 	bio = bio_alloc(GFP_NOFS, 1);
1864 	bio->bi_private = state;
1865 	bio->bi_end_io = failed_bio->bi_end_io;
1866 	bio->bi_sector = failrec->logical >> 9;
1867 	bio->bi_bdev = failed_bio->bi_bdev;
1868 	bio->bi_size = 0;
1869 
1870 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
1871 	if (failed_bio->bi_rw & (1 << BIO_RW))
1872 		rw = WRITE;
1873 	else
1874 		rw = READ;
1875 
1876 	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1877 						      failrec->last_mirror,
1878 						      failrec->bio_flags);
1879 	return 0;
1880 }
1881 
1882 /*
1883  * each time an IO finishes, we do a fast check in the IO failure tree
1884  * to see if we need to process or clean up an io_failure_record
1885  */
1886 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1887 {
1888 	u64 private;
1889 	u64 private_failure;
1890 	struct io_failure_record *failure;
1891 	int ret;
1892 
1893 	private = 0;
1894 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1895 			     (u64)-1, 1, EXTENT_DIRTY)) {
1896 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1897 					start, &private_failure);
1898 		if (ret == 0) {
1899 			failure = (struct io_failure_record *)(unsigned long)
1900 				   private_failure;
1901 			set_state_private(&BTRFS_I(inode)->io_failure_tree,
1902 					  failure->start, 0);
1903 			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1904 					  failure->start,
1905 					  failure->start + failure->len - 1,
1906 					  EXTENT_DIRTY | EXTENT_LOCKED,
1907 					  GFP_NOFS);
1908 			kfree(failure);
1909 		}
1910 	}
1911 	return 0;
1912 }
1913 
1914 /*
1915  * when reads are done, we need to check csums to verify the data is correct
1916  * if there's a match, we allow the bio to finish.  If not, we go through
1917  * the io_failure_record routines to find good copies
1918  */
1919 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1920 			       struct extent_state *state)
1921 {
1922 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1923 	struct inode *inode = page->mapping->host;
1924 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1925 	char *kaddr;
1926 	u64 private = ~(u32)0;
1927 	int ret;
1928 	struct btrfs_root *root = BTRFS_I(inode)->root;
1929 	u32 csum = ~(u32)0;
1930 
1931 	if (PageChecked(page)) {
1932 		ClearPageChecked(page);
1933 		goto good;
1934 	}
1935 
1936 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1937 		return 0;
1938 
1939 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1940 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1941 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1942 				  GFP_NOFS);
1943 		return 0;
1944 	}
1945 
1946 	if (state && state->start == start) {
1947 		private = state->private;
1948 		ret = 0;
1949 	} else {
1950 		ret = get_state_private(io_tree, start, &private);
1951 	}
1952 	kaddr = kmap_atomic(page, KM_USER0);
1953 	if (ret)
1954 		goto zeroit;
1955 
1956 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1957 	btrfs_csum_final(csum, (char *)&csum);
1958 	if (csum != private)
1959 		goto zeroit;
1960 
1961 	kunmap_atomic(kaddr, KM_USER0);
1962 good:
1963 	/* if the io failure tree for this inode is non-empty,
1964 	 * check to see if we've recovered from a failed IO
1965 	 */
1966 	btrfs_clean_io_failures(inode, start);
1967 	return 0;
1968 
1969 zeroit:
1970 	if (printk_ratelimit()) {
1971 		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1972 		       "private %llu\n", page->mapping->host->i_ino,
1973 		       (unsigned long long)start, csum,
1974 		       (unsigned long long)private);
1975 	}
1976 	memset(kaddr + offset, 1, end - start + 1);
1977 	flush_dcache_page(page);
1978 	kunmap_atomic(kaddr, KM_USER0);
1979 	if (private == 0)
1980 		return 0;
1981 	return -EIO;
1982 }
1983 
1984 struct delayed_iput {
1985 	struct list_head list;
1986 	struct inode *inode;
1987 };
1988 
1989 void btrfs_add_delayed_iput(struct inode *inode)
1990 {
1991 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1992 	struct delayed_iput *delayed;
1993 
1994 	if (atomic_add_unless(&inode->i_count, -1, 1))
1995 		return;
1996 
1997 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
1998 	delayed->inode = inode;
1999 
2000 	spin_lock(&fs_info->delayed_iput_lock);
2001 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2002 	spin_unlock(&fs_info->delayed_iput_lock);
2003 }
2004 
2005 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2006 {
2007 	LIST_HEAD(list);
2008 	struct btrfs_fs_info *fs_info = root->fs_info;
2009 	struct delayed_iput *delayed;
2010 	int empty;
2011 
2012 	spin_lock(&fs_info->delayed_iput_lock);
2013 	empty = list_empty(&fs_info->delayed_iputs);
2014 	spin_unlock(&fs_info->delayed_iput_lock);
2015 	if (empty)
2016 		return;
2017 
2018 	down_read(&root->fs_info->cleanup_work_sem);
2019 	spin_lock(&fs_info->delayed_iput_lock);
2020 	list_splice_init(&fs_info->delayed_iputs, &list);
2021 	spin_unlock(&fs_info->delayed_iput_lock);
2022 
2023 	while (!list_empty(&list)) {
2024 		delayed = list_entry(list.next, struct delayed_iput, list);
2025 		list_del(&delayed->list);
2026 		iput(delayed->inode);
2027 		kfree(delayed);
2028 	}
2029 	up_read(&root->fs_info->cleanup_work_sem);
2030 }
2031 
2032 /*
2033  * This creates an orphan entry for the given inode in case something goes
2034  * wrong in the middle of an unlink/truncate.
2035  */
2036 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2037 {
2038 	struct btrfs_root *root = BTRFS_I(inode)->root;
2039 	int ret = 0;
2040 
2041 	spin_lock(&root->list_lock);
2042 
2043 	/* already on the orphan list, we're good */
2044 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2045 		spin_unlock(&root->list_lock);
2046 		return 0;
2047 	}
2048 
2049 	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2050 
2051 	spin_unlock(&root->list_lock);
2052 
2053 	/*
2054 	 * insert an orphan item to track this unlinked/truncated file
2055 	 */
2056 	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2057 
2058 	return ret;
2059 }
2060 
2061 /*
2062  * We have done the truncate/delete so we can go ahead and remove the orphan
2063  * item for this particular inode.
2064  */
2065 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2066 {
2067 	struct btrfs_root *root = BTRFS_I(inode)->root;
2068 	int ret = 0;
2069 
2070 	spin_lock(&root->list_lock);
2071 
2072 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2073 		spin_unlock(&root->list_lock);
2074 		return 0;
2075 	}
2076 
2077 	list_del_init(&BTRFS_I(inode)->i_orphan);
2078 	if (!trans) {
2079 		spin_unlock(&root->list_lock);
2080 		return 0;
2081 	}
2082 
2083 	spin_unlock(&root->list_lock);
2084 
2085 	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2086 
2087 	return ret;
2088 }
2089 
2090 /*
2091  * this cleans up any orphans that may be left on the list from the last use
2092  * of this root.
2093  */
2094 void btrfs_orphan_cleanup(struct btrfs_root *root)
2095 {
2096 	struct btrfs_path *path;
2097 	struct extent_buffer *leaf;
2098 	struct btrfs_item *item;
2099 	struct btrfs_key key, found_key;
2100 	struct btrfs_trans_handle *trans;
2101 	struct inode *inode;
2102 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2103 
2104 	if (!xchg(&root->clean_orphans, 0))
2105 		return;
2106 
2107 	path = btrfs_alloc_path();
2108 	BUG_ON(!path);
2109 	path->reada = -1;
2110 
2111 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2112 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2113 	key.offset = (u64)-1;
2114 
2115 	while (1) {
2116 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2117 		if (ret < 0) {
2118 			printk(KERN_ERR "Error searching slot for orphan: %d"
2119 			       "\n", ret);
2120 			break;
2121 		}
2122 
2123 		/*
2124 		 * if ret == 0 means we found what we were searching for, which
2125 		 * is weird, but possible, so only screw with path if we didnt
2126 		 * find the key and see if we have stuff that matches
2127 		 */
2128 		if (ret > 0) {
2129 			if (path->slots[0] == 0)
2130 				break;
2131 			path->slots[0]--;
2132 		}
2133 
2134 		/* pull out the item */
2135 		leaf = path->nodes[0];
2136 		item = btrfs_item_nr(leaf, path->slots[0]);
2137 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2138 
2139 		/* make sure the item matches what we want */
2140 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2141 			break;
2142 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2143 			break;
2144 
2145 		/* release the path since we're done with it */
2146 		btrfs_release_path(root, path);
2147 
2148 		/*
2149 		 * this is where we are basically btrfs_lookup, without the
2150 		 * crossing root thing.  we store the inode number in the
2151 		 * offset of the orphan item.
2152 		 */
2153 		found_key.objectid = found_key.offset;
2154 		found_key.type = BTRFS_INODE_ITEM_KEY;
2155 		found_key.offset = 0;
2156 		inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2157 		if (IS_ERR(inode))
2158 			break;
2159 
2160 		/*
2161 		 * add this inode to the orphan list so btrfs_orphan_del does
2162 		 * the proper thing when we hit it
2163 		 */
2164 		spin_lock(&root->list_lock);
2165 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2166 		spin_unlock(&root->list_lock);
2167 
2168 		/*
2169 		 * if this is a bad inode, means we actually succeeded in
2170 		 * removing the inode, but not the orphan record, which means
2171 		 * we need to manually delete the orphan since iput will just
2172 		 * do a destroy_inode
2173 		 */
2174 		if (is_bad_inode(inode)) {
2175 			trans = btrfs_start_transaction(root, 1);
2176 			btrfs_orphan_del(trans, inode);
2177 			btrfs_end_transaction(trans, root);
2178 			iput(inode);
2179 			continue;
2180 		}
2181 
2182 		/* if we have links, this was a truncate, lets do that */
2183 		if (inode->i_nlink) {
2184 			nr_truncate++;
2185 			btrfs_truncate(inode);
2186 		} else {
2187 			nr_unlink++;
2188 		}
2189 
2190 		/* this will do delete_inode and everything for us */
2191 		iput(inode);
2192 	}
2193 
2194 	if (nr_unlink)
2195 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2196 	if (nr_truncate)
2197 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2198 
2199 	btrfs_free_path(path);
2200 }
2201 
2202 /*
2203  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2204  * don't find any xattrs, we know there can't be any acls.
2205  *
2206  * slot is the slot the inode is in, objectid is the objectid of the inode
2207  */
2208 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2209 					  int slot, u64 objectid)
2210 {
2211 	u32 nritems = btrfs_header_nritems(leaf);
2212 	struct btrfs_key found_key;
2213 	int scanned = 0;
2214 
2215 	slot++;
2216 	while (slot < nritems) {
2217 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2218 
2219 		/* we found a different objectid, there must not be acls */
2220 		if (found_key.objectid != objectid)
2221 			return 0;
2222 
2223 		/* we found an xattr, assume we've got an acl */
2224 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2225 			return 1;
2226 
2227 		/*
2228 		 * we found a key greater than an xattr key, there can't
2229 		 * be any acls later on
2230 		 */
2231 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2232 			return 0;
2233 
2234 		slot++;
2235 		scanned++;
2236 
2237 		/*
2238 		 * it goes inode, inode backrefs, xattrs, extents,
2239 		 * so if there are a ton of hard links to an inode there can
2240 		 * be a lot of backrefs.  Don't waste time searching too hard,
2241 		 * this is just an optimization
2242 		 */
2243 		if (scanned >= 8)
2244 			break;
2245 	}
2246 	/* we hit the end of the leaf before we found an xattr or
2247 	 * something larger than an xattr.  We have to assume the inode
2248 	 * has acls
2249 	 */
2250 	return 1;
2251 }
2252 
2253 /*
2254  * read an inode from the btree into the in-memory inode
2255  */
2256 static void btrfs_read_locked_inode(struct inode *inode)
2257 {
2258 	struct btrfs_path *path;
2259 	struct extent_buffer *leaf;
2260 	struct btrfs_inode_item *inode_item;
2261 	struct btrfs_timespec *tspec;
2262 	struct btrfs_root *root = BTRFS_I(inode)->root;
2263 	struct btrfs_key location;
2264 	int maybe_acls;
2265 	u64 alloc_group_block;
2266 	u32 rdev;
2267 	int ret;
2268 
2269 	path = btrfs_alloc_path();
2270 	BUG_ON(!path);
2271 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2272 
2273 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2274 	if (ret)
2275 		goto make_bad;
2276 
2277 	leaf = path->nodes[0];
2278 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2279 				    struct btrfs_inode_item);
2280 
2281 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2282 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2283 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2284 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2285 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2286 
2287 	tspec = btrfs_inode_atime(inode_item);
2288 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2289 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2290 
2291 	tspec = btrfs_inode_mtime(inode_item);
2292 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2293 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2294 
2295 	tspec = btrfs_inode_ctime(inode_item);
2296 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2297 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2298 
2299 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2300 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2301 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2302 	inode->i_generation = BTRFS_I(inode)->generation;
2303 	inode->i_rdev = 0;
2304 	rdev = btrfs_inode_rdev(leaf, inode_item);
2305 
2306 	BTRFS_I(inode)->index_cnt = (u64)-1;
2307 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2308 
2309 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2310 
2311 	/*
2312 	 * try to precache a NULL acl entry for files that don't have
2313 	 * any xattrs or acls
2314 	 */
2315 	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2316 	if (!maybe_acls)
2317 		cache_no_acl(inode);
2318 
2319 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2320 						alloc_group_block, 0);
2321 	btrfs_free_path(path);
2322 	inode_item = NULL;
2323 
2324 	switch (inode->i_mode & S_IFMT) {
2325 	case S_IFREG:
2326 		inode->i_mapping->a_ops = &btrfs_aops;
2327 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2328 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2329 		inode->i_fop = &btrfs_file_operations;
2330 		inode->i_op = &btrfs_file_inode_operations;
2331 		break;
2332 	case S_IFDIR:
2333 		inode->i_fop = &btrfs_dir_file_operations;
2334 		if (root == root->fs_info->tree_root)
2335 			inode->i_op = &btrfs_dir_ro_inode_operations;
2336 		else
2337 			inode->i_op = &btrfs_dir_inode_operations;
2338 		break;
2339 	case S_IFLNK:
2340 		inode->i_op = &btrfs_symlink_inode_operations;
2341 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2342 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2343 		break;
2344 	default:
2345 		inode->i_op = &btrfs_special_inode_operations;
2346 		init_special_inode(inode, inode->i_mode, rdev);
2347 		break;
2348 	}
2349 
2350 	btrfs_update_iflags(inode);
2351 	return;
2352 
2353 make_bad:
2354 	btrfs_free_path(path);
2355 	make_bad_inode(inode);
2356 }
2357 
2358 /*
2359  * given a leaf and an inode, copy the inode fields into the leaf
2360  */
2361 static void fill_inode_item(struct btrfs_trans_handle *trans,
2362 			    struct extent_buffer *leaf,
2363 			    struct btrfs_inode_item *item,
2364 			    struct inode *inode)
2365 {
2366 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2367 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2368 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2369 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2370 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2371 
2372 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2373 			       inode->i_atime.tv_sec);
2374 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2375 				inode->i_atime.tv_nsec);
2376 
2377 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2378 			       inode->i_mtime.tv_sec);
2379 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2380 				inode->i_mtime.tv_nsec);
2381 
2382 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2383 			       inode->i_ctime.tv_sec);
2384 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2385 				inode->i_ctime.tv_nsec);
2386 
2387 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2388 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2389 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2390 	btrfs_set_inode_transid(leaf, item, trans->transid);
2391 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2392 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2393 	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2394 }
2395 
2396 /*
2397  * copy everything in the in-memory inode into the btree.
2398  */
2399 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2400 				struct btrfs_root *root, struct inode *inode)
2401 {
2402 	struct btrfs_inode_item *inode_item;
2403 	struct btrfs_path *path;
2404 	struct extent_buffer *leaf;
2405 	int ret;
2406 
2407 	path = btrfs_alloc_path();
2408 	BUG_ON(!path);
2409 	path->leave_spinning = 1;
2410 	ret = btrfs_lookup_inode(trans, root, path,
2411 				 &BTRFS_I(inode)->location, 1);
2412 	if (ret) {
2413 		if (ret > 0)
2414 			ret = -ENOENT;
2415 		goto failed;
2416 	}
2417 
2418 	btrfs_unlock_up_safe(path, 1);
2419 	leaf = path->nodes[0];
2420 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2421 				  struct btrfs_inode_item);
2422 
2423 	fill_inode_item(trans, leaf, inode_item, inode);
2424 	btrfs_mark_buffer_dirty(leaf);
2425 	btrfs_set_inode_last_trans(trans, inode);
2426 	ret = 0;
2427 failed:
2428 	btrfs_free_path(path);
2429 	return ret;
2430 }
2431 
2432 
2433 /*
2434  * unlink helper that gets used here in inode.c and in the tree logging
2435  * recovery code.  It remove a link in a directory with a given name, and
2436  * also drops the back refs in the inode to the directory
2437  */
2438 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2439 		       struct btrfs_root *root,
2440 		       struct inode *dir, struct inode *inode,
2441 		       const char *name, int name_len)
2442 {
2443 	struct btrfs_path *path;
2444 	int ret = 0;
2445 	struct extent_buffer *leaf;
2446 	struct btrfs_dir_item *di;
2447 	struct btrfs_key key;
2448 	u64 index;
2449 
2450 	path = btrfs_alloc_path();
2451 	if (!path) {
2452 		ret = -ENOMEM;
2453 		goto err;
2454 	}
2455 
2456 	path->leave_spinning = 1;
2457 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2458 				    name, name_len, -1);
2459 	if (IS_ERR(di)) {
2460 		ret = PTR_ERR(di);
2461 		goto err;
2462 	}
2463 	if (!di) {
2464 		ret = -ENOENT;
2465 		goto err;
2466 	}
2467 	leaf = path->nodes[0];
2468 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2469 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2470 	if (ret)
2471 		goto err;
2472 	btrfs_release_path(root, path);
2473 
2474 	ret = btrfs_del_inode_ref(trans, root, name, name_len,
2475 				  inode->i_ino,
2476 				  dir->i_ino, &index);
2477 	if (ret) {
2478 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2479 		       "inode %lu parent %lu\n", name_len, name,
2480 		       inode->i_ino, dir->i_ino);
2481 		goto err;
2482 	}
2483 
2484 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2485 					 index, name, name_len, -1);
2486 	if (IS_ERR(di)) {
2487 		ret = PTR_ERR(di);
2488 		goto err;
2489 	}
2490 	if (!di) {
2491 		ret = -ENOENT;
2492 		goto err;
2493 	}
2494 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2495 	btrfs_release_path(root, path);
2496 
2497 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2498 					 inode, dir->i_ino);
2499 	BUG_ON(ret != 0 && ret != -ENOENT);
2500 
2501 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2502 					   dir, index);
2503 	BUG_ON(ret);
2504 err:
2505 	btrfs_free_path(path);
2506 	if (ret)
2507 		goto out;
2508 
2509 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2510 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2511 	btrfs_update_inode(trans, root, dir);
2512 	btrfs_drop_nlink(inode);
2513 	ret = btrfs_update_inode(trans, root, inode);
2514 out:
2515 	return ret;
2516 }
2517 
2518 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2519 {
2520 	struct btrfs_root *root;
2521 	struct btrfs_trans_handle *trans;
2522 	struct inode *inode = dentry->d_inode;
2523 	int ret;
2524 	unsigned long nr = 0;
2525 
2526 	root = BTRFS_I(dir)->root;
2527 
2528 	/*
2529 	 * 5 items for unlink inode
2530 	 * 1 for orphan
2531 	 */
2532 	ret = btrfs_reserve_metadata_space(root, 6);
2533 	if (ret)
2534 		return ret;
2535 
2536 	trans = btrfs_start_transaction(root, 1);
2537 	if (IS_ERR(trans)) {
2538 		btrfs_unreserve_metadata_space(root, 6);
2539 		return PTR_ERR(trans);
2540 	}
2541 
2542 	btrfs_set_trans_block_group(trans, dir);
2543 
2544 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2545 
2546 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2547 				 dentry->d_name.name, dentry->d_name.len);
2548 
2549 	if (inode->i_nlink == 0)
2550 		ret = btrfs_orphan_add(trans, inode);
2551 
2552 	nr = trans->blocks_used;
2553 
2554 	btrfs_end_transaction_throttle(trans, root);
2555 	btrfs_unreserve_metadata_space(root, 6);
2556 	btrfs_btree_balance_dirty(root, nr);
2557 	return ret;
2558 }
2559 
2560 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2561 			struct btrfs_root *root,
2562 			struct inode *dir, u64 objectid,
2563 			const char *name, int name_len)
2564 {
2565 	struct btrfs_path *path;
2566 	struct extent_buffer *leaf;
2567 	struct btrfs_dir_item *di;
2568 	struct btrfs_key key;
2569 	u64 index;
2570 	int ret;
2571 
2572 	path = btrfs_alloc_path();
2573 	if (!path)
2574 		return -ENOMEM;
2575 
2576 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2577 				   name, name_len, -1);
2578 	BUG_ON(!di || IS_ERR(di));
2579 
2580 	leaf = path->nodes[0];
2581 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2582 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2583 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2584 	BUG_ON(ret);
2585 	btrfs_release_path(root, path);
2586 
2587 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2588 				 objectid, root->root_key.objectid,
2589 				 dir->i_ino, &index, name, name_len);
2590 	if (ret < 0) {
2591 		BUG_ON(ret != -ENOENT);
2592 		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2593 						 name, name_len);
2594 		BUG_ON(!di || IS_ERR(di));
2595 
2596 		leaf = path->nodes[0];
2597 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2598 		btrfs_release_path(root, path);
2599 		index = key.offset;
2600 	}
2601 
2602 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2603 					 index, name, name_len, -1);
2604 	BUG_ON(!di || IS_ERR(di));
2605 
2606 	leaf = path->nodes[0];
2607 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2608 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2609 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2610 	BUG_ON(ret);
2611 	btrfs_release_path(root, path);
2612 
2613 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2614 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2615 	ret = btrfs_update_inode(trans, root, dir);
2616 	BUG_ON(ret);
2617 	dir->i_sb->s_dirt = 1;
2618 
2619 	btrfs_free_path(path);
2620 	return 0;
2621 }
2622 
2623 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2624 {
2625 	struct inode *inode = dentry->d_inode;
2626 	int err = 0;
2627 	int ret;
2628 	struct btrfs_root *root = BTRFS_I(dir)->root;
2629 	struct btrfs_trans_handle *trans;
2630 	unsigned long nr = 0;
2631 
2632 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2633 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2634 		return -ENOTEMPTY;
2635 
2636 	ret = btrfs_reserve_metadata_space(root, 5);
2637 	if (ret)
2638 		return ret;
2639 
2640 	trans = btrfs_start_transaction(root, 1);
2641 	if (IS_ERR(trans)) {
2642 		btrfs_unreserve_metadata_space(root, 5);
2643 		return PTR_ERR(trans);
2644 	}
2645 
2646 	btrfs_set_trans_block_group(trans, dir);
2647 
2648 	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2649 		err = btrfs_unlink_subvol(trans, root, dir,
2650 					  BTRFS_I(inode)->location.objectid,
2651 					  dentry->d_name.name,
2652 					  dentry->d_name.len);
2653 		goto out;
2654 	}
2655 
2656 	err = btrfs_orphan_add(trans, inode);
2657 	if (err)
2658 		goto out;
2659 
2660 	/* now the directory is empty */
2661 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2662 				 dentry->d_name.name, dentry->d_name.len);
2663 	if (!err)
2664 		btrfs_i_size_write(inode, 0);
2665 out:
2666 	nr = trans->blocks_used;
2667 	ret = btrfs_end_transaction_throttle(trans, root);
2668 	btrfs_unreserve_metadata_space(root, 5);
2669 	btrfs_btree_balance_dirty(root, nr);
2670 
2671 	if (ret && !err)
2672 		err = ret;
2673 	return err;
2674 }
2675 
2676 #if 0
2677 /*
2678  * when truncating bytes in a file, it is possible to avoid reading
2679  * the leaves that contain only checksum items.  This can be the
2680  * majority of the IO required to delete a large file, but it must
2681  * be done carefully.
2682  *
2683  * The keys in the level just above the leaves are checked to make sure
2684  * the lowest key in a given leaf is a csum key, and starts at an offset
2685  * after the new  size.
2686  *
2687  * Then the key for the next leaf is checked to make sure it also has
2688  * a checksum item for the same file.  If it does, we know our target leaf
2689  * contains only checksum items, and it can be safely freed without reading
2690  * it.
2691  *
2692  * This is just an optimization targeted at large files.  It may do
2693  * nothing.  It will return 0 unless things went badly.
2694  */
2695 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2696 				     struct btrfs_root *root,
2697 				     struct btrfs_path *path,
2698 				     struct inode *inode, u64 new_size)
2699 {
2700 	struct btrfs_key key;
2701 	int ret;
2702 	int nritems;
2703 	struct btrfs_key found_key;
2704 	struct btrfs_key other_key;
2705 	struct btrfs_leaf_ref *ref;
2706 	u64 leaf_gen;
2707 	u64 leaf_start;
2708 
2709 	path->lowest_level = 1;
2710 	key.objectid = inode->i_ino;
2711 	key.type = BTRFS_CSUM_ITEM_KEY;
2712 	key.offset = new_size;
2713 again:
2714 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2715 	if (ret < 0)
2716 		goto out;
2717 
2718 	if (path->nodes[1] == NULL) {
2719 		ret = 0;
2720 		goto out;
2721 	}
2722 	ret = 0;
2723 	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2724 	nritems = btrfs_header_nritems(path->nodes[1]);
2725 
2726 	if (!nritems)
2727 		goto out;
2728 
2729 	if (path->slots[1] >= nritems)
2730 		goto next_node;
2731 
2732 	/* did we find a key greater than anything we want to delete? */
2733 	if (found_key.objectid > inode->i_ino ||
2734 	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
2735 		goto out;
2736 
2737 	/* we check the next key in the node to make sure the leave contains
2738 	 * only checksum items.  This comparison doesn't work if our
2739 	 * leaf is the last one in the node
2740 	 */
2741 	if (path->slots[1] + 1 >= nritems) {
2742 next_node:
2743 		/* search forward from the last key in the node, this
2744 		 * will bring us into the next node in the tree
2745 		 */
2746 		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2747 
2748 		/* unlikely, but we inc below, so check to be safe */
2749 		if (found_key.offset == (u64)-1)
2750 			goto out;
2751 
2752 		/* search_forward needs a path with locks held, do the
2753 		 * search again for the original key.  It is possible
2754 		 * this will race with a balance and return a path that
2755 		 * we could modify, but this drop is just an optimization
2756 		 * and is allowed to miss some leaves.
2757 		 */
2758 		btrfs_release_path(root, path);
2759 		found_key.offset++;
2760 
2761 		/* setup a max key for search_forward */
2762 		other_key.offset = (u64)-1;
2763 		other_key.type = key.type;
2764 		other_key.objectid = key.objectid;
2765 
2766 		path->keep_locks = 1;
2767 		ret = btrfs_search_forward(root, &found_key, &other_key,
2768 					   path, 0, 0);
2769 		path->keep_locks = 0;
2770 		if (ret || found_key.objectid != key.objectid ||
2771 		    found_key.type != key.type) {
2772 			ret = 0;
2773 			goto out;
2774 		}
2775 
2776 		key.offset = found_key.offset;
2777 		btrfs_release_path(root, path);
2778 		cond_resched();
2779 		goto again;
2780 	}
2781 
2782 	/* we know there's one more slot after us in the tree,
2783 	 * read that key so we can verify it is also a checksum item
2784 	 */
2785 	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2786 
2787 	if (found_key.objectid < inode->i_ino)
2788 		goto next_key;
2789 
2790 	if (found_key.type != key.type || found_key.offset < new_size)
2791 		goto next_key;
2792 
2793 	/*
2794 	 * if the key for the next leaf isn't a csum key from this objectid,
2795 	 * we can't be sure there aren't good items inside this leaf.
2796 	 * Bail out
2797 	 */
2798 	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2799 		goto out;
2800 
2801 	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2802 	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2803 	/*
2804 	 * it is safe to delete this leaf, it contains only
2805 	 * csum items from this inode at an offset >= new_size
2806 	 */
2807 	ret = btrfs_del_leaf(trans, root, path, leaf_start);
2808 	BUG_ON(ret);
2809 
2810 	if (root->ref_cows && leaf_gen < trans->transid) {
2811 		ref = btrfs_alloc_leaf_ref(root, 0);
2812 		if (ref) {
2813 			ref->root_gen = root->root_key.offset;
2814 			ref->bytenr = leaf_start;
2815 			ref->owner = 0;
2816 			ref->generation = leaf_gen;
2817 			ref->nritems = 0;
2818 
2819 			btrfs_sort_leaf_ref(ref);
2820 
2821 			ret = btrfs_add_leaf_ref(root, ref, 0);
2822 			WARN_ON(ret);
2823 			btrfs_free_leaf_ref(root, ref);
2824 		} else {
2825 			WARN_ON(1);
2826 		}
2827 	}
2828 next_key:
2829 	btrfs_release_path(root, path);
2830 
2831 	if (other_key.objectid == inode->i_ino &&
2832 	    other_key.type == key.type && other_key.offset > key.offset) {
2833 		key.offset = other_key.offset;
2834 		cond_resched();
2835 		goto again;
2836 	}
2837 	ret = 0;
2838 out:
2839 	/* fixup any changes we've made to the path */
2840 	path->lowest_level = 0;
2841 	path->keep_locks = 0;
2842 	btrfs_release_path(root, path);
2843 	return ret;
2844 }
2845 
2846 #endif
2847 
2848 /*
2849  * this can truncate away extent items, csum items and directory items.
2850  * It starts at a high offset and removes keys until it can't find
2851  * any higher than new_size
2852  *
2853  * csum items that cross the new i_size are truncated to the new size
2854  * as well.
2855  *
2856  * min_type is the minimum key type to truncate down to.  If set to 0, this
2857  * will kill all the items on this inode, including the INODE_ITEM_KEY.
2858  */
2859 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2860 			       struct btrfs_root *root,
2861 			       struct inode *inode,
2862 			       u64 new_size, u32 min_type)
2863 {
2864 	struct btrfs_path *path;
2865 	struct extent_buffer *leaf;
2866 	struct btrfs_file_extent_item *fi;
2867 	struct btrfs_key key;
2868 	struct btrfs_key found_key;
2869 	u64 extent_start = 0;
2870 	u64 extent_num_bytes = 0;
2871 	u64 extent_offset = 0;
2872 	u64 item_end = 0;
2873 	u64 mask = root->sectorsize - 1;
2874 	u32 found_type = (u8)-1;
2875 	int found_extent;
2876 	int del_item;
2877 	int pending_del_nr = 0;
2878 	int pending_del_slot = 0;
2879 	int extent_type = -1;
2880 	int encoding;
2881 	int ret;
2882 	int err = 0;
2883 
2884 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
2885 
2886 	if (root->ref_cows)
2887 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2888 
2889 	path = btrfs_alloc_path();
2890 	BUG_ON(!path);
2891 	path->reada = -1;
2892 
2893 	key.objectid = inode->i_ino;
2894 	key.offset = (u64)-1;
2895 	key.type = (u8)-1;
2896 
2897 search_again:
2898 	path->leave_spinning = 1;
2899 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2900 	if (ret < 0) {
2901 		err = ret;
2902 		goto out;
2903 	}
2904 
2905 	if (ret > 0) {
2906 		/* there are no items in the tree for us to truncate, we're
2907 		 * done
2908 		 */
2909 		if (path->slots[0] == 0)
2910 			goto out;
2911 		path->slots[0]--;
2912 	}
2913 
2914 	while (1) {
2915 		fi = NULL;
2916 		leaf = path->nodes[0];
2917 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2918 		found_type = btrfs_key_type(&found_key);
2919 		encoding = 0;
2920 
2921 		if (found_key.objectid != inode->i_ino)
2922 			break;
2923 
2924 		if (found_type < min_type)
2925 			break;
2926 
2927 		item_end = found_key.offset;
2928 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
2929 			fi = btrfs_item_ptr(leaf, path->slots[0],
2930 					    struct btrfs_file_extent_item);
2931 			extent_type = btrfs_file_extent_type(leaf, fi);
2932 			encoding = btrfs_file_extent_compression(leaf, fi);
2933 			encoding |= btrfs_file_extent_encryption(leaf, fi);
2934 			encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2935 
2936 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2937 				item_end +=
2938 				    btrfs_file_extent_num_bytes(leaf, fi);
2939 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2940 				item_end += btrfs_file_extent_inline_len(leaf,
2941 									 fi);
2942 			}
2943 			item_end--;
2944 		}
2945 		if (found_type > min_type) {
2946 			del_item = 1;
2947 		} else {
2948 			if (item_end < new_size)
2949 				break;
2950 			if (found_key.offset >= new_size)
2951 				del_item = 1;
2952 			else
2953 				del_item = 0;
2954 		}
2955 		found_extent = 0;
2956 		/* FIXME, shrink the extent if the ref count is only 1 */
2957 		if (found_type != BTRFS_EXTENT_DATA_KEY)
2958 			goto delete;
2959 
2960 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2961 			u64 num_dec;
2962 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2963 			if (!del_item && !encoding) {
2964 				u64 orig_num_bytes =
2965 					btrfs_file_extent_num_bytes(leaf, fi);
2966 				extent_num_bytes = new_size -
2967 					found_key.offset + root->sectorsize - 1;
2968 				extent_num_bytes = extent_num_bytes &
2969 					~((u64)root->sectorsize - 1);
2970 				btrfs_set_file_extent_num_bytes(leaf, fi,
2971 							 extent_num_bytes);
2972 				num_dec = (orig_num_bytes -
2973 					   extent_num_bytes);
2974 				if (root->ref_cows && extent_start != 0)
2975 					inode_sub_bytes(inode, num_dec);
2976 				btrfs_mark_buffer_dirty(leaf);
2977 			} else {
2978 				extent_num_bytes =
2979 					btrfs_file_extent_disk_num_bytes(leaf,
2980 									 fi);
2981 				extent_offset = found_key.offset -
2982 					btrfs_file_extent_offset(leaf, fi);
2983 
2984 				/* FIXME blocksize != 4096 */
2985 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2986 				if (extent_start != 0) {
2987 					found_extent = 1;
2988 					if (root->ref_cows)
2989 						inode_sub_bytes(inode, num_dec);
2990 				}
2991 			}
2992 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2993 			/*
2994 			 * we can't truncate inline items that have had
2995 			 * special encodings
2996 			 */
2997 			if (!del_item &&
2998 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
2999 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3000 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3001 				u32 size = new_size - found_key.offset;
3002 
3003 				if (root->ref_cows) {
3004 					inode_sub_bytes(inode, item_end + 1 -
3005 							new_size);
3006 				}
3007 				size =
3008 				    btrfs_file_extent_calc_inline_size(size);
3009 				ret = btrfs_truncate_item(trans, root, path,
3010 							  size, 1);
3011 				BUG_ON(ret);
3012 			} else if (root->ref_cows) {
3013 				inode_sub_bytes(inode, item_end + 1 -
3014 						found_key.offset);
3015 			}
3016 		}
3017 delete:
3018 		if (del_item) {
3019 			if (!pending_del_nr) {
3020 				/* no pending yet, add ourselves */
3021 				pending_del_slot = path->slots[0];
3022 				pending_del_nr = 1;
3023 			} else if (pending_del_nr &&
3024 				   path->slots[0] + 1 == pending_del_slot) {
3025 				/* hop on the pending chunk */
3026 				pending_del_nr++;
3027 				pending_del_slot = path->slots[0];
3028 			} else {
3029 				BUG();
3030 			}
3031 		} else {
3032 			break;
3033 		}
3034 		if (found_extent && root->ref_cows) {
3035 			btrfs_set_path_blocking(path);
3036 			ret = btrfs_free_extent(trans, root, extent_start,
3037 						extent_num_bytes, 0,
3038 						btrfs_header_owner(leaf),
3039 						inode->i_ino, extent_offset);
3040 			BUG_ON(ret);
3041 		}
3042 
3043 		if (found_type == BTRFS_INODE_ITEM_KEY)
3044 			break;
3045 
3046 		if (path->slots[0] == 0 ||
3047 		    path->slots[0] != pending_del_slot) {
3048 			if (root->ref_cows) {
3049 				err = -EAGAIN;
3050 				goto out;
3051 			}
3052 			if (pending_del_nr) {
3053 				ret = btrfs_del_items(trans, root, path,
3054 						pending_del_slot,
3055 						pending_del_nr);
3056 				BUG_ON(ret);
3057 				pending_del_nr = 0;
3058 			}
3059 			btrfs_release_path(root, path);
3060 			goto search_again;
3061 		} else {
3062 			path->slots[0]--;
3063 		}
3064 	}
3065 out:
3066 	if (pending_del_nr) {
3067 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3068 				      pending_del_nr);
3069 	}
3070 	btrfs_free_path(path);
3071 	return err;
3072 }
3073 
3074 /*
3075  * taken from block_truncate_page, but does cow as it zeros out
3076  * any bytes left in the last page in the file.
3077  */
3078 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3079 {
3080 	struct inode *inode = mapping->host;
3081 	struct btrfs_root *root = BTRFS_I(inode)->root;
3082 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3083 	struct btrfs_ordered_extent *ordered;
3084 	char *kaddr;
3085 	u32 blocksize = root->sectorsize;
3086 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3087 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3088 	struct page *page;
3089 	int ret = 0;
3090 	u64 page_start;
3091 	u64 page_end;
3092 
3093 	if ((offset & (blocksize - 1)) == 0)
3094 		goto out;
3095 	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3096 	if (ret)
3097 		goto out;
3098 
3099 	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3100 	if (ret)
3101 		goto out;
3102 
3103 	ret = -ENOMEM;
3104 again:
3105 	page = grab_cache_page(mapping, index);
3106 	if (!page) {
3107 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3108 		btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3109 		goto out;
3110 	}
3111 
3112 	page_start = page_offset(page);
3113 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3114 
3115 	if (!PageUptodate(page)) {
3116 		ret = btrfs_readpage(NULL, page);
3117 		lock_page(page);
3118 		if (page->mapping != mapping) {
3119 			unlock_page(page);
3120 			page_cache_release(page);
3121 			goto again;
3122 		}
3123 		if (!PageUptodate(page)) {
3124 			ret = -EIO;
3125 			goto out_unlock;
3126 		}
3127 	}
3128 	wait_on_page_writeback(page);
3129 
3130 	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3131 	set_page_extent_mapped(page);
3132 
3133 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3134 	if (ordered) {
3135 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3136 		unlock_page(page);
3137 		page_cache_release(page);
3138 		btrfs_start_ordered_extent(inode, ordered, 1);
3139 		btrfs_put_ordered_extent(ordered);
3140 		goto again;
3141 	}
3142 
3143 	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
3144 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3145 			  GFP_NOFS);
3146 
3147 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3148 	if (ret) {
3149 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3150 		goto out_unlock;
3151 	}
3152 
3153 	ret = 0;
3154 	if (offset != PAGE_CACHE_SIZE) {
3155 		kaddr = kmap(page);
3156 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3157 		flush_dcache_page(page);
3158 		kunmap(page);
3159 	}
3160 	ClearPageChecked(page);
3161 	set_page_dirty(page);
3162 	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3163 
3164 out_unlock:
3165 	if (ret)
3166 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3167 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3168 	unlock_page(page);
3169 	page_cache_release(page);
3170 out:
3171 	return ret;
3172 }
3173 
3174 int btrfs_cont_expand(struct inode *inode, loff_t size)
3175 {
3176 	struct btrfs_trans_handle *trans;
3177 	struct btrfs_root *root = BTRFS_I(inode)->root;
3178 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3179 	struct extent_map *em;
3180 	u64 mask = root->sectorsize - 1;
3181 	u64 hole_start = (inode->i_size + mask) & ~mask;
3182 	u64 block_end = (size + mask) & ~mask;
3183 	u64 last_byte;
3184 	u64 cur_offset;
3185 	u64 hole_size;
3186 	int err = 0;
3187 
3188 	if (size <= hole_start)
3189 		return 0;
3190 
3191 	while (1) {
3192 		struct btrfs_ordered_extent *ordered;
3193 		btrfs_wait_ordered_range(inode, hole_start,
3194 					 block_end - hole_start);
3195 		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3196 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3197 		if (!ordered)
3198 			break;
3199 		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3200 		btrfs_put_ordered_extent(ordered);
3201 	}
3202 
3203 	cur_offset = hole_start;
3204 	while (1) {
3205 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3206 				block_end - cur_offset, 0);
3207 		BUG_ON(IS_ERR(em) || !em);
3208 		last_byte = min(extent_map_end(em), block_end);
3209 		last_byte = (last_byte + mask) & ~mask;
3210 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3211 			u64 hint_byte = 0;
3212 			hole_size = last_byte - cur_offset;
3213 
3214 			err = btrfs_reserve_metadata_space(root, 2);
3215 			if (err)
3216 				break;
3217 
3218 			trans = btrfs_start_transaction(root, 1);
3219 			btrfs_set_trans_block_group(trans, inode);
3220 
3221 			err = btrfs_drop_extents(trans, inode, cur_offset,
3222 						 cur_offset + hole_size,
3223 						 &hint_byte, 1);
3224 			BUG_ON(err);
3225 
3226 			err = btrfs_insert_file_extent(trans, root,
3227 					inode->i_ino, cur_offset, 0,
3228 					0, hole_size, 0, hole_size,
3229 					0, 0, 0);
3230 			BUG_ON(err);
3231 
3232 			btrfs_drop_extent_cache(inode, hole_start,
3233 					last_byte - 1, 0);
3234 
3235 			btrfs_end_transaction(trans, root);
3236 			btrfs_unreserve_metadata_space(root, 2);
3237 		}
3238 		free_extent_map(em);
3239 		cur_offset = last_byte;
3240 		if (cur_offset >= block_end)
3241 			break;
3242 	}
3243 
3244 	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3245 	return err;
3246 }
3247 
3248 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3249 {
3250 	struct btrfs_root *root = BTRFS_I(inode)->root;
3251 	struct btrfs_trans_handle *trans;
3252 	unsigned long nr;
3253 	int ret;
3254 
3255 	if (attr->ia_size == inode->i_size)
3256 		return 0;
3257 
3258 	if (attr->ia_size > inode->i_size) {
3259 		unsigned long limit;
3260 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3261 		if (attr->ia_size > inode->i_sb->s_maxbytes)
3262 			return -EFBIG;
3263 		if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3264 			send_sig(SIGXFSZ, current, 0);
3265 			return -EFBIG;
3266 		}
3267 	}
3268 
3269 	ret = btrfs_reserve_metadata_space(root, 1);
3270 	if (ret)
3271 		return ret;
3272 
3273 	trans = btrfs_start_transaction(root, 1);
3274 	btrfs_set_trans_block_group(trans, inode);
3275 
3276 	ret = btrfs_orphan_add(trans, inode);
3277 	BUG_ON(ret);
3278 
3279 	nr = trans->blocks_used;
3280 	btrfs_end_transaction(trans, root);
3281 	btrfs_unreserve_metadata_space(root, 1);
3282 	btrfs_btree_balance_dirty(root, nr);
3283 
3284 	if (attr->ia_size > inode->i_size) {
3285 		ret = btrfs_cont_expand(inode, attr->ia_size);
3286 		if (ret) {
3287 			btrfs_truncate(inode);
3288 			return ret;
3289 		}
3290 
3291 		i_size_write(inode, attr->ia_size);
3292 		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3293 
3294 		trans = btrfs_start_transaction(root, 1);
3295 		btrfs_set_trans_block_group(trans, inode);
3296 
3297 		ret = btrfs_update_inode(trans, root, inode);
3298 		BUG_ON(ret);
3299 		if (inode->i_nlink > 0) {
3300 			ret = btrfs_orphan_del(trans, inode);
3301 			BUG_ON(ret);
3302 		}
3303 		nr = trans->blocks_used;
3304 		btrfs_end_transaction(trans, root);
3305 		btrfs_btree_balance_dirty(root, nr);
3306 		return 0;
3307 	}
3308 
3309 	/*
3310 	 * We're truncating a file that used to have good data down to
3311 	 * zero. Make sure it gets into the ordered flush list so that
3312 	 * any new writes get down to disk quickly.
3313 	 */
3314 	if (attr->ia_size == 0)
3315 		BTRFS_I(inode)->ordered_data_close = 1;
3316 
3317 	/* we don't support swapfiles, so vmtruncate shouldn't fail */
3318 	ret = vmtruncate(inode, attr->ia_size);
3319 	BUG_ON(ret);
3320 
3321 	return 0;
3322 }
3323 
3324 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3325 {
3326 	struct inode *inode = dentry->d_inode;
3327 	int err;
3328 
3329 	err = inode_change_ok(inode, attr);
3330 	if (err)
3331 		return err;
3332 
3333 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3334 		err = btrfs_setattr_size(inode, attr);
3335 		if (err)
3336 			return err;
3337 	}
3338 	attr->ia_valid &= ~ATTR_SIZE;
3339 
3340 	if (attr->ia_valid)
3341 		err = inode_setattr(inode, attr);
3342 
3343 	if (!err && ((attr->ia_valid & ATTR_MODE)))
3344 		err = btrfs_acl_chmod(inode);
3345 	return err;
3346 }
3347 
3348 void btrfs_delete_inode(struct inode *inode)
3349 {
3350 	struct btrfs_trans_handle *trans;
3351 	struct btrfs_root *root = BTRFS_I(inode)->root;
3352 	unsigned long nr;
3353 	int ret;
3354 
3355 	truncate_inode_pages(&inode->i_data, 0);
3356 	if (is_bad_inode(inode)) {
3357 		btrfs_orphan_del(NULL, inode);
3358 		goto no_delete;
3359 	}
3360 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3361 
3362 	if (root->fs_info->log_root_recovering) {
3363 		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3364 		goto no_delete;
3365 	}
3366 
3367 	if (inode->i_nlink > 0) {
3368 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3369 		goto no_delete;
3370 	}
3371 
3372 	btrfs_i_size_write(inode, 0);
3373 
3374 	while (1) {
3375 		trans = btrfs_start_transaction(root, 1);
3376 		btrfs_set_trans_block_group(trans, inode);
3377 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3378 
3379 		if (ret != -EAGAIN)
3380 			break;
3381 
3382 		nr = trans->blocks_used;
3383 		btrfs_end_transaction(trans, root);
3384 		trans = NULL;
3385 		btrfs_btree_balance_dirty(root, nr);
3386 	}
3387 
3388 	if (ret == 0) {
3389 		ret = btrfs_orphan_del(trans, inode);
3390 		BUG_ON(ret);
3391 	}
3392 
3393 	nr = trans->blocks_used;
3394 	btrfs_end_transaction(trans, root);
3395 	btrfs_btree_balance_dirty(root, nr);
3396 no_delete:
3397 	clear_inode(inode);
3398 	return;
3399 }
3400 
3401 /*
3402  * this returns the key found in the dir entry in the location pointer.
3403  * If no dir entries were found, location->objectid is 0.
3404  */
3405 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3406 			       struct btrfs_key *location)
3407 {
3408 	const char *name = dentry->d_name.name;
3409 	int namelen = dentry->d_name.len;
3410 	struct btrfs_dir_item *di;
3411 	struct btrfs_path *path;
3412 	struct btrfs_root *root = BTRFS_I(dir)->root;
3413 	int ret = 0;
3414 
3415 	path = btrfs_alloc_path();
3416 	BUG_ON(!path);
3417 
3418 	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3419 				    namelen, 0);
3420 	if (IS_ERR(di))
3421 		ret = PTR_ERR(di);
3422 
3423 	if (!di || IS_ERR(di))
3424 		goto out_err;
3425 
3426 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3427 out:
3428 	btrfs_free_path(path);
3429 	return ret;
3430 out_err:
3431 	location->objectid = 0;
3432 	goto out;
3433 }
3434 
3435 /*
3436  * when we hit a tree root in a directory, the btrfs part of the inode
3437  * needs to be changed to reflect the root directory of the tree root.  This
3438  * is kind of like crossing a mount point.
3439  */
3440 static int fixup_tree_root_location(struct btrfs_root *root,
3441 				    struct inode *dir,
3442 				    struct dentry *dentry,
3443 				    struct btrfs_key *location,
3444 				    struct btrfs_root **sub_root)
3445 {
3446 	struct btrfs_path *path;
3447 	struct btrfs_root *new_root;
3448 	struct btrfs_root_ref *ref;
3449 	struct extent_buffer *leaf;
3450 	int ret;
3451 	int err = 0;
3452 
3453 	path = btrfs_alloc_path();
3454 	if (!path) {
3455 		err = -ENOMEM;
3456 		goto out;
3457 	}
3458 
3459 	err = -ENOENT;
3460 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3461 				  BTRFS_I(dir)->root->root_key.objectid,
3462 				  location->objectid);
3463 	if (ret) {
3464 		if (ret < 0)
3465 			err = ret;
3466 		goto out;
3467 	}
3468 
3469 	leaf = path->nodes[0];
3470 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3471 	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3472 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3473 		goto out;
3474 
3475 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3476 				   (unsigned long)(ref + 1),
3477 				   dentry->d_name.len);
3478 	if (ret)
3479 		goto out;
3480 
3481 	btrfs_release_path(root->fs_info->tree_root, path);
3482 
3483 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3484 	if (IS_ERR(new_root)) {
3485 		err = PTR_ERR(new_root);
3486 		goto out;
3487 	}
3488 
3489 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3490 		err = -ENOENT;
3491 		goto out;
3492 	}
3493 
3494 	*sub_root = new_root;
3495 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3496 	location->type = BTRFS_INODE_ITEM_KEY;
3497 	location->offset = 0;
3498 	err = 0;
3499 out:
3500 	btrfs_free_path(path);
3501 	return err;
3502 }
3503 
3504 static void inode_tree_add(struct inode *inode)
3505 {
3506 	struct btrfs_root *root = BTRFS_I(inode)->root;
3507 	struct btrfs_inode *entry;
3508 	struct rb_node **p;
3509 	struct rb_node *parent;
3510 again:
3511 	p = &root->inode_tree.rb_node;
3512 	parent = NULL;
3513 
3514 	if (hlist_unhashed(&inode->i_hash))
3515 		return;
3516 
3517 	spin_lock(&root->inode_lock);
3518 	while (*p) {
3519 		parent = *p;
3520 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3521 
3522 		if (inode->i_ino < entry->vfs_inode.i_ino)
3523 			p = &parent->rb_left;
3524 		else if (inode->i_ino > entry->vfs_inode.i_ino)
3525 			p = &parent->rb_right;
3526 		else {
3527 			WARN_ON(!(entry->vfs_inode.i_state &
3528 				  (I_WILL_FREE | I_FREEING | I_CLEAR)));
3529 			rb_erase(parent, &root->inode_tree);
3530 			RB_CLEAR_NODE(parent);
3531 			spin_unlock(&root->inode_lock);
3532 			goto again;
3533 		}
3534 	}
3535 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3536 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3537 	spin_unlock(&root->inode_lock);
3538 }
3539 
3540 static void inode_tree_del(struct inode *inode)
3541 {
3542 	struct btrfs_root *root = BTRFS_I(inode)->root;
3543 	int empty = 0;
3544 
3545 	spin_lock(&root->inode_lock);
3546 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3547 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3548 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3549 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3550 	}
3551 	spin_unlock(&root->inode_lock);
3552 
3553 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
3554 		synchronize_srcu(&root->fs_info->subvol_srcu);
3555 		spin_lock(&root->inode_lock);
3556 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3557 		spin_unlock(&root->inode_lock);
3558 		if (empty)
3559 			btrfs_add_dead_root(root);
3560 	}
3561 }
3562 
3563 int btrfs_invalidate_inodes(struct btrfs_root *root)
3564 {
3565 	struct rb_node *node;
3566 	struct rb_node *prev;
3567 	struct btrfs_inode *entry;
3568 	struct inode *inode;
3569 	u64 objectid = 0;
3570 
3571 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3572 
3573 	spin_lock(&root->inode_lock);
3574 again:
3575 	node = root->inode_tree.rb_node;
3576 	prev = NULL;
3577 	while (node) {
3578 		prev = node;
3579 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3580 
3581 		if (objectid < entry->vfs_inode.i_ino)
3582 			node = node->rb_left;
3583 		else if (objectid > entry->vfs_inode.i_ino)
3584 			node = node->rb_right;
3585 		else
3586 			break;
3587 	}
3588 	if (!node) {
3589 		while (prev) {
3590 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3591 			if (objectid <= entry->vfs_inode.i_ino) {
3592 				node = prev;
3593 				break;
3594 			}
3595 			prev = rb_next(prev);
3596 		}
3597 	}
3598 	while (node) {
3599 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3600 		objectid = entry->vfs_inode.i_ino + 1;
3601 		inode = igrab(&entry->vfs_inode);
3602 		if (inode) {
3603 			spin_unlock(&root->inode_lock);
3604 			if (atomic_read(&inode->i_count) > 1)
3605 				d_prune_aliases(inode);
3606 			/*
3607 			 * btrfs_drop_inode will remove it from
3608 			 * the inode cache when its usage count
3609 			 * hits zero.
3610 			 */
3611 			iput(inode);
3612 			cond_resched();
3613 			spin_lock(&root->inode_lock);
3614 			goto again;
3615 		}
3616 
3617 		if (cond_resched_lock(&root->inode_lock))
3618 			goto again;
3619 
3620 		node = rb_next(node);
3621 	}
3622 	spin_unlock(&root->inode_lock);
3623 	return 0;
3624 }
3625 
3626 static noinline void init_btrfs_i(struct inode *inode)
3627 {
3628 	struct btrfs_inode *bi = BTRFS_I(inode);
3629 
3630 	bi->generation = 0;
3631 	bi->sequence = 0;
3632 	bi->last_trans = 0;
3633 	bi->last_sub_trans = 0;
3634 	bi->logged_trans = 0;
3635 	bi->delalloc_bytes = 0;
3636 	bi->reserved_bytes = 0;
3637 	bi->disk_i_size = 0;
3638 	bi->flags = 0;
3639 	bi->index_cnt = (u64)-1;
3640 	bi->last_unlink_trans = 0;
3641 	bi->ordered_data_close = 0;
3642 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3643 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3644 			     inode->i_mapping, GFP_NOFS);
3645 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3646 			     inode->i_mapping, GFP_NOFS);
3647 	INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3648 	INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3649 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3650 	btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3651 	mutex_init(&BTRFS_I(inode)->log_mutex);
3652 }
3653 
3654 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3655 {
3656 	struct btrfs_iget_args *args = p;
3657 	inode->i_ino = args->ino;
3658 	init_btrfs_i(inode);
3659 	BTRFS_I(inode)->root = args->root;
3660 	btrfs_set_inode_space_info(args->root, inode);
3661 	return 0;
3662 }
3663 
3664 static int btrfs_find_actor(struct inode *inode, void *opaque)
3665 {
3666 	struct btrfs_iget_args *args = opaque;
3667 	return args->ino == inode->i_ino &&
3668 		args->root == BTRFS_I(inode)->root;
3669 }
3670 
3671 static struct inode *btrfs_iget_locked(struct super_block *s,
3672 				       u64 objectid,
3673 				       struct btrfs_root *root)
3674 {
3675 	struct inode *inode;
3676 	struct btrfs_iget_args args;
3677 	args.ino = objectid;
3678 	args.root = root;
3679 
3680 	inode = iget5_locked(s, objectid, btrfs_find_actor,
3681 			     btrfs_init_locked_inode,
3682 			     (void *)&args);
3683 	return inode;
3684 }
3685 
3686 /* Get an inode object given its location and corresponding root.
3687  * Returns in *is_new if the inode was read from disk
3688  */
3689 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3690 			 struct btrfs_root *root)
3691 {
3692 	struct inode *inode;
3693 
3694 	inode = btrfs_iget_locked(s, location->objectid, root);
3695 	if (!inode)
3696 		return ERR_PTR(-ENOMEM);
3697 
3698 	if (inode->i_state & I_NEW) {
3699 		BTRFS_I(inode)->root = root;
3700 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3701 		btrfs_read_locked_inode(inode);
3702 
3703 		inode_tree_add(inode);
3704 		unlock_new_inode(inode);
3705 	}
3706 
3707 	return inode;
3708 }
3709 
3710 static struct inode *new_simple_dir(struct super_block *s,
3711 				    struct btrfs_key *key,
3712 				    struct btrfs_root *root)
3713 {
3714 	struct inode *inode = new_inode(s);
3715 
3716 	if (!inode)
3717 		return ERR_PTR(-ENOMEM);
3718 
3719 	init_btrfs_i(inode);
3720 
3721 	BTRFS_I(inode)->root = root;
3722 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3723 	BTRFS_I(inode)->dummy_inode = 1;
3724 
3725 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3726 	inode->i_op = &simple_dir_inode_operations;
3727 	inode->i_fop = &simple_dir_operations;
3728 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3729 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3730 
3731 	return inode;
3732 }
3733 
3734 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3735 {
3736 	struct inode *inode;
3737 	struct btrfs_root *root = BTRFS_I(dir)->root;
3738 	struct btrfs_root *sub_root = root;
3739 	struct btrfs_key location;
3740 	int index;
3741 	int ret;
3742 
3743 	dentry->d_op = &btrfs_dentry_operations;
3744 
3745 	if (dentry->d_name.len > BTRFS_NAME_LEN)
3746 		return ERR_PTR(-ENAMETOOLONG);
3747 
3748 	ret = btrfs_inode_by_name(dir, dentry, &location);
3749 
3750 	if (ret < 0)
3751 		return ERR_PTR(ret);
3752 
3753 	if (location.objectid == 0)
3754 		return NULL;
3755 
3756 	if (location.type == BTRFS_INODE_ITEM_KEY) {
3757 		inode = btrfs_iget(dir->i_sb, &location, root);
3758 		return inode;
3759 	}
3760 
3761 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3762 
3763 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
3764 	ret = fixup_tree_root_location(root, dir, dentry,
3765 				       &location, &sub_root);
3766 	if (ret < 0) {
3767 		if (ret != -ENOENT)
3768 			inode = ERR_PTR(ret);
3769 		else
3770 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
3771 	} else {
3772 		inode = btrfs_iget(dir->i_sb, &location, sub_root);
3773 	}
3774 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3775 
3776 	if (root != sub_root) {
3777 		down_read(&root->fs_info->cleanup_work_sem);
3778 		if (!(inode->i_sb->s_flags & MS_RDONLY))
3779 			btrfs_orphan_cleanup(sub_root);
3780 		up_read(&root->fs_info->cleanup_work_sem);
3781 	}
3782 
3783 	return inode;
3784 }
3785 
3786 static int btrfs_dentry_delete(struct dentry *dentry)
3787 {
3788 	struct btrfs_root *root;
3789 
3790 	if (!dentry->d_inode && !IS_ROOT(dentry))
3791 		dentry = dentry->d_parent;
3792 
3793 	if (dentry->d_inode) {
3794 		root = BTRFS_I(dentry->d_inode)->root;
3795 		if (btrfs_root_refs(&root->root_item) == 0)
3796 			return 1;
3797 	}
3798 	return 0;
3799 }
3800 
3801 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3802 				   struct nameidata *nd)
3803 {
3804 	struct inode *inode;
3805 
3806 	inode = btrfs_lookup_dentry(dir, dentry);
3807 	if (IS_ERR(inode))
3808 		return ERR_CAST(inode);
3809 
3810 	return d_splice_alias(inode, dentry);
3811 }
3812 
3813 static unsigned char btrfs_filetype_table[] = {
3814 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3815 };
3816 
3817 static int btrfs_real_readdir(struct file *filp, void *dirent,
3818 			      filldir_t filldir)
3819 {
3820 	struct inode *inode = filp->f_dentry->d_inode;
3821 	struct btrfs_root *root = BTRFS_I(inode)->root;
3822 	struct btrfs_item *item;
3823 	struct btrfs_dir_item *di;
3824 	struct btrfs_key key;
3825 	struct btrfs_key found_key;
3826 	struct btrfs_path *path;
3827 	int ret;
3828 	u32 nritems;
3829 	struct extent_buffer *leaf;
3830 	int slot;
3831 	int advance;
3832 	unsigned char d_type;
3833 	int over = 0;
3834 	u32 di_cur;
3835 	u32 di_total;
3836 	u32 di_len;
3837 	int key_type = BTRFS_DIR_INDEX_KEY;
3838 	char tmp_name[32];
3839 	char *name_ptr;
3840 	int name_len;
3841 
3842 	/* FIXME, use a real flag for deciding about the key type */
3843 	if (root->fs_info->tree_root == root)
3844 		key_type = BTRFS_DIR_ITEM_KEY;
3845 
3846 	/* special case for "." */
3847 	if (filp->f_pos == 0) {
3848 		over = filldir(dirent, ".", 1,
3849 			       1, inode->i_ino,
3850 			       DT_DIR);
3851 		if (over)
3852 			return 0;
3853 		filp->f_pos = 1;
3854 	}
3855 	/* special case for .., just use the back ref */
3856 	if (filp->f_pos == 1) {
3857 		u64 pino = parent_ino(filp->f_path.dentry);
3858 		over = filldir(dirent, "..", 2,
3859 			       2, pino, DT_DIR);
3860 		if (over)
3861 			return 0;
3862 		filp->f_pos = 2;
3863 	}
3864 	path = btrfs_alloc_path();
3865 	path->reada = 2;
3866 
3867 	btrfs_set_key_type(&key, key_type);
3868 	key.offset = filp->f_pos;
3869 	key.objectid = inode->i_ino;
3870 
3871 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3872 	if (ret < 0)
3873 		goto err;
3874 	advance = 0;
3875 
3876 	while (1) {
3877 		leaf = path->nodes[0];
3878 		nritems = btrfs_header_nritems(leaf);
3879 		slot = path->slots[0];
3880 		if (advance || slot >= nritems) {
3881 			if (slot >= nritems - 1) {
3882 				ret = btrfs_next_leaf(root, path);
3883 				if (ret)
3884 					break;
3885 				leaf = path->nodes[0];
3886 				nritems = btrfs_header_nritems(leaf);
3887 				slot = path->slots[0];
3888 			} else {
3889 				slot++;
3890 				path->slots[0]++;
3891 			}
3892 		}
3893 
3894 		advance = 1;
3895 		item = btrfs_item_nr(leaf, slot);
3896 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3897 
3898 		if (found_key.objectid != key.objectid)
3899 			break;
3900 		if (btrfs_key_type(&found_key) != key_type)
3901 			break;
3902 		if (found_key.offset < filp->f_pos)
3903 			continue;
3904 
3905 		filp->f_pos = found_key.offset;
3906 
3907 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3908 		di_cur = 0;
3909 		di_total = btrfs_item_size(leaf, item);
3910 
3911 		while (di_cur < di_total) {
3912 			struct btrfs_key location;
3913 
3914 			name_len = btrfs_dir_name_len(leaf, di);
3915 			if (name_len <= sizeof(tmp_name)) {
3916 				name_ptr = tmp_name;
3917 			} else {
3918 				name_ptr = kmalloc(name_len, GFP_NOFS);
3919 				if (!name_ptr) {
3920 					ret = -ENOMEM;
3921 					goto err;
3922 				}
3923 			}
3924 			read_extent_buffer(leaf, name_ptr,
3925 					   (unsigned long)(di + 1), name_len);
3926 
3927 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3928 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
3929 
3930 			/* is this a reference to our own snapshot? If so
3931 			 * skip it
3932 			 */
3933 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
3934 			    location.objectid == root->root_key.objectid) {
3935 				over = 0;
3936 				goto skip;
3937 			}
3938 			over = filldir(dirent, name_ptr, name_len,
3939 				       found_key.offset, location.objectid,
3940 				       d_type);
3941 
3942 skip:
3943 			if (name_ptr != tmp_name)
3944 				kfree(name_ptr);
3945 
3946 			if (over)
3947 				goto nopos;
3948 			di_len = btrfs_dir_name_len(leaf, di) +
3949 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3950 			di_cur += di_len;
3951 			di = (struct btrfs_dir_item *)((char *)di + di_len);
3952 		}
3953 	}
3954 
3955 	/* Reached end of directory/root. Bump pos past the last item. */
3956 	if (key_type == BTRFS_DIR_INDEX_KEY)
3957 		/*
3958 		 * 32-bit glibc will use getdents64, but then strtol -
3959 		 * so the last number we can serve is this.
3960 		 */
3961 		filp->f_pos = 0x7fffffff;
3962 	else
3963 		filp->f_pos++;
3964 nopos:
3965 	ret = 0;
3966 err:
3967 	btrfs_free_path(path);
3968 	return ret;
3969 }
3970 
3971 int btrfs_write_inode(struct inode *inode, int wait)
3972 {
3973 	struct btrfs_root *root = BTRFS_I(inode)->root;
3974 	struct btrfs_trans_handle *trans;
3975 	int ret = 0;
3976 
3977 	if (root->fs_info->btree_inode == inode)
3978 		return 0;
3979 
3980 	if (wait) {
3981 		trans = btrfs_join_transaction(root, 1);
3982 		btrfs_set_trans_block_group(trans, inode);
3983 		ret = btrfs_commit_transaction(trans, root);
3984 	}
3985 	return ret;
3986 }
3987 
3988 /*
3989  * This is somewhat expensive, updating the tree every time the
3990  * inode changes.  But, it is most likely to find the inode in cache.
3991  * FIXME, needs more benchmarking...there are no reasons other than performance
3992  * to keep or drop this code.
3993  */
3994 void btrfs_dirty_inode(struct inode *inode)
3995 {
3996 	struct btrfs_root *root = BTRFS_I(inode)->root;
3997 	struct btrfs_trans_handle *trans;
3998 
3999 	trans = btrfs_join_transaction(root, 1);
4000 	btrfs_set_trans_block_group(trans, inode);
4001 	btrfs_update_inode(trans, root, inode);
4002 	btrfs_end_transaction(trans, root);
4003 }
4004 
4005 /*
4006  * find the highest existing sequence number in a directory
4007  * and then set the in-memory index_cnt variable to reflect
4008  * free sequence numbers
4009  */
4010 static int btrfs_set_inode_index_count(struct inode *inode)
4011 {
4012 	struct btrfs_root *root = BTRFS_I(inode)->root;
4013 	struct btrfs_key key, found_key;
4014 	struct btrfs_path *path;
4015 	struct extent_buffer *leaf;
4016 	int ret;
4017 
4018 	key.objectid = inode->i_ino;
4019 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4020 	key.offset = (u64)-1;
4021 
4022 	path = btrfs_alloc_path();
4023 	if (!path)
4024 		return -ENOMEM;
4025 
4026 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4027 	if (ret < 0)
4028 		goto out;
4029 	/* FIXME: we should be able to handle this */
4030 	if (ret == 0)
4031 		goto out;
4032 	ret = 0;
4033 
4034 	/*
4035 	 * MAGIC NUMBER EXPLANATION:
4036 	 * since we search a directory based on f_pos we have to start at 2
4037 	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4038 	 * else has to start at 2
4039 	 */
4040 	if (path->slots[0] == 0) {
4041 		BTRFS_I(inode)->index_cnt = 2;
4042 		goto out;
4043 	}
4044 
4045 	path->slots[0]--;
4046 
4047 	leaf = path->nodes[0];
4048 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4049 
4050 	if (found_key.objectid != inode->i_ino ||
4051 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4052 		BTRFS_I(inode)->index_cnt = 2;
4053 		goto out;
4054 	}
4055 
4056 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4057 out:
4058 	btrfs_free_path(path);
4059 	return ret;
4060 }
4061 
4062 /*
4063  * helper to find a free sequence number in a given directory.  This current
4064  * code is very simple, later versions will do smarter things in the btree
4065  */
4066 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4067 {
4068 	int ret = 0;
4069 
4070 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4071 		ret = btrfs_set_inode_index_count(dir);
4072 		if (ret)
4073 			return ret;
4074 	}
4075 
4076 	*index = BTRFS_I(dir)->index_cnt;
4077 	BTRFS_I(dir)->index_cnt++;
4078 
4079 	return ret;
4080 }
4081 
4082 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4083 				     struct btrfs_root *root,
4084 				     struct inode *dir,
4085 				     const char *name, int name_len,
4086 				     u64 ref_objectid, u64 objectid,
4087 				     u64 alloc_hint, int mode, u64 *index)
4088 {
4089 	struct inode *inode;
4090 	struct btrfs_inode_item *inode_item;
4091 	struct btrfs_key *location;
4092 	struct btrfs_path *path;
4093 	struct btrfs_inode_ref *ref;
4094 	struct btrfs_key key[2];
4095 	u32 sizes[2];
4096 	unsigned long ptr;
4097 	int ret;
4098 	int owner;
4099 
4100 	path = btrfs_alloc_path();
4101 	BUG_ON(!path);
4102 
4103 	inode = new_inode(root->fs_info->sb);
4104 	if (!inode)
4105 		return ERR_PTR(-ENOMEM);
4106 
4107 	if (dir) {
4108 		ret = btrfs_set_inode_index(dir, index);
4109 		if (ret) {
4110 			iput(inode);
4111 			return ERR_PTR(ret);
4112 		}
4113 	}
4114 	/*
4115 	 * index_cnt is ignored for everything but a dir,
4116 	 * btrfs_get_inode_index_count has an explanation for the magic
4117 	 * number
4118 	 */
4119 	init_btrfs_i(inode);
4120 	BTRFS_I(inode)->index_cnt = 2;
4121 	BTRFS_I(inode)->root = root;
4122 	BTRFS_I(inode)->generation = trans->transid;
4123 	btrfs_set_inode_space_info(root, inode);
4124 
4125 	if (mode & S_IFDIR)
4126 		owner = 0;
4127 	else
4128 		owner = 1;
4129 	BTRFS_I(inode)->block_group =
4130 			btrfs_find_block_group(root, 0, alloc_hint, owner);
4131 
4132 	key[0].objectid = objectid;
4133 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4134 	key[0].offset = 0;
4135 
4136 	key[1].objectid = objectid;
4137 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4138 	key[1].offset = ref_objectid;
4139 
4140 	sizes[0] = sizeof(struct btrfs_inode_item);
4141 	sizes[1] = name_len + sizeof(*ref);
4142 
4143 	path->leave_spinning = 1;
4144 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4145 	if (ret != 0)
4146 		goto fail;
4147 
4148 	inode->i_uid = current_fsuid();
4149 
4150 	if (dir && (dir->i_mode & S_ISGID)) {
4151 		inode->i_gid = dir->i_gid;
4152 		if (S_ISDIR(mode))
4153 			mode |= S_ISGID;
4154 	} else
4155 		inode->i_gid = current_fsgid();
4156 
4157 	inode->i_mode = mode;
4158 	inode->i_ino = objectid;
4159 	inode_set_bytes(inode, 0);
4160 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4161 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4162 				  struct btrfs_inode_item);
4163 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4164 
4165 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4166 			     struct btrfs_inode_ref);
4167 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4168 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4169 	ptr = (unsigned long)(ref + 1);
4170 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4171 
4172 	btrfs_mark_buffer_dirty(path->nodes[0]);
4173 	btrfs_free_path(path);
4174 
4175 	location = &BTRFS_I(inode)->location;
4176 	location->objectid = objectid;
4177 	location->offset = 0;
4178 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4179 
4180 	btrfs_inherit_iflags(inode, dir);
4181 
4182 	if ((mode & S_IFREG)) {
4183 		if (btrfs_test_opt(root, NODATASUM))
4184 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4185 		if (btrfs_test_opt(root, NODATACOW))
4186 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4187 	}
4188 
4189 	insert_inode_hash(inode);
4190 	inode_tree_add(inode);
4191 	return inode;
4192 fail:
4193 	if (dir)
4194 		BTRFS_I(dir)->index_cnt--;
4195 	btrfs_free_path(path);
4196 	iput(inode);
4197 	return ERR_PTR(ret);
4198 }
4199 
4200 static inline u8 btrfs_inode_type(struct inode *inode)
4201 {
4202 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4203 }
4204 
4205 /*
4206  * utility function to add 'inode' into 'parent_inode' with
4207  * a give name and a given sequence number.
4208  * if 'add_backref' is true, also insert a backref from the
4209  * inode to the parent directory.
4210  */
4211 int btrfs_add_link(struct btrfs_trans_handle *trans,
4212 		   struct inode *parent_inode, struct inode *inode,
4213 		   const char *name, int name_len, int add_backref, u64 index)
4214 {
4215 	int ret = 0;
4216 	struct btrfs_key key;
4217 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4218 
4219 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4220 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4221 	} else {
4222 		key.objectid = inode->i_ino;
4223 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4224 		key.offset = 0;
4225 	}
4226 
4227 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4228 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4229 					 key.objectid, root->root_key.objectid,
4230 					 parent_inode->i_ino,
4231 					 index, name, name_len);
4232 	} else if (add_backref) {
4233 		ret = btrfs_insert_inode_ref(trans, root,
4234 					     name, name_len, inode->i_ino,
4235 					     parent_inode->i_ino, index);
4236 	}
4237 
4238 	if (ret == 0) {
4239 		ret = btrfs_insert_dir_item(trans, root, name, name_len,
4240 					    parent_inode->i_ino, &key,
4241 					    btrfs_inode_type(inode), index);
4242 		BUG_ON(ret);
4243 
4244 		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4245 				   name_len * 2);
4246 		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4247 		ret = btrfs_update_inode(trans, root, parent_inode);
4248 	}
4249 	return ret;
4250 }
4251 
4252 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4253 			    struct dentry *dentry, struct inode *inode,
4254 			    int backref, u64 index)
4255 {
4256 	int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4257 				 inode, dentry->d_name.name,
4258 				 dentry->d_name.len, backref, index);
4259 	if (!err) {
4260 		d_instantiate(dentry, inode);
4261 		return 0;
4262 	}
4263 	if (err > 0)
4264 		err = -EEXIST;
4265 	return err;
4266 }
4267 
4268 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4269 			int mode, dev_t rdev)
4270 {
4271 	struct btrfs_trans_handle *trans;
4272 	struct btrfs_root *root = BTRFS_I(dir)->root;
4273 	struct inode *inode = NULL;
4274 	int err;
4275 	int drop_inode = 0;
4276 	u64 objectid;
4277 	unsigned long nr = 0;
4278 	u64 index = 0;
4279 
4280 	if (!new_valid_dev(rdev))
4281 		return -EINVAL;
4282 
4283 	/*
4284 	 * 2 for inode item and ref
4285 	 * 2 for dir items
4286 	 * 1 for xattr if selinux is on
4287 	 */
4288 	err = btrfs_reserve_metadata_space(root, 5);
4289 	if (err)
4290 		return err;
4291 
4292 	trans = btrfs_start_transaction(root, 1);
4293 	if (!trans)
4294 		goto fail;
4295 	btrfs_set_trans_block_group(trans, dir);
4296 
4297 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4298 	if (err) {
4299 		err = -ENOSPC;
4300 		goto out_unlock;
4301 	}
4302 
4303 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4304 				dentry->d_name.len,
4305 				dentry->d_parent->d_inode->i_ino, objectid,
4306 				BTRFS_I(dir)->block_group, mode, &index);
4307 	err = PTR_ERR(inode);
4308 	if (IS_ERR(inode))
4309 		goto out_unlock;
4310 
4311 	err = btrfs_init_inode_security(trans, inode, dir);
4312 	if (err) {
4313 		drop_inode = 1;
4314 		goto out_unlock;
4315 	}
4316 
4317 	btrfs_set_trans_block_group(trans, inode);
4318 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4319 	if (err)
4320 		drop_inode = 1;
4321 	else {
4322 		inode->i_op = &btrfs_special_inode_operations;
4323 		init_special_inode(inode, inode->i_mode, rdev);
4324 		btrfs_update_inode(trans, root, inode);
4325 	}
4326 	btrfs_update_inode_block_group(trans, inode);
4327 	btrfs_update_inode_block_group(trans, dir);
4328 out_unlock:
4329 	nr = trans->blocks_used;
4330 	btrfs_end_transaction_throttle(trans, root);
4331 fail:
4332 	btrfs_unreserve_metadata_space(root, 5);
4333 	if (drop_inode) {
4334 		inode_dec_link_count(inode);
4335 		iput(inode);
4336 	}
4337 	btrfs_btree_balance_dirty(root, nr);
4338 	return err;
4339 }
4340 
4341 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4342 			int mode, struct nameidata *nd)
4343 {
4344 	struct btrfs_trans_handle *trans;
4345 	struct btrfs_root *root = BTRFS_I(dir)->root;
4346 	struct inode *inode = NULL;
4347 	int err;
4348 	int drop_inode = 0;
4349 	unsigned long nr = 0;
4350 	u64 objectid;
4351 	u64 index = 0;
4352 
4353 	/*
4354 	 * 2 for inode item and ref
4355 	 * 2 for dir items
4356 	 * 1 for xattr if selinux is on
4357 	 */
4358 	err = btrfs_reserve_metadata_space(root, 5);
4359 	if (err)
4360 		return err;
4361 
4362 	trans = btrfs_start_transaction(root, 1);
4363 	if (!trans)
4364 		goto fail;
4365 	btrfs_set_trans_block_group(trans, dir);
4366 
4367 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4368 	if (err) {
4369 		err = -ENOSPC;
4370 		goto out_unlock;
4371 	}
4372 
4373 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4374 				dentry->d_name.len,
4375 				dentry->d_parent->d_inode->i_ino,
4376 				objectid, BTRFS_I(dir)->block_group, mode,
4377 				&index);
4378 	err = PTR_ERR(inode);
4379 	if (IS_ERR(inode))
4380 		goto out_unlock;
4381 
4382 	err = btrfs_init_inode_security(trans, inode, dir);
4383 	if (err) {
4384 		drop_inode = 1;
4385 		goto out_unlock;
4386 	}
4387 
4388 	btrfs_set_trans_block_group(trans, inode);
4389 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4390 	if (err)
4391 		drop_inode = 1;
4392 	else {
4393 		inode->i_mapping->a_ops = &btrfs_aops;
4394 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4395 		inode->i_fop = &btrfs_file_operations;
4396 		inode->i_op = &btrfs_file_inode_operations;
4397 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4398 	}
4399 	btrfs_update_inode_block_group(trans, inode);
4400 	btrfs_update_inode_block_group(trans, dir);
4401 out_unlock:
4402 	nr = trans->blocks_used;
4403 	btrfs_end_transaction_throttle(trans, root);
4404 fail:
4405 	btrfs_unreserve_metadata_space(root, 5);
4406 	if (drop_inode) {
4407 		inode_dec_link_count(inode);
4408 		iput(inode);
4409 	}
4410 	btrfs_btree_balance_dirty(root, nr);
4411 	return err;
4412 }
4413 
4414 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4415 		      struct dentry *dentry)
4416 {
4417 	struct btrfs_trans_handle *trans;
4418 	struct btrfs_root *root = BTRFS_I(dir)->root;
4419 	struct inode *inode = old_dentry->d_inode;
4420 	u64 index;
4421 	unsigned long nr = 0;
4422 	int err;
4423 	int drop_inode = 0;
4424 
4425 	if (inode->i_nlink == 0)
4426 		return -ENOENT;
4427 
4428 	/* do not allow sys_link's with other subvols of the same device */
4429 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4430 		return -EPERM;
4431 
4432 	/*
4433 	 * 1 item for inode ref
4434 	 * 2 items for dir items
4435 	 */
4436 	err = btrfs_reserve_metadata_space(root, 3);
4437 	if (err)
4438 		return err;
4439 
4440 	btrfs_inc_nlink(inode);
4441 
4442 	err = btrfs_set_inode_index(dir, &index);
4443 	if (err)
4444 		goto fail;
4445 
4446 	trans = btrfs_start_transaction(root, 1);
4447 
4448 	btrfs_set_trans_block_group(trans, dir);
4449 	atomic_inc(&inode->i_count);
4450 
4451 	err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4452 
4453 	if (err) {
4454 		drop_inode = 1;
4455 	} else {
4456 		btrfs_update_inode_block_group(trans, dir);
4457 		err = btrfs_update_inode(trans, root, inode);
4458 		BUG_ON(err);
4459 		btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4460 	}
4461 
4462 	nr = trans->blocks_used;
4463 	btrfs_end_transaction_throttle(trans, root);
4464 fail:
4465 	btrfs_unreserve_metadata_space(root, 3);
4466 	if (drop_inode) {
4467 		inode_dec_link_count(inode);
4468 		iput(inode);
4469 	}
4470 	btrfs_btree_balance_dirty(root, nr);
4471 	return err;
4472 }
4473 
4474 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4475 {
4476 	struct inode *inode = NULL;
4477 	struct btrfs_trans_handle *trans;
4478 	struct btrfs_root *root = BTRFS_I(dir)->root;
4479 	int err = 0;
4480 	int drop_on_err = 0;
4481 	u64 objectid = 0;
4482 	u64 index = 0;
4483 	unsigned long nr = 1;
4484 
4485 	/*
4486 	 * 2 items for inode and ref
4487 	 * 2 items for dir items
4488 	 * 1 for xattr if selinux is on
4489 	 */
4490 	err = btrfs_reserve_metadata_space(root, 5);
4491 	if (err)
4492 		return err;
4493 
4494 	trans = btrfs_start_transaction(root, 1);
4495 	if (!trans) {
4496 		err = -ENOMEM;
4497 		goto out_unlock;
4498 	}
4499 	btrfs_set_trans_block_group(trans, dir);
4500 
4501 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4502 	if (err) {
4503 		err = -ENOSPC;
4504 		goto out_unlock;
4505 	}
4506 
4507 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4508 				dentry->d_name.len,
4509 				dentry->d_parent->d_inode->i_ino, objectid,
4510 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
4511 				&index);
4512 	if (IS_ERR(inode)) {
4513 		err = PTR_ERR(inode);
4514 		goto out_fail;
4515 	}
4516 
4517 	drop_on_err = 1;
4518 
4519 	err = btrfs_init_inode_security(trans, inode, dir);
4520 	if (err)
4521 		goto out_fail;
4522 
4523 	inode->i_op = &btrfs_dir_inode_operations;
4524 	inode->i_fop = &btrfs_dir_file_operations;
4525 	btrfs_set_trans_block_group(trans, inode);
4526 
4527 	btrfs_i_size_write(inode, 0);
4528 	err = btrfs_update_inode(trans, root, inode);
4529 	if (err)
4530 		goto out_fail;
4531 
4532 	err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4533 				 inode, dentry->d_name.name,
4534 				 dentry->d_name.len, 0, index);
4535 	if (err)
4536 		goto out_fail;
4537 
4538 	d_instantiate(dentry, inode);
4539 	drop_on_err = 0;
4540 	btrfs_update_inode_block_group(trans, inode);
4541 	btrfs_update_inode_block_group(trans, dir);
4542 
4543 out_fail:
4544 	nr = trans->blocks_used;
4545 	btrfs_end_transaction_throttle(trans, root);
4546 
4547 out_unlock:
4548 	btrfs_unreserve_metadata_space(root, 5);
4549 	if (drop_on_err)
4550 		iput(inode);
4551 	btrfs_btree_balance_dirty(root, nr);
4552 	return err;
4553 }
4554 
4555 /* helper for btfs_get_extent.  Given an existing extent in the tree,
4556  * and an extent that you want to insert, deal with overlap and insert
4557  * the new extent into the tree.
4558  */
4559 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4560 				struct extent_map *existing,
4561 				struct extent_map *em,
4562 				u64 map_start, u64 map_len)
4563 {
4564 	u64 start_diff;
4565 
4566 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4567 	start_diff = map_start - em->start;
4568 	em->start = map_start;
4569 	em->len = map_len;
4570 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4571 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4572 		em->block_start += start_diff;
4573 		em->block_len -= start_diff;
4574 	}
4575 	return add_extent_mapping(em_tree, em);
4576 }
4577 
4578 static noinline int uncompress_inline(struct btrfs_path *path,
4579 				      struct inode *inode, struct page *page,
4580 				      size_t pg_offset, u64 extent_offset,
4581 				      struct btrfs_file_extent_item *item)
4582 {
4583 	int ret;
4584 	struct extent_buffer *leaf = path->nodes[0];
4585 	char *tmp;
4586 	size_t max_size;
4587 	unsigned long inline_size;
4588 	unsigned long ptr;
4589 
4590 	WARN_ON(pg_offset != 0);
4591 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
4592 	inline_size = btrfs_file_extent_inline_item_len(leaf,
4593 					btrfs_item_nr(leaf, path->slots[0]));
4594 	tmp = kmalloc(inline_size, GFP_NOFS);
4595 	ptr = btrfs_file_extent_inline_start(item);
4596 
4597 	read_extent_buffer(leaf, tmp, ptr, inline_size);
4598 
4599 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4600 	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4601 				    inline_size, max_size);
4602 	if (ret) {
4603 		char *kaddr = kmap_atomic(page, KM_USER0);
4604 		unsigned long copy_size = min_t(u64,
4605 				  PAGE_CACHE_SIZE - pg_offset,
4606 				  max_size - extent_offset);
4607 		memset(kaddr + pg_offset, 0, copy_size);
4608 		kunmap_atomic(kaddr, KM_USER0);
4609 	}
4610 	kfree(tmp);
4611 	return 0;
4612 }
4613 
4614 /*
4615  * a bit scary, this does extent mapping from logical file offset to the disk.
4616  * the ugly parts come from merging extents from the disk with the in-ram
4617  * representation.  This gets more complex because of the data=ordered code,
4618  * where the in-ram extents might be locked pending data=ordered completion.
4619  *
4620  * This also copies inline extents directly into the page.
4621  */
4622 
4623 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4624 				    size_t pg_offset, u64 start, u64 len,
4625 				    int create)
4626 {
4627 	int ret;
4628 	int err = 0;
4629 	u64 bytenr;
4630 	u64 extent_start = 0;
4631 	u64 extent_end = 0;
4632 	u64 objectid = inode->i_ino;
4633 	u32 found_type;
4634 	struct btrfs_path *path = NULL;
4635 	struct btrfs_root *root = BTRFS_I(inode)->root;
4636 	struct btrfs_file_extent_item *item;
4637 	struct extent_buffer *leaf;
4638 	struct btrfs_key found_key;
4639 	struct extent_map *em = NULL;
4640 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4641 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4642 	struct btrfs_trans_handle *trans = NULL;
4643 	int compressed;
4644 
4645 again:
4646 	read_lock(&em_tree->lock);
4647 	em = lookup_extent_mapping(em_tree, start, len);
4648 	if (em)
4649 		em->bdev = root->fs_info->fs_devices->latest_bdev;
4650 	read_unlock(&em_tree->lock);
4651 
4652 	if (em) {
4653 		if (em->start > start || em->start + em->len <= start)
4654 			free_extent_map(em);
4655 		else if (em->block_start == EXTENT_MAP_INLINE && page)
4656 			free_extent_map(em);
4657 		else
4658 			goto out;
4659 	}
4660 	em = alloc_extent_map(GFP_NOFS);
4661 	if (!em) {
4662 		err = -ENOMEM;
4663 		goto out;
4664 	}
4665 	em->bdev = root->fs_info->fs_devices->latest_bdev;
4666 	em->start = EXTENT_MAP_HOLE;
4667 	em->orig_start = EXTENT_MAP_HOLE;
4668 	em->len = (u64)-1;
4669 	em->block_len = (u64)-1;
4670 
4671 	if (!path) {
4672 		path = btrfs_alloc_path();
4673 		BUG_ON(!path);
4674 	}
4675 
4676 	ret = btrfs_lookup_file_extent(trans, root, path,
4677 				       objectid, start, trans != NULL);
4678 	if (ret < 0) {
4679 		err = ret;
4680 		goto out;
4681 	}
4682 
4683 	if (ret != 0) {
4684 		if (path->slots[0] == 0)
4685 			goto not_found;
4686 		path->slots[0]--;
4687 	}
4688 
4689 	leaf = path->nodes[0];
4690 	item = btrfs_item_ptr(leaf, path->slots[0],
4691 			      struct btrfs_file_extent_item);
4692 	/* are we inside the extent that was found? */
4693 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4694 	found_type = btrfs_key_type(&found_key);
4695 	if (found_key.objectid != objectid ||
4696 	    found_type != BTRFS_EXTENT_DATA_KEY) {
4697 		goto not_found;
4698 	}
4699 
4700 	found_type = btrfs_file_extent_type(leaf, item);
4701 	extent_start = found_key.offset;
4702 	compressed = btrfs_file_extent_compression(leaf, item);
4703 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4704 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4705 		extent_end = extent_start +
4706 		       btrfs_file_extent_num_bytes(leaf, item);
4707 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4708 		size_t size;
4709 		size = btrfs_file_extent_inline_len(leaf, item);
4710 		extent_end = (extent_start + size + root->sectorsize - 1) &
4711 			~((u64)root->sectorsize - 1);
4712 	}
4713 
4714 	if (start >= extent_end) {
4715 		path->slots[0]++;
4716 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4717 			ret = btrfs_next_leaf(root, path);
4718 			if (ret < 0) {
4719 				err = ret;
4720 				goto out;
4721 			}
4722 			if (ret > 0)
4723 				goto not_found;
4724 			leaf = path->nodes[0];
4725 		}
4726 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4727 		if (found_key.objectid != objectid ||
4728 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
4729 			goto not_found;
4730 		if (start + len <= found_key.offset)
4731 			goto not_found;
4732 		em->start = start;
4733 		em->len = found_key.offset - start;
4734 		goto not_found_em;
4735 	}
4736 
4737 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4738 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4739 		em->start = extent_start;
4740 		em->len = extent_end - extent_start;
4741 		em->orig_start = extent_start -
4742 				 btrfs_file_extent_offset(leaf, item);
4743 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4744 		if (bytenr == 0) {
4745 			em->block_start = EXTENT_MAP_HOLE;
4746 			goto insert;
4747 		}
4748 		if (compressed) {
4749 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4750 			em->block_start = bytenr;
4751 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4752 									 item);
4753 		} else {
4754 			bytenr += btrfs_file_extent_offset(leaf, item);
4755 			em->block_start = bytenr;
4756 			em->block_len = em->len;
4757 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4758 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4759 		}
4760 		goto insert;
4761 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4762 		unsigned long ptr;
4763 		char *map;
4764 		size_t size;
4765 		size_t extent_offset;
4766 		size_t copy_size;
4767 
4768 		em->block_start = EXTENT_MAP_INLINE;
4769 		if (!page || create) {
4770 			em->start = extent_start;
4771 			em->len = extent_end - extent_start;
4772 			goto out;
4773 		}
4774 
4775 		size = btrfs_file_extent_inline_len(leaf, item);
4776 		extent_offset = page_offset(page) + pg_offset - extent_start;
4777 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4778 				size - extent_offset);
4779 		em->start = extent_start + extent_offset;
4780 		em->len = (copy_size + root->sectorsize - 1) &
4781 			~((u64)root->sectorsize - 1);
4782 		em->orig_start = EXTENT_MAP_INLINE;
4783 		if (compressed)
4784 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4785 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4786 		if (create == 0 && !PageUptodate(page)) {
4787 			if (btrfs_file_extent_compression(leaf, item) ==
4788 			    BTRFS_COMPRESS_ZLIB) {
4789 				ret = uncompress_inline(path, inode, page,
4790 							pg_offset,
4791 							extent_offset, item);
4792 				BUG_ON(ret);
4793 			} else {
4794 				map = kmap(page);
4795 				read_extent_buffer(leaf, map + pg_offset, ptr,
4796 						   copy_size);
4797 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4798 					memset(map + pg_offset + copy_size, 0,
4799 					       PAGE_CACHE_SIZE - pg_offset -
4800 					       copy_size);
4801 				}
4802 				kunmap(page);
4803 			}
4804 			flush_dcache_page(page);
4805 		} else if (create && PageUptodate(page)) {
4806 			if (!trans) {
4807 				kunmap(page);
4808 				free_extent_map(em);
4809 				em = NULL;
4810 				btrfs_release_path(root, path);
4811 				trans = btrfs_join_transaction(root, 1);
4812 				goto again;
4813 			}
4814 			map = kmap(page);
4815 			write_extent_buffer(leaf, map + pg_offset, ptr,
4816 					    copy_size);
4817 			kunmap(page);
4818 			btrfs_mark_buffer_dirty(leaf);
4819 		}
4820 		set_extent_uptodate(io_tree, em->start,
4821 				    extent_map_end(em) - 1, GFP_NOFS);
4822 		goto insert;
4823 	} else {
4824 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4825 		WARN_ON(1);
4826 	}
4827 not_found:
4828 	em->start = start;
4829 	em->len = len;
4830 not_found_em:
4831 	em->block_start = EXTENT_MAP_HOLE;
4832 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4833 insert:
4834 	btrfs_release_path(root, path);
4835 	if (em->start > start || extent_map_end(em) <= start) {
4836 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4837 		       "[%llu %llu]\n", (unsigned long long)em->start,
4838 		       (unsigned long long)em->len,
4839 		       (unsigned long long)start,
4840 		       (unsigned long long)len);
4841 		err = -EIO;
4842 		goto out;
4843 	}
4844 
4845 	err = 0;
4846 	write_lock(&em_tree->lock);
4847 	ret = add_extent_mapping(em_tree, em);
4848 	/* it is possible that someone inserted the extent into the tree
4849 	 * while we had the lock dropped.  It is also possible that
4850 	 * an overlapping map exists in the tree
4851 	 */
4852 	if (ret == -EEXIST) {
4853 		struct extent_map *existing;
4854 
4855 		ret = 0;
4856 
4857 		existing = lookup_extent_mapping(em_tree, start, len);
4858 		if (existing && (existing->start > start ||
4859 		    existing->start + existing->len <= start)) {
4860 			free_extent_map(existing);
4861 			existing = NULL;
4862 		}
4863 		if (!existing) {
4864 			existing = lookup_extent_mapping(em_tree, em->start,
4865 							 em->len);
4866 			if (existing) {
4867 				err = merge_extent_mapping(em_tree, existing,
4868 							   em, start,
4869 							   root->sectorsize);
4870 				free_extent_map(existing);
4871 				if (err) {
4872 					free_extent_map(em);
4873 					em = NULL;
4874 				}
4875 			} else {
4876 				err = -EIO;
4877 				free_extent_map(em);
4878 				em = NULL;
4879 			}
4880 		} else {
4881 			free_extent_map(em);
4882 			em = existing;
4883 			err = 0;
4884 		}
4885 	}
4886 	write_unlock(&em_tree->lock);
4887 out:
4888 	if (path)
4889 		btrfs_free_path(path);
4890 	if (trans) {
4891 		ret = btrfs_end_transaction(trans, root);
4892 		if (!err)
4893 			err = ret;
4894 	}
4895 	if (err) {
4896 		free_extent_map(em);
4897 		return ERR_PTR(err);
4898 	}
4899 	return em;
4900 }
4901 
4902 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4903 			const struct iovec *iov, loff_t offset,
4904 			unsigned long nr_segs)
4905 {
4906 	return -EINVAL;
4907 }
4908 
4909 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4910 		__u64 start, __u64 len)
4911 {
4912 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4913 }
4914 
4915 int btrfs_readpage(struct file *file, struct page *page)
4916 {
4917 	struct extent_io_tree *tree;
4918 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4919 	return extent_read_full_page(tree, page, btrfs_get_extent);
4920 }
4921 
4922 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4923 {
4924 	struct extent_io_tree *tree;
4925 
4926 
4927 	if (current->flags & PF_MEMALLOC) {
4928 		redirty_page_for_writepage(wbc, page);
4929 		unlock_page(page);
4930 		return 0;
4931 	}
4932 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4933 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4934 }
4935 
4936 int btrfs_writepages(struct address_space *mapping,
4937 		     struct writeback_control *wbc)
4938 {
4939 	struct extent_io_tree *tree;
4940 
4941 	tree = &BTRFS_I(mapping->host)->io_tree;
4942 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4943 }
4944 
4945 static int
4946 btrfs_readpages(struct file *file, struct address_space *mapping,
4947 		struct list_head *pages, unsigned nr_pages)
4948 {
4949 	struct extent_io_tree *tree;
4950 	tree = &BTRFS_I(mapping->host)->io_tree;
4951 	return extent_readpages(tree, mapping, pages, nr_pages,
4952 				btrfs_get_extent);
4953 }
4954 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4955 {
4956 	struct extent_io_tree *tree;
4957 	struct extent_map_tree *map;
4958 	int ret;
4959 
4960 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4961 	map = &BTRFS_I(page->mapping->host)->extent_tree;
4962 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4963 	if (ret == 1) {
4964 		ClearPagePrivate(page);
4965 		set_page_private(page, 0);
4966 		page_cache_release(page);
4967 	}
4968 	return ret;
4969 }
4970 
4971 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4972 {
4973 	if (PageWriteback(page) || PageDirty(page))
4974 		return 0;
4975 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4976 }
4977 
4978 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4979 {
4980 	struct extent_io_tree *tree;
4981 	struct btrfs_ordered_extent *ordered;
4982 	u64 page_start = page_offset(page);
4983 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4984 
4985 
4986 	/*
4987 	 * we have the page locked, so new writeback can't start,
4988 	 * and the dirty bit won't be cleared while we are here.
4989 	 *
4990 	 * Wait for IO on this page so that we can safely clear
4991 	 * the PagePrivate2 bit and do ordered accounting
4992 	 */
4993 	wait_on_page_writeback(page);
4994 
4995 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4996 	if (offset) {
4997 		btrfs_releasepage(page, GFP_NOFS);
4998 		return;
4999 	}
5000 	lock_extent(tree, page_start, page_end, GFP_NOFS);
5001 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
5002 					   page_offset(page));
5003 	if (ordered) {
5004 		/*
5005 		 * IO on this page will never be started, so we need
5006 		 * to account for any ordered extents now
5007 		 */
5008 		clear_extent_bit(tree, page_start, page_end,
5009 				 EXTENT_DIRTY | EXTENT_DELALLOC |
5010 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
5011 				 NULL, GFP_NOFS);
5012 		/*
5013 		 * whoever cleared the private bit is responsible
5014 		 * for the finish_ordered_io
5015 		 */
5016 		if (TestClearPagePrivate2(page)) {
5017 			btrfs_finish_ordered_io(page->mapping->host,
5018 						page_start, page_end);
5019 		}
5020 		btrfs_put_ordered_extent(ordered);
5021 		lock_extent(tree, page_start, page_end, GFP_NOFS);
5022 	}
5023 	clear_extent_bit(tree, page_start, page_end,
5024 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5025 		 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS);
5026 	__btrfs_releasepage(page, GFP_NOFS);
5027 
5028 	ClearPageChecked(page);
5029 	if (PagePrivate(page)) {
5030 		ClearPagePrivate(page);
5031 		set_page_private(page, 0);
5032 		page_cache_release(page);
5033 	}
5034 }
5035 
5036 /*
5037  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
5038  * called from a page fault handler when a page is first dirtied. Hence we must
5039  * be careful to check for EOF conditions here. We set the page up correctly
5040  * for a written page which means we get ENOSPC checking when writing into
5041  * holes and correct delalloc and unwritten extent mapping on filesystems that
5042  * support these features.
5043  *
5044  * We are not allowed to take the i_mutex here so we have to play games to
5045  * protect against truncate races as the page could now be beyond EOF.  Because
5046  * vmtruncate() writes the inode size before removing pages, once we have the
5047  * page lock we can determine safely if the page is beyond EOF. If it is not
5048  * beyond EOF, then the page is guaranteed safe against truncation until we
5049  * unlock the page.
5050  */
5051 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5052 {
5053 	struct page *page = vmf->page;
5054 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
5055 	struct btrfs_root *root = BTRFS_I(inode)->root;
5056 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5057 	struct btrfs_ordered_extent *ordered;
5058 	char *kaddr;
5059 	unsigned long zero_start;
5060 	loff_t size;
5061 	int ret;
5062 	u64 page_start;
5063 	u64 page_end;
5064 
5065 	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
5066 	if (ret) {
5067 		if (ret == -ENOMEM)
5068 			ret = VM_FAULT_OOM;
5069 		else /* -ENOSPC, -EIO, etc */
5070 			ret = VM_FAULT_SIGBUS;
5071 		goto out;
5072 	}
5073 
5074 	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5075 	if (ret) {
5076 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5077 		ret = VM_FAULT_SIGBUS;
5078 		goto out;
5079 	}
5080 
5081 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5082 again:
5083 	lock_page(page);
5084 	size = i_size_read(inode);
5085 	page_start = page_offset(page);
5086 	page_end = page_start + PAGE_CACHE_SIZE - 1;
5087 
5088 	if ((page->mapping != inode->i_mapping) ||
5089 	    (page_start >= size)) {
5090 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5091 		/* page got truncated out from underneath us */
5092 		goto out_unlock;
5093 	}
5094 	wait_on_page_writeback(page);
5095 
5096 	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5097 	set_page_extent_mapped(page);
5098 
5099 	/*
5100 	 * we can't set the delalloc bits if there are pending ordered
5101 	 * extents.  Drop our locks and wait for them to finish
5102 	 */
5103 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
5104 	if (ordered) {
5105 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5106 		unlock_page(page);
5107 		btrfs_start_ordered_extent(inode, ordered, 1);
5108 		btrfs_put_ordered_extent(ordered);
5109 		goto again;
5110 	}
5111 
5112 	/*
5113 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
5114 	 * if it was already dirty, so for space accounting reasons we need to
5115 	 * clear any delalloc bits for the range we are fixing to save.  There
5116 	 * is probably a better way to do this, but for now keep consistent with
5117 	 * prepare_pages in the normal write path.
5118 	 */
5119 	clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
5120 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5121 			  GFP_NOFS);
5122 
5123 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
5124 	if (ret) {
5125 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5126 		ret = VM_FAULT_SIGBUS;
5127 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5128 		goto out_unlock;
5129 	}
5130 	ret = 0;
5131 
5132 	/* page is wholly or partially inside EOF */
5133 	if (page_start + PAGE_CACHE_SIZE > size)
5134 		zero_start = size & ~PAGE_CACHE_MASK;
5135 	else
5136 		zero_start = PAGE_CACHE_SIZE;
5137 
5138 	if (zero_start != PAGE_CACHE_SIZE) {
5139 		kaddr = kmap(page);
5140 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5141 		flush_dcache_page(page);
5142 		kunmap(page);
5143 	}
5144 	ClearPageChecked(page);
5145 	set_page_dirty(page);
5146 	SetPageUptodate(page);
5147 
5148 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
5149 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5150 
5151 	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5152 
5153 out_unlock:
5154 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5155 	if (!ret)
5156 		return VM_FAULT_LOCKED;
5157 	unlock_page(page);
5158 out:
5159 	return ret;
5160 }
5161 
5162 static void btrfs_truncate(struct inode *inode)
5163 {
5164 	struct btrfs_root *root = BTRFS_I(inode)->root;
5165 	int ret;
5166 	struct btrfs_trans_handle *trans;
5167 	unsigned long nr;
5168 	u64 mask = root->sectorsize - 1;
5169 
5170 	if (!S_ISREG(inode->i_mode)) {
5171 		WARN_ON(1);
5172 		return;
5173 	}
5174 
5175 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5176 	if (ret)
5177 		return;
5178 
5179 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5180 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5181 
5182 	trans = btrfs_start_transaction(root, 1);
5183 	btrfs_set_trans_block_group(trans, inode);
5184 
5185 	/*
5186 	 * setattr is responsible for setting the ordered_data_close flag,
5187 	 * but that is only tested during the last file release.  That
5188 	 * could happen well after the next commit, leaving a great big
5189 	 * window where new writes may get lost if someone chooses to write
5190 	 * to this file after truncating to zero
5191 	 *
5192 	 * The inode doesn't have any dirty data here, and so if we commit
5193 	 * this is a noop.  If someone immediately starts writing to the inode
5194 	 * it is very likely we'll catch some of their writes in this
5195 	 * transaction, and the commit will find this file on the ordered
5196 	 * data list with good things to send down.
5197 	 *
5198 	 * This is a best effort solution, there is still a window where
5199 	 * using truncate to replace the contents of the file will
5200 	 * end up with a zero length file after a crash.
5201 	 */
5202 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5203 		btrfs_add_ordered_operation(trans, root, inode);
5204 
5205 	while (1) {
5206 		ret = btrfs_truncate_inode_items(trans, root, inode,
5207 						 inode->i_size,
5208 						 BTRFS_EXTENT_DATA_KEY);
5209 		if (ret != -EAGAIN)
5210 			break;
5211 
5212 		ret = btrfs_update_inode(trans, root, inode);
5213 		BUG_ON(ret);
5214 
5215 		nr = trans->blocks_used;
5216 		btrfs_end_transaction(trans, root);
5217 		btrfs_btree_balance_dirty(root, nr);
5218 
5219 		trans = btrfs_start_transaction(root, 1);
5220 		btrfs_set_trans_block_group(trans, inode);
5221 	}
5222 
5223 	if (ret == 0 && inode->i_nlink > 0) {
5224 		ret = btrfs_orphan_del(trans, inode);
5225 		BUG_ON(ret);
5226 	}
5227 
5228 	ret = btrfs_update_inode(trans, root, inode);
5229 	BUG_ON(ret);
5230 
5231 	nr = trans->blocks_used;
5232 	ret = btrfs_end_transaction_throttle(trans, root);
5233 	BUG_ON(ret);
5234 	btrfs_btree_balance_dirty(root, nr);
5235 }
5236 
5237 /*
5238  * create a new subvolume directory/inode (helper for the ioctl).
5239  */
5240 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5241 			     struct btrfs_root *new_root,
5242 			     u64 new_dirid, u64 alloc_hint)
5243 {
5244 	struct inode *inode;
5245 	int err;
5246 	u64 index = 0;
5247 
5248 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5249 				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5250 	if (IS_ERR(inode))
5251 		return PTR_ERR(inode);
5252 	inode->i_op = &btrfs_dir_inode_operations;
5253 	inode->i_fop = &btrfs_dir_file_operations;
5254 
5255 	inode->i_nlink = 1;
5256 	btrfs_i_size_write(inode, 0);
5257 
5258 	err = btrfs_update_inode(trans, new_root, inode);
5259 	BUG_ON(err);
5260 
5261 	iput(inode);
5262 	return 0;
5263 }
5264 
5265 /* helper function for file defrag and space balancing.  This
5266  * forces readahead on a given range of bytes in an inode
5267  */
5268 unsigned long btrfs_force_ra(struct address_space *mapping,
5269 			      struct file_ra_state *ra, struct file *file,
5270 			      pgoff_t offset, pgoff_t last_index)
5271 {
5272 	pgoff_t req_size = last_index - offset + 1;
5273 
5274 	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5275 	return offset + req_size;
5276 }
5277 
5278 struct inode *btrfs_alloc_inode(struct super_block *sb)
5279 {
5280 	struct btrfs_inode *ei;
5281 
5282 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5283 	if (!ei)
5284 		return NULL;
5285 	ei->last_trans = 0;
5286 	ei->last_sub_trans = 0;
5287 	ei->logged_trans = 0;
5288 	ei->outstanding_extents = 0;
5289 	ei->reserved_extents = 0;
5290 	ei->root = NULL;
5291 	spin_lock_init(&ei->accounting_lock);
5292 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5293 	INIT_LIST_HEAD(&ei->i_orphan);
5294 	INIT_LIST_HEAD(&ei->ordered_operations);
5295 	return &ei->vfs_inode;
5296 }
5297 
5298 void btrfs_destroy_inode(struct inode *inode)
5299 {
5300 	struct btrfs_ordered_extent *ordered;
5301 	struct btrfs_root *root = BTRFS_I(inode)->root;
5302 
5303 	WARN_ON(!list_empty(&inode->i_dentry));
5304 	WARN_ON(inode->i_data.nrpages);
5305 
5306 	/*
5307 	 * This can happen where we create an inode, but somebody else also
5308 	 * created the same inode and we need to destroy the one we already
5309 	 * created.
5310 	 */
5311 	if (!root)
5312 		goto free;
5313 
5314 	/*
5315 	 * Make sure we're properly removed from the ordered operation
5316 	 * lists.
5317 	 */
5318 	smp_mb();
5319 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5320 		spin_lock(&root->fs_info->ordered_extent_lock);
5321 		list_del_init(&BTRFS_I(inode)->ordered_operations);
5322 		spin_unlock(&root->fs_info->ordered_extent_lock);
5323 	}
5324 
5325 	spin_lock(&root->list_lock);
5326 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5327 		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5328 		       inode->i_ino);
5329 		list_del_init(&BTRFS_I(inode)->i_orphan);
5330 	}
5331 	spin_unlock(&root->list_lock);
5332 
5333 	while (1) {
5334 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5335 		if (!ordered)
5336 			break;
5337 		else {
5338 			printk(KERN_ERR "btrfs found ordered "
5339 			       "extent %llu %llu on inode cleanup\n",
5340 			       (unsigned long long)ordered->file_offset,
5341 			       (unsigned long long)ordered->len);
5342 			btrfs_remove_ordered_extent(inode, ordered);
5343 			btrfs_put_ordered_extent(ordered);
5344 			btrfs_put_ordered_extent(ordered);
5345 		}
5346 	}
5347 	inode_tree_del(inode);
5348 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5349 free:
5350 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5351 }
5352 
5353 void btrfs_drop_inode(struct inode *inode)
5354 {
5355 	struct btrfs_root *root = BTRFS_I(inode)->root;
5356 
5357 	if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5358 		generic_delete_inode(inode);
5359 	else
5360 		generic_drop_inode(inode);
5361 }
5362 
5363 static void init_once(void *foo)
5364 {
5365 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5366 
5367 	inode_init_once(&ei->vfs_inode);
5368 }
5369 
5370 void btrfs_destroy_cachep(void)
5371 {
5372 	if (btrfs_inode_cachep)
5373 		kmem_cache_destroy(btrfs_inode_cachep);
5374 	if (btrfs_trans_handle_cachep)
5375 		kmem_cache_destroy(btrfs_trans_handle_cachep);
5376 	if (btrfs_transaction_cachep)
5377 		kmem_cache_destroy(btrfs_transaction_cachep);
5378 	if (btrfs_path_cachep)
5379 		kmem_cache_destroy(btrfs_path_cachep);
5380 }
5381 
5382 int btrfs_init_cachep(void)
5383 {
5384 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5385 			sizeof(struct btrfs_inode), 0,
5386 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5387 	if (!btrfs_inode_cachep)
5388 		goto fail;
5389 
5390 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5391 			sizeof(struct btrfs_trans_handle), 0,
5392 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5393 	if (!btrfs_trans_handle_cachep)
5394 		goto fail;
5395 
5396 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5397 			sizeof(struct btrfs_transaction), 0,
5398 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5399 	if (!btrfs_transaction_cachep)
5400 		goto fail;
5401 
5402 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5403 			sizeof(struct btrfs_path), 0,
5404 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5405 	if (!btrfs_path_cachep)
5406 		goto fail;
5407 
5408 	return 0;
5409 fail:
5410 	btrfs_destroy_cachep();
5411 	return -ENOMEM;
5412 }
5413 
5414 static int btrfs_getattr(struct vfsmount *mnt,
5415 			 struct dentry *dentry, struct kstat *stat)
5416 {
5417 	struct inode *inode = dentry->d_inode;
5418 	generic_fillattr(inode, stat);
5419 	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5420 	stat->blksize = PAGE_CACHE_SIZE;
5421 	stat->blocks = (inode_get_bytes(inode) +
5422 			BTRFS_I(inode)->delalloc_bytes) >> 9;
5423 	return 0;
5424 }
5425 
5426 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5427 			   struct inode *new_dir, struct dentry *new_dentry)
5428 {
5429 	struct btrfs_trans_handle *trans;
5430 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
5431 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5432 	struct inode *new_inode = new_dentry->d_inode;
5433 	struct inode *old_inode = old_dentry->d_inode;
5434 	struct timespec ctime = CURRENT_TIME;
5435 	u64 index = 0;
5436 	u64 root_objectid;
5437 	int ret;
5438 
5439 	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5440 		return -EPERM;
5441 
5442 	/* we only allow rename subvolume link between subvolumes */
5443 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5444 		return -EXDEV;
5445 
5446 	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5447 	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5448 		return -ENOTEMPTY;
5449 
5450 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
5451 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5452 		return -ENOTEMPTY;
5453 
5454 	/*
5455 	 * We want to reserve the absolute worst case amount of items.  So if
5456 	 * both inodes are subvols and we need to unlink them then that would
5457 	 * require 4 item modifications, but if they are both normal inodes it
5458 	 * would require 5 item modifications, so we'll assume their normal
5459 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5460 	 * should cover the worst case number of items we'll modify.
5461 	 */
5462 	ret = btrfs_reserve_metadata_space(root, 11);
5463 	if (ret)
5464 		return ret;
5465 
5466 	/*
5467 	 * we're using rename to replace one file with another.
5468 	 * and the replacement file is large.  Start IO on it now so
5469 	 * we don't add too much work to the end of the transaction
5470 	 */
5471 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5472 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5473 		filemap_flush(old_inode->i_mapping);
5474 
5475 	/* close the racy window with snapshot create/destroy ioctl */
5476 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5477 		down_read(&root->fs_info->subvol_sem);
5478 
5479 	trans = btrfs_start_transaction(root, 1);
5480 	btrfs_set_trans_block_group(trans, new_dir);
5481 
5482 	if (dest != root)
5483 		btrfs_record_root_in_trans(trans, dest);
5484 
5485 	ret = btrfs_set_inode_index(new_dir, &index);
5486 	if (ret)
5487 		goto out_fail;
5488 
5489 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5490 		/* force full log commit if subvolume involved. */
5491 		root->fs_info->last_trans_log_full_commit = trans->transid;
5492 	} else {
5493 		ret = btrfs_insert_inode_ref(trans, dest,
5494 					     new_dentry->d_name.name,
5495 					     new_dentry->d_name.len,
5496 					     old_inode->i_ino,
5497 					     new_dir->i_ino, index);
5498 		if (ret)
5499 			goto out_fail;
5500 		/*
5501 		 * this is an ugly little race, but the rename is required
5502 		 * to make sure that if we crash, the inode is either at the
5503 		 * old name or the new one.  pinning the log transaction lets
5504 		 * us make sure we don't allow a log commit to come in after
5505 		 * we unlink the name but before we add the new name back in.
5506 		 */
5507 		btrfs_pin_log_trans(root);
5508 	}
5509 	/*
5510 	 * make sure the inode gets flushed if it is replacing
5511 	 * something.
5512 	 */
5513 	if (new_inode && new_inode->i_size &&
5514 	    old_inode && S_ISREG(old_inode->i_mode)) {
5515 		btrfs_add_ordered_operation(trans, root, old_inode);
5516 	}
5517 
5518 	old_dir->i_ctime = old_dir->i_mtime = ctime;
5519 	new_dir->i_ctime = new_dir->i_mtime = ctime;
5520 	old_inode->i_ctime = ctime;
5521 
5522 	if (old_dentry->d_parent != new_dentry->d_parent)
5523 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5524 
5525 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5526 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5527 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5528 					old_dentry->d_name.name,
5529 					old_dentry->d_name.len);
5530 	} else {
5531 		btrfs_inc_nlink(old_dentry->d_inode);
5532 		ret = btrfs_unlink_inode(trans, root, old_dir,
5533 					 old_dentry->d_inode,
5534 					 old_dentry->d_name.name,
5535 					 old_dentry->d_name.len);
5536 	}
5537 	BUG_ON(ret);
5538 
5539 	if (new_inode) {
5540 		new_inode->i_ctime = CURRENT_TIME;
5541 		if (unlikely(new_inode->i_ino ==
5542 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5543 			root_objectid = BTRFS_I(new_inode)->location.objectid;
5544 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
5545 						root_objectid,
5546 						new_dentry->d_name.name,
5547 						new_dentry->d_name.len);
5548 			BUG_ON(new_inode->i_nlink == 0);
5549 		} else {
5550 			ret = btrfs_unlink_inode(trans, dest, new_dir,
5551 						 new_dentry->d_inode,
5552 						 new_dentry->d_name.name,
5553 						 new_dentry->d_name.len);
5554 		}
5555 		BUG_ON(ret);
5556 		if (new_inode->i_nlink == 0) {
5557 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5558 			BUG_ON(ret);
5559 		}
5560 	}
5561 
5562 	ret = btrfs_add_link(trans, new_dir, old_inode,
5563 			     new_dentry->d_name.name,
5564 			     new_dentry->d_name.len, 0, index);
5565 	BUG_ON(ret);
5566 
5567 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5568 		btrfs_log_new_name(trans, old_inode, old_dir,
5569 				   new_dentry->d_parent);
5570 		btrfs_end_log_trans(root);
5571 	}
5572 out_fail:
5573 	btrfs_end_transaction_throttle(trans, root);
5574 
5575 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5576 		up_read(&root->fs_info->subvol_sem);
5577 
5578 	btrfs_unreserve_metadata_space(root, 11);
5579 	return ret;
5580 }
5581 
5582 /*
5583  * some fairly slow code that needs optimization. This walks the list
5584  * of all the inodes with pending delalloc and forces them to disk.
5585  */
5586 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5587 {
5588 	struct list_head *head = &root->fs_info->delalloc_inodes;
5589 	struct btrfs_inode *binode;
5590 	struct inode *inode;
5591 
5592 	if (root->fs_info->sb->s_flags & MS_RDONLY)
5593 		return -EROFS;
5594 
5595 	spin_lock(&root->fs_info->delalloc_lock);
5596 	while (!list_empty(head)) {
5597 		binode = list_entry(head->next, struct btrfs_inode,
5598 				    delalloc_inodes);
5599 		inode = igrab(&binode->vfs_inode);
5600 		if (!inode)
5601 			list_del_init(&binode->delalloc_inodes);
5602 		spin_unlock(&root->fs_info->delalloc_lock);
5603 		if (inode) {
5604 			filemap_flush(inode->i_mapping);
5605 			if (delay_iput)
5606 				btrfs_add_delayed_iput(inode);
5607 			else
5608 				iput(inode);
5609 		}
5610 		cond_resched();
5611 		spin_lock(&root->fs_info->delalloc_lock);
5612 	}
5613 	spin_unlock(&root->fs_info->delalloc_lock);
5614 
5615 	/* the filemap_flush will queue IO into the worker threads, but
5616 	 * we have to make sure the IO is actually started and that
5617 	 * ordered extents get created before we return
5618 	 */
5619 	atomic_inc(&root->fs_info->async_submit_draining);
5620 	while (atomic_read(&root->fs_info->nr_async_submits) ||
5621 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
5622 		wait_event(root->fs_info->async_submit_wait,
5623 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5624 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5625 	}
5626 	atomic_dec(&root->fs_info->async_submit_draining);
5627 	return 0;
5628 }
5629 
5630 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5631 			 const char *symname)
5632 {
5633 	struct btrfs_trans_handle *trans;
5634 	struct btrfs_root *root = BTRFS_I(dir)->root;
5635 	struct btrfs_path *path;
5636 	struct btrfs_key key;
5637 	struct inode *inode = NULL;
5638 	int err;
5639 	int drop_inode = 0;
5640 	u64 objectid;
5641 	u64 index = 0 ;
5642 	int name_len;
5643 	int datasize;
5644 	unsigned long ptr;
5645 	struct btrfs_file_extent_item *ei;
5646 	struct extent_buffer *leaf;
5647 	unsigned long nr = 0;
5648 
5649 	name_len = strlen(symname) + 1;
5650 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5651 		return -ENAMETOOLONG;
5652 
5653 	/*
5654 	 * 2 items for inode item and ref
5655 	 * 2 items for dir items
5656 	 * 1 item for xattr if selinux is on
5657 	 */
5658 	err = btrfs_reserve_metadata_space(root, 5);
5659 	if (err)
5660 		return err;
5661 
5662 	trans = btrfs_start_transaction(root, 1);
5663 	if (!trans)
5664 		goto out_fail;
5665 	btrfs_set_trans_block_group(trans, dir);
5666 
5667 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5668 	if (err) {
5669 		err = -ENOSPC;
5670 		goto out_unlock;
5671 	}
5672 
5673 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5674 				dentry->d_name.len,
5675 				dentry->d_parent->d_inode->i_ino, objectid,
5676 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5677 				&index);
5678 	err = PTR_ERR(inode);
5679 	if (IS_ERR(inode))
5680 		goto out_unlock;
5681 
5682 	err = btrfs_init_inode_security(trans, inode, dir);
5683 	if (err) {
5684 		drop_inode = 1;
5685 		goto out_unlock;
5686 	}
5687 
5688 	btrfs_set_trans_block_group(trans, inode);
5689 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5690 	if (err)
5691 		drop_inode = 1;
5692 	else {
5693 		inode->i_mapping->a_ops = &btrfs_aops;
5694 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5695 		inode->i_fop = &btrfs_file_operations;
5696 		inode->i_op = &btrfs_file_inode_operations;
5697 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5698 	}
5699 	btrfs_update_inode_block_group(trans, inode);
5700 	btrfs_update_inode_block_group(trans, dir);
5701 	if (drop_inode)
5702 		goto out_unlock;
5703 
5704 	path = btrfs_alloc_path();
5705 	BUG_ON(!path);
5706 	key.objectid = inode->i_ino;
5707 	key.offset = 0;
5708 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5709 	datasize = btrfs_file_extent_calc_inline_size(name_len);
5710 	err = btrfs_insert_empty_item(trans, root, path, &key,
5711 				      datasize);
5712 	if (err) {
5713 		drop_inode = 1;
5714 		goto out_unlock;
5715 	}
5716 	leaf = path->nodes[0];
5717 	ei = btrfs_item_ptr(leaf, path->slots[0],
5718 			    struct btrfs_file_extent_item);
5719 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5720 	btrfs_set_file_extent_type(leaf, ei,
5721 				   BTRFS_FILE_EXTENT_INLINE);
5722 	btrfs_set_file_extent_encryption(leaf, ei, 0);
5723 	btrfs_set_file_extent_compression(leaf, ei, 0);
5724 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5725 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5726 
5727 	ptr = btrfs_file_extent_inline_start(ei);
5728 	write_extent_buffer(leaf, symname, ptr, name_len);
5729 	btrfs_mark_buffer_dirty(leaf);
5730 	btrfs_free_path(path);
5731 
5732 	inode->i_op = &btrfs_symlink_inode_operations;
5733 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
5734 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5735 	inode_set_bytes(inode, name_len);
5736 	btrfs_i_size_write(inode, name_len - 1);
5737 	err = btrfs_update_inode(trans, root, inode);
5738 	if (err)
5739 		drop_inode = 1;
5740 
5741 out_unlock:
5742 	nr = trans->blocks_used;
5743 	btrfs_end_transaction_throttle(trans, root);
5744 out_fail:
5745 	btrfs_unreserve_metadata_space(root, 5);
5746 	if (drop_inode) {
5747 		inode_dec_link_count(inode);
5748 		iput(inode);
5749 	}
5750 	btrfs_btree_balance_dirty(root, nr);
5751 	return err;
5752 }
5753 
5754 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5755 			u64 alloc_hint, int mode, loff_t actual_len)
5756 {
5757 	struct btrfs_trans_handle *trans;
5758 	struct btrfs_root *root = BTRFS_I(inode)->root;
5759 	struct btrfs_key ins;
5760 	u64 alloc_size;
5761 	u64 cur_offset = start;
5762 	u64 num_bytes = end - start;
5763 	int ret = 0;
5764 	u64 i_size;
5765 
5766 	while (num_bytes > 0) {
5767 		alloc_size = min(num_bytes, root->fs_info->max_extent);
5768 
5769 		trans = btrfs_start_transaction(root, 1);
5770 
5771 		ret = btrfs_reserve_extent(trans, root, alloc_size,
5772 					   root->sectorsize, 0, alloc_hint,
5773 					   (u64)-1, &ins, 1);
5774 		if (ret) {
5775 			WARN_ON(1);
5776 			goto stop_trans;
5777 		}
5778 
5779 		ret = btrfs_reserve_metadata_space(root, 3);
5780 		if (ret) {
5781 			btrfs_free_reserved_extent(root, ins.objectid,
5782 						   ins.offset);
5783 			goto stop_trans;
5784 		}
5785 
5786 		ret = insert_reserved_file_extent(trans, inode,
5787 						  cur_offset, ins.objectid,
5788 						  ins.offset, ins.offset,
5789 						  ins.offset, 0, 0, 0,
5790 						  BTRFS_FILE_EXTENT_PREALLOC);
5791 		BUG_ON(ret);
5792 		btrfs_drop_extent_cache(inode, cur_offset,
5793 					cur_offset + ins.offset -1, 0);
5794 
5795 		num_bytes -= ins.offset;
5796 		cur_offset += ins.offset;
5797 		alloc_hint = ins.objectid + ins.offset;
5798 
5799 		inode->i_ctime = CURRENT_TIME;
5800 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5801 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5802 			(actual_len > inode->i_size) &&
5803 			(cur_offset > inode->i_size)) {
5804 
5805 			if (cur_offset > actual_len)
5806 				i_size  = actual_len;
5807 			else
5808 				i_size = cur_offset;
5809 			i_size_write(inode, i_size);
5810 			btrfs_ordered_update_i_size(inode, i_size, NULL);
5811 		}
5812 
5813 		ret = btrfs_update_inode(trans, root, inode);
5814 		BUG_ON(ret);
5815 
5816 		btrfs_end_transaction(trans, root);
5817 		btrfs_unreserve_metadata_space(root, 3);
5818 	}
5819 	return ret;
5820 
5821 stop_trans:
5822 	btrfs_end_transaction(trans, root);
5823 	return ret;
5824 
5825 }
5826 
5827 static long btrfs_fallocate(struct inode *inode, int mode,
5828 			    loff_t offset, loff_t len)
5829 {
5830 	u64 cur_offset;
5831 	u64 last_byte;
5832 	u64 alloc_start;
5833 	u64 alloc_end;
5834 	u64 alloc_hint = 0;
5835 	u64 locked_end;
5836 	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5837 	struct extent_map *em;
5838 	int ret;
5839 
5840 	alloc_start = offset & ~mask;
5841 	alloc_end =  (offset + len + mask) & ~mask;
5842 
5843 	/*
5844 	 * wait for ordered IO before we have any locks.  We'll loop again
5845 	 * below with the locks held.
5846 	 */
5847 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5848 
5849 	mutex_lock(&inode->i_mutex);
5850 	if (alloc_start > inode->i_size) {
5851 		ret = btrfs_cont_expand(inode, alloc_start);
5852 		if (ret)
5853 			goto out;
5854 	}
5855 
5856 	ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
5857 					  alloc_end - alloc_start);
5858 	if (ret)
5859 		goto out;
5860 
5861 	locked_end = alloc_end - 1;
5862 	while (1) {
5863 		struct btrfs_ordered_extent *ordered;
5864 
5865 		/* the extent lock is ordered inside the running
5866 		 * transaction
5867 		 */
5868 		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5869 			    GFP_NOFS);
5870 		ordered = btrfs_lookup_first_ordered_extent(inode,
5871 							    alloc_end - 1);
5872 		if (ordered &&
5873 		    ordered->file_offset + ordered->len > alloc_start &&
5874 		    ordered->file_offset < alloc_end) {
5875 			btrfs_put_ordered_extent(ordered);
5876 			unlock_extent(&BTRFS_I(inode)->io_tree,
5877 				      alloc_start, locked_end, GFP_NOFS);
5878 			/*
5879 			 * we can't wait on the range with the transaction
5880 			 * running or with the extent lock held
5881 			 */
5882 			btrfs_wait_ordered_range(inode, alloc_start,
5883 						 alloc_end - alloc_start);
5884 		} else {
5885 			if (ordered)
5886 				btrfs_put_ordered_extent(ordered);
5887 			break;
5888 		}
5889 	}
5890 
5891 	cur_offset = alloc_start;
5892 	while (1) {
5893 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5894 				      alloc_end - cur_offset, 0);
5895 		BUG_ON(IS_ERR(em) || !em);
5896 		last_byte = min(extent_map_end(em), alloc_end);
5897 		last_byte = (last_byte + mask) & ~mask;
5898 		if (em->block_start == EXTENT_MAP_HOLE ||
5899 		    (cur_offset >= inode->i_size &&
5900 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5901 			ret = prealloc_file_range(inode,
5902 						  cur_offset, last_byte,
5903 						alloc_hint, mode, offset+len);
5904 			if (ret < 0) {
5905 				free_extent_map(em);
5906 				break;
5907 			}
5908 		}
5909 		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5910 			alloc_hint = em->block_start;
5911 		free_extent_map(em);
5912 
5913 		cur_offset = last_byte;
5914 		if (cur_offset >= alloc_end) {
5915 			ret = 0;
5916 			break;
5917 		}
5918 	}
5919 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5920 		      GFP_NOFS);
5921 
5922 	btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
5923 				       alloc_end - alloc_start);
5924 out:
5925 	mutex_unlock(&inode->i_mutex);
5926 	return ret;
5927 }
5928 
5929 static int btrfs_set_page_dirty(struct page *page)
5930 {
5931 	return __set_page_dirty_nobuffers(page);
5932 }
5933 
5934 static int btrfs_permission(struct inode *inode, int mask)
5935 {
5936 	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5937 		return -EACCES;
5938 	return generic_permission(inode, mask, btrfs_check_acl);
5939 }
5940 
5941 static const struct inode_operations btrfs_dir_inode_operations = {
5942 	.getattr	= btrfs_getattr,
5943 	.lookup		= btrfs_lookup,
5944 	.create		= btrfs_create,
5945 	.unlink		= btrfs_unlink,
5946 	.link		= btrfs_link,
5947 	.mkdir		= btrfs_mkdir,
5948 	.rmdir		= btrfs_rmdir,
5949 	.rename		= btrfs_rename,
5950 	.symlink	= btrfs_symlink,
5951 	.setattr	= btrfs_setattr,
5952 	.mknod		= btrfs_mknod,
5953 	.setxattr	= btrfs_setxattr,
5954 	.getxattr	= btrfs_getxattr,
5955 	.listxattr	= btrfs_listxattr,
5956 	.removexattr	= btrfs_removexattr,
5957 	.permission	= btrfs_permission,
5958 };
5959 static const struct inode_operations btrfs_dir_ro_inode_operations = {
5960 	.lookup		= btrfs_lookup,
5961 	.permission	= btrfs_permission,
5962 };
5963 
5964 static const struct file_operations btrfs_dir_file_operations = {
5965 	.llseek		= generic_file_llseek,
5966 	.read		= generic_read_dir,
5967 	.readdir	= btrfs_real_readdir,
5968 	.unlocked_ioctl	= btrfs_ioctl,
5969 #ifdef CONFIG_COMPAT
5970 	.compat_ioctl	= btrfs_ioctl,
5971 #endif
5972 	.release        = btrfs_release_file,
5973 	.fsync		= btrfs_sync_file,
5974 };
5975 
5976 static struct extent_io_ops btrfs_extent_io_ops = {
5977 	.fill_delalloc = run_delalloc_range,
5978 	.submit_bio_hook = btrfs_submit_bio_hook,
5979 	.merge_bio_hook = btrfs_merge_bio_hook,
5980 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
5981 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
5982 	.writepage_start_hook = btrfs_writepage_start_hook,
5983 	.readpage_io_failed_hook = btrfs_io_failed_hook,
5984 	.set_bit_hook = btrfs_set_bit_hook,
5985 	.clear_bit_hook = btrfs_clear_bit_hook,
5986 	.merge_extent_hook = btrfs_merge_extent_hook,
5987 	.split_extent_hook = btrfs_split_extent_hook,
5988 };
5989 
5990 /*
5991  * btrfs doesn't support the bmap operation because swapfiles
5992  * use bmap to make a mapping of extents in the file.  They assume
5993  * these extents won't change over the life of the file and they
5994  * use the bmap result to do IO directly to the drive.
5995  *
5996  * the btrfs bmap call would return logical addresses that aren't
5997  * suitable for IO and they also will change frequently as COW
5998  * operations happen.  So, swapfile + btrfs == corruption.
5999  *
6000  * For now we're avoiding this by dropping bmap.
6001  */
6002 static const struct address_space_operations btrfs_aops = {
6003 	.readpage	= btrfs_readpage,
6004 	.writepage	= btrfs_writepage,
6005 	.writepages	= btrfs_writepages,
6006 	.readpages	= btrfs_readpages,
6007 	.sync_page	= block_sync_page,
6008 	.direct_IO	= btrfs_direct_IO,
6009 	.invalidatepage = btrfs_invalidatepage,
6010 	.releasepage	= btrfs_releasepage,
6011 	.set_page_dirty	= btrfs_set_page_dirty,
6012 	.error_remove_page = generic_error_remove_page,
6013 };
6014 
6015 static const struct address_space_operations btrfs_symlink_aops = {
6016 	.readpage	= btrfs_readpage,
6017 	.writepage	= btrfs_writepage,
6018 	.invalidatepage = btrfs_invalidatepage,
6019 	.releasepage	= btrfs_releasepage,
6020 };
6021 
6022 static const struct inode_operations btrfs_file_inode_operations = {
6023 	.truncate	= btrfs_truncate,
6024 	.getattr	= btrfs_getattr,
6025 	.setattr	= btrfs_setattr,
6026 	.setxattr	= btrfs_setxattr,
6027 	.getxattr	= btrfs_getxattr,
6028 	.listxattr      = btrfs_listxattr,
6029 	.removexattr	= btrfs_removexattr,
6030 	.permission	= btrfs_permission,
6031 	.fallocate	= btrfs_fallocate,
6032 	.fiemap		= btrfs_fiemap,
6033 };
6034 static const struct inode_operations btrfs_special_inode_operations = {
6035 	.getattr	= btrfs_getattr,
6036 	.setattr	= btrfs_setattr,
6037 	.permission	= btrfs_permission,
6038 	.setxattr	= btrfs_setxattr,
6039 	.getxattr	= btrfs_getxattr,
6040 	.listxattr	= btrfs_listxattr,
6041 	.removexattr	= btrfs_removexattr,
6042 };
6043 static const struct inode_operations btrfs_symlink_inode_operations = {
6044 	.readlink	= generic_readlink,
6045 	.follow_link	= page_follow_link_light,
6046 	.put_link	= page_put_link,
6047 	.permission	= btrfs_permission,
6048 	.setxattr	= btrfs_setxattr,
6049 	.getxattr	= btrfs_getxattr,
6050 	.listxattr	= btrfs_listxattr,
6051 	.removexattr	= btrfs_removexattr,
6052 };
6053 
6054 const struct dentry_operations btrfs_dentry_operations = {
6055 	.d_delete	= btrfs_dentry_delete,
6056 };
6057