xref: /openbmc/linux/fs/btrfs/inode.c (revision a5c43003)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include "compat.h"
41 #include "ctree.h"
42 #include "disk-io.h"
43 #include "transaction.h"
44 #include "btrfs_inode.h"
45 #include "ioctl.h"
46 #include "print-tree.h"
47 #include "volumes.h"
48 #include "ordered-data.h"
49 #include "xattr.h"
50 #include "tree-log.h"
51 #include "compression.h"
52 #include "locking.h"
53 
54 struct btrfs_iget_args {
55 	u64 ino;
56 	struct btrfs_root *root;
57 };
58 
59 static const struct inode_operations btrfs_dir_inode_operations;
60 static const struct inode_operations btrfs_symlink_inode_operations;
61 static const struct inode_operations btrfs_dir_ro_inode_operations;
62 static const struct inode_operations btrfs_special_inode_operations;
63 static const struct inode_operations btrfs_file_inode_operations;
64 static const struct address_space_operations btrfs_aops;
65 static const struct address_space_operations btrfs_symlink_aops;
66 static const struct file_operations btrfs_dir_file_operations;
67 static struct extent_io_ops btrfs_extent_io_ops;
68 
69 static struct kmem_cache *btrfs_inode_cachep;
70 struct kmem_cache *btrfs_trans_handle_cachep;
71 struct kmem_cache *btrfs_transaction_cachep;
72 struct kmem_cache *btrfs_path_cachep;
73 
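/*
 * map the file type bits of an inode's i_mode (S_IFMT, the top four
 * mode bits) to the directory entry type byte stored in btrfs dir items
 */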
74 #define S_SHIFT 12
75 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
76 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
77 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
78 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
79 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
80 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
81 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
82 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
83 };
84 
85 static void btrfs_truncate(struct inode *inode);
86 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
87 static noinline int cow_file_range(struct inode *inode,
88 				   struct page *locked_page,
89 				   u64 start, u64 end, int *page_started,
90 				   unsigned long *nr_written, int unlock);
91 
92 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
93 				     struct inode *inode,  struct inode *dir)
94 {
95 	int err;
96 
97 	err = btrfs_init_acl(trans, inode, dir);
98 	if (!err)
99 		err = btrfs_xattr_security_init(trans, inode, dir);
100 	return err;
101 }
102 
103 /*
104  * this does all the hard work for inserting an inline extent into
105  * the btree.  The caller should have done a btrfs_drop_extents so that
106  * no overlapping inline items exist in the btree
107  */
108 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
109 				struct btrfs_root *root, struct inode *inode,
110 				u64 start, size_t size, size_t compressed_size,
111 				struct page **compressed_pages)
112 {
113 	struct btrfs_key key;
114 	struct btrfs_path *path;
115 	struct extent_buffer *leaf;
116 	struct page *page = NULL;
117 	char *kaddr;
118 	unsigned long ptr;
119 	struct btrfs_file_extent_item *ei;
120 	int err = 0;
121 	int ret;
122 	size_t cur_size = size;
123 	size_t datasize;
124 	unsigned long offset;
125 	int use_compress = 0;
126 
127 	if (compressed_size && compressed_pages) {
128 		use_compress = 1;
129 		cur_size = compressed_size;
130 	}
131 
132 	path = btrfs_alloc_path();
133 	if (!path)
134 		return -ENOMEM;
135 
136 	path->leave_spinning = 1;
137 	btrfs_set_trans_block_group(trans, inode);
138 
139 	key.objectid = inode->i_ino;
140 	key.offset = start;
141 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
142 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
143 
144 	inode_add_bytes(inode, size);
145 	ret = btrfs_insert_empty_item(trans, root, path, &key,
146 				      datasize);
147 	BUG_ON(ret);
148 	if (ret) {
149 		err = ret;
150 		goto fail;
151 	}
152 	leaf = path->nodes[0];
153 	ei = btrfs_item_ptr(leaf, path->slots[0],
154 			    struct btrfs_file_extent_item);
155 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
156 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
157 	btrfs_set_file_extent_encryption(leaf, ei, 0);
158 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
159 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
160 	ptr = btrfs_file_extent_inline_start(ei);
161 
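	/*
	 * copy the file data into the leaf right behind the extent item.
	 * Compressed data comes page by page from the compressed_pages
	 * array, uncompressed data straight out of the page cache.
	 */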
162 	if (use_compress) {
163 		struct page *cpage;
164 		int i = 0;
165 		while (compressed_size > 0) {
166 			cpage = compressed_pages[i];
167 			cur_size = min_t(unsigned long, compressed_size,
168 				       PAGE_CACHE_SIZE);
169 
170 			kaddr = kmap_atomic(cpage, KM_USER0);
171 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
172 			kunmap_atomic(kaddr, KM_USER0);
173 
174 			i++;
175 			ptr += cur_size;
176 			compressed_size -= cur_size;
177 		}
178 		btrfs_set_file_extent_compression(leaf, ei,
179 						  BTRFS_COMPRESS_ZLIB);
180 	} else {
181 		page = find_get_page(inode->i_mapping,
182 				     start >> PAGE_CACHE_SHIFT);
183 		btrfs_set_file_extent_compression(leaf, ei, 0);
184 		kaddr = kmap_atomic(page, KM_USER0);
185 		offset = start & (PAGE_CACHE_SIZE - 1);
186 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
187 		kunmap_atomic(kaddr, KM_USER0);
188 		page_cache_release(page);
189 	}
190 	btrfs_mark_buffer_dirty(leaf);
191 	btrfs_free_path(path);
192 
193 	/*
194 	 * we're an inline extent, so nobody can
195 	 * extend the file past i_size without locking
196 	 * a page we already have locked.
197 	 *
198 	 * We must do any isize and inode updates
199 	 * before we unlock the pages.  Otherwise we
200 	 * could end up racing with unlink.
201 	 */
202 	BTRFS_I(inode)->disk_i_size = inode->i_size;
203 	btrfs_update_inode(trans, root, inode);
204 
205 	return 0;
206 fail:
207 	btrfs_free_path(path);
208 	return err;
209 }
210 
211 
212 /*
213  * conditionally insert an inline extent into the file.  This
214  * does the checks required to make sure the data is small enough
215  * to fit as an inline extent.
216  */
217 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
218 				 struct btrfs_root *root,
219 				 struct inode *inode, u64 start, u64 end,
220 				 size_t compressed_size,
221 				 struct page **compressed_pages)
222 {
223 	u64 isize = i_size_read(inode);
224 	u64 actual_end = min(end + 1, isize);
225 	u64 inline_len = actual_end - start;
226 	u64 aligned_end = (end + root->sectorsize - 1) &
227 			~((u64)root->sectorsize - 1);
228 	u64 hint_byte;
229 	u64 data_len = inline_len;
230 	int ret;
231 
232 	if (compressed_size)
233 		data_len = compressed_size;
234 
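	/*
	 * inline extents must start at file offset 0, reach the end of
	 * the file, fit inside a single page and the max_inline limit,
	 * and (when not compressed) must not end exactly on a sector
	 * boundary
	 */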
235 	if (start > 0 ||
236 	    actual_end >= PAGE_CACHE_SIZE ||
237 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
238 	    (!compressed_size &&
239 	    (actual_end & (root->sectorsize - 1)) == 0) ||
240 	    end + 1 < isize ||
241 	    data_len > root->fs_info->max_inline) {
242 		return 1;
243 	}
244 
245 	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
246 				 &hint_byte, 1);
247 	BUG_ON(ret);
248 
249 	if (isize > actual_end)
250 		inline_len = min_t(u64, isize, actual_end);
251 	ret = insert_inline_extent(trans, root, inode, start,
252 				   inline_len, compressed_size,
253 				   compressed_pages);
254 	BUG_ON(ret);
255 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
256 	return 0;
257 }
258 
259 struct async_extent {
260 	u64 start;
261 	u64 ram_size;
262 	u64 compressed_size;
263 	struct page **pages;
264 	unsigned long nr_pages;
265 	struct list_head list;
266 };
267 
268 struct async_cow {
269 	struct inode *inode;
270 	struct btrfs_root *root;
271 	struct page *locked_page;
272 	u64 start;
273 	u64 end;
274 	struct list_head extents;
275 	struct btrfs_work work;
276 };
277 
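/*
 * record a range (and its compressed pages, if any) on the async_cow
 * list so that phase two can allocate disk space and submit the IO
 */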
278 static noinline int add_async_extent(struct async_cow *cow,
279 				     u64 start, u64 ram_size,
280 				     u64 compressed_size,
281 				     struct page **pages,
282 				     unsigned long nr_pages)
283 {
284 	struct async_extent *async_extent;
285 
286 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
287 	async_extent->start = start;
288 	async_extent->ram_size = ram_size;
289 	async_extent->compressed_size = compressed_size;
290 	async_extent->pages = pages;
291 	async_extent->nr_pages = nr_pages;
292 	list_add_tail(&async_extent->list, &cow->extents);
293 	return 0;
294 }
295 
296 /*
297  * we create compressed extents in two phases.  The first
298  * phase compresses a range of pages that have already been
299  * locked (both pages and state bits are locked).
300  *
301  * This is done inside an ordered work queue, and the compression
302  * is spread across many cpus.  The actual IO submission is step
303  * two, and the ordered work queue takes care of making sure that
304  * happens in the same order things were put onto the queue by
305  * writepages and friends.
306  *
307  * If this code finds it can't get good compression, it puts an
308  * entry onto the work queue to write the uncompressed bytes.  This
309  * makes sure that both compressed inodes and uncompressed inodes
310  * are written in the same order that pdflush sent them down.
311  */
312 static noinline int compress_file_range(struct inode *inode,
313 					struct page *locked_page,
314 					u64 start, u64 end,
315 					struct async_cow *async_cow,
316 					int *num_added)
317 {
318 	struct btrfs_root *root = BTRFS_I(inode)->root;
319 	struct btrfs_trans_handle *trans;
320 	u64 num_bytes;
321 	u64 orig_start;
322 	u64 disk_num_bytes;
323 	u64 blocksize = root->sectorsize;
324 	u64 actual_end;
325 	u64 isize = i_size_read(inode);
326 	int ret = 0;
327 	struct page **pages = NULL;
328 	unsigned long nr_pages;
329 	unsigned long nr_pages_ret = 0;
330 	unsigned long total_compressed = 0;
331 	unsigned long total_in = 0;
332 	unsigned long max_compressed = 128 * 1024;
333 	unsigned long max_uncompressed = 128 * 1024;
334 	int i;
335 	int will_compress;
336 
337 	orig_start = start;
338 
339 	actual_end = min_t(u64, isize, end + 1);
340 again:
341 	will_compress = 0;
342 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
343 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
344 
345 	/*
346 	 * we don't want to send crud past the end of i_size through
347 	 * compression, that's just a waste of CPU time.  So, if the
348 	 * end of the file is before the start of our current
349 	 * requested range of bytes, we bail out to the uncompressed
350 	 * cleanup code that can deal with all of this.
351 	 *
352 	 * It isn't really the fastest way to fix things, but this is a
353 	 * very uncommon corner.
354 	 */
355 	if (actual_end <= start)
356 		goto cleanup_and_bail_uncompressed;
357 
358 	total_compressed = actual_end - start;
359 
360 	/* we want to make sure that the amount of ram required to uncompress
361 	 * an extent is reasonable, so we limit the total size in ram
362 	 * of a compressed extent to 128k.  This is a crucial number
363 	 * because it also controls how easily we can spread reads across
364 	 * cpus for decompression.
365 	 *
366 	 * We also want to make sure the amount of IO required to do
367 	 * a random read is reasonably small, so we limit the size of
368 	 * a compressed extent to 128k.
369 	 */
370 	total_compressed = min(total_compressed, max_uncompressed);
371 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
372 	num_bytes = max(blocksize,  num_bytes);
373 	disk_num_bytes = num_bytes;
374 	total_in = 0;
375 	ret = 0;
376 
377 	/*
378 	 * we do compression for mount -o compress and when the
379 	 * inode has not been flagged as nocompress.  This flag can
380 	 * change at any time if we discover bad compression ratios.
381 	 */
382 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
383 	    (btrfs_test_opt(root, COMPRESS) ||
384 	     (BTRFS_I(inode)->force_compress))) {
385 		WARN_ON(pages);
386 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
387 
388 		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
389 						total_compressed, pages,
390 						nr_pages, &nr_pages_ret,
391 						&total_in,
392 						&total_compressed,
393 						max_compressed);
394 
395 		if (!ret) {
396 			unsigned long offset = total_compressed &
397 				(PAGE_CACHE_SIZE - 1);
398 			struct page *page = pages[nr_pages_ret - 1];
399 			char *kaddr;
400 
401 			/* zero the tail end of the last page, we might be
402 			 * sending it down to disk
403 			 */
404 			if (offset) {
405 				kaddr = kmap_atomic(page, KM_USER0);
406 				memset(kaddr + offset, 0,
407 				       PAGE_CACHE_SIZE - offset);
408 				kunmap_atomic(kaddr, KM_USER0);
409 			}
410 			will_compress = 1;
411 		}
412 	}
413 	if (start == 0) {
414 		trans = btrfs_join_transaction(root, 1);
415 		BUG_ON(!trans);
416 		btrfs_set_trans_block_group(trans, inode);
417 
418 		/* let's try to make an inline extent */
419 		if (ret || total_in < (actual_end - start)) {
420 			/* we didn't compress the entire range, try
421 			 * to make an uncompressed inline extent.
422 			 */
423 			ret = cow_file_range_inline(trans, root, inode,
424 						    start, end, 0, NULL);
425 		} else {
426 			/* try making a compressed inline extent */
427 			ret = cow_file_range_inline(trans, root, inode,
428 						    start, end,
429 						    total_compressed, pages);
430 		}
431 		if (ret == 0) {
432 			/*
433 			 * inline extent creation worked, we don't need
434 			 * to create any more async work items.  Unlock
435 			 * and free up our temp pages.
436 			 */
437 			extent_clear_unlock_delalloc(inode,
438 			     &BTRFS_I(inode)->io_tree,
439 			     start, end, NULL,
440 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
441 			     EXTENT_CLEAR_DELALLOC |
442 			     EXTENT_CLEAR_ACCOUNTING |
443 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
444 
445 			btrfs_end_transaction(trans, root);
446 			goto free_pages_out;
447 		}
448 		btrfs_end_transaction(trans, root);
449 	}
450 
451 	if (will_compress) {
452 		/*
453 		 * we aren't doing an inline extent, so round the compressed
454 		 * size up to a block size boundary so the allocator does
455 		 * sane things
456 		 */
457 		total_compressed = (total_compressed + blocksize - 1) &
458 			~(blocksize - 1);
459 
460 		/*
461 		 * one last check to make sure the compression is really a
462 		 * win, compare the page count read with the blocks on disk
463 		 */
464 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
465 			~(PAGE_CACHE_SIZE - 1);
466 		if (total_compressed >= total_in) {
467 			will_compress = 0;
468 		} else {
469 			disk_num_bytes = total_compressed;
470 			num_bytes = total_in;
471 		}
472 	}
473 	if (!will_compress && pages) {
474 		/*
475 		 * the compression code ran but failed to make things smaller,
476 		 * free any pages it allocated and our page pointer array
477 		 */
478 		for (i = 0; i < nr_pages_ret; i++) {
479 			WARN_ON(pages[i]->mapping);
480 			page_cache_release(pages[i]);
481 		}
482 		kfree(pages);
483 		pages = NULL;
484 		total_compressed = 0;
485 		nr_pages_ret = 0;
486 
487 		/* flag the file so we don't compress in the future */
488 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
489 		    !(BTRFS_I(inode)->force_compress)) {
490 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
491 		}
492 	}
493 	if (will_compress) {
494 		*num_added += 1;
495 
496 		/* the async work queues will take care of doing actual
497 		 * allocation on disk for these compressed pages,
498 		 * and will submit them to the elevator.
499 		 */
500 		add_async_extent(async_cow, start, num_bytes,
501 				 total_compressed, pages, nr_pages_ret);
502 
503 		if (start + num_bytes < end && start + num_bytes < actual_end) {
504 			start += num_bytes;
505 			pages = NULL;
506 			cond_resched();
507 			goto again;
508 		}
509 	} else {
510 cleanup_and_bail_uncompressed:
511 		/*
512 		 * No compression, but we still need to write the pages in
513 		 * the file we've been given so far.  redirty the locked
514 		 * page if it corresponds to our extent and set things up
515 		 * for the async work queue to run cow_file_range to do
516 		 * the normal delalloc dance
517 		 */
518 		if (page_offset(locked_page) >= start &&
519 		    page_offset(locked_page) <= end) {
520 			__set_page_dirty_nobuffers(locked_page);
521 			/* unlocked later on in the async handlers */
522 		}
523 		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
524 		*num_added += 1;
525 	}
526 
527 out:
528 	return 0;
529 
530 free_pages_out:
531 	for (i = 0; i < nr_pages_ret; i++) {
532 		WARN_ON(pages[i]->mapping);
533 		page_cache_release(pages[i]);
534 	}
535 	kfree(pages);
536 
537 	goto out;
538 }
539 
540 /*
541  * phase two of compressed writeback.  This is the ordered portion
542  * of the code, which only gets called in the order the work was
543  * queued.  We walk all the async extents created by compress_file_range
544  * and send them down to the disk.
545  */
546 static noinline int submit_compressed_extents(struct inode *inode,
547 					      struct async_cow *async_cow)
548 {
549 	struct async_extent *async_extent;
550 	u64 alloc_hint = 0;
551 	struct btrfs_trans_handle *trans;
552 	struct btrfs_key ins;
553 	struct extent_map *em;
554 	struct btrfs_root *root = BTRFS_I(inode)->root;
555 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
556 	struct extent_io_tree *io_tree;
557 	int ret = 0;
558 
559 	if (list_empty(&async_cow->extents))
560 		return 0;
561 
562 
563 	while (!list_empty(&async_cow->extents)) {
564 		async_extent = list_entry(async_cow->extents.next,
565 					  struct async_extent, list);
566 		list_del(&async_extent->list);
567 
568 		io_tree = &BTRFS_I(inode)->io_tree;
569 
570 retry:
571 		/* did the compression code fall back to uncompressed IO? */
572 		if (!async_extent->pages) {
573 			int page_started = 0;
574 			unsigned long nr_written = 0;
575 
576 			lock_extent(io_tree, async_extent->start,
577 					 async_extent->start +
578 					 async_extent->ram_size - 1, GFP_NOFS);
579 
580 			/* allocate blocks */
581 			ret = cow_file_range(inode, async_cow->locked_page,
582 					     async_extent->start,
583 					     async_extent->start +
584 					     async_extent->ram_size - 1,
585 					     &page_started, &nr_written, 0);
586 
587 			/*
588 			 * if page_started, cow_file_range inserted an
589 			 * inline extent and took care of all the unlocking
590 			 * and IO for us.  Otherwise, we need to submit
591 			 * all those pages down to the drive.
592 			 */
593 			if (!page_started && !ret)
594 				extent_write_locked_range(io_tree,
595 						  inode, async_extent->start,
596 						  async_extent->start +
597 						  async_extent->ram_size - 1,
598 						  btrfs_get_extent,
599 						  WB_SYNC_ALL);
600 			kfree(async_extent);
601 			cond_resched();
602 			continue;
603 		}
604 
605 		lock_extent(io_tree, async_extent->start,
606 			    async_extent->start + async_extent->ram_size - 1,
607 			    GFP_NOFS);
608 
609 		trans = btrfs_join_transaction(root, 1);
610 		ret = btrfs_reserve_extent(trans, root,
611 					   async_extent->compressed_size,
612 					   async_extent->compressed_size,
613 					   0, alloc_hint,
614 					   (u64)-1, &ins, 1);
615 		btrfs_end_transaction(trans, root);
616 
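		/*
		 * if we couldn't reserve contiguous space for the compressed
		 * extent, free the compressed pages and retry.  With
		 * async_extent->pages cleared, the retry falls back to the
		 * regular uncompressed cow_file_range path above.
		 */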
617 		if (ret) {
618 			int i;
619 			for (i = 0; i < async_extent->nr_pages; i++) {
620 				WARN_ON(async_extent->pages[i]->mapping);
621 				page_cache_release(async_extent->pages[i]);
622 			}
623 			kfree(async_extent->pages);
624 			async_extent->nr_pages = 0;
625 			async_extent->pages = NULL;
626 			unlock_extent(io_tree, async_extent->start,
627 				      async_extent->start +
628 				      async_extent->ram_size - 1, GFP_NOFS);
629 			goto retry;
630 		}
631 
632 		/*
633 		 * here we're doing allocation and writeback of the
634 		 * compressed pages
635 		 */
636 		btrfs_drop_extent_cache(inode, async_extent->start,
637 					async_extent->start +
638 					async_extent->ram_size - 1, 0);
639 
640 		em = alloc_extent_map(GFP_NOFS);
641 		em->start = async_extent->start;
642 		em->len = async_extent->ram_size;
643 		em->orig_start = em->start;
644 
645 		em->block_start = ins.objectid;
646 		em->block_len = ins.offset;
647 		em->bdev = root->fs_info->fs_devices->latest_bdev;
648 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
649 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
650 
651 		while (1) {
652 			write_lock(&em_tree->lock);
653 			ret = add_extent_mapping(em_tree, em);
654 			write_unlock(&em_tree->lock);
655 			if (ret != -EEXIST) {
656 				free_extent_map(em);
657 				break;
658 			}
659 			btrfs_drop_extent_cache(inode, async_extent->start,
660 						async_extent->start +
661 						async_extent->ram_size - 1, 0);
662 		}
663 
664 		ret = btrfs_add_ordered_extent(inode, async_extent->start,
665 					       ins.objectid,
666 					       async_extent->ram_size,
667 					       ins.offset,
668 					       BTRFS_ORDERED_COMPRESSED);
669 		BUG_ON(ret);
670 
671 		/*
672 		 * clear dirty, set writeback and unlock the pages.
673 		 */
674 		extent_clear_unlock_delalloc(inode,
675 				&BTRFS_I(inode)->io_tree,
676 				async_extent->start,
677 				async_extent->start +
678 				async_extent->ram_size - 1,
679 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
680 				EXTENT_CLEAR_UNLOCK |
681 				EXTENT_CLEAR_DELALLOC |
682 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
683 
684 		ret = btrfs_submit_compressed_write(inode,
685 				    async_extent->start,
686 				    async_extent->ram_size,
687 				    ins.objectid,
688 				    ins.offset, async_extent->pages,
689 				    async_extent->nr_pages);
690 
691 		BUG_ON(ret);
692 		alloc_hint = ins.objectid + ins.offset;
693 		kfree(async_extent);
694 		cond_resched();
695 	}
696 
697 	return 0;
698 }
699 
700 /*
701  * when extent_io.c finds a delayed allocation range in the file,
702  * the call backs end up in this code.  The basic idea is to
703  * allocate extents on disk for the range, and create ordered data structs
704  * in ram to track those extents.
705  *
706  * locked_page is the page that writepage had locked already.  We use
707  * it to make sure we don't do extra locks or unlocks.
708  *
709  * *page_started is set to one if we unlock locked_page and do everything
710  * required to start IO on it.  It may be clean and already done with
711  * IO when we return.
712  */
713 static noinline int cow_file_range(struct inode *inode,
714 				   struct page *locked_page,
715 				   u64 start, u64 end, int *page_started,
716 				   unsigned long *nr_written,
717 				   int unlock)
718 {
719 	struct btrfs_root *root = BTRFS_I(inode)->root;
720 	struct btrfs_trans_handle *trans;
721 	u64 alloc_hint = 0;
722 	u64 num_bytes;
723 	unsigned long ram_size;
724 	u64 disk_num_bytes;
725 	u64 cur_alloc_size;
726 	u64 blocksize = root->sectorsize;
727 	u64 actual_end;
728 	u64 isize = i_size_read(inode);
729 	struct btrfs_key ins;
730 	struct extent_map *em;
731 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
732 	int ret = 0;
733 
734 	trans = btrfs_join_transaction(root, 1);
735 	BUG_ON(!trans);
736 	btrfs_set_trans_block_group(trans, inode);
737 
738 	actual_end = min_t(u64, isize, end + 1);
739 
740 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
741 	num_bytes = max(blocksize,  num_bytes);
742 	disk_num_bytes = num_bytes;
743 	ret = 0;
744 
745 	if (start == 0) {
746 		/* let's try to make an inline extent */
747 		ret = cow_file_range_inline(trans, root, inode,
748 					    start, end, 0, NULL);
749 		if (ret == 0) {
750 			extent_clear_unlock_delalloc(inode,
751 				     &BTRFS_I(inode)->io_tree,
752 				     start, end, NULL,
753 				     EXTENT_CLEAR_UNLOCK_PAGE |
754 				     EXTENT_CLEAR_UNLOCK |
755 				     EXTENT_CLEAR_DELALLOC |
756 				     EXTENT_CLEAR_ACCOUNTING |
757 				     EXTENT_CLEAR_DIRTY |
758 				     EXTENT_SET_WRITEBACK |
759 				     EXTENT_END_WRITEBACK);
760 
761 			*nr_written = *nr_written +
762 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
763 			*page_started = 1;
764 			ret = 0;
765 			goto out;
766 		}
767 	}
768 
769 	BUG_ON(disk_num_bytes >
770 	       btrfs_super_total_bytes(&root->fs_info->super_copy));
771 
772 
773 	read_lock(&BTRFS_I(inode)->extent_tree.lock);
774 	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
775 				   start, num_bytes);
776 	if (em) {
777 		/*
778 		 * if block start isn't an actual block number then find the
779 		 * first block in this inode and use that as a hint.  If that
780 		 * block is also bogus then just don't worry about it.
781 		 */
782 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
783 			free_extent_map(em);
784 			em = search_extent_mapping(em_tree, 0, 0);
785 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
786 				alloc_hint = em->block_start;
787 			if (em)
788 				free_extent_map(em);
789 		} else {
790 			alloc_hint = em->block_start;
791 			free_extent_map(em);
792 		}
793 	}
794 	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
795 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
796 
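	/*
	 * allocate disk extents for the whole range, looping on whatever
	 * the allocator actually hands back.  Each pass inserts an extent
	 * map and an ordered extent and then clears the delalloc bits on
	 * the pages it covered.
	 */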
797 	while (disk_num_bytes > 0) {
798 		unsigned long op;
799 
800 		cur_alloc_size = disk_num_bytes;
801 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
802 					   root->sectorsize, 0, alloc_hint,
803 					   (u64)-1, &ins, 1);
804 		BUG_ON(ret);
805 
806 		em = alloc_extent_map(GFP_NOFS);
807 		em->start = start;
808 		em->orig_start = em->start;
809 		ram_size = ins.offset;
810 		em->len = ins.offset;
811 
812 		em->block_start = ins.objectid;
813 		em->block_len = ins.offset;
814 		em->bdev = root->fs_info->fs_devices->latest_bdev;
815 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
816 
817 		while (1) {
818 			write_lock(&em_tree->lock);
819 			ret = add_extent_mapping(em_tree, em);
820 			write_unlock(&em_tree->lock);
821 			if (ret != -EEXIST) {
822 				free_extent_map(em);
823 				break;
824 			}
825 			btrfs_drop_extent_cache(inode, start,
826 						start + ram_size - 1, 0);
827 		}
828 
829 		cur_alloc_size = ins.offset;
830 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
831 					       ram_size, cur_alloc_size, 0);
832 		BUG_ON(ret);
833 
834 		if (root->root_key.objectid ==
835 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
836 			ret = btrfs_reloc_clone_csums(inode, start,
837 						      cur_alloc_size);
838 			BUG_ON(ret);
839 		}
840 
841 		if (disk_num_bytes < cur_alloc_size)
842 			break;
843 
844 		/* we're not doing compressed IO, don't unlock the first
845 		 * page (which the caller expects to stay locked), don't
846 		 * clear any dirty bits and don't set any writeback bits
847 		 *
848 		 * Do set the Private2 bit so we know this page was properly
849 		 * setup for writepage
850 		 */
851 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
852 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
853 			EXTENT_SET_PRIVATE2;
854 
855 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
856 					     start, start + ram_size - 1,
857 					     locked_page, op);
858 		disk_num_bytes -= cur_alloc_size;
859 		num_bytes -= cur_alloc_size;
860 		alloc_hint = ins.objectid + ins.offset;
861 		start += cur_alloc_size;
862 	}
863 out:
864 	ret = 0;
865 	btrfs_end_transaction(trans, root);
866 
867 	return ret;
868 }
869 
870 /*
871  * work queue call back to start compression on a file and pages
872  */
873 static noinline void async_cow_start(struct btrfs_work *work)
874 {
875 	struct async_cow *async_cow;
876 	int num_added = 0;
877 	async_cow = container_of(work, struct async_cow, work);
878 
879 	compress_file_range(async_cow->inode, async_cow->locked_page,
880 			    async_cow->start, async_cow->end, async_cow,
881 			    &num_added);
882 	if (num_added == 0)
883 		async_cow->inode = NULL;
884 }
885 
886 /*
887  * work queue call back to submit previously compressed pages
888  */
889 static noinline void async_cow_submit(struct btrfs_work *work)
890 {
891 	struct async_cow *async_cow;
892 	struct btrfs_root *root;
893 	unsigned long nr_pages;
894 
895 	async_cow = container_of(work, struct async_cow, work);
896 
897 	root = async_cow->root;
898 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
899 		PAGE_CACHE_SHIFT;
900 
901 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
902 
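	/*
	 * wake up anyone throttled in cow_file_range_async once the number
	 * of in-flight async delalloc pages has dropped low enough
	 */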
903 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
904 	    5 * 1024 * 1024 &&
905 	    waitqueue_active(&root->fs_info->async_submit_wait))
906 		wake_up(&root->fs_info->async_submit_wait);
907 
908 	if (async_cow->inode)
909 		submit_compressed_extents(async_cow->inode, async_cow);
910 }
911 
912 static noinline void async_cow_free(struct btrfs_work *work)
913 {
914 	struct async_cow *async_cow;
915 	async_cow = container_of(work, struct async_cow, work);
916 	kfree(async_cow);
917 }
918 
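/*
 * split a delalloc range into chunks (512k each unless compression is
 * disabled for this inode), queue every chunk on the delalloc workers
 * for async compression/cow, and throttle the caller when too many
 * async delalloc pages are already in flight
 */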
919 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
920 				u64 start, u64 end, int *page_started,
921 				unsigned long *nr_written)
922 {
923 	struct async_cow *async_cow;
924 	struct btrfs_root *root = BTRFS_I(inode)->root;
925 	unsigned long nr_pages;
926 	u64 cur_end;
927 	int limit = 10 * 1024 * 1024;
928 
929 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
930 			 1, 0, NULL, GFP_NOFS);
931 	while (start < end) {
932 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
933 		async_cow->inode = inode;
934 		async_cow->root = root;
935 		async_cow->locked_page = locked_page;
936 		async_cow->start = start;
937 
938 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
939 			cur_end = end;
940 		else
941 			cur_end = min(end, start + 512 * 1024 - 1);
942 
943 		async_cow->end = cur_end;
944 		INIT_LIST_HEAD(&async_cow->extents);
945 
946 		async_cow->work.func = async_cow_start;
947 		async_cow->work.ordered_func = async_cow_submit;
948 		async_cow->work.ordered_free = async_cow_free;
949 		async_cow->work.flags = 0;
950 
951 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
952 			PAGE_CACHE_SHIFT;
953 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
954 
955 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
956 				   &async_cow->work);
957 
958 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
959 			wait_event(root->fs_info->async_submit_wait,
960 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
961 			    limit));
962 		}
963 
964 		while (atomic_read(&root->fs_info->async_submit_draining) &&
965 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
966 			wait_event(root->fs_info->async_submit_wait,
967 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
968 			   0));
969 		}
970 
971 		*nr_written += nr_pages;
972 		start = cur_end + 1;
973 	}
974 	*page_started = 1;
975 	return 0;
976 }
977 
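/*
 * check whether any checksum items cover the given logical byte range.
 * The nocow path uses this to force cow so that csums for an extent are
 * either completely valid or not present at all.
 */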
978 static noinline int csum_exist_in_range(struct btrfs_root *root,
979 					u64 bytenr, u64 num_bytes)
980 {
981 	int ret;
982 	struct btrfs_ordered_sum *sums;
983 	LIST_HEAD(list);
984 
985 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
986 				       bytenr + num_bytes - 1, &list);
987 	if (ret == 0 && list_empty(&list))
988 		return 0;
989 
990 	while (!list_empty(&list)) {
991 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
992 		list_del(&sums->list);
993 		kfree(sums);
994 	}
995 	return 1;
996 }
997 
998 /*
999  * the nocow writeback call back.  This checks for snapshots or COW copies
1000  * of the extents that exist in the file, and COWs the file as required.
1001  *
1002  * If no cow copies or snapshots exist, we write directly to the existing
1003  * blocks on disk
1004  */
1005 static noinline int run_delalloc_nocow(struct inode *inode,
1006 				       struct page *locked_page,
1007 			      u64 start, u64 end, int *page_started, int force,
1008 			      unsigned long *nr_written)
1009 {
1010 	struct btrfs_root *root = BTRFS_I(inode)->root;
1011 	struct btrfs_trans_handle *trans;
1012 	struct extent_buffer *leaf;
1013 	struct btrfs_path *path;
1014 	struct btrfs_file_extent_item *fi;
1015 	struct btrfs_key found_key;
1016 	u64 cow_start;
1017 	u64 cur_offset;
1018 	u64 extent_end;
1019 	u64 extent_offset;
1020 	u64 disk_bytenr;
1021 	u64 num_bytes;
1022 	int extent_type;
1023 	int ret;
1024 	int type;
1025 	int nocow;
1026 	int check_prev = 1;
1027 
1028 	path = btrfs_alloc_path();
1029 	BUG_ON(!path);
1030 	trans = btrfs_join_transaction(root, 1);
1031 	BUG_ON(!trans);
1032 
1033 	cow_start = (u64)-1;
1034 	cur_offset = start;
1035 	while (1) {
1036 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1037 					       cur_offset, 0);
1038 		BUG_ON(ret < 0);
1039 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1040 			leaf = path->nodes[0];
1041 			btrfs_item_key_to_cpu(leaf, &found_key,
1042 					      path->slots[0] - 1);
1043 			if (found_key.objectid == inode->i_ino &&
1044 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1045 				path->slots[0]--;
1046 		}
1047 		check_prev = 0;
1048 next_slot:
1049 		leaf = path->nodes[0];
1050 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1051 			ret = btrfs_next_leaf(root, path);
1052 			if (ret < 0)
1053 				BUG_ON(1);
1054 			if (ret > 0)
1055 				break;
1056 			leaf = path->nodes[0];
1057 		}
1058 
1059 		nocow = 0;
1060 		disk_bytenr = 0;
1061 		num_bytes = 0;
1062 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1063 
1064 		if (found_key.objectid > inode->i_ino ||
1065 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1066 		    found_key.offset > end)
1067 			break;
1068 
1069 		if (found_key.offset > cur_offset) {
1070 			extent_end = found_key.offset;
1071 			extent_type = 0;
1072 			goto out_check;
1073 		}
1074 
1075 		fi = btrfs_item_ptr(leaf, path->slots[0],
1076 				    struct btrfs_file_extent_item);
1077 		extent_type = btrfs_file_extent_type(leaf, fi);
1078 
1079 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1080 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1081 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1082 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1083 			extent_end = found_key.offset +
1084 				btrfs_file_extent_num_bytes(leaf, fi);
1085 			if (extent_end <= start) {
1086 				path->slots[0]++;
1087 				goto next_slot;
1088 			}
1089 			if (disk_bytenr == 0)
1090 				goto out_check;
1091 			if (btrfs_file_extent_compression(leaf, fi) ||
1092 			    btrfs_file_extent_encryption(leaf, fi) ||
1093 			    btrfs_file_extent_other_encoding(leaf, fi))
1094 				goto out_check;
1095 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1096 				goto out_check;
1097 			if (btrfs_extent_readonly(root, disk_bytenr))
1098 				goto out_check;
1099 			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1100 						  found_key.offset -
1101 						  extent_offset, disk_bytenr))
1102 				goto out_check;
1103 			disk_bytenr += extent_offset;
1104 			disk_bytenr += cur_offset - found_key.offset;
1105 			num_bytes = min(end + 1, extent_end) - cur_offset;
1106 			/*
1107 			 * force cow if csum exists in the range.
1108 			 * this ensures that csums for a given extent are
1109 			 * either valid or do not exist.
1110 			 */
1111 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1112 				goto out_check;
1113 			nocow = 1;
1114 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1115 			extent_end = found_key.offset +
1116 				btrfs_file_extent_inline_len(leaf, fi);
1117 			extent_end = ALIGN(extent_end, root->sectorsize);
1118 		} else {
1119 			BUG_ON(1);
1120 		}
1121 out_check:
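		/*
		 * at this point extent_end and nocow describe the current
		 * item: skip it if it ends before our range, extend the
		 * pending cow range if we can't write in place, otherwise
		 * fall through and set up the nocow write below
		 */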
1122 		if (extent_end <= start) {
1123 			path->slots[0]++;
1124 			goto next_slot;
1125 		}
1126 		if (!nocow) {
1127 			if (cow_start == (u64)-1)
1128 				cow_start = cur_offset;
1129 			cur_offset = extent_end;
1130 			if (cur_offset > end)
1131 				break;
1132 			path->slots[0]++;
1133 			goto next_slot;
1134 		}
1135 
1136 		btrfs_release_path(root, path);
1137 		if (cow_start != (u64)-1) {
1138 			ret = cow_file_range(inode, locked_page, cow_start,
1139 					found_key.offset - 1, page_started,
1140 					nr_written, 1);
1141 			BUG_ON(ret);
1142 			cow_start = (u64)-1;
1143 		}
1144 
1145 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1146 			struct extent_map *em;
1147 			struct extent_map_tree *em_tree;
1148 			em_tree = &BTRFS_I(inode)->extent_tree;
1149 			em = alloc_extent_map(GFP_NOFS);
1150 			em->start = cur_offset;
1151 			em->orig_start = em->start;
1152 			em->len = num_bytes;
1153 			em->block_len = num_bytes;
1154 			em->block_start = disk_bytenr;
1155 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1156 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1157 			while (1) {
1158 				write_lock(&em_tree->lock);
1159 				ret = add_extent_mapping(em_tree, em);
1160 				write_unlock(&em_tree->lock);
1161 				if (ret != -EEXIST) {
1162 					free_extent_map(em);
1163 					break;
1164 				}
1165 				btrfs_drop_extent_cache(inode, em->start,
1166 						em->start + em->len - 1, 0);
1167 			}
1168 			type = BTRFS_ORDERED_PREALLOC;
1169 		} else {
1170 			type = BTRFS_ORDERED_NOCOW;
1171 		}
1172 
1173 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1174 					       num_bytes, num_bytes, type);
1175 		BUG_ON(ret);
1176 
1177 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1178 				cur_offset, cur_offset + num_bytes - 1,
1179 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1180 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1181 				EXTENT_SET_PRIVATE2);
1182 		cur_offset = extent_end;
1183 		if (cur_offset > end)
1184 			break;
1185 	}
1186 	btrfs_release_path(root, path);
1187 
1188 	if (cur_offset <= end && cow_start == (u64)-1)
1189 		cow_start = cur_offset;
1190 	if (cow_start != (u64)-1) {
1191 		ret = cow_file_range(inode, locked_page, cow_start, end,
1192 				     page_started, nr_written, 1);
1193 		BUG_ON(ret);
1194 	}
1195 
1196 	ret = btrfs_end_transaction(trans, root);
1197 	BUG_ON(ret);
1198 	btrfs_free_path(path);
1199 	return 0;
1200 }
1201 
1202 /*
1203  * extent_io.c call back to do delayed allocation processing
1204  */
1205 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1206 			      u64 start, u64 end, int *page_started,
1207 			      unsigned long *nr_written)
1208 {
1209 	int ret;
1210 	struct btrfs_root *root = BTRFS_I(inode)->root;
1211 
1212 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1213 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1214 					 page_started, 1, nr_written);
1215 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1216 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1217 					 page_started, 0, nr_written);
1218 	else if (!btrfs_test_opt(root, COMPRESS) &&
1219 		 !(BTRFS_I(inode)->force_compress))
1220 		ret = cow_file_range(inode, locked_page, start, end,
1221 				      page_started, nr_written, 1);
1222 	else
1223 		ret = cow_file_range_async(inode, locked_page, start, end,
1224 					   page_started, nr_written);
1225 	return ret;
1226 }
1227 
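/*
 * extent_io.c split_extent_hook, called when a delalloc extent state is
 * split in two.  The split leaves us with one more outstanding extent
 * to reserve metadata for.
 */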
1228 static int btrfs_split_extent_hook(struct inode *inode,
1229 				    struct extent_state *orig, u64 split)
1230 {
1231 	if (!(orig->state & EXTENT_DELALLOC))
1232 		return 0;
1233 
1234 	spin_lock(&BTRFS_I(inode)->accounting_lock);
1235 	BTRFS_I(inode)->outstanding_extents++;
1236 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
1237 
1238 	return 0;
1239 }
1240 
1241 /*
1242  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1243  * extents so we can keep track of new extents that are just merged onto old
1244  * extents, such as when we are doing sequential writes, so we can properly
1245  * account for the metadata space we'll need.
1246  */
1247 static int btrfs_merge_extent_hook(struct inode *inode,
1248 				   struct extent_state *new,
1249 				   struct extent_state *other)
1250 {
1251 	/* not delalloc, ignore it */
1252 	if (!(other->state & EXTENT_DELALLOC))
1253 		return 0;
1254 
1255 	spin_lock(&BTRFS_I(inode)->accounting_lock);
1256 	BTRFS_I(inode)->outstanding_extents--;
1257 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
1258 
1259 	return 0;
1260 }
1261 
1262 /*
1263  * extent_io.c set_bit_hook, used to track delayed allocation
1264  * bytes in this file, and to maintain the list of inodes that
1265  * have pending delalloc work to be done.
1266  */
1267 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1268 		       unsigned long old, unsigned long bits)
1269 {
1270 
1271 	/*
1272 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1273 	 * but in this case, we are only testing for the DELALLOC
1274 	 * bit, which is only set or cleared with irqs on
1275 	 */
1276 	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1277 		struct btrfs_root *root = BTRFS_I(inode)->root;
1278 
1279 		spin_lock(&BTRFS_I(inode)->accounting_lock);
1280 		BTRFS_I(inode)->outstanding_extents++;
1281 		spin_unlock(&BTRFS_I(inode)->accounting_lock);
1282 		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1283 
1284 		spin_lock(&root->fs_info->delalloc_lock);
1285 		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1286 		root->fs_info->delalloc_bytes += end - start + 1;
1287 		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1288 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1289 				      &root->fs_info->delalloc_inodes);
1290 		}
1291 		spin_unlock(&root->fs_info->delalloc_lock);
1292 	}
1293 	return 0;
1294 }
1295 
1296 /*
1297  * extent_io.c clear_bit_hook, see set_bit_hook for why
1298  */
1299 static int btrfs_clear_bit_hook(struct inode *inode,
1300 				struct extent_state *state, unsigned long bits)
1301 {
1302 	/*
1303 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1304 	 * but in this case, we are only testing for the DELALLOC
1305 	 * bit, which is only set or cleared with irqs on
1306 	 */
1307 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1308 		struct btrfs_root *root = BTRFS_I(inode)->root;
1309 
1310 		if (bits & EXTENT_DO_ACCOUNTING) {
1311 			spin_lock(&BTRFS_I(inode)->accounting_lock);
1312 			WARN_ON(!BTRFS_I(inode)->outstanding_extents);
1313 			BTRFS_I(inode)->outstanding_extents--;
1314 			spin_unlock(&BTRFS_I(inode)->accounting_lock);
1315 			btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1316 		}
1317 
1318 		spin_lock(&root->fs_info->delalloc_lock);
1319 		if (state->end - state->start + 1 >
1320 		    root->fs_info->delalloc_bytes) {
1321 			printk(KERN_INFO "btrfs warning: delalloc account "
1322 			       "%llu %llu\n",
1323 			       (unsigned long long)
1324 			       state->end - state->start + 1,
1325 			       (unsigned long long)
1326 			       root->fs_info->delalloc_bytes);
1327 			btrfs_delalloc_free_space(root, inode, (u64)-1);
1328 			root->fs_info->delalloc_bytes = 0;
1329 			BTRFS_I(inode)->delalloc_bytes = 0;
1330 		} else {
1331 			btrfs_delalloc_free_space(root, inode,
1332 						  state->end -
1333 						  state->start + 1);
1334 			root->fs_info->delalloc_bytes -= state->end -
1335 				state->start + 1;
1336 			BTRFS_I(inode)->delalloc_bytes -= state->end -
1337 				state->start + 1;
1338 		}
1339 		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1340 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1341 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1342 		}
1343 		spin_unlock(&root->fs_info->delalloc_lock);
1344 	}
1345 	return 0;
1346 }
1347 
1348 /*
1349  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1350  * we don't create bios that span stripes or chunks
1351  */
1352 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1353 			 size_t size, struct bio *bio,
1354 			 unsigned long bio_flags)
1355 {
1356 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1357 	struct btrfs_mapping_tree *map_tree;
1358 	u64 logical = (u64)bio->bi_sector << 9;
1359 	u64 length = 0;
1360 	u64 map_length;
1361 	int ret;
1362 
1363 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1364 		return 0;
1365 
1366 	length = bio->bi_size;
1367 	map_tree = &root->fs_info->mapping_tree;
1368 	map_length = length;
1369 	ret = btrfs_map_block(map_tree, READ, logical,
1370 			      &map_length, NULL, 0);
1371 
1372 	if (map_length < length + size)
1373 		return 1;
1374 	return 0;
1375 }
1376 
1377 /*
1378  * in order to insert checksums into the metadata in large chunks,
1379  * we wait until bio submission time.   All the pages in the bio are
1380  * checksummed and sums are attached onto the ordered extent record.
1381  *
1382  * At IO completion time the csums attached to the ordered extent record
1383  * are inserted into the btree
1384  */
1385 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1386 				    struct bio *bio, int mirror_num,
1387 				    unsigned long bio_flags)
1388 {
1389 	struct btrfs_root *root = BTRFS_I(inode)->root;
1390 	int ret = 0;
1391 
1392 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1393 	BUG_ON(ret);
1394 	return 0;
1395 }
1396 
1397 /*
1398  * in order to insert checksums into the metadata in large chunks,
1399  * we wait until bio submission time.   All the pages in the bio are
1400  * checksummed and sums are attached onto the ordered extent record.
1401  *
1402  * At IO completion time the csums attached to the ordered extent record
1403  * are inserted into the btree
1404  */
1405 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1406 			  int mirror_num, unsigned long bio_flags)
1407 {
1408 	struct btrfs_root *root = BTRFS_I(inode)->root;
1409 	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1410 }
1411 
1412 /*
1413  * extent_io.c submission hook. This does the right thing for csum calculation
1414  * on write, or reading the csums from the tree before a read
1415  */
1416 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1417 			  int mirror_num, unsigned long bio_flags)
1418 {
1419 	struct btrfs_root *root = BTRFS_I(inode)->root;
1420 	int ret = 0;
1421 	int skip_sum;
1422 
1423 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1424 
1425 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1426 	BUG_ON(ret);
1427 
1428 	if (!(rw & (1 << BIO_RW))) {
1429 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1430 			return btrfs_submit_compressed_read(inode, bio,
1431 						    mirror_num, bio_flags);
1432 		} else if (!skip_sum)
1433 			btrfs_lookup_bio_sums(root, inode, bio, NULL);
1434 		goto mapit;
1435 	} else if (!skip_sum) {
1436 		/* csum items have already been cloned */
1437 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1438 			goto mapit;
1439 		/* we're doing a write, do the async checksumming */
1440 		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1441 				   inode, rw, bio, mirror_num,
1442 				   bio_flags, __btrfs_submit_bio_start,
1443 				   __btrfs_submit_bio_done);
1444 	}
1445 
1446 mapit:
1447 	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1448 }
1449 
1450 /*
1451  * given a list of ordered sums record them in the inode.  This happens
1452  * at IO completion time based on sums calculated at bio submission time.
1453  */
1454 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1455 			     struct inode *inode, u64 file_offset,
1456 			     struct list_head *list)
1457 {
1458 	struct btrfs_ordered_sum *sum;
1459 
1460 	btrfs_set_trans_block_group(trans, inode);
1461 
1462 	list_for_each_entry(sum, list, list) {
1463 		btrfs_csum_file_blocks(trans,
1464 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1465 	}
1466 	return 0;
1467 }
1468 
1469 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1470 			      struct extent_state **cached_state)
1471 {
1472 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1473 		WARN_ON(1);
1474 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1475 				   cached_state, GFP_NOFS);
1476 }
1477 
1478 /* see btrfs_writepage_start_hook for details on why this is required */
1479 struct btrfs_writepage_fixup {
1480 	struct page *page;
1481 	struct btrfs_work work;
1482 };
1483 
1484 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1485 {
1486 	struct btrfs_writepage_fixup *fixup;
1487 	struct btrfs_ordered_extent *ordered;
1488 	struct extent_state *cached_state = NULL;
1489 	struct page *page;
1490 	struct inode *inode;
1491 	u64 page_start;
1492 	u64 page_end;
1493 
1494 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1495 	page = fixup->page;
1496 again:
1497 	lock_page(page);
1498 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1499 		ClearPageChecked(page);
1500 		goto out_page;
1501 	}
1502 
1503 	inode = page->mapping->host;
1504 	page_start = page_offset(page);
1505 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1506 
1507 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1508 			 &cached_state, GFP_NOFS);
1509 
1510 	/* already ordered? We're done */
1511 	if (PagePrivate2(page))
1512 		goto out;
1513 
1514 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1515 	if (ordered) {
1516 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1517 				     page_end, &cached_state, GFP_NOFS);
1518 		unlock_page(page);
1519 		btrfs_start_ordered_extent(inode, ordered, 1);
1520 		goto again;
1521 	}
1522 
1523 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1524 	ClearPageChecked(page);
1525 out:
1526 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1527 			     &cached_state, GFP_NOFS);
1528 out_page:
1529 	unlock_page(page);
1530 	page_cache_release(page);
1531 }
1532 
1533 /*
1534  * There are a few paths in the higher layers of the kernel that directly
1535  * set the page dirty bit without asking the filesystem if it is a
1536  * good idea.  This causes problems because we want to make sure COW
1537  * properly happens and the data=ordered rules are followed.
1538  *
1539  * In our case any range that doesn't have the ORDERED bit set
1540  * hasn't been properly setup for IO.  We kick off an async process
1541  * to fix it up.  The async helper will wait for ordered extents, set
1542  * the delalloc bit and make it safe to write the page.
1543  */
1544 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1545 {
1546 	struct inode *inode = page->mapping->host;
1547 	struct btrfs_writepage_fixup *fixup;
1548 	struct btrfs_root *root = BTRFS_I(inode)->root;
1549 
1550 	/* this page is properly in the ordered list */
1551 	if (TestClearPagePrivate2(page))
1552 		return 0;
1553 
1554 	if (PageChecked(page))
1555 		return -EAGAIN;
1556 
1557 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1558 	if (!fixup)
1559 		return -EAGAIN;
1560 
1561 	SetPageChecked(page);
1562 	page_cache_get(page);
1563 	fixup->work.func = btrfs_writepage_fixup_worker;
1564 	fixup->page = page;
1565 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1566 	return -EAGAIN;
1567 }
1568 
1569 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1570 				       struct inode *inode, u64 file_pos,
1571 				       u64 disk_bytenr, u64 disk_num_bytes,
1572 				       u64 num_bytes, u64 ram_bytes,
1573 				       u8 compression, u8 encryption,
1574 				       u16 other_encoding, int extent_type)
1575 {
1576 	struct btrfs_root *root = BTRFS_I(inode)->root;
1577 	struct btrfs_file_extent_item *fi;
1578 	struct btrfs_path *path;
1579 	struct extent_buffer *leaf;
1580 	struct btrfs_key ins;
1581 	u64 hint;
1582 	int ret;
1583 
1584 	path = btrfs_alloc_path();
1585 	BUG_ON(!path);
1586 
1587 	path->leave_spinning = 1;
1588 
1589 	/*
1590 	 * we may be replacing one extent in the tree with another.
1591 	 * The new extent is pinned in the extent map, and we don't want
1592 	 * to drop it from the cache until it is completely in the btree.
1593 	 *
1594 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1595 	 * the caller is expected to unpin it and allow it to be merged
1596 	 * with the others.
1597 	 */
1598 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1599 				 &hint, 0);
1600 	BUG_ON(ret);
1601 
1602 	ins.objectid = inode->i_ino;
1603 	ins.offset = file_pos;
1604 	ins.type = BTRFS_EXTENT_DATA_KEY;
1605 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1606 	BUG_ON(ret);
1607 	leaf = path->nodes[0];
1608 	fi = btrfs_item_ptr(leaf, path->slots[0],
1609 			    struct btrfs_file_extent_item);
1610 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1611 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1612 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1613 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1614 	btrfs_set_file_extent_offset(leaf, fi, 0);
1615 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1616 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1617 	btrfs_set_file_extent_compression(leaf, fi, compression);
1618 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1619 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1620 
1621 	btrfs_unlock_up_safe(path, 1);
1622 	btrfs_set_lock_blocking(leaf);
1623 
1624 	btrfs_mark_buffer_dirty(leaf);
1625 
1626 	inode_add_bytes(inode, num_bytes);
1627 
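	/*
	 * now insert the extent item and backref for the space that was
	 * reserved at allocation time, keyed by disk bytenr and length
	 */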
1628 	ins.objectid = disk_bytenr;
1629 	ins.offset = disk_num_bytes;
1630 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1631 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1632 					root->root_key.objectid,
1633 					inode->i_ino, file_pos, &ins);
1634 	BUG_ON(ret);
1635 	btrfs_free_path(path);
1636 
1637 	return 0;
1638 }
1639 
1640 /*
1641  * helper function for btrfs_finish_ordered_io, this
1642  * just reads in some of the csum leaves to prime them into ram
1643  * before we start the transaction.  It limits the amount of btree
1644  * reads required while inside the transaction.
1645  */
1646 /* as ordered data IO finishes, this gets called so we can finish
1647  * an ordered extent if the range of bytes in the file it covers are
1648  * fully written.
1649  */
1650 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1651 {
1652 	struct btrfs_root *root = BTRFS_I(inode)->root;
1653 	struct btrfs_trans_handle *trans;
1654 	struct btrfs_ordered_extent *ordered_extent = NULL;
1655 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1656 	struct extent_state *cached_state = NULL;
1657 	int compressed = 0;
1658 	int ret;
1659 
1660 	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1661 					     end - start + 1);
1662 	if (!ret)
1663 		return 0;
1664 	BUG_ON(!ordered_extent);
1665 
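	/*
	 * nocow writes reused an existing file extent, so there is nothing
	 * new to insert into the file tree.  Just push i_size forward and
	 * update the inode if that changed anything.
	 */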
1666 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1667 		BUG_ON(!list_empty(&ordered_extent->list));
1668 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1669 		if (!ret) {
1670 			trans = btrfs_join_transaction(root, 1);
1671 			ret = btrfs_update_inode(trans, root, inode);
1672 			BUG_ON(ret);
1673 			btrfs_end_transaction(trans, root);
1674 		}
1675 		goto out;
1676 	}
1677 
1678 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1679 			 ordered_extent->file_offset + ordered_extent->len - 1,
1680 			 0, &cached_state, GFP_NOFS);
1681 
1682 	trans = btrfs_join_transaction(root, 1);
1683 
1684 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1685 		compressed = 1;
1686 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1687 		BUG_ON(compressed);
1688 		ret = btrfs_mark_extent_written(trans, inode,
1689 						ordered_extent->file_offset,
1690 						ordered_extent->file_offset +
1691 						ordered_extent->len);
1692 		BUG_ON(ret);
1693 	} else {
1694 		ret = insert_reserved_file_extent(trans, inode,
1695 						ordered_extent->file_offset,
1696 						ordered_extent->start,
1697 						ordered_extent->disk_len,
1698 						ordered_extent->len,
1699 						ordered_extent->len,
1700 						compressed, 0, 0,
1701 						BTRFS_FILE_EXTENT_REG);
1702 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1703 				   ordered_extent->file_offset,
1704 				   ordered_extent->len);
1705 		BUG_ON(ret);
1706 	}
1707 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1708 			     ordered_extent->file_offset +
1709 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1710 
1711 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1712 			  &ordered_extent->list);
1713 
1714 	/* this also removes the ordered extent from the tree */
1715 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1716 	ret = btrfs_update_inode(trans, root, inode);
1717 	BUG_ON(ret);
1718 	btrfs_end_transaction(trans, root);
1719 out:
1720 	/* once for us */
1721 	btrfs_put_ordered_extent(ordered_extent);
1722 	/* once for the tree */
1723 	btrfs_put_ordered_extent(ordered_extent);
1724 
1725 	return 0;
1726 }
1727 
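/*
 * writepage end_io hook: clear the PagePrivate2 bit used by the ordered
 * extent code and finish the ordered extent covering [start, end] now
 * that the data IO is complete.
 */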
1728 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1729 				struct extent_state *state, int uptodate)
1730 {
1731 	ClearPagePrivate2(page);
1732 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1733 }
1734 
1735 /*
1736  * When IO fails, either with EIO or csum verification fails, we
1737  * try other mirrors that might have a good copy of the data.  This
1738  * io_failure_record is used to record state as we go through all the
1739  * mirrors.  If another mirror has good data, the page is set up to date
1740  * and things continue.  If a good mirror can't be found, the original
1741  * bio end_io callback is called to indicate things have failed.
1742  */
1743 struct io_failure_record {
1744 	struct page *page;
1745 	u64 start;
1746 	u64 len;
1747 	u64 logical;
1748 	unsigned long bio_flags;
1749 	int last_mirror;
1750 };
1751 
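/*
 * Called when a read bio fails, either with EIO or a csum mismatch.  Look
 * up (or allocate) the io_failure_record for this range and resubmit the
 * page to the next mirror through the submit_bio_hook.  Once every copy
 * reported by btrfs_num_copies() has been tried, drop the record and
 * return -EIO.
 */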
1752 static int btrfs_io_failed_hook(struct bio *failed_bio,
1753 			 struct page *page, u64 start, u64 end,
1754 			 struct extent_state *state)
1755 {
1756 	struct io_failure_record *failrec = NULL;
1757 	u64 private;
1758 	struct extent_map *em;
1759 	struct inode *inode = page->mapping->host;
1760 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1761 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1762 	struct bio *bio;
1763 	int num_copies;
1764 	int ret;
1765 	int rw;
1766 	u64 logical;
1767 
1768 	ret = get_state_private(failure_tree, start, &private);
1769 	if (ret) {
1770 		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1771 		if (!failrec)
1772 			return -ENOMEM;
1773 		failrec->start = start;
1774 		failrec->len = end - start + 1;
1775 		failrec->last_mirror = 0;
1776 		failrec->bio_flags = 0;
1777 
1778 		read_lock(&em_tree->lock);
1779 		em = lookup_extent_mapping(em_tree, start, failrec->len);
1780 		if (em && (em->start > start || em->start + em->len < start)) {
1781 			free_extent_map(em);
1782 			em = NULL;
1783 		}
1784 		read_unlock(&em_tree->lock);
1785 
1786 		if (!em || IS_ERR(em)) {
1787 			kfree(failrec);
1788 			return -EIO;
1789 		}
1790 		logical = start - em->start;
1791 		logical = em->block_start + logical;
1792 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1793 			logical = em->block_start;
1794 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1795 		}
1796 		failrec->logical = logical;
1797 		free_extent_map(em);
1798 		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1799 				EXTENT_DIRTY, GFP_NOFS);
1800 		set_state_private(failure_tree, start,
1801 				 (u64)(unsigned long)failrec);
1802 	} else {
1803 		failrec = (struct io_failure_record *)(unsigned long)private;
1804 	}
1805 	num_copies = btrfs_num_copies(
1806 			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
1807 			      failrec->logical, failrec->len);
1808 	failrec->last_mirror++;
1809 	if (!state) {
1810 		spin_lock(&BTRFS_I(inode)->io_tree.lock);
1811 		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1812 						    failrec->start,
1813 						    EXTENT_LOCKED);
1814 		if (state && state->start != failrec->start)
1815 			state = NULL;
1816 		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1817 	}
1818 	if (!state || failrec->last_mirror > num_copies) {
1819 		set_state_private(failure_tree, failrec->start, 0);
1820 		clear_extent_bits(failure_tree, failrec->start,
1821 				  failrec->start + failrec->len - 1,
1822 				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1823 		kfree(failrec);
1824 		return -EIO;
1825 	}
1826 	bio = bio_alloc(GFP_NOFS, 1);
1827 	bio->bi_private = state;
1828 	bio->bi_end_io = failed_bio->bi_end_io;
1829 	bio->bi_sector = failrec->logical >> 9;
1830 	bio->bi_bdev = failed_bio->bi_bdev;
1831 	bio->bi_size = 0;
1832 
1833 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
1834 	if (failed_bio->bi_rw & (1 << BIO_RW))
1835 		rw = WRITE;
1836 	else
1837 		rw = READ;
1838 
1839 	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1840 						      failrec->last_mirror,
1841 						      failrec->bio_flags);
1842 	return 0;
1843 }
1844 
1845 /*
1846  * each time an IO finishes, we do a fast check in the IO failure tree
1847  * to see if we need to process or clean up an io_failure_record
1848  */
1849 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1850 {
1851 	u64 private;
1852 	u64 private_failure;
1853 	struct io_failure_record *failure;
1854 	int ret;
1855 
1856 	private = 0;
1857 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1858 			     (u64)-1, 1, EXTENT_DIRTY)) {
1859 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1860 					start, &private_failure);
1861 		if (ret == 0) {
1862 			failure = (struct io_failure_record *)(unsigned long)
1863 				   private_failure;
1864 			set_state_private(&BTRFS_I(inode)->io_failure_tree,
1865 					  failure->start, 0);
1866 			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1867 					  failure->start,
1868 					  failure->start + failure->len - 1,
1869 					  EXTENT_DIRTY | EXTENT_LOCKED,
1870 					  GFP_NOFS);
1871 			kfree(failure);
1872 		}
1873 	}
1874 	return 0;
1875 }
1876 
1877 /*
1878  * when reads are done, we need to check csums to verify the data is correct
1879  * if there's a match, we allow the bio to finish.  If not, we go through
1880  * the io_failure_record routines to find good copies
1881  */
1882 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1883 			       struct extent_state *state)
1884 {
1885 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1886 	struct inode *inode = page->mapping->host;
1887 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1888 	char *kaddr;
1889 	u64 private = ~(u32)0;
1890 	int ret;
1891 	struct btrfs_root *root = BTRFS_I(inode)->root;
1892 	u32 csum = ~(u32)0;
1893 
1894 	if (PageChecked(page)) {
1895 		ClearPageChecked(page);
1896 		goto good;
1897 	}
1898 
1899 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1900 		return 0;
1901 
1902 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1903 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1904 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1905 				  GFP_NOFS);
1906 		return 0;
1907 	}
1908 
1909 	if (state && state->start == start) {
1910 		private = state->private;
1911 		ret = 0;
1912 	} else {
1913 		ret = get_state_private(io_tree, start, &private);
1914 	}
1915 	kaddr = kmap_atomic(page, KM_USER0);
1916 	if (ret)
1917 		goto zeroit;
1918 
1919 	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1920 	btrfs_csum_final(csum, (char *)&csum);
1921 	if (csum != private)
1922 		goto zeroit;
1923 
1924 	kunmap_atomic(kaddr, KM_USER0);
1925 good:
1926 	/* if the io failure tree for this inode is non-empty,
1927 	 * check to see if we've recovered from a failed IO
1928 	 */
1929 	btrfs_clean_io_failures(inode, start);
1930 	return 0;
1931 
1932 zeroit:
1933 	if (printk_ratelimit()) {
1934 		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1935 		       "private %llu\n", page->mapping->host->i_ino,
1936 		       (unsigned long long)start, csum,
1937 		       (unsigned long long)private);
1938 	}
1939 	memset(kaddr + offset, 1, end - start + 1);
1940 	flush_dcache_page(page);
1941 	kunmap_atomic(kaddr, KM_USER0);
1942 	if (private == 0)
1943 		return 0;
1944 	return -EIO;
1945 }
1946 
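/*
 * Delayed iputs: callers that must not drop the final reference to an
 * inode directly (typically because the resulting eviction could start a
 * transaction or take locks they already hold) use
 *
 *	btrfs_add_delayed_iput(inode);
 *
 * which drops the reference immediately if it is not the last one and
 * otherwise queues the inode on fs_info->delayed_iputs.  The real iput()
 * happens later from btrfs_run_delayed_iputs() in a safe context.
 */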
1947 struct delayed_iput {
1948 	struct list_head list;
1949 	struct inode *inode;
1950 };
1951 
1952 void btrfs_add_delayed_iput(struct inode *inode)
1953 {
1954 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1955 	struct delayed_iput *delayed;
1956 
1957 	if (atomic_add_unless(&inode->i_count, -1, 1))
1958 		return;
1959 
1960 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
1961 	delayed->inode = inode;
1962 
1963 	spin_lock(&fs_info->delayed_iput_lock);
1964 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
1965 	spin_unlock(&fs_info->delayed_iput_lock);
1966 }
1967 
1968 void btrfs_run_delayed_iputs(struct btrfs_root *root)
1969 {
1970 	LIST_HEAD(list);
1971 	struct btrfs_fs_info *fs_info = root->fs_info;
1972 	struct delayed_iput *delayed;
1973 	int empty;
1974 
1975 	spin_lock(&fs_info->delayed_iput_lock);
1976 	empty = list_empty(&fs_info->delayed_iputs);
1977 	spin_unlock(&fs_info->delayed_iput_lock);
1978 	if (empty)
1979 		return;
1980 
1981 	down_read(&root->fs_info->cleanup_work_sem);
1982 	spin_lock(&fs_info->delayed_iput_lock);
1983 	list_splice_init(&fs_info->delayed_iputs, &list);
1984 	spin_unlock(&fs_info->delayed_iput_lock);
1985 
1986 	while (!list_empty(&list)) {
1987 		delayed = list_entry(list.next, struct delayed_iput, list);
1988 		list_del(&delayed->list);
1989 		iput(delayed->inode);
1990 		kfree(delayed);
1991 	}
1992 	up_read(&root->fs_info->cleanup_work_sem);
1993 }
1994 
1995 /*
1996  * This creates an orphan entry for the given inode in case something goes
1997  * wrong in the middle of an unlink/truncate.
1998  */
1999 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2000 {
2001 	struct btrfs_root *root = BTRFS_I(inode)->root;
2002 	int ret = 0;
2003 
2004 	spin_lock(&root->list_lock);
2005 
2006 	/* already on the orphan list, we're good */
2007 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2008 		spin_unlock(&root->list_lock);
2009 		return 0;
2010 	}
2011 
2012 	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2013 
2014 	spin_unlock(&root->list_lock);
2015 
2016 	/*
2017 	 * insert an orphan item to track this unlinked/truncated file
2018 	 */
2019 	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2020 
2021 	return ret;
2022 }
2023 
2024 /*
2025  * We have done the truncate/delete so we can go ahead and remove the orphan
2026  * item for this particular inode.
2027  */
2028 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2029 {
2030 	struct btrfs_root *root = BTRFS_I(inode)->root;
2031 	int ret = 0;
2032 
2033 	spin_lock(&root->list_lock);
2034 
2035 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2036 		spin_unlock(&root->list_lock);
2037 		return 0;
2038 	}
2039 
2040 	list_del_init(&BTRFS_I(inode)->i_orphan);
2041 	if (!trans) {
2042 		spin_unlock(&root->list_lock);
2043 		return 0;
2044 	}
2045 
2046 	spin_unlock(&root->list_lock);
2047 
2048 	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2049 
2050 	return ret;
2051 }
2052 
2053 /*
2054  * this cleans up any orphans that may be left on the list from the last use
2055  * of this root.
2056  */
2057 void btrfs_orphan_cleanup(struct btrfs_root *root)
2058 {
2059 	struct btrfs_path *path;
2060 	struct extent_buffer *leaf;
2061 	struct btrfs_item *item;
2062 	struct btrfs_key key, found_key;
2063 	struct btrfs_trans_handle *trans;
2064 	struct inode *inode;
2065 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2066 
2067 	if (!xchg(&root->clean_orphans, 0))
2068 		return;
2069 
2070 	path = btrfs_alloc_path();
2071 	BUG_ON(!path);
2072 	path->reada = -1;
2073 
2074 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2075 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2076 	key.offset = (u64)-1;
2077 
2078 	while (1) {
2079 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2080 		if (ret < 0) {
2081 			printk(KERN_ERR "Error searching slot for orphan: %d"
2082 			       "\n", ret);
2083 			break;
2084 		}
2085 
2086 		/*
2087 		 * ret == 0 means we found exactly what we were searching for,
2088 		 * which is unexpected but possible; only adjust the path if we
2089 		 * didn't find the key, and check for entries that match
2090 		 */
2091 		if (ret > 0) {
2092 			if (path->slots[0] == 0)
2093 				break;
2094 			path->slots[0]--;
2095 		}
2096 
2097 		/* pull out the item */
2098 		leaf = path->nodes[0];
2099 		item = btrfs_item_nr(leaf, path->slots[0]);
2100 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2101 
2102 		/* make sure the item matches what we want */
2103 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2104 			break;
2105 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2106 			break;
2107 
2108 		/* release the path since we're done with it */
2109 		btrfs_release_path(root, path);
2110 
2111 		/*
2112 		 * this is basically btrfs_lookup(), without crossing into
2113 		 * another root.  we store the inode number in the offset
2114 		 * of the orphan item.
2115 		 */
2116 		found_key.objectid = found_key.offset;
2117 		found_key.type = BTRFS_INODE_ITEM_KEY;
2118 		found_key.offset = 0;
2119 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2120 		if (IS_ERR(inode))
2121 			break;
2122 
2123 		/*
2124 		 * add this inode to the orphan list so btrfs_orphan_del does
2125 		 * the proper thing when we hit it
2126 		 */
2127 		spin_lock(&root->list_lock);
2128 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2129 		spin_unlock(&root->list_lock);
2130 
2131 		/*
2132 		 * if this is a bad inode, it means we actually succeeded in
2133 		 * removing the inode, but not the orphan record, which means
2134 		 * we need to manually delete the orphan since iput will just
2135 		 * do a destroy_inode
2136 		 */
2137 		if (is_bad_inode(inode)) {
2138 			trans = btrfs_start_transaction(root, 1);
2139 			btrfs_orphan_del(trans, inode);
2140 			btrfs_end_transaction(trans, root);
2141 			iput(inode);
2142 			continue;
2143 		}
2144 
2145 		/* if we have links, this was a truncate, lets do that */
2146 		/* if we have links, this was a truncate, let's do that */
2147 			nr_truncate++;
2148 			btrfs_truncate(inode);
2149 		} else {
2150 			nr_unlink++;
2151 		}
2152 
2153 		/* this will do delete_inode and everything for us */
2154 		iput(inode);
2155 	}
2156 
2157 	if (nr_unlink)
2158 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2159 	if (nr_truncate)
2160 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2161 
2162 	btrfs_free_path(path);
2163 }
2164 
2165 /*
2166  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2167  * don't find any xattrs, we know there can't be any acls.
2168  *
2169  * slot is the slot the inode is in, objectid is the objectid of the inode
2170  */
2171 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2172 					  int slot, u64 objectid)
2173 {
2174 	u32 nritems = btrfs_header_nritems(leaf);
2175 	struct btrfs_key found_key;
2176 	int scanned = 0;
2177 
2178 	slot++;
2179 	while (slot < nritems) {
2180 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2181 
2182 		/* we found a different objectid, there must not be acls */
2183 		if (found_key.objectid != objectid)
2184 			return 0;
2185 
2186 		/* we found an xattr, assume we've got an acl */
2187 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2188 			return 1;
2189 
2190 		/*
2191 		 * we found a key greater than an xattr key, there can't
2192 		 * be any acls later on
2193 		 */
2194 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2195 			return 0;
2196 
2197 		slot++;
2198 		scanned++;
2199 
2200 		/*
2201 		 * it goes inode, inode backrefs, xattrs, extents,
2202 		 * so if there are a ton of hard links to an inode there can
2203 		 * be a lot of backrefs.  Don't waste time searching too hard,
2204 		 * this is just an optimization
2205 		 */
2206 		if (scanned >= 8)
2207 			break;
2208 	}
2209 	/* we hit the end of the leaf before we found an xattr or
2210 	 * something larger than an xattr.  We have to assume the inode
2211 	 * has acls
2212 	 */
2213 	return 1;
2214 }
2215 
2216 /*
2217  * read an inode from the btree into the in-memory inode
2218  */
2219 static void btrfs_read_locked_inode(struct inode *inode)
2220 {
2221 	struct btrfs_path *path;
2222 	struct extent_buffer *leaf;
2223 	struct btrfs_inode_item *inode_item;
2224 	struct btrfs_timespec *tspec;
2225 	struct btrfs_root *root = BTRFS_I(inode)->root;
2226 	struct btrfs_key location;
2227 	int maybe_acls;
2228 	u64 alloc_group_block;
2229 	u32 rdev;
2230 	int ret;
2231 
2232 	path = btrfs_alloc_path();
2233 	BUG_ON(!path);
2234 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2235 
2236 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2237 	if (ret)
2238 		goto make_bad;
2239 
2240 	leaf = path->nodes[0];
2241 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2242 				    struct btrfs_inode_item);
2243 
2244 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2245 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2246 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2247 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2248 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2249 
2250 	tspec = btrfs_inode_atime(inode_item);
2251 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2252 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2253 
2254 	tspec = btrfs_inode_mtime(inode_item);
2255 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2256 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2257 
2258 	tspec = btrfs_inode_ctime(inode_item);
2259 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2260 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2261 
2262 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2263 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2264 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2265 	inode->i_generation = BTRFS_I(inode)->generation;
2266 	inode->i_rdev = 0;
2267 	rdev = btrfs_inode_rdev(leaf, inode_item);
2268 
2269 	BTRFS_I(inode)->index_cnt = (u64)-1;
2270 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2271 
2272 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2273 
2274 	/*
2275 	 * try to precache a NULL acl entry for files that don't have
2276 	 * any xattrs or acls
2277 	 */
2278 	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2279 	if (!maybe_acls)
2280 		cache_no_acl(inode);
2281 
2282 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2283 						alloc_group_block, 0);
2284 	btrfs_free_path(path);
2285 	inode_item = NULL;
2286 
2287 	switch (inode->i_mode & S_IFMT) {
2288 	case S_IFREG:
2289 		inode->i_mapping->a_ops = &btrfs_aops;
2290 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2291 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2292 		inode->i_fop = &btrfs_file_operations;
2293 		inode->i_op = &btrfs_file_inode_operations;
2294 		break;
2295 	case S_IFDIR:
2296 		inode->i_fop = &btrfs_dir_file_operations;
2297 		if (root == root->fs_info->tree_root)
2298 			inode->i_op = &btrfs_dir_ro_inode_operations;
2299 		else
2300 			inode->i_op = &btrfs_dir_inode_operations;
2301 		break;
2302 	case S_IFLNK:
2303 		inode->i_op = &btrfs_symlink_inode_operations;
2304 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2305 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2306 		break;
2307 	default:
2308 		inode->i_op = &btrfs_special_inode_operations;
2309 		init_special_inode(inode, inode->i_mode, rdev);
2310 		break;
2311 	}
2312 
2313 	btrfs_update_iflags(inode);
2314 	return;
2315 
2316 make_bad:
2317 	btrfs_free_path(path);
2318 	make_bad_inode(inode);
2319 }
2320 
2321 /*
2322  * given a leaf and an inode, copy the inode fields into the leaf
2323  */
2324 static void fill_inode_item(struct btrfs_trans_handle *trans,
2325 			    struct extent_buffer *leaf,
2326 			    struct btrfs_inode_item *item,
2327 			    struct inode *inode)
2328 {
2329 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2330 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2331 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2332 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2333 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2334 
2335 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2336 			       inode->i_atime.tv_sec);
2337 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2338 				inode->i_atime.tv_nsec);
2339 
2340 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2341 			       inode->i_mtime.tv_sec);
2342 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2343 				inode->i_mtime.tv_nsec);
2344 
2345 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2346 			       inode->i_ctime.tv_sec);
2347 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2348 				inode->i_ctime.tv_nsec);
2349 
2350 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2351 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2352 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2353 	btrfs_set_inode_transid(leaf, item, trans->transid);
2354 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2355 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2356 	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2357 }
2358 
2359 /*
2360  * copy everything in the in-memory inode into the btree.
2361  */
2362 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2363 				struct btrfs_root *root, struct inode *inode)
2364 {
2365 	struct btrfs_inode_item *inode_item;
2366 	struct btrfs_path *path;
2367 	struct extent_buffer *leaf;
2368 	int ret;
2369 
2370 	path = btrfs_alloc_path();
2371 	BUG_ON(!path);
2372 	path->leave_spinning = 1;
2373 	ret = btrfs_lookup_inode(trans, root, path,
2374 				 &BTRFS_I(inode)->location, 1);
2375 	if (ret) {
2376 		if (ret > 0)
2377 			ret = -ENOENT;
2378 		goto failed;
2379 	}
2380 
2381 	btrfs_unlock_up_safe(path, 1);
2382 	leaf = path->nodes[0];
2383 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2384 				  struct btrfs_inode_item);
2385 
2386 	fill_inode_item(trans, leaf, inode_item, inode);
2387 	btrfs_mark_buffer_dirty(leaf);
2388 	btrfs_set_inode_last_trans(trans, inode);
2389 	ret = 0;
2390 failed:
2391 	btrfs_free_path(path);
2392 	return ret;
2393 }
2394 
2395 
2396 /*
2397  * unlink helper that gets used here in inode.c and in the tree logging
2398  * recovery code.  It remove a link in a directory with a given name, and
2399  * recovery code.  It removes a link in a directory with a given name, and
2400  */
2401 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2402 		       struct btrfs_root *root,
2403 		       struct inode *dir, struct inode *inode,
2404 		       const char *name, int name_len)
2405 {
2406 	struct btrfs_path *path;
2407 	int ret = 0;
2408 	struct extent_buffer *leaf;
2409 	struct btrfs_dir_item *di;
2410 	struct btrfs_key key;
2411 	u64 index;
2412 
2413 	path = btrfs_alloc_path();
2414 	if (!path) {
2415 		ret = -ENOMEM;
2416 		goto err;
2417 	}
2418 
2419 	path->leave_spinning = 1;
2420 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2421 				    name, name_len, -1);
2422 	if (IS_ERR(di)) {
2423 		ret = PTR_ERR(di);
2424 		goto err;
2425 	}
2426 	if (!di) {
2427 		ret = -ENOENT;
2428 		goto err;
2429 	}
2430 	leaf = path->nodes[0];
2431 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2432 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2433 	if (ret)
2434 		goto err;
2435 	btrfs_release_path(root, path);
2436 
2437 	ret = btrfs_del_inode_ref(trans, root, name, name_len,
2438 				  inode->i_ino,
2439 				  dir->i_ino, &index);
2440 	if (ret) {
2441 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2442 		       "inode %lu parent %lu\n", name_len, name,
2443 		       inode->i_ino, dir->i_ino);
2444 		goto err;
2445 	}
2446 
2447 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2448 					 index, name, name_len, -1);
2449 	if (IS_ERR(di)) {
2450 		ret = PTR_ERR(di);
2451 		goto err;
2452 	}
2453 	if (!di) {
2454 		ret = -ENOENT;
2455 		goto err;
2456 	}
2457 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2458 	btrfs_release_path(root, path);
2459 
2460 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2461 					 inode, dir->i_ino);
2462 	BUG_ON(ret != 0 && ret != -ENOENT);
2463 
2464 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2465 					   dir, index);
2466 	BUG_ON(ret);
2467 err:
2468 	btrfs_free_path(path);
2469 	if (ret)
2470 		goto out;
2471 
2472 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2473 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2474 	btrfs_update_inode(trans, root, dir);
2475 	btrfs_drop_nlink(inode);
2476 	ret = btrfs_update_inode(trans, root, inode);
2477 out:
2478 	return ret;
2479 }
2480 
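/*
 * ->unlink for regular directory entries: reserve metadata space for the
 * unlink plus a possible orphan item, remove the directory entry and the
 * inode back reference with btrfs_unlink_inode(), and put the inode on
 * the orphan list if its link count dropped to zero.
 */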
2481 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2482 {
2483 	struct btrfs_root *root;
2484 	struct btrfs_trans_handle *trans;
2485 	struct inode *inode = dentry->d_inode;
2486 	int ret;
2487 	unsigned long nr = 0;
2488 
2489 	root = BTRFS_I(dir)->root;
2490 
2491 	/*
2492 	 * 5 items for unlink inode
2493 	 * 1 for orphan
2494 	 */
2495 	ret = btrfs_reserve_metadata_space(root, 6);
2496 	if (ret)
2497 		return ret;
2498 
2499 	trans = btrfs_start_transaction(root, 1);
2500 	if (IS_ERR(trans)) {
2501 		btrfs_unreserve_metadata_space(root, 6);
2502 		return PTR_ERR(trans);
2503 	}
2504 
2505 	btrfs_set_trans_block_group(trans, dir);
2506 
2507 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2508 
2509 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2510 				 dentry->d_name.name, dentry->d_name.len);
2511 
2512 	if (inode->i_nlink == 0)
2513 		ret = btrfs_orphan_add(trans, inode);
2514 
2515 	nr = trans->blocks_used;
2516 
2517 	btrfs_end_transaction_throttle(trans, root);
2518 	btrfs_unreserve_metadata_space(root, 6);
2519 	btrfs_btree_balance_dirty(root, nr);
2520 	return ret;
2521 }
2522 
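/*
 * Remove a subvolume's entry from its parent directory.  Unlike a normal
 * unlink the dir item references a root rather than an inode, so this
 * deletes the dir item, the root ref in the tree of tree roots (falling
 * back to a dir index search if that ref is already gone) and the dir
 * index item, then updates the parent directory's size and timestamps.
 */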
2523 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2524 			struct btrfs_root *root,
2525 			struct inode *dir, u64 objectid,
2526 			const char *name, int name_len)
2527 {
2528 	struct btrfs_path *path;
2529 	struct extent_buffer *leaf;
2530 	struct btrfs_dir_item *di;
2531 	struct btrfs_key key;
2532 	u64 index;
2533 	int ret;
2534 
2535 	path = btrfs_alloc_path();
2536 	if (!path)
2537 		return -ENOMEM;
2538 
2539 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2540 				   name, name_len, -1);
2541 	BUG_ON(!di || IS_ERR(di));
2542 
2543 	leaf = path->nodes[0];
2544 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2545 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2546 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2547 	BUG_ON(ret);
2548 	btrfs_release_path(root, path);
2549 
2550 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2551 				 objectid, root->root_key.objectid,
2552 				 dir->i_ino, &index, name, name_len);
2553 	if (ret < 0) {
2554 		BUG_ON(ret != -ENOENT);
2555 		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2556 						 name, name_len);
2557 		BUG_ON(!di || IS_ERR(di));
2558 
2559 		leaf = path->nodes[0];
2560 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2561 		btrfs_release_path(root, path);
2562 		index = key.offset;
2563 	}
2564 
2565 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2566 					 index, name, name_len, -1);
2567 	BUG_ON(!di || IS_ERR(di));
2568 
2569 	leaf = path->nodes[0];
2570 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2571 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2572 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2573 	BUG_ON(ret);
2574 	btrfs_release_path(root, path);
2575 
2576 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2577 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2578 	ret = btrfs_update_inode(trans, root, dir);
2579 	BUG_ON(ret);
2580 	dir->i_sb->s_dirt = 1;
2581 
2582 	btrfs_free_path(path);
2583 	return 0;
2584 }
2585 
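/*
 * ->rmdir: a directory is empty when its i_size is BTRFS_EMPTY_DIR_SIZE.
 * Empty subvolume placeholder directories are handled through
 * btrfs_unlink_subvol(); everything else is orphan-protected and then
 * unlinked from the parent like a regular entry.
 */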
2586 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2587 {
2588 	struct inode *inode = dentry->d_inode;
2589 	int err = 0;
2590 	int ret;
2591 	struct btrfs_root *root = BTRFS_I(dir)->root;
2592 	struct btrfs_trans_handle *trans;
2593 	unsigned long nr = 0;
2594 
2595 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2596 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2597 		return -ENOTEMPTY;
2598 
2599 	ret = btrfs_reserve_metadata_space(root, 5);
2600 	if (ret)
2601 		return ret;
2602 
2603 	trans = btrfs_start_transaction(root, 1);
2604 	if (IS_ERR(trans)) {
2605 		btrfs_unreserve_metadata_space(root, 5);
2606 		return PTR_ERR(trans);
2607 	}
2608 
2609 	btrfs_set_trans_block_group(trans, dir);
2610 
2611 	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2612 		err = btrfs_unlink_subvol(trans, root, dir,
2613 					  BTRFS_I(inode)->location.objectid,
2614 					  dentry->d_name.name,
2615 					  dentry->d_name.len);
2616 		goto out;
2617 	}
2618 
2619 	err = btrfs_orphan_add(trans, inode);
2620 	if (err)
2621 		goto out;
2622 
2623 	/* now the directory is empty */
2624 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2625 				 dentry->d_name.name, dentry->d_name.len);
2626 	if (!err)
2627 		btrfs_i_size_write(inode, 0);
2628 out:
2629 	nr = trans->blocks_used;
2630 	ret = btrfs_end_transaction_throttle(trans, root);
2631 	btrfs_unreserve_metadata_space(root, 5);
2632 	btrfs_btree_balance_dirty(root, nr);
2633 
2634 	if (ret && !err)
2635 		err = ret;
2636 	return err;
2637 }
2638 
2639 #if 0
2640 /*
2641  * when truncating bytes in a file, it is possible to avoid reading
2642  * the leaves that contain only checksum items.  This can be the
2643  * majority of the IO required to delete a large file, but it must
2644  * be done carefully.
2645  *
2646  * The keys in the level just above the leaves are checked to make sure
2647  * the lowest key in a given leaf is a csum key, and starts at an offset
2648  * after the new  size.
2649  * after the new size.
2650  * Then the key for the next leaf is checked to make sure it also has
2651  * a checksum item for the same file.  If it does, we know our target leaf
2652  * contains only checksum items, and it can be safely freed without reading
2653  * it.
2654  *
2655  * This is just an optimization targeted at large files.  It may do
2656  * nothing.  It will return 0 unless things went badly.
2657  */
2658 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2659 				     struct btrfs_root *root,
2660 				     struct btrfs_path *path,
2661 				     struct inode *inode, u64 new_size)
2662 {
2663 	struct btrfs_key key;
2664 	int ret;
2665 	int nritems;
2666 	struct btrfs_key found_key;
2667 	struct btrfs_key other_key;
2668 	struct btrfs_leaf_ref *ref;
2669 	u64 leaf_gen;
2670 	u64 leaf_start;
2671 
2672 	path->lowest_level = 1;
2673 	key.objectid = inode->i_ino;
2674 	key.type = BTRFS_CSUM_ITEM_KEY;
2675 	key.offset = new_size;
2676 again:
2677 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2678 	if (ret < 0)
2679 		goto out;
2680 
2681 	if (path->nodes[1] == NULL) {
2682 		ret = 0;
2683 		goto out;
2684 	}
2685 	ret = 0;
2686 	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2687 	nritems = btrfs_header_nritems(path->nodes[1]);
2688 
2689 	if (!nritems)
2690 		goto out;
2691 
2692 	if (path->slots[1] >= nritems)
2693 		goto next_node;
2694 
2695 	/* did we find a key greater than anything we want to delete? */
2696 	if (found_key.objectid > inode->i_ino ||
2697 	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
2698 		goto out;
2699 
2700 	/* we check the next key in the node to make sure the leave contains
2701 	/* we check the next key in the node to make sure the leaf contains
2702 	 * leaf is the last one in the node
2703 	 */
2704 	if (path->slots[1] + 1 >= nritems) {
2705 next_node:
2706 		/* search forward from the last key in the node, this
2707 		 * will bring us into the next node in the tree
2708 		 */
2709 		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2710 
2711 		/* unlikely, but we inc below, so check to be safe */
2712 		if (found_key.offset == (u64)-1)
2713 			goto out;
2714 
2715 		/* search_forward needs a path with locks held, do the
2716 		 * search again for the original key.  It is possible
2717 		 * this will race with a balance and return a path that
2718 		 * we could modify, but this drop is just an optimization
2719 		 * and is allowed to miss some leaves.
2720 		 */
2721 		btrfs_release_path(root, path);
2722 		found_key.offset++;
2723 
2724 		/* setup a max key for search_forward */
2725 		other_key.offset = (u64)-1;
2726 		other_key.type = key.type;
2727 		other_key.objectid = key.objectid;
2728 
2729 		path->keep_locks = 1;
2730 		ret = btrfs_search_forward(root, &found_key, &other_key,
2731 					   path, 0, 0);
2732 		path->keep_locks = 0;
2733 		if (ret || found_key.objectid != key.objectid ||
2734 		    found_key.type != key.type) {
2735 			ret = 0;
2736 			goto out;
2737 		}
2738 
2739 		key.offset = found_key.offset;
2740 		btrfs_release_path(root, path);
2741 		cond_resched();
2742 		goto again;
2743 	}
2744 
2745 	/* we know there's one more slot after us in the tree,
2746 	 * read that key so we can verify it is also a checksum item
2747 	 */
2748 	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2749 
2750 	if (found_key.objectid < inode->i_ino)
2751 		goto next_key;
2752 
2753 	if (found_key.type != key.type || found_key.offset < new_size)
2754 		goto next_key;
2755 
2756 	/*
2757 	 * if the key for the next leaf isn't a csum key from this objectid,
2758 	 * we can't be sure there aren't good items inside this leaf.
2759 	 * Bail out
2760 	 */
2761 	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2762 		goto out;
2763 
2764 	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2765 	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2766 	/*
2767 	 * it is safe to delete this leaf, it contains only
2768 	 * csum items from this inode at an offset >= new_size
2769 	 */
2770 	ret = btrfs_del_leaf(trans, root, path, leaf_start);
2771 	BUG_ON(ret);
2772 
2773 	if (root->ref_cows && leaf_gen < trans->transid) {
2774 		ref = btrfs_alloc_leaf_ref(root, 0);
2775 		if (ref) {
2776 			ref->root_gen = root->root_key.offset;
2777 			ref->bytenr = leaf_start;
2778 			ref->owner = 0;
2779 			ref->generation = leaf_gen;
2780 			ref->nritems = 0;
2781 
2782 			btrfs_sort_leaf_ref(ref);
2783 
2784 			ret = btrfs_add_leaf_ref(root, ref, 0);
2785 			WARN_ON(ret);
2786 			btrfs_free_leaf_ref(root, ref);
2787 		} else {
2788 			WARN_ON(1);
2789 		}
2790 	}
2791 next_key:
2792 	btrfs_release_path(root, path);
2793 
2794 	if (other_key.objectid == inode->i_ino &&
2795 	    other_key.type == key.type && other_key.offset > key.offset) {
2796 		key.offset = other_key.offset;
2797 		cond_resched();
2798 		goto again;
2799 	}
2800 	ret = 0;
2801 out:
2802 	/* fixup any changes we've made to the path */
2803 	path->lowest_level = 0;
2804 	path->keep_locks = 0;
2805 	btrfs_release_path(root, path);
2806 	return ret;
2807 }
2808 
2809 #endif
2810 
2811 /*
2812  * this can truncate away extent items, csum items and directory items.
2813  * It starts at a high offset and removes keys until it can't find
2814  * any higher than new_size
2815  *
2816  * csum items that cross the new i_size are truncated to the new size
2817  * as well.
2818  *
2819  * min_type is the minimum key type to truncate down to.  If set to 0, this
2820  * will kill all the items on this inode, including the INODE_ITEM_KEY.
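 *
 * For example, btrfs_delete_inode() calls this with new_size == 0 and
 * min_type == 0 to drop every item belonging to the inode, while callers
 * that only trim data past EOF must pass min_type == BTRFS_EXTENT_DATA_KEY
 * (the BUG_ON below enforces this for any non-zero new_size).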
2821  */
2822 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2823 			       struct btrfs_root *root,
2824 			       struct inode *inode,
2825 			       u64 new_size, u32 min_type)
2826 {
2827 	struct btrfs_path *path;
2828 	struct extent_buffer *leaf;
2829 	struct btrfs_file_extent_item *fi;
2830 	struct btrfs_key key;
2831 	struct btrfs_key found_key;
2832 	u64 extent_start = 0;
2833 	u64 extent_num_bytes = 0;
2834 	u64 extent_offset = 0;
2835 	u64 item_end = 0;
2836 	u64 mask = root->sectorsize - 1;
2837 	u32 found_type = (u8)-1;
2838 	int found_extent;
2839 	int del_item;
2840 	int pending_del_nr = 0;
2841 	int pending_del_slot = 0;
2842 	int extent_type = -1;
2843 	int encoding;
2844 	int ret;
2845 	int err = 0;
2846 
2847 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
2848 
2849 	if (root->ref_cows)
2850 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2851 
2852 	path = btrfs_alloc_path();
2853 	BUG_ON(!path);
2854 	path->reada = -1;
2855 
2856 	key.objectid = inode->i_ino;
2857 	key.offset = (u64)-1;
2858 	key.type = (u8)-1;
2859 
2860 search_again:
2861 	path->leave_spinning = 1;
2862 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2863 	if (ret < 0) {
2864 		err = ret;
2865 		goto out;
2866 	}
2867 
2868 	if (ret > 0) {
2869 		/* there are no items in the tree for us to truncate, we're
2870 		 * done
2871 		 */
2872 		if (path->slots[0] == 0)
2873 			goto out;
2874 		path->slots[0]--;
2875 	}
2876 
2877 	while (1) {
2878 		fi = NULL;
2879 		leaf = path->nodes[0];
2880 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2881 		found_type = btrfs_key_type(&found_key);
2882 		encoding = 0;
2883 
2884 		if (found_key.objectid != inode->i_ino)
2885 			break;
2886 
2887 		if (found_type < min_type)
2888 			break;
2889 
2890 		item_end = found_key.offset;
2891 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
2892 			fi = btrfs_item_ptr(leaf, path->slots[0],
2893 					    struct btrfs_file_extent_item);
2894 			extent_type = btrfs_file_extent_type(leaf, fi);
2895 			encoding = btrfs_file_extent_compression(leaf, fi);
2896 			encoding |= btrfs_file_extent_encryption(leaf, fi);
2897 			encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2898 
2899 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2900 				item_end +=
2901 				    btrfs_file_extent_num_bytes(leaf, fi);
2902 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2903 				item_end += btrfs_file_extent_inline_len(leaf,
2904 									 fi);
2905 			}
2906 			item_end--;
2907 		}
2908 		if (found_type > min_type) {
2909 			del_item = 1;
2910 		} else {
2911 			if (item_end < new_size)
2912 				break;
2913 			if (found_key.offset >= new_size)
2914 				del_item = 1;
2915 			else
2916 				del_item = 0;
2917 		}
2918 		found_extent = 0;
2919 		/* FIXME, shrink the extent if the ref count is only 1 */
2920 		if (found_type != BTRFS_EXTENT_DATA_KEY)
2921 			goto delete;
2922 
2923 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2924 			u64 num_dec;
2925 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2926 			if (!del_item && !encoding) {
2927 				u64 orig_num_bytes =
2928 					btrfs_file_extent_num_bytes(leaf, fi);
2929 				extent_num_bytes = new_size -
2930 					found_key.offset + root->sectorsize - 1;
2931 				extent_num_bytes = extent_num_bytes &
2932 					~((u64)root->sectorsize - 1);
2933 				btrfs_set_file_extent_num_bytes(leaf, fi,
2934 							 extent_num_bytes);
2935 				num_dec = (orig_num_bytes -
2936 					   extent_num_bytes);
2937 				if (root->ref_cows && extent_start != 0)
2938 					inode_sub_bytes(inode, num_dec);
2939 				btrfs_mark_buffer_dirty(leaf);
2940 			} else {
2941 				extent_num_bytes =
2942 					btrfs_file_extent_disk_num_bytes(leaf,
2943 									 fi);
2944 				extent_offset = found_key.offset -
2945 					btrfs_file_extent_offset(leaf, fi);
2946 
2947 				/* FIXME blocksize != 4096 */
2948 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2949 				if (extent_start != 0) {
2950 					found_extent = 1;
2951 					if (root->ref_cows)
2952 						inode_sub_bytes(inode, num_dec);
2953 				}
2954 			}
2955 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2956 			/*
2957 			 * we can't truncate inline items that have had
2958 			 * special encodings
2959 			 */
2960 			if (!del_item &&
2961 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
2962 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
2963 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2964 				u32 size = new_size - found_key.offset;
2965 
2966 				if (root->ref_cows) {
2967 					inode_sub_bytes(inode, item_end + 1 -
2968 							new_size);
2969 				}
2970 				size =
2971 				    btrfs_file_extent_calc_inline_size(size);
2972 				ret = btrfs_truncate_item(trans, root, path,
2973 							  size, 1);
2974 				BUG_ON(ret);
2975 			} else if (root->ref_cows) {
2976 				inode_sub_bytes(inode, item_end + 1 -
2977 						found_key.offset);
2978 			}
2979 		}
2980 delete:
2981 		if (del_item) {
2982 			if (!pending_del_nr) {
2983 				/* no pending yet, add ourselves */
2984 				pending_del_slot = path->slots[0];
2985 				pending_del_nr = 1;
2986 			} else if (pending_del_nr &&
2987 				   path->slots[0] + 1 == pending_del_slot) {
2988 				/* hop on the pending chunk */
2989 				pending_del_nr++;
2990 				pending_del_slot = path->slots[0];
2991 			} else {
2992 				BUG();
2993 			}
2994 		} else {
2995 			break;
2996 		}
2997 		if (found_extent && root->ref_cows) {
2998 			btrfs_set_path_blocking(path);
2999 			ret = btrfs_free_extent(trans, root, extent_start,
3000 						extent_num_bytes, 0,
3001 						btrfs_header_owner(leaf),
3002 						inode->i_ino, extent_offset);
3003 			BUG_ON(ret);
3004 		}
3005 
3006 		if (found_type == BTRFS_INODE_ITEM_KEY)
3007 			break;
3008 
3009 		if (path->slots[0] == 0 ||
3010 		    path->slots[0] != pending_del_slot) {
3011 			if (root->ref_cows) {
3012 				err = -EAGAIN;
3013 				goto out;
3014 			}
3015 			if (pending_del_nr) {
3016 				ret = btrfs_del_items(trans, root, path,
3017 						pending_del_slot,
3018 						pending_del_nr);
3019 				BUG_ON(ret);
3020 				pending_del_nr = 0;
3021 			}
3022 			btrfs_release_path(root, path);
3023 			goto search_again;
3024 		} else {
3025 			path->slots[0]--;
3026 		}
3027 	}
3028 out:
3029 	if (pending_del_nr) {
3030 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3031 				      pending_del_nr);
3032 	}
3033 	btrfs_free_path(path);
3034 	return err;
3035 }
3036 
3037 /*
3038  * taken from block_truncate_page, but does cow as it zeros out
3039  * any bytes left in the last page in the file.
3040  */
3041 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3042 {
3043 	struct inode *inode = mapping->host;
3044 	struct btrfs_root *root = BTRFS_I(inode)->root;
3045 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3046 	struct btrfs_ordered_extent *ordered;
3047 	struct extent_state *cached_state = NULL;
3048 	char *kaddr;
3049 	u32 blocksize = root->sectorsize;
3050 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3051 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3052 	struct page *page;
3053 	int ret = 0;
3054 	u64 page_start;
3055 	u64 page_end;
3056 
3057 	if ((offset & (blocksize - 1)) == 0)
3058 		goto out;
3059 	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
3060 	if (ret)
3061 		goto out;
3062 
3063 	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3064 	if (ret)
3065 		goto out;
3066 
3067 	ret = -ENOMEM;
3068 again:
3069 	page = grab_cache_page(mapping, index);
3070 	if (!page) {
3071 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3072 		btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3073 		goto out;
3074 	}
3075 
3076 	page_start = page_offset(page);
3077 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3078 
3079 	if (!PageUptodate(page)) {
3080 		ret = btrfs_readpage(NULL, page);
3081 		lock_page(page);
3082 		if (page->mapping != mapping) {
3083 			unlock_page(page);
3084 			page_cache_release(page);
3085 			goto again;
3086 		}
3087 		if (!PageUptodate(page)) {
3088 			ret = -EIO;
3089 			goto out_unlock;
3090 		}
3091 	}
3092 	wait_on_page_writeback(page);
3093 
3094 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3095 			 GFP_NOFS);
3096 	set_page_extent_mapped(page);
3097 
3098 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3099 	if (ordered) {
3100 		unlock_extent_cached(io_tree, page_start, page_end,
3101 				     &cached_state, GFP_NOFS);
3102 		unlock_page(page);
3103 		page_cache_release(page);
3104 		btrfs_start_ordered_extent(inode, ordered, 1);
3105 		btrfs_put_ordered_extent(ordered);
3106 		goto again;
3107 	}
3108 
3109 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3110 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3111 			  0, 0, &cached_state, GFP_NOFS);
3112 
3113 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3114 					&cached_state);
3115 	if (ret) {
3116 		unlock_extent_cached(io_tree, page_start, page_end,
3117 				     &cached_state, GFP_NOFS);
3118 		goto out_unlock;
3119 	}
3120 
3121 	ret = 0;
3122 	if (offset != PAGE_CACHE_SIZE) {
3123 		kaddr = kmap(page);
3124 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3125 		flush_dcache_page(page);
3126 		kunmap(page);
3127 	}
3128 	ClearPageChecked(page);
3129 	set_page_dirty(page);
3130 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3131 			     GFP_NOFS);
3132 
3133 out_unlock:
3134 	if (ret)
3135 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
3136 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3137 	unlock_page(page);
3138 	page_cache_release(page);
3139 out:
3140 	return ret;
3141 }
3142 
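/*
 * Expanding truncate / write past EOF: fill the gap between the old
 * (sector aligned) i_size and the new size with hole extents.  For each
 * range that is not already preallocated, drop any stale extents and
 * insert a file extent item with a disk bytenr of 0 so reads of the hole
 * return zeros.
 */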
3143 int btrfs_cont_expand(struct inode *inode, loff_t size)
3144 {
3145 	struct btrfs_trans_handle *trans;
3146 	struct btrfs_root *root = BTRFS_I(inode)->root;
3147 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3148 	struct extent_map *em;
3149 	struct extent_state *cached_state = NULL;
3150 	u64 mask = root->sectorsize - 1;
3151 	u64 hole_start = (inode->i_size + mask) & ~mask;
3152 	u64 block_end = (size + mask) & ~mask;
3153 	u64 last_byte;
3154 	u64 cur_offset;
3155 	u64 hole_size;
3156 	int err = 0;
3157 
3158 	if (size <= hole_start)
3159 		return 0;
3160 
3161 	while (1) {
3162 		struct btrfs_ordered_extent *ordered;
3163 		btrfs_wait_ordered_range(inode, hole_start,
3164 					 block_end - hole_start);
3165 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3166 				 &cached_state, GFP_NOFS);
3167 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3168 		if (!ordered)
3169 			break;
3170 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3171 				     &cached_state, GFP_NOFS);
3172 		btrfs_put_ordered_extent(ordered);
3173 	}
3174 
3175 	cur_offset = hole_start;
3176 	while (1) {
3177 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3178 				block_end - cur_offset, 0);
3179 		BUG_ON(IS_ERR(em) || !em);
3180 		last_byte = min(extent_map_end(em), block_end);
3181 		last_byte = (last_byte + mask) & ~mask;
3182 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3183 			u64 hint_byte = 0;
3184 			hole_size = last_byte - cur_offset;
3185 
3186 			err = btrfs_reserve_metadata_space(root, 2);
3187 			if (err)
3188 				break;
3189 
3190 			trans = btrfs_start_transaction(root, 1);
3191 			btrfs_set_trans_block_group(trans, inode);
3192 
3193 			err = btrfs_drop_extents(trans, inode, cur_offset,
3194 						 cur_offset + hole_size,
3195 						 &hint_byte, 1);
3196 			BUG_ON(err);
3197 
3198 			err = btrfs_insert_file_extent(trans, root,
3199 					inode->i_ino, cur_offset, 0,
3200 					0, hole_size, 0, hole_size,
3201 					0, 0, 0);
3202 			BUG_ON(err);
3203 
3204 			btrfs_drop_extent_cache(inode, hole_start,
3205 					last_byte - 1, 0);
3206 
3207 			btrfs_end_transaction(trans, root);
3208 			btrfs_unreserve_metadata_space(root, 2);
3209 		}
3210 		free_extent_map(em);
3211 		cur_offset = last_byte;
3212 		if (cur_offset >= block_end)
3213 			break;
3214 	}
3215 
3216 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3217 			     GFP_NOFS);
3218 	return err;
3219 }
3220 
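/*
 * Handle ATTR_SIZE from setattr.  The inode is added to the orphan list
 * first so a crash in the middle of the size change can be cleaned up on
 * the next mount.  Growing the file goes through btrfs_cont_expand() to
 * fill the new range with hole extents; shrinking falls through to
 * vmtruncate(), which reaches btrfs_truncate() via the ->truncate op.
 */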
3221 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3222 {
3223 	struct btrfs_root *root = BTRFS_I(inode)->root;
3224 	struct btrfs_trans_handle *trans;
3225 	unsigned long nr;
3226 	int ret;
3227 
3228 	if (attr->ia_size == inode->i_size)
3229 		return 0;
3230 
3231 	if (attr->ia_size > inode->i_size) {
3232 		unsigned long limit;
3233 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3234 		if (attr->ia_size > inode->i_sb->s_maxbytes)
3235 			return -EFBIG;
3236 		if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3237 			send_sig(SIGXFSZ, current, 0);
3238 			return -EFBIG;
3239 		}
3240 	}
3241 
3242 	ret = btrfs_reserve_metadata_space(root, 1);
3243 	if (ret)
3244 		return ret;
3245 
3246 	trans = btrfs_start_transaction(root, 1);
3247 	btrfs_set_trans_block_group(trans, inode);
3248 
3249 	ret = btrfs_orphan_add(trans, inode);
3250 	BUG_ON(ret);
3251 
3252 	nr = trans->blocks_used;
3253 	btrfs_end_transaction(trans, root);
3254 	btrfs_unreserve_metadata_space(root, 1);
3255 	btrfs_btree_balance_dirty(root, nr);
3256 
3257 	if (attr->ia_size > inode->i_size) {
3258 		ret = btrfs_cont_expand(inode, attr->ia_size);
3259 		if (ret) {
3260 			btrfs_truncate(inode);
3261 			return ret;
3262 		}
3263 
3264 		i_size_write(inode, attr->ia_size);
3265 		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3266 
3267 		trans = btrfs_start_transaction(root, 1);
3268 		btrfs_set_trans_block_group(trans, inode);
3269 
3270 		ret = btrfs_update_inode(trans, root, inode);
3271 		BUG_ON(ret);
3272 		if (inode->i_nlink > 0) {
3273 			ret = btrfs_orphan_del(trans, inode);
3274 			BUG_ON(ret);
3275 		}
3276 		nr = trans->blocks_used;
3277 		btrfs_end_transaction(trans, root);
3278 		btrfs_btree_balance_dirty(root, nr);
3279 		return 0;
3280 	}
3281 
3282 	/*
3283 	 * We're truncating a file that used to have good data down to
3284 	 * zero. Make sure it gets into the ordered flush list so that
3285 	 * any new writes get down to disk quickly.
3286 	 */
3287 	if (attr->ia_size == 0)
3288 		BTRFS_I(inode)->ordered_data_close = 1;
3289 
3290 	/* we don't support swapfiles, so vmtruncate shouldn't fail */
3291 	ret = vmtruncate(inode, attr->ia_size);
3292 	BUG_ON(ret);
3293 
3294 	return 0;
3295 }
3296 
3297 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3298 {
3299 	struct inode *inode = dentry->d_inode;
3300 	int err;
3301 
3302 	err = inode_change_ok(inode, attr);
3303 	if (err)
3304 		return err;
3305 
3306 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3307 		err = btrfs_setattr_size(inode, attr);
3308 		if (err)
3309 			return err;
3310 	}
3311 	attr->ia_valid &= ~ATTR_SIZE;
3312 
3313 	if (attr->ia_valid)
3314 		err = inode_setattr(inode, attr);
3315 
3316 	if (!err && ((attr->ia_valid & ATTR_MODE)))
3317 		err = btrfs_acl_chmod(inode);
3318 	return err;
3319 }
3320 
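/*
 * Called when the VFS finally evicts an unlinked inode.  Wait for any
 * ordered IO, then repeatedly call btrfs_truncate_inode_items() with
 * new_size 0 in small transactions (restarting on -EAGAIN) until every
 * item belonging to the inode is gone, and finish by removing the orphan
 * item that protected the operation.  Inodes that still have links (dead
 * subvolume roots) or that are seen during log recovery skip straight to
 * clear_inode().
 */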
3321 void btrfs_delete_inode(struct inode *inode)
3322 {
3323 	struct btrfs_trans_handle *trans;
3324 	struct btrfs_root *root = BTRFS_I(inode)->root;
3325 	unsigned long nr;
3326 	int ret;
3327 
3328 	truncate_inode_pages(&inode->i_data, 0);
3329 	if (is_bad_inode(inode)) {
3330 		btrfs_orphan_del(NULL, inode);
3331 		goto no_delete;
3332 	}
3333 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3334 
3335 	if (root->fs_info->log_root_recovering) {
3336 		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3337 		goto no_delete;
3338 	}
3339 
3340 	if (inode->i_nlink > 0) {
3341 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3342 		goto no_delete;
3343 	}
3344 
3345 	btrfs_i_size_write(inode, 0);
3346 
3347 	while (1) {
3348 		trans = btrfs_start_transaction(root, 1);
3349 		btrfs_set_trans_block_group(trans, inode);
3350 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3351 
3352 		if (ret != -EAGAIN)
3353 			break;
3354 
3355 		nr = trans->blocks_used;
3356 		btrfs_end_transaction(trans, root);
3357 		trans = NULL;
3358 		btrfs_btree_balance_dirty(root, nr);
3359 	}
3360 
3361 	if (ret == 0) {
3362 		ret = btrfs_orphan_del(trans, inode);
3363 		BUG_ON(ret);
3364 	}
3365 
3366 	nr = trans->blocks_used;
3367 	btrfs_end_transaction(trans, root);
3368 	btrfs_btree_balance_dirty(root, nr);
3369 no_delete:
3370 	clear_inode(inode);
3371 	return;
3372 }
3373 
3374 /*
3375  * this returns the key found in the dir entry in the location pointer.
3376  * If no dir entries were found, location->objectid is 0.
3377  */
3378 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3379 			       struct btrfs_key *location)
3380 {
3381 	const char *name = dentry->d_name.name;
3382 	int namelen = dentry->d_name.len;
3383 	struct btrfs_dir_item *di;
3384 	struct btrfs_path *path;
3385 	struct btrfs_root *root = BTRFS_I(dir)->root;
3386 	int ret = 0;
3387 
3388 	path = btrfs_alloc_path();
3389 	BUG_ON(!path);
3390 
3391 	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3392 				    namelen, 0);
3393 	if (IS_ERR(di))
3394 		ret = PTR_ERR(di);
3395 
3396 	if (!di || IS_ERR(di))
3397 		goto out_err;
3398 
3399 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3400 out:
3401 	btrfs_free_path(path);
3402 	return ret;
3403 out_err:
3404 	location->objectid = 0;
3405 	goto out;
3406 }
3407 
3408 /*
3409  * when we hit a tree root in a directory, the btrfs part of the inode
3410  * needs to be changed to reflect the root directory of the tree root.  This
3411  * is kind of like crossing a mount point.
3412  */
3413 static int fixup_tree_root_location(struct btrfs_root *root,
3414 				    struct inode *dir,
3415 				    struct dentry *dentry,
3416 				    struct btrfs_key *location,
3417 				    struct btrfs_root **sub_root)
3418 {
3419 	struct btrfs_path *path;
3420 	struct btrfs_root *new_root;
3421 	struct btrfs_root_ref *ref;
3422 	struct extent_buffer *leaf;
3423 	int ret;
3424 	int err = 0;
3425 
3426 	path = btrfs_alloc_path();
3427 	if (!path) {
3428 		err = -ENOMEM;
3429 		goto out;
3430 	}
3431 
3432 	err = -ENOENT;
3433 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3434 				  BTRFS_I(dir)->root->root_key.objectid,
3435 				  location->objectid);
3436 	if (ret) {
3437 		if (ret < 0)
3438 			err = ret;
3439 		goto out;
3440 	}
3441 
3442 	leaf = path->nodes[0];
3443 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3444 	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3445 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3446 		goto out;
3447 
3448 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3449 				   (unsigned long)(ref + 1),
3450 				   dentry->d_name.len);
3451 	if (ret)
3452 		goto out;
3453 
3454 	btrfs_release_path(root->fs_info->tree_root, path);
3455 
3456 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3457 	if (IS_ERR(new_root)) {
3458 		err = PTR_ERR(new_root);
3459 		goto out;
3460 	}
3461 
3462 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3463 		err = -ENOENT;
3464 		goto out;
3465 	}
3466 
3467 	*sub_root = new_root;
3468 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3469 	location->type = BTRFS_INODE_ITEM_KEY;
3470 	location->offset = 0;
3471 	err = 0;
3472 out:
3473 	btrfs_free_path(path);
3474 	return err;
3475 }
3476 
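/*
 * Each root keeps an rb-tree of its in-memory inodes, keyed by inode
 * number; inode_tree_add() and inode_tree_del() maintain it.  When the
 * last cached inode of a root with no remaining references goes away,
 * the root is queued as a dead root for cleanup.
 */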
3477 static void inode_tree_add(struct inode *inode)
3478 {
3479 	struct btrfs_root *root = BTRFS_I(inode)->root;
3480 	struct btrfs_inode *entry;
3481 	struct rb_node **p;
3482 	struct rb_node *parent;
3483 again:
3484 	p = &root->inode_tree.rb_node;
3485 	parent = NULL;
3486 
3487 	if (hlist_unhashed(&inode->i_hash))
3488 		return;
3489 
3490 	spin_lock(&root->inode_lock);
3491 	while (*p) {
3492 		parent = *p;
3493 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3494 
3495 		if (inode->i_ino < entry->vfs_inode.i_ino)
3496 			p = &parent->rb_left;
3497 		else if (inode->i_ino > entry->vfs_inode.i_ino)
3498 			p = &parent->rb_right;
3499 		else {
3500 			WARN_ON(!(entry->vfs_inode.i_state &
3501 				  (I_WILL_FREE | I_FREEING | I_CLEAR)));
3502 			rb_erase(parent, &root->inode_tree);
3503 			RB_CLEAR_NODE(parent);
3504 			spin_unlock(&root->inode_lock);
3505 			goto again;
3506 		}
3507 	}
3508 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3509 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3510 	spin_unlock(&root->inode_lock);
3511 }
3512 
3513 static void inode_tree_del(struct inode *inode)
3514 {
3515 	struct btrfs_root *root = BTRFS_I(inode)->root;
3516 	int empty = 0;
3517 
3518 	spin_lock(&root->inode_lock);
3519 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3520 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3521 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3522 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3523 	}
3524 	spin_unlock(&root->inode_lock);
3525 
3526 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
3527 		synchronize_srcu(&root->fs_info->subvol_srcu);
3528 		spin_lock(&root->inode_lock);
3529 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3530 		spin_unlock(&root->inode_lock);
3531 		if (empty)
3532 			btrfs_add_dead_root(root);
3533 	}
3534 }
3535 
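/*
 * Evict every in-core inode that belongs to @root.  This is only called
 * for roots that are going away (hence the WARN_ON on the ref count):
 * the rb-tree maintained by inode_tree_add() is walked in objectid
 * order, each inode is pinned with igrab(), its dentries are pruned,
 * and the final iput() lets the inode fall out of the cache.  Because
 * the lock is dropped for every iput(), the walk restarts from the
 * saved objectid each time around.
 */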
3536 int btrfs_invalidate_inodes(struct btrfs_root *root)
3537 {
3538 	struct rb_node *node;
3539 	struct rb_node *prev;
3540 	struct btrfs_inode *entry;
3541 	struct inode *inode;
3542 	u64 objectid = 0;
3543 
3544 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3545 
3546 	spin_lock(&root->inode_lock);
3547 again:
3548 	node = root->inode_tree.rb_node;
3549 	prev = NULL;
3550 	while (node) {
3551 		prev = node;
3552 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3553 
3554 		if (objectid < entry->vfs_inode.i_ino)
3555 			node = node->rb_left;
3556 		else if (objectid > entry->vfs_inode.i_ino)
3557 			node = node->rb_right;
3558 		else
3559 			break;
3560 	}
3561 	if (!node) {
3562 		while (prev) {
3563 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3564 			if (objectid <= entry->vfs_inode.i_ino) {
3565 				node = prev;
3566 				break;
3567 			}
3568 			prev = rb_next(prev);
3569 		}
3570 	}
3571 	while (node) {
3572 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3573 		objectid = entry->vfs_inode.i_ino + 1;
3574 		inode = igrab(&entry->vfs_inode);
3575 		if (inode) {
3576 			spin_unlock(&root->inode_lock);
3577 			if (atomic_read(&inode->i_count) > 1)
3578 				d_prune_aliases(inode);
3579 			/*
3580 			 * btrfs_drop_inode will remove it from
3581 			 * the inode cache when its usage count
3582 			 * hits zero.
3583 			 */
3584 			iput(inode);
3585 			cond_resched();
3586 			spin_lock(&root->inode_lock);
3587 			goto again;
3588 		}
3589 
3590 		if (cond_resched_lock(&root->inode_lock))
3591 			goto again;
3592 
3593 		node = rb_next(node);
3594 	}
3595 	spin_unlock(&root->inode_lock);
3596 	return 0;
3597 }
3598 
3599 static noinline void init_btrfs_i(struct inode *inode)
3600 {
3601 	struct btrfs_inode *bi = BTRFS_I(inode);
3602 
3603 	bi->generation = 0;
3604 	bi->sequence = 0;
3605 	bi->last_trans = 0;
3606 	bi->last_sub_trans = 0;
3607 	bi->logged_trans = 0;
3608 	bi->delalloc_bytes = 0;
3609 	bi->reserved_bytes = 0;
3610 	bi->disk_i_size = 0;
3611 	bi->flags = 0;
3612 	bi->index_cnt = (u64)-1;
3613 	bi->last_unlink_trans = 0;
3614 	bi->ordered_data_close = 0;
3615 	bi->force_compress = 0;
3616 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3617 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3618 			     inode->i_mapping, GFP_NOFS);
3619 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3620 			     inode->i_mapping, GFP_NOFS);
3621 	INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3622 	INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3623 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3624 	btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3625 	mutex_init(&BTRFS_I(inode)->log_mutex);
3626 }
3627 
3628 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3629 {
3630 	struct btrfs_iget_args *args = p;
3631 	inode->i_ino = args->ino;
3632 	init_btrfs_i(inode);
3633 	BTRFS_I(inode)->root = args->root;
3634 	btrfs_set_inode_space_info(args->root, inode);
3635 	return 0;
3636 }
3637 
3638 static int btrfs_find_actor(struct inode *inode, void *opaque)
3639 {
3640 	struct btrfs_iget_args *args = opaque;
3641 	return args->ino == inode->i_ino &&
3642 		args->root == BTRFS_I(inode)->root;
3643 }
3644 
3645 static struct inode *btrfs_iget_locked(struct super_block *s,
3646 				       u64 objectid,
3647 				       struct btrfs_root *root)
3648 {
3649 	struct inode *inode;
3650 	struct btrfs_iget_args args;
3651 	args.ino = objectid;
3652 	args.root = root;
3653 
3654 	inode = iget5_locked(s, objectid, btrfs_find_actor,
3655 			     btrfs_init_locked_inode,
3656 			     (void *)&args);
3657 	return inode;
3658 }
3659 
3660 /* Get an inode object given its location and corresponding root.
3661  * Returns in *new if the inode was read from disk
3662  */
3663 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3664 			 struct btrfs_root *root, int *new)
3665 {
3666 	struct inode *inode;
3667 
3668 	inode = btrfs_iget_locked(s, location->objectid, root);
3669 	if (!inode)
3670 		return ERR_PTR(-ENOMEM);
3671 
3672 	if (inode->i_state & I_NEW) {
3673 		BTRFS_I(inode)->root = root;
3674 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3675 		btrfs_read_locked_inode(inode);
3676 
3677 		inode_tree_add(inode);
3678 		unlock_new_inode(inode);
3679 		if (new)
3680 			*new = 1;
3681 	}
3682 
3683 	return inode;
3684 }
3685 
3686 static struct inode *new_simple_dir(struct super_block *s,
3687 				    struct btrfs_key *key,
3688 				    struct btrfs_root *root)
3689 {
3690 	struct inode *inode = new_inode(s);
3691 
3692 	if (!inode)
3693 		return ERR_PTR(-ENOMEM);
3694 
3695 	init_btrfs_i(inode);
3696 
3697 	BTRFS_I(inode)->root = root;
3698 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3699 	BTRFS_I(inode)->dummy_inode = 1;
3700 
3701 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3702 	inode->i_op = &simple_dir_inode_operations;
3703 	inode->i_fop = &simple_dir_operations;
3704 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3705 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3706 
3707 	return inode;
3708 }
3709 
3710 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3711 {
3712 	struct inode *inode;
3713 	struct btrfs_root *root = BTRFS_I(dir)->root;
3714 	struct btrfs_root *sub_root = root;
3715 	struct btrfs_key location;
3716 	int index;
3717 	int ret;
3718 
3719 	dentry->d_op = &btrfs_dentry_operations;
3720 
3721 	if (dentry->d_name.len > BTRFS_NAME_LEN)
3722 		return ERR_PTR(-ENAMETOOLONG);
3723 
3724 	ret = btrfs_inode_by_name(dir, dentry, &location);
3725 
3726 	if (ret < 0)
3727 		return ERR_PTR(ret);
3728 
3729 	if (location.objectid == 0)
3730 		return NULL;
3731 
3732 	if (location.type == BTRFS_INODE_ITEM_KEY) {
3733 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3734 		return inode;
3735 	}
3736 
3737 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3738 
3739 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
3740 	ret = fixup_tree_root_location(root, dir, dentry,
3741 				       &location, &sub_root);
3742 	if (ret < 0) {
3743 		if (ret != -ENOENT)
3744 			inode = ERR_PTR(ret);
3745 		else
3746 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
3747 	} else {
3748 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3749 	}
3750 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3751 
3752 	if (!IS_ERR(inode) && root != sub_root) {
3753 		down_read(&root->fs_info->cleanup_work_sem);
3754 		if (!(inode->i_sb->s_flags & MS_RDONLY))
3755 			btrfs_orphan_cleanup(sub_root);
3756 		up_read(&root->fs_info->cleanup_work_sem);
3757 	}
3758 
3759 	return inode;
3760 }
3761 
3762 static int btrfs_dentry_delete(struct dentry *dentry)
3763 {
3764 	struct btrfs_root *root;
3765 
3766 	if (!dentry->d_inode && !IS_ROOT(dentry))
3767 		dentry = dentry->d_parent;
3768 
3769 	if (dentry->d_inode) {
3770 		root = BTRFS_I(dentry->d_inode)->root;
3771 		if (btrfs_root_refs(&root->root_item) == 0)
3772 			return 1;
3773 	}
3774 	return 0;
3775 }
3776 
3777 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3778 				   struct nameidata *nd)
3779 {
3780 	struct inode *inode;
3781 
3782 	inode = btrfs_lookup_dentry(dir, dentry);
3783 	if (IS_ERR(inode))
3784 		return ERR_CAST(inode);
3785 
3786 	return d_splice_alias(inode, dentry);
3787 }
3788 
3789 static unsigned char btrfs_filetype_table[] = {
3790 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3791 };
3792 
3793 static int btrfs_real_readdir(struct file *filp, void *dirent,
3794 			      filldir_t filldir)
3795 {
3796 	struct inode *inode = filp->f_dentry->d_inode;
3797 	struct btrfs_root *root = BTRFS_I(inode)->root;
3798 	struct btrfs_item *item;
3799 	struct btrfs_dir_item *di;
3800 	struct btrfs_key key;
3801 	struct btrfs_key found_key;
3802 	struct btrfs_path *path;
3803 	int ret;
3804 	u32 nritems;
3805 	struct extent_buffer *leaf;
3806 	int slot;
3807 	int advance;
3808 	unsigned char d_type;
3809 	int over = 0;
3810 	u32 di_cur;
3811 	u32 di_total;
3812 	u32 di_len;
3813 	int key_type = BTRFS_DIR_INDEX_KEY;
3814 	char tmp_name[32];
3815 	char *name_ptr;
3816 	int name_len;
3817 
3818 	/* FIXME, use a real flag for deciding about the key type */
3819 	if (root->fs_info->tree_root == root)
3820 		key_type = BTRFS_DIR_ITEM_KEY;
3821 
3822 	/* special case for "." */
3823 	if (filp->f_pos == 0) {
3824 		over = filldir(dirent, ".", 1,
3825 			       1, inode->i_ino,
3826 			       DT_DIR);
3827 		if (over)
3828 			return 0;
3829 		filp->f_pos = 1;
3830 	}
3831 	/* special case for .., just use the back ref */
3832 	if (filp->f_pos == 1) {
3833 		u64 pino = parent_ino(filp->f_path.dentry);
3834 		over = filldir(dirent, "..", 2,
3835 			       2, pino, DT_DIR);
3836 		if (over)
3837 			return 0;
3838 		filp->f_pos = 2;
3839 	}
3840 	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
3841 	path->reada = 2;
3842 
3843 	btrfs_set_key_type(&key, key_type);
3844 	key.offset = filp->f_pos;
3845 	key.objectid = inode->i_ino;
3846 
3847 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3848 	if (ret < 0)
3849 		goto err;
3850 	advance = 0;
3851 
3852 	while (1) {
3853 		leaf = path->nodes[0];
3854 		nritems = btrfs_header_nritems(leaf);
3855 		slot = path->slots[0];
3856 		if (advance || slot >= nritems) {
3857 			if (slot >= nritems - 1) {
3858 				ret = btrfs_next_leaf(root, path);
3859 				if (ret)
3860 					break;
3861 				leaf = path->nodes[0];
3862 				nritems = btrfs_header_nritems(leaf);
3863 				slot = path->slots[0];
3864 			} else {
3865 				slot++;
3866 				path->slots[0]++;
3867 			}
3868 		}
3869 
3870 		advance = 1;
3871 		item = btrfs_item_nr(leaf, slot);
3872 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3873 
3874 		if (found_key.objectid != key.objectid)
3875 			break;
3876 		if (btrfs_key_type(&found_key) != key_type)
3877 			break;
3878 		if (found_key.offset < filp->f_pos)
3879 			continue;
3880 
3881 		filp->f_pos = found_key.offset;
3882 
3883 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3884 		di_cur = 0;
3885 		di_total = btrfs_item_size(leaf, item);
3886 
3887 		while (di_cur < di_total) {
3888 			struct btrfs_key location;
3889 
3890 			name_len = btrfs_dir_name_len(leaf, di);
3891 			if (name_len <= sizeof(tmp_name)) {
3892 				name_ptr = tmp_name;
3893 			} else {
3894 				name_ptr = kmalloc(name_len, GFP_NOFS);
3895 				if (!name_ptr) {
3896 					ret = -ENOMEM;
3897 					goto err;
3898 				}
3899 			}
3900 			read_extent_buffer(leaf, name_ptr,
3901 					   (unsigned long)(di + 1), name_len);
3902 
3903 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3904 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
3905 
3906 			/* is this a reference to our own snapshot? If so
3907 			 * skip it
3908 			 */
3909 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
3910 			    location.objectid == root->root_key.objectid) {
3911 				over = 0;
3912 				goto skip;
3913 			}
3914 			over = filldir(dirent, name_ptr, name_len,
3915 				       found_key.offset, location.objectid,
3916 				       d_type);
3917 
3918 skip:
3919 			if (name_ptr != tmp_name)
3920 				kfree(name_ptr);
3921 
3922 			if (over)
3923 				goto nopos;
3924 			di_len = btrfs_dir_name_len(leaf, di) +
3925 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3926 			di_cur += di_len;
3927 			di = (struct btrfs_dir_item *)((char *)di + di_len);
3928 		}
3929 	}
3930 
3931 	/* Reached end of directory/root. Bump pos past the last item. */
3932 	if (key_type == BTRFS_DIR_INDEX_KEY)
3933 		/*
3934 		 * 32-bit glibc will use getdents64, but then truncate the
3935 		 * offset with strtol, so this is the largest position we can return.
3936 		 */
3937 		filp->f_pos = 0x7fffffff;
3938 	else
3939 		filp->f_pos++;
3940 nopos:
3941 	ret = 0;
3942 err:
3943 	btrfs_free_path(path);
3944 	return ret;
3945 }
3946 
3947 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3948 {
3949 	struct btrfs_root *root = BTRFS_I(inode)->root;
3950 	struct btrfs_trans_handle *trans;
3951 	int ret = 0;
3952 
3953 	if (root->fs_info->btree_inode == inode)
3954 		return 0;
3955 
3956 	if (wbc->sync_mode == WB_SYNC_ALL) {
3957 		trans = btrfs_join_transaction(root, 1);
3958 		btrfs_set_trans_block_group(trans, inode);
3959 		ret = btrfs_commit_transaction(trans, root);
3960 	}
3961 	return ret;
3962 }
3963 
3964 /*
3965  * This is somewhat expensive, updating the tree every time the
3966  * inode changes.  But, it is most likely to find the inode in cache.
3967  * FIXME, needs more benchmarking...there are no reasons other than performance
3968  * to keep or drop this code.
3969  */
3970 void btrfs_dirty_inode(struct inode *inode)
3971 {
3972 	struct btrfs_root *root = BTRFS_I(inode)->root;
3973 	struct btrfs_trans_handle *trans;
3974 
3975 	trans = btrfs_join_transaction(root, 1);
3976 	btrfs_set_trans_block_group(trans, inode);
3977 	btrfs_update_inode(trans, root, inode);
3978 	btrfs_end_transaction(trans, root);
3979 }
3980 
3981 /*
3982  * find the highest existing sequence number in a directory
3983  * and then set the in-memory index_cnt variable to reflect
3984  * free sequence numbers
3985  */
3986 static int btrfs_set_inode_index_count(struct inode *inode)
3987 {
3988 	struct btrfs_root *root = BTRFS_I(inode)->root;
3989 	struct btrfs_key key, found_key;
3990 	struct btrfs_path *path;
3991 	struct extent_buffer *leaf;
3992 	int ret;
3993 
3994 	key.objectid = inode->i_ino;
3995 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3996 	key.offset = (u64)-1;
3997 
3998 	path = btrfs_alloc_path();
3999 	if (!path)
4000 		return -ENOMEM;
4001 
4002 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4003 	if (ret < 0)
4004 		goto out;
4005 	/* FIXME: we should be able to handle this */
4006 	if (ret == 0)
4007 		goto out;
4008 	ret = 0;
4009 
4010 	/*
4011 	 * MAGIC NUMBER EXPLANATION:
4012 	 * since we search a directory based on f_pos, and '.' and '..'
4013 	 * occupy f_pos 0 and 1 respectively, every other entry has to
4014 	 * start at index 2
4015 	 */
4016 	if (path->slots[0] == 0) {
4017 		BTRFS_I(inode)->index_cnt = 2;
4018 		goto out;
4019 	}
4020 
4021 	path->slots[0]--;
4022 
4023 	leaf = path->nodes[0];
4024 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4025 
4026 	if (found_key.objectid != inode->i_ino ||
4027 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4028 		BTRFS_I(inode)->index_cnt = 2;
4029 		goto out;
4030 	}
4031 
4032 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4033 out:
4034 	btrfs_free_path(path);
4035 	return ret;
4036 }
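
/*
 * Worked example (made-up offsets): if the highest DIR_INDEX item in the
 * directory is (ino, DIR_INDEX, 57), index_cnt becomes 58 and the next
 * entry created gets sequence number 58.  A directory with no DIR_INDEX
 * items at all starts at 2, since f_pos 0 and 1 are reserved for '.'
 * and '..'.
 */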
4037 
4038 /*
4039  * helper to find a free sequence number in a given directory.  The current
4040  * code is very simple; later versions will do smarter things in the btree
4041  */
4042 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4043 {
4044 	int ret = 0;
4045 
4046 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4047 		ret = btrfs_set_inode_index_count(dir);
4048 		if (ret)
4049 			return ret;
4050 	}
4051 
4052 	*index = BTRFS_I(dir)->index_cnt;
4053 	BTRFS_I(dir)->index_cnt++;
4054 
4055 	return ret;
4056 }
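
/*
 * A minimal caller sketch (hypothetical, for illustration only): every
 * path that creates a directory entry grabs the next free index first
 * and then hands it to btrfs_add_link():
 *
 *	u64 index;
 *	int err;
 *
 *	err = btrfs_set_inode_index(dir, &index);
 *	if (err)
 *		return err;
 *	err = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
 *
 * btrfs_new_inode() below grabs the index this way; the create, mknod
 * and mkdir paths then pass it on to btrfs_add_link().
 */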
4057 
4058 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4059 				     struct btrfs_root *root,
4060 				     struct inode *dir,
4061 				     const char *name, int name_len,
4062 				     u64 ref_objectid, u64 objectid,
4063 				     u64 alloc_hint, int mode, u64 *index)
4064 {
4065 	struct inode *inode;
4066 	struct btrfs_inode_item *inode_item;
4067 	struct btrfs_key *location;
4068 	struct btrfs_path *path;
4069 	struct btrfs_inode_ref *ref;
4070 	struct btrfs_key key[2];
4071 	u32 sizes[2];
4072 	unsigned long ptr;
4073 	int ret;
4074 	int owner;
4075 
4076 	path = btrfs_alloc_path();
4077 	if (!path)
		return ERR_PTR(-ENOMEM);
4078 
4079 	inode = new_inode(root->fs_info->sb);
4080 	if (!inode) {
		btrfs_free_path(path);
4081 		return ERR_PTR(-ENOMEM);
	}
4082 
4083 	if (dir) {
4084 		ret = btrfs_set_inode_index(dir, index);
4085 		if (ret) {
			btrfs_free_path(path);
4086 			iput(inode);
4087 			return ERR_PTR(ret);
4088 		}
4089 	}
4090 	/*
4091 	 * index_cnt is ignored for everything but a dir,
4092 	 * btrfs_set_inode_index_count has an explanation for the magic
4093 	 * number
4094 	 */
4095 	init_btrfs_i(inode);
4096 	BTRFS_I(inode)->index_cnt = 2;
4097 	BTRFS_I(inode)->root = root;
4098 	BTRFS_I(inode)->generation = trans->transid;
4099 	btrfs_set_inode_space_info(root, inode);
4100 
4101 	if (mode & S_IFDIR)
4102 		owner = 0;
4103 	else
4104 		owner = 1;
4105 	BTRFS_I(inode)->block_group =
4106 			btrfs_find_block_group(root, 0, alloc_hint, owner);
4107 
4108 	key[0].objectid = objectid;
4109 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4110 	key[0].offset = 0;
4111 
4112 	key[1].objectid = objectid;
4113 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4114 	key[1].offset = ref_objectid;
4115 
4116 	sizes[0] = sizeof(struct btrfs_inode_item);
4117 	sizes[1] = name_len + sizeof(*ref);
4118 
4119 	path->leave_spinning = 1;
4120 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4121 	if (ret != 0)
4122 		goto fail;
4123 
4124 	inode_init_owner(inode, dir, mode);
4125 	inode->i_ino = objectid;
4126 	inode_set_bytes(inode, 0);
4127 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4128 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4129 				  struct btrfs_inode_item);
4130 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4131 
4132 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4133 			     struct btrfs_inode_ref);
4134 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4135 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4136 	ptr = (unsigned long)(ref + 1);
4137 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4138 
4139 	btrfs_mark_buffer_dirty(path->nodes[0]);
4140 	btrfs_free_path(path);
4141 
4142 	location = &BTRFS_I(inode)->location;
4143 	location->objectid = objectid;
4144 	location->offset = 0;
4145 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4146 
4147 	btrfs_inherit_iflags(inode, dir);
4148 
4149 	if (S_ISREG(mode)) {
4150 		if (btrfs_test_opt(root, NODATASUM))
4151 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4152 		if (btrfs_test_opt(root, NODATACOW))
4153 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4154 	}
4155 
4156 	insert_inode_hash(inode);
4157 	inode_tree_add(inode);
4158 	return inode;
4159 fail:
4160 	if (dir)
4161 		BTRFS_I(dir)->index_cnt--;
4162 	btrfs_free_path(path);
4163 	iput(inode);
4164 	return ERR_PTR(ret);
4165 }
4166 
4167 static inline u8 btrfs_inode_type(struct inode *inode)
4168 {
4169 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4170 }
4171 
4172 /*
4173  * utility function to add 'inode' into 'parent_inode' with
4174  * a given name and a given sequence number.
4175  * if 'add_backref' is true, also insert a backref from the
4176  * inode to the parent directory.
4177  */
4178 int btrfs_add_link(struct btrfs_trans_handle *trans,
4179 		   struct inode *parent_inode, struct inode *inode,
4180 		   const char *name, int name_len, int add_backref, u64 index)
4181 {
4182 	int ret = 0;
4183 	struct btrfs_key key;
4184 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4185 
4186 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4187 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4188 	} else {
4189 		key.objectid = inode->i_ino;
4190 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4191 		key.offset = 0;
4192 	}
4193 
4194 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4195 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4196 					 key.objectid, root->root_key.objectid,
4197 					 parent_inode->i_ino,
4198 					 index, name, name_len);
4199 	} else if (add_backref) {
4200 		ret = btrfs_insert_inode_ref(trans, root,
4201 					     name, name_len, inode->i_ino,
4202 					     parent_inode->i_ino, index);
4203 	}
4204 
4205 	if (ret == 0) {
4206 		ret = btrfs_insert_dir_item(trans, root, name, name_len,
4207 					    parent_inode->i_ino, &key,
4208 					    btrfs_inode_type(inode), index);
4209 		BUG_ON(ret);
4210 
4211 		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4212 				   name_len * 2);
4213 		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4214 		ret = btrfs_update_inode(trans, root, parent_inode);
4215 	}
4216 	return ret;
4217 }
4218 
4219 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4220 			    struct dentry *dentry, struct inode *inode,
4221 			    int backref, u64 index)
4222 {
4223 	int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4224 				 inode, dentry->d_name.name,
4225 				 dentry->d_name.len, backref, index);
4226 	if (!err) {
4227 		d_instantiate(dentry, inode);
4228 		return 0;
4229 	}
4230 	if (err > 0)
4231 		err = -EEXIST;
4232 	return err;
4233 }
4234 
4235 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4236 			int mode, dev_t rdev)
4237 {
4238 	struct btrfs_trans_handle *trans;
4239 	struct btrfs_root *root = BTRFS_I(dir)->root;
4240 	struct inode *inode = NULL;
4241 	int err;
4242 	int drop_inode = 0;
4243 	u64 objectid;
4244 	unsigned long nr = 0;
4245 	u64 index = 0;
4246 
4247 	if (!new_valid_dev(rdev))
4248 		return -EINVAL;
4249 
4250 	/*
4251 	 * 2 for inode item and ref
4252 	 * 2 for dir items
4253 	 * 1 for xattr if selinux is on
4254 	 */
4255 	err = btrfs_reserve_metadata_space(root, 5);
4256 	if (err)
4257 		return err;
4258 
4259 	trans = btrfs_start_transaction(root, 1);
4260 	if (!trans) {
		err = -ENOMEM;
4261 		goto fail;
	}
4262 	btrfs_set_trans_block_group(trans, dir);
4263 
4264 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4265 	if (err) {
4266 		err = -ENOSPC;
4267 		goto out_unlock;
4268 	}
4269 
4270 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4271 				dentry->d_name.len,
4272 				dentry->d_parent->d_inode->i_ino, objectid,
4273 				BTRFS_I(dir)->block_group, mode, &index);
4274 	err = PTR_ERR(inode);
4275 	if (IS_ERR(inode))
4276 		goto out_unlock;
4277 
4278 	err = btrfs_init_inode_security(trans, inode, dir);
4279 	if (err) {
4280 		drop_inode = 1;
4281 		goto out_unlock;
4282 	}
4283 
4284 	btrfs_set_trans_block_group(trans, inode);
4285 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4286 	if (err)
4287 		drop_inode = 1;
4288 	else {
4289 		inode->i_op = &btrfs_special_inode_operations;
4290 		init_special_inode(inode, inode->i_mode, rdev);
4291 		btrfs_update_inode(trans, root, inode);
4292 	}
4293 	btrfs_update_inode_block_group(trans, inode);
4294 	btrfs_update_inode_block_group(trans, dir);
4295 out_unlock:
4296 	nr = trans->blocks_used;
4297 	btrfs_end_transaction_throttle(trans, root);
4298 fail:
4299 	btrfs_unreserve_metadata_space(root, 5);
4300 	if (drop_inode) {
4301 		inode_dec_link_count(inode);
4302 		iput(inode);
4303 	}
4304 	btrfs_btree_balance_dirty(root, nr);
4305 	return err;
4306 }
4307 
4308 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4309 			int mode, struct nameidata *nd)
4310 {
4311 	struct btrfs_trans_handle *trans;
4312 	struct btrfs_root *root = BTRFS_I(dir)->root;
4313 	struct inode *inode = NULL;
4314 	int err;
4315 	int drop_inode = 0;
4316 	unsigned long nr = 0;
4317 	u64 objectid;
4318 	u64 index = 0;
4319 
4320 	/*
4321 	 * 2 for inode item and ref
4322 	 * 2 for dir items
4323 	 * 1 for xattr if selinux is on
4324 	 */
4325 	err = btrfs_reserve_metadata_space(root, 5);
4326 	if (err)
4327 		return err;
4328 
4329 	trans = btrfs_start_transaction(root, 1);
4330 	if (!trans) {
		err = -ENOMEM;
4331 		goto fail;
	}
4332 	btrfs_set_trans_block_group(trans, dir);
4333 
4334 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4335 	if (err) {
4336 		err = -ENOSPC;
4337 		goto out_unlock;
4338 	}
4339 
4340 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4341 				dentry->d_name.len,
4342 				dentry->d_parent->d_inode->i_ino,
4343 				objectid, BTRFS_I(dir)->block_group, mode,
4344 				&index);
4345 	err = PTR_ERR(inode);
4346 	if (IS_ERR(inode))
4347 		goto out_unlock;
4348 
4349 	err = btrfs_init_inode_security(trans, inode, dir);
4350 	if (err) {
4351 		drop_inode = 1;
4352 		goto out_unlock;
4353 	}
4354 
4355 	btrfs_set_trans_block_group(trans, inode);
4356 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4357 	if (err)
4358 		drop_inode = 1;
4359 	else {
4360 		inode->i_mapping->a_ops = &btrfs_aops;
4361 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4362 		inode->i_fop = &btrfs_file_operations;
4363 		inode->i_op = &btrfs_file_inode_operations;
4364 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4365 	}
4366 	btrfs_update_inode_block_group(trans, inode);
4367 	btrfs_update_inode_block_group(trans, dir);
4368 out_unlock:
4369 	nr = trans->blocks_used;
4370 	btrfs_end_transaction_throttle(trans, root);
4371 fail:
4372 	btrfs_unreserve_metadata_space(root, 5);
4373 	if (drop_inode) {
4374 		inode_dec_link_count(inode);
4375 		iput(inode);
4376 	}
4377 	btrfs_btree_balance_dirty(root, nr);
4378 	return err;
4379 }
4380 
4381 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4382 		      struct dentry *dentry)
4383 {
4384 	struct btrfs_trans_handle *trans;
4385 	struct btrfs_root *root = BTRFS_I(dir)->root;
4386 	struct inode *inode = old_dentry->d_inode;
4387 	u64 index;
4388 	unsigned long nr = 0;
4389 	int err;
4390 	int drop_inode = 0;
4391 
4392 	if (inode->i_nlink == 0)
4393 		return -ENOENT;
4394 
4395 	/* do not allow sys_link's with other subvols of the same device */
4396 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4397 		return -EPERM;
4398 
4399 	/*
4400 	 * 1 item for inode ref
4401 	 * 2 items for dir items
4402 	 */
4403 	err = btrfs_reserve_metadata_space(root, 3);
4404 	if (err)
4405 		return err;
4406 
4407 	btrfs_inc_nlink(inode);
4408 
4409 	err = btrfs_set_inode_index(dir, &index);
4410 	if (err)
4411 		goto fail;
4412 
4413 	trans = btrfs_start_transaction(root, 1);
4414 
4415 	btrfs_set_trans_block_group(trans, dir);
4416 	atomic_inc(&inode->i_count);
4417 
4418 	err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4419 
4420 	if (err) {
4421 		drop_inode = 1;
4422 	} else {
4423 		btrfs_update_inode_block_group(trans, dir);
4424 		err = btrfs_update_inode(trans, root, inode);
4425 		BUG_ON(err);
4426 		btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4427 	}
4428 
4429 	nr = trans->blocks_used;
4430 	btrfs_end_transaction_throttle(trans, root);
4431 fail:
4432 	btrfs_unreserve_metadata_space(root, 3);
4433 	if (drop_inode) {
4434 		inode_dec_link_count(inode);
4435 		iput(inode);
4436 	}
4437 	btrfs_btree_balance_dirty(root, nr);
4438 	return err;
4439 }
4440 
4441 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4442 {
4443 	struct inode *inode = NULL;
4444 	struct btrfs_trans_handle *trans;
4445 	struct btrfs_root *root = BTRFS_I(dir)->root;
4446 	int err = 0;
4447 	int drop_on_err = 0;
4448 	u64 objectid = 0;
4449 	u64 index = 0;
4450 	unsigned long nr = 1;
4451 
4452 	/*
4453 	 * 2 items for inode and ref
4454 	 * 2 items for dir items
4455 	 * 1 for xattr if selinux is on
4456 	 */
4457 	err = btrfs_reserve_metadata_space(root, 5);
4458 	if (err)
4459 		return err;
4460 
4461 	trans = btrfs_start_transaction(root, 1);
4462 	if (!trans) {
4463 		err = -ENOMEM;
4464 		goto out_unlock;
4465 	}
4466 	btrfs_set_trans_block_group(trans, dir);
4467 
4468 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4469 	if (err) {
4470 		err = -ENOSPC;
4471 		goto out_fail;
4472 	}
4473 
4474 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4475 				dentry->d_name.len,
4476 				dentry->d_parent->d_inode->i_ino, objectid,
4477 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
4478 				&index);
4479 	if (IS_ERR(inode)) {
4480 		err = PTR_ERR(inode);
4481 		goto out_fail;
4482 	}
4483 
4484 	drop_on_err = 1;
4485 
4486 	err = btrfs_init_inode_security(trans, inode, dir);
4487 	if (err)
4488 		goto out_fail;
4489 
4490 	inode->i_op = &btrfs_dir_inode_operations;
4491 	inode->i_fop = &btrfs_dir_file_operations;
4492 	btrfs_set_trans_block_group(trans, inode);
4493 
4494 	btrfs_i_size_write(inode, 0);
4495 	err = btrfs_update_inode(trans, root, inode);
4496 	if (err)
4497 		goto out_fail;
4498 
4499 	err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4500 				 inode, dentry->d_name.name,
4501 				 dentry->d_name.len, 0, index);
4502 	if (err)
4503 		goto out_fail;
4504 
4505 	d_instantiate(dentry, inode);
4506 	drop_on_err = 0;
4507 	btrfs_update_inode_block_group(trans, inode);
4508 	btrfs_update_inode_block_group(trans, dir);
4509 
4510 out_fail:
4511 	nr = trans->blocks_used;
4512 	btrfs_end_transaction_throttle(trans, root);
4513 
4514 out_unlock:
4515 	btrfs_unreserve_metadata_space(root, 5);
4516 	if (drop_on_err)
4517 		iput(inode);
4518 	btrfs_btree_balance_dirty(root, nr);
4519 	return err;
4520 }
4521 
4522 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
4523  * and an extent that you want to insert, deal with overlap and insert
4524  * the new extent into the tree.
4525  */
4526 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4527 				struct extent_map *existing,
4528 				struct extent_map *em,
4529 				u64 map_start, u64 map_len)
4530 {
4531 	u64 start_diff;
4532 
4533 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4534 	start_diff = map_start - em->start;
4535 	em->start = map_start;
4536 	em->len = map_len;
4537 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4538 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4539 		em->block_start += start_diff;
4540 		em->block_len -= start_diff;
4541 	}
4542 	return add_extent_mapping(em_tree, em);
4543 }
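
/*
 * Worked example (hypothetical numbers): say 'em' describes a file range
 * [8K, 24K) backed by disk bytenr 1M, and the caller only needs the
 * piece at map_start = 16K, map_len = 4K.  Then
 *
 *	start_diff      = 16K - 8K = 8K
 *	em->start       = 16K,  em->len = 4K
 *	em->block_start = 1M + 8K,  em->block_len -= 8K
 *
 * so only the still-missing tail is inserted, shifted to the matching
 * disk blocks.  Compressed extents skip the block shift because their
 * block_start always refers to the whole compressed extent on disk.
 */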
4544 
4545 static noinline int uncompress_inline(struct btrfs_path *path,
4546 				      struct inode *inode, struct page *page,
4547 				      size_t pg_offset, u64 extent_offset,
4548 				      struct btrfs_file_extent_item *item)
4549 {
4550 	int ret;
4551 	struct extent_buffer *leaf = path->nodes[0];
4552 	char *tmp;
4553 	size_t max_size;
4554 	unsigned long inline_size;
4555 	unsigned long ptr;
4556 
4557 	WARN_ON(pg_offset != 0);
4558 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
4559 	inline_size = btrfs_file_extent_inline_item_len(leaf,
4560 					btrfs_item_nr(leaf, path->slots[0]));
4561 	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
4562 	ptr = btrfs_file_extent_inline_start(item);
4563 
4564 	read_extent_buffer(leaf, tmp, ptr, inline_size);
4565 
4566 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4567 	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4568 				    inline_size, max_size);
4569 	if (ret) {
4570 		char *kaddr = kmap_atomic(page, KM_USER0);
4571 		unsigned long copy_size = min_t(u64,
4572 				  PAGE_CACHE_SIZE - pg_offset,
4573 				  max_size - extent_offset);
4574 		memset(kaddr + pg_offset, 0, copy_size);
4575 		kunmap_atomic(kaddr, KM_USER0);
4576 	}
4577 	kfree(tmp);
4578 	return 0;
4579 }
4580 
4581 /*
4582  * a bit scary, this does extent mapping from logical file offset to the disk.
4583  * the ugly parts come from merging extents from the disk with the in-ram
4584  * representation.  This gets more complex because of the data=ordered code,
4585  * where the in-ram extents might be locked pending data=ordered completion.
4586  *
4587  * This also copies inline extents directly into the page.
4588  */
4589 
4590 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4591 				    size_t pg_offset, u64 start, u64 len,
4592 				    int create)
4593 {
4594 	int ret;
4595 	int err = 0;
4596 	u64 bytenr;
4597 	u64 extent_start = 0;
4598 	u64 extent_end = 0;
4599 	u64 objectid = inode->i_ino;
4600 	u32 found_type;
4601 	struct btrfs_path *path = NULL;
4602 	struct btrfs_root *root = BTRFS_I(inode)->root;
4603 	struct btrfs_file_extent_item *item;
4604 	struct extent_buffer *leaf;
4605 	struct btrfs_key found_key;
4606 	struct extent_map *em = NULL;
4607 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4608 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4609 	struct btrfs_trans_handle *trans = NULL;
4610 	int compressed;
4611 
4612 again:
4613 	read_lock(&em_tree->lock);
4614 	em = lookup_extent_mapping(em_tree, start, len);
4615 	if (em)
4616 		em->bdev = root->fs_info->fs_devices->latest_bdev;
4617 	read_unlock(&em_tree->lock);
4618 
4619 	if (em) {
4620 		if (em->start > start || em->start + em->len <= start)
4621 			free_extent_map(em);
4622 		else if (em->block_start == EXTENT_MAP_INLINE && page)
4623 			free_extent_map(em);
4624 		else
4625 			goto out;
4626 	}
4627 	em = alloc_extent_map(GFP_NOFS);
4628 	if (!em) {
4629 		err = -ENOMEM;
4630 		goto out;
4631 	}
4632 	em->bdev = root->fs_info->fs_devices->latest_bdev;
4633 	em->start = EXTENT_MAP_HOLE;
4634 	em->orig_start = EXTENT_MAP_HOLE;
4635 	em->len = (u64)-1;
4636 	em->block_len = (u64)-1;
4637 
4638 	if (!path) {
4639 		path = btrfs_alloc_path();
4640 		if (!path) {
			err = -ENOMEM;
			goto out;
		}
4641 	}
4642 
4643 	ret = btrfs_lookup_file_extent(trans, root, path,
4644 				       objectid, start, trans != NULL);
4645 	if (ret < 0) {
4646 		err = ret;
4647 		goto out;
4648 	}
4649 
4650 	if (ret != 0) {
4651 		if (path->slots[0] == 0)
4652 			goto not_found;
4653 		path->slots[0]--;
4654 	}
4655 
4656 	leaf = path->nodes[0];
4657 	item = btrfs_item_ptr(leaf, path->slots[0],
4658 			      struct btrfs_file_extent_item);
4659 	/* are we inside the extent that was found? */
4660 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4661 	found_type = btrfs_key_type(&found_key);
4662 	if (found_key.objectid != objectid ||
4663 	    found_type != BTRFS_EXTENT_DATA_KEY) {
4664 		goto not_found;
4665 	}
4666 
4667 	found_type = btrfs_file_extent_type(leaf, item);
4668 	extent_start = found_key.offset;
4669 	compressed = btrfs_file_extent_compression(leaf, item);
4670 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4671 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4672 		extent_end = extent_start +
4673 		       btrfs_file_extent_num_bytes(leaf, item);
4674 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4675 		size_t size;
4676 		size = btrfs_file_extent_inline_len(leaf, item);
4677 		extent_end = (extent_start + size + root->sectorsize - 1) &
4678 			~((u64)root->sectorsize - 1);
4679 	}
4680 
4681 	if (start >= extent_end) {
4682 		path->slots[0]++;
4683 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4684 			ret = btrfs_next_leaf(root, path);
4685 			if (ret < 0) {
4686 				err = ret;
4687 				goto out;
4688 			}
4689 			if (ret > 0)
4690 				goto not_found;
4691 			leaf = path->nodes[0];
4692 		}
4693 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4694 		if (found_key.objectid != objectid ||
4695 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
4696 			goto not_found;
4697 		if (start + len <= found_key.offset)
4698 			goto not_found;
4699 		em->start = start;
4700 		em->len = found_key.offset - start;
4701 		goto not_found_em;
4702 	}
4703 
4704 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4705 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4706 		em->start = extent_start;
4707 		em->len = extent_end - extent_start;
4708 		em->orig_start = extent_start -
4709 				 btrfs_file_extent_offset(leaf, item);
4710 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4711 		if (bytenr == 0) {
4712 			em->block_start = EXTENT_MAP_HOLE;
4713 			goto insert;
4714 		}
4715 		if (compressed) {
4716 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4717 			em->block_start = bytenr;
4718 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4719 									 item);
4720 		} else {
4721 			bytenr += btrfs_file_extent_offset(leaf, item);
4722 			em->block_start = bytenr;
4723 			em->block_len = em->len;
4724 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4725 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4726 		}
4727 		goto insert;
4728 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4729 		unsigned long ptr;
4730 		char *map;
4731 		size_t size;
4732 		size_t extent_offset;
4733 		size_t copy_size;
4734 
4735 		em->block_start = EXTENT_MAP_INLINE;
4736 		if (!page || create) {
4737 			em->start = extent_start;
4738 			em->len = extent_end - extent_start;
4739 			goto out;
4740 		}
4741 
4742 		size = btrfs_file_extent_inline_len(leaf, item);
4743 		extent_offset = page_offset(page) + pg_offset - extent_start;
4744 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4745 				size - extent_offset);
4746 		em->start = extent_start + extent_offset;
4747 		em->len = (copy_size + root->sectorsize - 1) &
4748 			~((u64)root->sectorsize - 1);
4749 		em->orig_start = EXTENT_MAP_INLINE;
4750 		if (compressed)
4751 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4752 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4753 		if (create == 0 && !PageUptodate(page)) {
4754 			if (btrfs_file_extent_compression(leaf, item) ==
4755 			    BTRFS_COMPRESS_ZLIB) {
4756 				ret = uncompress_inline(path, inode, page,
4757 							pg_offset,
4758 							extent_offset, item);
4759 				BUG_ON(ret);
4760 			} else {
4761 				map = kmap(page);
4762 				read_extent_buffer(leaf, map + pg_offset, ptr,
4763 						   copy_size);
4764 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4765 					memset(map + pg_offset + copy_size, 0,
4766 					       PAGE_CACHE_SIZE - pg_offset -
4767 					       copy_size);
4768 				}
4769 				kunmap(page);
4770 			}
4771 			flush_dcache_page(page);
4772 		} else if (create && PageUptodate(page)) {
4773 			if (!trans) {
4775 				free_extent_map(em);
4776 				em = NULL;
4777 				btrfs_release_path(root, path);
4778 				trans = btrfs_join_transaction(root, 1);
4779 				goto again;
4780 			}
4781 			map = kmap(page);
4782 			write_extent_buffer(leaf, map + pg_offset, ptr,
4783 					    copy_size);
4784 			kunmap(page);
4785 			btrfs_mark_buffer_dirty(leaf);
4786 		}
4787 		set_extent_uptodate(io_tree, em->start,
4788 				    extent_map_end(em) - 1, GFP_NOFS);
4789 		goto insert;
4790 	} else {
4791 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4792 		WARN_ON(1);
4793 	}
4794 not_found:
4795 	em->start = start;
4796 	em->len = len;
4797 not_found_em:
4798 	em->block_start = EXTENT_MAP_HOLE;
4799 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4800 insert:
4801 	btrfs_release_path(root, path);
4802 	if (em->start > start || extent_map_end(em) <= start) {
4803 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4804 		       "[%llu %llu]\n", (unsigned long long)em->start,
4805 		       (unsigned long long)em->len,
4806 		       (unsigned long long)start,
4807 		       (unsigned long long)len);
4808 		err = -EIO;
4809 		goto out;
4810 	}
4811 
4812 	err = 0;
4813 	write_lock(&em_tree->lock);
4814 	ret = add_extent_mapping(em_tree, em);
4815 	/* it is possible that someone inserted the extent into the tree
4816 	 * while we had the lock dropped.  It is also possible that
4817 	 * an overlapping map exists in the tree
4818 	 */
4819 	if (ret == -EEXIST) {
4820 		struct extent_map *existing;
4821 
4822 		ret = 0;
4823 
4824 		existing = lookup_extent_mapping(em_tree, start, len);
4825 		if (existing && (existing->start > start ||
4826 		    existing->start + existing->len <= start)) {
4827 			free_extent_map(existing);
4828 			existing = NULL;
4829 		}
4830 		if (!existing) {
4831 			existing = lookup_extent_mapping(em_tree, em->start,
4832 							 em->len);
4833 			if (existing) {
4834 				err = merge_extent_mapping(em_tree, existing,
4835 							   em, start,
4836 							   root->sectorsize);
4837 				free_extent_map(existing);
4838 				if (err) {
4839 					free_extent_map(em);
4840 					em = NULL;
4841 				}
4842 			} else {
4843 				err = -EIO;
4844 				free_extent_map(em);
4845 				em = NULL;
4846 			}
4847 		} else {
4848 			free_extent_map(em);
4849 			em = existing;
4850 			err = 0;
4851 		}
4852 	}
4853 	write_unlock(&em_tree->lock);
4854 out:
4855 	if (path)
4856 		btrfs_free_path(path);
4857 	if (trans) {
4858 		ret = btrfs_end_transaction(trans, root);
4859 		if (!err)
4860 			err = ret;
4861 	}
4862 	if (err) {
4863 		free_extent_map(em);
4864 		return ERR_PTR(err);
4865 	}
4866 	return em;
4867 }
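
/*
 * A hedged usage sketch (not code in this file): besides being passed to
 * the generic extent_io helpers as the get_extent callback, the function
 * can be called directly to probe how a byte range is mapped:
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	hole = (em->block_start == EXTENT_MAP_HOLE);
 *	free_extent_map(em);
 *
 * btrfs_readpage() and friends below use the callback form.
 */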
4868 
4869 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4870 			const struct iovec *iov, loff_t offset,
4871 			unsigned long nr_segs)
4872 {
4873 	return -EINVAL;
4874 }
4875 
4876 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4877 		__u64 start, __u64 len)
4878 {
4879 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4880 }
4881 
4882 int btrfs_readpage(struct file *file, struct page *page)
4883 {
4884 	struct extent_io_tree *tree;
4885 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4886 	return extent_read_full_page(tree, page, btrfs_get_extent);
4887 }
4888 
4889 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4890 {
4891 	struct extent_io_tree *tree;
4892 
4894 	if (current->flags & PF_MEMALLOC) {
4895 		redirty_page_for_writepage(wbc, page);
4896 		unlock_page(page);
4897 		return 0;
4898 	}
4899 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4900 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4901 }
4902 
4903 int btrfs_writepages(struct address_space *mapping,
4904 		     struct writeback_control *wbc)
4905 {
4906 	struct extent_io_tree *tree;
4907 
4908 	tree = &BTRFS_I(mapping->host)->io_tree;
4909 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4910 }
4911 
4912 static int
4913 btrfs_readpages(struct file *file, struct address_space *mapping,
4914 		struct list_head *pages, unsigned nr_pages)
4915 {
4916 	struct extent_io_tree *tree;
4917 	tree = &BTRFS_I(mapping->host)->io_tree;
4918 	return extent_readpages(tree, mapping, pages, nr_pages,
4919 				btrfs_get_extent);
4920 }

4921 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4922 {
4923 	struct extent_io_tree *tree;
4924 	struct extent_map_tree *map;
4925 	int ret;
4926 
4927 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4928 	map = &BTRFS_I(page->mapping->host)->extent_tree;
4929 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4930 	if (ret == 1) {
4931 		ClearPagePrivate(page);
4932 		set_page_private(page, 0);
4933 		page_cache_release(page);
4934 	}
4935 	return ret;
4936 }
4937 
4938 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4939 {
4940 	if (PageWriteback(page) || PageDirty(page))
4941 		return 0;
4942 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4943 }
4944 
4945 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4946 {
4947 	struct extent_io_tree *tree;
4948 	struct btrfs_ordered_extent *ordered;
4949 	struct extent_state *cached_state = NULL;
4950 	u64 page_start = page_offset(page);
4951 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4952 
4954 	/*
4955 	 * we have the page locked, so new writeback can't start,
4956 	 * and the dirty bit won't be cleared while we are here.
4957 	 *
4958 	 * Wait for IO on this page so that we can safely clear
4959 	 * the PagePrivate2 bit and do ordered accounting
4960 	 */
4961 	wait_on_page_writeback(page);
4962 
4963 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4964 	if (offset) {
4965 		btrfs_releasepage(page, GFP_NOFS);
4966 		return;
4967 	}
4968 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
4969 			 GFP_NOFS);
4970 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4971 					   page_offset(page));
4972 	if (ordered) {
4973 		/*
4974 		 * IO on this page will never be started, so we need
4975 		 * to account for any ordered extents now
4976 		 */
4977 		clear_extent_bit(tree, page_start, page_end,
4978 				 EXTENT_DIRTY | EXTENT_DELALLOC |
4979 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
4980 				 &cached_state, GFP_NOFS);
4981 		/*
4982 		 * whoever cleared the private bit is responsible
4983 		 * for the finish_ordered_io
4984 		 */
4985 		if (TestClearPagePrivate2(page)) {
4986 			btrfs_finish_ordered_io(page->mapping->host,
4987 						page_start, page_end);
4988 		}
4989 		btrfs_put_ordered_extent(ordered);
4990 		cached_state = NULL;
4991 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
4992 				 GFP_NOFS);
4993 	}
4994 	clear_extent_bit(tree, page_start, page_end,
4995 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4996 		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
4997 	__btrfs_releasepage(page, GFP_NOFS);
4998 
4999 	ClearPageChecked(page);
5000 	if (PagePrivate(page)) {
5001 		ClearPagePrivate(page);
5002 		set_page_private(page, 0);
5003 		page_cache_release(page);
5004 	}
5005 }
5006 
5007 /*
5008  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
5009  * called from a page fault handler when a page is first dirtied. Hence we must
5010  * be careful to check for EOF conditions here. We set the page up correctly
5011  * for a written page which means we get ENOSPC checking when writing into
5012  * holes and correct delalloc and unwritten extent mapping on filesystems that
5013  * support these features.
5014  *
5015  * We are not allowed to take the i_mutex here so we have to play games to
5016  * protect against truncate races as the page could now be beyond EOF.  Because
5017  * vmtruncate() writes the inode size before removing pages, once we have the
5018  * page lock we can determine safely if the page is beyond EOF. If it is not
5019  * beyond EOF, then the page is guaranteed safe against truncation until we
5020  * unlock the page.
5021  */
5022 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5023 {
5024 	struct page *page = vmf->page;
5025 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
5026 	struct btrfs_root *root = BTRFS_I(inode)->root;
5027 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5028 	struct btrfs_ordered_extent *ordered;
5029 	struct extent_state *cached_state = NULL;
5030 	char *kaddr;
5031 	unsigned long zero_start;
5032 	loff_t size;
5033 	int ret;
5034 	u64 page_start;
5035 	u64 page_end;
5036 
5037 	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
5038 	if (ret) {
5039 		if (ret == -ENOMEM)
5040 			ret = VM_FAULT_OOM;
5041 		else /* -ENOSPC, -EIO, etc */
5042 			ret = VM_FAULT_SIGBUS;
5043 		goto out;
5044 	}
5045 
5046 	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5047 	if (ret) {
5048 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5049 		ret = VM_FAULT_SIGBUS;
5050 		goto out;
5051 	}
5052 
5053 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5054 again:
5055 	lock_page(page);
5056 	size = i_size_read(inode);
5057 	page_start = page_offset(page);
5058 	page_end = page_start + PAGE_CACHE_SIZE - 1;
5059 
5060 	if ((page->mapping != inode->i_mapping) ||
5061 	    (page_start >= size)) {
5062 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5063 		/* page got truncated out from underneath us */
5064 		goto out_unlock;
5065 	}
5066 	wait_on_page_writeback(page);
5067 
5068 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
5069 			 GFP_NOFS);
5070 	set_page_extent_mapped(page);
5071 
5072 	/*
5073 	 * we can't set the delalloc bits if there are pending ordered
5074 	 * extents.  Drop our locks and wait for them to finish
5075 	 */
5076 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
5077 	if (ordered) {
5078 		unlock_extent_cached(io_tree, page_start, page_end,
5079 				     &cached_state, GFP_NOFS);
5080 		unlock_page(page);
5081 		btrfs_start_ordered_extent(inode, ordered, 1);
5082 		btrfs_put_ordered_extent(ordered);
5083 		goto again;
5084 	}
5085 
5086 	/*
5087 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
5088 	 * if it was already dirty, so for space accounting reasons we need to
5089 	 * clear any delalloc bits for the range we are fixing to save.  There
5090 	 * is probably a better way to do this, but for now keep consistent with
5091 	 * prepare_pages in the normal write path.
5092 	 */
5093 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
5094 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5095 			  0, 0, &cached_state, GFP_NOFS);
5096 
5097 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
5098 					&cached_state);
5099 	if (ret) {
5100 		unlock_extent_cached(io_tree, page_start, page_end,
5101 				     &cached_state, GFP_NOFS);
5102 		ret = VM_FAULT_SIGBUS;
5103 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5104 		goto out_unlock;
5105 	}
5106 	ret = 0;
5107 
5108 	/* page is wholly or partially inside EOF */
5109 	if (page_start + PAGE_CACHE_SIZE > size)
5110 		zero_start = size & ~PAGE_CACHE_MASK;
5111 	else
5112 		zero_start = PAGE_CACHE_SIZE;
5113 
5114 	if (zero_start != PAGE_CACHE_SIZE) {
5115 		kaddr = kmap(page);
5116 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
5117 		flush_dcache_page(page);
5118 		kunmap(page);
5119 	}
5120 	ClearPageChecked(page);
5121 	set_page_dirty(page);
5122 	SetPageUptodate(page);
5123 
5124 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
5125 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5126 
5127 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
5128 
5129 out_unlock:
5130 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5131 	if (!ret)
5132 		return VM_FAULT_LOCKED;
5133 	unlock_page(page);
5134 out:
5135 	return ret;
5136 }
5137 
5138 static void btrfs_truncate(struct inode *inode)
5139 {
5140 	struct btrfs_root *root = BTRFS_I(inode)->root;
5141 	int ret;
5142 	struct btrfs_trans_handle *trans;
5143 	unsigned long nr;
5144 	u64 mask = root->sectorsize - 1;
5145 
5146 	if (!S_ISREG(inode->i_mode)) {
5147 		WARN_ON(1);
5148 		return;
5149 	}
5150 
5151 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
5152 	if (ret)
5153 		return;
5154 
5155 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5156 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5157 
5158 	trans = btrfs_start_transaction(root, 1);
5159 	btrfs_set_trans_block_group(trans, inode);
5160 
5161 	/*
5162 	 * setattr is responsible for setting the ordered_data_close flag,
5163 	 * but that is only tested during the last file release.  That
5164 	 * could happen well after the next commit, leaving a great big
5165 	 * window where new writes may get lost if someone chooses to write
5166 	 * to this file after truncating to zero
5167 	 *
5168 	 * The inode doesn't have any dirty data here, and so if we commit
5169 	 * this is a noop.  If someone immediately starts writing to the inode
5170 	 * it is very likely we'll catch some of their writes in this
5171 	 * transaction, and the commit will find this file on the ordered
5172 	 * data list with good things to send down.
5173 	 *
5174 	 * This is a best effort solution, there is still a window where
5175 	 * using truncate to replace the contents of the file will
5176 	 * end up with a zero length file after a crash.
5177 	 */
5178 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5179 		btrfs_add_ordered_operation(trans, root, inode);
5180 
5181 	while (1) {
5182 		ret = btrfs_truncate_inode_items(trans, root, inode,
5183 						 inode->i_size,
5184 						 BTRFS_EXTENT_DATA_KEY);
5185 		if (ret != -EAGAIN)
5186 			break;
5187 
5188 		ret = btrfs_update_inode(trans, root, inode);
5189 		BUG_ON(ret);
5190 
5191 		nr = trans->blocks_used;
5192 		btrfs_end_transaction(trans, root);
5193 		btrfs_btree_balance_dirty(root, nr);
5194 
5195 		trans = btrfs_start_transaction(root, 1);
5196 		btrfs_set_trans_block_group(trans, inode);
5197 	}
5198 
5199 	if (ret == 0 && inode->i_nlink > 0) {
5200 		ret = btrfs_orphan_del(trans, inode);
5201 		BUG_ON(ret);
5202 	}
5203 
5204 	ret = btrfs_update_inode(trans, root, inode);
5205 	BUG_ON(ret);
5206 
5207 	nr = trans->blocks_used;
5208 	ret = btrfs_end_transaction_throttle(trans, root);
5209 	BUG_ON(ret);
5210 	btrfs_btree_balance_dirty(root, nr);
5211 }
5212 
5213 /*
5214  * create a new subvolume directory/inode (helper for the ioctl).
5215  */
5216 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5217 			     struct btrfs_root *new_root,
5218 			     u64 new_dirid, u64 alloc_hint)
5219 {
5220 	struct inode *inode;
5221 	int err;
5222 	u64 index = 0;
5223 
5224 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5225 				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5226 	if (IS_ERR(inode))
5227 		return PTR_ERR(inode);
5228 	inode->i_op = &btrfs_dir_inode_operations;
5229 	inode->i_fop = &btrfs_dir_file_operations;
5230 
5231 	inode->i_nlink = 1;
5232 	btrfs_i_size_write(inode, 0);
5233 
5234 	err = btrfs_update_inode(trans, new_root, inode);
5235 	BUG_ON(err);
5236 
5237 	iput(inode);
5238 	return 0;
5239 }
5240 
5241 /* helper function for file defrag and space balancing.  This
5242  * forces readahead on a given range of bytes in an inode
5243  */
5244 unsigned long btrfs_force_ra(struct address_space *mapping,
5245 			      struct file_ra_state *ra, struct file *file,
5246 			      pgoff_t offset, pgoff_t last_index)
5247 {
5248 	pgoff_t req_size = last_index - offset + 1;
5249 
5250 	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5251 	return offset + req_size;
5252 }
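
/*
 * Illustrative use (hypothetical values): a defrag pass that wants pages
 * [index, last_index] resident before touching them kicks off readahead
 * for the whole span in one call:
 *
 *	pgoff_t next = btrfs_force_ra(inode->i_mapping, &file->f_ra,
 *				      file, index, last_index);
 *
 * the return value is last_index + 1, i.e. the first page index that was
 * not part of the request.
 */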
5253 
5254 struct inode *btrfs_alloc_inode(struct super_block *sb)
5255 {
5256 	struct btrfs_inode *ei;
5257 
5258 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5259 	if (!ei)
5260 		return NULL;
5261 	ei->last_trans = 0;
5262 	ei->last_sub_trans = 0;
5263 	ei->logged_trans = 0;
5264 	ei->outstanding_extents = 0;
5265 	ei->reserved_extents = 0;
5266 	ei->root = NULL;
5267 	spin_lock_init(&ei->accounting_lock);
5268 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5269 	INIT_LIST_HEAD(&ei->i_orphan);
5270 	INIT_LIST_HEAD(&ei->ordered_operations);
5271 	return &ei->vfs_inode;
5272 }
5273 
5274 void btrfs_destroy_inode(struct inode *inode)
5275 {
5276 	struct btrfs_ordered_extent *ordered;
5277 	struct btrfs_root *root = BTRFS_I(inode)->root;
5278 
5279 	WARN_ON(!list_empty(&inode->i_dentry));
5280 	WARN_ON(inode->i_data.nrpages);
5281 
5282 	/*
5283 	 * This can happen when we create an inode, but somebody else also
5284 	 * created the same inode and we need to destroy the one we already
5285 	 * created.
5286 	 */
5287 	if (!root)
5288 		goto free;
5289 
5290 	/*
5291 	 * Make sure we're properly removed from the ordered operation
5292 	 * lists.
5293 	 */
5294 	smp_mb();
5295 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5296 		spin_lock(&root->fs_info->ordered_extent_lock);
5297 		list_del_init(&BTRFS_I(inode)->ordered_operations);
5298 		spin_unlock(&root->fs_info->ordered_extent_lock);
5299 	}
5300 
5301 	spin_lock(&root->list_lock);
5302 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5303 		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5304 		       inode->i_ino);
5305 		list_del_init(&BTRFS_I(inode)->i_orphan);
5306 	}
5307 	spin_unlock(&root->list_lock);
5308 
5309 	while (1) {
5310 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5311 		if (!ordered)
5312 			break;
5313 		else {
5314 			printk(KERN_ERR "btrfs found ordered "
5315 			       "extent %llu %llu on inode cleanup\n",
5316 			       (unsigned long long)ordered->file_offset,
5317 			       (unsigned long long)ordered->len);
5318 			btrfs_remove_ordered_extent(inode, ordered);
5319 			btrfs_put_ordered_extent(ordered);
5320 			btrfs_put_ordered_extent(ordered);
5321 		}
5322 	}
5323 	inode_tree_del(inode);
5324 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5325 free:
5326 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5327 }
5328 
5329 void btrfs_drop_inode(struct inode *inode)
5330 {
5331 	struct btrfs_root *root = BTRFS_I(inode)->root;
5332 	if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5333 		generic_delete_inode(inode);
5334 	else
5335 		generic_drop_inode(inode);
5336 }
5337 
5338 static void init_once(void *foo)
5339 {
5340 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5341 
5342 	inode_init_once(&ei->vfs_inode);
5343 }
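
/*
 * Note: a slab constructor runs once when a page of objects is first
 * added to the cache, not on every allocation, so only state that is
 * safe to carry across alloc/free cycles (the embedded VFS inode)
 * belongs here; per-allocation fields are (re)set in
 * btrfs_alloc_inode() above.
 */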
5344 
5345 void btrfs_destroy_cachep(void)
5346 {
5347 	if (btrfs_inode_cachep)
5348 		kmem_cache_destroy(btrfs_inode_cachep);
5349 	if (btrfs_trans_handle_cachep)
5350 		kmem_cache_destroy(btrfs_trans_handle_cachep);
5351 	if (btrfs_transaction_cachep)
5352 		kmem_cache_destroy(btrfs_transaction_cachep);
5353 	if (btrfs_path_cachep)
5354 		kmem_cache_destroy(btrfs_path_cachep);
5355 }
5356 
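/*
 * Called from the module init path (init_btrfs_fs() in super.c).  If
 * any cache allocation fails, btrfs_destroy_cachep() tears down
 * whatever was created so far, so a partial init never leaks a cache.
 */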
5357 int btrfs_init_cachep(void)
5358 {
5359 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5360 			sizeof(struct btrfs_inode), 0,
5361 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5362 	if (!btrfs_inode_cachep)
5363 		goto fail;
5364 
5365 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5366 			sizeof(struct btrfs_trans_handle), 0,
5367 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5368 	if (!btrfs_trans_handle_cachep)
5369 		goto fail;
5370 
5371 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5372 			sizeof(struct btrfs_transaction), 0,
5373 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5374 	if (!btrfs_transaction_cachep)
5375 		goto fail;
5376 
5377 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5378 			sizeof(struct btrfs_path), 0,
5379 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5380 	if (!btrfs_path_cachep)
5381 		goto fail;
5382 
5383 	return 0;
5384 fail:
5385 	btrfs_destroy_cachep();
5386 	return -ENOMEM;
5387 }
5388 
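/*
 * stat->blocks is reported in 512-byte units and includes delalloc
 * bytes that haven't been written back yet; e.g. 1MiB already on disk
 * plus 512KiB of pending delalloc is reported as
 * (1048576 + 524288) >> 9 = 3072 blocks.
 */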
5389 static int btrfs_getattr(struct vfsmount *mnt,
5390 			 struct dentry *dentry, struct kstat *stat)
5391 {
5392 	struct inode *inode = dentry->d_inode;
5393 	generic_fillattr(inode, stat);
5394 	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5395 	stat->blksize = PAGE_CACHE_SIZE;
5396 	stat->blocks = (inode_get_bytes(inode) +
5397 			BTRFS_I(inode)->delalloc_bytes) >> 9;
5398 	return 0;
5399 }
5400 
5401 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5402 			   struct inode *new_dir, struct dentry *new_dentry)
5403 {
5404 	struct btrfs_trans_handle *trans;
5405 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
5406 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5407 	struct inode *new_inode = new_dentry->d_inode;
5408 	struct inode *old_inode = old_dentry->d_inode;
5409 	struct timespec ctime = CURRENT_TIME;
5410 	u64 index = 0;
5411 	u64 root_objectid;
5412 	int ret;
5413 
5414 	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5415 		return -EPERM;
5416 
5417 	/* we only allow renaming a subvolume link between subvolumes */
5418 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5419 		return -EXDEV;
5420 
5421 	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5422 	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5423 		return -ENOTEMPTY;
5424 
5425 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
5426 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5427 		return -ENOTEMPTY;
5428 
5429 	/*
5430 	 * We want to reserve the absolute worst-case number of items.  If
5431 	 * both inodes are subvols and we need to unlink them, that would
5432 	 * require 4 item modifications, but if they are both normal inodes
5433 	 * it would require 5 item modifications, so we'll assume they're
5434 	 * normal inodes.  So 5 * 2 is 10, plus 1 for the new link, and 11
5435 	 * total items covers the worst case number of items we'll modify.
5436 	 */
5437 	ret = btrfs_reserve_metadata_space(root, 11);
5438 	if (ret)
5439 		return ret;
5440 
5441 	/*
5442 	 * we're using rename to replace one file with another, and the
5443 	 * replacement file is large.  Start IO on it now so we don't add
5444 	 * too much work to the end of the transaction
5445 	 */
5446 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5447 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5448 		filemap_flush(old_inode->i_mapping);
5449 
5450 	/* close the racy window with snapshot create/destroy ioctl */
5451 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5452 		down_read(&root->fs_info->subvol_sem);
5453 
5454 	trans = btrfs_start_transaction(root, 1);
5455 	btrfs_set_trans_block_group(trans, new_dir);
5456 
5457 	if (dest != root)
5458 		btrfs_record_root_in_trans(trans, dest);
5459 
5460 	ret = btrfs_set_inode_index(new_dir, &index);
5461 	if (ret)
5462 		goto out_fail;
5463 
5464 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5465 		/* force full log commit if subvolume involved. */
5466 		root->fs_info->last_trans_log_full_commit = trans->transid;
5467 	} else {
5468 		ret = btrfs_insert_inode_ref(trans, dest,
5469 					     new_dentry->d_name.name,
5470 					     new_dentry->d_name.len,
5471 					     old_inode->i_ino,
5472 					     new_dir->i_ino, index);
5473 		if (ret)
5474 			goto out_fail;
5475 		/*
5476 		 * this is an ugly little race, but the rename is required
5477 		 * to make sure that if we crash, the inode is either at the
5478 		 * old name or the new one.  pinning the log transaction lets
5479 		 * us make sure we don't allow a log commit to come in after
5480 		 * we unlink the name but before we add the new name back in.
5481 		 */
5482 		btrfs_pin_log_trans(root);
5483 	}
5484 	/*
5485 	 * make sure the inode gets flushed if it is replacing
5486 	 * something.
5487 	 */
5488 	if (new_inode && new_inode->i_size &&
5489 	    old_inode && S_ISREG(old_inode->i_mode)) {
5490 		btrfs_add_ordered_operation(trans, root, old_inode);
5491 	}
5492 
5493 	old_dir->i_ctime = old_dir->i_mtime = ctime;
5494 	new_dir->i_ctime = new_dir->i_mtime = ctime;
5495 	old_inode->i_ctime = ctime;
5496 
5497 	if (old_dentry->d_parent != new_dentry->d_parent)
5498 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5499 
5500 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5501 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5502 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5503 					old_dentry->d_name.name,
5504 					old_dentry->d_name.len);
5505 	} else {
5506 		btrfs_inc_nlink(old_dentry->d_inode);
5507 		ret = btrfs_unlink_inode(trans, root, old_dir,
5508 					 old_dentry->d_inode,
5509 					 old_dentry->d_name.name,
5510 					 old_dentry->d_name.len);
5511 	}
5512 	BUG_ON(ret);
5513 
5514 	if (new_inode) {
5515 		new_inode->i_ctime = CURRENT_TIME;
5516 		if (unlikely(new_inode->i_ino ==
5517 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5518 			root_objectid = BTRFS_I(new_inode)->location.objectid;
5519 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
5520 						root_objectid,
5521 						new_dentry->d_name.name,
5522 						new_dentry->d_name.len);
5523 			BUG_ON(new_inode->i_nlink == 0);
5524 		} else {
5525 			ret = btrfs_unlink_inode(trans, dest, new_dir,
5526 						 new_dentry->d_inode,
5527 						 new_dentry->d_name.name,
5528 						 new_dentry->d_name.len);
5529 		}
5530 		BUG_ON(ret);
5531 		if (new_inode->i_nlink == 0) {
5532 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5533 			BUG_ON(ret);
5534 		}
5535 	}
5536 
5537 	ret = btrfs_add_link(trans, new_dir, old_inode,
5538 			     new_dentry->d_name.name,
5539 			     new_dentry->d_name.len, 0, index);
5540 	BUG_ON(ret);
5541 
5542 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5543 		btrfs_log_new_name(trans, old_inode, old_dir,
5544 				   new_dentry->d_parent);
5545 		btrfs_end_log_trans(root);
5546 	}
5547 out_fail:
5548 	btrfs_end_transaction_throttle(trans, root);
5549 
5550 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5551 		up_read(&root->fs_info->subvol_sem);
5552 
5553 	btrfs_unreserve_metadata_space(root, 11);
5554 	return ret;
5555 }
5556 
5557 /*
5558  * some fairly slow code that needs optimization. This walks the list
5559  * of all the inodes with pending delalloc and forces them to disk.
5560  */
5561 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5562 {
5563 	struct list_head *head = &root->fs_info->delalloc_inodes;
5564 	struct btrfs_inode *binode;
5565 	struct inode *inode;
5566 
5567 	if (root->fs_info->sb->s_flags & MS_RDONLY)
5568 		return -EROFS;
5569 
5570 	spin_lock(&root->fs_info->delalloc_lock);
5571 	while (!list_empty(head)) {
5572 		binode = list_entry(head->next, struct btrfs_inode,
5573 				    delalloc_inodes);
5574 		inode = igrab(&binode->vfs_inode);
5575 		if (!inode)
5576 			list_del_init(&binode->delalloc_inodes);
5577 		spin_unlock(&root->fs_info->delalloc_lock);
5578 		if (inode) {
5579 			filemap_flush(inode->i_mapping);
5580 			if (delay_iput)
5581 				btrfs_add_delayed_iput(inode);
5582 			else
5583 				iput(inode);
5584 		}
5585 		cond_resched();
5586 		spin_lock(&root->fs_info->delalloc_lock);
5587 	}
5588 	spin_unlock(&root->fs_info->delalloc_lock);
5589 
5590 	/* the filemap_flush will queue IO into the worker threads, but
5591 	 * we have to make sure the IO is actually started and that
5592 	 * ordered extents get created before we return
5593 	 */
5594 	atomic_inc(&root->fs_info->async_submit_draining);
5595 	while (atomic_read(&root->fs_info->nr_async_submits) ||
5596 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
5597 		wait_event(root->fs_info->async_submit_wait,
5598 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5599 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5600 	}
5601 	atomic_dec(&root->fs_info->async_submit_draining);
5602 	return 0;
5603 }
5604 
5605 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5606 			 const char *symname)
5607 {
5608 	struct btrfs_trans_handle *trans;
5609 	struct btrfs_root *root = BTRFS_I(dir)->root;
5610 	struct btrfs_path *path;
5611 	struct btrfs_key key;
5612 	struct inode *inode = NULL;
5613 	int err;
5614 	int drop_inode = 0;
5615 	u64 objectid;
5616 	u64 index = 0;
5617 	int name_len;
5618 	int datasize;
5619 	unsigned long ptr;
5620 	struct btrfs_file_extent_item *ei;
5621 	struct extent_buffer *leaf;
5622 	unsigned long nr = 0;
5623 
5624 	name_len = strlen(symname) + 1;
5625 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5626 		return -ENAMETOOLONG;
5627 
5628 	/*
5629 	 * 2 items for inode item and ref
5630 	 * 2 items for dir items
5631 	 * 1 item for xattr if selinux is on
5632 	 */
5633 	err = btrfs_reserve_metadata_space(root, 5);
5634 	if (err)
5635 		return err;
5636 
5637 	trans = btrfs_start_transaction(root, 1);
5638 	if (!trans)
5639 		goto out_fail;
5640 	btrfs_set_trans_block_group(trans, dir);
5641 
5642 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5643 	if (err) {
5644 		err = -ENOSPC;
5645 		goto out_unlock;
5646 	}
5647 
5648 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5649 				dentry->d_name.len,
5650 				dentry->d_parent->d_inode->i_ino, objectid,
5651 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5652 				&index);
5653 	err = PTR_ERR(inode);
5654 	if (IS_ERR(inode))
5655 		goto out_unlock;
5656 
5657 	err = btrfs_init_inode_security(trans, inode, dir);
5658 	if (err) {
5659 		drop_inode = 1;
5660 		goto out_unlock;
5661 	}
5662 
5663 	btrfs_set_trans_block_group(trans, inode);
5664 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5665 	if (err)
5666 		drop_inode = 1;
5667 	else {
5668 		inode->i_mapping->a_ops = &btrfs_aops;
5669 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5670 		inode->i_fop = &btrfs_file_operations;
5671 		inode->i_op = &btrfs_file_inode_operations;
5672 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5673 	}
5674 	btrfs_update_inode_block_group(trans, inode);
5675 	btrfs_update_inode_block_group(trans, dir);
5676 	if (drop_inode)
5677 		goto out_unlock;
5678 
5679 	path = btrfs_alloc_path();
5680 	BUG_ON(!path);
5681 	key.objectid = inode->i_ino;
5682 	key.offset = 0;
5683 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5684 	datasize = btrfs_file_extent_calc_inline_size(name_len);
5685 	err = btrfs_insert_empty_item(trans, root, path, &key,
5686 				      datasize);
5687 	if (err) {
5688 		drop_inode = 1;
5689 		goto out_unlock;
5690 	}
5691 	leaf = path->nodes[0];
5692 	ei = btrfs_item_ptr(leaf, path->slots[0],
5693 			    struct btrfs_file_extent_item);
5694 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5695 	btrfs_set_file_extent_type(leaf, ei,
5696 				   BTRFS_FILE_EXTENT_INLINE);
5697 	btrfs_set_file_extent_encryption(leaf, ei, 0);
5698 	btrfs_set_file_extent_compression(leaf, ei, 0);
5699 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5700 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5701 
5702 	ptr = btrfs_file_extent_inline_start(ei);
5703 	write_extent_buffer(leaf, symname, ptr, name_len);
5704 	btrfs_mark_buffer_dirty(leaf);
5705 	btrfs_free_path(path);
5706 
5707 	inode->i_op = &btrfs_symlink_inode_operations;
5708 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
5709 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5710 	inode_set_bytes(inode, name_len);
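	/* the trailing NUL is stored on disk but not counted in i_size */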
5711 	btrfs_i_size_write(inode, name_len - 1);
5712 	err = btrfs_update_inode(trans, root, inode);
5713 	if (err)
5714 		drop_inode = 1;
5715 
5716 out_unlock:
5717 	nr = trans->blocks_used;
5718 	btrfs_end_transaction_throttle(trans, root);
5719 out_fail:
5720 	btrfs_unreserve_metadata_space(root, 5);
5721 	if (drop_inode) {
5722 		inode_dec_link_count(inode);
5723 		iput(inode);
5724 	}
5725 	btrfs_btree_balance_dirty(root, nr);
5726 	return err;
5727 }
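
/*
 * Resulting on-disk layout (sketch): the target string becomes a
 * single inline file extent item in the fs tree, roughly
 *
 *	key  (inode->i_ino, BTRFS_EXTENT_DATA_KEY, 0)
 *	item struct btrfs_file_extent_item:
 *		type      = BTRFS_FILE_EXTENT_INLINE
 *		ram_bytes = strlen(symname) + 1
 *		<"target\0" follows the item header>
 *
 * which lets readlink be served through the page cache
 * (btrfs_symlink_aops -> btrfs_readpage) with no separate data extent.
 */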
5728 
5729 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5730 			u64 alloc_hint, int mode, loff_t actual_len)
5731 {
5732 	struct btrfs_trans_handle *trans;
5733 	struct btrfs_root *root = BTRFS_I(inode)->root;
5734 	struct btrfs_key ins;
5735 	u64 cur_offset = start;
5736 	u64 num_bytes = end - start;
5737 	int ret = 0;
5738 	u64 i_size;
5739 
5740 	while (num_bytes > 0) {
5741 		trans = btrfs_start_transaction(root, 1);
5742 
5743 		ret = btrfs_reserve_extent(trans, root, num_bytes,
5744 					   root->sectorsize, 0, alloc_hint,
5745 					   (u64)-1, &ins, 1);
5746 		if (ret) {
5747 			WARN_ON(1);
5748 			goto stop_trans;
5749 		}
5750 
5751 		ret = btrfs_reserve_metadata_space(root, 3);
5752 		if (ret) {
5753 			btrfs_free_reserved_extent(root, ins.objectid,
5754 						   ins.offset);
5755 			goto stop_trans;
5756 		}
5757 
5758 		ret = insert_reserved_file_extent(trans, inode,
5759 						  cur_offset, ins.objectid,
5760 						  ins.offset, ins.offset,
5761 						  ins.offset, 0, 0, 0,
5762 						  BTRFS_FILE_EXTENT_PREALLOC);
5763 		BUG_ON(ret);
5764 		btrfs_drop_extent_cache(inode, cur_offset,
5765 					cur_offset + ins.offset - 1, 0);
5766 
5767 		num_bytes -= ins.offset;
5768 		cur_offset += ins.offset;
5769 		alloc_hint = ins.objectid + ins.offset;
5770 
5771 		inode->i_ctime = CURRENT_TIME;
5772 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5773 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5774 		    (actual_len > inode->i_size) &&
5775 		    (cur_offset > inode->i_size)) {
5776 
5777 			if (cur_offset > actual_len)
5778 				i_size = actual_len;
5779 			else
5780 				i_size = cur_offset;
5781 			i_size_write(inode, i_size);
5782 			btrfs_ordered_update_i_size(inode, i_size, NULL);
5783 		}
5784 
5785 		ret = btrfs_update_inode(trans, root, inode);
5786 		BUG_ON(ret);
5787 
5788 		btrfs_end_transaction(trans, root);
5789 		btrfs_unreserve_metadata_space(root, 3);
5790 	}
5791 	return ret;
5792 
5793 stop_trans:
5794 	btrfs_end_transaction(trans, root);
5795 	return ret;
5796 
5797 }
5798 
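/*
 * Backs the fallocate() file operation.  A hypothetical userspace
 * sketch of the two modes handled here:
 *
 *	fallocate(fd, 0, 0, 1 << 20);                    extends i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);  space only
 *
 * Both reserve the range as PREALLOC extents; with FALLOC_FL_KEEP_SIZE
 * the i_size update in prealloc_file_range() above is skipped.
 */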
5799 static long btrfs_fallocate(struct inode *inode, int mode,
5800 			    loff_t offset, loff_t len)
5801 {
5802 	struct extent_state *cached_state = NULL;
5803 	u64 cur_offset;
5804 	u64 last_byte;
5805 	u64 alloc_start;
5806 	u64 alloc_end;
5807 	u64 alloc_hint = 0;
5808 	u64 locked_end;
5809 	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5810 	struct extent_map *em;
5811 	int ret;
5812 
5813 	alloc_start = offset & ~mask;
5814 	alloc_end = (offset + len + mask) & ~mask;
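	/*
	 * e.g. with a 4K sectorsize (mask == 4095), offset 1000 and
	 * len 3000 round out to alloc_start == 0 and alloc_end == 4096.
	 */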
5815 
5816 	/*
5817 	 * wait for ordered IO before we have any locks.  We'll loop again
5818 	 * below with the locks held.
5819 	 */
5820 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5821 
5822 	mutex_lock(&inode->i_mutex);
5823 	if (alloc_start > inode->i_size) {
5824 		ret = btrfs_cont_expand(inode, alloc_start);
5825 		if (ret)
5826 			goto out;
5827 	}
5828 
5829 	ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
5830 					  alloc_end - alloc_start);
5831 	if (ret)
5832 		goto out;
5833 
5834 	locked_end = alloc_end - 1;
5835 	while (1) {
5836 		struct btrfs_ordered_extent *ordered;
5837 
5838 		/* the extent lock is ordered inside the running
5839 		 * transaction
5840 		 */
5841 		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
5842 				 locked_end, 0, &cached_state, GFP_NOFS);
5843 		ordered = btrfs_lookup_first_ordered_extent(inode,
5844 							    alloc_end - 1);
5845 		if (ordered &&
5846 		    ordered->file_offset + ordered->len > alloc_start &&
5847 		    ordered->file_offset < alloc_end) {
5848 			btrfs_put_ordered_extent(ordered);
5849 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
5850 					     alloc_start, locked_end,
5851 					     &cached_state, GFP_NOFS);
5852 			/*
5853 			 * we can't wait on the range with the transaction
5854 			 * running or with the extent lock held
5855 			 */
5856 			btrfs_wait_ordered_range(inode, alloc_start,
5857 						 alloc_end - alloc_start);
5858 		} else {
5859 			if (ordered)
5860 				btrfs_put_ordered_extent(ordered);
5861 			break;
5862 		}
5863 	}
5864 
5865 	cur_offset = alloc_start;
5866 	while (1) {
5867 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5868 				      alloc_end - cur_offset, 0);
5869 		BUG_ON(IS_ERR(em) || !em);
5870 		last_byte = min(extent_map_end(em), alloc_end);
5871 		last_byte = (last_byte + mask) & ~mask;
5872 		if (em->block_start == EXTENT_MAP_HOLE ||
5873 		    (cur_offset >= inode->i_size &&
5874 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5875 			ret = prealloc_file_range(inode,
5876 						  cur_offset, last_byte,
5877 						  alloc_hint, mode, offset + len);
5878 			if (ret < 0) {
5879 				free_extent_map(em);
5880 				break;
5881 			}
5882 		}
5883 		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5884 			alloc_hint = em->block_start;
5885 		free_extent_map(em);
5886 
5887 		cur_offset = last_byte;
5888 		if (cur_offset >= alloc_end) {
5889 			ret = 0;
5890 			break;
5891 		}
5892 	}
5893 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5894 			     &cached_state, GFP_NOFS);
5895 
5896 	btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
5897 				       alloc_end - alloc_start);
5898 out:
5899 	mutex_unlock(&inode->i_mutex);
5900 	return ret;
5901 }
5902 
5903 static int btrfs_set_page_dirty(struct page *page)
5904 {
5905 	return __set_page_dirty_nobuffers(page);
5906 }
5907 
5908 static int btrfs_permission(struct inode *inode, int mask)
5909 {
5910 	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5911 		return -EACCES;
5912 	return generic_permission(inode, mask, btrfs_check_acl);
5913 }
5914 
5915 static const struct inode_operations btrfs_dir_inode_operations = {
5916 	.getattr	= btrfs_getattr,
5917 	.lookup		= btrfs_lookup,
5918 	.create		= btrfs_create,
5919 	.unlink		= btrfs_unlink,
5920 	.link		= btrfs_link,
5921 	.mkdir		= btrfs_mkdir,
5922 	.rmdir		= btrfs_rmdir,
5923 	.rename		= btrfs_rename,
5924 	.symlink	= btrfs_symlink,
5925 	.setattr	= btrfs_setattr,
5926 	.mknod		= btrfs_mknod,
5927 	.setxattr	= btrfs_setxattr,
5928 	.getxattr	= btrfs_getxattr,
5929 	.listxattr	= btrfs_listxattr,
5930 	.removexattr	= btrfs_removexattr,
5931 	.permission	= btrfs_permission,
5932 };
5933 static const struct inode_operations btrfs_dir_ro_inode_operations = {
5934 	.lookup		= btrfs_lookup,
5935 	.permission	= btrfs_permission,
5936 };
5937 
5938 static const struct file_operations btrfs_dir_file_operations = {
5939 	.llseek		= generic_file_llseek,
5940 	.read		= generic_read_dir,
5941 	.readdir	= btrfs_real_readdir,
5942 	.unlocked_ioctl	= btrfs_ioctl,
5943 #ifdef CONFIG_COMPAT
5944 	.compat_ioctl	= btrfs_ioctl,
5945 #endif
5946 	.release        = btrfs_release_file,
5947 	.fsync		= btrfs_sync_file,
5948 };
5949 
5950 static struct extent_io_ops btrfs_extent_io_ops = {
5951 	.fill_delalloc = run_delalloc_range,
5952 	.submit_bio_hook = btrfs_submit_bio_hook,
5953 	.merge_bio_hook = btrfs_merge_bio_hook,
5954 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
5955 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
5956 	.writepage_start_hook = btrfs_writepage_start_hook,
5957 	.readpage_io_failed_hook = btrfs_io_failed_hook,
5958 	.set_bit_hook = btrfs_set_bit_hook,
5959 	.clear_bit_hook = btrfs_clear_bit_hook,
5960 	.merge_extent_hook = btrfs_merge_extent_hook,
5961 	.split_extent_hook = btrfs_split_extent_hook,
5962 };
5963 
5964 /*
5965  * btrfs doesn't support the bmap operation because swapfiles
5966  * use bmap to make a mapping of extents in the file.  They assume
5967  * these extents won't change over the life of the file and they
5968  * use the bmap result to do IO directly to the drive.
5969  *
5970  * the btrfs bmap call would return logical addresses that aren't
5971  * suitable for IO, and they will also change frequently as COW
5972  * operations happen.  So, swapfile + btrfs == corruption.
5973  *
5974  * For now we're avoiding this by dropping bmap.
5975  */
5976 static const struct address_space_operations btrfs_aops = {
5977 	.readpage	= btrfs_readpage,
5978 	.writepage	= btrfs_writepage,
5979 	.writepages	= btrfs_writepages,
5980 	.readpages	= btrfs_readpages,
5981 	.sync_page	= block_sync_page,
5982 	.direct_IO	= btrfs_direct_IO,
5983 	.invalidatepage = btrfs_invalidatepage,
5984 	.releasepage	= btrfs_releasepage,
5985 	.set_page_dirty	= btrfs_set_page_dirty,
5986 	.error_remove_page = generic_error_remove_page,
5987 };
5988 
5989 static const struct address_space_operations btrfs_symlink_aops = {
5990 	.readpage	= btrfs_readpage,
5991 	.writepage	= btrfs_writepage,
5992 	.invalidatepage = btrfs_invalidatepage,
5993 	.releasepage	= btrfs_releasepage,
5994 };
5995 
5996 static const struct inode_operations btrfs_file_inode_operations = {
5997 	.truncate	= btrfs_truncate,
5998 	.getattr	= btrfs_getattr,
5999 	.setattr	= btrfs_setattr,
6000 	.setxattr	= btrfs_setxattr,
6001 	.getxattr	= btrfs_getxattr,
6002 	.listxattr      = btrfs_listxattr,
6003 	.removexattr	= btrfs_removexattr,
6004 	.permission	= btrfs_permission,
6005 	.fallocate	= btrfs_fallocate,
6006 	.fiemap		= btrfs_fiemap,
6007 };
6008 static const struct inode_operations btrfs_special_inode_operations = {
6009 	.getattr	= btrfs_getattr,
6010 	.setattr	= btrfs_setattr,
6011 	.permission	= btrfs_permission,
6012 	.setxattr	= btrfs_setxattr,
6013 	.getxattr	= btrfs_getxattr,
6014 	.listxattr	= btrfs_listxattr,
6015 	.removexattr	= btrfs_removexattr,
6016 };
6017 static const struct inode_operations btrfs_symlink_inode_operations = {
6018 	.readlink	= generic_readlink,
6019 	.follow_link	= page_follow_link_light,
6020 	.put_link	= page_put_link,
6021 	.permission	= btrfs_permission,
6022 	.setxattr	= btrfs_setxattr,
6023 	.getxattr	= btrfs_getxattr,
6024 	.listxattr	= btrfs_listxattr,
6025 	.removexattr	= btrfs_removexattr,
6026 };
6027 
6028 const struct dentry_operations btrfs_dentry_operations = {
6029 	.d_delete	= btrfs_dentry_delete,
6030 };
6031