xref: /openbmc/linux/fs/btrfs/inode.c (revision 82ced6fd)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
40 #include "compat.h"
41 #include "ctree.h"
42 #include "disk-io.h"
43 #include "transaction.h"
44 #include "btrfs_inode.h"
45 #include "ioctl.h"
46 #include "print-tree.h"
47 #include "volumes.h"
48 #include "ordered-data.h"
49 #include "xattr.h"
50 #include "tree-log.h"
51 #include "ref-cache.h"
52 #include "compression.h"
53 #include "locking.h"
54 
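/*
 * arguments used when looking up an in-core inode by inode number
 * within a given root
 */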
55 struct btrfs_iget_args {
56 	u64 ino;
57 	struct btrfs_root *root;
58 };
59 
60 static struct inode_operations btrfs_dir_inode_operations;
61 static struct inode_operations btrfs_symlink_inode_operations;
62 static struct inode_operations btrfs_dir_ro_inode_operations;
63 static struct inode_operations btrfs_special_inode_operations;
64 static struct inode_operations btrfs_file_inode_operations;
65 static struct address_space_operations btrfs_aops;
66 static struct address_space_operations btrfs_symlink_aops;
67 static struct file_operations btrfs_dir_file_operations;
68 static struct extent_io_ops btrfs_extent_io_ops;
69 
70 static struct kmem_cache *btrfs_inode_cachep;
71 struct kmem_cache *btrfs_trans_handle_cachep;
72 struct kmem_cache *btrfs_transaction_cachep;
73 struct kmem_cache *btrfs_path_cachep;
74 
75 #define S_SHIFT 12
76 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
77 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
78 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
79 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
80 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
81 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
82 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
83 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
84 };
85 
86 static void btrfs_truncate(struct inode *inode);
87 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
88 static noinline int cow_file_range(struct inode *inode,
89 				   struct page *locked_page,
90 				   u64 start, u64 end, int *page_started,
91 				   unsigned long *nr_written, int unlock);
92 
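/*
 * initialize the security attributes of a new inode: set up the ACLs
 * inherited from the parent directory, then the security xattrs
 */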
93 static int btrfs_init_inode_security(struct inode *inode,  struct inode *dir)
94 {
95 	int err;
96 
97 	err = btrfs_init_acl(inode, dir);
98 	if (!err)
99 		err = btrfs_xattr_security_init(inode, dir);
100 	return err;
101 }
102 
103 /*
104  * this does all the hard work for inserting an inline extent into
105  * the btree.  The caller should have done a btrfs_drop_extents so that
106  * no overlapping inline items exist in the btree
107  */
108 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
109 				struct btrfs_root *root, struct inode *inode,
110 				u64 start, size_t size, size_t compressed_size,
111 				struct page **compressed_pages)
112 {
113 	struct btrfs_key key;
114 	struct btrfs_path *path;
115 	struct extent_buffer *leaf;
116 	struct page *page = NULL;
117 	char *kaddr;
118 	unsigned long ptr;
119 	struct btrfs_file_extent_item *ei;
120 	int err = 0;
121 	int ret;
122 	size_t cur_size = size;
123 	size_t datasize;
124 	unsigned long offset;
125 	int use_compress = 0;
126 
127 	if (compressed_size && compressed_pages) {
128 		use_compress = 1;
129 		cur_size = compressed_size;
130 	}
131 
132 	path = btrfs_alloc_path();
133 	if (!path)
134 		return -ENOMEM;
135 
136 	path->leave_spinning = 1;
137 	btrfs_set_trans_block_group(trans, inode);
138 
139 	key.objectid = inode->i_ino;
140 	key.offset = start;
141 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
142 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
143 
144 	inode_add_bytes(inode, size);
145 	ret = btrfs_insert_empty_item(trans, root, path, &key,
146 				      datasize);
147 	BUG_ON(ret);
148 	if (ret) {
149 		err = ret;
150 		goto fail;
151 	}
152 	leaf = path->nodes[0];
153 	ei = btrfs_item_ptr(leaf, path->slots[0],
154 			    struct btrfs_file_extent_item);
155 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
156 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
157 	btrfs_set_file_extent_encryption(leaf, ei, 0);
158 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
159 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
160 	ptr = btrfs_file_extent_inline_start(ei);
161 
162 	if (use_compress) {
163 		struct page *cpage;
164 		int i = 0;
165 		while (compressed_size > 0) {
166 			cpage = compressed_pages[i];
167 			cur_size = min_t(unsigned long, compressed_size,
168 				       PAGE_CACHE_SIZE);
169 
170 			kaddr = kmap_atomic(cpage, KM_USER0);
171 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
172 			kunmap_atomic(kaddr, KM_USER0);
173 
174 			i++;
175 			ptr += cur_size;
176 			compressed_size -= cur_size;
177 		}
178 		btrfs_set_file_extent_compression(leaf, ei,
179 						  BTRFS_COMPRESS_ZLIB);
180 	} else {
181 		page = find_get_page(inode->i_mapping,
182 				     start >> PAGE_CACHE_SHIFT);
183 		btrfs_set_file_extent_compression(leaf, ei, 0);
184 		kaddr = kmap_atomic(page, KM_USER0);
185 		offset = start & (PAGE_CACHE_SIZE - 1);
186 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
187 		kunmap_atomic(kaddr, KM_USER0);
188 		page_cache_release(page);
189 	}
190 	btrfs_mark_buffer_dirty(leaf);
191 	btrfs_free_path(path);
192 
193 	BTRFS_I(inode)->disk_i_size = inode->i_size;
194 	btrfs_update_inode(trans, root, inode);
195 	return 0;
196 fail:
197 	btrfs_free_path(path);
198 	return err;
199 }
200 
201 
202 /*
203  * conditionally insert an inline extent into the file.  This
204  * does the checks required to make sure the data is small enough
205  * to fit as an inline extent.
206  */
207 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
208 				 struct btrfs_root *root,
209 				 struct inode *inode, u64 start, u64 end,
210 				 size_t compressed_size,
211 				 struct page **compressed_pages)
212 {
213 	u64 isize = i_size_read(inode);
214 	u64 actual_end = min(end + 1, isize);
215 	u64 inline_len = actual_end - start;
216 	u64 aligned_end = (end + root->sectorsize - 1) &
217 			~((u64)root->sectorsize - 1);
218 	u64 hint_byte;
219 	u64 data_len = inline_len;
220 	int ret;
221 
222 	if (compressed_size)
223 		data_len = compressed_size;
224 
225 	if (start > 0 ||
226 	    actual_end >= PAGE_CACHE_SIZE ||
227 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
228 	    (!compressed_size &&
229 	    (actual_end & (root->sectorsize - 1)) == 0) ||
230 	    end + 1 < isize ||
231 	    data_len > root->fs_info->max_inline) {
232 		return 1;
233 	}
234 
235 	ret = btrfs_drop_extents(trans, root, inode, start,
236 				 aligned_end, aligned_end, start, &hint_byte);
237 	BUG_ON(ret);
238 
239 	if (isize > actual_end)
240 		inline_len = min_t(u64, isize, actual_end);
241 	ret = insert_inline_extent(trans, root, inode, start,
242 				   inline_len, compressed_size,
243 				   compressed_pages);
244 	BUG_ON(ret);
245 	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
246 	return 0;
247 }
248 
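/*
 * one extent queued by compress_file_range (compressed, or an
 * uncompressed fallback when pages == NULL), waiting for
 * submit_compressed_extents to allocate disk space and write it out
 */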
249 struct async_extent {
250 	u64 start;
251 	u64 ram_size;
252 	u64 compressed_size;
253 	struct page **pages;
254 	unsigned long nr_pages;
255 	struct list_head list;
256 };
257 
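/*
 * one unit of work handed to the delalloc workers: the [start, end]
 * range of the inode to compress/COW, plus the list of async_extents
 * produced for it
 */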
258 struct async_cow {
259 	struct inode *inode;
260 	struct btrfs_root *root;
261 	struct page *locked_page;
262 	u64 start;
263 	u64 end;
264 	struct list_head extents;
265 	struct btrfs_work work;
266 };
267 
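/*
 * allocate and queue a single async_extent on the cow->extents list
 * for phase two (submit_compressed_extents) to process
 */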
268 static noinline int add_async_extent(struct async_cow *cow,
269 				     u64 start, u64 ram_size,
270 				     u64 compressed_size,
271 				     struct page **pages,
272 				     unsigned long nr_pages)
273 {
274 	struct async_extent *async_extent;
275 
276 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
277 	async_extent->start = start;
278 	async_extent->ram_size = ram_size;
279 	async_extent->compressed_size = compressed_size;
280 	async_extent->pages = pages;
281 	async_extent->nr_pages = nr_pages;
282 	list_add_tail(&async_extent->list, &cow->extents);
283 	return 0;
284 }
285 
286 /*
287  * we create compressed extents in two phases.  The first
288  * phase compresses a range of pages that have already been
289  * locked (both pages and state bits are locked).
290  *
291  * This is done inside an ordered work queue, and the compression
292  * is spread across many cpus.  The actual IO submission is step
293  * two, and the ordered work queue takes care of making sure that
294  * happens in the same order things were put onto the queue by
295  * writepages and friends.
296  *
297  * If this code finds it can't get good compression, it puts an
298  * entry onto the work queue to write the uncompressed bytes.  This
299  * makes sure that both compressed inodes and uncompressed inodes
300  * are written in the same order that pdflush sent them down.
301  */
302 static noinline int compress_file_range(struct inode *inode,
303 					struct page *locked_page,
304 					u64 start, u64 end,
305 					struct async_cow *async_cow,
306 					int *num_added)
307 {
308 	struct btrfs_root *root = BTRFS_I(inode)->root;
309 	struct btrfs_trans_handle *trans;
310 	u64 num_bytes;
311 	u64 orig_start;
312 	u64 disk_num_bytes;
313 	u64 blocksize = root->sectorsize;
314 	u64 actual_end;
315 	u64 isize = i_size_read(inode);
316 	int ret = 0;
317 	struct page **pages = NULL;
318 	unsigned long nr_pages;
319 	unsigned long nr_pages_ret = 0;
320 	unsigned long total_compressed = 0;
321 	unsigned long total_in = 0;
322 	unsigned long max_compressed = 128 * 1024;
323 	unsigned long max_uncompressed = 128 * 1024;
324 	int i;
325 	int will_compress;
326 
327 	orig_start = start;
328 
329 	actual_end = min_t(u64, isize, end + 1);
330 again:
331 	will_compress = 0;
332 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
333 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
334 
335 	/*
336 	 * we don't want to send crud past the end of i_size through
337 	 * compression, that's just a waste of CPU time.  So, if the
338 	 * end of the file is before the start of our current
339 	 * requested range of bytes, we bail out to the uncompressed
340 	 * cleanup code that can deal with all of this.
341 	 *
342 	 * It isn't really the fastest way to fix things, but this is a
343 	 * very uncommon corner.
344 	 */
345 	if (actual_end <= start)
346 		goto cleanup_and_bail_uncompressed;
347 
348 	total_compressed = actual_end - start;
349 
350 	/* we want to make sure that the amount of ram required to uncompress
351 	 * an extent is reasonable, so we limit the total size in ram
352 	 * of a compressed extent to 128k.  This is a crucial number
353 	 * because it also controls how easily we can spread reads across
354 	 * cpus for decompression.
355 	 *
356 	 * We also want to make sure the amount of IO required to do
357 	 * a random read is reasonably small, so we limit the size of
358 	 * a compressed extent to 128k.
359 	 */
360 	total_compressed = min(total_compressed, max_uncompressed);
361 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
362 	num_bytes = max(blocksize,  num_bytes);
363 	disk_num_bytes = num_bytes;
364 	total_in = 0;
365 	ret = 0;
366 
367 	/*
368 	 * we do compression for mount -o compress and when the
369 	 * inode has not been flagged as nocompress.  This flag can
370 	 * change at any time if we discover bad compression ratios.
371 	 */
372 	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
373 	    btrfs_test_opt(root, COMPRESS)) {
374 		WARN_ON(pages);
375 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
376 
377 		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
378 						total_compressed, pages,
379 						nr_pages, &nr_pages_ret,
380 						&total_in,
381 						&total_compressed,
382 						max_compressed);
383 
384 		if (!ret) {
385 			unsigned long offset = total_compressed &
386 				(PAGE_CACHE_SIZE - 1);
387 			struct page *page = pages[nr_pages_ret - 1];
388 			char *kaddr;
389 
390 			/* zero the tail end of the last page, we might be
391 			 * sending it down to disk
392 			 */
393 			if (offset) {
394 				kaddr = kmap_atomic(page, KM_USER0);
395 				memset(kaddr + offset, 0,
396 				       PAGE_CACHE_SIZE - offset);
397 				kunmap_atomic(kaddr, KM_USER0);
398 			}
399 			will_compress = 1;
400 		}
401 	}
402 	if (start == 0) {
403 		trans = btrfs_join_transaction(root, 1);
404 		BUG_ON(!trans);
405 		btrfs_set_trans_block_group(trans, inode);
406 
407 		/* lets try to make an inline extent */
408 		if (ret || total_in < (actual_end - start)) {
409 			/* we didn't compress the entire range, try
410 			 * to make an uncompressed inline extent.
411 			 */
412 			ret = cow_file_range_inline(trans, root, inode,
413 						    start, end, 0, NULL);
414 		} else {
415 			/* try making a compressed inline extent */
416 			ret = cow_file_range_inline(trans, root, inode,
417 						    start, end,
418 						    total_compressed, pages);
419 		}
420 		btrfs_end_transaction(trans, root);
421 		if (ret == 0) {
422 			/*
423 			 * inline extent creation worked, we don't need
424 			 * to create any more async work items.  Unlock
425 			 * and free up our temp pages.
426 			 */
427 			extent_clear_unlock_delalloc(inode,
428 						     &BTRFS_I(inode)->io_tree,
429 						     start, end, NULL, 1, 0,
430 						     0, 1, 1, 1);
431 			ret = 0;
432 			goto free_pages_out;
433 		}
434 	}
435 
436 	if (will_compress) {
437 		/*
438 		 * we aren't doing an inline extent; round the compressed size
439 		 * up to a block size boundary so the allocator does sane
440 		 * things
441 		 */
442 		total_compressed = (total_compressed + blocksize - 1) &
443 			~(blocksize - 1);
444 
445 		/*
446 		 * one last check to make sure the compression is really a
447 		 * win: compare the page count read with the blocks on disk
448 		 */
449 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
450 			~(PAGE_CACHE_SIZE - 1);
451 		if (total_compressed >= total_in) {
452 			will_compress = 0;
453 		} else {
454 			disk_num_bytes = total_compressed;
455 			num_bytes = total_in;
456 		}
457 	}
458 	if (!will_compress && pages) {
459 		/*
460 		 * the compression code ran but failed to make things smaller,
461 		 * free any pages it allocated and our page pointer array
462 		 */
463 		for (i = 0; i < nr_pages_ret; i++) {
464 			WARN_ON(pages[i]->mapping);
465 			page_cache_release(pages[i]);
466 		}
467 		kfree(pages);
468 		pages = NULL;
469 		total_compressed = 0;
470 		nr_pages_ret = 0;
471 
472 		/* flag the file so we don't compress in the future */
473 		btrfs_set_flag(inode, NOCOMPRESS);
474 	}
475 	if (will_compress) {
476 		*num_added += 1;
477 
478 		/* the async work queues will take care of doing actual
479 		 * allocation on disk for these compressed pages,
480 		 * and will submit them to the elevator.
481 		 */
482 		add_async_extent(async_cow, start, num_bytes,
483 				 total_compressed, pages, nr_pages_ret);
484 
485 		if (start + num_bytes < end && start + num_bytes < actual_end) {
486 			start += num_bytes;
487 			pages = NULL;
488 			cond_resched();
489 			goto again;
490 		}
491 	} else {
492 cleanup_and_bail_uncompressed:
493 		/*
494 		 * No compression, but we still need to write the pages in
495 		 * the file we've been given so far.  redirty the locked
496 		 * page if it corresponds to our extent and set things up
497 		 * for the async work queue to run cow_file_range to do
498 		 * the normal delalloc dance
499 		 */
500 		if (page_offset(locked_page) >= start &&
501 		    page_offset(locked_page) <= end) {
502 			__set_page_dirty_nobuffers(locked_page);
503 			/* unlocked later on in the async handlers */
504 		}
505 		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
506 		*num_added += 1;
507 	}
508 
509 out:
510 	return 0;
511 
512 free_pages_out:
513 	for (i = 0; i < nr_pages_ret; i++) {
514 		WARN_ON(pages[i]->mapping);
515 		page_cache_release(pages[i]);
516 	}
517 	kfree(pages);
518 
519 	goto out;
520 }
521 
522 /*
523  * phase two of compressed writeback.  This is the ordered portion
524  * of the code, which only gets called in the order the work was
525  * queued.  We walk all the async extents created by compress_file_range
526  * and send them down to the disk.
527  */
528 static noinline int submit_compressed_extents(struct inode *inode,
529 					      struct async_cow *async_cow)
530 {
531 	struct async_extent *async_extent;
532 	u64 alloc_hint = 0;
533 	struct btrfs_trans_handle *trans;
534 	struct btrfs_key ins;
535 	struct extent_map *em;
536 	struct btrfs_root *root = BTRFS_I(inode)->root;
537 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
538 	struct extent_io_tree *io_tree;
539 	int ret;
540 
541 	if (list_empty(&async_cow->extents))
542 		return 0;
543 
544 	trans = btrfs_join_transaction(root, 1);
545 
546 	while (!list_empty(&async_cow->extents)) {
547 		async_extent = list_entry(async_cow->extents.next,
548 					  struct async_extent, list);
549 		list_del(&async_extent->list);
550 
551 		io_tree = &BTRFS_I(inode)->io_tree;
552 
553 		/* did the compression code fall back to uncompressed IO? */
554 		if (!async_extent->pages) {
555 			int page_started = 0;
556 			unsigned long nr_written = 0;
557 
558 			lock_extent(io_tree, async_extent->start,
559 				    async_extent->start +
560 				    async_extent->ram_size - 1, GFP_NOFS);
561 
562 			/* allocate blocks */
563 			cow_file_range(inode, async_cow->locked_page,
564 				       async_extent->start,
565 				       async_extent->start +
566 				       async_extent->ram_size - 1,
567 				       &page_started, &nr_written, 0);
568 
569 			/*
570 			 * if page_started, cow_file_range inserted an
571 			 * inline extent and took care of all the unlocking
572 			 * and IO for us.  Otherwise, we need to submit
573 			 * all those pages down to the drive.
574 			 */
575 			if (!page_started)
576 				extent_write_locked_range(io_tree,
577 						  inode, async_extent->start,
578 						  async_extent->start +
579 						  async_extent->ram_size - 1,
580 						  btrfs_get_extent,
581 						  WB_SYNC_ALL);
582 			kfree(async_extent);
583 			cond_resched();
584 			continue;
585 		}
586 
587 		lock_extent(io_tree, async_extent->start,
588 			    async_extent->start + async_extent->ram_size - 1,
589 			    GFP_NOFS);
590 		/*
591 		 * here we're doing allocation and writeback of the
592 		 * compressed pages
593 		 */
594 		btrfs_drop_extent_cache(inode, async_extent->start,
595 					async_extent->start +
596 					async_extent->ram_size - 1, 0);
597 
598 		ret = btrfs_reserve_extent(trans, root,
599 					   async_extent->compressed_size,
600 					   async_extent->compressed_size,
601 					   0, alloc_hint,
602 					   (u64)-1, &ins, 1);
603 		BUG_ON(ret);
604 		em = alloc_extent_map(GFP_NOFS);
605 		em->start = async_extent->start;
606 		em->len = async_extent->ram_size;
607 		em->orig_start = em->start;
608 
609 		em->block_start = ins.objectid;
610 		em->block_len = ins.offset;
611 		em->bdev = root->fs_info->fs_devices->latest_bdev;
612 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
613 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
614 
615 		while (1) {
616 			spin_lock(&em_tree->lock);
617 			ret = add_extent_mapping(em_tree, em);
618 			spin_unlock(&em_tree->lock);
619 			if (ret != -EEXIST) {
620 				free_extent_map(em);
621 				break;
622 			}
623 			btrfs_drop_extent_cache(inode, async_extent->start,
624 						async_extent->start +
625 						async_extent->ram_size - 1, 0);
626 		}
627 
628 		ret = btrfs_add_ordered_extent(inode, async_extent->start,
629 					       ins.objectid,
630 					       async_extent->ram_size,
631 					       ins.offset,
632 					       BTRFS_ORDERED_COMPRESSED);
633 		BUG_ON(ret);
634 
635 		btrfs_end_transaction(trans, root);
636 
637 		/*
638 		 * clear dirty, set writeback and unlock the pages.
639 		 */
640 		extent_clear_unlock_delalloc(inode,
641 					     &BTRFS_I(inode)->io_tree,
642 					     async_extent->start,
643 					     async_extent->start +
644 					     async_extent->ram_size - 1,
645 					     NULL, 1, 1, 0, 1, 1, 0);
646 
647 		ret = btrfs_submit_compressed_write(inode,
648 				    async_extent->start,
649 				    async_extent->ram_size,
650 				    ins.objectid,
651 				    ins.offset, async_extent->pages,
652 				    async_extent->nr_pages);
653 
654 		BUG_ON(ret);
655 		trans = btrfs_join_transaction(root, 1);
656 		alloc_hint = ins.objectid + ins.offset;
657 		kfree(async_extent);
658 		cond_resched();
659 	}
660 
661 	btrfs_end_transaction(trans, root);
662 	return 0;
663 }
664 
665 /*
666  * when extent_io.c finds a delayed allocation range in the file,
667  * the callbacks end up in this code.  The basic idea is to
668  * allocate extents on disk for the range, and create ordered data structs
669  * in ram to track those extents.
670  *
671  * locked_page is the page that writepage had locked already.  We use
672  * it to make sure we don't do extra locks or unlocks.
673  *
674  * *page_started is set to one if we unlock locked_page and do everything
675  * required to start IO on it.  It may be clean and already done with
676  * IO when we return.
677  */
678 static noinline int cow_file_range(struct inode *inode,
679 				   struct page *locked_page,
680 				   u64 start, u64 end, int *page_started,
681 				   unsigned long *nr_written,
682 				   int unlock)
683 {
684 	struct btrfs_root *root = BTRFS_I(inode)->root;
685 	struct btrfs_trans_handle *trans;
686 	u64 alloc_hint = 0;
687 	u64 num_bytes;
688 	unsigned long ram_size;
689 	u64 disk_num_bytes;
690 	u64 cur_alloc_size;
691 	u64 blocksize = root->sectorsize;
692 	u64 actual_end;
693 	u64 isize = i_size_read(inode);
694 	struct btrfs_key ins;
695 	struct extent_map *em;
696 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
697 	int ret = 0;
698 
699 	trans = btrfs_join_transaction(root, 1);
700 	BUG_ON(!trans);
701 	btrfs_set_trans_block_group(trans, inode);
702 
703 	actual_end = min_t(u64, isize, end + 1);
704 
705 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
706 	num_bytes = max(blocksize,  num_bytes);
707 	disk_num_bytes = num_bytes;
708 	ret = 0;
709 
710 	if (start == 0) {
711 		/* lets try to make an inline extent */
712 		ret = cow_file_range_inline(trans, root, inode,
713 					    start, end, 0, NULL);
714 		if (ret == 0) {
715 			extent_clear_unlock_delalloc(inode,
716 						     &BTRFS_I(inode)->io_tree,
717 						     start, end, NULL, 1, 1,
718 						     1, 1, 1, 1);
719 			*nr_written = *nr_written +
720 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
721 			*page_started = 1;
722 			ret = 0;
723 			goto out;
724 		}
725 	}
726 
727 	BUG_ON(disk_num_bytes >
728 	       btrfs_super_total_bytes(&root->fs_info->super_copy));
729 
730 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
731 
732 	while (disk_num_bytes > 0) {
733 		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
734 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
735 					   root->sectorsize, 0, alloc_hint,
736 					   (u64)-1, &ins, 1);
737 		BUG_ON(ret);
738 
739 		em = alloc_extent_map(GFP_NOFS);
740 		em->start = start;
741 		em->orig_start = em->start;
742 
743 		ram_size = ins.offset;
744 		em->len = ins.offset;
745 
746 		em->block_start = ins.objectid;
747 		em->block_len = ins.offset;
748 		em->bdev = root->fs_info->fs_devices->latest_bdev;
749 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
750 
751 		while (1) {
752 			spin_lock(&em_tree->lock);
753 			ret = add_extent_mapping(em_tree, em);
754 			spin_unlock(&em_tree->lock);
755 			if (ret != -EEXIST) {
756 				free_extent_map(em);
757 				break;
758 			}
759 			btrfs_drop_extent_cache(inode, start,
760 						start + ram_size - 1, 0);
761 		}
762 
763 		cur_alloc_size = ins.offset;
764 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
765 					       ram_size, cur_alloc_size, 0);
766 		BUG_ON(ret);
767 
768 		if (root->root_key.objectid ==
769 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
770 			ret = btrfs_reloc_clone_csums(inode, start,
771 						      cur_alloc_size);
772 			BUG_ON(ret);
773 		}
774 
775 		if (disk_num_bytes < cur_alloc_size)
776 			break;
777 
778 		/* we're not doing compressed IO, don't unlock the first
779 		 * page (which the caller expects to stay locked), don't
780 		 * clear any dirty bits and don't set any writeback bits
781 		 */
782 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
783 					     start, start + ram_size - 1,
784 					     locked_page, unlock, 1,
785 					     1, 0, 0, 0);
786 		disk_num_bytes -= cur_alloc_size;
787 		num_bytes -= cur_alloc_size;
788 		alloc_hint = ins.objectid + ins.offset;
789 		start += cur_alloc_size;
790 	}
791 out:
792 	ret = 0;
793 	btrfs_end_transaction(trans, root);
794 
795 	return ret;
796 }
797 
798 /*
799  * work queue callback to start compression on a file's pages
800  */
801 static noinline void async_cow_start(struct btrfs_work *work)
802 {
803 	struct async_cow *async_cow;
804 	int num_added = 0;
805 	async_cow = container_of(work, struct async_cow, work);
806 
807 	compress_file_range(async_cow->inode, async_cow->locked_page,
808 			    async_cow->start, async_cow->end, async_cow,
809 			    &num_added);
810 	if (num_added == 0)
811 		async_cow->inode = NULL;
812 }
813 
814 /*
815  * work queue callback to submit previously compressed pages
816  */
817 static noinline void async_cow_submit(struct btrfs_work *work)
818 {
819 	struct async_cow *async_cow;
820 	struct btrfs_root *root;
821 	unsigned long nr_pages;
822 
823 	async_cow = container_of(work, struct async_cow, work);
824 
825 	root = async_cow->root;
826 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
827 		PAGE_CACHE_SHIFT;
828 
829 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
830 
831 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
832 	    5 * 1024 * 1024 &&
833 	    waitqueue_active(&root->fs_info->async_submit_wait))
834 		wake_up(&root->fs_info->async_submit_wait);
835 
836 	if (async_cow->inode)
837 		submit_compressed_extents(async_cow->inode, async_cow);
838 }
839 
840 static noinline void async_cow_free(struct btrfs_work *work)
841 {
842 	struct async_cow *async_cow;
843 	async_cow = container_of(work, struct async_cow, work);
844 	kfree(async_cow);
845 }
846 
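/*
 * split a delalloc range into 512k chunks (or take the whole range for
 * NOCOMPRESS inodes) and hand each one to the delalloc worker threads,
 * throttling against async_delalloc_pages so the backlog of queued
 * compression work stays bounded
 */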
847 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
848 				u64 start, u64 end, int *page_started,
849 				unsigned long *nr_written)
850 {
851 	struct async_cow *async_cow;
852 	struct btrfs_root *root = BTRFS_I(inode)->root;
853 	unsigned long nr_pages;
854 	u64 cur_end;
855 	int limit = 10 * 1024 * 1024;
856 
857 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
858 			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
859 	while (start < end) {
860 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
861 		async_cow->inode = inode;
862 		async_cow->root = root;
863 		async_cow->locked_page = locked_page;
864 		async_cow->start = start;
865 
866 		if (btrfs_test_flag(inode, NOCOMPRESS))
867 			cur_end = end;
868 		else
869 			cur_end = min(end, start + 512 * 1024 - 1);
870 
871 		async_cow->end = cur_end;
872 		INIT_LIST_HEAD(&async_cow->extents);
873 
874 		async_cow->work.func = async_cow_start;
875 		async_cow->work.ordered_func = async_cow_submit;
876 		async_cow->work.ordered_free = async_cow_free;
877 		async_cow->work.flags = 0;
878 
879 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
880 			PAGE_CACHE_SHIFT;
881 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
882 
883 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
884 				   &async_cow->work);
885 
886 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
887 			wait_event(root->fs_info->async_submit_wait,
888 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
889 			    limit));
890 		}
891 
892 		while (atomic_read(&root->fs_info->async_submit_draining) &&
893 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
894 			wait_event(root->fs_info->async_submit_wait,
895 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
896 			   0));
897 		}
898 
899 		*nr_written += nr_pages;
900 		start = cur_end + 1;
901 	}
902 	*page_started = 1;
903 	return 0;
904 }
905 
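/*
 * returns 1 if any csums are found for the byte range (or the lookup
 * fails), 0 if the range has none.  Used by the nocow path to decide
 * when a COW is still required.
 */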
906 static noinline int csum_exist_in_range(struct btrfs_root *root,
907 					u64 bytenr, u64 num_bytes)
908 {
909 	int ret;
910 	struct btrfs_ordered_sum *sums;
911 	LIST_HEAD(list);
912 
913 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
914 				       bytenr + num_bytes - 1, &list);
915 	if (ret == 0 && list_empty(&list))
916 		return 0;
917 
918 	while (!list_empty(&list)) {
919 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
920 		list_del(&sums->list);
921 		kfree(sums);
922 	}
923 	return 1;
924 }
925 
926 /*
927  * callback for nocow writeback.  This checks for snapshots or COW copies
928  * of the extents that exist in the file, and COWs the file as required.
929  *
930  * If no cow copies or snapshots exist, we write directly to the existing
931  * blocks on disk
932  */
933 static noinline int run_delalloc_nocow(struct inode *inode,
934 				       struct page *locked_page,
935 			      u64 start, u64 end, int *page_started, int force,
936 			      unsigned long *nr_written)
937 {
938 	struct btrfs_root *root = BTRFS_I(inode)->root;
939 	struct btrfs_trans_handle *trans;
940 	struct extent_buffer *leaf;
941 	struct btrfs_path *path;
942 	struct btrfs_file_extent_item *fi;
943 	struct btrfs_key found_key;
944 	u64 cow_start;
945 	u64 cur_offset;
946 	u64 extent_end;
947 	u64 disk_bytenr;
948 	u64 num_bytes;
949 	int extent_type;
950 	int ret;
951 	int type;
952 	int nocow;
953 	int check_prev = 1;
954 
955 	path = btrfs_alloc_path();
956 	BUG_ON(!path);
957 	trans = btrfs_join_transaction(root, 1);
958 	BUG_ON(!trans);
959 
960 	cow_start = (u64)-1;
961 	cur_offset = start;
962 	while (1) {
963 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
964 					       cur_offset, 0);
965 		BUG_ON(ret < 0);
966 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
967 			leaf = path->nodes[0];
968 			btrfs_item_key_to_cpu(leaf, &found_key,
969 					      path->slots[0] - 1);
970 			if (found_key.objectid == inode->i_ino &&
971 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
972 				path->slots[0]--;
973 		}
974 		check_prev = 0;
975 next_slot:
976 		leaf = path->nodes[0];
977 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
978 			ret = btrfs_next_leaf(root, path);
979 			if (ret < 0)
980 				BUG_ON(1);
981 			if (ret > 0)
982 				break;
983 			leaf = path->nodes[0];
984 		}
985 
986 		nocow = 0;
987 		disk_bytenr = 0;
988 		num_bytes = 0;
989 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
990 
991 		if (found_key.objectid > inode->i_ino ||
992 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
993 		    found_key.offset > end)
994 			break;
995 
996 		if (found_key.offset > cur_offset) {
997 			extent_end = found_key.offset;
998 			goto out_check;
999 		}
1000 
1001 		fi = btrfs_item_ptr(leaf, path->slots[0],
1002 				    struct btrfs_file_extent_item);
1003 		extent_type = btrfs_file_extent_type(leaf, fi);
1004 
1005 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1006 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1007 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1008 			extent_end = found_key.offset +
1009 				btrfs_file_extent_num_bytes(leaf, fi);
1010 			if (extent_end <= start) {
1011 				path->slots[0]++;
1012 				goto next_slot;
1013 			}
1014 			if (disk_bytenr == 0)
1015 				goto out_check;
1016 			if (btrfs_file_extent_compression(leaf, fi) ||
1017 			    btrfs_file_extent_encryption(leaf, fi) ||
1018 			    btrfs_file_extent_other_encoding(leaf, fi))
1019 				goto out_check;
1020 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1021 				goto out_check;
1022 			if (btrfs_extent_readonly(root, disk_bytenr))
1023 				goto out_check;
1024 			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1025 						  disk_bytenr))
1026 				goto out_check;
1027 			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
1028 			disk_bytenr += cur_offset - found_key.offset;
1029 			num_bytes = min(end + 1, extent_end) - cur_offset;
1030 			/*
1031 			 * force cow if csums exist in the range.
1032 			 * this ensures that the csums for a given extent are
1033 			 * either all valid or do not exist.
1034 			 */
1035 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1036 				goto out_check;
1037 			nocow = 1;
1038 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1039 			extent_end = found_key.offset +
1040 				btrfs_file_extent_inline_len(leaf, fi);
1041 			extent_end = ALIGN(extent_end, root->sectorsize);
1042 		} else {
1043 			BUG_ON(1);
1044 		}
1045 out_check:
1046 		if (extent_end <= start) {
1047 			path->slots[0]++;
1048 			goto next_slot;
1049 		}
1050 		if (!nocow) {
1051 			if (cow_start == (u64)-1)
1052 				cow_start = cur_offset;
1053 			cur_offset = extent_end;
1054 			if (cur_offset > end)
1055 				break;
1056 			path->slots[0]++;
1057 			goto next_slot;
1058 		}
1059 
1060 		btrfs_release_path(root, path);
1061 		if (cow_start != (u64)-1) {
1062 			ret = cow_file_range(inode, locked_page, cow_start,
1063 					found_key.offset - 1, page_started,
1064 					nr_written, 1);
1065 			BUG_ON(ret);
1066 			cow_start = (u64)-1;
1067 		}
1068 
1069 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1070 			struct extent_map *em;
1071 			struct extent_map_tree *em_tree;
1072 			em_tree = &BTRFS_I(inode)->extent_tree;
1073 			em = alloc_extent_map(GFP_NOFS);
1074 			em->start = cur_offset;
1075 			em->orig_start = em->start;
1076 			em->len = num_bytes;
1077 			em->block_len = num_bytes;
1078 			em->block_start = disk_bytenr;
1079 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1080 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1081 			while (1) {
1082 				spin_lock(&em_tree->lock);
1083 				ret = add_extent_mapping(em_tree, em);
1084 				spin_unlock(&em_tree->lock);
1085 				if (ret != -EEXIST) {
1086 					free_extent_map(em);
1087 					break;
1088 				}
1089 				btrfs_drop_extent_cache(inode, em->start,
1090 						em->start + em->len - 1, 0);
1091 			}
1092 			type = BTRFS_ORDERED_PREALLOC;
1093 		} else {
1094 			type = BTRFS_ORDERED_NOCOW;
1095 		}
1096 
1097 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1098 					       num_bytes, num_bytes, type);
1099 		BUG_ON(ret);
1100 
1101 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1102 					cur_offset, cur_offset + num_bytes - 1,
1103 					locked_page, 1, 1, 1, 0, 0, 0);
1104 		cur_offset = extent_end;
1105 		if (cur_offset > end)
1106 			break;
1107 	}
1108 	btrfs_release_path(root, path);
1109 
1110 	if (cur_offset <= end && cow_start == (u64)-1)
1111 		cow_start = cur_offset;
1112 	if (cow_start != (u64)-1) {
1113 		ret = cow_file_range(inode, locked_page, cow_start, end,
1114 				     page_started, nr_written, 1);
1115 		BUG_ON(ret);
1116 	}
1117 
1118 	ret = btrfs_end_transaction(trans, root);
1119 	BUG_ON(ret);
1120 	btrfs_free_path(path);
1121 	return 0;
1122 }
1123 
1124 /*
1125  * extent_io.c callback to do delayed allocation processing
1126  */
1127 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1128 			      u64 start, u64 end, int *page_started,
1129 			      unsigned long *nr_written)
1130 {
1131 	int ret;
1132 	struct btrfs_root *root = BTRFS_I(inode)->root;
1133 
1134 	if (btrfs_test_flag(inode, NODATACOW))
1135 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1136 					 page_started, 1, nr_written);
1137 	else if (btrfs_test_flag(inode, PREALLOC))
1138 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1139 					 page_started, 0, nr_written);
1140 	else if (!btrfs_test_opt(root, COMPRESS))
1141 		ret = cow_file_range(inode, locked_page, start, end,
1142 				      page_started, nr_written, 1);
1143 	else
1144 		ret = cow_file_range_async(inode, locked_page, start, end,
1145 					   page_started, nr_written);
1146 	return ret;
1147 }
1148 
1149 /*
1150  * extent_io.c set_bit_hook, used to track delayed allocation
1151  * bytes in this file, and to maintain the list of inodes that
1152  * have pending delalloc work to be done.
1153  */
1154 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1155 		       unsigned long old, unsigned long bits)
1156 {
1157 	/*
1158 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1159 	 * but in this case, we are only testing for the DELALLOC
1160 	 * bit, which is only set or cleared with irqs on
1161 	 */
1162 	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1163 		struct btrfs_root *root = BTRFS_I(inode)->root;
1164 		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1165 		spin_lock(&root->fs_info->delalloc_lock);
1166 		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1167 		root->fs_info->delalloc_bytes += end - start + 1;
1168 		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1169 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1170 				      &root->fs_info->delalloc_inodes);
1171 		}
1172 		spin_unlock(&root->fs_info->delalloc_lock);
1173 	}
1174 	return 0;
1175 }
1176 
1177 /*
1178  * extent_io.c clear_bit_hook, see set_bit_hook for why
1179  */
1180 static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1181 			 unsigned long old, unsigned long bits)
1182 {
1183 	/*
1184 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1185 	 * but in this case, we are only testing for the DELALLOC
1186 	 * bit, which is only set or cleared with irqs on
1187 	 */
1188 	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1189 		struct btrfs_root *root = BTRFS_I(inode)->root;
1190 
1191 		spin_lock(&root->fs_info->delalloc_lock);
1192 		if (end - start + 1 > root->fs_info->delalloc_bytes) {
1193 			printk(KERN_INFO "btrfs warning: delalloc account "
1194 			       "%llu %llu\n",
1195 			       (unsigned long long)end - start + 1,
1196 			       (unsigned long long)
1197 			       root->fs_info->delalloc_bytes);
1198 			btrfs_delalloc_free_space(root, inode, (u64)-1);
1199 			root->fs_info->delalloc_bytes = 0;
1200 			BTRFS_I(inode)->delalloc_bytes = 0;
1201 		} else {
1202 			btrfs_delalloc_free_space(root, inode,
1203 						  end - start + 1);
1204 			root->fs_info->delalloc_bytes -= end - start + 1;
1205 			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1206 		}
1207 		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1208 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1209 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1210 		}
1211 		spin_unlock(&root->fs_info->delalloc_lock);
1212 	}
1213 	return 0;
1214 }
1215 
1216 /*
1217  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1218  * we don't create bios that span stripes or chunks
1219  */
1220 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1221 			 size_t size, struct bio *bio,
1222 			 unsigned long bio_flags)
1223 {
1224 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1225 	struct btrfs_mapping_tree *map_tree;
1226 	u64 logical = (u64)bio->bi_sector << 9;
1227 	u64 length = 0;
1228 	u64 map_length;
1229 	int ret;
1230 
1231 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1232 		return 0;
1233 
1234 	length = bio->bi_size;
1235 	map_tree = &root->fs_info->mapping_tree;
1236 	map_length = length;
1237 	ret = btrfs_map_block(map_tree, READ, logical,
1238 			      &map_length, NULL, 0);
1239 
1240 	if (map_length < length + size)
1241 		return 1;
1242 	return 0;
1243 }
1244 
1245 /*
1246  * in order to insert checksums into the metadata in large chunks,
1247  * we wait until bio submission time.   All the pages in the bio are
1248  * checksummed and sums are attached onto the ordered extent record.
1249  *
1250  * At IO completion time the csums attached to the ordered extent record
1251  * are inserted into the btree
1252  */
1253 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1254 				    struct bio *bio, int mirror_num,
1255 				    unsigned long bio_flags)
1256 {
1257 	struct btrfs_root *root = BTRFS_I(inode)->root;
1258 	int ret = 0;
1259 
1260 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1261 	BUG_ON(ret);
1262 	return 0;
1263 }
1264 
1265 /*
1266  * in order to insert checksums into the metadata in large chunks,
1267  * we wait until bio submission time.   All the pages in the bio are
1268  * checksummed and sums are attached onto the ordered extent record.
1269  *
1270  * At IO completion time the csums attached to the ordered extent record
1271  * are inserted into the btree
1272  */
1273 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1274 			  int mirror_num, unsigned long bio_flags)
1275 {
1276 	struct btrfs_root *root = BTRFS_I(inode)->root;
1277 	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1278 }
1279 
1280 /*
1281  * extent_io.c submission hook. This does the right thing for csum calculation
1282  * on write, or reading the csums from the tree before a read
1283  */
1284 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1285 			  int mirror_num, unsigned long bio_flags)
1286 {
1287 	struct btrfs_root *root = BTRFS_I(inode)->root;
1288 	int ret = 0;
1289 	int skip_sum;
1290 
1291 	skip_sum = btrfs_test_flag(inode, NODATASUM);
1292 
1293 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1294 	BUG_ON(ret);
1295 
1296 	if (!(rw & (1 << BIO_RW))) {
1297 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1298 			return btrfs_submit_compressed_read(inode, bio,
1299 						    mirror_num, bio_flags);
1300 		} else if (!skip_sum)
1301 			btrfs_lookup_bio_sums(root, inode, bio, NULL);
1302 		goto mapit;
1303 	} else if (!skip_sum) {
1304 		/* csum items have already been cloned */
1305 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1306 			goto mapit;
1307 		/* we're doing a write, do the async checksumming */
1308 		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1309 				   inode, rw, bio, mirror_num,
1310 				   bio_flags, __btrfs_submit_bio_start,
1311 				   __btrfs_submit_bio_done);
1312 	}
1313 
1314 mapit:
1315 	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1316 }
1317 
1318 /*
1319  * given a list of ordered sums, record them in the inode.  This happens
1320  * at IO completion time based on sums calculated at bio submission time.
1321  */
1322 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1323 			     struct inode *inode, u64 file_offset,
1324 			     struct list_head *list)
1325 {
1326 	struct btrfs_ordered_sum *sum;
1327 
1328 	btrfs_set_trans_block_group(trans, inode);
1329 
1330 	list_for_each_entry(sum, list, list) {
1331 		btrfs_csum_file_blocks(trans,
1332 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1333 	}
1334 	return 0;
1335 }
1336 
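/*
 * mark a range in the io_tree as delalloc; the set_bit hook accounts
 * the new delalloc bytes and queues the inode for delalloc writeback
 */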
1337 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1338 {
1339 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1340 		WARN_ON(1);
1341 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1342 				   GFP_NOFS);
1343 }
1344 
1345 /* see btrfs_writepage_start_hook for details on why this is required */
1346 struct btrfs_writepage_fixup {
1347 	struct page *page;
1348 	struct btrfs_work work;
1349 };
1350 
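/*
 * worker side of the writepage fixup: re-lock the page, wait out any
 * ordered extent that already covers it, and otherwise mark the range
 * delalloc so writeback sets up COW and ordered data properly
 */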
1351 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1352 {
1353 	struct btrfs_writepage_fixup *fixup;
1354 	struct btrfs_ordered_extent *ordered;
1355 	struct page *page;
1356 	struct inode *inode;
1357 	u64 page_start;
1358 	u64 page_end;
1359 
1360 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1361 	page = fixup->page;
1362 again:
1363 	lock_page(page);
1364 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1365 		ClearPageChecked(page);
1366 		goto out_page;
1367 	}
1368 
1369 	inode = page->mapping->host;
1370 	page_start = page_offset(page);
1371 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1372 
1373 	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1374 
1375 	/* already ordered? We're done */
1376 	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
1377 			     EXTENT_ORDERED, 0)) {
1378 		goto out;
1379 	}
1380 
1381 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1382 	if (ordered) {
1383 		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1384 			      page_end, GFP_NOFS);
1385 		unlock_page(page);
1386 		btrfs_start_ordered_extent(inode, ordered, 1);
1387 		goto again;
1388 	}
1389 
1390 	btrfs_set_extent_delalloc(inode, page_start, page_end);
1391 	ClearPageChecked(page);
1392 out:
1393 	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1394 out_page:
1395 	unlock_page(page);
1396 	page_cache_release(page);
1397 }
1398 
1399 /*
1400  * There are a few paths in the higher layers of the kernel that directly
1401  * set the page dirty bit without asking the filesystem if it is a
1402  * good idea.  This causes problems because we want to make sure COW
1403  * properly happens and the data=ordered rules are followed.
1404  *
1405  * In our case any range that doesn't have the ORDERED bit set
1406  * hasn't been properly setup for IO.  We kick off an async process
1407  * to fix it up.  The async helper will wait for ordered extents, set
1408  * the delalloc bit and make it safe to write the page.
1409  */
1410 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1411 {
1412 	struct inode *inode = page->mapping->host;
1413 	struct btrfs_writepage_fixup *fixup;
1414 	struct btrfs_root *root = BTRFS_I(inode)->root;
1415 	int ret;
1416 
1417 	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1418 			     EXTENT_ORDERED, 0);
1419 	if (ret)
1420 		return 0;
1421 
1422 	if (PageChecked(page))
1423 		return -EAGAIN;
1424 
1425 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1426 	if (!fixup)
1427 		return -EAGAIN;
1428 
1429 	SetPageChecked(page);
1430 	page_cache_get(page);
1431 	fixup->work.func = btrfs_writepage_fixup_worker;
1432 	fixup->page = page;
1433 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1434 	return -EAGAIN;
1435 }
1436 
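/*
 * insert the file extent item for a completed write into the btree,
 * pointing at disk space that was reserved earlier, and convert that
 * reservation into a real extent allocation with a backref
 */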
1437 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1438 				       struct inode *inode, u64 file_pos,
1439 				       u64 disk_bytenr, u64 disk_num_bytes,
1440 				       u64 num_bytes, u64 ram_bytes,
1441 				       u64 locked_end,
1442 				       u8 compression, u8 encryption,
1443 				       u16 other_encoding, int extent_type)
1444 {
1445 	struct btrfs_root *root = BTRFS_I(inode)->root;
1446 	struct btrfs_file_extent_item *fi;
1447 	struct btrfs_path *path;
1448 	struct extent_buffer *leaf;
1449 	struct btrfs_key ins;
1450 	u64 hint;
1451 	int ret;
1452 
1453 	path = btrfs_alloc_path();
1454 	BUG_ON(!path);
1455 
1456 	path->leave_spinning = 1;
1457 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1458 				 file_pos + num_bytes, locked_end,
1459 				 file_pos, &hint);
1460 	BUG_ON(ret);
1461 
1462 	ins.objectid = inode->i_ino;
1463 	ins.offset = file_pos;
1464 	ins.type = BTRFS_EXTENT_DATA_KEY;
1465 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1466 	BUG_ON(ret);
1467 	leaf = path->nodes[0];
1468 	fi = btrfs_item_ptr(leaf, path->slots[0],
1469 			    struct btrfs_file_extent_item);
1470 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1471 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1472 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1473 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1474 	btrfs_set_file_extent_offset(leaf, fi, 0);
1475 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1476 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1477 	btrfs_set_file_extent_compression(leaf, fi, compression);
1478 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1479 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1480 
1481 	btrfs_unlock_up_safe(path, 1);
1482 	btrfs_set_lock_blocking(leaf);
1483 
1484 	btrfs_mark_buffer_dirty(leaf);
1485 
1486 	inode_add_bytes(inode, num_bytes);
1487 	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
1488 
1489 	ins.objectid = disk_bytenr;
1490 	ins.offset = disk_num_bytes;
1491 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1492 	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1493 					  root->root_key.objectid,
1494 					  trans->transid, inode->i_ino, &ins);
1495 	BUG_ON(ret);
1496 	btrfs_free_path(path);
1497 
1498 	return 0;
1499 }
1500 
1501 /*
1502  * helper function for btrfs_finish_ordered_io, this
1503  * just reads in some of the csum leaves to prime them into ram
1504  * before we start the transaction.  It limits the amount of btree
1505  * reads required while inside the transaction.
1506  */
1507 static noinline void reada_csum(struct btrfs_root *root,
1508 				struct btrfs_path *path,
1509 				struct btrfs_ordered_extent *ordered_extent)
1510 {
1511 	struct btrfs_ordered_sum *sum;
1512 	u64 bytenr;
1513 
1514 	sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1515 			 list);
1516 	bytenr = sum->sums[0].bytenr;
1517 
1518 	/*
1519 	 * we don't care about the results, the point of this search is
1520 	 * just to get the btree leaves into ram
1521 	 */
1522 	btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1523 }
1524 
1525 /* as ordered data IO finishes, this gets called so we can finish
1526  * an ordered extent if the range of bytes in the file it covers is
1527  * fully written.
1528  */
1529 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1530 {
1531 	struct btrfs_root *root = BTRFS_I(inode)->root;
1532 	struct btrfs_trans_handle *trans;
1533 	struct btrfs_ordered_extent *ordered_extent = NULL;
1534 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1535 	struct btrfs_path *path;
1536 	int compressed = 0;
1537 	int ret;
1538 
1539 	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1540 	if (!ret)
1541 		return 0;
1542 
1543 	/*
1544 	 * before we join the transaction, try to do some of our IO.
1545 	 * This will limit the amount of IO that we have to do with
1546 	 * the transaction running.  We're unlikely to need to do any
1547 	 * IO if the file extents are new, the disk_i_size checks
1548 	 * covers the most common case.
1549 	 */
1550 	if (start < BTRFS_I(inode)->disk_i_size) {
1551 		path = btrfs_alloc_path();
1552 		if (path) {
1553 			ret = btrfs_lookup_file_extent(NULL, root, path,
1554 						       inode->i_ino,
1555 						       start, 0);
1556 			ordered_extent = btrfs_lookup_ordered_extent(inode,
1557 								     start);
1558 			if (!list_empty(&ordered_extent->list)) {
1559 				btrfs_release_path(root, path);
1560 				reada_csum(root, path, ordered_extent);
1561 			}
1562 			btrfs_free_path(path);
1563 		}
1564 	}
1565 
1566 	trans = btrfs_join_transaction(root, 1);
1567 
1568 	if (!ordered_extent)
1569 		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1570 	BUG_ON(!ordered_extent);
1571 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1572 		goto nocow;
1573 
1574 	lock_extent(io_tree, ordered_extent->file_offset,
1575 		    ordered_extent->file_offset + ordered_extent->len - 1,
1576 		    GFP_NOFS);
1577 
1578 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1579 		compressed = 1;
1580 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1581 		BUG_ON(compressed);
1582 		ret = btrfs_mark_extent_written(trans, root, inode,
1583 						ordered_extent->file_offset,
1584 						ordered_extent->file_offset +
1585 						ordered_extent->len);
1586 		BUG_ON(ret);
1587 	} else {
1588 		ret = insert_reserved_file_extent(trans, inode,
1589 						ordered_extent->file_offset,
1590 						ordered_extent->start,
1591 						ordered_extent->disk_len,
1592 						ordered_extent->len,
1593 						ordered_extent->len,
1594 						ordered_extent->file_offset +
1595 						ordered_extent->len,
1596 						compressed, 0, 0,
1597 						BTRFS_FILE_EXTENT_REG);
1598 		BUG_ON(ret);
1599 	}
1600 	unlock_extent(io_tree, ordered_extent->file_offset,
1601 		    ordered_extent->file_offset + ordered_extent->len - 1,
1602 		    GFP_NOFS);
1603 nocow:
1604 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1605 			  &ordered_extent->list);
1606 
1607 	mutex_lock(&BTRFS_I(inode)->extent_mutex);
1608 	btrfs_ordered_update_i_size(inode, ordered_extent);
1609 	btrfs_update_inode(trans, root, inode);
1610 	btrfs_remove_ordered_extent(inode, ordered_extent);
1611 	mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1612 
1613 	/* once for us */
1614 	btrfs_put_ordered_extent(ordered_extent);
1615 	/* once for the tree */
1616 	btrfs_put_ordered_extent(ordered_extent);
1617 
1618 	btrfs_end_transaction(trans, root);
1619 	return 0;
1620 }
1621 
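/*
 * writepage end_io hook: finish the ordered extent covering this range
 */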
1622 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1623 				struct extent_state *state, int uptodate)
1624 {
1625 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1626 }
1627 
1628 /*
1629  * When IO fails, either with EIO or csum verification fails, we
1630  * try other mirrors that might have a good copy of the data.  This
1631  * io_failure_record is used to record state as we go through all the
1632  * mirrors.  If another mirror has good data, the page is set up to date
1633  * and things continue.  If a good mirror can't be found, the original
1634  * bio end_io callback is called to indicate things have failed.
1635  */
1636 struct io_failure_record {
1637 	struct page *page;
1638 	u64 start;
1639 	u64 len;
1640 	u64 logical;
1641 	unsigned long bio_flags;
1642 	int last_mirror;
1643 };
1644 
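/*
 * called when a read bio fails: record the failure and resubmit the
 * page to the next mirror until we run out of copies
 */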
1645 static int btrfs_io_failed_hook(struct bio *failed_bio,
1646 			 struct page *page, u64 start, u64 end,
1647 			 struct extent_state *state)
1648 {
1649 	struct io_failure_record *failrec = NULL;
1650 	u64 private;
1651 	struct extent_map *em;
1652 	struct inode *inode = page->mapping->host;
1653 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1654 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1655 	struct bio *bio;
1656 	int num_copies;
1657 	int ret;
1658 	int rw;
1659 	u64 logical;
1660 
1661 	ret = get_state_private(failure_tree, start, &private);
1662 	if (ret) {
1663 		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1664 		if (!failrec)
1665 			return -ENOMEM;
1666 		failrec->start = start;
1667 		failrec->len = end - start + 1;
1668 		failrec->last_mirror = 0;
1669 		failrec->bio_flags = 0;
1670 
1671 		spin_lock(&em_tree->lock);
1672 		em = lookup_extent_mapping(em_tree, start, failrec->len);
1673 		if (em && (em->start > start || em->start + em->len < start)) {
1674 			free_extent_map(em);
1675 			em = NULL;
1676 		}
1677 		spin_unlock(&em_tree->lock);
1678 
1679 		if (!em || IS_ERR(em)) {
1680 			kfree(failrec);
1681 			return -EIO;
1682 		}
1683 		logical = start - em->start;
1684 		logical = em->block_start + logical;
1685 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1686 			logical = em->block_start;
1687 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1688 		}
1689 		failrec->logical = logical;
1690 		free_extent_map(em);
1691 		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1692 				EXTENT_DIRTY, GFP_NOFS);
1693 		set_state_private(failure_tree, start,
1694 				 (u64)(unsigned long)failrec);
1695 	} else {
1696 		failrec = (struct io_failure_record *)(unsigned long)private;
1697 	}
1698 	num_copies = btrfs_num_copies(
1699 			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
1700 			      failrec->logical, failrec->len);
1701 	failrec->last_mirror++;
1702 	if (!state) {
1703 		spin_lock(&BTRFS_I(inode)->io_tree.lock);
1704 		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1705 						    failrec->start,
1706 						    EXTENT_LOCKED);
1707 		if (state && state->start != failrec->start)
1708 			state = NULL;
1709 		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1710 	}
1711 	if (!state || failrec->last_mirror > num_copies) {
1712 		set_state_private(failure_tree, failrec->start, 0);
1713 		clear_extent_bits(failure_tree, failrec->start,
1714 				  failrec->start + failrec->len - 1,
1715 				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1716 		kfree(failrec);
1717 		return -EIO;
1718 	}
1719 	bio = bio_alloc(GFP_NOFS, 1);
1720 	bio->bi_private = state;
1721 	bio->bi_end_io = failed_bio->bi_end_io;
1722 	bio->bi_sector = failrec->logical >> 9;
1723 	bio->bi_bdev = failed_bio->bi_bdev;
1724 	bio->bi_size = 0;
1725 
1726 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
1727 	if (failed_bio->bi_rw & (1 << BIO_RW))
1728 		rw = WRITE;
1729 	else
1730 		rw = READ;
1731 
1732 	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1733 						      failrec->last_mirror,
1734 						      failrec->bio_flags);
1735 	return 0;
1736 }
1737 
1738 /*
1739  * each time an IO finishes, we do a fast check in the IO failure tree
1740  * to see if we need to process or clean up an io_failure_record
1741  */
1742 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1743 {
1744 	u64 private;
1745 	u64 private_failure;
1746 	struct io_failure_record *failure;
1747 	int ret;
1748 
1749 	private = 0;
1750 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1751 			     (u64)-1, 1, EXTENT_DIRTY)) {
1752 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1753 					start, &private_failure);
1754 		if (ret == 0) {
1755 			failure = (struct io_failure_record *)(unsigned long)
1756 				   private_failure;
1757 			set_state_private(&BTRFS_I(inode)->io_failure_tree,
1758 					  failure->start, 0);
1759 			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1760 					  failure->start,
1761 					  failure->start + failure->len - 1,
1762 					  EXTENT_DIRTY | EXTENT_LOCKED,
1763 					  GFP_NOFS);
1764 			kfree(failure);
1765 		}
1766 	}
1767 	return 0;
1768 }
1769 
1770 /*
1771  * when reads are done, we need to check csums to verify the data is correct.
1772  * If there's a match, we allow the bio to finish.  If not, we go through
1773  * the io_failure_record routines to find good copies
1774  */
1775 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1776 			       struct extent_state *state)
1777 {
1778 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1779 	struct inode *inode = page->mapping->host;
1780 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1781 	char *kaddr;
1782 	u64 private = ~(u32)0;
1783 	int ret;
1784 	struct btrfs_root *root = BTRFS_I(inode)->root;
1785 	u32 csum = ~(u32)0;
1786 
1787 	if (PageChecked(page)) {
1788 		ClearPageChecked(page);
1789 		goto good;
1790 	}
1791 	if (btrfs_test_flag(inode, NODATASUM))
1792 		return 0;
1793 
1794 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1795 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
1796 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1797 				  GFP_NOFS);
1798 		return 0;
1799 	}
1800 
1801 	if (state && state->start == start) {
1802 		private = state->private;
1803 		ret = 0;
1804 	} else {
1805 		ret = get_state_private(io_tree, start, &private);
1806 	}
1807 	kaddr = kmap_atomic(page, KM_USER0);
1808 	if (ret)
1809 		goto zeroit;
1810 
1811 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1812 	btrfs_csum_final(csum, (char *)&csum);
1813 	if (csum != private)
1814 		goto zeroit;
1815 
1816 	kunmap_atomic(kaddr, KM_USER0);
1817 good:
1818 	/* if the io failure tree for this inode is non-empty,
1819 	 * check to see if we've recovered from a failed IO
1820 	 */
1821 	btrfs_clean_io_failures(inode, start);
1822 	return 0;
1823 
1824 zeroit:
1825 	if (printk_ratelimit()) {
1826 		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1827 		       "private %llu\n", page->mapping->host->i_ino,
1828 		       (unsigned long long)start, csum,
1829 		       (unsigned long long)private);
1830 	}
1831 	memset(kaddr + offset, 1, end - start + 1);
1832 	flush_dcache_page(page);
1833 	kunmap_atomic(kaddr, KM_USER0);
1834 	if (private == 0)
1835 		return 0;
1836 	return -EIO;
1837 }
1838 
1839 /*
1840  * This creates an orphan entry for the given inode in case something goes
1841  * wrong in the middle of an unlink/truncate.
1842  */
1843 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1844 {
1845 	struct btrfs_root *root = BTRFS_I(inode)->root;
1846 	int ret = 0;
1847 
1848 	spin_lock(&root->list_lock);
1849 
1850 	/* already on the orphan list, we're good */
1851 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1852 		spin_unlock(&root->list_lock);
1853 		return 0;
1854 	}
1855 
1856 	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1857 
1858 	spin_unlock(&root->list_lock);
1859 
1860 	/*
1861 	 * insert an orphan item to track this unlinked/truncated file
1862 	 */
1863 	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1864 
1865 	return ret;
1866 }
1867 
1868 /*
1869  * We have done the truncate/delete so we can go ahead and remove the orphan
1870  * item for this particular inode.
1871  */
1872 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1873 {
1874 	struct btrfs_root *root = BTRFS_I(inode)->root;
1875 	int ret = 0;
1876 
1877 	spin_lock(&root->list_lock);
1878 
1879 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1880 		spin_unlock(&root->list_lock);
1881 		return 0;
1882 	}
1883 
1884 	list_del_init(&BTRFS_I(inode)->i_orphan);
1885 	if (!trans) {
1886 		spin_unlock(&root->list_lock);
1887 		return 0;
1888 	}
1889 
1890 	spin_unlock(&root->list_lock);
1891 
1892 	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1893 
1894 	return ret;
1895 }
1896 
1897 /*
1898  * this cleans up any orphans that may be left on the list from the last use
1899  * of this root.
1900  */
1901 void btrfs_orphan_cleanup(struct btrfs_root *root)
1902 {
1903 	struct btrfs_path *path;
1904 	struct extent_buffer *leaf;
1905 	struct btrfs_item *item;
1906 	struct btrfs_key key, found_key;
1907 	struct btrfs_trans_handle *trans;
1908 	struct inode *inode;
1909 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
1910 
1911 	path = btrfs_alloc_path();
1912 	if (!path)
1913 		return;
1914 	path->reada = -1;
1915 
1916 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1917 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1918 	key.offset = (u64)-1;
1919 
1920 
1921 	while (1) {
1922 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1923 		if (ret < 0) {
1924 			printk(KERN_ERR "Error searching slot for orphan: %d"
1925 			       "\n", ret);
1926 			break;
1927 		}
1928 
1929 		/*
1930 		 * ret == 0 means we found what we were searching for, which
1931 		 * is weird, but possible, so only screw with the path if we didn't
1932 		 * find the key and see if we have stuff that matches
1933 		 */
1934 		if (ret > 0) {
1935 			if (path->slots[0] == 0)
1936 				break;
1937 			path->slots[0]--;
1938 		}
1939 
1940 		/* pull out the item */
1941 		leaf = path->nodes[0];
1942 		item = btrfs_item_nr(leaf, path->slots[0]);
1943 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1944 
1945 		/* make sure the item matches what we want */
1946 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1947 			break;
1948 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1949 			break;
1950 
1951 		/* release the path since we're done with it */
1952 		btrfs_release_path(root, path);
1953 
1954 		/*
1955 		 * this is basically btrfs_lookup, without the root-crossing
1956 		 * part.  we store the inode number in the
1957 		 * offset of the orphan item.
1958 		 */
1959 		inode = btrfs_iget_locked(root->fs_info->sb,
1960 					  found_key.offset, root);
1961 		if (!inode)
1962 			break;
1963 
1964 		if (inode->i_state & I_NEW) {
1965 			BTRFS_I(inode)->root = root;
1966 
1967 			/* have to set the location manually */
1968 			BTRFS_I(inode)->location.objectid = inode->i_ino;
1969 			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1970 			BTRFS_I(inode)->location.offset = 0;
1971 
1972 			btrfs_read_locked_inode(inode);
1973 			unlock_new_inode(inode);
1974 		}
1975 
1976 		/*
1977 		 * add this inode to the orphan list so btrfs_orphan_del does
1978 		 * the proper thing when we hit it
1979 		 */
1980 		spin_lock(&root->list_lock);
1981 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1982 		spin_unlock(&root->list_lock);
1983 
1984 		/*
1985 		 * if this is a bad inode, it means we actually succeeded in
1986 		 * removing the inode, but not the orphan record, which means
1987 		 * we need to manually delete the orphan since iput will just
1988 		 * do a destroy_inode
1989 		 */
1990 		if (is_bad_inode(inode)) {
1991 			trans = btrfs_start_transaction(root, 1);
1992 			btrfs_orphan_del(trans, inode);
1993 			btrfs_end_transaction(trans, root);
1994 			iput(inode);
1995 			continue;
1996 		}
1997 
1998 		/* if we have links, this was a truncate, let's do that */
1999 		if (inode->i_nlink) {
2000 			nr_truncate++;
2001 			btrfs_truncate(inode);
2002 		} else {
2003 			nr_unlink++;
2004 		}
2005 
2006 		/* this will do delete_inode and everything for us */
2007 		iput(inode);
2008 	}
2009 
2010 	if (nr_unlink)
2011 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2012 	if (nr_truncate)
2013 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2014 
2015 	btrfs_free_path(path);
2016 }
2017 
2018 /*
2019  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2020  * don't find any xattrs, we know there can't be any acls.
2021  *
2022  * slot is the slot the inode is in, objectid is the objectid of the inode
2023  */
2024 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2025 					  int slot, u64 objectid)
2026 {
2027 	u32 nritems = btrfs_header_nritems(leaf);
2028 	struct btrfs_key found_key;
2029 	int scanned = 0;
2030 
2031 	slot++;
2032 	while (slot < nritems) {
2033 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2034 
2035 		/* we found a different objectid, there must not be acls */
2036 		if (found_key.objectid != objectid)
2037 			return 0;
2038 
2039 		/* we found an xattr, assume we've got an acl */
2040 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2041 			return 1;
2042 
2043 		/*
2044 		 * we found a key greater than an xattr key, there can't
2045 		 * be any acls later on
2046 		 */
2047 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2048 			return 0;
2049 
2050 		slot++;
2051 		scanned++;
2052 
2053 		/*
2054 		 * it goes inode, inode backrefs, xattrs, extents,
2055 		 * so if there are a ton of hard links to an inode there can
2056 		 * be a lot of backrefs.  Don't waste time searching too hard,
2057 		 * this is just an optimization
2058 		 */
2059 		if (scanned >= 8)
2060 			break;
2061 	}
2062 	/* we hit the end of the leaf before we found an xattr or
2063 	 * something larger than an xattr.  We have to assume the inode
2064 	 * has acls
2065 	 */
2066 	return 1;
2067 }
2068 
2069 /*
2070  * read an inode from the btree into the in-memory inode
2071  */
2072 void btrfs_read_locked_inode(struct inode *inode)
2073 {
2074 	struct btrfs_path *path;
2075 	struct extent_buffer *leaf;
2076 	struct btrfs_inode_item *inode_item;
2077 	struct btrfs_timespec *tspec;
2078 	struct btrfs_root *root = BTRFS_I(inode)->root;
2079 	struct btrfs_key location;
2080 	int maybe_acls;
2081 	u64 alloc_group_block;
2082 	u32 rdev;
2083 	int ret;
2084 
2085 	path = btrfs_alloc_path();
2086 	BUG_ON(!path);
2087 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2088 
2089 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2090 	if (ret)
2091 		goto make_bad;
2092 
2093 	leaf = path->nodes[0];
2094 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2095 				    struct btrfs_inode_item);
2096 
2097 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2098 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2099 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2100 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2101 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2102 
2103 	tspec = btrfs_inode_atime(inode_item);
2104 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2105 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2106 
2107 	tspec = btrfs_inode_mtime(inode_item);
2108 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2109 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2110 
2111 	tspec = btrfs_inode_ctime(inode_item);
2112 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2113 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2114 
2115 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2116 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2117 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2118 	inode->i_generation = BTRFS_I(inode)->generation;
2119 	inode->i_rdev = 0;
2120 	rdev = btrfs_inode_rdev(leaf, inode_item);
2121 
2122 	BTRFS_I(inode)->index_cnt = (u64)-1;
2123 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2124 
2125 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2126 
2127 	/*
2128 	 * try to precache a NULL acl entry for files that don't have
2129 	 * any xattrs or acls
2130 	 */
2131 	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2132 	if (!maybe_acls) {
2133 		BTRFS_I(inode)->i_acl = NULL;
2134 		BTRFS_I(inode)->i_default_acl = NULL;
2135 	}
2136 
2137 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2138 						alloc_group_block, 0);
2139 	btrfs_free_path(path);
2140 	inode_item = NULL;
2141 
2142 	switch (inode->i_mode & S_IFMT) {
2143 	case S_IFREG:
2144 		inode->i_mapping->a_ops = &btrfs_aops;
2145 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2146 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2147 		inode->i_fop = &btrfs_file_operations;
2148 		inode->i_op = &btrfs_file_inode_operations;
2149 		break;
2150 	case S_IFDIR:
2151 		inode->i_fop = &btrfs_dir_file_operations;
2152 		if (root == root->fs_info->tree_root)
2153 			inode->i_op = &btrfs_dir_ro_inode_operations;
2154 		else
2155 			inode->i_op = &btrfs_dir_inode_operations;
2156 		break;
2157 	case S_IFLNK:
2158 		inode->i_op = &btrfs_symlink_inode_operations;
2159 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2160 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2161 		break;
2162 	default:
2163 		inode->i_op = &btrfs_special_inode_operations;
2164 		init_special_inode(inode, inode->i_mode, rdev);
2165 		break;
2166 	}
2167 	return;
2168 
2169 make_bad:
2170 	btrfs_free_path(path);
2171 	make_bad_inode(inode);
2172 }
2173 
2174 /*
2175  * given a leaf and an inode, copy the inode fields into the leaf
2176  */
2177 static void fill_inode_item(struct btrfs_trans_handle *trans,
2178 			    struct extent_buffer *leaf,
2179 			    struct btrfs_inode_item *item,
2180 			    struct inode *inode)
2181 {
2182 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2183 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2184 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2185 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2186 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2187 
2188 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2189 			       inode->i_atime.tv_sec);
2190 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2191 				inode->i_atime.tv_nsec);
2192 
2193 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2194 			       inode->i_mtime.tv_sec);
2195 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2196 				inode->i_mtime.tv_nsec);
2197 
2198 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2199 			       inode->i_ctime.tv_sec);
2200 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2201 				inode->i_ctime.tv_nsec);
2202 
2203 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2204 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2205 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2206 	btrfs_set_inode_transid(leaf, item, trans->transid);
2207 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2208 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2209 	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2210 }
2211 
2212 /*
2213  * copy everything in the in-memory inode into the btree.
2214  */
2215 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2216 				struct btrfs_root *root, struct inode *inode)
2217 {
2218 	struct btrfs_inode_item *inode_item;
2219 	struct btrfs_path *path;
2220 	struct extent_buffer *leaf;
2221 	int ret;
2222 
2223 	path = btrfs_alloc_path();
2224 	BUG_ON(!path);
2225 	path->leave_spinning = 1;
2226 	ret = btrfs_lookup_inode(trans, root, path,
2227 				 &BTRFS_I(inode)->location, 1);
2228 	if (ret) {
2229 		if (ret > 0)
2230 			ret = -ENOENT;
2231 		goto failed;
2232 	}
2233 
2234 	btrfs_unlock_up_safe(path, 1);
2235 	leaf = path->nodes[0];
2236 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2237 				  struct btrfs_inode_item);
2238 
2239 	fill_inode_item(trans, leaf, inode_item, inode);
2240 	btrfs_mark_buffer_dirty(leaf);
2241 	btrfs_set_inode_last_trans(trans, inode);
2242 	ret = 0;
2243 failed:
2244 	btrfs_free_path(path);
2245 	return ret;
2246 }
2247 
2248 
2249 /*
2250  * unlink helper that gets used here in inode.c and in the tree logging
2251  * recovery code.  It remove a link in a directory with a given name, and
2252  * recovery code.  It removes a link in a directory with a given name, and
2253  */
2254 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2255 		       struct btrfs_root *root,
2256 		       struct inode *dir, struct inode *inode,
2257 		       const char *name, int name_len)
2258 {
2259 	struct btrfs_path *path;
2260 	int ret = 0;
2261 	struct extent_buffer *leaf;
2262 	struct btrfs_dir_item *di;
2263 	struct btrfs_key key;
2264 	u64 index;
2265 
2266 	path = btrfs_alloc_path();
2267 	if (!path) {
2268 		ret = -ENOMEM;
2269 		goto err;
2270 	}
2271 
2272 	path->leave_spinning = 1;
2273 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2274 				    name, name_len, -1);
2275 	if (IS_ERR(di)) {
2276 		ret = PTR_ERR(di);
2277 		goto err;
2278 	}
2279 	if (!di) {
2280 		ret = -ENOENT;
2281 		goto err;
2282 	}
2283 	leaf = path->nodes[0];
2284 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2285 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2286 	if (ret)
2287 		goto err;
2288 	btrfs_release_path(root, path);
2289 
2290 	ret = btrfs_del_inode_ref(trans, root, name, name_len,
2291 				  inode->i_ino,
2292 				  dir->i_ino, &index);
2293 	if (ret) {
2294 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2295 		       "inode %lu parent %lu\n", name_len, name,
2296 		       inode->i_ino, dir->i_ino);
2297 		goto err;
2298 	}
2299 
2300 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2301 					 index, name, name_len, -1);
2302 	if (IS_ERR(di)) {
2303 		ret = PTR_ERR(di);
2304 		goto err;
2305 	}
2306 	if (!di) {
2307 		ret = -ENOENT;
2308 		goto err;
2309 	}
2310 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2311 	btrfs_release_path(root, path);
2312 
2313 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2314 					 inode, dir->i_ino);
2315 	BUG_ON(ret != 0 && ret != -ENOENT);
2316 
2317 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2318 					   dir, index);
2319 	BUG_ON(ret);
2320 err:
2321 	btrfs_free_path(path);
2322 	if (ret)
2323 		goto out;
2324 
2325 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2326 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2327 	btrfs_update_inode(trans, root, dir);
2328 	btrfs_drop_nlink(inode);
2329 	ret = btrfs_update_inode(trans, root, inode);
2330 	dir->i_sb->s_dirt = 1;
2331 out:
2332 	return ret;
2333 }
2334 
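/*
 * unlink inode operation: drop the directory entry for this dentry and,
 * if the link count hits zero, put the inode on the orphan list so it
 * can be cleaned up if we crash before the final iput
 */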
2335 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2336 {
2337 	struct btrfs_root *root;
2338 	struct btrfs_trans_handle *trans;
2339 	struct inode *inode = dentry->d_inode;
2340 	int ret;
2341 	unsigned long nr = 0;
2342 
2343 	root = BTRFS_I(dir)->root;
2344 
2345 	trans = btrfs_start_transaction(root, 1);
2346 
2347 	btrfs_set_trans_block_group(trans, dir);
2348 
2349 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2350 
2351 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2352 				 dentry->d_name.name, dentry->d_name.len);
2353 
2354 	if (inode->i_nlink == 0)
2355 		ret = btrfs_orphan_add(trans, inode);
2356 
2357 	nr = trans->blocks_used;
2358 
2359 	btrfs_end_transaction_throttle(trans, root);
2360 	btrfs_btree_balance_dirty(root, nr);
2361 	return ret;
2362 }
2363 
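/*
 * rmdir inode operation: refuses non-empty directories and subvolume or
 * snapshot roots; otherwise adds an orphan item, removes the entry from
 * the parent and zeroes the directory's size
 */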
2364 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2365 {
2366 	struct inode *inode = dentry->d_inode;
2367 	int err = 0;
2368 	int ret;
2369 	struct btrfs_root *root = BTRFS_I(dir)->root;
2370 	struct btrfs_trans_handle *trans;
2371 	unsigned long nr = 0;
2372 
2373 	/*
2374 	 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2375 	 * the root of a subvolume or snapshot
2376 	 */
2377 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2378 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2379 		return -ENOTEMPTY;
2380 	}
2381 
2382 	trans = btrfs_start_transaction(root, 1);
2383 	btrfs_set_trans_block_group(trans, dir);
2384 
2385 	err = btrfs_orphan_add(trans, inode);
2386 	if (err)
2387 		goto fail_trans;
2388 
2389 	/* now the directory is empty */
2390 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2391 				 dentry->d_name.name, dentry->d_name.len);
2392 	if (!err)
2393 		btrfs_i_size_write(inode, 0);
2394 
2395 fail_trans:
2396 	nr = trans->blocks_used;
2397 	ret = btrfs_end_transaction_throttle(trans, root);
2398 	btrfs_btree_balance_dirty(root, nr);
2399 
2400 	if (ret && !err)
2401 		err = ret;
2402 	return err;
2403 }
2404 
2405 #if 0
2406 /*
2407  * when truncating bytes in a file, it is possible to avoid reading
2408  * the leaves that contain only checksum items.  This can be the
2409  * majority of the IO required to delete a large file, but it must
2410  * be done carefully.
2411  *
2412  * The keys in the level just above the leaves are checked to make sure
2413  * the lowest key in a given leaf is a csum key, and starts at an offset
2414  * after the new  size.
2415  * after the new size.
2416  * Then the key for the next leaf is checked to make sure it also has
2417  * a checksum item for the same file.  If it does, we know our target leaf
2418  * contains only checksum items, and it can be safely freed without reading
2419  * it.
2420  *
2421  * This is just an optimization targeted at large files.  It may do
2422  * nothing.  It will return 0 unless things went badly.
2423  */
2424 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2425 				     struct btrfs_root *root,
2426 				     struct btrfs_path *path,
2427 				     struct inode *inode, u64 new_size)
2428 {
2429 	struct btrfs_key key;
2430 	int ret;
2431 	int nritems;
2432 	struct btrfs_key found_key;
2433 	struct btrfs_key other_key;
2434 	struct btrfs_leaf_ref *ref;
2435 	u64 leaf_gen;
2436 	u64 leaf_start;
2437 
2438 	path->lowest_level = 1;
2439 	key.objectid = inode->i_ino;
2440 	key.type = BTRFS_CSUM_ITEM_KEY;
2441 	key.offset = new_size;
2442 again:
2443 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2444 	if (ret < 0)
2445 		goto out;
2446 
2447 	if (path->nodes[1] == NULL) {
2448 		ret = 0;
2449 		goto out;
2450 	}
2451 	ret = 0;
2452 	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2453 	nritems = btrfs_header_nritems(path->nodes[1]);
2454 
2455 	if (!nritems)
2456 		goto out;
2457 
2458 	if (path->slots[1] >= nritems)
2459 		goto next_node;
2460 
2461 	/* did we find a key greater than anything we want to delete? */
2462 	if (found_key.objectid > inode->i_ino ||
2463 	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
2464 		goto out;
2465 
2466 	/* we check the next key in the node to make sure the leaf contains
2467 	 * only checksum items.  This comparison doesn't work if our
2468 	 * leaf is the last one in the node
2469 	 */
2470 	if (path->slots[1] + 1 >= nritems) {
2471 next_node:
2472 		/* search forward from the last key in the node, this
2473 		 * will bring us into the next node in the tree
2474 		 */
2475 		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2476 
2477 		/* unlikely, but we inc below, so check to be safe */
2478 		if (found_key.offset == (u64)-1)
2479 			goto out;
2480 
2481 		/* search_forward needs a path with locks held, do the
2482 		 * search again for the original key.  It is possible
2483 		 * this will race with a balance and return a path that
2484 		 * we could modify, but this drop is just an optimization
2485 		 * and is allowed to miss some leaves.
2486 		 */
2487 		btrfs_release_path(root, path);
2488 		found_key.offset++;
2489 
2490 		/* setup a max key for search_forward */
2491 		other_key.offset = (u64)-1;
2492 		other_key.type = key.type;
2493 		other_key.objectid = key.objectid;
2494 
2495 		path->keep_locks = 1;
2496 		ret = btrfs_search_forward(root, &found_key, &other_key,
2497 					   path, 0, 0);
2498 		path->keep_locks = 0;
2499 		if (ret || found_key.objectid != key.objectid ||
2500 		    found_key.type != key.type) {
2501 			ret = 0;
2502 			goto out;
2503 		}
2504 
2505 		key.offset = found_key.offset;
2506 		btrfs_release_path(root, path);
2507 		cond_resched();
2508 		goto again;
2509 	}
2510 
2511 	/* we know there's one more slot after us in the tree,
2512 	 * read that key so we can verify it is also a checksum item
2513 	 */
2514 	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2515 
2516 	if (found_key.objectid < inode->i_ino)
2517 		goto next_key;
2518 
2519 	if (found_key.type != key.type || found_key.offset < new_size)
2520 		goto next_key;
2521 
2522 	/*
2523 	 * if the key for the next leaf isn't a csum key from this objectid,
2524 	 * we can't be sure there aren't good items inside this leaf.
2525 	 * Bail out
2526 	 */
2527 	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2528 		goto out;
2529 
2530 	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2531 	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2532 	/*
2533 	 * it is safe to delete this leaf, it contains only
2534 	 * csum items from this inode at an offset >= new_size
2535 	 */
2536 	ret = btrfs_del_leaf(trans, root, path, leaf_start);
2537 	BUG_ON(ret);
2538 
2539 	if (root->ref_cows && leaf_gen < trans->transid) {
2540 		ref = btrfs_alloc_leaf_ref(root, 0);
2541 		if (ref) {
2542 			ref->root_gen = root->root_key.offset;
2543 			ref->bytenr = leaf_start;
2544 			ref->owner = 0;
2545 			ref->generation = leaf_gen;
2546 			ref->nritems = 0;
2547 
2548 			btrfs_sort_leaf_ref(ref);
2549 
2550 			ret = btrfs_add_leaf_ref(root, ref, 0);
2551 			WARN_ON(ret);
2552 			btrfs_free_leaf_ref(root, ref);
2553 		} else {
2554 			WARN_ON(1);
2555 		}
2556 	}
2557 next_key:
2558 	btrfs_release_path(root, path);
2559 
2560 	if (other_key.objectid == inode->i_ino &&
2561 	    other_key.type == key.type && other_key.offset > key.offset) {
2562 		key.offset = other_key.offset;
2563 		cond_resched();
2564 		goto again;
2565 	}
2566 	ret = 0;
2567 out:
2568 	/* fixup any changes we've made to the path */
2569 	path->lowest_level = 0;
2570 	path->keep_locks = 0;
2571 	btrfs_release_path(root, path);
2572 	return ret;
2573 }
2574 
2575 #endif
2576 
2577 /*
2578  * this can truncate away extent items, csum items and directory items.
2579  * It starts at a high offset and removes keys until it can't find
2580  * any higher than new_size
2581  *
2582  * csum items that cross the new i_size are truncated to the new size
2583  * as well.
2584  *
2585  * min_type is the minimum key type to truncate down to.  If set to 0, this
2586  * will kill all the items on this inode, including the INODE_ITEM_KEY.
2587  */
2588 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2589 					struct btrfs_root *root,
2590 					struct inode *inode,
2591 					u64 new_size, u32 min_type)
2592 {
2593 	int ret;
2594 	struct btrfs_path *path;
2595 	struct btrfs_key key;
2596 	struct btrfs_key found_key;
2597 	u32 found_type = (u8)-1;
2598 	struct extent_buffer *leaf;
2599 	struct btrfs_file_extent_item *fi;
2600 	u64 extent_start = 0;
2601 	u64 extent_num_bytes = 0;
2602 	u64 item_end = 0;
2603 	u64 root_gen = 0;
2604 	u64 root_owner = 0;
2605 	int found_extent;
2606 	int del_item;
2607 	int pending_del_nr = 0;
2608 	int pending_del_slot = 0;
2609 	int extent_type = -1;
2610 	int encoding;
2611 	u64 mask = root->sectorsize - 1;
2612 
2613 	if (root->ref_cows)
2614 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2615 	path = btrfs_alloc_path();
2616 	path->reada = -1;
2617 	BUG_ON(!path);
2618 
2619 	/* FIXME, add redo link to tree so we don't leak on crash */
2620 	key.objectid = inode->i_ino;
2621 	key.offset = (u64)-1;
2622 	key.type = (u8)-1;
2623 
2624 search_again:
2625 	path->leave_spinning = 1;
2626 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2627 	if (ret < 0)
2628 		goto error;
2629 
2630 	if (ret > 0) {
2631 		/* there are no items in the tree for us to truncate, we're
2632 		 * done
2633 		 */
2634 		if (path->slots[0] == 0) {
2635 			ret = 0;
2636 			goto error;
2637 		}
2638 		path->slots[0]--;
2639 	}
2640 
2641 	while (1) {
2642 		fi = NULL;
2643 		leaf = path->nodes[0];
2644 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2645 		found_type = btrfs_key_type(&found_key);
2646 		encoding = 0;
2647 
2648 		if (found_key.objectid != inode->i_ino)
2649 			break;
2650 
2651 		if (found_type < min_type)
2652 			break;
2653 
2654 		item_end = found_key.offset;
2655 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
2656 			fi = btrfs_item_ptr(leaf, path->slots[0],
2657 					    struct btrfs_file_extent_item);
2658 			extent_type = btrfs_file_extent_type(leaf, fi);
2659 			encoding = btrfs_file_extent_compression(leaf, fi);
2660 			encoding |= btrfs_file_extent_encryption(leaf, fi);
2661 			encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2662 
2663 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2664 				item_end +=
2665 				    btrfs_file_extent_num_bytes(leaf, fi);
2666 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2667 				item_end += btrfs_file_extent_inline_len(leaf,
2668 									 fi);
2669 			}
2670 			item_end--;
2671 		}
2672 		if (item_end < new_size) {
2673 			if (found_type == BTRFS_DIR_ITEM_KEY)
2674 				found_type = BTRFS_INODE_ITEM_KEY;
2675 			else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2676 				found_type = BTRFS_EXTENT_DATA_KEY;
2677 			else if (found_type == BTRFS_EXTENT_DATA_KEY)
2678 				found_type = BTRFS_XATTR_ITEM_KEY;
2679 			else if (found_type == BTRFS_XATTR_ITEM_KEY)
2680 				found_type = BTRFS_INODE_REF_KEY;
2681 			else if (found_type)
2682 				found_type--;
2683 			else
2684 				break;
2685 			btrfs_set_key_type(&key, found_type);
2686 			goto next;
2687 		}
2688 		if (found_key.offset >= new_size)
2689 			del_item = 1;
2690 		else
2691 			del_item = 0;
2692 		found_extent = 0;
2693 
2694 		/* FIXME, shrink the extent if the ref count is only 1 */
2695 		if (found_type != BTRFS_EXTENT_DATA_KEY)
2696 			goto delete;
2697 
2698 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2699 			u64 num_dec;
2700 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2701 			if (!del_item && !encoding) {
2702 				u64 orig_num_bytes =
2703 					btrfs_file_extent_num_bytes(leaf, fi);
2704 				extent_num_bytes = new_size -
2705 					found_key.offset + root->sectorsize - 1;
2706 				extent_num_bytes = extent_num_bytes &
2707 					~((u64)root->sectorsize - 1);
2708 				btrfs_set_file_extent_num_bytes(leaf, fi,
2709 							 extent_num_bytes);
2710 				num_dec = (orig_num_bytes -
2711 					   extent_num_bytes);
2712 				if (root->ref_cows && extent_start != 0)
2713 					inode_sub_bytes(inode, num_dec);
2714 				btrfs_mark_buffer_dirty(leaf);
2715 			} else {
2716 				extent_num_bytes =
2717 					btrfs_file_extent_disk_num_bytes(leaf,
2718 									 fi);
2719 				/* FIXME blocksize != 4096 */
2720 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2721 				if (extent_start != 0) {
2722 					found_extent = 1;
2723 					if (root->ref_cows)
2724 						inode_sub_bytes(inode, num_dec);
2725 				}
2726 				root_gen = btrfs_header_generation(leaf);
2727 				root_owner = btrfs_header_owner(leaf);
2728 			}
2729 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2730 			/*
2731 			 * we can't truncate inline items that have had
2732 			 * special encodings
2733 			 */
2734 			if (!del_item &&
2735 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
2736 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
2737 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2738 				u32 size = new_size - found_key.offset;
2739 
2740 				if (root->ref_cows) {
2741 					inode_sub_bytes(inode, item_end + 1 -
2742 							new_size);
2743 				}
2744 				size =
2745 				    btrfs_file_extent_calc_inline_size(size);
2746 				ret = btrfs_truncate_item(trans, root, path,
2747 							  size, 1);
2748 				BUG_ON(ret);
2749 			} else if (root->ref_cows) {
2750 				inode_sub_bytes(inode, item_end + 1 -
2751 						found_key.offset);
2752 			}
2753 		}
2754 delete:
2755 		if (del_item) {
2756 			if (!pending_del_nr) {
2757 				/* no pending yet, add ourselves */
2758 				pending_del_slot = path->slots[0];
2759 				pending_del_nr = 1;
2760 			} else if (pending_del_nr &&
2761 				   path->slots[0] + 1 == pending_del_slot) {
2762 				/* hop on the pending chunk */
2763 				pending_del_nr++;
2764 				pending_del_slot = path->slots[0];
2765 			} else {
2766 				BUG();
2767 			}
2768 		} else {
2769 			break;
2770 		}
2771 		if (found_extent) {
2772 			btrfs_set_path_blocking(path);
2773 			ret = btrfs_free_extent(trans, root, extent_start,
2774 						extent_num_bytes,
2775 						leaf->start, root_owner,
2776 						root_gen, inode->i_ino, 0);
2777 			BUG_ON(ret);
2778 		}
2779 next:
2780 		if (path->slots[0] == 0) {
2781 			if (pending_del_nr)
2782 				goto del_pending;
2783 			btrfs_release_path(root, path);
2784 			if (found_type == BTRFS_INODE_ITEM_KEY)
2785 				break;
2786 			goto search_again;
2787 		}
2788 
2789 		path->slots[0]--;
2790 		if (pending_del_nr &&
2791 		    path->slots[0] + 1 != pending_del_slot) {
2792 			struct btrfs_key debug;
2793 del_pending:
2794 			btrfs_item_key_to_cpu(path->nodes[0], &debug,
2795 					      pending_del_slot);
2796 			ret = btrfs_del_items(trans, root, path,
2797 					      pending_del_slot,
2798 					      pending_del_nr);
2799 			BUG_ON(ret);
2800 			pending_del_nr = 0;
2801 			btrfs_release_path(root, path);
2802 			if (found_type == BTRFS_INODE_ITEM_KEY)
2803 				break;
2804 			goto search_again;
2805 		}
2806 	}
2807 	ret = 0;
2808 error:
2809 	if (pending_del_nr) {
2810 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
2811 				      pending_del_nr);
2812 	}
2813 	btrfs_free_path(path);
2814 	inode->i_sb->s_dirt = 1;
2815 	return ret;
2816 }
2817 
2818 /*
2819  * taken from block_truncate_page, but does cow as it zeros out
2820  * any bytes left in the last page in the file.
2821  */
2822 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2823 {
2824 	struct inode *inode = mapping->host;
2825 	struct btrfs_root *root = BTRFS_I(inode)->root;
2826 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2827 	struct btrfs_ordered_extent *ordered;
2828 	char *kaddr;
2829 	u32 blocksize = root->sectorsize;
2830 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2831 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2832 	struct page *page;
2833 	int ret = 0;
2834 	u64 page_start;
2835 	u64 page_end;
2836 
2837 	if ((offset & (blocksize - 1)) == 0)
2838 		goto out;
2839 
2840 	ret = -ENOMEM;
2841 again:
2842 	page = grab_cache_page(mapping, index);
2843 	if (!page)
2844 		goto out;
2845 
2846 	page_start = page_offset(page);
2847 	page_end = page_start + PAGE_CACHE_SIZE - 1;
2848 
2849 	if (!PageUptodate(page)) {
2850 		ret = btrfs_readpage(NULL, page);
2851 		lock_page(page);
2852 		if (page->mapping != mapping) {
2853 			unlock_page(page);
2854 			page_cache_release(page);
2855 			goto again;
2856 		}
2857 		if (!PageUptodate(page)) {
2858 			ret = -EIO;
2859 			goto out_unlock;
2860 		}
2861 	}
2862 	wait_on_page_writeback(page);
2863 
2864 	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2865 	set_page_extent_mapped(page);
2866 
2867 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
2868 	if (ordered) {
2869 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2870 		unlock_page(page);
2871 		page_cache_release(page);
2872 		btrfs_start_ordered_extent(inode, ordered, 1);
2873 		btrfs_put_ordered_extent(ordered);
2874 		goto again;
2875 	}
2876 
2877 	btrfs_set_extent_delalloc(inode, page_start, page_end);
2878 	ret = 0;
2879 	if (offset != PAGE_CACHE_SIZE) {
2880 		kaddr = kmap(page);
2881 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2882 		flush_dcache_page(page);
2883 		kunmap(page);
2884 	}
2885 	ClearPageChecked(page);
2886 	set_page_dirty(page);
2887 	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2888 
2889 out_unlock:
2890 	unlock_page(page);
2891 	page_cache_release(page);
2892 out:
2893 	return ret;
2894 }
2895 
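/*
 * expanding truncate helper: zero the tail of the old last page and
 * insert hole file extents (disk_bytenr 0) to cover the range between
 * the old i_size and the new size
 */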
2896 int btrfs_cont_expand(struct inode *inode, loff_t size)
2897 {
2898 	struct btrfs_trans_handle *trans;
2899 	struct btrfs_root *root = BTRFS_I(inode)->root;
2900 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2901 	struct extent_map *em;
2902 	u64 mask = root->sectorsize - 1;
2903 	u64 hole_start = (inode->i_size + mask) & ~mask;
2904 	u64 block_end = (size + mask) & ~mask;
2905 	u64 last_byte;
2906 	u64 cur_offset;
2907 	u64 hole_size;
2908 	int err;
2909 
2910 	if (size <= hole_start)
2911 		return 0;
2912 
2913 	err = btrfs_check_metadata_free_space(root);
2914 	if (err)
2915 		return err;
2916 
2917 	btrfs_truncate_page(inode->i_mapping, inode->i_size);
2918 
2919 	while (1) {
2920 		struct btrfs_ordered_extent *ordered;
2921 		btrfs_wait_ordered_range(inode, hole_start,
2922 					 block_end - hole_start);
2923 		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2924 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2925 		if (!ordered)
2926 			break;
2927 		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2928 		btrfs_put_ordered_extent(ordered);
2929 	}
2930 
2931 	trans = btrfs_start_transaction(root, 1);
2932 	btrfs_set_trans_block_group(trans, inode);
2933 
2934 	cur_offset = hole_start;
2935 	while (1) {
2936 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2937 				block_end - cur_offset, 0);
2938 		BUG_ON(IS_ERR(em) || !em);
2939 		last_byte = min(extent_map_end(em), block_end);
2940 		last_byte = (last_byte + mask) & ~mask;
2941 		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2942 			u64 hint_byte = 0;
2943 			hole_size = last_byte - cur_offset;
2944 			err = btrfs_drop_extents(trans, root, inode,
2945 						 cur_offset,
2946 						 cur_offset + hole_size,
2947 						 block_end,
2948 						 cur_offset, &hint_byte);
2949 			if (err)
2950 				break;
2951 			err = btrfs_insert_file_extent(trans, root,
2952 					inode->i_ino, cur_offset, 0,
2953 					0, hole_size, 0, hole_size,
2954 					0, 0, 0);
2955 			btrfs_drop_extent_cache(inode, hole_start,
2956 					last_byte - 1, 0);
2957 		}
2958 		free_extent_map(em);
2959 		cur_offset = last_byte;
2960 		if (err || cur_offset >= block_end)
2961 			break;
2962 	}
2963 
2964 	btrfs_end_transaction(trans, root);
2965 	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2966 	return err;
2967 }
2968 
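/*
 * setattr inode operation.  Size changes are handled here: growing goes
 * through btrfs_cont_expand, and truncating to zero flags the inode so
 * its data gets flushed early; everything else is left to inode_setattr
 * and, for mode changes, btrfs_acl_chmod
 */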
2969 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2970 {
2971 	struct inode *inode = dentry->d_inode;
2972 	int err;
2973 
2974 	err = inode_change_ok(inode, attr);
2975 	if (err)
2976 		return err;
2977 
2978 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2979 		if (attr->ia_size > inode->i_size) {
2980 			err = btrfs_cont_expand(inode, attr->ia_size);
2981 			if (err)
2982 				return err;
2983 		} else if (inode->i_size > 0 &&
2984 			   attr->ia_size == 0) {
2985 
2986 			/* we're truncating a file that used to have good
2987 			 * data down to zero.  Make sure it gets into
2988 			 * the ordered flush list so that any new writes
2989 			 * get down to disk quickly.
2990 			 */
2991 			BTRFS_I(inode)->ordered_data_close = 1;
2992 		}
2993 	}
2994 
2995 	err = inode_setattr(inode, attr);
2996 
2997 	if (!err && ((attr->ia_valid & ATTR_MODE)))
2998 		err = btrfs_acl_chmod(inode);
2999 	return err;
3000 }
3001 
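/*
 * called when the last reference to an unlinked inode goes away: truncate
 * every item the inode owns out of the tree and drop its orphan record
 */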
3002 void btrfs_delete_inode(struct inode *inode)
3003 {
3004 	struct btrfs_trans_handle *trans;
3005 	struct btrfs_root *root = BTRFS_I(inode)->root;
3006 	unsigned long nr;
3007 	int ret;
3008 
3009 	truncate_inode_pages(&inode->i_data, 0);
3010 	if (is_bad_inode(inode)) {
3011 		btrfs_orphan_del(NULL, inode);
3012 		goto no_delete;
3013 	}
3014 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3015 
3016 	btrfs_i_size_write(inode, 0);
3017 	trans = btrfs_join_transaction(root, 1);
3018 
3019 	btrfs_set_trans_block_group(trans, inode);
3020 	ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3021 	if (ret) {
3022 		btrfs_orphan_del(NULL, inode);
3023 		goto no_delete_lock;
3024 	}
3025 
3026 	btrfs_orphan_del(trans, inode);
3027 
3028 	nr = trans->blocks_used;
3029 	clear_inode(inode);
3030 
3031 	btrfs_end_transaction(trans, root);
3032 	btrfs_btree_balance_dirty(root, nr);
3033 	return;
3034 
3035 no_delete_lock:
3036 	nr = trans->blocks_used;
3037 	btrfs_end_transaction(trans, root);
3038 	btrfs_btree_balance_dirty(root, nr);
3039 no_delete:
3040 	clear_inode(inode);
3041 }
3042 
3043 /*
3044  * this returns the key found in the dir entry in the location pointer.
3045  * If no dir entries were found, location->objectid is 0.
3046  */
3047 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3048 			       struct btrfs_key *location)
3049 {
3050 	const char *name = dentry->d_name.name;
3051 	int namelen = dentry->d_name.len;
3052 	struct btrfs_dir_item *di;
3053 	struct btrfs_path *path;
3054 	struct btrfs_root *root = BTRFS_I(dir)->root;
3055 	int ret = 0;
3056 
3057 	path = btrfs_alloc_path();
3058 	BUG_ON(!path);
3059 
3060 	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3061 				    namelen, 0);
3062 	if (IS_ERR(di))
3063 		ret = PTR_ERR(di);
3064 
3065 	if (!di || IS_ERR(di))
3066 		goto out_err;
3067 
3068 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3069 out:
3070 	btrfs_free_path(path);
3071 	return ret;
3072 out_err:
3073 	location->objectid = 0;
3074 	goto out;
3075 }
3076 
3077 /*
3078  * when we hit a tree root in a directory, the btrfs part of the inode
3079  * needs to be changed to reflect the root directory of the tree root.  This
3080  * is kind of like crossing a mount point.
3081  */
3082 static int fixup_tree_root_location(struct btrfs_root *root,
3083 			     struct btrfs_key *location,
3084 			     struct btrfs_root **sub_root,
3085 			     struct dentry *dentry)
3086 {
3087 	struct btrfs_root_item *ri;
3088 
3089 	if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3090 		return 0;
3091 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3092 		return 0;
3093 
3094 	*sub_root = btrfs_read_fs_root(root->fs_info, location,
3095 					dentry->d_name.name,
3096 					dentry->d_name.len);
3097 	if (IS_ERR(*sub_root))
3098 		return PTR_ERR(*sub_root);
3099 
3100 	ri = &(*sub_root)->root_item;
3101 	location->objectid = btrfs_root_dirid(ri);
3102 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3103 	location->offset = 0;
3104 
3105 	return 0;
3106 }
3107 
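/*
 * initialize the btrfs private part of a freshly allocated in-memory
 * inode: counters, extent map and io trees, lists and mutexes
 */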
3108 static noinline void init_btrfs_i(struct inode *inode)
3109 {
3110 	struct btrfs_inode *bi = BTRFS_I(inode);
3111 
3112 	bi->i_acl = BTRFS_ACL_NOT_CACHED;
3113 	bi->i_default_acl = BTRFS_ACL_NOT_CACHED;
3114 
3115 	bi->generation = 0;
3116 	bi->sequence = 0;
3117 	bi->last_trans = 0;
3118 	bi->logged_trans = 0;
3119 	bi->delalloc_bytes = 0;
3120 	bi->reserved_bytes = 0;
3121 	bi->disk_i_size = 0;
3122 	bi->flags = 0;
3123 	bi->index_cnt = (u64)-1;
3124 	bi->last_unlink_trans = 0;
3125 	bi->ordered_data_close = 0;
3126 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3127 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3128 			     inode->i_mapping, GFP_NOFS);
3129 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3130 			     inode->i_mapping, GFP_NOFS);
3131 	INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3132 	INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3133 	btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3134 	mutex_init(&BTRFS_I(inode)->extent_mutex);
3135 	mutex_init(&BTRFS_I(inode)->log_mutex);
3136 }
3137 
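/*
 * iget5_locked init callback: stamp the inode number and root onto a
 * newly allocated inode (btrfs_find_actor below is the matching test)
 */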
3138 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3139 {
3140 	struct btrfs_iget_args *args = p;
3141 	inode->i_ino = args->ino;
3142 	init_btrfs_i(inode);
3143 	BTRFS_I(inode)->root = args->root;
3144 	btrfs_set_inode_space_info(args->root, inode);
3145 	return 0;
3146 }
3147 
3148 static int btrfs_find_actor(struct inode *inode, void *opaque)
3149 {
3150 	struct btrfs_iget_args *args = opaque;
3151 	return args->ino == inode->i_ino &&
3152 		args->root == BTRFS_I(inode)->root;
3153 }
3154 
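/*
 * look in the inode cache for an inode with this objectid and root
 * without reading anything from disk; returns NULL on a cache miss
 */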
3155 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3156 			    struct btrfs_root *root, int wait)
3157 {
3158 	struct inode *inode;
3159 	struct btrfs_iget_args args;
3160 	args.ino = objectid;
3161 	args.root = root;
3162 
3163 	if (wait) {
3164 		inode = ilookup5(s, objectid, btrfs_find_actor,
3165 				 (void *)&args);
3166 	} else {
3167 		inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3168 					(void *)&args);
3169 	}
3170 	return inode;
3171 }
3172 
3173 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3174 				struct btrfs_root *root)
3175 {
3176 	struct inode *inode;
3177 	struct btrfs_iget_args args;
3178 	args.ino = objectid;
3179 	args.root = root;
3180 
3181 	inode = iget5_locked(s, objectid, btrfs_find_actor,
3182 			     btrfs_init_locked_inode,
3183 			     (void *)&args);
3184 	return inode;
3185 }
3186 
3187 /* Get an inode object given its location and corresponding root.
3188  * Returns in *is_new whether the inode was read from disk
3189  */
3190 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3191 			 struct btrfs_root *root, int *is_new)
3192 {
3193 	struct inode *inode;
3194 
3195 	inode = btrfs_iget_locked(s, location->objectid, root);
3196 	if (!inode)
3197 		return ERR_PTR(-EACCES);
3198 
3199 	if (inode->i_state & I_NEW) {
3200 		BTRFS_I(inode)->root = root;
3201 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3202 		btrfs_read_locked_inode(inode);
3203 		unlock_new_inode(inode);
3204 		if (is_new)
3205 			*is_new = 1;
3206 	} else {
3207 		if (is_new)
3208 			*is_new = 0;
3209 	}
3210 
3211 	return inode;
3212 }
3213 
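/*
 * look up the inode a dentry refers to, crossing into the subvolume's
 * tree when the directory entry points at a tree root
 */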
3214 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3215 {
3216 	struct inode *inode;
3217 	struct btrfs_inode *bi = BTRFS_I(dir);
3218 	struct btrfs_root *root = bi->root;
3219 	struct btrfs_root *sub_root = root;
3220 	struct btrfs_key location;
3221 	int ret, new;
3222 
3223 	if (dentry->d_name.len > BTRFS_NAME_LEN)
3224 		return ERR_PTR(-ENAMETOOLONG);
3225 
3226 	ret = btrfs_inode_by_name(dir, dentry, &location);
3227 
3228 	if (ret < 0)
3229 		return ERR_PTR(ret);
3230 
3231 	inode = NULL;
3232 	if (location.objectid) {
3233 		ret = fixup_tree_root_location(root, &location, &sub_root,
3234 						dentry);
3235 		if (ret < 0)
3236 			return ERR_PTR(ret);
3237 		if (ret > 0)
3238 			return ERR_PTR(-ENOENT);
3239 		inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3240 		if (IS_ERR(inode))
3241 			return ERR_CAST(inode);
3242 	}
3243 	return inode;
3244 }
3245 
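/*
 * lookup inode operation: thin wrapper around btrfs_lookup_dentry that
 * splices the result into the dcache
 */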
3246 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3247 				   struct nameidata *nd)
3248 {
3249 	struct inode *inode;
3250 
3251 	if (dentry->d_name.len > BTRFS_NAME_LEN)
3252 		return ERR_PTR(-ENAMETOOLONG);
3253 
3254 	inode = btrfs_lookup_dentry(dir, dentry);
3255 	if (IS_ERR(inode))
3256 		return ERR_CAST(inode);
3257 
3258 	return d_splice_alias(inode, dentry);
3259 }
3260 
3261 static unsigned char btrfs_filetype_table[] = {
3262 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3263 };
3264 
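/*
 * readdir: walk the DIR_INDEX items (DIR_ITEM for the tree root) starting
 * at f_pos and feed each entry to filldir, skipping references to our own
 * snapshot
 */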
3265 static int btrfs_real_readdir(struct file *filp, void *dirent,
3266 			      filldir_t filldir)
3267 {
3268 	struct inode *inode = filp->f_dentry->d_inode;
3269 	struct btrfs_root *root = BTRFS_I(inode)->root;
3270 	struct btrfs_item *item;
3271 	struct btrfs_dir_item *di;
3272 	struct btrfs_key key;
3273 	struct btrfs_key found_key;
3274 	struct btrfs_path *path;
3275 	int ret;
3276 	u32 nritems;
3277 	struct extent_buffer *leaf;
3278 	int slot;
3279 	int advance;
3280 	unsigned char d_type;
3281 	int over = 0;
3282 	u32 di_cur;
3283 	u32 di_total;
3284 	u32 di_len;
3285 	int key_type = BTRFS_DIR_INDEX_KEY;
3286 	char tmp_name[32];
3287 	char *name_ptr;
3288 	int name_len;
3289 
3290 	/* FIXME, use a real flag for deciding about the key type */
3291 	if (root->fs_info->tree_root == root)
3292 		key_type = BTRFS_DIR_ITEM_KEY;
3293 
3294 	/* special case for "." */
3295 	if (filp->f_pos == 0) {
3296 		over = filldir(dirent, ".", 1,
3297 			       1, inode->i_ino,
3298 			       DT_DIR);
3299 		if (over)
3300 			return 0;
3301 		filp->f_pos = 1;
3302 	}
3303 	/* special case for .., just use the back ref */
3304 	if (filp->f_pos == 1) {
3305 		u64 pino = parent_ino(filp->f_path.dentry);
3306 		over = filldir(dirent, "..", 2,
3307 			       2, pino, DT_DIR);
3308 		if (over)
3309 			return 0;
3310 		filp->f_pos = 2;
3311 	}
3312 	path = btrfs_alloc_path();
3313 	path->reada = 2;
3314 
3315 	btrfs_set_key_type(&key, key_type);
3316 	key.offset = filp->f_pos;
3317 	key.objectid = inode->i_ino;
3318 
3319 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3320 	if (ret < 0)
3321 		goto err;
3322 	advance = 0;
3323 
3324 	while (1) {
3325 		leaf = path->nodes[0];
3326 		nritems = btrfs_header_nritems(leaf);
3327 		slot = path->slots[0];
3328 		if (advance || slot >= nritems) {
3329 			if (slot >= nritems - 1) {
3330 				ret = btrfs_next_leaf(root, path);
3331 				if (ret)
3332 					break;
3333 				leaf = path->nodes[0];
3334 				nritems = btrfs_header_nritems(leaf);
3335 				slot = path->slots[0];
3336 			} else {
3337 				slot++;
3338 				path->slots[0]++;
3339 			}
3340 		}
3341 
3342 		advance = 1;
3343 		item = btrfs_item_nr(leaf, slot);
3344 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3345 
3346 		if (found_key.objectid != key.objectid)
3347 			break;
3348 		if (btrfs_key_type(&found_key) != key_type)
3349 			break;
3350 		if (found_key.offset < filp->f_pos)
3351 			continue;
3352 
3353 		filp->f_pos = found_key.offset;
3354 
3355 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3356 		di_cur = 0;
3357 		di_total = btrfs_item_size(leaf, item);
3358 
3359 		while (di_cur < di_total) {
3360 			struct btrfs_key location;
3361 
3362 			name_len = btrfs_dir_name_len(leaf, di);
3363 			if (name_len <= sizeof(tmp_name)) {
3364 				name_ptr = tmp_name;
3365 			} else {
3366 				name_ptr = kmalloc(name_len, GFP_NOFS);
3367 				if (!name_ptr) {
3368 					ret = -ENOMEM;
3369 					goto err;
3370 				}
3371 			}
3372 			read_extent_buffer(leaf, name_ptr,
3373 					   (unsigned long)(di + 1), name_len);
3374 
3375 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3376 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
3377 
3378 			/* is this a reference to our own snapshot? If so
3379 			 * skip it
3380 			 */
3381 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
3382 			    location.objectid == root->root_key.objectid) {
3383 				over = 0;
3384 				goto skip;
3385 			}
3386 			over = filldir(dirent, name_ptr, name_len,
3387 				       found_key.offset, location.objectid,
3388 				       d_type);
3389 
3390 skip:
3391 			if (name_ptr != tmp_name)
3392 				kfree(name_ptr);
3393 
3394 			if (over)
3395 				goto nopos;
3396 			di_len = btrfs_dir_name_len(leaf, di) +
3397 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3398 			di_cur += di_len;
3399 			di = (struct btrfs_dir_item *)((char *)di + di_len);
3400 		}
3401 	}
3402 
3403 	/* Reached end of directory/root. Bump pos past the last item. */
3404 	if (key_type == BTRFS_DIR_INDEX_KEY)
3405 		filp->f_pos = INT_LIMIT(off_t);
3406 	else
3407 		filp->f_pos++;
3408 nopos:
3409 	ret = 0;
3410 err:
3411 	btrfs_free_path(path);
3412 	return ret;
3413 }
3414 
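/*
 * dirty inodes are persisted by the transaction code, so async writeback
 * is a no-op here; a synchronous write_inode forces a transaction commit
 */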
3415 int btrfs_write_inode(struct inode *inode, int wait)
3416 {
3417 	struct btrfs_root *root = BTRFS_I(inode)->root;
3418 	struct btrfs_trans_handle *trans;
3419 	int ret = 0;
3420 
3421 	if (root->fs_info->btree_inode == inode)
3422 		return 0;
3423 
3424 	if (wait) {
3425 		trans = btrfs_join_transaction(root, 1);
3426 		btrfs_set_trans_block_group(trans, inode);
3427 		ret = btrfs_commit_transaction(trans, root);
3428 	}
3429 	return ret;
3430 }
3431 
3432 /*
3433  * This is somewhat expensive, updating the tree every time the
3434  * inode changes.  But, it is most likely to find the inode in cache.
3435  * FIXME, needs more benchmarking...there are no reasons other than performance
3436  * to keep or drop this code.
3437  */
3438 void btrfs_dirty_inode(struct inode *inode)
3439 {
3440 	struct btrfs_root *root = BTRFS_I(inode)->root;
3441 	struct btrfs_trans_handle *trans;
3442 
3443 	trans = btrfs_join_transaction(root, 1);
3444 	btrfs_set_trans_block_group(trans, inode);
3445 	btrfs_update_inode(trans, root, inode);
3446 	btrfs_end_transaction(trans, root);
3447 }
3448 
3449 /*
3450  * find the highest existing sequence number in a directory
3451  * and then set the in-memory index_cnt variable to reflect
3452  * free sequence numbers
3453  */
3454 static int btrfs_set_inode_index_count(struct inode *inode)
3455 {
3456 	struct btrfs_root *root = BTRFS_I(inode)->root;
3457 	struct btrfs_key key, found_key;
3458 	struct btrfs_path *path;
3459 	struct extent_buffer *leaf;
3460 	int ret;
3461 
3462 	key.objectid = inode->i_ino;
3463 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3464 	key.offset = (u64)-1;
3465 
3466 	path = btrfs_alloc_path();
3467 	if (!path)
3468 		return -ENOMEM;
3469 
3470 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3471 	if (ret < 0)
3472 		goto out;
3473 	/* FIXME: we should be able to handle this */
3474 	if (ret == 0)
3475 		goto out;
3476 	ret = 0;
3477 
3478 	/*
3479 	 * MAGIC NUMBER EXPLANATION:
3480 	 * since we search a directory based on f_pos we have to start at 2
3481 	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3482 	 * else has to start at 2
3483 	 */
3484 	if (path->slots[0] == 0) {
3485 		BTRFS_I(inode)->index_cnt = 2;
3486 		goto out;
3487 	}
3488 
3489 	path->slots[0]--;
3490 
3491 	leaf = path->nodes[0];
3492 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3493 
3494 	if (found_key.objectid != inode->i_ino ||
3495 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3496 		BTRFS_I(inode)->index_cnt = 2;
3497 		goto out;
3498 	}
3499 
3500 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3501 out:
3502 	btrfs_free_path(path);
3503 	return ret;
3504 }
3505 
3506 /*
3507  * helper to find a free sequence number in a given directory.  This current
3508  * code is very simple, later versions will do smarter things in the btree
3509  */
3510 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3511 {
3512 	int ret = 0;
3513 
3514 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3515 		ret = btrfs_set_inode_index_count(dir);
3516 		if (ret)
3517 			return ret;
3518 	}
3519 
3520 	*index = BTRFS_I(dir)->index_cnt;
3521 	BTRFS_I(dir)->index_cnt++;
3522 
3523 	return ret;
3524 }
3525 
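/*
 * create a new in-memory inode and insert its inode item together with
 * the first inode ref (the name in the parent directory) in one batch
 */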
3526 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3527 				     struct btrfs_root *root,
3528 				     struct inode *dir,
3529 				     const char *name, int name_len,
3530 				     u64 ref_objectid, u64 objectid,
3531 				     u64 alloc_hint, int mode, u64 *index)
3532 {
3533 	struct inode *inode;
3534 	struct btrfs_inode_item *inode_item;
3535 	struct btrfs_key *location;
3536 	struct btrfs_path *path;
3537 	struct btrfs_inode_ref *ref;
3538 	struct btrfs_key key[2];
3539 	u32 sizes[2];
3540 	unsigned long ptr;
3541 	int ret;
3542 	int owner;
3543 
3544 	path = btrfs_alloc_path();
3545 	BUG_ON(!path);
3546 
3547 	inode = new_inode(root->fs_info->sb);
3548 	if (!inode)
3549 		return ERR_PTR(-ENOMEM);
3550 
3551 	if (dir) {
3552 		ret = btrfs_set_inode_index(dir, index);
3553 		if (ret) {
3554 			iput(inode);
3555 			return ERR_PTR(ret);
3556 		}
3557 	}
3558 	/*
3559 	 * index_cnt is ignored for everything but a dir,
3560 	 * btrfs_set_inode_index_count has an explanation for the magic
3561 	 * number
3562 	 */
3563 	init_btrfs_i(inode);
3564 	BTRFS_I(inode)->index_cnt = 2;
3565 	BTRFS_I(inode)->root = root;
3566 	BTRFS_I(inode)->generation = trans->transid;
3567 	btrfs_set_inode_space_info(root, inode);
3568 
3569 	if (mode & S_IFDIR)
3570 		owner = 0;
3571 	else
3572 		owner = 1;
3573 	BTRFS_I(inode)->block_group =
3574 			btrfs_find_block_group(root, 0, alloc_hint, owner);
3575 	if ((mode & S_IFREG)) {
3576 		if (btrfs_test_opt(root, NODATASUM))
3577 			btrfs_set_flag(inode, NODATASUM);
3578 		if (btrfs_test_opt(root, NODATACOW))
3579 			btrfs_set_flag(inode, NODATACOW);
3580 	}
3581 
3582 	key[0].objectid = objectid;
3583 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3584 	key[0].offset = 0;
3585 
3586 	key[1].objectid = objectid;
3587 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3588 	key[1].offset = ref_objectid;
3589 
3590 	sizes[0] = sizeof(struct btrfs_inode_item);
3591 	sizes[1] = name_len + sizeof(*ref);
3592 
3593 	path->leave_spinning = 1;
3594 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3595 	if (ret != 0)
3596 		goto fail;
3597 
3598 	if (objectid > root->highest_inode)
3599 		root->highest_inode = objectid;
3600 
3601 	inode->i_uid = current_fsuid();
3602 
3603 	if (dir && (dir->i_mode & S_ISGID)) {
3604 		inode->i_gid = dir->i_gid;
3605 		if (S_ISDIR(mode))
3606 			mode |= S_ISGID;
3607 	} else
3608 		inode->i_gid = current_fsgid();
3609 
3610 	inode->i_mode = mode;
3611 	inode->i_ino = objectid;
3612 	inode_set_bytes(inode, 0);
3613 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3614 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3615 				  struct btrfs_inode_item);
3616 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
3617 
3618 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3619 			     struct btrfs_inode_ref);
3620 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3621 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3622 	ptr = (unsigned long)(ref + 1);
3623 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
3624 
3625 	btrfs_mark_buffer_dirty(path->nodes[0]);
3626 	btrfs_free_path(path);
3627 
3628 	location = &BTRFS_I(inode)->location;
3629 	location->objectid = objectid;
3630 	location->offset = 0;
3631 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3632 
3633 	insert_inode_hash(inode);
3634 	return inode;
3635 fail:
3636 	if (dir)
3637 		BTRFS_I(dir)->index_cnt--;
3638 	btrfs_free_path(path);
3639 	iput(inode);
3640 	return ERR_PTR(ret);
3641 }
3642 
3643 static inline u8 btrfs_inode_type(struct inode *inode)
3644 {
3645 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3646 }
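
/*
 * Example (illustration only): for a directory inode, i_mode & S_IFMT is
 * S_IFDIR, so the table lookup above yields BTRFS_FT_DIR; a regular file
 * maps to BTRFS_FT_REG_FILE.  This is the type byte that btrfs_add_link()
 * below records in the directory items it inserts.
 */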
3647 
3648 /*
3649  * utility function to add 'inode' into 'parent_inode' with
3650  * a give name and a given sequence number.
3651  * if 'add_backref' is true, also insert a backref from the
3652  * inode to the parent directory.
3653  */
3654 int btrfs_add_link(struct btrfs_trans_handle *trans,
3655 		   struct inode *parent_inode, struct inode *inode,
3656 		   const char *name, int name_len, int add_backref, u64 index)
3657 {
3658 	int ret;
3659 	struct btrfs_key key;
3660 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3661 
3662 	key.objectid = inode->i_ino;
3663 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3664 	key.offset = 0;
3665 
3666 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
3667 				    parent_inode->i_ino,
3668 				    &key, btrfs_inode_type(inode),
3669 				    index);
3670 	if (ret == 0) {
3671 		if (add_backref) {
3672 			ret = btrfs_insert_inode_ref(trans, root,
3673 						     name, name_len,
3674 						     inode->i_ino,
3675 						     parent_inode->i_ino,
3676 						     index);
3677 		}
3678 		btrfs_i_size_write(parent_inode, parent_inode->i_size +
3679 				   name_len * 2);
3680 		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3681 		ret = btrfs_update_inode(trans, root, parent_inode);
3682 	}
3683 	return ret;
3684 }
3685 
3686 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3687 			    struct dentry *dentry, struct inode *inode,
3688 			    int backref, u64 index)
3689 {
3690 	int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3691 				 inode, dentry->d_name.name,
3692 				 dentry->d_name.len, backref, index);
3693 	if (!err) {
3694 		d_instantiate(dentry, inode);
3695 		return 0;
3696 	}
3697 	if (err > 0)
3698 		err = -EEXIST;
3699 	return err;
3700 }
3701 
3702 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3703 			int mode, dev_t rdev)
3704 {
3705 	struct btrfs_trans_handle *trans;
3706 	struct btrfs_root *root = BTRFS_I(dir)->root;
3707 	struct inode *inode = NULL;
3708 	int err;
3709 	int drop_inode = 0;
3710 	u64 objectid;
3711 	unsigned long nr = 0;
3712 	u64 index = 0;
3713 
3714 	if (!new_valid_dev(rdev))
3715 		return -EINVAL;
3716 
3717 	err = btrfs_check_metadata_free_space(root);
3718 	if (err)
3719 		goto fail;
3720 
3721 	trans = btrfs_start_transaction(root, 1);
3722 	btrfs_set_trans_block_group(trans, dir);
3723 
3724 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3725 	if (err) {
3726 		err = -ENOSPC;
3727 		goto out_unlock;
3728 	}
3729 
3730 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3731 				dentry->d_name.len,
3732 				dentry->d_parent->d_inode->i_ino, objectid,
3733 				BTRFS_I(dir)->block_group, mode, &index);
3734 	err = PTR_ERR(inode);
3735 	if (IS_ERR(inode))
3736 		goto out_unlock;
3737 
3738 	err = btrfs_init_inode_security(inode, dir);
3739 	if (err) {
3740 		drop_inode = 1;
3741 		goto out_unlock;
3742 	}
3743 
3744 	btrfs_set_trans_block_group(trans, inode);
3745 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3746 	if (err)
3747 		drop_inode = 1;
3748 	else {
3749 		inode->i_op = &btrfs_special_inode_operations;
3750 		init_special_inode(inode, inode->i_mode, rdev);
3751 		btrfs_update_inode(trans, root, inode);
3752 	}
3753 	dir->i_sb->s_dirt = 1;
3754 	btrfs_update_inode_block_group(trans, inode);
3755 	btrfs_update_inode_block_group(trans, dir);
3756 out_unlock:
3757 	nr = trans->blocks_used;
3758 	btrfs_end_transaction_throttle(trans, root);
3759 fail:
3760 	if (drop_inode) {
3761 		inode_dec_link_count(inode);
3762 		iput(inode);
3763 	}
3764 	btrfs_btree_balance_dirty(root, nr);
3765 	return err;
3766 }
3767 
3768 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3769 			int mode, struct nameidata *nd)
3770 {
3771 	struct btrfs_trans_handle *trans;
3772 	struct btrfs_root *root = BTRFS_I(dir)->root;
3773 	struct inode *inode = NULL;
3774 	int err;
3775 	int drop_inode = 0;
3776 	unsigned long nr = 0;
3777 	u64 objectid;
3778 	u64 index = 0;
3779 
3780 	err = btrfs_check_metadata_free_space(root);
3781 	if (err)
3782 		goto fail;
3783 	trans = btrfs_start_transaction(root, 1);
3784 	btrfs_set_trans_block_group(trans, dir);
3785 
3786 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3787 	if (err) {
3788 		err = -ENOSPC;
3789 		goto out_unlock;
3790 	}
3791 
3792 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3793 				dentry->d_name.len,
3794 				dentry->d_parent->d_inode->i_ino,
3795 				objectid, BTRFS_I(dir)->block_group, mode,
3796 				&index);
3797 	err = PTR_ERR(inode);
3798 	if (IS_ERR(inode))
3799 		goto out_unlock;
3800 
3801 	err = btrfs_init_inode_security(inode, dir);
3802 	if (err) {
3803 		drop_inode = 1;
3804 		goto out_unlock;
3805 	}
3806 
3807 	btrfs_set_trans_block_group(trans, inode);
3808 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3809 	if (err)
3810 		drop_inode = 1;
3811 	else {
3812 		inode->i_mapping->a_ops = &btrfs_aops;
3813 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3814 		inode->i_fop = &btrfs_file_operations;
3815 		inode->i_op = &btrfs_file_inode_operations;
3816 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3817 	}
3818 	dir->i_sb->s_dirt = 1;
3819 	btrfs_update_inode_block_group(trans, inode);
3820 	btrfs_update_inode_block_group(trans, dir);
3821 out_unlock:
3822 	nr = trans->blocks_used;
3823 	btrfs_end_transaction_throttle(trans, root);
3824 fail:
3825 	if (drop_inode) {
3826 		inode_dec_link_count(inode);
3827 		iput(inode);
3828 	}
3829 	btrfs_btree_balance_dirty(root, nr);
3830 	return err;
3831 }
3832 
3833 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3834 		      struct dentry *dentry)
3835 {
3836 	struct btrfs_trans_handle *trans;
3837 	struct btrfs_root *root = BTRFS_I(dir)->root;
3838 	struct inode *inode = old_dentry->d_inode;
3839 	u64 index;
3840 	unsigned long nr = 0;
3841 	int err;
3842 	int drop_inode = 0;
3843 
3844 	if (inode->i_nlink == 0)
3845 		return -ENOENT;
3846 
3847 	btrfs_inc_nlink(inode);
3848 	err = btrfs_check_metadata_free_space(root);
3849 	if (err)
3850 		goto fail;
3851 	err = btrfs_set_inode_index(dir, &index);
3852 	if (err)
3853 		goto fail;
3854 
3855 	trans = btrfs_start_transaction(root, 1);
3856 
3857 	btrfs_set_trans_block_group(trans, dir);
3858 	atomic_inc(&inode->i_count);
3859 
3860 	err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3861 
3862 	if (err)
3863 		drop_inode = 1;
3864 
3865 	dir->i_sb->s_dirt = 1;
3866 	btrfs_update_inode_block_group(trans, dir);
3867 	err = btrfs_update_inode(trans, root, inode);
3868 
3869 	if (err)
3870 		drop_inode = 1;
3871 
3872 	nr = trans->blocks_used;
3873 
3874 	btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3875 	btrfs_end_transaction_throttle(trans, root);
3876 fail:
3877 	if (drop_inode) {
3878 		inode_dec_link_count(inode);
3879 		iput(inode);
3880 	}
3881 	btrfs_btree_balance_dirty(root, nr);
3882 	return err;
3883 }
3884 
3885 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3886 {
3887 	struct inode *inode = NULL;
3888 	struct btrfs_trans_handle *trans;
3889 	struct btrfs_root *root = BTRFS_I(dir)->root;
3890 	int err = 0;
3891 	int drop_on_err = 0;
3892 	u64 objectid = 0;
3893 	u64 index = 0;
3894 	unsigned long nr = 1;
3895 
3896 	err = btrfs_check_metadata_free_space(root);
3897 	if (err)
3898 		goto out_unlock;
3899 
3900 	trans = btrfs_start_transaction(root, 1);
3901 	if (IS_ERR(trans)) {
3902 		err = PTR_ERR(trans);
3903 		goto out_unlock;
3904 	}
3905 
3906 	btrfs_set_trans_block_group(trans, dir);
3907 
3908 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3909 	if (err) {
3910 		err = -ENOSPC;
3911 		goto out_unlock;
3912 	}
3913 
3914 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3915 				dentry->d_name.len,
3916 				dentry->d_parent->d_inode->i_ino, objectid,
3917 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
3918 				&index);
3919 	if (IS_ERR(inode)) {
3920 		err = PTR_ERR(inode);
3921 		goto out_fail;
3922 	}
3923 
3924 	drop_on_err = 1;
3925 
3926 	err = btrfs_init_inode_security(inode, dir);
3927 	if (err)
3928 		goto out_fail;
3929 
3930 	inode->i_op = &btrfs_dir_inode_operations;
3931 	inode->i_fop = &btrfs_dir_file_operations;
3932 	btrfs_set_trans_block_group(trans, inode);
3933 
3934 	btrfs_i_size_write(inode, 0);
3935 	err = btrfs_update_inode(trans, root, inode);
3936 	if (err)
3937 		goto out_fail;
3938 
3939 	err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3940 				 inode, dentry->d_name.name,
3941 				 dentry->d_name.len, 0, index);
3942 	if (err)
3943 		goto out_fail;
3944 
3945 	d_instantiate(dentry, inode);
3946 	drop_on_err = 0;
3947 	dir->i_sb->s_dirt = 1;
3948 	btrfs_update_inode_block_group(trans, inode);
3949 	btrfs_update_inode_block_group(trans, dir);
3950 
3951 out_fail:
3952 	nr = trans->blocks_used;
3953 	btrfs_end_transaction_throttle(trans, root);
3954 
3955 out_unlock:
3956 	if (drop_on_err)
3957 		iput(inode);
3958 	btrfs_btree_balance_dirty(root, nr);
3959 	return err;
3960 }
3961 
3962 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
3963  * and an extent that you want to insert, deal with overlap and insert
3964  * the new extent into the tree.
3965  */
3966 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3967 				struct extent_map *existing,
3968 				struct extent_map *em,
3969 				u64 map_start, u64 map_len)
3970 {
3971 	u64 start_diff;
3972 
3973 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3974 	start_diff = map_start - em->start;
3975 	em->start = map_start;
3976 	em->len = map_len;
3977 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3978 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3979 		em->block_start += start_diff;
3980 		em->block_len -= start_diff;
3981 	}
3982 	return add_extent_mapping(em_tree, em);
3983 }
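
/*
 * Worked example (illustration only, not from the original source): if the
 * em to insert covers [0, 16k) with block_start at 1M, but only the range
 * starting at 4k is still unmapped, merge_extent_mapping() is called with
 * map_start 4k and map_len one sectorsize (4k).  start_diff is 4k, so em
 * is trimmed to [4k, 8k) with block_start 1M + 4k, and that trimmed
 * mapping is what gets added to the tree.
 */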
3984 
3985 static noinline int uncompress_inline(struct btrfs_path *path,
3986 				      struct inode *inode, struct page *page,
3987 				      size_t pg_offset, u64 extent_offset,
3988 				      struct btrfs_file_extent_item *item)
3989 {
3990 	int ret;
3991 	struct extent_buffer *leaf = path->nodes[0];
3992 	char *tmp;
3993 	size_t max_size;
3994 	unsigned long inline_size;
3995 	unsigned long ptr;
3996 
3997 	WARN_ON(pg_offset != 0);
3998 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
3999 	inline_size = btrfs_file_extent_inline_item_len(leaf,
4000 					btrfs_item_nr(leaf, path->slots[0]));
4001 	tmp = kmalloc(inline_size, GFP_NOFS);
4002 	ptr = btrfs_file_extent_inline_start(item);
4003 
4004 	read_extent_buffer(leaf, tmp, ptr, inline_size);
4005 
4006 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4007 	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4008 				    inline_size, max_size);
4009 	if (ret) {
4010 		char *kaddr = kmap_atomic(page, KM_USER0);
4011 		unsigned long copy_size = min_t(u64,
4012 				  PAGE_CACHE_SIZE - pg_offset,
4013 				  max_size - extent_offset);
4014 		memset(kaddr + pg_offset, 0, copy_size);
4015 		kunmap_atomic(kaddr, KM_USER0);
4016 	}
4017 	kfree(tmp);
4018 	return 0;
4019 }
4020 
4021 /*
4022  * a bit scary, this does extent mapping from logical file offset to the disk.
4023  * the ugly parts come from merging extents from the disk with the in-ram
4024  * representation.  This gets more complex because of the data=ordered code,
4025  * where the in-ram extents might be locked pending data=ordered completion.
4026  *
4027  * This also copies inline extents directly into the page.
4028  */
4029 
4030 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4031 				    size_t pg_offset, u64 start, u64 len,
4032 				    int create)
4033 {
4034 	int ret;
4035 	int err = 0;
4036 	u64 bytenr;
4037 	u64 extent_start = 0;
4038 	u64 extent_end = 0;
4039 	u64 objectid = inode->i_ino;
4040 	u32 found_type;
4041 	struct btrfs_path *path = NULL;
4042 	struct btrfs_root *root = BTRFS_I(inode)->root;
4043 	struct btrfs_file_extent_item *item;
4044 	struct extent_buffer *leaf;
4045 	struct btrfs_key found_key;
4046 	struct extent_map *em = NULL;
4047 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4048 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4049 	struct btrfs_trans_handle *trans = NULL;
4050 	int compressed;
4051 
4052 again:
4053 	spin_lock(&em_tree->lock);
4054 	em = lookup_extent_mapping(em_tree, start, len);
4055 	if (em)
4056 		em->bdev = root->fs_info->fs_devices->latest_bdev;
4057 	spin_unlock(&em_tree->lock);
4058 
4059 	if (em) {
4060 		if (em->start > start || em->start + em->len <= start)
4061 			free_extent_map(em);
4062 		else if (em->block_start == EXTENT_MAP_INLINE && page)
4063 			free_extent_map(em);
4064 		else
4065 			goto out;
4066 	}
4067 	em = alloc_extent_map(GFP_NOFS);
4068 	if (!em) {
4069 		err = -ENOMEM;
4070 		goto out;
4071 	}
4072 	em->bdev = root->fs_info->fs_devices->latest_bdev;
4073 	em->start = EXTENT_MAP_HOLE;
4074 	em->orig_start = EXTENT_MAP_HOLE;
4075 	em->len = (u64)-1;
4076 	em->block_len = (u64)-1;
4077 
4078 	if (!path) {
4079 		path = btrfs_alloc_path();
4080 		BUG_ON(!path);
4081 	}
4082 
4083 	ret = btrfs_lookup_file_extent(trans, root, path,
4084 				       objectid, start, trans != NULL);
4085 	if (ret < 0) {
4086 		err = ret;
4087 		goto out;
4088 	}
4089 
4090 	if (ret != 0) {
4091 		if (path->slots[0] == 0)
4092 			goto not_found;
4093 		path->slots[0]--;
4094 	}
4095 
4096 	leaf = path->nodes[0];
4097 	item = btrfs_item_ptr(leaf, path->slots[0],
4098 			      struct btrfs_file_extent_item);
4099 	/* are we inside the extent that was found? */
4100 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4101 	found_type = btrfs_key_type(&found_key);
4102 	if (found_key.objectid != objectid ||
4103 	    found_type != BTRFS_EXTENT_DATA_KEY) {
4104 		goto not_found;
4105 	}
4106 
4107 	found_type = btrfs_file_extent_type(leaf, item);
4108 	extent_start = found_key.offset;
4109 	compressed = btrfs_file_extent_compression(leaf, item);
4110 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4111 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4112 		extent_end = extent_start +
4113 		       btrfs_file_extent_num_bytes(leaf, item);
4114 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4115 		size_t size;
4116 		size = btrfs_file_extent_inline_len(leaf, item);
4117 		extent_end = (extent_start + size + root->sectorsize - 1) &
4118 			~((u64)root->sectorsize - 1);
4119 	}
4120 
4121 	if (start >= extent_end) {
4122 		path->slots[0]++;
4123 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4124 			ret = btrfs_next_leaf(root, path);
4125 			if (ret < 0) {
4126 				err = ret;
4127 				goto out;
4128 			}
4129 			if (ret > 0)
4130 				goto not_found;
4131 			leaf = path->nodes[0];
4132 		}
4133 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4134 		if (found_key.objectid != objectid ||
4135 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
4136 			goto not_found;
4137 		if (start + len <= found_key.offset)
4138 			goto not_found;
4139 		em->start = start;
4140 		em->len = found_key.offset - start;
4141 		goto not_found_em;
4142 	}
4143 
4144 	if (found_type == BTRFS_FILE_EXTENT_REG ||
4145 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4146 		em->start = extent_start;
4147 		em->len = extent_end - extent_start;
4148 		em->orig_start = extent_start -
4149 				 btrfs_file_extent_offset(leaf, item);
4150 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4151 		if (bytenr == 0) {
4152 			em->block_start = EXTENT_MAP_HOLE;
4153 			goto insert;
4154 		}
4155 		if (compressed) {
4156 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4157 			em->block_start = bytenr;
4158 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4159 									 item);
4160 		} else {
4161 			bytenr += btrfs_file_extent_offset(leaf, item);
4162 			em->block_start = bytenr;
4163 			em->block_len = em->len;
4164 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4165 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4166 		}
4167 		goto insert;
4168 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4169 		unsigned long ptr;
4170 		char *map;
4171 		size_t size;
4172 		size_t extent_offset;
4173 		size_t copy_size;
4174 
4175 		em->block_start = EXTENT_MAP_INLINE;
4176 		if (!page || create) {
4177 			em->start = extent_start;
4178 			em->len = extent_end - extent_start;
4179 			goto out;
4180 		}
4181 
4182 		size = btrfs_file_extent_inline_len(leaf, item);
4183 		extent_offset = page_offset(page) + pg_offset - extent_start;
4184 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4185 				size - extent_offset);
4186 		em->start = extent_start + extent_offset;
4187 		em->len = (copy_size + root->sectorsize - 1) &
4188 			~((u64)root->sectorsize - 1);
4189 		em->orig_start = EXTENT_MAP_INLINE;
4190 		if (compressed)
4191 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4192 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4193 		if (create == 0 && !PageUptodate(page)) {
4194 			if (btrfs_file_extent_compression(leaf, item) ==
4195 			    BTRFS_COMPRESS_ZLIB) {
4196 				ret = uncompress_inline(path, inode, page,
4197 							pg_offset,
4198 							extent_offset, item);
4199 				BUG_ON(ret);
4200 			} else {
4201 				map = kmap(page);
4202 				read_extent_buffer(leaf, map + pg_offset, ptr,
4203 						   copy_size);
4204 				kunmap(page);
4205 			}
4206 			flush_dcache_page(page);
4207 		} else if (create && PageUptodate(page)) {
4208 			if (!trans) {
4209 				kunmap(page);
4210 				free_extent_map(em);
4211 				em = NULL;
4212 				btrfs_release_path(root, path);
4213 				trans = btrfs_join_transaction(root, 1);
4214 				goto again;
4215 			}
4216 			map = kmap(page);
4217 			write_extent_buffer(leaf, map + pg_offset, ptr,
4218 					    copy_size);
4219 			kunmap(page);
4220 			btrfs_mark_buffer_dirty(leaf);
4221 		}
4222 		set_extent_uptodate(io_tree, em->start,
4223 				    extent_map_end(em) - 1, GFP_NOFS);
4224 		goto insert;
4225 	} else {
4226 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4227 		WARN_ON(1);
4228 	}
4229 not_found:
4230 	em->start = start;
4231 	em->len = len;
4232 not_found_em:
4233 	em->block_start = EXTENT_MAP_HOLE;
4234 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4235 insert:
4236 	btrfs_release_path(root, path);
4237 	if (em->start > start || extent_map_end(em) <= start) {
4238 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4239 		       "[%llu %llu]\n", (unsigned long long)em->start,
4240 		       (unsigned long long)em->len,
4241 		       (unsigned long long)start,
4242 		       (unsigned long long)len);
4243 		err = -EIO;
4244 		goto out;
4245 	}
4246 
4247 	err = 0;
4248 	spin_lock(&em_tree->lock);
4249 	ret = add_extent_mapping(em_tree, em);
4250 	/* it is possible that someone inserted the extent into the tree
4251 	 * while we had the lock dropped.  It is also possible that
4252 	 * an overlapping map exists in the tree
4253 	 */
4254 	if (ret == -EEXIST) {
4255 		struct extent_map *existing;
4256 
4257 		ret = 0;
4258 
4259 		existing = lookup_extent_mapping(em_tree, start, len);
4260 		if (existing && (existing->start > start ||
4261 		    existing->start + existing->len <= start)) {
4262 			free_extent_map(existing);
4263 			existing = NULL;
4264 		}
4265 		if (!existing) {
4266 			existing = lookup_extent_mapping(em_tree, em->start,
4267 							 em->len);
4268 			if (existing) {
4269 				err = merge_extent_mapping(em_tree, existing,
4270 							   em, start,
4271 							   root->sectorsize);
4272 				free_extent_map(existing);
4273 				if (err) {
4274 					free_extent_map(em);
4275 					em = NULL;
4276 				}
4277 			} else {
4278 				err = -EIO;
4279 				free_extent_map(em);
4280 				em = NULL;
4281 			}
4282 		} else {
4283 			free_extent_map(em);
4284 			em = existing;
4285 			err = 0;
4286 		}
4287 	}
4288 	spin_unlock(&em_tree->lock);
4289 out:
4290 	if (path)
4291 		btrfs_free_path(path);
4292 	if (trans) {
4293 		ret = btrfs_end_transaction(trans, root);
4294 		if (!err)
4295 			err = ret;
4296 	}
4297 	if (err) {
4298 		free_extent_map(em);
4299 		return ERR_PTR(err);
4300 	}
4301 	return em;
4302 }
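
/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as the fallocate path below typically walks a file range with
 * btrfs_get_extent() looking for holes.  The helper name and the exact
 * error handling are assumptions made for illustration only.
 */
static int example_range_has_hole(struct inode *inode, u64 start, u64 len)
{
	struct extent_map *em;
	u64 cur = start;

	while (cur < start + len) {
		/* no page, no create: just map the logical range */
		em = btrfs_get_extent(inode, NULL, 0, cur,
				      start + len - cur, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);
		if (em->block_start == EXTENT_MAP_HOLE) {
			free_extent_map(em);
			return 1;
		}
		cur = extent_map_end(em);
		free_extent_map(em);
	}
	return 0;
}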
4303 
4304 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4305 			const struct iovec *iov, loff_t offset,
4306 			unsigned long nr_segs)
4307 {
4308 	return -EINVAL;
4309 }
4310 
4311 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4312 		__u64 start, __u64 len)
4313 {
4314 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4315 }
4316 
4317 int btrfs_readpage(struct file *file, struct page *page)
4318 {
4319 	struct extent_io_tree *tree;
4320 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4321 	return extent_read_full_page(tree, page, btrfs_get_extent);
4322 }
4323 
4324 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4325 {
4326 	struct extent_io_tree *tree;
4327 
4328 
4329 	if (current->flags & PF_MEMALLOC) {
4330 		redirty_page_for_writepage(wbc, page);
4331 		unlock_page(page);
4332 		return 0;
4333 	}
4334 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4335 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4336 }
4337 
4338 int btrfs_writepages(struct address_space *mapping,
4339 		     struct writeback_control *wbc)
4340 {
4341 	struct extent_io_tree *tree;
4342 
4343 	tree = &BTRFS_I(mapping->host)->io_tree;
4344 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4345 }
4346 
4347 static int
4348 btrfs_readpages(struct file *file, struct address_space *mapping,
4349 		struct list_head *pages, unsigned nr_pages)
4350 {
4351 	struct extent_io_tree *tree;
4352 	tree = &BTRFS_I(mapping->host)->io_tree;
4353 	return extent_readpages(tree, mapping, pages, nr_pages,
4354 				btrfs_get_extent);
4355 }
4356 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4357 {
4358 	struct extent_io_tree *tree;
4359 	struct extent_map_tree *map;
4360 	int ret;
4361 
4362 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4363 	map = &BTRFS_I(page->mapping->host)->extent_tree;
4364 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4365 	if (ret == 1) {
4366 		ClearPagePrivate(page);
4367 		set_page_private(page, 0);
4368 		page_cache_release(page);
4369 	}
4370 	return ret;
4371 }
4372 
4373 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4374 {
4375 	if (PageWriteback(page) || PageDirty(page))
4376 		return 0;
4377 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4378 }
4379 
4380 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4381 {
4382 	struct extent_io_tree *tree;
4383 	struct btrfs_ordered_extent *ordered;
4384 	u64 page_start = page_offset(page);
4385 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4386 
4387 	wait_on_page_writeback(page);
4388 	tree = &BTRFS_I(page->mapping->host)->io_tree;
4389 	if (offset) {
4390 		btrfs_releasepage(page, GFP_NOFS);
4391 		return;
4392 	}
4393 
4394 	lock_extent(tree, page_start, page_end, GFP_NOFS);
4395 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4396 					   page_offset(page));
4397 	if (ordered) {
4398 		/*
4399 		 * IO on this page will never be started, so we need
4400 		 * to account for any ordered extents now
4401 		 */
4402 		clear_extent_bit(tree, page_start, page_end,
4403 				 EXTENT_DIRTY | EXTENT_DELALLOC |
4404 				 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4405 		btrfs_finish_ordered_io(page->mapping->host,
4406 					page_start, page_end);
4407 		btrfs_put_ordered_extent(ordered);
4408 		lock_extent(tree, page_start, page_end, GFP_NOFS);
4409 	}
4410 	clear_extent_bit(tree, page_start, page_end,
4411 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4412 		 EXTENT_ORDERED,
4413 		 1, 1, GFP_NOFS);
4414 	__btrfs_releasepage(page, GFP_NOFS);
4415 
4416 	ClearPageChecked(page);
4417 	if (PagePrivate(page)) {
4418 		ClearPagePrivate(page);
4419 		set_page_private(page, 0);
4420 		page_cache_release(page);
4421 	}
4422 }
4423 
4424 /*
4425  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4426  * called from a page fault handler when a page is first dirtied. Hence we must
4427  * be careful to check for EOF conditions here. We set the page up correctly
4428  * for a written page which means we get ENOSPC checking when writing into
4429  * holes and correct delalloc and unwritten extent mapping on filesystems that
4430  * support these features.
4431  *
4432  * We are not allowed to take the i_mutex here so we have to play games to
4433  * protect against truncate races as the page could now be beyond EOF.  Because
4434  * vmtruncate() writes the inode size before removing pages, once we have the
4435  * page lock we can determine safely if the page is beyond EOF. If it is not
4436  * beyond EOF, then the page is guaranteed safe against truncation until we
4437  * unlock the page.
4438  */
4439 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4440 {
4441 	struct page *page = vmf->page;
4442 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
4443 	struct btrfs_root *root = BTRFS_I(inode)->root;
4444 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4445 	struct btrfs_ordered_extent *ordered;
4446 	char *kaddr;
4447 	unsigned long zero_start;
4448 	loff_t size;
4449 	int ret;
4450 	u64 page_start;
4451 	u64 page_end;
4452 
4453 	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4454 	if (ret) {
4455 		if (ret == -ENOMEM)
4456 			ret = VM_FAULT_OOM;
4457 		else /* -ENOSPC, -EIO, etc */
4458 			ret = VM_FAULT_SIGBUS;
4459 		goto out;
4460 	}
4461 
4462 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4463 again:
4464 	lock_page(page);
4465 	size = i_size_read(inode);
4466 	page_start = page_offset(page);
4467 	page_end = page_start + PAGE_CACHE_SIZE - 1;
4468 
4469 	if ((page->mapping != inode->i_mapping) ||
4470 	    (page_start >= size)) {
4471 		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4472 		/* page got truncated out from underneath us */
4473 		goto out_unlock;
4474 	}
4475 	wait_on_page_writeback(page);
4476 
4477 	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4478 	set_page_extent_mapped(page);
4479 
4480 	/*
4481 	 * we can't set the delalloc bits if there are pending ordered
4482 	 * extents.  Drop our locks and wait for them to finish
4483 	 */
4484 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
4485 	if (ordered) {
4486 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4487 		unlock_page(page);
4488 		btrfs_start_ordered_extent(inode, ordered, 1);
4489 		btrfs_put_ordered_extent(ordered);
4490 		goto again;
4491 	}
4492 
4493 	btrfs_set_extent_delalloc(inode, page_start, page_end);
4494 	ret = 0;
4495 
4496 	/* page is wholly or partially inside EOF */
4497 	if (page_start + PAGE_CACHE_SIZE > size)
4498 		zero_start = size & ~PAGE_CACHE_MASK;
4499 	else
4500 		zero_start = PAGE_CACHE_SIZE;
4501 
4502 	if (zero_start != PAGE_CACHE_SIZE) {
4503 		kaddr = kmap(page);
4504 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4505 		flush_dcache_page(page);
4506 		kunmap(page);
4507 	}
4508 	ClearPageChecked(page);
4509 	set_page_dirty(page);
4510 
4511 	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4512 	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4513 
4514 out_unlock:
4515 	unlock_page(page);
4516 out:
4517 	return ret;
4518 }
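
/*
 * Worked example for the EOF zeroing above (illustration only, assuming
 * 4k pages): with i_size 10000, the page covering [8192, 12287] gets
 * zero_start = 10000 & ~PAGE_CACHE_MASK = 1808, so bytes 1808..4095 of
 * the page are zeroed before it is marked dirty.  A page wholly inside
 * EOF keeps zero_start == PAGE_CACHE_SIZE and is left untouched.
 */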
4519 
4520 static void btrfs_truncate(struct inode *inode)
4521 {
4522 	struct btrfs_root *root = BTRFS_I(inode)->root;
4523 	int ret;
4524 	struct btrfs_trans_handle *trans;
4525 	unsigned long nr;
4526 	u64 mask = root->sectorsize - 1;
4527 
4528 	if (!S_ISREG(inode->i_mode))
4529 		return;
4530 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4531 		return;
4532 
4533 	btrfs_truncate_page(inode->i_mapping, inode->i_size);
4534 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4535 
4536 	trans = btrfs_start_transaction(root, 1);
4537 
4538 	/*
4539 	 * setattr is responsible for setting the ordered_data_close flag,
4540 	 * but that is only tested during the last file release.  That
4541 	 * could happen well after the next commit, leaving a great big
4542 	 * window where new writes may get lost if someone chooses to write
4543 	 * to this file after truncating to zero
4544 	 *
4545 	 * The inode doesn't have any dirty data here, and so if we commit
4546 	 * this is a noop.  If someone immediately starts writing to the inode
4547 	 * it is very likely we'll catch some of their writes in this
4548 	 * transaction, and the commit will find this file on the ordered
4549 	 * data list with good things to send down.
4550 	 *
4551 	 * This is a best effort solution, there is still a window where
4552 	 * using truncate to replace the contents of the file will
4553 	 * end up with a zero length file after a crash.
4554 	 */
4555 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
4556 		btrfs_add_ordered_operation(trans, root, inode);
4557 
4558 	btrfs_set_trans_block_group(trans, inode);
4559 	btrfs_i_size_write(inode, inode->i_size);
4560 
4561 	ret = btrfs_orphan_add(trans, inode);
4562 	if (ret)
4563 		goto out;
4564 	/* FIXME, add redo link to tree so we don't leak on crash */
4565 	ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4566 				      BTRFS_EXTENT_DATA_KEY);
4567 	btrfs_update_inode(trans, root, inode);
4568 
4569 	ret = btrfs_orphan_del(trans, inode);
4570 	BUG_ON(ret);
4571 
4572 out:
4573 	nr = trans->blocks_used;
4574 	ret = btrfs_end_transaction_throttle(trans, root);
4575 	BUG_ON(ret);
4576 	btrfs_btree_balance_dirty(root, nr);
4577 }
4578 
4579 /*
4580  * create a new subvolume directory/inode (helper for the ioctl).
4581  */
4582 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4583 			     struct btrfs_root *new_root, struct dentry *dentry,
4584 			     u64 new_dirid, u64 alloc_hint)
4585 {
4586 	struct inode *inode;
4587 	int error;
4588 	u64 index = 0;
4589 
4590 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4591 				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4592 	if (IS_ERR(inode))
4593 		return PTR_ERR(inode);
4594 	inode->i_op = &btrfs_dir_inode_operations;
4595 	inode->i_fop = &btrfs_dir_file_operations;
4596 
4597 	inode->i_nlink = 1;
4598 	btrfs_i_size_write(inode, 0);
4599 
4600 	error = btrfs_update_inode(trans, new_root, inode);
4601 	if (error)
4602 		return error;
4603 
4604 	d_instantiate(dentry, inode);
4605 	return 0;
4606 }
4607 
4608 /* helper function for file defrag and space balancing.  This
4609  * forces readahead on a given range of bytes in an inode
4610  */
4611 unsigned long btrfs_force_ra(struct address_space *mapping,
4612 			      struct file_ra_state *ra, struct file *file,
4613 			      pgoff_t offset, pgoff_t last_index)
4614 {
4615 	pgoff_t req_size = last_index - offset + 1;
4616 
4617 	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4618 	return offset + req_size;
4619 }
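
/*
 * Usage sketch (illustration only): to push readahead for the first 64
 * pages of a file being defragged, a caller could do something like
 *
 *	pgoff_t next = btrfs_force_ra(inode->i_mapping, &file->f_ra,
 *				      file, 0, 63);
 *
 * and continue from 'next' on the following pass.
 */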
4620 
4621 struct inode *btrfs_alloc_inode(struct super_block *sb)
4622 {
4623 	struct btrfs_inode *ei;
4624 
4625 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4626 	if (!ei)
4627 		return NULL;
4628 	ei->last_trans = 0;
4629 	ei->logged_trans = 0;
4630 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4631 	ei->i_acl = BTRFS_ACL_NOT_CACHED;
4632 	ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4633 	INIT_LIST_HEAD(&ei->i_orphan);
4634 	INIT_LIST_HEAD(&ei->ordered_operations);
4635 	return &ei->vfs_inode;
4636 }
4637 
4638 void btrfs_destroy_inode(struct inode *inode)
4639 {
4640 	struct btrfs_ordered_extent *ordered;
4641 	struct btrfs_root *root = BTRFS_I(inode)->root;
4642 
4643 	WARN_ON(!list_empty(&inode->i_dentry));
4644 	WARN_ON(inode->i_data.nrpages);
4645 
4646 	if (BTRFS_I(inode)->i_acl &&
4647 	    BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4648 		posix_acl_release(BTRFS_I(inode)->i_acl);
4649 	if (BTRFS_I(inode)->i_default_acl &&
4650 	    BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4651 		posix_acl_release(BTRFS_I(inode)->i_default_acl);
4652 
4653 	/*
4654 	 * Make sure we're properly removed from the ordered operation
4655 	 * lists.
4656 	 */
4657 	smp_mb();
4658 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
4659 		spin_lock(&root->fs_info->ordered_extent_lock);
4660 		list_del_init(&BTRFS_I(inode)->ordered_operations);
4661 		spin_unlock(&root->fs_info->ordered_extent_lock);
4662 	}
4663 
4664 	spin_lock(&root->list_lock);
4665 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4666 		printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4667 		       " list\n", inode->i_ino);
4668 		dump_stack();
4669 	}
4670 	spin_unlock(&root->list_lock);
4671 
4672 	while (1) {
4673 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4674 		if (!ordered)
4675 			break;
4676 		else {
4677 			printk(KERN_ERR "btrfs found ordered "
4678 			       "extent %llu %llu on inode cleanup\n",
4679 			       (unsigned long long)ordered->file_offset,
4680 			       (unsigned long long)ordered->len);
4681 			btrfs_remove_ordered_extent(inode, ordered);
4682 			btrfs_put_ordered_extent(ordered);
4683 			btrfs_put_ordered_extent(ordered);
4684 		}
4685 	}
4686 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4687 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4688 }
4689 
4690 static void init_once(void *foo)
4691 {
4692 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4693 
4694 	inode_init_once(&ei->vfs_inode);
4695 }
4696 
4697 void btrfs_destroy_cachep(void)
4698 {
4699 	if (btrfs_inode_cachep)
4700 		kmem_cache_destroy(btrfs_inode_cachep);
4701 	if (btrfs_trans_handle_cachep)
4702 		kmem_cache_destroy(btrfs_trans_handle_cachep);
4703 	if (btrfs_transaction_cachep)
4704 		kmem_cache_destroy(btrfs_transaction_cachep);
4705 	if (btrfs_path_cachep)
4706 		kmem_cache_destroy(btrfs_path_cachep);
4707 }
4708 
4709 int btrfs_init_cachep(void)
4710 {
4711 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
4712 			sizeof(struct btrfs_inode), 0,
4713 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
4714 	if (!btrfs_inode_cachep)
4715 		goto fail;
4716 
4717 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
4718 			sizeof(struct btrfs_trans_handle), 0,
4719 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4720 	if (!btrfs_trans_handle_cachep)
4721 		goto fail;
4722 
4723 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
4724 			sizeof(struct btrfs_transaction), 0,
4725 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4726 	if (!btrfs_transaction_cachep)
4727 		goto fail;
4728 
4729 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
4730 			sizeof(struct btrfs_path), 0,
4731 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4732 	if (!btrfs_path_cachep)
4733 		goto fail;
4734 
4735 	return 0;
4736 fail:
4737 	btrfs_destroy_cachep();
4738 	return -ENOMEM;
4739 }
4740 
4741 static int btrfs_getattr(struct vfsmount *mnt,
4742 			 struct dentry *dentry, struct kstat *stat)
4743 {
4744 	struct inode *inode = dentry->d_inode;
4745 	generic_fillattr(inode, stat);
4746 	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4747 	stat->blksize = PAGE_CACHE_SIZE;
4748 	stat->blocks = (inode_get_bytes(inode) +
4749 			BTRFS_I(inode)->delalloc_bytes) >> 9;
4750 	return 0;
4751 }
4752 
4753 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4754 			   struct inode *new_dir, struct dentry *new_dentry)
4755 {
4756 	struct btrfs_trans_handle *trans;
4757 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
4758 	struct inode *new_inode = new_dentry->d_inode;
4759 	struct inode *old_inode = old_dentry->d_inode;
4760 	struct timespec ctime = CURRENT_TIME;
4761 	u64 index = 0;
4762 	int ret;
4763 
4764 	/* we're not allowed to rename between subvolumes */
4765 	if (BTRFS_I(old_inode)->root->root_key.objectid !=
4766 	    BTRFS_I(new_dir)->root->root_key.objectid)
4767 		return -EXDEV;
4768 
4769 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
4770 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4771 		return -ENOTEMPTY;
4772 	}
4773 
4774 	/* to rename a snapshot or subvolume, we need to juggle the
4775 	 * backrefs.  This isn't coded yet
4776 	 */
4777 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4778 		return -EXDEV;
4779 
4780 	ret = btrfs_check_metadata_free_space(root);
4781 	if (ret)
4782 		goto out_unlock;
4783 
4784 	/*
4785 	 * we're using rename to replace one file with another
4786 	 * and the replacement file is large.  Start IO on it now so
4787 	 * we don't add too much work to the end of the transaction
4788 	 */
4789 	if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
4790 	    new_inode->i_size &&
4791 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4792 		filemap_flush(old_inode->i_mapping);
4793 
4794 	trans = btrfs_start_transaction(root, 1);
4795 
4796 	/*
4797 	 * make sure the inode gets flushed if it is replacing
4798 	 * something.
4799 	 */
4800 	if (new_inode && new_inode->i_size &&
4801 	    old_inode && S_ISREG(old_inode->i_mode)) {
4802 		btrfs_add_ordered_operation(trans, root, old_inode);
4803 	}
4804 
4805 	/*
4806 	 * this is an ugly little race, but the rename is required to make
4807 	 * sure that if we crash, the inode is either at the old name
4808 	 * or the new one.  pinning the log transaction lets us make sure
4809 	 * we don't allow a log commit to come in after we unlink the
4810 	 * name but before we add the new name back in.
4811 	 */
4812 	btrfs_pin_log_trans(root);
4813 
4814 	btrfs_set_trans_block_group(trans, new_dir);
4815 
4816 	btrfs_inc_nlink(old_dentry->d_inode);
4817 	old_dir->i_ctime = old_dir->i_mtime = ctime;
4818 	new_dir->i_ctime = new_dir->i_mtime = ctime;
4819 	old_inode->i_ctime = ctime;
4820 
4821 	if (old_dentry->d_parent != new_dentry->d_parent)
4822 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
4823 
4824 	ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4825 				 old_dentry->d_name.name,
4826 				 old_dentry->d_name.len);
4827 	if (ret)
4828 		goto out_fail;
4829 
4830 	if (new_inode) {
4831 		new_inode->i_ctime = CURRENT_TIME;
4832 		ret = btrfs_unlink_inode(trans, root, new_dir,
4833 					 new_dentry->d_inode,
4834 					 new_dentry->d_name.name,
4835 					 new_dentry->d_name.len);
4836 		if (ret)
4837 			goto out_fail;
4838 		if (new_inode->i_nlink == 0) {
4839 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4840 			if (ret)
4841 				goto out_fail;
4842 		}
4843 
4844 	}
4845 	ret = btrfs_set_inode_index(new_dir, &index);
4846 	if (ret)
4847 		goto out_fail;
4848 
4849 	ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4850 			     old_inode, new_dentry->d_name.name,
4851 			     new_dentry->d_name.len, 1, index);
4852 	if (ret)
4853 		goto out_fail;
4854 
4855 	btrfs_log_new_name(trans, old_inode, old_dir,
4856 				       new_dentry->d_parent);
4857 out_fail:
4858 
4859 	/* this btrfs_end_log_trans just allows the current
4860 	 * log sub-transaction to complete
4861 	 */
4862 	btrfs_end_log_trans(root);
4863 	btrfs_end_transaction_throttle(trans, root);
4864 out_unlock:
4865 	return ret;
4866 }
4867 
4868 /*
4869  * some fairly slow code that needs optimization. This walks the list
4870  * of all the inodes with pending delalloc and forces them to disk.
4871  */
4872 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4873 {
4874 	struct list_head *head = &root->fs_info->delalloc_inodes;
4875 	struct btrfs_inode *binode;
4876 	struct inode *inode;
4877 
4878 	if (root->fs_info->sb->s_flags & MS_RDONLY)
4879 		return -EROFS;
4880 
4881 	spin_lock(&root->fs_info->delalloc_lock);
4882 	while (!list_empty(head)) {
4883 		binode = list_entry(head->next, struct btrfs_inode,
4884 				    delalloc_inodes);
4885 		inode = igrab(&binode->vfs_inode);
4886 		if (!inode)
4887 			list_del_init(&binode->delalloc_inodes);
4888 		spin_unlock(&root->fs_info->delalloc_lock);
4889 		if (inode) {
4890 			filemap_flush(inode->i_mapping);
4891 			iput(inode);
4892 		}
4893 		cond_resched();
4894 		spin_lock(&root->fs_info->delalloc_lock);
4895 	}
4896 	spin_unlock(&root->fs_info->delalloc_lock);
4897 
4898 	/* the filemap_flush will queue IO into the worker threads, but
4899 	 * we have to make sure the IO is actually started and that
4900 	 * ordered extents get created before we return
4901 	 */
4902 	atomic_inc(&root->fs_info->async_submit_draining);
4903 	while (atomic_read(&root->fs_info->nr_async_submits) ||
4904 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
4905 		wait_event(root->fs_info->async_submit_wait,
4906 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4907 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4908 	}
4909 	atomic_dec(&root->fs_info->async_submit_draining);
4910 	return 0;
4911 }
4912 
4913 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4914 			 const char *symname)
4915 {
4916 	struct btrfs_trans_handle *trans;
4917 	struct btrfs_root *root = BTRFS_I(dir)->root;
4918 	struct btrfs_path *path;
4919 	struct btrfs_key key;
4920 	struct inode *inode = NULL;
4921 	int err;
4922 	int drop_inode = 0;
4923 	u64 objectid;
4924 	u64 index = 0;
4925 	int name_len;
4926 	int datasize;
4927 	unsigned long ptr;
4928 	struct btrfs_file_extent_item *ei;
4929 	struct extent_buffer *leaf;
4930 	unsigned long nr = 0;
4931 
4932 	name_len = strlen(symname) + 1;
4933 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4934 		return -ENAMETOOLONG;
4935 
4936 	err = btrfs_check_metadata_free_space(root);
4937 	if (err)
4938 		goto out_fail;
4939 
4940 	trans = btrfs_start_transaction(root, 1);
4941 	btrfs_set_trans_block_group(trans, dir);
4942 
4943 	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4944 	if (err) {
4945 		err = -ENOSPC;
4946 		goto out_unlock;
4947 	}
4948 
4949 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4950 				dentry->d_name.len,
4951 				dentry->d_parent->d_inode->i_ino, objectid,
4952 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4953 				&index);
4954 	err = PTR_ERR(inode);
4955 	if (IS_ERR(inode))
4956 		goto out_unlock;
4957 
4958 	err = btrfs_init_inode_security(inode, dir);
4959 	if (err) {
4960 		drop_inode = 1;
4961 		goto out_unlock;
4962 	}
4963 
4964 	btrfs_set_trans_block_group(trans, inode);
4965 	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4966 	if (err)
4967 		drop_inode = 1;
4968 	else {
4969 		inode->i_mapping->a_ops = &btrfs_aops;
4970 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4971 		inode->i_fop = &btrfs_file_operations;
4972 		inode->i_op = &btrfs_file_inode_operations;
4973 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4974 	}
4975 	dir->i_sb->s_dirt = 1;
4976 	btrfs_update_inode_block_group(trans, inode);
4977 	btrfs_update_inode_block_group(trans, dir);
4978 	if (drop_inode)
4979 		goto out_unlock;
4980 
4981 	path = btrfs_alloc_path();
4982 	BUG_ON(!path);
4983 	key.objectid = inode->i_ino;
4984 	key.offset = 0;
4985 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4986 	datasize = btrfs_file_extent_calc_inline_size(name_len);
4987 	err = btrfs_insert_empty_item(trans, root, path, &key,
4988 				      datasize);
4989 	if (err) {
4990 		drop_inode = 1;
4991 		goto out_unlock;
4992 	}
4993 	leaf = path->nodes[0];
4994 	ei = btrfs_item_ptr(leaf, path->slots[0],
4995 			    struct btrfs_file_extent_item);
4996 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4997 	btrfs_set_file_extent_type(leaf, ei,
4998 				   BTRFS_FILE_EXTENT_INLINE);
4999 	btrfs_set_file_extent_encryption(leaf, ei, 0);
5000 	btrfs_set_file_extent_compression(leaf, ei, 0);
5001 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5002 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5003 
5004 	ptr = btrfs_file_extent_inline_start(ei);
5005 	write_extent_buffer(leaf, symname, ptr, name_len);
5006 	btrfs_mark_buffer_dirty(leaf);
5007 	btrfs_free_path(path);
5008 
5009 	inode->i_op = &btrfs_symlink_inode_operations;
5010 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
5011 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5012 	inode_set_bytes(inode, name_len);
5013 	btrfs_i_size_write(inode, name_len - 1);
5014 	err = btrfs_update_inode(trans, root, inode);
5015 	if (err)
5016 		drop_inode = 1;
5017 
5018 out_unlock:
5019 	nr = trans->blocks_used;
5020 	btrfs_end_transaction_throttle(trans, root);
5021 out_fail:
5022 	if (drop_inode) {
5023 		inode_dec_link_count(inode);
5024 		iput(inode);
5025 	}
5026 	btrfs_btree_balance_dirty(root, nr);
5027 	return err;
5028 }
5029 
5030 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5031 			       struct inode *inode, u64 start, u64 end,
5032 			       u64 locked_end, u64 alloc_hint, int mode)
5033 {
5034 	struct btrfs_root *root = BTRFS_I(inode)->root;
5035 	struct btrfs_key ins;
5036 	u64 alloc_size;
5037 	u64 cur_offset = start;
5038 	u64 num_bytes = end - start;
5039 	int ret = 0;
5040 
5041 	while (num_bytes > 0) {
5042 		alloc_size = min(num_bytes, root->fs_info->max_extent);
5043 		ret = btrfs_reserve_extent(trans, root, alloc_size,
5044 					   root->sectorsize, 0, alloc_hint,
5045 					   (u64)-1, &ins, 1);
5046 		if (ret) {
5047 			WARN_ON(1);
5048 			goto out;
5049 		}
5050 		ret = insert_reserved_file_extent(trans, inode,
5051 						  cur_offset, ins.objectid,
5052 						  ins.offset, ins.offset,
5053 						  ins.offset, locked_end,
5054 						  0, 0, 0,
5055 						  BTRFS_FILE_EXTENT_PREALLOC);
5056 		BUG_ON(ret);
5057 		num_bytes -= ins.offset;
5058 		cur_offset += ins.offset;
5059 		alloc_hint = ins.objectid + ins.offset;
5060 	}
5061 out:
5062 	if (cur_offset > start) {
5063 		inode->i_ctime = CURRENT_TIME;
5064 		btrfs_set_flag(inode, PREALLOC);
5065 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5066 		    cur_offset > i_size_read(inode))
5067 			btrfs_i_size_write(inode, cur_offset);
5068 		ret = btrfs_update_inode(trans, root, inode);
5069 		BUG_ON(ret);
5070 	}
5071 
5072 	return ret;
5073 }
5074 
5075 static long btrfs_fallocate(struct inode *inode, int mode,
5076 			    loff_t offset, loff_t len)
5077 {
5078 	u64 cur_offset;
5079 	u64 last_byte;
5080 	u64 alloc_start;
5081 	u64 alloc_end;
5082 	u64 alloc_hint = 0;
5083 	u64 locked_end;
5084 	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5085 	struct extent_map *em;
5086 	struct btrfs_trans_handle *trans;
5087 	int ret;
5088 
5089 	alloc_start = offset & ~mask;
5090 	alloc_end =  (offset + len + mask) & ~mask;
5091 
5092 	/*
5093 	 * wait for ordered IO before we have any locks.  We'll loop again
5094 	 * below with the locks held.
5095 	 */
5096 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5097 
5098 	mutex_lock(&inode->i_mutex);
5099 	if (alloc_start > inode->i_size) {
5100 		ret = btrfs_cont_expand(inode, alloc_start);
5101 		if (ret)
5102 			goto out;
5103 	}
5104 
5105 	locked_end = alloc_end - 1;
5106 	while (1) {
5107 		struct btrfs_ordered_extent *ordered;
5108 
5109 		trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5110 		if (!trans) {
5111 			ret = -EIO;
5112 			goto out;
5113 		}
5114 
5115 		/* the extent lock is ordered inside the running
5116 		 * transaction
5117 		 */
5118 		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5119 			    GFP_NOFS);
5120 		ordered = btrfs_lookup_first_ordered_extent(inode,
5121 							    alloc_end - 1);
5122 		if (ordered &&
5123 		    ordered->file_offset + ordered->len > alloc_start &&
5124 		    ordered->file_offset < alloc_end) {
5125 			btrfs_put_ordered_extent(ordered);
5126 			unlock_extent(&BTRFS_I(inode)->io_tree,
5127 				      alloc_start, locked_end, GFP_NOFS);
5128 			btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5129 
5130 			/*
5131 			 * we can't wait on the range with the transaction
5132 			 * running or with the extent lock held
5133 			 */
5134 			btrfs_wait_ordered_range(inode, alloc_start,
5135 						 alloc_end - alloc_start);
5136 		} else {
5137 			if (ordered)
5138 				btrfs_put_ordered_extent(ordered);
5139 			break;
5140 		}
5141 	}
5142 
5143 	cur_offset = alloc_start;
5144 	while (1) {
5145 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5146 				      alloc_end - cur_offset, 0);
5147 		BUG_ON(IS_ERR(em) || !em);
5148 		last_byte = min(extent_map_end(em), alloc_end);
5149 		last_byte = (last_byte + mask) & ~mask;
5150 		if (em->block_start == EXTENT_MAP_HOLE) {
5151 			ret = prealloc_file_range(trans, inode, cur_offset,
5152 					last_byte, locked_end + 1,
5153 					alloc_hint, mode);
5154 			if (ret < 0) {
5155 				free_extent_map(em);
5156 				break;
5157 			}
5158 		}
5159 		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5160 			alloc_hint = em->block_start;
5161 		free_extent_map(em);
5162 
5163 		cur_offset = last_byte;
5164 		if (cur_offset >= alloc_end) {
5165 			ret = 0;
5166 			break;
5167 		}
5168 	}
5169 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5170 		      GFP_NOFS);
5171 
5172 	btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5173 out:
5174 	mutex_unlock(&inode->i_mutex);
5175 	return ret;
5176 }
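
/*
 * Worked example of the sector alignment above (illustration only,
 * assuming a 4k sectorsize): fallocate(offset=1000, len=3000) gives
 * alloc_start = 1000 & ~4095 = 0 and
 * alloc_end = (1000 + 3000 + 4095) & ~4095 = 4096, so exactly one
 * sector-aligned block is considered for preallocation.
 */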
5177 
5178 static int btrfs_set_page_dirty(struct page *page)
5179 {
5180 	return __set_page_dirty_nobuffers(page);
5181 }
5182 
5183 static int btrfs_permission(struct inode *inode, int mask)
5184 {
5185 	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
5186 		return -EACCES;
5187 	return generic_permission(inode, mask, btrfs_check_acl);
5188 }
5189 
5190 static struct inode_operations btrfs_dir_inode_operations = {
5191 	.getattr	= btrfs_getattr,
5192 	.lookup		= btrfs_lookup,
5193 	.create		= btrfs_create,
5194 	.unlink		= btrfs_unlink,
5195 	.link		= btrfs_link,
5196 	.mkdir		= btrfs_mkdir,
5197 	.rmdir		= btrfs_rmdir,
5198 	.rename		= btrfs_rename,
5199 	.symlink	= btrfs_symlink,
5200 	.setattr	= btrfs_setattr,
5201 	.mknod		= btrfs_mknod,
5202 	.setxattr	= btrfs_setxattr,
5203 	.getxattr	= btrfs_getxattr,
5204 	.listxattr	= btrfs_listxattr,
5205 	.removexattr	= btrfs_removexattr,
5206 	.permission	= btrfs_permission,
5207 };
5208 static struct inode_operations btrfs_dir_ro_inode_operations = {
5209 	.lookup		= btrfs_lookup,
5210 	.permission	= btrfs_permission,
5211 };
5212 static struct file_operations btrfs_dir_file_operations = {
5213 	.llseek		= generic_file_llseek,
5214 	.read		= generic_read_dir,
5215 	.readdir	= btrfs_real_readdir,
5216 	.unlocked_ioctl	= btrfs_ioctl,
5217 #ifdef CONFIG_COMPAT
5218 	.compat_ioctl	= btrfs_ioctl,
5219 #endif
5220 	.release        = btrfs_release_file,
5221 	.fsync		= btrfs_sync_file,
5222 };
5223 
5224 static struct extent_io_ops btrfs_extent_io_ops = {
5225 	.fill_delalloc = run_delalloc_range,
5226 	.submit_bio_hook = btrfs_submit_bio_hook,
5227 	.merge_bio_hook = btrfs_merge_bio_hook,
5228 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
5229 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
5230 	.writepage_start_hook = btrfs_writepage_start_hook,
5231 	.readpage_io_failed_hook = btrfs_io_failed_hook,
5232 	.set_bit_hook = btrfs_set_bit_hook,
5233 	.clear_bit_hook = btrfs_clear_bit_hook,
5234 };
5235 
5236 /*
5237  * btrfs doesn't support the bmap operation because swapfiles
5238  * use bmap to make a mapping of extents in the file.  They assume
5239  * these extents won't change over the life of the file and they
5240  * use the bmap result to do IO directly to the drive.
5241  *
5242  * the btrfs bmap call would return logical addresses that aren't
5243  * suitable for IO and they also will change frequently as COW
5244  * operations happen.  So, swapfile + btrfs == corruption.
5245  *
5246  * For now we're avoiding this by dropping bmap.
5247  */
5248 static struct address_space_operations btrfs_aops = {
5249 	.readpage	= btrfs_readpage,
5250 	.writepage	= btrfs_writepage,
5251 	.writepages	= btrfs_writepages,
5252 	.readpages	= btrfs_readpages,
5253 	.sync_page	= block_sync_page,
5254 	.direct_IO	= btrfs_direct_IO,
5255 	.invalidatepage = btrfs_invalidatepage,
5256 	.releasepage	= btrfs_releasepage,
5257 	.set_page_dirty	= btrfs_set_page_dirty,
5258 };
5259 
5260 static struct address_space_operations btrfs_symlink_aops = {
5261 	.readpage	= btrfs_readpage,
5262 	.writepage	= btrfs_writepage,
5263 	.invalidatepage = btrfs_invalidatepage,
5264 	.releasepage	= btrfs_releasepage,
5265 };
5266 
5267 static struct inode_operations btrfs_file_inode_operations = {
5268 	.truncate	= btrfs_truncate,
5269 	.getattr	= btrfs_getattr,
5270 	.setattr	= btrfs_setattr,
5271 	.setxattr	= btrfs_setxattr,
5272 	.getxattr	= btrfs_getxattr,
5273 	.listxattr      = btrfs_listxattr,
5274 	.removexattr	= btrfs_removexattr,
5275 	.permission	= btrfs_permission,
5276 	.fallocate	= btrfs_fallocate,
5277 	.fiemap		= btrfs_fiemap,
5278 };
5279 static struct inode_operations btrfs_special_inode_operations = {
5280 	.getattr	= btrfs_getattr,
5281 	.setattr	= btrfs_setattr,
5282 	.permission	= btrfs_permission,
5283 	.setxattr	= btrfs_setxattr,
5284 	.getxattr	= btrfs_getxattr,
5285 	.listxattr	= btrfs_listxattr,
5286 	.removexattr	= btrfs_removexattr,
5287 };
5288 static struct inode_operations btrfs_symlink_inode_operations = {
5289 	.readlink	= generic_readlink,
5290 	.follow_link	= page_follow_link_light,
5291 	.put_link	= page_put_link,
5292 	.permission	= btrfs_permission,
5293 	.setxattr	= btrfs_setxattr,
5294 	.getxattr	= btrfs_getxattr,
5295 	.listxattr	= btrfs_listxattr,
5296 	.removexattr	= btrfs_removexattr,
5297 };
5298