xref: /openbmc/linux/fs/btrfs/inode.c (revision 565d76cb)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include "compat.h"
41 #include "ctree.h"
42 #include "disk-io.h"
43 #include "transaction.h"
44 #include "btrfs_inode.h"
45 #include "ioctl.h"
46 #include "print-tree.h"
47 #include "volumes.h"
48 #include "ordered-data.h"
49 #include "xattr.h"
50 #include "tree-log.h"
51 #include "compression.h"
52 #include "locking.h"
53 
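/* arguments passed through the btrfs_iget inode lookup paths in this file */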
54 struct btrfs_iget_args {
55 	u64 ino;
56 	struct btrfs_root *root;
57 };
58 
59 static const struct inode_operations btrfs_dir_inode_operations;
60 static const struct inode_operations btrfs_symlink_inode_operations;
61 static const struct inode_operations btrfs_dir_ro_inode_operations;
62 static const struct inode_operations btrfs_special_inode_operations;
63 static const struct inode_operations btrfs_file_inode_operations;
64 static const struct address_space_operations btrfs_aops;
65 static const struct address_space_operations btrfs_symlink_aops;
66 static const struct file_operations btrfs_dir_file_operations;
67 static struct extent_io_ops btrfs_extent_io_ops;
68 
69 static struct kmem_cache *btrfs_inode_cachep;
70 struct kmem_cache *btrfs_trans_handle_cachep;
71 struct kmem_cache *btrfs_transaction_cachep;
72 struct kmem_cache *btrfs_path_cachep;
73 
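/*
 * map the S_IFMT bits of an inode's i_mode to the on-disk btrfs
 * directory entry types (BTRFS_FT_*)
 */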
74 #define S_SHIFT 12
75 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
76 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
77 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
78 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
79 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
80 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
81 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
82 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
83 };
84 
85 static void btrfs_truncate(struct inode *inode);
86 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
87 static noinline int cow_file_range(struct inode *inode,
88 				   struct page *locked_page,
89 				   u64 start, u64 end, int *page_started,
90 				   unsigned long *nr_written, int unlock);
91 
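/*
 * set up ACLs and the security xattr on a freshly created inode,
 * inheriting from the parent directory where appropriate
 */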
92 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
93 				     struct inode *inode,  struct inode *dir,
94 				     const struct qstr *qstr)
95 {
96 	int err;
97 
98 	err = btrfs_init_acl(trans, inode, dir);
99 	if (!err)
100 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
101 	return err;
102 }
103 
104 /*
105  * this does all the hard work for inserting an inline extent into
106  * the btree.  The caller should have done a btrfs_drop_extents so that
107  * no overlapping inline items exist in the btree
108  */
109 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
110 				struct btrfs_root *root, struct inode *inode,
111 				u64 start, size_t size, size_t compressed_size,
112 				struct page **compressed_pages)
113 {
114 	struct btrfs_key key;
115 	struct btrfs_path *path;
116 	struct extent_buffer *leaf;
117 	struct page *page = NULL;
118 	char *kaddr;
119 	unsigned long ptr;
120 	struct btrfs_file_extent_item *ei;
121 	int err = 0;
122 	int ret;
123 	size_t cur_size = size;
124 	size_t datasize;
125 	unsigned long offset;
126 	int compress_type = BTRFS_COMPRESS_NONE;
127 
128 	if (compressed_size && compressed_pages) {
129 		compress_type = root->fs_info->compress_type;
130 		cur_size = compressed_size;
131 	}
132 
133 	path = btrfs_alloc_path();
134 	if (!path)
135 		return -ENOMEM;
136 
137 	path->leave_spinning = 1;
138 	btrfs_set_trans_block_group(trans, inode);
139 
140 	key.objectid = inode->i_ino;
141 	key.offset = start;
142 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
143 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
144 
145 	inode_add_bytes(inode, size);
146 	ret = btrfs_insert_empty_item(trans, root, path, &key,
147 				      datasize);
148 	BUG_ON(ret);
149 	if (ret) {
150 		err = ret;
151 		goto fail;
152 	}
153 	leaf = path->nodes[0];
154 	ei = btrfs_item_ptr(leaf, path->slots[0],
155 			    struct btrfs_file_extent_item);
156 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
157 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
158 	btrfs_set_file_extent_encryption(leaf, ei, 0);
159 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
160 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
161 	ptr = btrfs_file_extent_inline_start(ei);
162 
163 	if (compress_type != BTRFS_COMPRESS_NONE) {
164 		struct page *cpage;
165 		int i = 0;
166 		while (compressed_size > 0) {
167 			cpage = compressed_pages[i];
168 			cur_size = min_t(unsigned long, compressed_size,
169 				       PAGE_CACHE_SIZE);
170 
171 			kaddr = kmap_atomic(cpage, KM_USER0);
172 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
173 			kunmap_atomic(kaddr, KM_USER0);
174 
175 			i++;
176 			ptr += cur_size;
177 			compressed_size -= cur_size;
178 		}
179 		btrfs_set_file_extent_compression(leaf, ei,
180 						  compress_type);
181 	} else {
182 		page = find_get_page(inode->i_mapping,
183 				     start >> PAGE_CACHE_SHIFT);
184 		btrfs_set_file_extent_compression(leaf, ei, 0);
185 		kaddr = kmap_atomic(page, KM_USER0);
186 		offset = start & (PAGE_CACHE_SIZE - 1);
187 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
188 		kunmap_atomic(kaddr, KM_USER0);
189 		page_cache_release(page);
190 	}
191 	btrfs_mark_buffer_dirty(leaf);
192 	btrfs_free_path(path);
193 
194 	/*
195 	 * we're an inline extent, so nobody can
196 	 * extend the file past i_size without locking
197 	 * a page we already have locked.
198 	 *
199 	 * We must do any isize and inode updates
200 	 * before we unlock the pages.  Otherwise we
201 	 * could end up racing with unlink.
202 	 */
203 	BTRFS_I(inode)->disk_i_size = inode->i_size;
204 	btrfs_update_inode(trans, root, inode);
205 
206 	return 0;
207 fail:
208 	btrfs_free_path(path);
209 	return err;
210 }
211 
212 
213 /*
214  * conditionally insert an inline extent into the file.  This
215  * does the checks required to make sure the data is small enough
216  * to fit as an inline extent.
217  */
218 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
219 				 struct btrfs_root *root,
220 				 struct inode *inode, u64 start, u64 end,
221 				 size_t compressed_size,
222 				 struct page **compressed_pages)
223 {
224 	u64 isize = i_size_read(inode);
225 	u64 actual_end = min(end + 1, isize);
226 	u64 inline_len = actual_end - start;
227 	u64 aligned_end = (end + root->sectorsize - 1) &
228 			~((u64)root->sectorsize - 1);
229 	u64 hint_byte;
230 	u64 data_len = inline_len;
231 	int ret;
232 
233 	if (compressed_size)
234 		data_len = compressed_size;
235 
236 	if (start > 0 ||
237 	    actual_end >= PAGE_CACHE_SIZE ||
238 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
239 	    (!compressed_size &&
240 	    (actual_end & (root->sectorsize - 1)) == 0) ||
241 	    end + 1 < isize ||
242 	    data_len > root->fs_info->max_inline) {
243 		return 1;
244 	}
245 
246 	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
247 				 &hint_byte, 1);
248 	BUG_ON(ret);
249 
250 	if (isize > actual_end)
251 		inline_len = min_t(u64, isize, actual_end);
252 	ret = insert_inline_extent(trans, root, inode, start,
253 				   inline_len, compressed_size,
254 				   compressed_pages);
255 	BUG_ON(ret);
256 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
257 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
258 	return 0;
259 }
260 
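/*
 * bookkeeping for a single range handed to the async compression code.
 * compress_file_range queues these on async_cow->extents and
 * submit_compressed_extents later writes them out.
 */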
261 struct async_extent {
262 	u64 start;
263 	u64 ram_size;
264 	u64 compressed_size;
265 	struct page **pages;
266 	unsigned long nr_pages;
267 	int compress_type;
268 	struct list_head list;
269 };
270 
271 struct async_cow {
272 	struct inode *inode;
273 	struct btrfs_root *root;
274 	struct page *locked_page;
275 	u64 start;
276 	u64 end;
277 	struct list_head extents;
278 	struct btrfs_work work;
279 };
280 
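/*
 * record a compressed (or uncompressed) range on the async_cow list so
 * that submit_compressed_extents can allocate space for it and write it
 * out later
 */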
281 static noinline int add_async_extent(struct async_cow *cow,
282 				     u64 start, u64 ram_size,
283 				     u64 compressed_size,
284 				     struct page **pages,
285 				     unsigned long nr_pages,
286 				     int compress_type)
287 {
288 	struct async_extent *async_extent;
289 
290 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
291 	async_extent->start = start;
292 	async_extent->ram_size = ram_size;
293 	async_extent->compressed_size = compressed_size;
294 	async_extent->pages = pages;
295 	async_extent->nr_pages = nr_pages;
296 	async_extent->compress_type = compress_type;
297 	list_add_tail(&async_extent->list, &cow->extents);
298 	return 0;
299 }
300 
301 /*
302  * we create compressed extents in two phases.  The first
303  * phase compresses a range of pages that have already been
304  * locked (both pages and state bits are locked).
305  *
306  * This is done inside an ordered work queue, and the compression
307  * is spread across many cpus.  The actual IO submission is step
308  * two, and the ordered work queue takes care of making sure that
309  * happens in the same order things were put onto the queue by
310  * writepages and friends.
311  *
312  * If this code finds it can't get good compression, it puts an
313  * entry onto the work queue to write the uncompressed bytes.  This
314  * makes sure that both compressed inodes and uncompressed inodes
315  * are written in the same order that pdflush sent them down.
316  */
317 static noinline int compress_file_range(struct inode *inode,
318 					struct page *locked_page,
319 					u64 start, u64 end,
320 					struct async_cow *async_cow,
321 					int *num_added)
322 {
323 	struct btrfs_root *root = BTRFS_I(inode)->root;
324 	struct btrfs_trans_handle *trans;
325 	u64 num_bytes;
326 	u64 blocksize = root->sectorsize;
327 	u64 actual_end;
328 	u64 isize = i_size_read(inode);
329 	int ret = 0;
330 	struct page **pages = NULL;
331 	unsigned long nr_pages;
332 	unsigned long nr_pages_ret = 0;
333 	unsigned long total_compressed = 0;
334 	unsigned long total_in = 0;
335 	unsigned long max_compressed = 128 * 1024;
336 	unsigned long max_uncompressed = 128 * 1024;
337 	int i;
338 	int will_compress;
339 	int compress_type = root->fs_info->compress_type;
340 
341 	actual_end = min_t(u64, isize, end + 1);
342 again:
343 	will_compress = 0;
344 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
345 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
346 
347 	/*
348 	 * we don't want to send crud past the end of i_size through
349 	 * compression, that's just a waste of CPU time.  So, if the
350 	 * end of the file is before the start of our current
351 	 * requested range of bytes, we bail out to the uncompressed
352 	 * cleanup code that can deal with all of this.
353 	 *
354 	 * It isn't really the fastest way to fix things, but this is a
355 	 * very uncommon corner.
356 	 */
357 	if (actual_end <= start)
358 		goto cleanup_and_bail_uncompressed;
359 
360 	total_compressed = actual_end - start;
361 
362 	/* we want to make sure that the amount of ram required to uncompress
363 	 * an extent is reasonable, so we limit the total size in ram
364 	 * of a compressed extent to 128k.  This is a crucial number
365 	 * because it also controls how easily we can spread reads across
366 	 * cpus for decompression.
367 	 *
368 	 * We also want to make sure the amount of IO required to do
369 	 * a random read is reasonably small, so we limit the size of
370 	 * a compressed extent to 128k.
371 	 */
372 	total_compressed = min(total_compressed, max_uncompressed);
373 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
374 	num_bytes = max(blocksize,  num_bytes);
375 	total_in = 0;
376 	ret = 0;
377 
378 	/*
379 	 * we do compression for mount -o compress and when the
380 	 * inode has not been flagged as nocompress.  This flag can
381 	 * change at any time if we discover bad compression ratios.
382 	 */
383 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
384 	    (btrfs_test_opt(root, COMPRESS) ||
385 	     (BTRFS_I(inode)->force_compress))) {
386 		WARN_ON(pages);
387 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
388 
389 		if (BTRFS_I(inode)->force_compress)
390 			compress_type = BTRFS_I(inode)->force_compress;
391 
392 		ret = btrfs_compress_pages(compress_type,
393 					   inode->i_mapping, start,
394 					   total_compressed, pages,
395 					   nr_pages, &nr_pages_ret,
396 					   &total_in,
397 					   &total_compressed,
398 					   max_compressed);
399 
400 		if (!ret) {
401 			unsigned long offset = total_compressed &
402 				(PAGE_CACHE_SIZE - 1);
403 			struct page *page = pages[nr_pages_ret - 1];
404 			char *kaddr;
405 
406 			/* zero the tail end of the last page, we might be
407 			 * sending it down to disk
408 			 */
409 			if (offset) {
410 				kaddr = kmap_atomic(page, KM_USER0);
411 				memset(kaddr + offset, 0,
412 				       PAGE_CACHE_SIZE - offset);
413 				kunmap_atomic(kaddr, KM_USER0);
414 			}
415 			will_compress = 1;
416 		}
417 	}
418 	if (start == 0) {
419 		trans = btrfs_join_transaction(root, 1);
420 		BUG_ON(IS_ERR(trans));
421 		btrfs_set_trans_block_group(trans, inode);
422 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
423 
424 		/* let's try to make an inline extent */
425 		if (ret || total_in < (actual_end - start)) {
426 			/* we didn't compress the entire range, try
427 			 * to make an uncompressed inline extent.
428 			 */
429 			ret = cow_file_range_inline(trans, root, inode,
430 						    start, end, 0, NULL);
431 		} else {
432 			/* try making a compressed inline extent */
433 			ret = cow_file_range_inline(trans, root, inode,
434 						    start, end,
435 						    total_compressed, pages);
436 		}
437 		if (ret == 0) {
438 			/*
439 			 * inline extent creation worked, we don't need
440 			 * to create any more async work items.  Unlock
441 			 * and free up our temp pages.
442 			 */
443 			extent_clear_unlock_delalloc(inode,
444 			     &BTRFS_I(inode)->io_tree,
445 			     start, end, NULL,
446 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
447 			     EXTENT_CLEAR_DELALLOC |
448 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
449 
450 			btrfs_end_transaction(trans, root);
451 			goto free_pages_out;
452 		}
453 		btrfs_end_transaction(trans, root);
454 	}
455 
456 	if (will_compress) {
457 		/*
458 		 * we aren't doing an inline extent, so round the compressed size
459 		 * up to a block size boundary so the allocator does sane
460 		 * things
461 		 */
462 		total_compressed = (total_compressed + blocksize - 1) &
463 			~(blocksize - 1);
464 
465 		/*
466 		 * one last check to make sure the compression is really a
467 		 * win, compare the page count read with the blocks on disk
468 		 */
469 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
470 			~(PAGE_CACHE_SIZE - 1);
471 		if (total_compressed >= total_in) {
472 			will_compress = 0;
473 		} else {
474 			num_bytes = total_in;
475 		}
476 	}
477 	if (!will_compress && pages) {
478 		/*
479 		 * the compression code ran but failed to make things smaller,
480 		 * free any pages it allocated and our page pointer array
481 		 */
482 		for (i = 0; i < nr_pages_ret; i++) {
483 			WARN_ON(pages[i]->mapping);
484 			page_cache_release(pages[i]);
485 		}
486 		kfree(pages);
487 		pages = NULL;
488 		total_compressed = 0;
489 		nr_pages_ret = 0;
490 
491 		/* flag the file so we don't compress in the future */
492 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
493 		    !(BTRFS_I(inode)->force_compress)) {
494 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
495 		}
496 	}
497 	if (will_compress) {
498 		*num_added += 1;
499 
500 		/* the async work queues will take care of doing actual
501 		 * allocation on disk for these compressed pages,
502 		 * and will submit them to the elevator.
503 		 */
504 		add_async_extent(async_cow, start, num_bytes,
505 				 total_compressed, pages, nr_pages_ret,
506 				 compress_type);
507 
508 		if (start + num_bytes < end) {
509 			start += num_bytes;
510 			pages = NULL;
511 			cond_resched();
512 			goto again;
513 		}
514 	} else {
515 cleanup_and_bail_uncompressed:
516 		/*
517 		 * No compression, but we still need to write the pages in
518 		 * the file we've been given so far.  redirty the locked
519 		 * page if it corresponds to our extent and set things up
520 		 * for the async work queue to run cow_file_range to do
521 		 * the normal delalloc dance
522 		 */
523 		if (page_offset(locked_page) >= start &&
524 		    page_offset(locked_page) <= end) {
525 			__set_page_dirty_nobuffers(locked_page);
526 			/* unlocked later on in the async handlers */
527 		}
528 		add_async_extent(async_cow, start, end - start + 1,
529 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
530 		*num_added += 1;
531 	}
532 
533 out:
534 	return 0;
535 
536 free_pages_out:
537 	for (i = 0; i < nr_pages_ret; i++) {
538 		WARN_ON(pages[i]->mapping);
539 		page_cache_release(pages[i]);
540 	}
541 	kfree(pages);
542 
543 	goto out;
544 }
545 
546 /*
547  * phase two of compressed writeback.  This is the ordered portion
548  * of the code, which only gets called in the order the work was
549  * queued.  We walk all the async extents created by compress_file_range
550  * and send them down to the disk.
551  */
552 static noinline int submit_compressed_extents(struct inode *inode,
553 					      struct async_cow *async_cow)
554 {
555 	struct async_extent *async_extent;
556 	u64 alloc_hint = 0;
557 	struct btrfs_trans_handle *trans;
558 	struct btrfs_key ins;
559 	struct extent_map *em;
560 	struct btrfs_root *root = BTRFS_I(inode)->root;
561 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
562 	struct extent_io_tree *io_tree;
563 	int ret = 0;
564 
565 	if (list_empty(&async_cow->extents))
566 		return 0;
567 
568 
569 	while (!list_empty(&async_cow->extents)) {
570 		async_extent = list_entry(async_cow->extents.next,
571 					  struct async_extent, list);
572 		list_del(&async_extent->list);
573 
574 		io_tree = &BTRFS_I(inode)->io_tree;
575 
576 retry:
577 		/* did the compression code fall back to uncompressed IO? */
578 		if (!async_extent->pages) {
579 			int page_started = 0;
580 			unsigned long nr_written = 0;
581 
582 			lock_extent(io_tree, async_extent->start,
583 					 async_extent->start +
584 					 async_extent->ram_size - 1, GFP_NOFS);
585 
586 			/* allocate blocks */
587 			ret = cow_file_range(inode, async_cow->locked_page,
588 					     async_extent->start,
589 					     async_extent->start +
590 					     async_extent->ram_size - 1,
591 					     &page_started, &nr_written, 0);
592 
593 			/*
594 			 * if page_started, cow_file_range inserted an
595 			 * inline extent and took care of all the unlocking
596 			 * and IO for us.  Otherwise, we need to submit
597 			 * all those pages down to the drive.
598 			 */
599 			if (!page_started && !ret)
600 				extent_write_locked_range(io_tree,
601 						  inode, async_extent->start,
602 						  async_extent->start +
603 						  async_extent->ram_size - 1,
604 						  btrfs_get_extent,
605 						  WB_SYNC_ALL);
606 			kfree(async_extent);
607 			cond_resched();
608 			continue;
609 		}
610 
611 		lock_extent(io_tree, async_extent->start,
612 			    async_extent->start + async_extent->ram_size - 1,
613 			    GFP_NOFS);
614 
615 		trans = btrfs_join_transaction(root, 1);
616 		BUG_ON(IS_ERR(trans));
617 		ret = btrfs_reserve_extent(trans, root,
618 					   async_extent->compressed_size,
619 					   async_extent->compressed_size,
620 					   0, alloc_hint,
621 					   (u64)-1, &ins, 1);
622 		btrfs_end_transaction(trans, root);
623 
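		/*
		 * the reservation failed, so free the compressed pages and
		 * retry this range; with pages == NULL the retry falls back
		 * to plain uncompressed cow_file_range IO
		 */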
624 		if (ret) {
625 			int i;
626 			for (i = 0; i < async_extent->nr_pages; i++) {
627 				WARN_ON(async_extent->pages[i]->mapping);
628 				page_cache_release(async_extent->pages[i]);
629 			}
630 			kfree(async_extent->pages);
631 			async_extent->nr_pages = 0;
632 			async_extent->pages = NULL;
633 			unlock_extent(io_tree, async_extent->start,
634 				      async_extent->start +
635 				      async_extent->ram_size - 1, GFP_NOFS);
636 			goto retry;
637 		}
638 
639 		/*
640 		 * here we're doing allocation and writeback of the
641 		 * compressed pages
642 		 */
643 		btrfs_drop_extent_cache(inode, async_extent->start,
644 					async_extent->start +
645 					async_extent->ram_size - 1, 0);
646 
647 		em = alloc_extent_map(GFP_NOFS);
648 		BUG_ON(!em);
649 		em->start = async_extent->start;
650 		em->len = async_extent->ram_size;
651 		em->orig_start = em->start;
652 
653 		em->block_start = ins.objectid;
654 		em->block_len = ins.offset;
655 		em->bdev = root->fs_info->fs_devices->latest_bdev;
656 		em->compress_type = async_extent->compress_type;
657 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
658 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
659 
660 		while (1) {
661 			write_lock(&em_tree->lock);
662 			ret = add_extent_mapping(em_tree, em);
663 			write_unlock(&em_tree->lock);
664 			if (ret != -EEXIST) {
665 				free_extent_map(em);
666 				break;
667 			}
668 			btrfs_drop_extent_cache(inode, async_extent->start,
669 						async_extent->start +
670 						async_extent->ram_size - 1, 0);
671 		}
672 
673 		ret = btrfs_add_ordered_extent_compress(inode,
674 						async_extent->start,
675 						ins.objectid,
676 						async_extent->ram_size,
677 						ins.offset,
678 						BTRFS_ORDERED_COMPRESSED,
679 						async_extent->compress_type);
680 		BUG_ON(ret);
681 
682 		/*
683 		 * clear dirty, set writeback and unlock the pages.
684 		 */
685 		extent_clear_unlock_delalloc(inode,
686 				&BTRFS_I(inode)->io_tree,
687 				async_extent->start,
688 				async_extent->start +
689 				async_extent->ram_size - 1,
690 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
691 				EXTENT_CLEAR_UNLOCK |
692 				EXTENT_CLEAR_DELALLOC |
693 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
694 
695 		ret = btrfs_submit_compressed_write(inode,
696 				    async_extent->start,
697 				    async_extent->ram_size,
698 				    ins.objectid,
699 				    ins.offset, async_extent->pages,
700 				    async_extent->nr_pages);
701 
702 		BUG_ON(ret);
703 		alloc_hint = ins.objectid + ins.offset;
704 		kfree(async_extent);
705 		cond_resched();
706 	}
707 
708 	return 0;
709 }
710 
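/*
 * look in the extent map tree for a mapping near this range and use its
 * block number as an allocation hint for the new extent
 */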
711 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
712 				      u64 num_bytes)
713 {
714 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
715 	struct extent_map *em;
716 	u64 alloc_hint = 0;
717 
718 	read_lock(&em_tree->lock);
719 	em = search_extent_mapping(em_tree, start, num_bytes);
720 	if (em) {
721 		/*
722 		 * if block start isn't an actual block number then find the
723 		 * first block in this inode and use that as a hint.  If that
724 		 * block is also bogus then just don't worry about it.
725 		 */
726 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
727 			free_extent_map(em);
728 			em = search_extent_mapping(em_tree, 0, 0);
729 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
730 				alloc_hint = em->block_start;
731 			if (em)
732 				free_extent_map(em);
733 		} else {
734 			alloc_hint = em->block_start;
735 			free_extent_map(em);
736 		}
737 	}
738 	read_unlock(&em_tree->lock);
739 
740 	return alloc_hint;
741 }
742 
743 /*
744  * the callbacks end up in this code.  The basic idea is to
745  * the call backs end up in this code.  The basic idea is to
746  * allocate extents on disk for the range, and create ordered data structs
747  * in ram to track those extents.
748  *
749  * locked_page is the page that writepage had locked already.  We use
750  * it to make sure we don't do extra locks or unlocks.
751  *
752  * *page_started is set to one if we unlock locked_page and do everything
753  * required to start IO on it.  It may be clean and already done with
754  * IO when we return.
755  */
756 static noinline int cow_file_range(struct inode *inode,
757 				   struct page *locked_page,
758 				   u64 start, u64 end, int *page_started,
759 				   unsigned long *nr_written,
760 				   int unlock)
761 {
762 	struct btrfs_root *root = BTRFS_I(inode)->root;
763 	struct btrfs_trans_handle *trans;
764 	u64 alloc_hint = 0;
765 	u64 num_bytes;
766 	unsigned long ram_size;
767 	u64 disk_num_bytes;
768 	u64 cur_alloc_size;
769 	u64 blocksize = root->sectorsize;
770 	struct btrfs_key ins;
771 	struct extent_map *em;
772 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
773 	int ret = 0;
774 
775 	BUG_ON(root == root->fs_info->tree_root);
776 	trans = btrfs_join_transaction(root, 1);
777 	BUG_ON(IS_ERR(trans));
778 	btrfs_set_trans_block_group(trans, inode);
779 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
780 
781 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
782 	num_bytes = max(blocksize,  num_bytes);
783 	disk_num_bytes = num_bytes;
784 	ret = 0;
785 
786 	if (start == 0) {
787 		/* let's try to make an inline extent */
788 		ret = cow_file_range_inline(trans, root, inode,
789 					    start, end, 0, NULL);
790 		if (ret == 0) {
791 			extent_clear_unlock_delalloc(inode,
792 				     &BTRFS_I(inode)->io_tree,
793 				     start, end, NULL,
794 				     EXTENT_CLEAR_UNLOCK_PAGE |
795 				     EXTENT_CLEAR_UNLOCK |
796 				     EXTENT_CLEAR_DELALLOC |
797 				     EXTENT_CLEAR_DIRTY |
798 				     EXTENT_SET_WRITEBACK |
799 				     EXTENT_END_WRITEBACK);
800 
801 			*nr_written = *nr_written +
802 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
803 			*page_started = 1;
804 			ret = 0;
805 			goto out;
806 		}
807 	}
808 
809 	BUG_ON(disk_num_bytes >
810 	       btrfs_super_total_bytes(&root->fs_info->super_copy));
811 
812 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
813 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
814 
815 	while (disk_num_bytes > 0) {
816 		unsigned long op;
817 
818 		cur_alloc_size = disk_num_bytes;
819 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
820 					   root->sectorsize, 0, alloc_hint,
821 					   (u64)-1, &ins, 1);
822 		BUG_ON(ret);
823 
824 		em = alloc_extent_map(GFP_NOFS);
825 		BUG_ON(!em);
826 		em->start = start;
827 		em->orig_start = em->start;
828 		ram_size = ins.offset;
829 		em->len = ins.offset;
830 
831 		em->block_start = ins.objectid;
832 		em->block_len = ins.offset;
833 		em->bdev = root->fs_info->fs_devices->latest_bdev;
834 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
835 
836 		while (1) {
837 			write_lock(&em_tree->lock);
838 			ret = add_extent_mapping(em_tree, em);
839 			write_unlock(&em_tree->lock);
840 			if (ret != -EEXIST) {
841 				free_extent_map(em);
842 				break;
843 			}
844 			btrfs_drop_extent_cache(inode, start,
845 						start + ram_size - 1, 0);
846 		}
847 
848 		cur_alloc_size = ins.offset;
849 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
850 					       ram_size, cur_alloc_size, 0);
851 		BUG_ON(ret);
852 
853 		if (root->root_key.objectid ==
854 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
855 			ret = btrfs_reloc_clone_csums(inode, start,
856 						      cur_alloc_size);
857 			BUG_ON(ret);
858 		}
859 
860 		if (disk_num_bytes < cur_alloc_size)
861 			break;
862 
863 		/* we're not doing compressed IO, don't unlock the first
864 		 * page (which the caller expects to stay locked), don't
865 		 * clear any dirty bits and don't set any writeback bits
866 		 *
867 		 * Do set the Private2 bit so we know this page was properly
868 		 * setup for writepage
869 		 */
870 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
871 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
872 			EXTENT_SET_PRIVATE2;
873 
874 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
875 					     start, start + ram_size - 1,
876 					     locked_page, op);
877 		disk_num_bytes -= cur_alloc_size;
878 		num_bytes -= cur_alloc_size;
879 		alloc_hint = ins.objectid + ins.offset;
880 		start += cur_alloc_size;
881 	}
882 out:
883 	ret = 0;
884 	btrfs_end_transaction(trans, root);
885 
886 	return ret;
887 }
888 
889 /*
890  * work queue callback to start compression on a file and pages
891  */
892 static noinline void async_cow_start(struct btrfs_work *work)
893 {
894 	struct async_cow *async_cow;
895 	int num_added = 0;
896 	async_cow = container_of(work, struct async_cow, work);
897 
898 	compress_file_range(async_cow->inode, async_cow->locked_page,
899 			    async_cow->start, async_cow->end, async_cow,
900 			    &num_added);
901 	if (num_added == 0)
902 		async_cow->inode = NULL;
903 }
904 
905 /*
906  * work queue callback to submit previously compressed pages
907  */
908 static noinline void async_cow_submit(struct btrfs_work *work)
909 {
910 	struct async_cow *async_cow;
911 	struct btrfs_root *root;
912 	unsigned long nr_pages;
913 
914 	async_cow = container_of(work, struct async_cow, work);
915 
916 	root = async_cow->root;
917 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
918 		PAGE_CACHE_SHIFT;
919 
920 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
921 
922 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
923 	    5 * 1024 * 1024 &&
924 	    waitqueue_active(&root->fs_info->async_submit_wait))
925 		wake_up(&root->fs_info->async_submit_wait);
926 
927 	if (async_cow->inode)
928 		submit_compressed_extents(async_cow->inode, async_cow);
929 }
930 
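/*
 * work queue callback that frees the async_cow struct once the start and
 * submit phases have run
 */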
931 static noinline void async_cow_free(struct btrfs_work *work)
932 {
933 	struct async_cow *async_cow;
934 	async_cow = container_of(work, struct async_cow, work);
935 	kfree(async_cow);
936 }
937 
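/*
 * kick off async compression/COW for a delalloc range.  The range is split
 * into chunks of up to 512k (a single chunk if the inode is flagged
 * nocompress), each chunk is queued to the delalloc workers, and we
 * throttle against async_delalloc_pages so too much async work doesn't
 * pile up.
 */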
938 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
939 				u64 start, u64 end, int *page_started,
940 				unsigned long *nr_written)
941 {
942 	struct async_cow *async_cow;
943 	struct btrfs_root *root = BTRFS_I(inode)->root;
944 	unsigned long nr_pages;
945 	u64 cur_end;
946 	int limit = 10 * 1024 * 1024;
947 
948 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
949 			 1, 0, NULL, GFP_NOFS);
950 	while (start < end) {
951 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
952 		async_cow->inode = inode;
953 		async_cow->root = root;
954 		async_cow->locked_page = locked_page;
955 		async_cow->start = start;
956 
957 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
958 			cur_end = end;
959 		else
960 			cur_end = min(end, start + 512 * 1024 - 1);
961 
962 		async_cow->end = cur_end;
963 		INIT_LIST_HEAD(&async_cow->extents);
964 
965 		async_cow->work.func = async_cow_start;
966 		async_cow->work.ordered_func = async_cow_submit;
967 		async_cow->work.ordered_free = async_cow_free;
968 		async_cow->work.flags = 0;
969 
970 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
971 			PAGE_CACHE_SHIFT;
972 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
973 
974 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
975 				   &async_cow->work);
976 
977 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
978 			wait_event(root->fs_info->async_submit_wait,
979 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
980 			    limit));
981 		}
982 
983 		while (atomic_read(&root->fs_info->async_submit_draining) &&
984 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
985 			wait_event(root->fs_info->async_submit_wait,
986 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
987 			   0));
988 		}
989 
990 		*nr_written += nr_pages;
991 		start = cur_end + 1;
992 	}
993 	*page_started = 1;
994 	return 0;
995 }
996 
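/*
 * return 1 if any csum items exist for the given byte range, 0 otherwise.
 * Used below to decide whether a nocow write is safe.
 */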
997 static noinline int csum_exist_in_range(struct btrfs_root *root,
998 					u64 bytenr, u64 num_bytes)
999 {
1000 	int ret;
1001 	struct btrfs_ordered_sum *sums;
1002 	LIST_HEAD(list);
1003 
1004 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1005 				       bytenr + num_bytes - 1, &list);
1006 	if (ret == 0 && list_empty(&list))
1007 		return 0;
1008 
1009 	while (!list_empty(&list)) {
1010 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1011 		list_del(&sums->list);
1012 		kfree(sums);
1013 	}
1014 	return 1;
1015 }
1016 
1017 /*
1018  * callback for nocow writeback.  This checks for snapshots or COW copies
1019  * of the extents that exist in the file, and COWs the file as required.
1020  *
1021  * If no cow copies or snapshots exist, we write directly to the existing
1022  * blocks on disk
1023  */
1024 static noinline int run_delalloc_nocow(struct inode *inode,
1025 				       struct page *locked_page,
1026 			      u64 start, u64 end, int *page_started, int force,
1027 			      unsigned long *nr_written)
1028 {
1029 	struct btrfs_root *root = BTRFS_I(inode)->root;
1030 	struct btrfs_trans_handle *trans;
1031 	struct extent_buffer *leaf;
1032 	struct btrfs_path *path;
1033 	struct btrfs_file_extent_item *fi;
1034 	struct btrfs_key found_key;
1035 	u64 cow_start;
1036 	u64 cur_offset;
1037 	u64 extent_end;
1038 	u64 extent_offset;
1039 	u64 disk_bytenr;
1040 	u64 num_bytes;
1041 	int extent_type;
1042 	int ret;
1043 	int type;
1044 	int nocow;
1045 	int check_prev = 1;
1046 	bool nolock = false;
1047 
1048 	path = btrfs_alloc_path();
1049 	BUG_ON(!path);
1050 	if (root == root->fs_info->tree_root) {
1051 		nolock = true;
1052 		trans = btrfs_join_transaction_nolock(root, 1);
1053 	} else {
1054 		trans = btrfs_join_transaction(root, 1);
1055 	}
1056 	BUG_ON(IS_ERR(trans));
1057 
1058 	cow_start = (u64)-1;
1059 	cur_offset = start;
1060 	while (1) {
1061 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1062 					       cur_offset, 0);
1063 		BUG_ON(ret < 0);
1064 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1065 			leaf = path->nodes[0];
1066 			btrfs_item_key_to_cpu(leaf, &found_key,
1067 					      path->slots[0] - 1);
1068 			if (found_key.objectid == inode->i_ino &&
1069 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1070 				path->slots[0]--;
1071 		}
1072 		check_prev = 0;
1073 next_slot:
1074 		leaf = path->nodes[0];
1075 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1076 			ret = btrfs_next_leaf(root, path);
1077 			if (ret < 0)
1078 				BUG_ON(1);
1079 			if (ret > 0)
1080 				break;
1081 			leaf = path->nodes[0];
1082 		}
1083 
1084 		nocow = 0;
1085 		disk_bytenr = 0;
1086 		num_bytes = 0;
1087 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1088 
1089 		if (found_key.objectid > inode->i_ino ||
1090 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1091 		    found_key.offset > end)
1092 			break;
1093 
1094 		if (found_key.offset > cur_offset) {
1095 			extent_end = found_key.offset;
1096 			extent_type = 0;
1097 			goto out_check;
1098 		}
1099 
1100 		fi = btrfs_item_ptr(leaf, path->slots[0],
1101 				    struct btrfs_file_extent_item);
1102 		extent_type = btrfs_file_extent_type(leaf, fi);
1103 
1104 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1105 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1106 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1107 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1108 			extent_end = found_key.offset +
1109 				btrfs_file_extent_num_bytes(leaf, fi);
1110 			if (extent_end <= start) {
1111 				path->slots[0]++;
1112 				goto next_slot;
1113 			}
1114 			if (disk_bytenr == 0)
1115 				goto out_check;
1116 			if (btrfs_file_extent_compression(leaf, fi) ||
1117 			    btrfs_file_extent_encryption(leaf, fi) ||
1118 			    btrfs_file_extent_other_encoding(leaf, fi))
1119 				goto out_check;
1120 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1121 				goto out_check;
1122 			if (btrfs_extent_readonly(root, disk_bytenr))
1123 				goto out_check;
1124 			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1125 						  found_key.offset -
1126 						  extent_offset, disk_bytenr))
1127 				goto out_check;
1128 			disk_bytenr += extent_offset;
1129 			disk_bytenr += cur_offset - found_key.offset;
1130 			num_bytes = min(end + 1, extent_end) - cur_offset;
1131 			/*
1132 			 * force cow if csum exists in the range.
1133 			 * this ensures that csums for a given extent are
1134 			 * either valid or do not exist.
1135 			 */
1136 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1137 				goto out_check;
1138 			nocow = 1;
1139 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1140 			extent_end = found_key.offset +
1141 				btrfs_file_extent_inline_len(leaf, fi);
1142 			extent_end = ALIGN(extent_end, root->sectorsize);
1143 		} else {
1144 			BUG_ON(1);
1145 		}
1146 out_check:
1147 		if (extent_end <= start) {
1148 			path->slots[0]++;
1149 			goto next_slot;
1150 		}
1151 		if (!nocow) {
1152 			if (cow_start == (u64)-1)
1153 				cow_start = cur_offset;
1154 			cur_offset = extent_end;
1155 			if (cur_offset > end)
1156 				break;
1157 			path->slots[0]++;
1158 			goto next_slot;
1159 		}
1160 
1161 		btrfs_release_path(root, path);
1162 		if (cow_start != (u64)-1) {
1163 			ret = cow_file_range(inode, locked_page, cow_start,
1164 					found_key.offset - 1, page_started,
1165 					nr_written, 1);
1166 			BUG_ON(ret);
1167 			cow_start = (u64)-1;
1168 		}
1169 
1170 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1171 			struct extent_map *em;
1172 			struct extent_map_tree *em_tree;
1173 			em_tree = &BTRFS_I(inode)->extent_tree;
1174 			em = alloc_extent_map(GFP_NOFS);
1175 			BUG_ON(!em);
1176 			em->start = cur_offset;
1177 			em->orig_start = em->start;
1178 			em->len = num_bytes;
1179 			em->block_len = num_bytes;
1180 			em->block_start = disk_bytenr;
1181 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1182 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1183 			while (1) {
1184 				write_lock(&em_tree->lock);
1185 				ret = add_extent_mapping(em_tree, em);
1186 				write_unlock(&em_tree->lock);
1187 				if (ret != -EEXIST) {
1188 					free_extent_map(em);
1189 					break;
1190 				}
1191 				btrfs_drop_extent_cache(inode, em->start,
1192 						em->start + em->len - 1, 0);
1193 			}
1194 			type = BTRFS_ORDERED_PREALLOC;
1195 		} else {
1196 			type = BTRFS_ORDERED_NOCOW;
1197 		}
1198 
1199 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1200 					       num_bytes, num_bytes, type);
1201 		BUG_ON(ret);
1202 
1203 		if (root->root_key.objectid ==
1204 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1205 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1206 						      num_bytes);
1207 			BUG_ON(ret);
1208 		}
1209 
1210 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1211 				cur_offset, cur_offset + num_bytes - 1,
1212 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1213 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1214 				EXTENT_SET_PRIVATE2);
1215 		cur_offset = extent_end;
1216 		if (cur_offset > end)
1217 			break;
1218 	}
1219 	btrfs_release_path(root, path);
1220 
1221 	if (cur_offset <= end && cow_start == (u64)-1)
1222 		cow_start = cur_offset;
1223 	if (cow_start != (u64)-1) {
1224 		ret = cow_file_range(inode, locked_page, cow_start, end,
1225 				     page_started, nr_written, 1);
1226 		BUG_ON(ret);
1227 	}
1228 
1229 	if (nolock) {
1230 		ret = btrfs_end_transaction_nolock(trans, root);
1231 		BUG_ON(ret);
1232 	} else {
1233 		ret = btrfs_end_transaction(trans, root);
1234 		BUG_ON(ret);
1235 	}
1236 	btrfs_free_path(path);
1237 	return 0;
1238 }
1239 
1240 /*
1241  * extent_io.c callback to do delayed allocation processing
1242  */
1243 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1244 			      u64 start, u64 end, int *page_started,
1245 			      unsigned long *nr_written)
1246 {
1247 	int ret;
1248 	struct btrfs_root *root = BTRFS_I(inode)->root;
1249 
1250 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1251 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1252 					 page_started, 1, nr_written);
1253 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1254 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1255 					 page_started, 0, nr_written);
1256 	else if (!btrfs_test_opt(root, COMPRESS) &&
1257 		 !(BTRFS_I(inode)->force_compress))
1258 		ret = cow_file_range(inode, locked_page, start, end,
1259 				      page_started, nr_written, 1);
1260 	else
1261 		ret = cow_file_range_async(inode, locked_page, start, end,
1262 					   page_started, nr_written);
1263 	return ret;
1264 }
1265 
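/*
 * extent_io.c split_extent_hook, splitting a delalloc extent in two means
 * there is one more outstanding extent to account for
 */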
1266 static int btrfs_split_extent_hook(struct inode *inode,
1267 				   struct extent_state *orig, u64 split)
1268 {
1269 	/* not delalloc, ignore it */
1270 	if (!(orig->state & EXTENT_DELALLOC))
1271 		return 0;
1272 
1273 	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1274 	return 0;
1275 }
1276 
1277 /*
1278  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1279  * extents so we can keep track of new extents that are just merged onto old
1280  * extents, such as when we are doing sequential writes, so we can properly
1281  * account for the metadata space we'll need.
1282  */
1283 static int btrfs_merge_extent_hook(struct inode *inode,
1284 				   struct extent_state *new,
1285 				   struct extent_state *other)
1286 {
1287 	/* not delalloc, ignore it */
1288 	if (!(other->state & EXTENT_DELALLOC))
1289 		return 0;
1290 
1291 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1292 	return 0;
1293 }
1294 
1295 /*
1296  * extent_io.c set_bit_hook, used to track delayed allocation
1297  * bytes in this file, and to maintain the list of inodes that
1298  * have pending delalloc work to be done.
1299  */
1300 static int btrfs_set_bit_hook(struct inode *inode,
1301 			      struct extent_state *state, int *bits)
1302 {
1303 
1304 	/*
1305 	 * set_bit and clear bit hooks normally require _irqsave/restore
1306 	 * but in this case, we are only testing for the DELALLOC
1307 	 * bit, which is only set or cleared with irqs on
1308 	 */
1309 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1310 		struct btrfs_root *root = BTRFS_I(inode)->root;
1311 		u64 len = state->end + 1 - state->start;
1312 		int do_list = (root->root_key.objectid !=
1313 			       BTRFS_ROOT_TREE_OBJECTID);
1314 
1315 		if (*bits & EXTENT_FIRST_DELALLOC)
1316 			*bits &= ~EXTENT_FIRST_DELALLOC;
1317 		else
1318 			atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1319 
1320 		spin_lock(&root->fs_info->delalloc_lock);
1321 		BTRFS_I(inode)->delalloc_bytes += len;
1322 		root->fs_info->delalloc_bytes += len;
1323 		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1324 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1325 				      &root->fs_info->delalloc_inodes);
1326 		}
1327 		spin_unlock(&root->fs_info->delalloc_lock);
1328 	}
1329 	return 0;
1330 }
1331 
1332 /*
1333  * extent_io.c clear_bit_hook, see set_bit_hook for why
1334  */
1335 static int btrfs_clear_bit_hook(struct inode *inode,
1336 				struct extent_state *state, int *bits)
1337 {
1338 	/*
1339 	 * set_bit and clear bit hooks normally require _irqsave/restore
1340 	 * but in this case, we are only testing for the DELALLOC
1341 	 * bit, which is only set or cleared with irqs on
1342 	 */
1343 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1344 		struct btrfs_root *root = BTRFS_I(inode)->root;
1345 		u64 len = state->end + 1 - state->start;
1346 		int do_list = (root->root_key.objectid !=
1347 			       BTRFS_ROOT_TREE_OBJECTID);
1348 
1349 		if (*bits & EXTENT_FIRST_DELALLOC)
1350 			*bits &= ~EXTENT_FIRST_DELALLOC;
1351 		else if (!(*bits & EXTENT_DO_ACCOUNTING))
1352 			atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1353 
1354 		if (*bits & EXTENT_DO_ACCOUNTING)
1355 			btrfs_delalloc_release_metadata(inode, len);
1356 
1357 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1358 		    && do_list)
1359 			btrfs_free_reserved_data_space(inode, len);
1360 
1361 		spin_lock(&root->fs_info->delalloc_lock);
1362 		root->fs_info->delalloc_bytes -= len;
1363 		BTRFS_I(inode)->delalloc_bytes -= len;
1364 
1365 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1366 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1367 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1368 		}
1369 		spin_unlock(&root->fs_info->delalloc_lock);
1370 	}
1371 	return 0;
1372 }
1373 
1374 /*
1375  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1376  * we don't create bios that span stripes or chunks
1377  */
1378 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1379 			 size_t size, struct bio *bio,
1380 			 unsigned long bio_flags)
1381 {
1382 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1383 	struct btrfs_mapping_tree *map_tree;
1384 	u64 logical = (u64)bio->bi_sector << 9;
1385 	u64 length = 0;
1386 	u64 map_length;
1387 	int ret;
1388 
1389 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1390 		return 0;
1391 
1392 	length = bio->bi_size;
1393 	map_tree = &root->fs_info->mapping_tree;
1394 	map_length = length;
1395 	ret = btrfs_map_block(map_tree, READ, logical,
1396 			      &map_length, NULL, 0);
1397 
1398 	if (map_length < length + size)
1399 		return 1;
1400 	return ret;
1401 }
1402 
1403 /*
1404  * in order to insert checksums into the metadata in large chunks,
1405  * we wait until bio submission time.   All the pages in the bio are
1406  * checksummed and sums are attached onto the ordered extent record.
1407  *
1408  * At IO completion time the csums attached to the ordered extent record
1409  * are inserted into the btree
1410  */
1411 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1412 				    struct bio *bio, int mirror_num,
1413 				    unsigned long bio_flags,
1414 				    u64 bio_offset)
1415 {
1416 	struct btrfs_root *root = BTRFS_I(inode)->root;
1417 	int ret = 0;
1418 
1419 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1420 	BUG_ON(ret);
1421 	return 0;
1422 }
1423 
1424 /*
1425  * in order to insert checksums into the metadata in large chunks,
1426  * we wait until bio submission time.   All the pages in the bio are
1427  * checksummed and sums are attached onto the ordered extent record.
1428  *
1429  * At IO completion time the csums attached to the ordered extent record
1430  * are inserted into the btree
1431  */
1432 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1433 			  int mirror_num, unsigned long bio_flags,
1434 			  u64 bio_offset)
1435 {
1436 	struct btrfs_root *root = BTRFS_I(inode)->root;
1437 	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1438 }
1439 
1440 /*
1441  * extent_io.c submission hook. This does the right thing for csum calculation
1442  * on write, or reading the csums from the tree before a read
1443  */
1444 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1445 			  int mirror_num, unsigned long bio_flags,
1446 			  u64 bio_offset)
1447 {
1448 	struct btrfs_root *root = BTRFS_I(inode)->root;
1449 	int ret = 0;
1450 	int skip_sum;
1451 
1452 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1453 
1454 	if (root == root->fs_info->tree_root)
1455 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1456 	else
1457 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1458 	BUG_ON(ret);
1459 
1460 	if (!(rw & REQ_WRITE)) {
1461 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1462 			return btrfs_submit_compressed_read(inode, bio,
1463 						    mirror_num, bio_flags);
1464 		} else if (!skip_sum)
1465 			btrfs_lookup_bio_sums(root, inode, bio, NULL);
1466 		goto mapit;
1467 	} else if (!skip_sum) {
1468 		/* csum items have already been cloned */
1469 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1470 			goto mapit;
1471 		/* we're doing a write, do the async checksumming */
1472 		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1473 				   inode, rw, bio, mirror_num,
1474 				   bio_flags, bio_offset,
1475 				   __btrfs_submit_bio_start,
1476 				   __btrfs_submit_bio_done);
1477 	}
1478 
1479 mapit:
1480 	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1481 }
1482 
1483 /*
1484  * given a list of ordered sums, record them in the inode.  This happens
1485  * at IO completion time based on sums calculated at bio submission time.
1486  */
1487 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1488 			     struct inode *inode, u64 file_offset,
1489 			     struct list_head *list)
1490 {
1491 	struct btrfs_ordered_sum *sum;
1492 
1493 	btrfs_set_trans_block_group(trans, inode);
1494 
1495 	list_for_each_entry(sum, list, list) {
1496 		btrfs_csum_file_blocks(trans,
1497 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1498 	}
1499 	return 0;
1500 }
1501 
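/*
 * mark a byte range as delalloc in the io_tree.  The inclusive end is
 * expected to be the last byte of a page, so warn if it is page aligned.
 */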
1502 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1503 			      struct extent_state **cached_state)
1504 {
1505 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1506 		WARN_ON(1);
1507 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1508 				   cached_state, GFP_NOFS);
1509 }
1510 
1511 /* see btrfs_writepage_start_hook for details on why this is required */
1512 struct btrfs_writepage_fixup {
1513 	struct page *page;
1514 	struct btrfs_work work;
1515 };
1516 
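/*
 * worker side of the writepage fixup below: take the page lock again,
 * wait out any ordered extent covering the page and then fix up the
 * delalloc state for the range
 */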
1517 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1518 {
1519 	struct btrfs_writepage_fixup *fixup;
1520 	struct btrfs_ordered_extent *ordered;
1521 	struct extent_state *cached_state = NULL;
1522 	struct page *page;
1523 	struct inode *inode;
1524 	u64 page_start;
1525 	u64 page_end;
1526 
1527 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1528 	page = fixup->page;
1529 again:
1530 	lock_page(page);
1531 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1532 		ClearPageChecked(page);
1533 		goto out_page;
1534 	}
1535 
1536 	inode = page->mapping->host;
1537 	page_start = page_offset(page);
1538 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1539 
1540 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1541 			 &cached_state, GFP_NOFS);
1542 
1543 	/* already ordered? We're done */
1544 	if (PagePrivate2(page))
1545 		goto out;
1546 
1547 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1548 	if (ordered) {
1549 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1550 				     page_end, &cached_state, GFP_NOFS);
1551 		unlock_page(page);
1552 		btrfs_start_ordered_extent(inode, ordered, 1);
1553 		goto again;
1554 	}
1555 
1556 	BUG();
1557 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1558 	ClearPageChecked(page);
1559 out:
1560 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1561 			     &cached_state, GFP_NOFS);
1562 out_page:
1563 	unlock_page(page);
1564 	page_cache_release(page);
1565 	kfree(fixup);
1566 }
1567 
1568 /*
1569  * There are a few paths in the higher layers of the kernel that directly
1570  * set the page dirty bit without asking the filesystem if it is a
1571  * good idea.  This causes problems because we want to make sure COW
1572  * properly happens and the data=ordered rules are followed.
1573  *
1574  * In our case any range that doesn't have the ORDERED bit set
1575  * hasn't been properly setup for IO.  We kick off an async process
1576  * to fix it up.  The async helper will wait for ordered extents, set
1577  * the delalloc bit and make it safe to write the page.
1578  */
1579 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1580 {
1581 	struct inode *inode = page->mapping->host;
1582 	struct btrfs_writepage_fixup *fixup;
1583 	struct btrfs_root *root = BTRFS_I(inode)->root;
1584 
1585 	/* this page is properly in the ordered list */
1586 	if (TestClearPagePrivate2(page))
1587 		return 0;
1588 
1589 	if (PageChecked(page))
1590 		return -EAGAIN;
1591 
1592 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1593 	if (!fixup)
1594 		return -EAGAIN;
1595 
1596 	SetPageChecked(page);
1597 	page_cache_get(page);
1598 	fixup->work.func = btrfs_writepage_fixup_worker;
1599 	fixup->page = page;
1600 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1601 	return -EAGAIN;
1602 }
1603 
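/*
 * insert the file extent item for an extent allocated during ordered IO
 * completion: drop any old extents in the range, add the new item and
 * record the allocation in the extent tree
 */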
1604 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1605 				       struct inode *inode, u64 file_pos,
1606 				       u64 disk_bytenr, u64 disk_num_bytes,
1607 				       u64 num_bytes, u64 ram_bytes,
1608 				       u8 compression, u8 encryption,
1609 				       u16 other_encoding, int extent_type)
1610 {
1611 	struct btrfs_root *root = BTRFS_I(inode)->root;
1612 	struct btrfs_file_extent_item *fi;
1613 	struct btrfs_path *path;
1614 	struct extent_buffer *leaf;
1615 	struct btrfs_key ins;
1616 	u64 hint;
1617 	int ret;
1618 
1619 	path = btrfs_alloc_path();
1620 	BUG_ON(!path);
1621 
1622 	path->leave_spinning = 1;
1623 
1624 	/*
1625 	 * we may be replacing one extent in the tree with another.
1626 	 * The new extent is pinned in the extent map, and we don't want
1627 	 * to drop it from the cache until it is completely in the btree.
1628 	 *
1629 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1630 	 * the caller is expected to unpin it and allow it to be merged
1631 	 * with the others.
1632 	 */
1633 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1634 				 &hint, 0);
1635 	BUG_ON(ret);
1636 
1637 	ins.objectid = inode->i_ino;
1638 	ins.offset = file_pos;
1639 	ins.type = BTRFS_EXTENT_DATA_KEY;
1640 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1641 	BUG_ON(ret);
1642 	leaf = path->nodes[0];
1643 	fi = btrfs_item_ptr(leaf, path->slots[0],
1644 			    struct btrfs_file_extent_item);
1645 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1646 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1647 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1648 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1649 	btrfs_set_file_extent_offset(leaf, fi, 0);
1650 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1651 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1652 	btrfs_set_file_extent_compression(leaf, fi, compression);
1653 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1654 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1655 
1656 	btrfs_unlock_up_safe(path, 1);
1657 	btrfs_set_lock_blocking(leaf);
1658 
1659 	btrfs_mark_buffer_dirty(leaf);
1660 
1661 	inode_add_bytes(inode, num_bytes);
1662 
1663 	ins.objectid = disk_bytenr;
1664 	ins.offset = disk_num_bytes;
1665 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1666 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1667 					root->root_key.objectid,
1668 					inode->i_ino, file_pos, &ins);
1669 	BUG_ON(ret);
1670 	btrfs_free_path(path);
1671 
1672 	return 0;
1673 }
1674 
1675 /*
1676  * helper function for btrfs_finish_ordered_io, this
1677  * just reads in some of the csum leaves to prime them into ram
1678  * before we start the transaction.  It limits the amount of btree
1679  * reads required while inside the transaction.
1680  */
1681 /* as ordered data IO finishes, this gets called so we can finish
1682  * an ordered extent if the range of bytes in the file it covers are
1683  * fully written.
1684  */
1685 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1686 {
1687 	struct btrfs_root *root = BTRFS_I(inode)->root;
1688 	struct btrfs_trans_handle *trans = NULL;
1689 	struct btrfs_ordered_extent *ordered_extent = NULL;
1690 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1691 	struct extent_state *cached_state = NULL;
1692 	int compress_type = 0;
1693 	int ret;
1694 	bool nolock = false;
1695 
1696 	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1697 					     end - start + 1);
1698 	if (!ret)
1699 		return 0;
1700 	BUG_ON(!ordered_extent);
1701 
1702 	nolock = (root == root->fs_info->tree_root);
1703 
1704 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1705 		BUG_ON(!list_empty(&ordered_extent->list));
1706 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1707 		if (!ret) {
1708 			if (nolock)
1709 				trans = btrfs_join_transaction_nolock(root, 1);
1710 			else
1711 				trans = btrfs_join_transaction(root, 1);
1712 			BUG_ON(IS_ERR(trans));
1713 			btrfs_set_trans_block_group(trans, inode);
1714 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1715 			ret = btrfs_update_inode(trans, root, inode);
1716 			BUG_ON(ret);
1717 		}
1718 		goto out;
1719 	}
1720 
1721 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1722 			 ordered_extent->file_offset + ordered_extent->len - 1,
1723 			 0, &cached_state, GFP_NOFS);
1724 
1725 	if (nolock)
1726 		trans = btrfs_join_transaction_nolock(root, 1);
1727 	else
1728 		trans = btrfs_join_transaction(root, 1);
1729 	BUG_ON(IS_ERR(trans));
1730 	btrfs_set_trans_block_group(trans, inode);
1731 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1732 
1733 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1734 		compress_type = ordered_extent->compress_type;
1735 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1736 		BUG_ON(compress_type);
1737 		ret = btrfs_mark_extent_written(trans, inode,
1738 						ordered_extent->file_offset,
1739 						ordered_extent->file_offset +
1740 						ordered_extent->len);
1741 		BUG_ON(ret);
1742 	} else {
1743 		BUG_ON(root == root->fs_info->tree_root);
1744 		ret = insert_reserved_file_extent(trans, inode,
1745 						ordered_extent->file_offset,
1746 						ordered_extent->start,
1747 						ordered_extent->disk_len,
1748 						ordered_extent->len,
1749 						ordered_extent->len,
1750 						compress_type, 0, 0,
1751 						BTRFS_FILE_EXTENT_REG);
1752 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1753 				   ordered_extent->file_offset,
1754 				   ordered_extent->len);
1755 		BUG_ON(ret);
1756 	}
1757 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1758 			     ordered_extent->file_offset +
1759 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1760 
1761 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1762 			  &ordered_extent->list);
1763 
1764 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1765 	ret = btrfs_update_inode(trans, root, inode);
1766 	BUG_ON(ret);
1767 out:
1768 	if (nolock) {
1769 		if (trans)
1770 			btrfs_end_transaction_nolock(trans, root);
1771 	} else {
1772 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1773 		if (trans)
1774 			btrfs_end_transaction(trans, root);
1775 	}
1776 
1777 	/* once for us */
1778 	btrfs_put_ordered_extent(ordered_extent);
1779 	/* once for the tree */
1780 	btrfs_put_ordered_extent(ordered_extent);
1781 
1782 	return 0;
1783 }
1784 
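/*
 * writepage end_io hook: clear PagePrivate2 on the page and try to finish
 * the ordered extent covering [start, end] now that the bytes are on disk.
 */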
1785 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1786 				struct extent_state *state, int uptodate)
1787 {
1788 	ClearPagePrivate2(page);
1789 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1790 }
1791 
1792 /*
1793  * When IO fails, either with EIO or because csum verification fails, we
1794  * try other mirrors that might have a good copy of the data.  This
1795  * io_failure_record is used to record state as we go through all the
1796  * mirrors.  If another mirror has good data, the page is set up to date
1797  * and things continue.  If a good mirror can't be found, the original
1798  * bio end_io callback is called to indicate things have failed.
1799  */
1800 struct io_failure_record {
1801 	struct page *page;
1802 	u64 start;
1803 	u64 len;
1804 	u64 logical;
1805 	unsigned long bio_flags;
1806 	int last_mirror;
1807 };
1808 
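/*
 * IO failure hook: record (or update) the io_failure_record for this range
 * and resubmit the bio to the next mirror.  Gives up with -EIO once every
 * mirror has been tried.
 */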
1809 static int btrfs_io_failed_hook(struct bio *failed_bio,
1810 			 struct page *page, u64 start, u64 end,
1811 			 struct extent_state *state)
1812 {
1813 	struct io_failure_record *failrec = NULL;
1814 	u64 private;
1815 	struct extent_map *em;
1816 	struct inode *inode = page->mapping->host;
1817 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1818 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1819 	struct bio *bio;
1820 	int num_copies;
1821 	int ret;
1822 	int rw;
1823 	u64 logical;
1824 
1825 	ret = get_state_private(failure_tree, start, &private);
1826 	if (ret) {
1827 		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1828 		if (!failrec)
1829 			return -ENOMEM;
1830 		failrec->start = start;
1831 		failrec->len = end - start + 1;
1832 		failrec->last_mirror = 0;
1833 		failrec->bio_flags = 0;
1834 
1835 		read_lock(&em_tree->lock);
1836 		em = lookup_extent_mapping(em_tree, start, failrec->len);
1837 		if (em && (em->start > start || em->start + em->len < start)) {
1838 			free_extent_map(em);
1839 			em = NULL;
1840 		}
1841 		read_unlock(&em_tree->lock);
1842 
1843 		if (!em || IS_ERR(em)) {
1844 			kfree(failrec);
1845 			return -EIO;
1846 		}
1847 		logical = start - em->start;
1848 		logical = em->block_start + logical;
1849 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1850 			logical = em->block_start;
1851 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1852 			extent_set_compress_type(&failrec->bio_flags,
1853 						 em->compress_type);
1854 		}
1855 		failrec->logical = logical;
1856 		free_extent_map(em);
1857 		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1858 				EXTENT_DIRTY, GFP_NOFS);
1859 		set_state_private(failure_tree, start,
1860 				 (u64)(unsigned long)failrec);
1861 	} else {
1862 		failrec = (struct io_failure_record *)(unsigned long)private;
1863 	}
1864 	num_copies = btrfs_num_copies(
1865 			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
1866 			      failrec->logical, failrec->len);
1867 	failrec->last_mirror++;
1868 	if (!state) {
1869 		spin_lock(&BTRFS_I(inode)->io_tree.lock);
1870 		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1871 						    failrec->start,
1872 						    EXTENT_LOCKED);
1873 		if (state && state->start != failrec->start)
1874 			state = NULL;
1875 		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1876 	}
1877 	if (!state || failrec->last_mirror > num_copies) {
1878 		set_state_private(failure_tree, failrec->start, 0);
1879 		clear_extent_bits(failure_tree, failrec->start,
1880 				  failrec->start + failrec->len - 1,
1881 				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1882 		kfree(failrec);
1883 		return -EIO;
1884 	}
1885 	bio = bio_alloc(GFP_NOFS, 1);
1886 	bio->bi_private = state;
1887 	bio->bi_end_io = failed_bio->bi_end_io;
1888 	bio->bi_sector = failrec->logical >> 9;
1889 	bio->bi_bdev = failed_bio->bi_bdev;
1890 	bio->bi_size = 0;
1891 
1892 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
1893 	if (failed_bio->bi_rw & REQ_WRITE)
1894 		rw = WRITE;
1895 	else
1896 		rw = READ;
1897 
1898 	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1899 						      failrec->last_mirror,
1900 						      failrec->bio_flags, 0);
1901 	return 0;
1902 }
1903 
1904 /*
1905  * each time an IO finishes, we do a fast check in the IO failure tree
1906  * to see if we need to process or clean up an io_failure_record
1907  */
1908 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1909 {
1910 	u64 private;
1911 	u64 private_failure;
1912 	struct io_failure_record *failure;
1913 	int ret;
1914 
1915 	private = 0;
1916 	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1917 			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
1918 		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1919 					start, &private_failure);
1920 		if (ret == 0) {
1921 			failure = (struct io_failure_record *)(unsigned long)
1922 				   private_failure;
1923 			set_state_private(&BTRFS_I(inode)->io_failure_tree,
1924 					  failure->start, 0);
1925 			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1926 					  failure->start,
1927 					  failure->start + failure->len - 1,
1928 					  EXTENT_DIRTY | EXTENT_LOCKED,
1929 					  GFP_NOFS);
1930 			kfree(failure);
1931 		}
1932 	}
1933 	return 0;
1934 }
1935 
1936 /*
1937  * when reads are done, we need to check csums to verify the data is correct
1938  * if there's a match, we allow the bio to finish.  If not, we go through
1939  * the io_failure_record routines to find good copies
1940  */
1941 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1942 			       struct extent_state *state)
1943 {
1944 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1945 	struct inode *inode = page->mapping->host;
1946 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1947 	char *kaddr;
1948 	u64 private = ~(u32)0;
1949 	int ret;
1950 	struct btrfs_root *root = BTRFS_I(inode)->root;
1951 	u32 csum = ~(u32)0;
1952 
1953 	if (PageChecked(page)) {
1954 		ClearPageChecked(page);
1955 		goto good;
1956 	}
1957 
1958 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1959 		return 0;
1960 
1961 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1962 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1963 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1964 				  GFP_NOFS);
1965 		return 0;
1966 	}
1967 
1968 	if (state && state->start == start) {
1969 		private = state->private;
1970 		ret = 0;
1971 	} else {
1972 		ret = get_state_private(io_tree, start, &private);
1973 	}
1974 	kaddr = kmap_atomic(page, KM_USER0);
1975 	if (ret)
1976 		goto zeroit;
1977 
1978 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1979 	btrfs_csum_final(csum, (char *)&csum);
1980 	if (csum != private)
1981 		goto zeroit;
1982 
1983 	kunmap_atomic(kaddr, KM_USER0);
1984 good:
1985 	/* if the io failure tree for this inode is non-empty,
1986 	 * check to see if we've recovered from a failed IO
1987 	 */
1988 	btrfs_clean_io_failures(inode, start);
1989 	return 0;
1990 
1991 zeroit:
1992 	if (printk_ratelimit()) {
1993 		printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1994 		       "private %llu\n", page->mapping->host->i_ino,
1995 		       (unsigned long long)start, csum,
1996 		       (unsigned long long)private);
1997 	}
1998 	memset(kaddr + offset, 1, end - start + 1);
1999 	flush_dcache_page(page);
2000 	kunmap_atomic(kaddr, KM_USER0);
2001 	if (private == 0)
2002 		return 0;
2003 	return -EIO;
2004 }
2005 
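/* an inode whose final iput has been deferred to btrfs_run_delayed_iputs */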
2006 struct delayed_iput {
2007 	struct list_head list;
2008 	struct inode *inode;
2009 };
2010 
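/*
 * drop a reference on @inode.  If it would be the last reference, defer the
 * final iput by queueing the inode on fs_info->delayed_iputs instead; the
 * queue is drained later by btrfs_run_delayed_iputs().
 */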
2011 void btrfs_add_delayed_iput(struct inode *inode)
2012 {
2013 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2014 	struct delayed_iput *delayed;
2015 
2016 	if (atomic_add_unless(&inode->i_count, -1, 1))
2017 		return;
2018 
2019 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2020 	delayed->inode = inode;
2021 
2022 	spin_lock(&fs_info->delayed_iput_lock);
2023 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2024 	spin_unlock(&fs_info->delayed_iput_lock);
2025 }
2026 
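/*
 * splice the deferred iputs off of fs_info and call iput() on each queued
 * inode, holding cleanup_work_sem for read while the iputs run.
 */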
2027 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2028 {
2029 	LIST_HEAD(list);
2030 	struct btrfs_fs_info *fs_info = root->fs_info;
2031 	struct delayed_iput *delayed;
2032 	int empty;
2033 
2034 	spin_lock(&fs_info->delayed_iput_lock);
2035 	empty = list_empty(&fs_info->delayed_iputs);
2036 	spin_unlock(&fs_info->delayed_iput_lock);
2037 	if (empty)
2038 		return;
2039 
2040 	down_read(&root->fs_info->cleanup_work_sem);
2041 	spin_lock(&fs_info->delayed_iput_lock);
2042 	list_splice_init(&fs_info->delayed_iputs, &list);
2043 	spin_unlock(&fs_info->delayed_iput_lock);
2044 
2045 	while (!list_empty(&list)) {
2046 		delayed = list_entry(list.next, struct delayed_iput, list);
2047 		list_del(&delayed->list);
2048 		iput(delayed->inode);
2049 		kfree(delayed);
2050 	}
2051 	up_read(&root->fs_info->cleanup_work_sem);
2052 }
2053 
2054 /*
2055  * calculate extra metadata reservation when snapshotting a subvolume
2056  * that contains orphan files.
2057  */
2058 void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
2059 				struct btrfs_pending_snapshot *pending,
2060 				u64 *bytes_to_reserve)
2061 {
2062 	struct btrfs_root *root;
2063 	struct btrfs_block_rsv *block_rsv;
2064 	u64 num_bytes;
2065 	int index;
2066 
2067 	root = pending->root;
2068 	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2069 		return;
2070 
2071 	block_rsv = root->orphan_block_rsv;
2072 
2073 	/* orphan block reservation for the snapshot */
2074 	num_bytes = block_rsv->size;
2075 
2076 	/*
2077 	 * after the snapshot is created, COWing tree blocks may use more
2078 	 * space than they free. So we should make sure there is enough
2079 	 * reserved space.
2080 	 */
2081 	index = trans->transid & 0x1;
2082 	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2083 		num_bytes += block_rsv->size -
2084 			     (block_rsv->reserved + block_rsv->freed[index]);
2085 	}
2086 
2087 	*bytes_to_reserve += num_bytes;
2088 }
2089 
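/*
 * after the snapshot has been created, refill the source subvolume's orphan
 * block reservation and give the snapshot its own orphan block reservation,
 * both migrated out of the pending snapshot's reserve.
 */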
2090 void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
2091 				struct btrfs_pending_snapshot *pending)
2092 {
2093 	struct btrfs_root *root = pending->root;
2094 	struct btrfs_root *snap = pending->snap;
2095 	struct btrfs_block_rsv *block_rsv;
2096 	u64 num_bytes;
2097 	int index;
2098 	int ret;
2099 
2100 	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2101 		return;
2102 
2103 	/* refill source subvolume's orphan block reservation */
2104 	block_rsv = root->orphan_block_rsv;
2105 	index = trans->transid & 0x1;
2106 	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2107 		num_bytes = block_rsv->size -
2108 			    (block_rsv->reserved + block_rsv->freed[index]);
2109 		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2110 					      root->orphan_block_rsv,
2111 					      num_bytes);
2112 		BUG_ON(ret);
2113 	}
2114 
2115 	/* setup orphan block reservation for the snapshot */
2116 	block_rsv = btrfs_alloc_block_rsv(snap);
2117 	BUG_ON(!block_rsv);
2118 
2119 	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2120 	snap->orphan_block_rsv = block_rsv;
2121 
2122 	num_bytes = root->orphan_block_rsv->size;
2123 	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2124 				      block_rsv, num_bytes);
2125 	BUG_ON(ret);
2126 
2127 #if 0
2128 	/* insert orphan item for the snapshot */
2129 	WARN_ON(!root->orphan_item_inserted);
2130 	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2131 				       snap->root_key.objectid);
2132 	BUG_ON(ret);
2133 	snap->orphan_item_inserted = 1;
2134 #endif
2135 }
2136 
2137 enum btrfs_orphan_cleanup_state {
2138 	ORPHAN_CLEANUP_STARTED	= 1,
2139 	ORPHAN_CLEANUP_DONE	= 2,
2140 };
2141 
2142 /*
2143  * This is called at transaction commit time. If there are no orphan
2144  * files in the subvolume, it removes the orphan item and frees the block_rsv
2145  * structure.
2146  */
2147 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2148 			      struct btrfs_root *root)
2149 {
2150 	int ret;
2151 
2152 	if (!list_empty(&root->orphan_list) ||
2153 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2154 		return;
2155 
2156 	if (root->orphan_item_inserted &&
2157 	    btrfs_root_refs(&root->root_item) > 0) {
2158 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2159 					    root->root_key.objectid);
2160 		BUG_ON(ret);
2161 		root->orphan_item_inserted = 0;
2162 	}
2163 
2164 	if (root->orphan_block_rsv) {
2165 		WARN_ON(root->orphan_block_rsv->size > 0);
2166 		btrfs_free_block_rsv(root, root->orphan_block_rsv);
2167 		root->orphan_block_rsv = NULL;
2168 	}
2169 }
2170 
2171 /*
2172  * This creates an orphan entry for the given inode in case something goes
2173  * wrong in the middle of an unlink/truncate.
2174  *
2175  * NOTE: caller of this function should reserve 5 units of metadata for
2176  *	 this function.
2177  */
2178 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2179 {
2180 	struct btrfs_root *root = BTRFS_I(inode)->root;
2181 	struct btrfs_block_rsv *block_rsv = NULL;
2182 	int reserve = 0;
2183 	int insert = 0;
2184 	int ret;
2185 
2186 	if (!root->orphan_block_rsv) {
2187 		block_rsv = btrfs_alloc_block_rsv(root);
2188 		BUG_ON(!block_rsv);
2189 	}
2190 
2191 	spin_lock(&root->orphan_lock);
2192 	if (!root->orphan_block_rsv) {
2193 		root->orphan_block_rsv = block_rsv;
2194 	} else if (block_rsv) {
2195 		btrfs_free_block_rsv(root, block_rsv);
2196 		block_rsv = NULL;
2197 	}
2198 
2199 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2200 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2201 #if 0
2202 		/*
2203 		 * For proper ENOSPC handling, we should do orphan
2204 		 * cleanup when mounting. But this introduces a backward
2205 		 * compatibility issue.
2206 		 */
2207 		if (!xchg(&root->orphan_item_inserted, 1))
2208 			insert = 2;
2209 		else
2210 			insert = 1;
2211 #endif
2212 		insert = 1;
2213 	} else {
2214 		WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
2215 	}
2216 
2217 	if (!BTRFS_I(inode)->orphan_meta_reserved) {
2218 		BTRFS_I(inode)->orphan_meta_reserved = 1;
2219 		reserve = 1;
2220 	}
2221 	spin_unlock(&root->orphan_lock);
2222 
2223 	if (block_rsv)
2224 		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2225 
2226 	/* grab metadata reservation from transaction handle */
2227 	if (reserve) {
2228 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2229 		BUG_ON(ret);
2230 	}
2231 
2232 	/* insert an orphan item to track this unlinked/truncated file */
2233 	if (insert >= 1) {
2234 		ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2235 		BUG_ON(ret);
2236 	}
2237 
2238 	/* insert an orphan item to track that this subvolume has orphan files */
2239 	if (insert >= 2) {
2240 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2241 					       root->root_key.objectid);
2242 		BUG_ON(ret);
2243 	}
2244 	return 0;
2245 }
2246 
2247 /*
2248  * We have done the truncate/delete so we can go ahead and remove the orphan
2249  * item for this particular inode.
2250  */
2251 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2252 {
2253 	struct btrfs_root *root = BTRFS_I(inode)->root;
2254 	int delete_item = 0;
2255 	int release_rsv = 0;
2256 	int ret = 0;
2257 
2258 	spin_lock(&root->orphan_lock);
2259 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2260 		list_del_init(&BTRFS_I(inode)->i_orphan);
2261 		delete_item = 1;
2262 	}
2263 
2264 	if (BTRFS_I(inode)->orphan_meta_reserved) {
2265 		BTRFS_I(inode)->orphan_meta_reserved = 0;
2266 		release_rsv = 1;
2267 	}
2268 	spin_unlock(&root->orphan_lock);
2269 
2270 	if (trans && delete_item) {
2271 		ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2272 		BUG_ON(ret);
2273 	}
2274 
2275 	if (release_rsv)
2276 		btrfs_orphan_release_metadata(inode);
2277 
2278 	return 0;
2279 }
2280 
2281 /*
2282  * this cleans up any orphans that may be left on the list from the last use
2283  * of this root.
2284  */
2285 void btrfs_orphan_cleanup(struct btrfs_root *root)
2286 {
2287 	struct btrfs_path *path;
2288 	struct extent_buffer *leaf;
2289 	struct btrfs_key key, found_key;
2290 	struct btrfs_trans_handle *trans;
2291 	struct inode *inode;
2292 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2293 
2294 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2295 		return;
2296 
2297 	path = btrfs_alloc_path();
2298 	BUG_ON(!path);
2299 	path->reada = -1;
2300 
2301 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2302 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2303 	key.offset = (u64)-1;
2304 
2305 	while (1) {
2306 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2307 		if (ret < 0) {
2308 			printk(KERN_ERR "Error searching slot for orphan: %d"
2309 			       "\n", ret);
2310 			break;
2311 		}
2312 
2313 		/*
2314 		 * ret == 0 means we found what we were searching for, which
2315 		 * is weird, but possible, so only screw with the path if we didn't
2316 		 * find the key and see if we have stuff that matches
2317 		 */
2318 		if (ret > 0) {
2319 			if (path->slots[0] == 0)
2320 				break;
2321 			path->slots[0]--;
2322 		}
2323 
2324 		/* pull out the item */
2325 		leaf = path->nodes[0];
2326 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2327 
2328 		/* make sure the item matches what we want */
2329 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2330 			break;
2331 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2332 			break;
2333 
2334 		/* release the path since we're done with it */
2335 		btrfs_release_path(root, path);
2336 
2337 		/*
2338 		 * this is where we are basically btrfs_lookup, without the
2339 		 * crossing root thing.  we store the inode number in the
2340 		 * offset of the orphan item.
2341 		 */
2342 		found_key.objectid = found_key.offset;
2343 		found_key.type = BTRFS_INODE_ITEM_KEY;
2344 		found_key.offset = 0;
2345 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2346 		BUG_ON(IS_ERR(inode));
2347 
2348 		/*
2349 		 * add this inode to the orphan list so btrfs_orphan_del does
2350 		 * the proper thing when we hit it
2351 		 */
2352 		spin_lock(&root->orphan_lock);
2353 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2354 		spin_unlock(&root->orphan_lock);
2355 
2356 		/*
2357 		 * if this is a bad inode, it means we actually succeeded in
2358 		 * removing the inode, but not the orphan record, which means
2359 		 * we need to manually delete the orphan since iput will just
2360 		 * do a destroy_inode
2361 		 */
2362 		if (is_bad_inode(inode)) {
2363 			trans = btrfs_start_transaction(root, 0);
2364 			BUG_ON(IS_ERR(trans));
2365 			btrfs_orphan_del(trans, inode);
2366 			btrfs_end_transaction(trans, root);
2367 			iput(inode);
2368 			continue;
2369 		}
2370 
2371 		/* if we have links, this was a truncate, let's do that */
2372 		if (inode->i_nlink) {
2373 			nr_truncate++;
2374 			btrfs_truncate(inode);
2375 		} else {
2376 			nr_unlink++;
2377 		}
2378 
2379 		/* this will do delete_inode and everything for us */
2380 		iput(inode);
2381 	}
2382 	btrfs_free_path(path);
2383 
2384 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2385 
2386 	if (root->orphan_block_rsv)
2387 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2388 					(u64)-1);
2389 
2390 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2391 		trans = btrfs_join_transaction(root, 1);
2392 		BUG_ON(IS_ERR(trans));
2393 		btrfs_end_transaction(trans, root);
2394 	}
2395 
2396 	if (nr_unlink)
2397 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2398 	if (nr_truncate)
2399 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2400 }
2401 
2402 /*
2403  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2404  * don't find any xattrs, we know there can't be any acls.
2405  *
2406  * slot is the slot the inode is in, objectid is the objectid of the inode
2407  */
2408 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2409 					  int slot, u64 objectid)
2410 {
2411 	u32 nritems = btrfs_header_nritems(leaf);
2412 	struct btrfs_key found_key;
2413 	int scanned = 0;
2414 
2415 	slot++;
2416 	while (slot < nritems) {
2417 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2418 
2419 		/* we found a different objectid, there must not be acls */
2420 		if (found_key.objectid != objectid)
2421 			return 0;
2422 
2423 		/* we found an xattr, assume we've got an acl */
2424 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2425 			return 1;
2426 
2427 		/*
2428 		 * we found a key greater than an xattr key, there can't
2429 		 * be any acls later on
2430 		 */
2431 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2432 			return 0;
2433 
2434 		slot++;
2435 		scanned++;
2436 
2437 		/*
2438 		 * it goes inode, inode backrefs, xattrs, extents,
2439 		 * so if there are a ton of hard links to an inode there can
2440 		 * be a lot of backrefs.  Don't waste time searching too hard,
2441 		 * this is just an optimization
2442 		 */
2443 		if (scanned >= 8)
2444 			break;
2445 	}
2446 	/* we hit the end of the leaf before we found an xattr or
2447 	 * something larger than an xattr.  We have to assume the inode
2448 	 * has acls
2449 	 */
2450 	return 1;
2451 }
2452 
2453 /*
2454  * read an inode from the btree into the in-memory inode
2455  */
2456 static void btrfs_read_locked_inode(struct inode *inode)
2457 {
2458 	struct btrfs_path *path;
2459 	struct extent_buffer *leaf;
2460 	struct btrfs_inode_item *inode_item;
2461 	struct btrfs_timespec *tspec;
2462 	struct btrfs_root *root = BTRFS_I(inode)->root;
2463 	struct btrfs_key location;
2464 	int maybe_acls;
2465 	u64 alloc_group_block;
2466 	u32 rdev;
2467 	int ret;
2468 
2469 	path = btrfs_alloc_path();
2470 	BUG_ON(!path);
2471 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2472 
2473 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2474 	if (ret)
2475 		goto make_bad;
2476 
2477 	leaf = path->nodes[0];
2478 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2479 				    struct btrfs_inode_item);
2480 
2481 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2482 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2483 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2484 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2485 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2486 
2487 	tspec = btrfs_inode_atime(inode_item);
2488 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2489 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2490 
2491 	tspec = btrfs_inode_mtime(inode_item);
2492 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2493 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2494 
2495 	tspec = btrfs_inode_ctime(inode_item);
2496 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2497 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2498 
2499 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2500 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2501 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2502 	inode->i_generation = BTRFS_I(inode)->generation;
2503 	inode->i_rdev = 0;
2504 	rdev = btrfs_inode_rdev(leaf, inode_item);
2505 
2506 	BTRFS_I(inode)->index_cnt = (u64)-1;
2507 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2508 
2509 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2510 
2511 	/*
2512 	 * try to precache a NULL acl entry for files that don't have
2513 	 * any xattrs or acls
2514 	 */
2515 	maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2516 	if (!maybe_acls)
2517 		cache_no_acl(inode);
2518 
2519 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2520 						alloc_group_block, 0);
2521 	btrfs_free_path(path);
2522 	inode_item = NULL;
2523 
2524 	switch (inode->i_mode & S_IFMT) {
2525 	case S_IFREG:
2526 		inode->i_mapping->a_ops = &btrfs_aops;
2527 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2528 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2529 		inode->i_fop = &btrfs_file_operations;
2530 		inode->i_op = &btrfs_file_inode_operations;
2531 		break;
2532 	case S_IFDIR:
2533 		inode->i_fop = &btrfs_dir_file_operations;
2534 		if (root == root->fs_info->tree_root)
2535 			inode->i_op = &btrfs_dir_ro_inode_operations;
2536 		else
2537 			inode->i_op = &btrfs_dir_inode_operations;
2538 		break;
2539 	case S_IFLNK:
2540 		inode->i_op = &btrfs_symlink_inode_operations;
2541 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2542 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2543 		break;
2544 	default:
2545 		inode->i_op = &btrfs_special_inode_operations;
2546 		init_special_inode(inode, inode->i_mode, rdev);
2547 		break;
2548 	}
2549 
2550 	btrfs_update_iflags(inode);
2551 	return;
2552 
2553 make_bad:
2554 	btrfs_free_path(path);
2555 	make_bad_inode(inode);
2556 }
2557 
2558 /*
2559  * given a leaf and an inode, copy the inode fields into the leaf
2560  */
2561 static void fill_inode_item(struct btrfs_trans_handle *trans,
2562 			    struct extent_buffer *leaf,
2563 			    struct btrfs_inode_item *item,
2564 			    struct inode *inode)
2565 {
2566 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2567 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2568 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2569 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2570 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2571 
2572 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2573 			       inode->i_atime.tv_sec);
2574 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2575 				inode->i_atime.tv_nsec);
2576 
2577 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2578 			       inode->i_mtime.tv_sec);
2579 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2580 				inode->i_mtime.tv_nsec);
2581 
2582 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2583 			       inode->i_ctime.tv_sec);
2584 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2585 				inode->i_ctime.tv_nsec);
2586 
2587 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2588 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2589 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2590 	btrfs_set_inode_transid(leaf, item, trans->transid);
2591 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2592 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2593 	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2594 }
2595 
2596 /*
2597  * copy everything in the in-memory inode into the btree.
2598  */
2599 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2600 				struct btrfs_root *root, struct inode *inode)
2601 {
2602 	struct btrfs_inode_item *inode_item;
2603 	struct btrfs_path *path;
2604 	struct extent_buffer *leaf;
2605 	int ret;
2606 
2607 	path = btrfs_alloc_path();
2608 	BUG_ON(!path);
2609 	path->leave_spinning = 1;
2610 	ret = btrfs_lookup_inode(trans, root, path,
2611 				 &BTRFS_I(inode)->location, 1);
2612 	if (ret) {
2613 		if (ret > 0)
2614 			ret = -ENOENT;
2615 		goto failed;
2616 	}
2617 
2618 	btrfs_unlock_up_safe(path, 1);
2619 	leaf = path->nodes[0];
2620 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2621 				  struct btrfs_inode_item);
2622 
2623 	fill_inode_item(trans, leaf, inode_item, inode);
2624 	btrfs_mark_buffer_dirty(leaf);
2625 	btrfs_set_inode_last_trans(trans, inode);
2626 	ret = 0;
2627 failed:
2628 	btrfs_free_path(path);
2629 	return ret;
2630 }
2631 
2632 
2633 /*
2634  * unlink helper that gets used here in inode.c and in the tree logging
2635  * recovery code.  It removes a link in a directory with a given name, and
2636  * also drops the back refs in the inode to the directory
2637  */
2638 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2639 		       struct btrfs_root *root,
2640 		       struct inode *dir, struct inode *inode,
2641 		       const char *name, int name_len)
2642 {
2643 	struct btrfs_path *path;
2644 	int ret = 0;
2645 	struct extent_buffer *leaf;
2646 	struct btrfs_dir_item *di;
2647 	struct btrfs_key key;
2648 	u64 index;
2649 
2650 	path = btrfs_alloc_path();
2651 	if (!path) {
2652 		ret = -ENOMEM;
2653 		goto out;
2654 	}
2655 
2656 	path->leave_spinning = 1;
2657 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2658 				    name, name_len, -1);
2659 	if (IS_ERR(di)) {
2660 		ret = PTR_ERR(di);
2661 		goto err;
2662 	}
2663 	if (!di) {
2664 		ret = -ENOENT;
2665 		goto err;
2666 	}
2667 	leaf = path->nodes[0];
2668 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2669 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2670 	if (ret)
2671 		goto err;
2672 	btrfs_release_path(root, path);
2673 
2674 	ret = btrfs_del_inode_ref(trans, root, name, name_len,
2675 				  inode->i_ino,
2676 				  dir->i_ino, &index);
2677 	if (ret) {
2678 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2679 		       "inode %lu parent %lu\n", name_len, name,
2680 		       inode->i_ino, dir->i_ino);
2681 		goto err;
2682 	}
2683 
2684 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2685 					 index, name, name_len, -1);
2686 	if (IS_ERR(di)) {
2687 		ret = PTR_ERR(di);
2688 		goto err;
2689 	}
2690 	if (!di) {
2691 		ret = -ENOENT;
2692 		goto err;
2693 	}
2694 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2695 	btrfs_release_path(root, path);
2696 
2697 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2698 					 inode, dir->i_ino);
2699 	BUG_ON(ret != 0 && ret != -ENOENT);
2700 
2701 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2702 					   dir, index);
2703 	if (ret == -ENOENT)
2704 		ret = 0;
2705 err:
2706 	btrfs_free_path(path);
2707 	if (ret)
2708 		goto out;
2709 
2710 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2711 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2712 	btrfs_update_inode(trans, root, dir);
2713 	btrfs_drop_nlink(inode);
2714 	ret = btrfs_update_inode(trans, root, inode);
2715 out:
2716 	return ret;
2717 }
2718 
2719 /* helper to check if there is any shared block in the path */
2720 static int check_path_shared(struct btrfs_root *root,
2721 			     struct btrfs_path *path)
2722 {
2723 	struct extent_buffer *eb;
2724 	int level;
2725 	u64 refs = 1;
2726 
2727 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2728 		int ret;
2729 
2730 		if (!path->nodes[level])
2731 			break;
2732 		eb = path->nodes[level];
2733 		if (!btrfs_block_can_be_shared(root, eb))
2734 			continue;
2735 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2736 					       &refs, NULL);
2737 		if (refs > 1)
2738 			return 1;
2739 	}
2740 	return 0;
2741 }
2742 
2743 /*
2744  * helper to start a transaction for unlink and rmdir.
2745  *
2746  * unlink and rmdir are special in btrfs: they do not always free space,
2747  * so in the enospc case we should make sure they will free space before
2748  * allowing them to use the global metadata reservation.
2749  */
2750 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2751 						       struct dentry *dentry)
2752 {
2753 	struct btrfs_trans_handle *trans;
2754 	struct btrfs_root *root = BTRFS_I(dir)->root;
2755 	struct btrfs_path *path;
2756 	struct btrfs_inode_ref *ref;
2757 	struct btrfs_dir_item *di;
2758 	struct inode *inode = dentry->d_inode;
2759 	u64 index;
2760 	int check_link = 1;
2761 	int err = -ENOSPC;
2762 	int ret;
2763 
2764 	trans = btrfs_start_transaction(root, 10);
2765 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2766 		return trans;
2767 
2768 	if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2769 		return ERR_PTR(-ENOSPC);
2770 
2771 	/* check if someone else holds a reference */
2772 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2773 		return ERR_PTR(-ENOSPC);
2774 
2775 	if (atomic_read(&inode->i_count) > 2)
2776 		return ERR_PTR(-ENOSPC);
2777 
2778 	if (xchg(&root->fs_info->enospc_unlink, 1))
2779 		return ERR_PTR(-ENOSPC);
2780 
2781 	path = btrfs_alloc_path();
2782 	if (!path) {
2783 		root->fs_info->enospc_unlink = 0;
2784 		return ERR_PTR(-ENOMEM);
2785 	}
2786 
2787 	trans = btrfs_start_transaction(root, 0);
2788 	if (IS_ERR(trans)) {
2789 		btrfs_free_path(path);
2790 		root->fs_info->enospc_unlink = 0;
2791 		return trans;
2792 	}
2793 
2794 	path->skip_locking = 1;
2795 	path->search_commit_root = 1;
2796 
2797 	ret = btrfs_lookup_inode(trans, root, path,
2798 				&BTRFS_I(dir)->location, 0);
2799 	if (ret < 0) {
2800 		err = ret;
2801 		goto out;
2802 	}
2803 	if (ret == 0) {
2804 		if (check_path_shared(root, path))
2805 			goto out;
2806 	} else {
2807 		check_link = 0;
2808 	}
2809 	btrfs_release_path(root, path);
2810 
2811 	ret = btrfs_lookup_inode(trans, root, path,
2812 				&BTRFS_I(inode)->location, 0);
2813 	if (ret < 0) {
2814 		err = ret;
2815 		goto out;
2816 	}
2817 	if (ret == 0) {
2818 		if (check_path_shared(root, path))
2819 			goto out;
2820 	} else {
2821 		check_link = 0;
2822 	}
2823 	btrfs_release_path(root, path);
2824 
2825 	if (ret == 0 && S_ISREG(inode->i_mode)) {
2826 		ret = btrfs_lookup_file_extent(trans, root, path,
2827 					       inode->i_ino, (u64)-1, 0);
2828 		if (ret < 0) {
2829 			err = ret;
2830 			goto out;
2831 		}
2832 		BUG_ON(ret == 0);
2833 		if (check_path_shared(root, path))
2834 			goto out;
2835 		btrfs_release_path(root, path);
2836 	}
2837 
2838 	if (!check_link) {
2839 		err = 0;
2840 		goto out;
2841 	}
2842 
2843 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2844 				dentry->d_name.name, dentry->d_name.len, 0);
2845 	if (IS_ERR(di)) {
2846 		err = PTR_ERR(di);
2847 		goto out;
2848 	}
2849 	if (di) {
2850 		if (check_path_shared(root, path))
2851 			goto out;
2852 	} else {
2853 		err = 0;
2854 		goto out;
2855 	}
2856 	btrfs_release_path(root, path);
2857 
2858 	ref = btrfs_lookup_inode_ref(trans, root, path,
2859 				dentry->d_name.name, dentry->d_name.len,
2860 				inode->i_ino, dir->i_ino, 0);
2861 	if (IS_ERR(ref)) {
2862 		err = PTR_ERR(ref);
2863 		goto out;
2864 	}
2865 	BUG_ON(!ref);
2866 	if (check_path_shared(root, path))
2867 		goto out;
2868 	index = btrfs_inode_ref_index(path->nodes[0], ref);
2869 	btrfs_release_path(root, path);
2870 
2871 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
2872 				dentry->d_name.name, dentry->d_name.len, 0);
2873 	if (IS_ERR(di)) {
2874 		err = PTR_ERR(di);
2875 		goto out;
2876 	}
2877 	BUG_ON(ret == -ENOENT);
2878 	if (check_path_shared(root, path))
2879 		goto out;
2880 
2881 	err = 0;
2882 out:
2883 	btrfs_free_path(path);
2884 	if (err) {
2885 		btrfs_end_transaction(trans, root);
2886 		root->fs_info->enospc_unlink = 0;
2887 		return ERR_PTR(err);
2888 	}
2889 
2890 	trans->block_rsv = &root->fs_info->global_block_rsv;
2891 	return trans;
2892 }
2893 
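/*
 * counterpart to __unlink_start_trans: drop the enospc_unlink flag if we
 * were borrowing the global block reservation, then end the transaction.
 */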
2894 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
2895 			       struct btrfs_root *root)
2896 {
2897 	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
2898 		BUG_ON(!root->fs_info->enospc_unlink);
2899 		root->fs_info->enospc_unlink = 0;
2900 	}
2901 	btrfs_end_transaction_throttle(trans, root);
2902 }
2903 
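/*
 * VFS unlink.  Removes the directory entry and inode back ref, and adds an
 * orphan item once the link count hits zero so the inode still gets cleaned
 * up if we crash before the final iput.
 */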
2904 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2905 {
2906 	struct btrfs_root *root = BTRFS_I(dir)->root;
2907 	struct btrfs_trans_handle *trans;
2908 	struct inode *inode = dentry->d_inode;
2909 	int ret;
2910 	unsigned long nr = 0;
2911 
2912 	trans = __unlink_start_trans(dir, dentry);
2913 	if (IS_ERR(trans))
2914 		return PTR_ERR(trans);
2915 
2916 	btrfs_set_trans_block_group(trans, dir);
2917 
2918 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2919 
2920 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2921 				 dentry->d_name.name, dentry->d_name.len);
2922 	BUG_ON(ret);
2923 
2924 	if (inode->i_nlink == 0) {
2925 		ret = btrfs_orphan_add(trans, inode);
2926 		BUG_ON(ret);
2927 	}
2928 
2929 	nr = trans->blocks_used;
2930 	__unlink_end_trans(trans, root);
2931 	btrfs_btree_balance_dirty(root, nr);
2932 	return ret;
2933 }
2934 
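/*
 * remove the entry for a subvolume from @dir: delete the dir item, the root
 * back ref in the tree root (which also yields the dir index), and the dir
 * index item, then update @dir's size and timestamps.
 */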
2935 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2936 			struct btrfs_root *root,
2937 			struct inode *dir, u64 objectid,
2938 			const char *name, int name_len)
2939 {
2940 	struct btrfs_path *path;
2941 	struct extent_buffer *leaf;
2942 	struct btrfs_dir_item *di;
2943 	struct btrfs_key key;
2944 	u64 index;
2945 	int ret;
2946 
2947 	path = btrfs_alloc_path();
2948 	if (!path)
2949 		return -ENOMEM;
2950 
2951 	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2952 				   name, name_len, -1);
2953 	BUG_ON(!di || IS_ERR(di));
2954 
2955 	leaf = path->nodes[0];
2956 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2957 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2958 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2959 	BUG_ON(ret);
2960 	btrfs_release_path(root, path);
2961 
2962 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2963 				 objectid, root->root_key.objectid,
2964 				 dir->i_ino, &index, name, name_len);
2965 	if (ret < 0) {
2966 		BUG_ON(ret != -ENOENT);
2967 		di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2968 						 name, name_len);
2969 		BUG_ON(!di || IS_ERR(di));
2970 
2971 		leaf = path->nodes[0];
2972 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2973 		btrfs_release_path(root, path);
2974 		index = key.offset;
2975 	}
2976 
2977 	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2978 					 index, name, name_len, -1);
2979 	BUG_ON(!di || IS_ERR(di));
2980 
2981 	leaf = path->nodes[0];
2982 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2983 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2984 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2985 	BUG_ON(ret);
2986 	btrfs_release_path(root, path);
2987 
2988 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2989 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2990 	ret = btrfs_update_inode(trans, root, dir);
2991 	BUG_ON(ret);
2992 
2993 	btrfs_free_path(path);
2994 	return 0;
2995 }
2996 
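/*
 * VFS rmdir.  Non-empty directories and subvolume roots are rejected, the
 * empty-subvolume placeholder is removed via btrfs_unlink_subvol, and a
 * plain empty directory is orphaned, unlinked and its size reset to zero.
 */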
2997 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2998 {
2999 	struct inode *inode = dentry->d_inode;
3000 	int err = 0;
3001 	struct btrfs_root *root = BTRFS_I(dir)->root;
3002 	struct btrfs_trans_handle *trans;
3003 	unsigned long nr = 0;
3004 
3005 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3006 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
3007 		return -ENOTEMPTY;
3008 
3009 	trans = __unlink_start_trans(dir, dentry);
3010 	if (IS_ERR(trans))
3011 		return PTR_ERR(trans);
3012 
3013 	btrfs_set_trans_block_group(trans, dir);
3014 
3015 	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3016 		err = btrfs_unlink_subvol(trans, root, dir,
3017 					  BTRFS_I(inode)->location.objectid,
3018 					  dentry->d_name.name,
3019 					  dentry->d_name.len);
3020 		goto out;
3021 	}
3022 
3023 	err = btrfs_orphan_add(trans, inode);
3024 	if (err)
3025 		goto out;
3026 
3027 	/* now the directory is empty */
3028 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3029 				 dentry->d_name.name, dentry->d_name.len);
3030 	if (!err)
3031 		btrfs_i_size_write(inode, 0);
3032 out:
3033 	nr = trans->blocks_used;
3034 	__unlink_end_trans(trans, root);
3035 	btrfs_btree_balance_dirty(root, nr);
3036 
3037 	return err;
3038 }
3039 
3040 #if 0
3041 /*
3042  * when truncating bytes in a file, it is possible to avoid reading
3043  * the leaves that contain only checksum items.  This can be the
3044  * majority of the IO required to delete a large file, but it must
3045  * be done carefully.
3046  *
3047  * The keys in the level just above the leaves are checked to make sure
3048  * the lowest key in a given leaf is a csum key, and starts at an offset
3049  * after the new size.
3050  *
3051  * Then the key for the next leaf is checked to make sure it also has
3052  * a checksum item for the same file.  If it does, we know our target leaf
3053  * contains only checksum items, and it can be safely freed without reading
3054  * it.
3055  *
3056  * This is just an optimization targeted at large files.  It may do
3057  * nothing.  It will return 0 unless things went badly.
3058  */
3059 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
3060 				     struct btrfs_root *root,
3061 				     struct btrfs_path *path,
3062 				     struct inode *inode, u64 new_size)
3063 {
3064 	struct btrfs_key key;
3065 	int ret;
3066 	int nritems;
3067 	struct btrfs_key found_key;
3068 	struct btrfs_key other_key;
3069 	struct btrfs_leaf_ref *ref;
3070 	u64 leaf_gen;
3071 	u64 leaf_start;
3072 
3073 	path->lowest_level = 1;
3074 	key.objectid = inode->i_ino;
3075 	key.type = BTRFS_CSUM_ITEM_KEY;
3076 	key.offset = new_size;
3077 again:
3078 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3079 	if (ret < 0)
3080 		goto out;
3081 
3082 	if (path->nodes[1] == NULL) {
3083 		ret = 0;
3084 		goto out;
3085 	}
3086 	ret = 0;
3087 	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
3088 	nritems = btrfs_header_nritems(path->nodes[1]);
3089 
3090 	if (!nritems)
3091 		goto out;
3092 
3093 	if (path->slots[1] >= nritems)
3094 		goto next_node;
3095 
3096 	/* did we find a key greater than anything we want to delete? */
3097 	if (found_key.objectid > inode->i_ino ||
3098 	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
3099 		goto out;
3100 
3101 	/* we check the next key in the node to make sure the leaf contains
3102 	 * only checksum items.  This comparison doesn't work if our
3103 	 * leaf is the last one in the node
3104 	 */
3105 	if (path->slots[1] + 1 >= nritems) {
3106 next_node:
3107 		/* search forward from the last key in the node, this
3108 		 * will bring us into the next node in the tree
3109 		 */
3110 		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
3111 
3112 		/* unlikely, but we inc below, so check to be safe */
3113 		if (found_key.offset == (u64)-1)
3114 			goto out;
3115 
3116 		/* search_forward needs a path with locks held, do the
3117 		 * search again for the original key.  It is possible
3118 		 * this will race with a balance and return a path that
3119 		 * we could modify, but this drop is just an optimization
3120 		 * and is allowed to miss some leaves.
3121 		 */
3122 		btrfs_release_path(root, path);
3123 		found_key.offset++;
3124 
3125 		/* setup a max key for search_forward */
3126 		other_key.offset = (u64)-1;
3127 		other_key.type = key.type;
3128 		other_key.objectid = key.objectid;
3129 
3130 		path->keep_locks = 1;
3131 		ret = btrfs_search_forward(root, &found_key, &other_key,
3132 					   path, 0, 0);
3133 		path->keep_locks = 0;
3134 		if (ret || found_key.objectid != key.objectid ||
3135 		    found_key.type != key.type) {
3136 			ret = 0;
3137 			goto out;
3138 		}
3139 
3140 		key.offset = found_key.offset;
3141 		btrfs_release_path(root, path);
3142 		cond_resched();
3143 		goto again;
3144 	}
3145 
3146 	/* we know there's one more slot after us in the tree,
3147 	 * read that key so we can verify it is also a checksum item
3148 	 */
3149 	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
3150 
3151 	if (found_key.objectid < inode->i_ino)
3152 		goto next_key;
3153 
3154 	if (found_key.type != key.type || found_key.offset < new_size)
3155 		goto next_key;
3156 
3157 	/*
3158 	 * if the key for the next leaf isn't a csum key from this objectid,
3159 	 * we can't be sure there aren't good items inside this leaf.
3160 	 * Bail out
3161 	 */
3162 	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
3163 		goto out;
3164 
3165 	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
3166 	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
3167 	/*
3168 	 * it is safe to delete this leaf, it contains only
3169 	 * csum items from this inode at an offset >= new_size
3170 	 */
3171 	ret = btrfs_del_leaf(trans, root, path, leaf_start);
3172 	BUG_ON(ret);
3173 
3174 	if (root->ref_cows && leaf_gen < trans->transid) {
3175 		ref = btrfs_alloc_leaf_ref(root, 0);
3176 		if (ref) {
3177 			ref->root_gen = root->root_key.offset;
3178 			ref->bytenr = leaf_start;
3179 			ref->owner = 0;
3180 			ref->generation = leaf_gen;
3181 			ref->nritems = 0;
3182 
3183 			btrfs_sort_leaf_ref(ref);
3184 
3185 			ret = btrfs_add_leaf_ref(root, ref, 0);
3186 			WARN_ON(ret);
3187 			btrfs_free_leaf_ref(root, ref);
3188 		} else {
3189 			WARN_ON(1);
3190 		}
3191 	}
3192 next_key:
3193 	btrfs_release_path(root, path);
3194 
3195 	if (other_key.objectid == inode->i_ino &&
3196 	    other_key.type == key.type && other_key.offset > key.offset) {
3197 		key.offset = other_key.offset;
3198 		cond_resched();
3199 		goto again;
3200 	}
3201 	ret = 0;
3202 out:
3203 	/* fixup any changes we've made to the path */
3204 	path->lowest_level = 0;
3205 	path->keep_locks = 0;
3206 	btrfs_release_path(root, path);
3207 	return ret;
3208 }
3209 
3210 #endif
3211 
3212 /*
3213  * this can truncate away extent items, csum items and directory items.
3214  * It starts at a high offset and removes keys until it can't find
3215  * any higher than new_size
3216  *
3217  * csum items that cross the new i_size are truncated to the new size
3218  * as well.
3219  *
3220  * min_type is the minimum key type to truncate down to.  If set to 0, this
3221  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3222  */
3223 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3224 			       struct btrfs_root *root,
3225 			       struct inode *inode,
3226 			       u64 new_size, u32 min_type)
3227 {
3228 	struct btrfs_path *path;
3229 	struct extent_buffer *leaf;
3230 	struct btrfs_file_extent_item *fi;
3231 	struct btrfs_key key;
3232 	struct btrfs_key found_key;
3233 	u64 extent_start = 0;
3234 	u64 extent_num_bytes = 0;
3235 	u64 extent_offset = 0;
3236 	u64 item_end = 0;
3237 	u64 mask = root->sectorsize - 1;
3238 	u32 found_type = (u8)-1;
3239 	int found_extent;
3240 	int del_item;
3241 	int pending_del_nr = 0;
3242 	int pending_del_slot = 0;
3243 	int extent_type = -1;
3244 	int encoding;
3245 	int ret;
3246 	int err = 0;
3247 
3248 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3249 
3250 	if (root->ref_cows || root == root->fs_info->tree_root)
3251 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3252 
3253 	path = btrfs_alloc_path();
3254 	BUG_ON(!path);
3255 	path->reada = -1;
3256 
3257 	key.objectid = inode->i_ino;
3258 	key.offset = (u64)-1;
3259 	key.type = (u8)-1;
3260 
3261 search_again:
3262 	path->leave_spinning = 1;
3263 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3264 	if (ret < 0) {
3265 		err = ret;
3266 		goto out;
3267 	}
3268 
3269 	if (ret > 0) {
3270 		/* there are no items in the tree for us to truncate, we're
3271 		 * done
3272 		 */
3273 		if (path->slots[0] == 0)
3274 			goto out;
3275 		path->slots[0]--;
3276 	}
3277 
3278 	while (1) {
3279 		fi = NULL;
3280 		leaf = path->nodes[0];
3281 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3282 		found_type = btrfs_key_type(&found_key);
3283 		encoding = 0;
3284 
3285 		if (found_key.objectid != inode->i_ino)
3286 			break;
3287 
3288 		if (found_type < min_type)
3289 			break;
3290 
3291 		item_end = found_key.offset;
3292 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3293 			fi = btrfs_item_ptr(leaf, path->slots[0],
3294 					    struct btrfs_file_extent_item);
3295 			extent_type = btrfs_file_extent_type(leaf, fi);
3296 			encoding = btrfs_file_extent_compression(leaf, fi);
3297 			encoding |= btrfs_file_extent_encryption(leaf, fi);
3298 			encoding |= btrfs_file_extent_other_encoding(leaf, fi);
3299 
3300 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3301 				item_end +=
3302 				    btrfs_file_extent_num_bytes(leaf, fi);
3303 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3304 				item_end += btrfs_file_extent_inline_len(leaf,
3305 									 fi);
3306 			}
3307 			item_end--;
3308 		}
3309 		if (found_type > min_type) {
3310 			del_item = 1;
3311 		} else {
3312 			if (item_end < new_size)
3313 				break;
3314 			if (found_key.offset >= new_size)
3315 				del_item = 1;
3316 			else
3317 				del_item = 0;
3318 		}
3319 		found_extent = 0;
3320 		/* FIXME, shrink the extent if the ref count is only 1 */
3321 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3322 			goto delete;
3323 
3324 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3325 			u64 num_dec;
3326 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3327 			if (!del_item && !encoding) {
3328 				u64 orig_num_bytes =
3329 					btrfs_file_extent_num_bytes(leaf, fi);
3330 				extent_num_bytes = new_size -
3331 					found_key.offset + root->sectorsize - 1;
3332 				extent_num_bytes = extent_num_bytes &
3333 					~((u64)root->sectorsize - 1);
3334 				btrfs_set_file_extent_num_bytes(leaf, fi,
3335 							 extent_num_bytes);
3336 				num_dec = (orig_num_bytes -
3337 					   extent_num_bytes);
3338 				if (root->ref_cows && extent_start != 0)
3339 					inode_sub_bytes(inode, num_dec);
3340 				btrfs_mark_buffer_dirty(leaf);
3341 			} else {
3342 				extent_num_bytes =
3343 					btrfs_file_extent_disk_num_bytes(leaf,
3344 									 fi);
3345 				extent_offset = found_key.offset -
3346 					btrfs_file_extent_offset(leaf, fi);
3347 
3348 				/* FIXME blocksize != 4096 */
3349 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3350 				if (extent_start != 0) {
3351 					found_extent = 1;
3352 					if (root->ref_cows)
3353 						inode_sub_bytes(inode, num_dec);
3354 				}
3355 			}
3356 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3357 			/*
3358 			 * we can't truncate inline items that have had
3359 			 * special encodings
3360 			 */
3361 			if (!del_item &&
3362 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3363 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3364 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3365 				u32 size = new_size - found_key.offset;
3366 
3367 				if (root->ref_cows) {
3368 					inode_sub_bytes(inode, item_end + 1 -
3369 							new_size);
3370 				}
3371 				size =
3372 				    btrfs_file_extent_calc_inline_size(size);
3373 				ret = btrfs_truncate_item(trans, root, path,
3374 							  size, 1);
3375 				BUG_ON(ret);
3376 			} else if (root->ref_cows) {
3377 				inode_sub_bytes(inode, item_end + 1 -
3378 						found_key.offset);
3379 			}
3380 		}
3381 delete:
3382 		if (del_item) {
3383 			if (!pending_del_nr) {
3384 				/* no pending yet, add ourselves */
3385 				pending_del_slot = path->slots[0];
3386 				pending_del_nr = 1;
3387 			} else if (pending_del_nr &&
3388 				   path->slots[0] + 1 == pending_del_slot) {
3389 				/* hop on the pending chunk */
3390 				pending_del_nr++;
3391 				pending_del_slot = path->slots[0];
3392 			} else {
3393 				BUG();
3394 			}
3395 		} else {
3396 			break;
3397 		}
3398 		if (found_extent && (root->ref_cows ||
3399 				     root == root->fs_info->tree_root)) {
3400 			btrfs_set_path_blocking(path);
3401 			ret = btrfs_free_extent(trans, root, extent_start,
3402 						extent_num_bytes, 0,
3403 						btrfs_header_owner(leaf),
3404 						inode->i_ino, extent_offset);
3405 			BUG_ON(ret);
3406 		}
3407 
3408 		if (found_type == BTRFS_INODE_ITEM_KEY)
3409 			break;
3410 
3411 		if (path->slots[0] == 0 ||
3412 		    path->slots[0] != pending_del_slot) {
3413 			if (root->ref_cows) {
3414 				err = -EAGAIN;
3415 				goto out;
3416 			}
3417 			if (pending_del_nr) {
3418 				ret = btrfs_del_items(trans, root, path,
3419 						pending_del_slot,
3420 						pending_del_nr);
3421 				BUG_ON(ret);
3422 				pending_del_nr = 0;
3423 			}
3424 			btrfs_release_path(root, path);
3425 			goto search_again;
3426 		} else {
3427 			path->slots[0]--;
3428 		}
3429 	}
3430 out:
3431 	if (pending_del_nr) {
3432 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3433 				      pending_del_nr);
3434 		BUG_ON(ret);
3435 	}
3436 	btrfs_free_path(path);
3437 	return err;
3438 }
3439 
3440 /*
3441  * taken from block_truncate_page, but does cow as it zeros out
3442  * any bytes left in the last page in the file.
3443  */
3444 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3445 {
3446 	struct inode *inode = mapping->host;
3447 	struct btrfs_root *root = BTRFS_I(inode)->root;
3448 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3449 	struct btrfs_ordered_extent *ordered;
3450 	struct extent_state *cached_state = NULL;
3451 	char *kaddr;
3452 	u32 blocksize = root->sectorsize;
3453 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3454 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3455 	struct page *page;
3456 	int ret = 0;
3457 	u64 page_start;
3458 	u64 page_end;
3459 
3460 	if ((offset & (blocksize - 1)) == 0)
3461 		goto out;
3462 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3463 	if (ret)
3464 		goto out;
3465 
3466 	ret = -ENOMEM;
3467 again:
3468 	page = grab_cache_page(mapping, index);
3469 	if (!page) {
3470 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3471 		goto out;
3472 	}
3473 
3474 	page_start = page_offset(page);
3475 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3476 
3477 	if (!PageUptodate(page)) {
3478 		ret = btrfs_readpage(NULL, page);
3479 		lock_page(page);
3480 		if (page->mapping != mapping) {
3481 			unlock_page(page);
3482 			page_cache_release(page);
3483 			goto again;
3484 		}
3485 		if (!PageUptodate(page)) {
3486 			ret = -EIO;
3487 			goto out_unlock;
3488 		}
3489 	}
3490 	wait_on_page_writeback(page);
3491 
3492 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3493 			 GFP_NOFS);
3494 	set_page_extent_mapped(page);
3495 
3496 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3497 	if (ordered) {
3498 		unlock_extent_cached(io_tree, page_start, page_end,
3499 				     &cached_state, GFP_NOFS);
3500 		unlock_page(page);
3501 		page_cache_release(page);
3502 		btrfs_start_ordered_extent(inode, ordered, 1);
3503 		btrfs_put_ordered_extent(ordered);
3504 		goto again;
3505 	}
3506 
3507 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3508 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3509 			  0, 0, &cached_state, GFP_NOFS);
3510 
3511 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3512 					&cached_state);
3513 	if (ret) {
3514 		unlock_extent_cached(io_tree, page_start, page_end,
3515 				     &cached_state, GFP_NOFS);
3516 		goto out_unlock;
3517 	}
3518 
3519 	ret = 0;
3520 	if (offset != PAGE_CACHE_SIZE) {
3521 		kaddr = kmap(page);
3522 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3523 		flush_dcache_page(page);
3524 		kunmap(page);
3525 	}
3526 	ClearPageChecked(page);
3527 	set_page_dirty(page);
3528 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3529 			     GFP_NOFS);
3530 
3531 out_unlock:
3532 	if (ret)
3533 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3534 	unlock_page(page);
3535 	page_cache_release(page);
3536 out:
3537 	return ret;
3538 }
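
/*
 * Worked example for btrfs_truncate_page() above (illustrative numbers,
 * not taken from the original source): with a 4K page/block size and
 * from == 10000, index == 2 and offset == 1808, so bytes 1808..4095 of
 * the third page are zeroed through the delalloc path; a from that is
 * already block aligned (e.g. 8192) returns immediately with nothing
 * to do.
 */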
3539 
3540 int btrfs_cont_expand(struct inode *inode, loff_t size)
3541 {
3542 	struct btrfs_trans_handle *trans;
3543 	struct btrfs_root *root = BTRFS_I(inode)->root;
3544 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3545 	struct extent_map *em = NULL;
3546 	struct extent_state *cached_state = NULL;
3547 	u64 mask = root->sectorsize - 1;
3548 	u64 hole_start = (inode->i_size + mask) & ~mask;
3549 	u64 block_end = (size + mask) & ~mask;
3550 	u64 last_byte;
3551 	u64 cur_offset;
3552 	u64 hole_size;
3553 	int err = 0;
3554 
3555 	if (size <= hole_start)
3556 		return 0;
3557 
3558 	while (1) {
3559 		struct btrfs_ordered_extent *ordered;
3560 		btrfs_wait_ordered_range(inode, hole_start,
3561 					 block_end - hole_start);
3562 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3563 				 &cached_state, GFP_NOFS);
3564 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3565 		if (!ordered)
3566 			break;
3567 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3568 				     &cached_state, GFP_NOFS);
3569 		btrfs_put_ordered_extent(ordered);
3570 	}
3571 
3572 	cur_offset = hole_start;
3573 	while (1) {
3574 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3575 				block_end - cur_offset, 0);
3576 		BUG_ON(IS_ERR(em) || !em);
3577 		last_byte = min(extent_map_end(em), block_end);
3578 		last_byte = (last_byte + mask) & ~mask;
3579 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3580 			u64 hint_byte = 0;
3581 			hole_size = last_byte - cur_offset;
3582 
3583 			trans = btrfs_start_transaction(root, 2);
3584 			if (IS_ERR(trans)) {
3585 				err = PTR_ERR(trans);
3586 				break;
3587 			}
3588 			btrfs_set_trans_block_group(trans, inode);
3589 
3590 			err = btrfs_drop_extents(trans, inode, cur_offset,
3591 						 cur_offset + hole_size,
3592 						 &hint_byte, 1);
3593 			BUG_ON(err);
3594 
3595 			err = btrfs_insert_file_extent(trans, root,
3596 					inode->i_ino, cur_offset, 0,
3597 					0, hole_size, 0, hole_size,
3598 					0, 0, 0);
3599 			BUG_ON(err);
3600 
3601 			btrfs_drop_extent_cache(inode, hole_start,
3602 					last_byte - 1, 0);
3603 
3604 			btrfs_end_transaction(trans, root);
3605 		}
3606 		free_extent_map(em);
3607 		em = NULL;
3608 		cur_offset = last_byte;
3609 		if (cur_offset >= block_end)
3610 			break;
3611 	}
3612 
3613 	free_extent_map(em);
3614 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3615 			     GFP_NOFS);
3616 	return err;
3617 }
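
/*
 * Worked example for btrfs_cont_expand() above (illustrative numbers):
 * with a 4K sectorsize, growing a file from i_size 10000 to size 20000
 * gives hole_start = 12288 and block_end = 20480; every gap in
 * [12288, 20480) that is not preallocated is covered by inserting a
 * file extent item with disk_bytenr 0 (a hole) of the corresponding
 * hole_size.
 */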
3618 
3619 static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3620 {
3621 	struct btrfs_root *root = BTRFS_I(inode)->root;
3622 	struct btrfs_trans_handle *trans;
3623 	unsigned long nr;
3624 	int ret;
3625 
3626 	if (attr->ia_size == inode->i_size)
3627 		return 0;
3628 
3629 	if (attr->ia_size > inode->i_size) {
3630 		unsigned long limit;
3631 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
3632 		if (attr->ia_size > inode->i_sb->s_maxbytes)
3633 			return -EFBIG;
3634 		if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3635 			send_sig(SIGXFSZ, current, 0);
3636 			return -EFBIG;
3637 		}
3638 	}
3639 
3640 	trans = btrfs_start_transaction(root, 5);
3641 	if (IS_ERR(trans))
3642 		return PTR_ERR(trans);
3643 
3644 	btrfs_set_trans_block_group(trans, inode);
3645 
3646 	ret = btrfs_orphan_add(trans, inode);
3647 	BUG_ON(ret);
3648 
3649 	nr = trans->blocks_used;
3650 	btrfs_end_transaction(trans, root);
3651 	btrfs_btree_balance_dirty(root, nr);
3652 
3653 	if (attr->ia_size > inode->i_size) {
3654 		ret = btrfs_cont_expand(inode, attr->ia_size);
3655 		if (ret) {
3656 			btrfs_truncate(inode);
3657 			return ret;
3658 		}
3659 
3660 		i_size_write(inode, attr->ia_size);
3661 		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3662 
3663 		trans = btrfs_start_transaction(root, 0);
3664 		BUG_ON(IS_ERR(trans));
3665 		btrfs_set_trans_block_group(trans, inode);
3666 		trans->block_rsv = root->orphan_block_rsv;
3667 		BUG_ON(!trans->block_rsv);
3668 
3669 		ret = btrfs_update_inode(trans, root, inode);
3670 		BUG_ON(ret);
3671 		if (inode->i_nlink > 0) {
3672 			ret = btrfs_orphan_del(trans, inode);
3673 			BUG_ON(ret);
3674 		}
3675 		nr = trans->blocks_used;
3676 		btrfs_end_transaction(trans, root);
3677 		btrfs_btree_balance_dirty(root, nr);
3678 		return 0;
3679 	}
3680 
3681 	/*
3682 	 * We're truncating a file that used to have good data down to
3683 	 * zero. Make sure it gets into the ordered flush list so that
3684 	 * any new writes get down to disk quickly.
3685 	 */
3686 	if (attr->ia_size == 0)
3687 		BTRFS_I(inode)->ordered_data_close = 1;
3688 
3689 	/* we don't support swapfiles, so vmtruncate shouldn't fail */
3690 	ret = vmtruncate(inode, attr->ia_size);
3691 	BUG_ON(ret);
3692 
3693 	return 0;
3694 }
3695 
3696 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3697 {
3698 	struct inode *inode = dentry->d_inode;
3699 	struct btrfs_root *root = BTRFS_I(inode)->root;
3700 	int err;
3701 
3702 	if (btrfs_root_readonly(root))
3703 		return -EROFS;
3704 
3705 	err = inode_change_ok(inode, attr);
3706 	if (err)
3707 		return err;
3708 
3709 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3710 		err = btrfs_setattr_size(inode, attr);
3711 		if (err)
3712 			return err;
3713 	}
3714 
3715 	if (attr->ia_valid) {
3716 		setattr_copy(inode, attr);
3717 		mark_inode_dirty(inode);
3718 
3719 		if (attr->ia_valid & ATTR_MODE)
3720 			err = btrfs_acl_chmod(inode);
3721 	}
3722 
3723 	return err;
3724 }
3725 
3726 void btrfs_evict_inode(struct inode *inode)
3727 {
3728 	struct btrfs_trans_handle *trans;
3729 	struct btrfs_root *root = BTRFS_I(inode)->root;
3730 	unsigned long nr;
3731 	int ret;
3732 
3733 	truncate_inode_pages(&inode->i_data, 0);
3734 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3735 			       root == root->fs_info->tree_root))
3736 		goto no_delete;
3737 
3738 	if (is_bad_inode(inode)) {
3739 		btrfs_orphan_del(NULL, inode);
3740 		goto no_delete;
3741 	}
3742 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3743 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3744 
3745 	if (root->fs_info->log_root_recovering) {
3746 		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3747 		goto no_delete;
3748 	}
3749 
3750 	if (inode->i_nlink > 0) {
3751 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3752 		goto no_delete;
3753 	}
3754 
3755 	btrfs_i_size_write(inode, 0);
3756 
3757 	while (1) {
3758 		trans = btrfs_start_transaction(root, 0);
3759 		BUG_ON(IS_ERR(trans));
3760 		btrfs_set_trans_block_group(trans, inode);
3761 		trans->block_rsv = root->orphan_block_rsv;
3762 
3763 		ret = btrfs_block_rsv_check(trans, root,
3764 					    root->orphan_block_rsv, 0, 5);
3765 		if (ret) {
3766 			BUG_ON(ret != -EAGAIN);
3767 			ret = btrfs_commit_transaction(trans, root);
3768 			BUG_ON(ret);
3769 			continue;
3770 		}
3771 
3772 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3773 		if (ret != -EAGAIN)
3774 			break;
3775 
3776 		nr = trans->blocks_used;
3777 		btrfs_end_transaction(trans, root);
3778 		trans = NULL;
3779 		btrfs_btree_balance_dirty(root, nr);
3780 
3781 	}
3782 
3783 	if (ret == 0) {
3784 		ret = btrfs_orphan_del(trans, inode);
3785 		BUG_ON(ret);
3786 	}
3787 
3788 	nr = trans->blocks_used;
3789 	btrfs_end_transaction(trans, root);
3790 	btrfs_btree_balance_dirty(root, nr);
3791 no_delete:
3792 	end_writeback(inode);
3793 	return;
3794 }
3795 
3796 /*
3797  * this returns the key found in the dir entry in the location pointer.
3798  * If no dir entries were found, location->objectid is 0.
3799  */
3800 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3801 			       struct btrfs_key *location)
3802 {
3803 	const char *name = dentry->d_name.name;
3804 	int namelen = dentry->d_name.len;
3805 	struct btrfs_dir_item *di;
3806 	struct btrfs_path *path;
3807 	struct btrfs_root *root = BTRFS_I(dir)->root;
3808 	int ret = 0;
3809 
3810 	path = btrfs_alloc_path();
3811 	BUG_ON(!path);
3812 
3813 	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3814 				    namelen, 0);
3815 	if (IS_ERR(di))
3816 		ret = PTR_ERR(di);
3817 
3818 	if (!di || IS_ERR(di))
3819 		goto out_err;
3820 
3821 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3822 out:
3823 	btrfs_free_path(path);
3824 	return ret;
3825 out_err:
3826 	location->objectid = 0;
3827 	goto out;
3828 }
3829 
3830 /*
3831  * when we hit a tree root in a directory, the btrfs part of the inode
3832  * needs to be changed to reflect the root directory of the tree root.  This
3833  * is kind of like crossing a mount point.
3834  */
3835 static int fixup_tree_root_location(struct btrfs_root *root,
3836 				    struct inode *dir,
3837 				    struct dentry *dentry,
3838 				    struct btrfs_key *location,
3839 				    struct btrfs_root **sub_root)
3840 {
3841 	struct btrfs_path *path;
3842 	struct btrfs_root *new_root;
3843 	struct btrfs_root_ref *ref;
3844 	struct extent_buffer *leaf;
3845 	int ret;
3846 	int err = 0;
3847 
3848 	path = btrfs_alloc_path();
3849 	if (!path) {
3850 		err = -ENOMEM;
3851 		goto out;
3852 	}
3853 
3854 	err = -ENOENT;
3855 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3856 				  BTRFS_I(dir)->root->root_key.objectid,
3857 				  location->objectid);
3858 	if (ret) {
3859 		if (ret < 0)
3860 			err = ret;
3861 		goto out;
3862 	}
3863 
3864 	leaf = path->nodes[0];
3865 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3866 	if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3867 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3868 		goto out;
3869 
3870 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3871 				   (unsigned long)(ref + 1),
3872 				   dentry->d_name.len);
3873 	if (ret)
3874 		goto out;
3875 
3876 	btrfs_release_path(root->fs_info->tree_root, path);
3877 
3878 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3879 	if (IS_ERR(new_root)) {
3880 		err = PTR_ERR(new_root);
3881 		goto out;
3882 	}
3883 
3884 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3885 		err = -ENOENT;
3886 		goto out;
3887 	}
3888 
3889 	*sub_root = new_root;
3890 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3891 	location->type = BTRFS_INODE_ITEM_KEY;
3892 	location->offset = 0;
3893 	err = 0;
3894 out:
3895 	btrfs_free_path(path);
3896 	return err;
3897 }
3898 
3899 static void inode_tree_add(struct inode *inode)
3900 {
3901 	struct btrfs_root *root = BTRFS_I(inode)->root;
3902 	struct btrfs_inode *entry;
3903 	struct rb_node **p;
3904 	struct rb_node *parent;
3905 again:
3906 	p = &root->inode_tree.rb_node;
3907 	parent = NULL;
3908 
3909 	if (inode_unhashed(inode))
3910 		return;
3911 
3912 	spin_lock(&root->inode_lock);
3913 	while (*p) {
3914 		parent = *p;
3915 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3916 
3917 		if (inode->i_ino < entry->vfs_inode.i_ino)
3918 			p = &parent->rb_left;
3919 		else if (inode->i_ino > entry->vfs_inode.i_ino)
3920 			p = &parent->rb_right;
3921 		else {
3922 			WARN_ON(!(entry->vfs_inode.i_state &
3923 				  (I_WILL_FREE | I_FREEING)));
3924 			rb_erase(parent, &root->inode_tree);
3925 			RB_CLEAR_NODE(parent);
3926 			spin_unlock(&root->inode_lock);
3927 			goto again;
3928 		}
3929 	}
3930 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3931 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3932 	spin_unlock(&root->inode_lock);
3933 }
3934 
3935 static void inode_tree_del(struct inode *inode)
3936 {
3937 	struct btrfs_root *root = BTRFS_I(inode)->root;
3938 	int empty = 0;
3939 
3940 	spin_lock(&root->inode_lock);
3941 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3942 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3943 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3944 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3945 	}
3946 	spin_unlock(&root->inode_lock);
3947 
3948 	/*
3949 	 * Free space cache has inodes in the tree root, but the tree root has a
3950 	 * root_refs of 0, so this could end up dropping the tree root as a
3951 	 * snapshot, so we need the extra root != root->fs_info->tree_root check
3952 	 * to make sure we don't drop it.
3953 	 */
3954 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
3955 	    root != root->fs_info->tree_root) {
3956 		synchronize_srcu(&root->fs_info->subvol_srcu);
3957 		spin_lock(&root->inode_lock);
3958 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3959 		spin_unlock(&root->inode_lock);
3960 		if (empty)
3961 			btrfs_add_dead_root(root);
3962 	}
3963 }
3964 
3965 int btrfs_invalidate_inodes(struct btrfs_root *root)
3966 {
3967 	struct rb_node *node;
3968 	struct rb_node *prev;
3969 	struct btrfs_inode *entry;
3970 	struct inode *inode;
3971 	u64 objectid = 0;
3972 
3973 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3974 
3975 	spin_lock(&root->inode_lock);
3976 again:
3977 	node = root->inode_tree.rb_node;
3978 	prev = NULL;
3979 	while (node) {
3980 		prev = node;
3981 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3982 
3983 		if (objectid < entry->vfs_inode.i_ino)
3984 			node = node->rb_left;
3985 		else if (objectid > entry->vfs_inode.i_ino)
3986 			node = node->rb_right;
3987 		else
3988 			break;
3989 	}
3990 	if (!node) {
3991 		while (prev) {
3992 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3993 			if (objectid <= entry->vfs_inode.i_ino) {
3994 				node = prev;
3995 				break;
3996 			}
3997 			prev = rb_next(prev);
3998 		}
3999 	}
4000 	while (node) {
4001 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4002 		objectid = entry->vfs_inode.i_ino + 1;
4003 		inode = igrab(&entry->vfs_inode);
4004 		if (inode) {
4005 			spin_unlock(&root->inode_lock);
4006 			if (atomic_read(&inode->i_count) > 1)
4007 				d_prune_aliases(inode);
4008 			/*
4009 			 * btrfs_drop_inode will have it removed from
4010 			 * the inode cache when its usage count
4011 			 * hits zero.
4012 			 */
4013 			iput(inode);
4014 			cond_resched();
4015 			spin_lock(&root->inode_lock);
4016 			goto again;
4017 		}
4018 
4019 		if (cond_resched_lock(&root->inode_lock))
4020 			goto again;
4021 
4022 		node = rb_next(node);
4023 	}
4024 	spin_unlock(&root->inode_lock);
4025 	return 0;
4026 }
4027 
4028 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4029 {
4030 	struct btrfs_iget_args *args = p;
4031 	inode->i_ino = args->ino;
4032 	BTRFS_I(inode)->root = args->root;
4033 	btrfs_set_inode_space_info(args->root, inode);
4034 	return 0;
4035 }
4036 
4037 static int btrfs_find_actor(struct inode *inode, void *opaque)
4038 {
4039 	struct btrfs_iget_args *args = opaque;
4040 	return args->ino == inode->i_ino &&
4041 		args->root == BTRFS_I(inode)->root;
4042 }
4043 
4044 static struct inode *btrfs_iget_locked(struct super_block *s,
4045 				       u64 objectid,
4046 				       struct btrfs_root *root)
4047 {
4048 	struct inode *inode;
4049 	struct btrfs_iget_args args;
4050 	args.ino = objectid;
4051 	args.root = root;
4052 
4053 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4054 			     btrfs_init_locked_inode,
4055 			     (void *)&args);
4056 	return inode;
4057 }
4058 
4059 /* Get an inode object given its location and corresponding root.
4060  * Returns in *new if the inode was read from disk
4061  */
4062 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4063 			 struct btrfs_root *root, int *new)
4064 {
4065 	struct inode *inode;
4066 
4067 	inode = btrfs_iget_locked(s, location->objectid, root);
4068 	if (!inode)
4069 		return ERR_PTR(-ENOMEM);
4070 
4071 	if (inode->i_state & I_NEW) {
4072 		BTRFS_I(inode)->root = root;
4073 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4074 		btrfs_read_locked_inode(inode);
4075 
4076 		inode_tree_add(inode);
4077 		unlock_new_inode(inode);
4078 		if (new)
4079 			*new = 1;
4080 	}
4081 
4082 	return inode;
4083 }
4084 
4085 static struct inode *new_simple_dir(struct super_block *s,
4086 				    struct btrfs_key *key,
4087 				    struct btrfs_root *root)
4088 {
4089 	struct inode *inode = new_inode(s);
4090 
4091 	if (!inode)
4092 		return ERR_PTR(-ENOMEM);
4093 
4094 	BTRFS_I(inode)->root = root;
4095 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4096 	BTRFS_I(inode)->dummy_inode = 1;
4097 
4098 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4099 	inode->i_op = &simple_dir_inode_operations;
4100 	inode->i_fop = &simple_dir_operations;
4101 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4102 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4103 
4104 	return inode;
4105 }
4106 
4107 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4108 {
4109 	struct inode *inode;
4110 	struct btrfs_root *root = BTRFS_I(dir)->root;
4111 	struct btrfs_root *sub_root = root;
4112 	struct btrfs_key location;
4113 	int index;
4114 	int ret;
4115 
4116 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4117 		return ERR_PTR(-ENAMETOOLONG);
4118 
4119 	ret = btrfs_inode_by_name(dir, dentry, &location);
4120 
4121 	if (ret < 0)
4122 		return ERR_PTR(ret);
4123 
4124 	if (location.objectid == 0)
4125 		return NULL;
4126 
4127 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4128 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4129 		return inode;
4130 	}
4131 
4132 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4133 
4134 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4135 	ret = fixup_tree_root_location(root, dir, dentry,
4136 				       &location, &sub_root);
4137 	if (ret < 0) {
4138 		if (ret != -ENOENT)
4139 			inode = ERR_PTR(ret);
4140 		else
4141 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4142 	} else {
4143 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4144 	}
4145 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4146 
4147 	if (!IS_ERR(inode) && root != sub_root) {
4148 		down_read(&root->fs_info->cleanup_work_sem);
4149 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4150 			btrfs_orphan_cleanup(sub_root);
4151 		up_read(&root->fs_info->cleanup_work_sem);
4152 	}
4153 
4154 	return inode;
4155 }
4156 
4157 static int btrfs_dentry_delete(const struct dentry *dentry)
4158 {
4159 	struct btrfs_root *root;
4160 
4161 	if (!dentry->d_inode && !IS_ROOT(dentry))
4162 		dentry = dentry->d_parent;
4163 
4164 	if (dentry->d_inode) {
4165 		root = BTRFS_I(dentry->d_inode)->root;
4166 		if (btrfs_root_refs(&root->root_item) == 0)
4167 			return 1;
4168 	}
4169 	return 0;
4170 }
4171 
4172 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4173 				   struct nameidata *nd)
4174 {
4175 	struct inode *inode;
4176 
4177 	inode = btrfs_lookup_dentry(dir, dentry);
4178 	if (IS_ERR(inode))
4179 		return ERR_CAST(inode);
4180 
4181 	return d_splice_alias(inode, dentry);
4182 }
4183 
4184 static unsigned char btrfs_filetype_table[] = {
4185 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4186 };
4187 
4188 static int btrfs_real_readdir(struct file *filp, void *dirent,
4189 			      filldir_t filldir)
4190 {
4191 	struct inode *inode = filp->f_dentry->d_inode;
4192 	struct btrfs_root *root = BTRFS_I(inode)->root;
4193 	struct btrfs_item *item;
4194 	struct btrfs_dir_item *di;
4195 	struct btrfs_key key;
4196 	struct btrfs_key found_key;
4197 	struct btrfs_path *path;
4198 	int ret;
4199 	u32 nritems;
4200 	struct extent_buffer *leaf;
4201 	int slot;
4202 	int advance;
4203 	unsigned char d_type;
4204 	int over = 0;
4205 	u32 di_cur;
4206 	u32 di_total;
4207 	u32 di_len;
4208 	int key_type = BTRFS_DIR_INDEX_KEY;
4209 	char tmp_name[32];
4210 	char *name_ptr;
4211 	int name_len;
4212 
4213 	/* FIXME, use a real flag for deciding about the key type */
4214 	if (root->fs_info->tree_root == root)
4215 		key_type = BTRFS_DIR_ITEM_KEY;
4216 
4217 	/* special case for "." */
4218 	if (filp->f_pos == 0) {
4219 		over = filldir(dirent, ".", 1,
4220 			       1, inode->i_ino,
4221 			       DT_DIR);
4222 		if (over)
4223 			return 0;
4224 		filp->f_pos = 1;
4225 	}
4226 	/* special case for .., just use the back ref */
4227 	if (filp->f_pos == 1) {
4228 		u64 pino = parent_ino(filp->f_path.dentry);
4229 		over = filldir(dirent, "..", 2,
4230 			       2, pino, DT_DIR);
4231 		if (over)
4232 			return 0;
4233 		filp->f_pos = 2;
4234 	}
4235 	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
4236 	path->reada = 2;
4237 
4238 	btrfs_set_key_type(&key, key_type);
4239 	key.offset = filp->f_pos;
4240 	key.objectid = inode->i_ino;
4241 
4242 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4243 	if (ret < 0)
4244 		goto err;
4245 	advance = 0;
4246 
4247 	while (1) {
4248 		leaf = path->nodes[0];
4249 		nritems = btrfs_header_nritems(leaf);
4250 		slot = path->slots[0];
4251 		if (advance || slot >= nritems) {
4252 			if (slot >= nritems - 1) {
4253 				ret = btrfs_next_leaf(root, path);
4254 				if (ret)
4255 					break;
4256 				leaf = path->nodes[0];
4257 				nritems = btrfs_header_nritems(leaf);
4258 				slot = path->slots[0];
4259 			} else {
4260 				slot++;
4261 				path->slots[0]++;
4262 			}
4263 		}
4264 
4265 		advance = 1;
4266 		item = btrfs_item_nr(leaf, slot);
4267 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4268 
4269 		if (found_key.objectid != key.objectid)
4270 			break;
4271 		if (btrfs_key_type(&found_key) != key_type)
4272 			break;
4273 		if (found_key.offset < filp->f_pos)
4274 			continue;
4275 
4276 		filp->f_pos = found_key.offset;
4277 
4278 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4279 		di_cur = 0;
4280 		di_total = btrfs_item_size(leaf, item);
4281 
4282 		while (di_cur < di_total) {
4283 			struct btrfs_key location;
4284 
4285 			name_len = btrfs_dir_name_len(leaf, di);
4286 			if (name_len <= sizeof(tmp_name)) {
4287 				name_ptr = tmp_name;
4288 			} else {
4289 				name_ptr = kmalloc(name_len, GFP_NOFS);
4290 				if (!name_ptr) {
4291 					ret = -ENOMEM;
4292 					goto err;
4293 				}
4294 			}
4295 			read_extent_buffer(leaf, name_ptr,
4296 					   (unsigned long)(di + 1), name_len);
4297 
4298 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4299 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4300 
4301 			/* is this a reference to our own snapshot? If so
4302 			 * skip it
4303 			 */
4304 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4305 			    location.objectid == root->root_key.objectid) {
4306 				over = 0;
4307 				goto skip;
4308 			}
4309 			over = filldir(dirent, name_ptr, name_len,
4310 				       found_key.offset, location.objectid,
4311 				       d_type);
4312 
4313 skip:
4314 			if (name_ptr != tmp_name)
4315 				kfree(name_ptr);
4316 
4317 			if (over)
4318 				goto nopos;
4319 			di_len = btrfs_dir_name_len(leaf, di) +
4320 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4321 			di_cur += di_len;
4322 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4323 		}
4324 	}
4325 
4326 	/* Reached end of directory/root. Bump pos past the last item. */
4327 	if (key_type == BTRFS_DIR_INDEX_KEY)
4328 		/*
4329 		 * 32-bit glibc will use getdents64, but then strtol -
4330 		 * so the last number we can serve is this.
4331 		 */
4332 		filp->f_pos = 0x7fffffff;
4333 	else
4334 		filp->f_pos++;
4335 nopos:
4336 	ret = 0;
4337 err:
4338 	btrfs_free_path(path);
4339 	return ret;
4340 }
4341 
4342 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4343 {
4344 	struct btrfs_root *root = BTRFS_I(inode)->root;
4345 	struct btrfs_trans_handle *trans;
4346 	int ret = 0;
4347 	bool nolock = false;
4348 
4349 	if (BTRFS_I(inode)->dummy_inode)
4350 		return 0;
4351 
4352 	smp_mb();
4353 	nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
4354 
4355 	if (wbc->sync_mode == WB_SYNC_ALL) {
4356 		if (nolock)
4357 			trans = btrfs_join_transaction_nolock(root, 1);
4358 		else
4359 			trans = btrfs_join_transaction(root, 1);
4360 		if (IS_ERR(trans))
4361 			return PTR_ERR(trans);
4362 		btrfs_set_trans_block_group(trans, inode);
4363 		if (nolock)
4364 			ret = btrfs_end_transaction_nolock(trans, root);
4365 		else
4366 			ret = btrfs_commit_transaction(trans, root);
4367 	}
4368 	return ret;
4369 }
4370 
4371 /*
4372  * This is somewhat expensive, updating the tree every time the
4373  * inode changes.  But, it is most likely to find the inode in cache.
4374  * FIXME, needs more benchmarking... there are no reasons other than performance
4375  * to keep or drop this code.
4376  */
4377 void btrfs_dirty_inode(struct inode *inode)
4378 {
4379 	struct btrfs_root *root = BTRFS_I(inode)->root;
4380 	struct btrfs_trans_handle *trans;
4381 	int ret;
4382 
4383 	if (BTRFS_I(inode)->dummy_inode)
4384 		return;
4385 
4386 	trans = btrfs_join_transaction(root, 1);
4387 	BUG_ON(IS_ERR(trans));
4388 	btrfs_set_trans_block_group(trans, inode);
4389 
4390 	ret = btrfs_update_inode(trans, root, inode);
4391 	if (ret && ret == -ENOSPC) {
4392 		/* whoops, let's try again with the full transaction */
4393 		btrfs_end_transaction(trans, root);
4394 		trans = btrfs_start_transaction(root, 1);
4395 		if (IS_ERR(trans)) {
4396 			if (printk_ratelimit()) {
4397 				printk(KERN_ERR "btrfs: failed to "
4398 				       "dirty inode %lu error %ld\n",
4399 				       inode->i_ino, PTR_ERR(trans));
4400 			}
4401 			return;
4402 		}
4403 		btrfs_set_trans_block_group(trans, inode);
4404 
4405 		ret = btrfs_update_inode(trans, root, inode);
4406 		if (ret) {
4407 			if (printk_ratelimit()) {
4408 				printk(KERN_ERR "btrfs: failed to "
4409 				       "dirty inode %lu error %d\n",
4410 				       inode->i_ino, ret);
4411 			}
4412 		}
4413 	}
4414 	btrfs_end_transaction(trans, root);
4415 }
4416 
4417 /*
4418  * find the highest existing sequence number in a directory
4419  * and then set the in-memory index_cnt variable to reflect
4420  * free sequence numbers
4421  */
4422 static int btrfs_set_inode_index_count(struct inode *inode)
4423 {
4424 	struct btrfs_root *root = BTRFS_I(inode)->root;
4425 	struct btrfs_key key, found_key;
4426 	struct btrfs_path *path;
4427 	struct extent_buffer *leaf;
4428 	int ret;
4429 
4430 	key.objectid = inode->i_ino;
4431 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4432 	key.offset = (u64)-1;
4433 
4434 	path = btrfs_alloc_path();
4435 	if (!path)
4436 		return -ENOMEM;
4437 
4438 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4439 	if (ret < 0)
4440 		goto out;
4441 	/* FIXME: we should be able to handle this */
4442 	if (ret == 0)
4443 		goto out;
4444 	ret = 0;
4445 
4446 	/*
4447 	 * MAGIC NUMBER EXPLANATION:
4448 	 * since we search a directory based on f_pos, and '.' and '..' have
4449 	 * f_pos of 0 and 1 respectively, everybody else has to start at 2
4451 	 */
4452 	if (path->slots[0] == 0) {
4453 		BTRFS_I(inode)->index_cnt = 2;
4454 		goto out;
4455 	}
4456 
4457 	path->slots[0]--;
4458 
4459 	leaf = path->nodes[0];
4460 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4461 
4462 	if (found_key.objectid != inode->i_ino ||
4463 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4464 		BTRFS_I(inode)->index_cnt = 2;
4465 		goto out;
4466 	}
4467 
4468 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4469 out:
4470 	btrfs_free_path(path);
4471 	return ret;
4472 }
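
/*
 * Worked example for btrfs_set_inode_index_count() above (hypothetical
 * numbers): if the highest DIR_INDEX item of this directory has key
 * offset 57, index_cnt becomes 58, so the next entry added gets index
 * 58; a directory with no DIR_INDEX items at all gets index_cnt = 2,
 * leaving f_pos 0 and 1 free for '.' and '..'.
 */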
4473 
4474 /*
4475  * helper to find a free sequence number in a given directory.  This current
4476  * code is very simple; later versions will do smarter things in the btree
4477  */
4478 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4479 {
4480 	int ret = 0;
4481 
4482 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4483 		ret = btrfs_set_inode_index_count(dir);
4484 		if (ret)
4485 			return ret;
4486 	}
4487 
4488 	*index = BTRFS_I(dir)->index_cnt;
4489 	BTRFS_I(dir)->index_cnt++;
4490 
4491 	return ret;
4492 }
4493 
4494 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4495 				     struct btrfs_root *root,
4496 				     struct inode *dir,
4497 				     const char *name, int name_len,
4498 				     u64 ref_objectid, u64 objectid,
4499 				     u64 alloc_hint, int mode, u64 *index)
4500 {
4501 	struct inode *inode;
4502 	struct btrfs_inode_item *inode_item;
4503 	struct btrfs_key *location;
4504 	struct btrfs_path *path;
4505 	struct btrfs_inode_ref *ref;
4506 	struct btrfs_key key[2];
4507 	u32 sizes[2];
4508 	unsigned long ptr;
4509 	int ret;
4510 	int owner;
4511 
4512 	path = btrfs_alloc_path();
4513 	BUG_ON(!path);
4514 
4515 	inode = new_inode(root->fs_info->sb);
4516 	if (!inode)
4517 		return ERR_PTR(-ENOMEM);
4518 
4519 	if (dir) {
4520 		ret = btrfs_set_inode_index(dir, index);
4521 		if (ret) {
4522 			iput(inode);
4523 			return ERR_PTR(ret);
4524 		}
4525 	}
4526 	/*
4527 	 * index_cnt is ignored for everything but a dir,
4528 	 * btrfs_set_inode_index_count has an explanation for the magic
4529 	 * number
4530 	 */
4531 	BTRFS_I(inode)->index_cnt = 2;
4532 	BTRFS_I(inode)->root = root;
4533 	BTRFS_I(inode)->generation = trans->transid;
4534 	inode->i_generation = BTRFS_I(inode)->generation;
4535 	btrfs_set_inode_space_info(root, inode);
4536 
4537 	if (mode & S_IFDIR)
4538 		owner = 0;
4539 	else
4540 		owner = 1;
4541 	BTRFS_I(inode)->block_group =
4542 			btrfs_find_block_group(root, 0, alloc_hint, owner);
4543 
4544 	key[0].objectid = objectid;
4545 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4546 	key[0].offset = 0;
4547 
4548 	key[1].objectid = objectid;
4549 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4550 	key[1].offset = ref_objectid;
4551 
4552 	sizes[0] = sizeof(struct btrfs_inode_item);
4553 	sizes[1] = name_len + sizeof(*ref);
4554 
4555 	path->leave_spinning = 1;
4556 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4557 	if (ret != 0)
4558 		goto fail;
4559 
4560 	inode_init_owner(inode, dir, mode);
4561 	inode->i_ino = objectid;
4562 	inode_set_bytes(inode, 0);
4563 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4564 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4565 				  struct btrfs_inode_item);
4566 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4567 
4568 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4569 			     struct btrfs_inode_ref);
4570 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4571 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4572 	ptr = (unsigned long)(ref + 1);
4573 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4574 
4575 	btrfs_mark_buffer_dirty(path->nodes[0]);
4576 	btrfs_free_path(path);
4577 
4578 	location = &BTRFS_I(inode)->location;
4579 	location->objectid = objectid;
4580 	location->offset = 0;
4581 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4582 
4583 	btrfs_inherit_iflags(inode, dir);
4584 
4585 	if ((mode & S_IFREG)) {
4586 		if (btrfs_test_opt(root, NODATASUM))
4587 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4588 		if (btrfs_test_opt(root, NODATACOW))
4589 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4590 	}
4591 
4592 	insert_inode_hash(inode);
4593 	inode_tree_add(inode);
4594 	return inode;
4595 fail:
4596 	if (dir)
4597 		BTRFS_I(dir)->index_cnt--;
4598 	btrfs_free_path(path);
4599 	iput(inode);
4600 	return ERR_PTR(ret);
4601 }
4602 
4603 static inline u8 btrfs_inode_type(struct inode *inode)
4604 {
4605 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4606 }
4607 
4608 /*
4609  * utility function to add 'inode' into 'parent_inode' with
4610  * a given name and a given sequence number.
4611  * if 'add_backref' is true, also insert a backref from the
4612  * inode to the parent directory.
4613  */
4614 int btrfs_add_link(struct btrfs_trans_handle *trans,
4615 		   struct inode *parent_inode, struct inode *inode,
4616 		   const char *name, int name_len, int add_backref, u64 index)
4617 {
4618 	int ret = 0;
4619 	struct btrfs_key key;
4620 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4621 
4622 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4623 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4624 	} else {
4625 		key.objectid = inode->i_ino;
4626 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4627 		key.offset = 0;
4628 	}
4629 
4630 	if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4631 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4632 					 key.objectid, root->root_key.objectid,
4633 					 parent_inode->i_ino,
4634 					 index, name, name_len);
4635 	} else if (add_backref) {
4636 		ret = btrfs_insert_inode_ref(trans, root,
4637 					     name, name_len, inode->i_ino,
4638 					     parent_inode->i_ino, index);
4639 	}
4640 
4641 	if (ret == 0) {
4642 		ret = btrfs_insert_dir_item(trans, root, name, name_len,
4643 					    parent_inode->i_ino, &key,
4644 					    btrfs_inode_type(inode), index);
4645 		BUG_ON(ret);
4646 
4647 		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4648 				   name_len * 2);
4649 		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4650 		ret = btrfs_update_inode(trans, root, parent_inode);
4651 	}
4652 	return ret;
4653 }
4654 
4655 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4656 			    struct inode *dir, struct dentry *dentry,
4657 			    struct inode *inode, int backref, u64 index)
4658 {
4659 	int err = btrfs_add_link(trans, dir, inode,
4660 				 dentry->d_name.name, dentry->d_name.len,
4661 				 backref, index);
4662 	if (!err) {
4663 		d_instantiate(dentry, inode);
4664 		return 0;
4665 	}
4666 	if (err > 0)
4667 		err = -EEXIST;
4668 	return err;
4669 }
4670 
4671 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4672 			int mode, dev_t rdev)
4673 {
4674 	struct btrfs_trans_handle *trans;
4675 	struct btrfs_root *root = BTRFS_I(dir)->root;
4676 	struct inode *inode = NULL;
4677 	int err;
4678 	int drop_inode = 0;
4679 	u64 objectid;
4680 	unsigned long nr = 0;
4681 	u64 index = 0;
4682 
4683 	if (!new_valid_dev(rdev))
4684 		return -EINVAL;
4685 
4686 	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4687 	if (err)
4688 		return err;
4689 
4690 	/*
4691 	 * 2 for inode item and ref
4692 	 * 2 for dir items
4693 	 * 1 for xattr if selinux is on
4694 	 */
4695 	trans = btrfs_start_transaction(root, 5);
4696 	if (IS_ERR(trans))
4697 		return PTR_ERR(trans);
4698 
4699 	btrfs_set_trans_block_group(trans, dir);
4700 
4701 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4702 				dentry->d_name.len, dir->i_ino, objectid,
4703 				BTRFS_I(dir)->block_group, mode, &index);
4704 	err = PTR_ERR(inode);
4705 	if (IS_ERR(inode))
4706 		goto out_unlock;
4707 
4708 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4709 	if (err) {
4710 		drop_inode = 1;
4711 		goto out_unlock;
4712 	}
4713 
4714 	btrfs_set_trans_block_group(trans, inode);
4715 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4716 	if (err)
4717 		drop_inode = 1;
4718 	else {
4719 		inode->i_op = &btrfs_special_inode_operations;
4720 		init_special_inode(inode, inode->i_mode, rdev);
4721 		btrfs_update_inode(trans, root, inode);
4722 	}
4723 	btrfs_update_inode_block_group(trans, inode);
4724 	btrfs_update_inode_block_group(trans, dir);
4725 out_unlock:
4726 	nr = trans->blocks_used;
4727 	btrfs_end_transaction_throttle(trans, root);
4728 	btrfs_btree_balance_dirty(root, nr);
4729 	if (drop_inode) {
4730 		inode_dec_link_count(inode);
4731 		iput(inode);
4732 	}
4733 	return err;
4734 }
4735 
4736 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4737 			int mode, struct nameidata *nd)
4738 {
4739 	struct btrfs_trans_handle *trans;
4740 	struct btrfs_root *root = BTRFS_I(dir)->root;
4741 	struct inode *inode = NULL;
4742 	int drop_inode = 0;
4743 	int err;
4744 	unsigned long nr = 0;
4745 	u64 objectid;
4746 	u64 index = 0;
4747 
4748 	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4749 	if (err)
4750 		return err;
4751 	/*
4752 	 * 2 for inode item and ref
4753 	 * 2 for dir items
4754 	 * 1 for xattr if selinux is on
4755 	 */
4756 	trans = btrfs_start_transaction(root, 5);
4757 	if (IS_ERR(trans))
4758 		return PTR_ERR(trans);
4759 
4760 	btrfs_set_trans_block_group(trans, dir);
4761 
4762 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4763 				dentry->d_name.len, dir->i_ino, objectid,
4764 				BTRFS_I(dir)->block_group, mode, &index);
4765 	err = PTR_ERR(inode);
4766 	if (IS_ERR(inode))
4767 		goto out_unlock;
4768 
4769 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4770 	if (err) {
4771 		drop_inode = 1;
4772 		goto out_unlock;
4773 	}
4774 
4775 	btrfs_set_trans_block_group(trans, inode);
4776 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4777 	if (err)
4778 		drop_inode = 1;
4779 	else {
4780 		inode->i_mapping->a_ops = &btrfs_aops;
4781 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4782 		inode->i_fop = &btrfs_file_operations;
4783 		inode->i_op = &btrfs_file_inode_operations;
4784 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4785 	}
4786 	btrfs_update_inode_block_group(trans, inode);
4787 	btrfs_update_inode_block_group(trans, dir);
4788 out_unlock:
4789 	nr = trans->blocks_used;
4790 	btrfs_end_transaction_throttle(trans, root);
4791 	if (drop_inode) {
4792 		inode_dec_link_count(inode);
4793 		iput(inode);
4794 	}
4795 	btrfs_btree_balance_dirty(root, nr);
4796 	return err;
4797 }
4798 
4799 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4800 		      struct dentry *dentry)
4801 {
4802 	struct btrfs_trans_handle *trans;
4803 	struct btrfs_root *root = BTRFS_I(dir)->root;
4804 	struct inode *inode = old_dentry->d_inode;
4805 	u64 index;
4806 	unsigned long nr = 0;
4807 	int err;
4808 	int drop_inode = 0;
4809 
4810 	/* do not allow sys_link's with other subvols of the same device */
4811 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4812 		return -EPERM;
4813 
4814 	btrfs_inc_nlink(inode);
4815 	inode->i_ctime = CURRENT_TIME;
4816 
4817 	err = btrfs_set_inode_index(dir, &index);
4818 	if (err)
4819 		goto fail;
4820 
4821 	/*
4822 	 * 2 items for inode and inode ref
4823 	 * 2 items for dir items
4824 	 * 1 item for parent inode
4825 	 */
4826 	trans = btrfs_start_transaction(root, 5);
4827 	if (IS_ERR(trans)) {
4828 		err = PTR_ERR(trans);
4829 		goto fail;
4830 	}
4831 
4832 	btrfs_set_trans_block_group(trans, dir);
4833 	ihold(inode);
4834 
4835 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4836 
4837 	if (err) {
4838 		drop_inode = 1;
4839 	} else {
4840 		struct dentry *parent = dget_parent(dentry);
4841 		btrfs_update_inode_block_group(trans, dir);
4842 		err = btrfs_update_inode(trans, root, inode);
4843 		BUG_ON(err);
4844 		btrfs_log_new_name(trans, inode, NULL, parent);
4845 		dput(parent);
4846 	}
4847 
4848 	nr = trans->blocks_used;
4849 	btrfs_end_transaction_throttle(trans, root);
4850 fail:
4851 	if (drop_inode) {
4852 		inode_dec_link_count(inode);
4853 		iput(inode);
4854 	}
4855 	btrfs_btree_balance_dirty(root, nr);
4856 	return err;
4857 }
4858 
4859 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4860 {
4861 	struct inode *inode = NULL;
4862 	struct btrfs_trans_handle *trans;
4863 	struct btrfs_root *root = BTRFS_I(dir)->root;
4864 	int err = 0;
4865 	int drop_on_err = 0;
4866 	u64 objectid = 0;
4867 	u64 index = 0;
4868 	unsigned long nr = 1;
4869 
4870 	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4871 	if (err)
4872 		return err;
4873 
4874 	/*
4875 	 * 2 items for inode and ref
4876 	 * 2 items for dir items
4877 	 * 1 for xattr if selinux is on
4878 	 */
4879 	trans = btrfs_start_transaction(root, 5);
4880 	if (IS_ERR(trans))
4881 		return PTR_ERR(trans);
4882 	btrfs_set_trans_block_group(trans, dir);
4883 
4884 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4885 				dentry->d_name.len, dir->i_ino, objectid,
4886 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
4887 				&index);
4888 	if (IS_ERR(inode)) {
4889 		err = PTR_ERR(inode);
4890 		goto out_fail;
4891 	}
4892 
4893 	drop_on_err = 1;
4894 
4895 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4896 	if (err)
4897 		goto out_fail;
4898 
4899 	inode->i_op = &btrfs_dir_inode_operations;
4900 	inode->i_fop = &btrfs_dir_file_operations;
4901 	btrfs_set_trans_block_group(trans, inode);
4902 
4903 	btrfs_i_size_write(inode, 0);
4904 	err = btrfs_update_inode(trans, root, inode);
4905 	if (err)
4906 		goto out_fail;
4907 
4908 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
4909 			     dentry->d_name.len, 0, index);
4910 	if (err)
4911 		goto out_fail;
4912 
4913 	d_instantiate(dentry, inode);
4914 	drop_on_err = 0;
4915 	btrfs_update_inode_block_group(trans, inode);
4916 	btrfs_update_inode_block_group(trans, dir);
4917 
4918 out_fail:
4919 	nr = trans->blocks_used;
4920 	btrfs_end_transaction_throttle(trans, root);
4921 	if (drop_on_err)
4922 		iput(inode);
4923 	btrfs_btree_balance_dirty(root, nr);
4924 	return err;
4925 }
4926 
4927 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
4928  * and an extent that you want to insert, deal with overlap and insert
4929  * the new extent into the tree.
4930  */
4931 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4932 				struct extent_map *existing,
4933 				struct extent_map *em,
4934 				u64 map_start, u64 map_len)
4935 {
4936 	u64 start_diff;
4937 
4938 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4939 	start_diff = map_start - em->start;
4940 	em->start = map_start;
4941 	em->len = map_len;
4942 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4943 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4944 		em->block_start += start_diff;
4945 		em->block_len -= start_diff;
4946 	}
4947 	return add_extent_mapping(em_tree, em);
4948 }
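
/*
 * Worked example for merge_extent_mapping() above (hypothetical numbers):
 * if em describes file range [0, 16K) at block_start 100K, but only
 * [4K, 8K) should be inserted (map_start = 4K, map_len = 4K), start_diff
 * is 4K and em is trimmed to start 4K, len 4K, block_start 104K before it
 * is added to the tree.  Compressed extents keep their original
 * block_start and block_len, presumably because the whole compressed
 * extent is needed to decompress any part of it.
 */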
4949 
4950 static noinline int uncompress_inline(struct btrfs_path *path,
4951 				      struct inode *inode, struct page *page,
4952 				      size_t pg_offset, u64 extent_offset,
4953 				      struct btrfs_file_extent_item *item)
4954 {
4955 	int ret;
4956 	struct extent_buffer *leaf = path->nodes[0];
4957 	char *tmp;
4958 	size_t max_size;
4959 	unsigned long inline_size;
4960 	unsigned long ptr;
4961 	int compress_type;
4962 
4963 	WARN_ON(pg_offset != 0);
4964 	compress_type = btrfs_file_extent_compression(leaf, item);
4965 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
4966 	inline_size = btrfs_file_extent_inline_item_len(leaf,
4967 					btrfs_item_nr(leaf, path->slots[0]));
4968 	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
4969 	ptr = btrfs_file_extent_inline_start(item);
4970 
4971 	read_extent_buffer(leaf, tmp, ptr, inline_size);
4972 
4973 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4974 	ret = btrfs_decompress(compress_type, tmp, page,
4975 			       extent_offset, inline_size, max_size);
4976 	if (ret) {
4977 		char *kaddr = kmap_atomic(page, KM_USER0);
4978 		unsigned long copy_size = min_t(u64,
4979 				  PAGE_CACHE_SIZE - pg_offset,
4980 				  max_size - extent_offset);
4981 		memset(kaddr + pg_offset, 0, copy_size);
4982 		kunmap_atomic(kaddr, KM_USER0);
4983 	}
4984 	kfree(tmp);
4985 	return 0;
4986 }
4987 
4988 /*
4989  * a bit scary, this does extent mapping from logical file offset to the disk.
4990  * the ugly parts come from merging extents from the disk with the in-ram
4991  * representation.  This gets more complex because of the data=ordered code,
4992  * where the in-ram extents might be locked pending data=ordered completion.
4993  *
4994  * This also copies inline extents directly into the page.
4995  */
4996 
4997 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4998 				    size_t pg_offset, u64 start, u64 len,
4999 				    int create)
5000 {
5001 	int ret;
5002 	int err = 0;
5003 	u64 bytenr;
5004 	u64 extent_start = 0;
5005 	u64 extent_end = 0;
5006 	u64 objectid = inode->i_ino;
5007 	u32 found_type;
5008 	struct btrfs_path *path = NULL;
5009 	struct btrfs_root *root = BTRFS_I(inode)->root;
5010 	struct btrfs_file_extent_item *item;
5011 	struct extent_buffer *leaf;
5012 	struct btrfs_key found_key;
5013 	struct extent_map *em = NULL;
5014 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5015 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5016 	struct btrfs_trans_handle *trans = NULL;
5017 	int compress_type;
5018 
5019 again:
5020 	read_lock(&em_tree->lock);
5021 	em = lookup_extent_mapping(em_tree, start, len);
5022 	if (em)
5023 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5024 	read_unlock(&em_tree->lock);
5025 
5026 	if (em) {
5027 		if (em->start > start || em->start + em->len <= start)
5028 			free_extent_map(em);
5029 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5030 			free_extent_map(em);
5031 		else
5032 			goto out;
5033 	}
5034 	em = alloc_extent_map(GFP_NOFS);
5035 	if (!em) {
5036 		err = -ENOMEM;
5037 		goto out;
5038 	}
5039 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5040 	em->start = EXTENT_MAP_HOLE;
5041 	em->orig_start = EXTENT_MAP_HOLE;
5042 	em->len = (u64)-1;
5043 	em->block_len = (u64)-1;
5044 
5045 	if (!path) {
5046 		path = btrfs_alloc_path();
5047 		BUG_ON(!path);
5048 	}
5049 
5050 	ret = btrfs_lookup_file_extent(trans, root, path,
5051 				       objectid, start, trans != NULL);
5052 	if (ret < 0) {
5053 		err = ret;
5054 		goto out;
5055 	}
5056 
5057 	if (ret != 0) {
5058 		if (path->slots[0] == 0)
5059 			goto not_found;
5060 		path->slots[0]--;
5061 	}
5062 
5063 	leaf = path->nodes[0];
5064 	item = btrfs_item_ptr(leaf, path->slots[0],
5065 			      struct btrfs_file_extent_item);
5066 	/* are we inside the extent that was found? */
5067 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5068 	found_type = btrfs_key_type(&found_key);
5069 	if (found_key.objectid != objectid ||
5070 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5071 		goto not_found;
5072 	}
5073 
5074 	found_type = btrfs_file_extent_type(leaf, item);
5075 	extent_start = found_key.offset;
5076 	compress_type = btrfs_file_extent_compression(leaf, item);
5077 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5078 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5079 		extent_end = extent_start +
5080 		       btrfs_file_extent_num_bytes(leaf, item);
5081 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5082 		size_t size;
5083 		size = btrfs_file_extent_inline_len(leaf, item);
5084 		extent_end = (extent_start + size + root->sectorsize - 1) &
5085 			~((u64)root->sectorsize - 1);
5086 	}
5087 
5088 	if (start >= extent_end) {
5089 		path->slots[0]++;
5090 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5091 			ret = btrfs_next_leaf(root, path);
5092 			if (ret < 0) {
5093 				err = ret;
5094 				goto out;
5095 			}
5096 			if (ret > 0)
5097 				goto not_found;
5098 			leaf = path->nodes[0];
5099 		}
5100 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5101 		if (found_key.objectid != objectid ||
5102 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5103 			goto not_found;
5104 		if (start + len <= found_key.offset)
5105 			goto not_found;
5106 		em->start = start;
5107 		em->len = found_key.offset - start;
5108 		goto not_found_em;
5109 	}
5110 
5111 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5112 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5113 		em->start = extent_start;
5114 		em->len = extent_end - extent_start;
5115 		em->orig_start = extent_start -
5116 				 btrfs_file_extent_offset(leaf, item);
5117 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5118 		if (bytenr == 0) {
5119 			em->block_start = EXTENT_MAP_HOLE;
5120 			goto insert;
5121 		}
5122 		if (compress_type != BTRFS_COMPRESS_NONE) {
5123 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5124 			em->compress_type = compress_type;
5125 			em->block_start = bytenr;
5126 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
5127 									 item);
5128 		} else {
5129 			bytenr += btrfs_file_extent_offset(leaf, item);
5130 			em->block_start = bytenr;
5131 			em->block_len = em->len;
5132 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5133 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5134 		}
5135 		goto insert;
5136 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5137 		unsigned long ptr;
5138 		char *map;
5139 		size_t size;
5140 		size_t extent_offset;
5141 		size_t copy_size;
5142 
5143 		em->block_start = EXTENT_MAP_INLINE;
5144 		if (!page || create) {
5145 			em->start = extent_start;
5146 			em->len = extent_end - extent_start;
5147 			goto out;
5148 		}
5149 
5150 		size = btrfs_file_extent_inline_len(leaf, item);
5151 		extent_offset = page_offset(page) + pg_offset - extent_start;
5152 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5153 				size - extent_offset);
5154 		em->start = extent_start + extent_offset;
5155 		em->len = (copy_size + root->sectorsize - 1) &
5156 			~((u64)root->sectorsize - 1);
5157 		em->orig_start = EXTENT_MAP_INLINE;
5158 		if (compress_type) {
5159 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5160 			em->compress_type = compress_type;
5161 		}
5162 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5163 		if (create == 0 && !PageUptodate(page)) {
5164 			if (btrfs_file_extent_compression(leaf, item) !=
5165 			    BTRFS_COMPRESS_NONE) {
5166 				ret = uncompress_inline(path, inode, page,
5167 							pg_offset,
5168 							extent_offset, item);
5169 				BUG_ON(ret);
5170 			} else {
5171 				map = kmap(page);
5172 				read_extent_buffer(leaf, map + pg_offset, ptr,
5173 						   copy_size);
5174 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5175 					memset(map + pg_offset + copy_size, 0,
5176 					       PAGE_CACHE_SIZE - pg_offset -
5177 					       copy_size);
5178 				}
5179 				kunmap(page);
5180 			}
5181 			flush_dcache_page(page);
5182 		} else if (create && PageUptodate(page)) {
5183 			WARN_ON(1);
5184 			if (!trans) {
5185 				kunmap(page);
5186 				free_extent_map(em);
5187 				em = NULL;
5188 				btrfs_release_path(root, path);
5189 				trans = btrfs_join_transaction(root, 1);
5190 				if (IS_ERR(trans))
5191 					return ERR_CAST(trans);
5192 				goto again;
5193 			}
5194 			map = kmap(page);
5195 			write_extent_buffer(leaf, map + pg_offset, ptr,
5196 					    copy_size);
5197 			kunmap(page);
5198 			btrfs_mark_buffer_dirty(leaf);
5199 		}
5200 		set_extent_uptodate(io_tree, em->start,
5201 				    extent_map_end(em) - 1, GFP_NOFS);
5202 		goto insert;
5203 	} else {
5204 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5205 		WARN_ON(1);
5206 	}
5207 not_found:
5208 	em->start = start;
5209 	em->len = len;
5210 not_found_em:
5211 	em->block_start = EXTENT_MAP_HOLE;
5212 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5213 insert:
5214 	btrfs_release_path(root, path);
5215 	if (em->start > start || extent_map_end(em) <= start) {
5216 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5217 		       "[%llu %llu]\n", (unsigned long long)em->start,
5218 		       (unsigned long long)em->len,
5219 		       (unsigned long long)start,
5220 		       (unsigned long long)len);
5221 		err = -EIO;
5222 		goto out;
5223 	}
5224 
5225 	err = 0;
5226 	write_lock(&em_tree->lock);
5227 	ret = add_extent_mapping(em_tree, em);
5228 	/* it is possible that someone inserted the extent into the tree
5229 	 * while we had the lock dropped.  It is also possible that
5230 	 * an overlapping map exists in the tree
5231 	 */
5232 	if (ret == -EEXIST) {
5233 		struct extent_map *existing;
5234 
5235 		ret = 0;
5236 
5237 		existing = lookup_extent_mapping(em_tree, start, len);
5238 		if (existing && (existing->start > start ||
5239 		    existing->start + existing->len <= start)) {
5240 			free_extent_map(existing);
5241 			existing = NULL;
5242 		}
5243 		if (!existing) {
5244 			existing = lookup_extent_mapping(em_tree, em->start,
5245 							 em->len);
5246 			if (existing) {
5247 				err = merge_extent_mapping(em_tree, existing,
5248 							   em, start,
5249 							   root->sectorsize);
5250 				free_extent_map(existing);
5251 				if (err) {
5252 					free_extent_map(em);
5253 					em = NULL;
5254 				}
5255 			} else {
5256 				err = -EIO;
5257 				free_extent_map(em);
5258 				em = NULL;
5259 			}
5260 		} else {
5261 			free_extent_map(em);
5262 			em = existing;
5263 			err = 0;
5264 		}
5265 	}
5266 	write_unlock(&em_tree->lock);
5267 out:
5268 	if (path)
5269 		btrfs_free_path(path);
5270 	if (trans) {
5271 		ret = btrfs_end_transaction(trans, root);
5272 		if (!err)
5273 			err = ret;
5274 	}
5275 	if (err) {
5276 		free_extent_map(em);
5277 		return ERR_PTR(err);
5278 	}
5279 	return em;
5280 }
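
/*
 * Illustrative sketch of a btrfs_get_extent() caller (not a quote of any
 * existing caller in this file; the usage pattern is an assumption): map
 * the extent covering byte 'start' without creating anything, then test
 * whether that range is a hole:
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, PAGE_CACHE_SIZE, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	hole = (em->block_start == EXTENT_MAP_HOLE);
 *	free_extent_map(em);
 *
 * On success the caller owns a reference on the returned map and must
 * drop it with free_extent_map(), as btrfs_get_extent_fiemap() below does.
 */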
5281 
5282 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5283 					   size_t pg_offset, u64 start, u64 len,
5284 					   int create)
5285 {
5286 	struct extent_map *em;
5287 	struct extent_map *hole_em = NULL;
5288 	u64 range_start = start;
5289 	u64 end;
5290 	u64 found;
5291 	u64 found_end;
5292 	int err = 0;
5293 
5294 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5295 	if (IS_ERR(em))
5296 		return em;
5297 	if (em) {
5298 		/*
5299 		 * if our em maps to a hole, there might
5300 		 * actually be delalloc bytes behind it
5301 		 */
5302 		if (em->block_start != EXTENT_MAP_HOLE)
5303 			return em;
5304 		else
5305 			hole_em = em;
5306 	}
5307 
5308 	/* check to see if we've wrapped (len == -1 or similar) */
5309 	end = start + len;
5310 	if (end < start)
5311 		end = (u64)-1;
5312 	else
5313 		end -= 1;
5314 
5315 	em = NULL;
5316 
5317 	/* ok, we didn't find anything, let's look for delalloc */
5318 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5319 				 end, len, EXTENT_DELALLOC, 1);
5320 	found_end = range_start + found;
5321 	if (found_end < range_start)
5322 		found_end = (u64)-1;
5323 
5324 	/*
5325 	 * we didn't find anything useful, return
5326 	 * the original results from get_extent()
5327 	 */
5328 	if (range_start > end || found_end <= start) {
5329 		em = hole_em;
5330 		hole_em = NULL;
5331 		goto out;
5332 	}
5333 
5334 	/* adjust the range_start to make sure it doesn't
5335 	 * go backwards from the start they passed in
5336 	 */
5337 	range_start = max(start, range_start);
5338 	found = found_end - range_start;
5339 
5340 	if (found > 0) {
5341 		u64 hole_start = start;
5342 		u64 hole_len = len;
5343 
5344 		em = alloc_extent_map(GFP_NOFS);
5345 		if (!em) {
5346 			err = -ENOMEM;
5347 			goto out;
5348 		}
5349 		/*
5350 		 * when btrfs_get_extent can't find anything it
5351 		 * returns one huge hole
5352 		 *
5353 		 * make sure what it found really fits our range, and
5354 		 * adjust to make sure it is based on the start from
5355 		 * the caller
5356 		 */
5357 		if (hole_em) {
5358 			u64 calc_end = extent_map_end(hole_em);
5359 
5360 			if (calc_end <= start || (hole_em->start > end)) {
5361 				free_extent_map(hole_em);
5362 				hole_em = NULL;
5363 			} else {
5364 				hole_start = max(hole_em->start, start);
5365 				hole_len = calc_end - hole_start;
5366 			}
5367 		}
5368 		em->bdev = NULL;
5369 		if (hole_em && range_start > hole_start) {
5370 			/* our hole starts before our delalloc, so we
5371 			 * have to return just the parts of the hole
5372 			 * that go until the delalloc starts
5373 			 */
5374 			em->len = min(hole_len,
5375 				      range_start - hole_start);
5376 			em->start = hole_start;
5377 			em->orig_start = hole_start;
5378 			/*
5379 			 * don't adjust block start at all,
5380 			 * it is fixed at EXTENT_MAP_HOLE
5381 			 */
5382 			em->block_start = hole_em->block_start;
5383 			em->block_len = hole_len;
5384 		} else {
5385 			em->start = range_start;
5386 			em->len = found;
5387 			em->orig_start = range_start;
5388 			em->block_start = EXTENT_MAP_DELALLOC;
5389 			em->block_len = found;
5390 		}
5391 	} else if (hole_em) {
5392 		return hole_em;
5393 	}
5394 out:
5395 
5396 	free_extent_map(hole_em);
5397 	if (err) {
5398 		free_extent_map(em);
5399 		return ERR_PTR(err);
5400 	}
5401 	return em;
5402 }
5403 
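/*
 * allocate a brand new extent for a direct IO write: reserve the space
 * inside a joined transaction, insert a pinned extent map for the range
 * and queue a DIO ordered extent so the write endio can do the final
 * accounting.
 */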
5404 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5405 						  u64 start, u64 len)
5406 {
5407 	struct btrfs_root *root = BTRFS_I(inode)->root;
5408 	struct btrfs_trans_handle *trans;
5409 	struct extent_map *em;
5410 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5411 	struct btrfs_key ins;
5412 	u64 alloc_hint;
5413 	int ret;
5414 
5415 	btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5416 
5417 	trans = btrfs_join_transaction(root, 0);
5418 	if (IS_ERR(trans))
5419 		return ERR_CAST(trans);
5420 
5421 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5422 
5423 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5424 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5425 				   alloc_hint, (u64)-1, &ins, 1);
5426 	if (ret) {
5427 		em = ERR_PTR(ret);
5428 		goto out;
5429 	}
5430 
5431 	em = alloc_extent_map(GFP_NOFS);
5432 	if (!em) {
5433 		em = ERR_PTR(-ENOMEM);
5434 		goto out;
5435 	}
5436 
5437 	em->start = start;
5438 	em->orig_start = em->start;
5439 	em->len = ins.offset;
5440 
5441 	em->block_start = ins.objectid;
5442 	em->block_len = ins.offset;
5443 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5444 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5445 
5446 	while (1) {
5447 		write_lock(&em_tree->lock);
5448 		ret = add_extent_mapping(em_tree, em);
5449 		write_unlock(&em_tree->lock);
5450 		if (ret != -EEXIST)
5451 			break;
5452 		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5453 	}
5454 
5455 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5456 					   ins.offset, ins.offset, 0);
5457 	if (ret) {
5458 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5459 		em = ERR_PTR(ret);
5460 	}
5461 out:
5462 	btrfs_end_transaction(trans, root);
5463 	return em;
5464 }
5465 
5466 /*
5467  * returns 1 when the nocow is safe, < 0 on error, 0 if the
5468  * block must be cow'd
5469  */
5470 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5471 				      struct inode *inode, u64 offset, u64 len)
5472 {
5473 	struct btrfs_path *path;
5474 	int ret;
5475 	struct extent_buffer *leaf;
5476 	struct btrfs_root *root = BTRFS_I(inode)->root;
5477 	struct btrfs_file_extent_item *fi;
5478 	struct btrfs_key key;
5479 	u64 disk_bytenr;
5480 	u64 backref_offset;
5481 	u64 extent_end;
5482 	u64 num_bytes;
5483 	int slot;
5484 	int found_type;
5485 
5486 	path = btrfs_alloc_path();
5487 	if (!path)
5488 		return -ENOMEM;
5489 
5490 	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
5491 				       offset, 0);
5492 	if (ret < 0)
5493 		goto out;
5494 
5495 	slot = path->slots[0];
5496 	if (ret == 1) {
5497 		if (slot == 0) {
5498 			/* can't find the item, must cow */
5499 			ret = 0;
5500 			goto out;
5501 		}
5502 		slot--;
5503 	}
5504 	ret = 0;
5505 	leaf = path->nodes[0];
5506 	btrfs_item_key_to_cpu(leaf, &key, slot);
5507 	if (key.objectid != inode->i_ino ||
5508 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5509 		/* not our file or wrong item type, must cow */
5510 		goto out;
5511 	}
5512 
5513 	if (key.offset > offset) {
5514 		/* Wrong offset, must cow */
5515 		goto out;
5516 	}
5517 
5518 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5519 	found_type = btrfs_file_extent_type(leaf, fi);
5520 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5521 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5522 		/* not a regular extent, must cow */
5523 		goto out;
5524 	}
5525 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5526 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5527 
5528 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5529 	if (extent_end < offset + len) {
5530 		/* extent doesn't include our full range, must cow */
5531 		goto out;
5532 	}
5533 
5534 	if (btrfs_extent_readonly(root, disk_bytenr))
5535 		goto out;
5536 
5537 	/*
5538 	 * look for other files referencing this extent, if we
5539 	 * find any we must cow
5540 	 */
5541 	if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
5542 				  key.offset - backref_offset, disk_bytenr))
5543 		goto out;
5544 
5545 	/*
5546 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5547 	 * in this extent we are about to write.  If there
5548 	 * are any csums in that range we have to cow in order
5549 	 * to keep the csums correct
5550 	 */
5551 	disk_bytenr += backref_offset;
5552 	disk_bytenr += offset - key.offset;
5553 	num_bytes = min(offset + len, extent_end) - offset;
5554 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5555 		goto out;
5556 	/*
5557 	 * all of the above have passed, it is safe to overwrite this extent
5558 	 * without cow
5559 	 */
5560 	ret = 1;
5561 out:
5562 	btrfs_free_path(path);
5563 	return ret;
5564 }
5565 
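/*
 * get_block callback used by __blockdev_direct_IO.  Map the range onto an
 * existing extent when we can, return -ENOTBLK for inline or compressed
 * extents so the generic code falls back to buffered IO, and for writes
 * either reuse a nocow/prealloc extent or allocate a new one.
 */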
5566 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5567 				   struct buffer_head *bh_result, int create)
5568 {
5569 	struct extent_map *em;
5570 	struct btrfs_root *root = BTRFS_I(inode)->root;
5571 	u64 start = iblock << inode->i_blkbits;
5572 	u64 len = bh_result->b_size;
5573 	struct btrfs_trans_handle *trans;
5574 
5575 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5576 	if (IS_ERR(em))
5577 		return PTR_ERR(em);
5578 
5579 	/*
5580 	 * Ok, for INLINE and COMPRESSED extents we need to fall back on buffered
5581 	 * io.  INLINE is special, and we could probably kludge it in here, but
5582 	 * it's still buffered so for safety let's just fall back to the generic
5583 	 * buffered path.
5584 	 *
5585 	 * For COMPRESSED we _have_ to read the entire extent in so we can
5586 	 * decompress it, so there will be buffering required no matter what we
5587 	 * do, so go ahead and fall back to buffered.
5588 	 *
5589 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
5590 	 * to buffered IO.  Don't blame me, this is the price we pay for using
5591 	 * the generic code.
5592 	 */
5593 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5594 	    em->block_start == EXTENT_MAP_INLINE) {
5595 		free_extent_map(em);
5596 		return -ENOTBLK;
5597 	}
5598 
5599 	/* Just a good old fashioned hole, return */
5600 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5601 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5602 		free_extent_map(em);
5603 		/* DIO will do one hole at a time, so just unlock a sector */
5604 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
5605 			      start + root->sectorsize - 1, GFP_NOFS);
5606 		return 0;
5607 	}
5608 
5609 	/*
5610 	 * We don't allocate a new extent in the following cases
5611 	 *
5612 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
5613 	 * existing extent.
5614 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
5615 	 * just use the extent.
5616 	 *
5617 	 */
5618 	if (!create) {
5619 		len = em->len - (start - em->start);
5620 		goto map;
5621 	}
5622 
5623 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5624 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5625 	     em->block_start != EXTENT_MAP_HOLE)) {
5626 		int type;
5627 		int ret;
5628 		u64 block_start;
5629 
5630 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5631 			type = BTRFS_ORDERED_PREALLOC;
5632 		else
5633 			type = BTRFS_ORDERED_NOCOW;
5634 		len = min(len, em->len - (start - em->start));
5635 		block_start = em->block_start + (start - em->start);
5636 
5637 		/*
5638 		 * we're not going to log anything, but we do need
5639 		 * to make sure the current transaction stays open
5640 		 * while we look for nocow cross refs
5641 		 */
5642 		trans = btrfs_join_transaction(root, 0);
5643 		if (IS_ERR(trans))
5644 			goto must_cow;
5645 
5646 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
5647 			ret = btrfs_add_ordered_extent_dio(inode, start,
5648 					   block_start, len, len, type);
5649 			btrfs_end_transaction(trans, root);
5650 			if (ret) {
5651 				free_extent_map(em);
5652 				return ret;
5653 			}
5654 			goto unlock;
5655 		}
5656 		btrfs_end_transaction(trans, root);
5657 	}
5658 must_cow:
5659 	/*
5660 	 * this will cow the extent, reset the len in case we changed
5661 	 * it above
5662 	 */
5663 	len = bh_result->b_size;
5664 	free_extent_map(em);
5665 	em = btrfs_new_extent_direct(inode, start, len);
5666 	if (IS_ERR(em))
5667 		return PTR_ERR(em);
5668 	len = min(len, em->len - (start - em->start));
5669 unlock:
5670 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5671 			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5672 			  0, NULL, GFP_NOFS);
5673 map:
5674 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5675 		inode->i_blkbits;
5676 	bh_result->b_size = len;
5677 	bh_result->b_bdev = em->bdev;
5678 	set_buffer_mapped(bh_result);
5679 	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5680 		set_buffer_new(bh_result);
5681 
5682 	free_extent_map(em);
5683 
5684 	return 0;
5685 }
5686 
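/* state shared by all the bios a single direct IO request is split into */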
5687 struct btrfs_dio_private {
5688 	struct inode *inode;
5689 	u64 logical_offset;
5690 	u64 disk_bytenr;
5691 	u64 bytes;
5692 	u32 *csums;
5693 	void *private;
5694 
5695 	/* number of bios pending for this dio */
5696 	atomic_t pending_bios;
5697 
5698 	/* IO errors */
5699 	int errors;
5700 
5701 	struct bio *orig_bio;
5702 };
5703 
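/*
 * endio for direct reads: verify the csums we looked up at submit time
 * against what was actually read, unlock the extent range and hand the
 * bio back to the generic direct IO code.
 */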
5704 static void btrfs_endio_direct_read(struct bio *bio, int err)
5705 {
5706 	struct btrfs_dio_private *dip = bio->bi_private;
5707 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5708 	struct bio_vec *bvec = bio->bi_io_vec;
5709 	struct inode *inode = dip->inode;
5710 	struct btrfs_root *root = BTRFS_I(inode)->root;
5711 	u64 start;
5712 	u32 *private = dip->csums;
5713 
5714 	start = dip->logical_offset;
5715 	do {
5716 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
5717 			struct page *page = bvec->bv_page;
5718 			char *kaddr;
5719 			u32 csum = ~(u32)0;
5720 			unsigned long flags;
5721 
5722 			local_irq_save(flags);
5723 			kaddr = kmap_atomic(page, KM_IRQ0);
5724 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
5725 					       csum, bvec->bv_len);
5726 			btrfs_csum_final(csum, (char *)&csum);
5727 			kunmap_atomic(kaddr, KM_IRQ0);
5728 			local_irq_restore(flags);
5729 
5730 			flush_dcache_page(bvec->bv_page);
5731 			if (csum != *private) {
5732 				printk(KERN_ERR "btrfs csum failed ino %lu off"
5733 				      " %llu csum %u private %u\n",
5734 				      inode->i_ino, (unsigned long long)start,
5735 				      csum, *private);
5736 				err = -EIO;
5737 			}
5738 		}
5739 
5740 		start += bvec->bv_len;
5741 		private++;
5742 		bvec++;
5743 	} while (bvec <= bvec_end);
5744 
5745 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5746 		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
5747 	bio->bi_private = dip->private;
5748 
5749 	kfree(dip->csums);
5750 	kfree(dip);
5751 	dio_end_io(bio, err);
5752 }
5753 
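/*
 * endio for direct writes: walk every ordered extent the bio covered,
 * insert or update the file extent items, bump i_size and then pass the
 * bio back to the generic direct IO code.
 */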
5754 static void btrfs_endio_direct_write(struct bio *bio, int err)
5755 {
5756 	struct btrfs_dio_private *dip = bio->bi_private;
5757 	struct inode *inode = dip->inode;
5758 	struct btrfs_root *root = BTRFS_I(inode)->root;
5759 	struct btrfs_trans_handle *trans;
5760 	struct btrfs_ordered_extent *ordered = NULL;
5761 	struct extent_state *cached_state = NULL;
5762 	u64 ordered_offset = dip->logical_offset;
5763 	u64 ordered_bytes = dip->bytes;
5764 	int ret;
5765 
5766 	if (err)
5767 		goto out_done;
5768 again:
5769 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5770 						   &ordered_offset,
5771 						   ordered_bytes);
5772 	if (!ret)
5773 		goto out_test;
5774 
5775 	BUG_ON(!ordered);
5776 
5777 	trans = btrfs_join_transaction(root, 1);
5778 	if (IS_ERR(trans)) {
5779 		err = -ENOMEM;
5780 		goto out;
5781 	}
5782 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5783 
5784 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5785 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5786 		if (!ret)
5787 			ret = btrfs_update_inode(trans, root, inode);
5788 		err = ret;
5789 		goto out;
5790 	}
5791 
5792 	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5793 			 ordered->file_offset + ordered->len - 1, 0,
5794 			 &cached_state, GFP_NOFS);
5795 
5796 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5797 		ret = btrfs_mark_extent_written(trans, inode,
5798 						ordered->file_offset,
5799 						ordered->file_offset +
5800 						ordered->len);
5801 		if (ret) {
5802 			err = ret;
5803 			goto out_unlock;
5804 		}
5805 	} else {
5806 		ret = insert_reserved_file_extent(trans, inode,
5807 						  ordered->file_offset,
5808 						  ordered->start,
5809 						  ordered->disk_len,
5810 						  ordered->len,
5811 						  ordered->len,
5812 						  0, 0, 0,
5813 						  BTRFS_FILE_EXTENT_REG);
5814 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
5815 				   ordered->file_offset, ordered->len);
5816 		if (ret) {
5817 			err = ret;
5818 			WARN_ON(1);
5819 			goto out_unlock;
5820 		}
5821 	}
5822 
5823 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5824 	btrfs_ordered_update_i_size(inode, 0, ordered);
5825 	btrfs_update_inode(trans, root, inode);
5826 out_unlock:
5827 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5828 			     ordered->file_offset + ordered->len - 1,
5829 			     &cached_state, GFP_NOFS);
5830 out:
5831 	btrfs_delalloc_release_metadata(inode, ordered->len);
5832 	btrfs_end_transaction(trans, root);
5833 	ordered_offset = ordered->file_offset + ordered->len;
5834 	btrfs_put_ordered_extent(ordered);
5835 	btrfs_put_ordered_extent(ordered);
5836 
5837 out_test:
5838 	/*
5839 	 * our bio might span multiple ordered extents.  If we haven't
5840 	 * completed the accounting for the whole dio, go back and try again
5841 	 */
5842 	if (ordered_offset < dip->logical_offset + dip->bytes) {
5843 		ordered_bytes = dip->logical_offset + dip->bytes -
5844 			ordered_offset;
5845 		goto again;
5846 	}
5847 out_done:
5848 	bio->bi_private = dip->private;
5849 
5850 	kfree(dip->csums);
5851 	kfree(dip);
5852 	dio_end_io(bio, err);
5853 }
5854 
5855 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5856 				    struct bio *bio, int mirror_num,
5857 				    unsigned long bio_flags, u64 offset)
5858 {
5859 	int ret;
5860 	struct btrfs_root *root = BTRFS_I(inode)->root;
5861 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
5862 	BUG_ON(ret);
5863 	return 0;
5864 }
5865 
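/*
 * endio for each split bio: remember any error in the dip and, when the
 * last pending bio finishes, complete the original bio.
 */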
5866 static void btrfs_end_dio_bio(struct bio *bio, int err)
5867 {
5868 	struct btrfs_dio_private *dip = bio->bi_private;
5869 
5870 	if (err) {
5871 		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
5872 		      "sector %#Lx len %u err no %d\n",
5873 		      dip->inode->i_ino, bio->bi_rw,
5874 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
5875 		dip->errors = 1;
5876 
5877 		/*
5878 		 * before the atomic variable goes to zero, we must make
5879 		 * sure dip->errors is perceived to be set.
5880 		 */
5881 		smp_mb__before_atomic_dec();
5882 	}
5883 
5884 	/* if there are more bios still pending for this dio, just exit */
5885 	if (!atomic_dec_and_test(&dip->pending_bios))
5886 		goto out;
5887 
5888 	if (dip->errors)
5889 		bio_io_error(dip->orig_bio);
5890 	else {
5891 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
5892 		bio_endio(dip->orig_bio, 0);
5893 	}
5894 out:
5895 	bio_put(bio);
5896 }
5897 
5898 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
5899 				       u64 first_sector, gfp_t gfp_flags)
5900 {
5901 	int nr_vecs = bio_get_nr_vecs(bdev);
5902 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
5903 }
5904 
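/*
 * submit one split bio: for writes queue csum generation through the
 * worker threads, for reads look the csums up first, then map the bio to
 * the right device and send it down.
 */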
5905 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5906 					 int rw, u64 file_offset, int skip_sum,
5907 					 u32 *csums)
5908 {
5909 	int write = rw & REQ_WRITE;
5910 	struct btrfs_root *root = BTRFS_I(inode)->root;
5911 	int ret;
5912 
5913 	bio_get(bio);
5914 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
5915 	if (ret)
5916 		goto err;
5917 
5918 	if (write && !skip_sum) {
5919 		ret = btrfs_wq_submit_bio(root->fs_info,
5920 				   inode, rw, bio, 0, 0,
5921 				   file_offset,
5922 				   __btrfs_submit_bio_start_direct_io,
5923 				   __btrfs_submit_bio_done);
5924 		goto err;
5925 	} else if (!skip_sum)
5926 		btrfs_lookup_bio_sums_dio(root, inode, bio,
5927 					  file_offset, csums);
5928 
5929 	ret = btrfs_map_bio(root, rw, bio, 0, 1);
5930 err:
5931 	bio_put(bio);
5932 	return ret;
5933 }
5934 
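/*
 * split the original direct IO bio so that no piece crosses a stripe
 * boundary (btrfs_map_block tells us how far one mapping reaches) and
 * submit the pieces one at a time.
 */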
5935 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5936 				    int skip_sum)
5937 {
5938 	struct inode *inode = dip->inode;
5939 	struct btrfs_root *root = BTRFS_I(inode)->root;
5940 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5941 	struct bio *bio;
5942 	struct bio *orig_bio = dip->orig_bio;
5943 	struct bio_vec *bvec = orig_bio->bi_io_vec;
5944 	u64 start_sector = orig_bio->bi_sector;
5945 	u64 file_offset = dip->logical_offset;
5946 	u64 submit_len = 0;
5947 	u64 map_length;
5948 	int nr_pages = 0;
5949 	u32 *csums = dip->csums;
5950 	int ret = 0;
5951 
5952 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5953 	if (!bio)
5954 		return -ENOMEM;
5955 	bio->bi_private = dip;
5956 	bio->bi_end_io = btrfs_end_dio_bio;
5957 	atomic_inc(&dip->pending_bios);
5958 
5959 	map_length = orig_bio->bi_size;
5960 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5961 			      &map_length, NULL, 0);
5962 	if (ret) {
5963 		bio_put(bio);
5964 		return -EIO;
5965 	}
5966 
5967 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
5968 		if (unlikely(map_length < submit_len + bvec->bv_len ||
5969 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5970 				 bvec->bv_offset) < bvec->bv_len)) {
5971 			/*
5972 			 * inc the count before we submit the bio so
5973 			 * we know the end IO handler won't happen before
5974 			 * we inc the count. Otherwise, the dip might get freed
5975 			 * before we're done setting it up
5976 			 */
5977 			atomic_inc(&dip->pending_bios);
5978 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
5979 						     file_offset, skip_sum,
5980 						     csums);
5981 			if (ret) {
5982 				bio_put(bio);
5983 				atomic_dec(&dip->pending_bios);
5984 				goto out_err;
5985 			}
5986 
5987 			if (!skip_sum)
5988 				csums = csums + nr_pages;
5989 			start_sector += submit_len >> 9;
5990 			file_offset += submit_len;
5991 
5992 			submit_len = 0;
5993 			nr_pages = 0;
5994 
5995 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
5996 						  start_sector, GFP_NOFS);
5997 			if (!bio)
5998 				goto out_err;
5999 			bio->bi_private = dip;
6000 			bio->bi_end_io = btrfs_end_dio_bio;
6001 
6002 			map_length = orig_bio->bi_size;
6003 			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6004 					      &map_length, NULL, 0);
6005 			if (ret) {
6006 				bio_put(bio);
6007 				goto out_err;
6008 			}
6009 		} else {
6010 			submit_len += bvec->bv_len;
6011 			nr_pages++;
6012 			bvec++;
6013 		}
6014 	}
6015 
6016 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6017 				     csums);
6018 	if (!ret)
6019 		return 0;
6020 
6021 	bio_put(bio);
6022 out_err:
6023 	dip->errors = 1;
6024 	/*
6025 	 * before the atomic variable goes to zero, we must
6026 	 * make sure dip->errors is perceived to be set.
6027 	 */
6028 	smp_mb__before_atomic_dec();
6029 	if (atomic_dec_and_test(&dip->pending_bios))
6030 		bio_io_error(dip->orig_bio);
6031 
6032 	/* bio_end_io() will handle error, so we needn't return it */
6033 	return 0;
6034 }
6035 
6036 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6037 				loff_t file_offset)
6038 {
6039 	struct btrfs_root *root = BTRFS_I(inode)->root;
6040 	struct btrfs_dio_private *dip;
6041 	struct bio_vec *bvec = bio->bi_io_vec;
6042 	int skip_sum;
6043 	int write = rw & REQ_WRITE;
6044 	int ret = 0;
6045 
6046 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6047 
6048 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6049 	if (!dip) {
6050 		ret = -ENOMEM;
6051 		goto free_ordered;
6052 	}
6053 	dip->csums = NULL;
6054 
6055 	if (!skip_sum) {
6056 		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6057 		if (!dip->csums) {
6058 			kfree(dip);
6059 			ret = -ENOMEM;
6060 			goto free_ordered;
6061 		}
6062 	}
6063 
6064 	dip->private = bio->bi_private;
6065 	dip->inode = inode;
6066 	dip->logical_offset = file_offset;
6067 
6068 	dip->bytes = 0;
6069 	do {
6070 		dip->bytes += bvec->bv_len;
6071 		bvec++;
6072 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6073 
6074 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6075 	bio->bi_private = dip;
6076 	dip->errors = 0;
6077 	dip->orig_bio = bio;
6078 	atomic_set(&dip->pending_bios, 0);
6079 
6080 	if (write)
6081 		bio->bi_end_io = btrfs_endio_direct_write;
6082 	else
6083 		bio->bi_end_io = btrfs_endio_direct_read;
6084 
6085 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6086 	if (!ret)
6087 		return;
6088 free_ordered:
6089 	/*
6090 	 * If this is a write, we need to clean up the reserved space and kill
6091 	 * the ordered extent.
6092 	 */
6093 	if (write) {
6094 		struct btrfs_ordered_extent *ordered;
6095 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6096 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6097 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6098 			btrfs_free_reserved_extent(root, ordered->start,
6099 						   ordered->disk_len);
6100 		btrfs_put_ordered_extent(ordered);
6101 		btrfs_put_ordered_extent(ordered);
6102 	}
6103 	bio_endio(bio, ret);
6104 }
6105 
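/*
 * direct IO must be aligned to the fs sectorsize, both the file offset
 * and every user buffer.  A non-zero return here makes btrfs_direct_IO
 * return 0 so the generic code falls back to buffered IO.
 */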
6106 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6107 			const struct iovec *iov, loff_t offset,
6108 			unsigned long nr_segs)
6109 {
6110 	int seg;
6111 	size_t size;
6112 	unsigned long addr;
6113 	unsigned blocksize_mask = root->sectorsize - 1;
6114 	ssize_t retval = -EINVAL;
6115 	loff_t end = offset;
6116 
6117 	if (offset & blocksize_mask)
6118 		goto out;
6119 
6120 	/* Check the memory alignment.  Blocks cannot straddle pages */
6121 	for (seg = 0; seg < nr_segs; seg++) {
6122 		addr = (unsigned long)iov[seg].iov_base;
6123 		size = iov[seg].iov_len;
6124 		end += size;
6125 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6126 			goto out;
6127 	}
6128 	retval = 0;
6129 out:
6130 	return retval;
6131 }
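/*
 * our ->direct_IO implementation: lock the extent range, flush out any
 * ordered extents that overlap it, mark writes as delalloc and hand the
 * work to __blockdev_direct_IO with our get_blocks and submit hooks.
 */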
6132 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6133 			const struct iovec *iov, loff_t offset,
6134 			unsigned long nr_segs)
6135 {
6136 	struct file *file = iocb->ki_filp;
6137 	struct inode *inode = file->f_mapping->host;
6138 	struct btrfs_ordered_extent *ordered;
6139 	struct extent_state *cached_state = NULL;
6140 	u64 lockstart, lockend;
6141 	ssize_t ret;
6142 	int writing = rw & WRITE;
6143 	int write_bits = 0;
6144 	size_t count = iov_length(iov, nr_segs);
6145 
6146 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6147 			    offset, nr_segs)) {
6148 		return 0;
6149 	}
6150 
6151 	lockstart = offset;
6152 	lockend = offset + count - 1;
6153 
6154 	if (writing) {
6155 		ret = btrfs_delalloc_reserve_space(inode, count);
6156 		if (ret)
6157 			goto out;
6158 	}
6159 
6160 	while (1) {
6161 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6162 				 0, &cached_state, GFP_NOFS);
6163 		/*
6164 		 * We're concerned with the entire range that we're going to be
6165 		 * doing DIO to, so we need to make sure there are no ordered
6166 		 * extents in this range.
6167 		 */
6168 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6169 						     lockend - lockstart + 1);
6170 		if (!ordered)
6171 			break;
6172 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6173 				     &cached_state, GFP_NOFS);
6174 		btrfs_start_ordered_extent(inode, ordered, 1);
6175 		btrfs_put_ordered_extent(ordered);
6176 		cond_resched();
6177 	}
6178 
6179 	/*
6180 	 * we don't use btrfs_set_extent_delalloc because we don't want
6181 	 * the dirty or uptodate bits
6182 	 */
6183 	if (writing) {
6184 		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6185 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6186 				     EXTENT_DELALLOC, 0, NULL, &cached_state,
6187 				     GFP_NOFS);
6188 		if (ret) {
6189 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6190 					 lockend, EXTENT_LOCKED | write_bits,
6191 					 1, 0, &cached_state, GFP_NOFS);
6192 			goto out;
6193 		}
6194 	}
6195 
6196 	free_extent_state(cached_state);
6197 	cached_state = NULL;
6198 
6199 	ret = __blockdev_direct_IO(rw, iocb, inode,
6200 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6201 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6202 		   btrfs_submit_direct, 0);
6203 
6204 	if (ret < 0 && ret != -EIOCBQUEUED) {
6205 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6206 			      offset + iov_length(iov, nr_segs) - 1,
6207 			      EXTENT_LOCKED | write_bits, 1, 0,
6208 			      &cached_state, GFP_NOFS);
6209 	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6210 		/*
6211 		 * We're falling back to buffered, unlock the section we didn't
6212 		 * do IO on.
6213 		 */
6214 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6215 			      offset + iov_length(iov, nr_segs) - 1,
6216 			      EXTENT_LOCKED | write_bits, 1, 0,
6217 			      &cached_state, GFP_NOFS);
6218 	}
6219 out:
6220 	free_extent_state(cached_state);
6221 	return ret;
6222 }
6223 
6224 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6225 		__u64 start, __u64 len)
6226 {
6227 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6228 }
6229 
6230 int btrfs_readpage(struct file *file, struct page *page)
6231 {
6232 	struct extent_io_tree *tree;
6233 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6234 	return extent_read_full_page(tree, page, btrfs_get_extent);
6235 }
6236 
6237 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6238 {
6239 	struct extent_io_tree *tree;
6240 
6241 
6242 	if (current->flags & PF_MEMALLOC) {
6243 		redirty_page_for_writepage(wbc, page);
6244 		unlock_page(page);
6245 		return 0;
6246 	}
6247 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6248 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6249 }
6250 
6251 int btrfs_writepages(struct address_space *mapping,
6252 		     struct writeback_control *wbc)
6253 {
6254 	struct extent_io_tree *tree;
6255 
6256 	tree = &BTRFS_I(mapping->host)->io_tree;
6257 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6258 }
6259 
6260 static int
6261 btrfs_readpages(struct file *file, struct address_space *mapping,
6262 		struct list_head *pages, unsigned nr_pages)
6263 {
6264 	struct extent_io_tree *tree;
6265 	tree = &BTRFS_I(mapping->host)->io_tree;
6266 	return extent_readpages(tree, mapping, pages, nr_pages,
6267 				btrfs_get_extent);
6268 }
6269 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6270 {
6271 	struct extent_io_tree *tree;
6272 	struct extent_map_tree *map;
6273 	int ret;
6274 
6275 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6276 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6277 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6278 	if (ret == 1) {
6279 		ClearPagePrivate(page);
6280 		set_page_private(page, 0);
6281 		page_cache_release(page);
6282 	}
6283 	return ret;
6284 }
6285 
6286 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6287 {
6288 	if (PageWriteback(page) || PageDirty(page))
6289 		return 0;
6290 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6291 }
6292 
6293 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6294 {
6295 	struct extent_io_tree *tree;
6296 	struct btrfs_ordered_extent *ordered;
6297 	struct extent_state *cached_state = NULL;
6298 	u64 page_start = page_offset(page);
6299 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6300 
6301 
6302 	/*
6303 	 * we have the page locked, so new writeback can't start,
6304 	 * and the dirty bit won't be cleared while we are here.
6305 	 *
6306 	 * Wait for IO on this page so that we can safely clear
6307 	 * the PagePrivate2 bit and do ordered accounting
6308 	 */
6309 	wait_on_page_writeback(page);
6310 
6311 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6312 	if (offset) {
6313 		btrfs_releasepage(page, GFP_NOFS);
6314 		return;
6315 	}
6316 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
6317 			 GFP_NOFS);
6318 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
6319 					   page_offset(page));
6320 	if (ordered) {
6321 		/*
6322 		 * IO on this page will never be started, so we need
6323 		 * to account for any ordered extents now
6324 		 */
6325 		clear_extent_bit(tree, page_start, page_end,
6326 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6327 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
6328 				 &cached_state, GFP_NOFS);
6329 		/*
6330 		 * whoever cleared the private bit is responsible
6331 		 * for the finish_ordered_io
6332 		 */
6333 		if (TestClearPagePrivate2(page)) {
6334 			btrfs_finish_ordered_io(page->mapping->host,
6335 						page_start, page_end);
6336 		}
6337 		btrfs_put_ordered_extent(ordered);
6338 		cached_state = NULL;
6339 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
6340 				 GFP_NOFS);
6341 	}
6342 	clear_extent_bit(tree, page_start, page_end,
6343 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6344 		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
6345 	__btrfs_releasepage(page, GFP_NOFS);
6346 
6347 	ClearPageChecked(page);
6348 	if (PagePrivate(page)) {
6349 		ClearPagePrivate(page);
6350 		set_page_private(page, 0);
6351 		page_cache_release(page);
6352 	}
6353 }
6354 
6355 /*
6356  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6357  * called from a page fault handler when a page is first dirtied. Hence we must
6358  * be careful to check for EOF conditions here. We set the page up correctly
6359  * for a written page which means we get ENOSPC checking when writing into
6360  * holes and correct delalloc and unwritten extent mapping on filesystems that
6361  * support these features.
6362  *
6363  * We are not allowed to take the i_mutex here so we have to play games to
6364  * protect against truncate races as the page could now be beyond EOF.  Because
6365  * vmtruncate() writes the inode size before removing pages, once we have the
6366  * page lock we can determine safely if the page is beyond EOF. If it is not
6367  * beyond EOF, then the page is guaranteed safe against truncation until we
6368  * unlock the page.
6369  */
6370 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6371 {
6372 	struct page *page = vmf->page;
6373 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6374 	struct btrfs_root *root = BTRFS_I(inode)->root;
6375 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6376 	struct btrfs_ordered_extent *ordered;
6377 	struct extent_state *cached_state = NULL;
6378 	char *kaddr;
6379 	unsigned long zero_start;
6380 	loff_t size;
6381 	int ret;
6382 	u64 page_start;
6383 	u64 page_end;
6384 
6385 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6386 	if (ret) {
6387 		if (ret == -ENOMEM)
6388 			ret = VM_FAULT_OOM;
6389 		else /* -ENOSPC, -EIO, etc */
6390 			ret = VM_FAULT_SIGBUS;
6391 		goto out;
6392 	}
6393 
6394 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6395 again:
6396 	lock_page(page);
6397 	size = i_size_read(inode);
6398 	page_start = page_offset(page);
6399 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6400 
6401 	if ((page->mapping != inode->i_mapping) ||
6402 	    (page_start >= size)) {
6403 		/* page got truncated out from underneath us */
6404 		goto out_unlock;
6405 	}
6406 	wait_on_page_writeback(page);
6407 
6408 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
6409 			 GFP_NOFS);
6410 	set_page_extent_mapped(page);
6411 
6412 	/*
6413 	 * we can't set the delalloc bits if there are pending ordered
6414 	 * extents.  Drop our locks and wait for them to finish
6415 	 */
6416 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6417 	if (ordered) {
6418 		unlock_extent_cached(io_tree, page_start, page_end,
6419 				     &cached_state, GFP_NOFS);
6420 		unlock_page(page);
6421 		btrfs_start_ordered_extent(inode, ordered, 1);
6422 		btrfs_put_ordered_extent(ordered);
6423 		goto again;
6424 	}
6425 
6426 	/*
6427 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6428 	 * if it was already dirty, so for space accounting reasons we need to
6429 	 * clear any delalloc bits for the range we are about to save.  There
6430 	 * is probably a better way to do this, but for now keep consistent with
6431 	 * prepare_pages in the normal write path.
6432 	 */
6433 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6434 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
6435 			  0, 0, &cached_state, GFP_NOFS);
6436 
6437 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6438 					&cached_state);
6439 	if (ret) {
6440 		unlock_extent_cached(io_tree, page_start, page_end,
6441 				     &cached_state, GFP_NOFS);
6442 		ret = VM_FAULT_SIGBUS;
6443 		goto out_unlock;
6444 	}
6445 	ret = 0;
6446 
6447 	/* page is wholly or partially inside EOF */
6448 	if (page_start + PAGE_CACHE_SIZE > size)
6449 		zero_start = size & ~PAGE_CACHE_MASK;
6450 	else
6451 		zero_start = PAGE_CACHE_SIZE;
6452 
6453 	if (zero_start != PAGE_CACHE_SIZE) {
6454 		kaddr = kmap(page);
6455 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6456 		flush_dcache_page(page);
6457 		kunmap(page);
6458 	}
6459 	ClearPageChecked(page);
6460 	set_page_dirty(page);
6461 	SetPageUptodate(page);
6462 
6463 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6464 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6465 
6466 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6467 
6468 out_unlock:
6469 	if (!ret)
6470 		return VM_FAULT_LOCKED;
6471 	unlock_page(page);
6472 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6473 out:
6474 	return ret;
6475 }
6476 
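/*
 * truncate everything past inode->i_size: zero out the partial tail
 * block, wait for ordered IO, then drop the items in a loop of small
 * transactions so a big truncate never has to fit in a single commit.
 */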
6477 static void btrfs_truncate(struct inode *inode)
6478 {
6479 	struct btrfs_root *root = BTRFS_I(inode)->root;
6480 	int ret;
6481 	struct btrfs_trans_handle *trans;
6482 	unsigned long nr;
6483 	u64 mask = root->sectorsize - 1;
6484 
6485 	if (!S_ISREG(inode->i_mode)) {
6486 		WARN_ON(1);
6487 		return;
6488 	}
6489 
6490 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6491 	if (ret)
6492 		return;
6493 
6494 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6495 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6496 
6497 	trans = btrfs_start_transaction(root, 0);
6498 	BUG_ON(IS_ERR(trans));
6499 	btrfs_set_trans_block_group(trans, inode);
6500 	trans->block_rsv = root->orphan_block_rsv;
6501 
6502 	/*
6503 	 * setattr is responsible for setting the ordered_data_close flag,
6504 	 * but that is only tested during the last file release.  That
6505 	 * could happen well after the next commit, leaving a great big
6506 	 * window where new writes may get lost if someone chooses to write
6507 	 * to this file after truncating to zero
6508 	 *
6509 	 * The inode doesn't have any dirty data here, and so if we commit
6510 	 * this is a noop.  If someone immediately starts writing to the inode
6511 	 * it is very likely we'll catch some of their writes in this
6512 	 * transaction, and the commit will find this file on the ordered
6513 	 * data list with good things to send down.
6514 	 *
6515 	 * This is a best effort solution, there is still a window where
6516 	 * using truncate to replace the contents of the file will
6517 	 * end up with a zero length file after a crash.
6518 	 */
6519 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
6520 		btrfs_add_ordered_operation(trans, root, inode);
6521 
6522 	while (1) {
6523 		if (!trans) {
6524 			trans = btrfs_start_transaction(root, 0);
6525 			BUG_ON(IS_ERR(trans));
6526 			btrfs_set_trans_block_group(trans, inode);
6527 			trans->block_rsv = root->orphan_block_rsv;
6528 		}
6529 
6530 		ret = btrfs_block_rsv_check(trans, root,
6531 					    root->orphan_block_rsv, 0, 5);
6532 		if (ret) {
6533 			BUG_ON(ret != -EAGAIN);
6534 			ret = btrfs_commit_transaction(trans, root);
6535 			BUG_ON(ret);
6536 			trans = NULL;
6537 			continue;
6538 		}
6539 
6540 		ret = btrfs_truncate_inode_items(trans, root, inode,
6541 						 inode->i_size,
6542 						 BTRFS_EXTENT_DATA_KEY);
6543 		if (ret != -EAGAIN)
6544 			break;
6545 
6546 		ret = btrfs_update_inode(trans, root, inode);
6547 		BUG_ON(ret);
6548 
6549 		nr = trans->blocks_used;
6550 		btrfs_end_transaction(trans, root);
6551 		trans = NULL;
6552 		btrfs_btree_balance_dirty(root, nr);
6553 	}
6554 
6555 	if (ret == 0 && inode->i_nlink > 0) {
6556 		ret = btrfs_orphan_del(trans, inode);
6557 		BUG_ON(ret);
6558 	}
6559 
6560 	ret = btrfs_update_inode(trans, root, inode);
6561 	BUG_ON(ret);
6562 
6563 	nr = trans->blocks_used;
6564 	ret = btrfs_end_transaction_throttle(trans, root);
6565 	BUG_ON(ret);
6566 	btrfs_btree_balance_dirty(root, nr);
6567 }
6568 
6569 /*
6570  * create a new subvolume directory/inode (helper for the ioctl).
6571  */
6572 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6573 			     struct btrfs_root *new_root,
6574 			     u64 new_dirid, u64 alloc_hint)
6575 {
6576 	struct inode *inode;
6577 	int err;
6578 	u64 index = 0;
6579 
6580 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
6581 				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
6582 	if (IS_ERR(inode))
6583 		return PTR_ERR(inode);
6584 	inode->i_op = &btrfs_dir_inode_operations;
6585 	inode->i_fop = &btrfs_dir_file_operations;
6586 
6587 	inode->i_nlink = 1;
6588 	btrfs_i_size_write(inode, 0);
6589 
6590 	err = btrfs_update_inode(trans, new_root, inode);
6591 	BUG_ON(err);
6592 
6593 	iput(inode);
6594 	return 0;
6595 }
6596 
6597 /* helper function for file defrag and space balancing.  This
6598  * forces readahead on a given range of bytes in an inode
6599  */
6600 unsigned long btrfs_force_ra(struct address_space *mapping,
6601 			      struct file_ra_state *ra, struct file *file,
6602 			      pgoff_t offset, pgoff_t last_index)
6603 {
6604 	pgoff_t req_size = last_index - offset + 1;
6605 
6606 	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
6607 	return offset + req_size;
6608 }
6609 
6610 struct inode *btrfs_alloc_inode(struct super_block *sb)
6611 {
6612 	struct btrfs_inode *ei;
6613 	struct inode *inode;
6614 
6615 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
6616 	if (!ei)
6617 		return NULL;
6618 
6619 	ei->root = NULL;
6620 	ei->space_info = NULL;
6621 	ei->generation = 0;
6622 	ei->sequence = 0;
6623 	ei->last_trans = 0;
6624 	ei->last_sub_trans = 0;
6625 	ei->logged_trans = 0;
6626 	ei->delalloc_bytes = 0;
6627 	ei->reserved_bytes = 0;
6628 	ei->disk_i_size = 0;
6629 	ei->flags = 0;
6630 	ei->index_cnt = (u64)-1;
6631 	ei->last_unlink_trans = 0;
6632 
6633 	spin_lock_init(&ei->accounting_lock);
6634 	atomic_set(&ei->outstanding_extents, 0);
6635 	ei->reserved_extents = 0;
6636 
6637 	ei->ordered_data_close = 0;
6638 	ei->orphan_meta_reserved = 0;
6639 	ei->dummy_inode = 0;
6640 	ei->force_compress = BTRFS_COMPRESS_NONE;
6641 
6642 	inode = &ei->vfs_inode;
6643 	extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
6644 	extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
6645 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
6646 	mutex_init(&ei->log_mutex);
6647 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
6648 	INIT_LIST_HEAD(&ei->i_orphan);
6649 	INIT_LIST_HEAD(&ei->delalloc_inodes);
6650 	INIT_LIST_HEAD(&ei->ordered_operations);
6651 	RB_CLEAR_NODE(&ei->rb_node);
6652 
6653 	return inode;
6654 }
6655 
6656 static void btrfs_i_callback(struct rcu_head *head)
6657 {
6658 	struct inode *inode = container_of(head, struct inode, i_rcu);
6659 	INIT_LIST_HEAD(&inode->i_dentry);
6660 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
6661 }
6662 
6663 void btrfs_destroy_inode(struct inode *inode)
6664 {
6665 	struct btrfs_ordered_extent *ordered;
6666 	struct btrfs_root *root = BTRFS_I(inode)->root;
6667 
6668 	WARN_ON(!list_empty(&inode->i_dentry));
6669 	WARN_ON(inode->i_data.nrpages);
6670 	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
6671 	WARN_ON(BTRFS_I(inode)->reserved_extents);
6672 
6673 	/*
6674 	 * This can happen when we create an inode, but somebody else also
6675 	 * created the same inode and we need to destroy the one we already
6676 	 * created.
6677 	 */
6678 	if (!root)
6679 		goto free;
6680 
6681 	/*
6682 	 * Make sure we're properly removed from the ordered operation
6683 	 * lists.
6684 	 */
6685 	smp_mb();
6686 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
6687 		spin_lock(&root->fs_info->ordered_extent_lock);
6688 		list_del_init(&BTRFS_I(inode)->ordered_operations);
6689 		spin_unlock(&root->fs_info->ordered_extent_lock);
6690 	}
6691 
6692 	if (root == root->fs_info->tree_root) {
6693 		struct btrfs_block_group_cache *block_group;
6694 
6695 		block_group = btrfs_lookup_block_group(root->fs_info,
6696 						BTRFS_I(inode)->block_group);
6697 		if (block_group && block_group->inode == inode) {
6698 			spin_lock(&block_group->lock);
6699 			block_group->inode = NULL;
6700 			spin_unlock(&block_group->lock);
6701 			btrfs_put_block_group(block_group);
6702 		} else if (block_group) {
6703 			btrfs_put_block_group(block_group);
6704 		}
6705 	}
6706 
6707 	spin_lock(&root->orphan_lock);
6708 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6709 		printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
6710 		       inode->i_ino);
6711 		list_del_init(&BTRFS_I(inode)->i_orphan);
6712 	}
6713 	spin_unlock(&root->orphan_lock);
6714 
6715 	while (1) {
6716 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
6717 		if (!ordered)
6718 			break;
6719 		else {
6720 			printk(KERN_ERR "btrfs found ordered "
6721 			       "extent %llu %llu on inode cleanup\n",
6722 			       (unsigned long long)ordered->file_offset,
6723 			       (unsigned long long)ordered->len);
6724 			btrfs_remove_ordered_extent(inode, ordered);
6725 			btrfs_put_ordered_extent(ordered);
6726 			btrfs_put_ordered_extent(ordered);
6727 		}
6728 	}
6729 	inode_tree_del(inode);
6730 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
6731 free:
6732 	call_rcu(&inode->i_rcu, btrfs_i_callback);
6733 }
6734 
6735 int btrfs_drop_inode(struct inode *inode)
6736 {
6737 	struct btrfs_root *root = BTRFS_I(inode)->root;
6738 
6739 	if (btrfs_root_refs(&root->root_item) == 0 &&
6740 	    root != root->fs_info->tree_root)
6741 		return 1;
6742 	else
6743 		return generic_drop_inode(inode);
6744 }
6745 
6746 static void init_once(void *foo)
6747 {
6748 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
6749 
6750 	inode_init_once(&ei->vfs_inode);
6751 }
6752 
6753 void btrfs_destroy_cachep(void)
6754 {
6755 	if (btrfs_inode_cachep)
6756 		kmem_cache_destroy(btrfs_inode_cachep);
6757 	if (btrfs_trans_handle_cachep)
6758 		kmem_cache_destroy(btrfs_trans_handle_cachep);
6759 	if (btrfs_transaction_cachep)
6760 		kmem_cache_destroy(btrfs_transaction_cachep);
6761 	if (btrfs_path_cachep)
6762 		kmem_cache_destroy(btrfs_path_cachep);
6763 }
6764 
6765 int btrfs_init_cachep(void)
6766 {
6767 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
6768 			sizeof(struct btrfs_inode), 0,
6769 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
6770 	if (!btrfs_inode_cachep)
6771 		goto fail;
6772 
6773 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
6774 			sizeof(struct btrfs_trans_handle), 0,
6775 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6776 	if (!btrfs_trans_handle_cachep)
6777 		goto fail;
6778 
6779 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
6780 			sizeof(struct btrfs_transaction), 0,
6781 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6782 	if (!btrfs_transaction_cachep)
6783 		goto fail;
6784 
6785 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
6786 			sizeof(struct btrfs_path), 0,
6787 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6788 	if (!btrfs_path_cachep)
6789 		goto fail;
6790 
6791 	return 0;
6792 fail:
6793 	btrfs_destroy_cachep();
6794 	return -ENOMEM;
6795 }
6796 
6797 static int btrfs_getattr(struct vfsmount *mnt,
6798 			 struct dentry *dentry, struct kstat *stat)
6799 {
6800 	struct inode *inode = dentry->d_inode;
6801 	generic_fillattr(inode, stat);
6802 	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
6803 	stat->blksize = PAGE_CACHE_SIZE;
6804 	stat->blocks = (inode_get_bytes(inode) +
6805 			BTRFS_I(inode)->delalloc_bytes) >> 9;
6806 	return 0;
6807 }
6808 
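/*
 * rename within btrfs: start a generously reserved transaction, move the
 * directory entries and inode refs, and log the new name so that after a
 * crash the inode shows up under either the old name or the new one.
 */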
6809 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6810 			   struct inode *new_dir, struct dentry *new_dentry)
6811 {
6812 	struct btrfs_trans_handle *trans;
6813 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
6814 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
6815 	struct inode *new_inode = new_dentry->d_inode;
6816 	struct inode *old_inode = old_dentry->d_inode;
6817 	struct timespec ctime = CURRENT_TIME;
6818 	u64 index = 0;
6819 	u64 root_objectid;
6820 	int ret;
6821 
6822 	if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6823 		return -EPERM;
6824 
6825 	/* we only allow rename subvolume link between subvolumes */
6826 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
6827 		return -EXDEV;
6828 
6829 	if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
6830 	    (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
6831 		return -ENOTEMPTY;
6832 
6833 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
6834 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
6835 		return -ENOTEMPTY;
6836 	/*
6837 	 * we're using rename to replace one file with another,
6838 	 * and the replacement file is large.  Start IO on it now so
6839 	 * we don't add too much work to the end of the transaction
6840 	 */
6841 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
6842 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
6843 		filemap_flush(old_inode->i_mapping);
6844 
6845 	/* close the racy window with snapshot create/destroy ioctl */
6846 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
6847 		down_read(&root->fs_info->subvol_sem);
6848 	/*
6849 	 * We want to reserve the absolute worst case amount of items.  So if
6850 	 * both inodes are subvols and we need to unlink them then that would
6851 	 * require 4 item modifications, but if they are both normal inodes it
6852 	 * would require 5 item modifications, so we'll assume they're normal
6853 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
6854 	 * should cover the worst case number of items we'll modify.
6855 	 */
6856 	trans = btrfs_start_transaction(root, 20);
6857 	if (IS_ERR(trans))
6858 		return PTR_ERR(trans);
6859 
6860 	btrfs_set_trans_block_group(trans, new_dir);
6861 
6862 	if (dest != root)
6863 		btrfs_record_root_in_trans(trans, dest);
6864 
6865 	ret = btrfs_set_inode_index(new_dir, &index);
6866 	if (ret)
6867 		goto out_fail;
6868 
6869 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
6870 		/* force full log commit if subvolume involved. */
6871 		root->fs_info->last_trans_log_full_commit = trans->transid;
6872 	} else {
6873 		ret = btrfs_insert_inode_ref(trans, dest,
6874 					     new_dentry->d_name.name,
6875 					     new_dentry->d_name.len,
6876 					     old_inode->i_ino,
6877 					     new_dir->i_ino, index);
6878 		if (ret)
6879 			goto out_fail;
6880 		/*
6881 		 * this is an ugly little race, but the rename is required
6882 		 * to make sure that if we crash, the inode is either at the
6883 		 * old name or the new one.  pinning the log transaction lets
6884 		 * us make sure we don't allow a log commit to come in after
6885 		 * we unlink the name but before we add the new name back in.
6886 		 */
6887 		btrfs_pin_log_trans(root);
6888 	}
6889 	/*
6890 	 * make sure the inode gets flushed if it is replacing
6891 	 * something.
6892 	 */
6893 	if (new_inode && new_inode->i_size &&
6894 	    old_inode && S_ISREG(old_inode->i_mode)) {
6895 		btrfs_add_ordered_operation(trans, root, old_inode);
6896 	}
6897 
6898 	old_dir->i_ctime = old_dir->i_mtime = ctime;
6899 	new_dir->i_ctime = new_dir->i_mtime = ctime;
6900 	old_inode->i_ctime = ctime;
6901 
6902 	if (old_dentry->d_parent != new_dentry->d_parent)
6903 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
6904 
6905 	if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
6906 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
6907 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
6908 					old_dentry->d_name.name,
6909 					old_dentry->d_name.len);
6910 	} else {
6911 		btrfs_inc_nlink(old_dentry->d_inode);
6912 		ret = btrfs_unlink_inode(trans, root, old_dir,
6913 					 old_dentry->d_inode,
6914 					 old_dentry->d_name.name,
6915 					 old_dentry->d_name.len);
6916 	}
6917 	BUG_ON(ret);
6918 
6919 	if (new_inode) {
6920 		new_inode->i_ctime = CURRENT_TIME;
6921 		if (unlikely(new_inode->i_ino ==
6922 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
6923 			root_objectid = BTRFS_I(new_inode)->location.objectid;
6924 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
6925 						root_objectid,
6926 						new_dentry->d_name.name,
6927 						new_dentry->d_name.len);
6928 			BUG_ON(new_inode->i_nlink == 0);
6929 		} else {
6930 			ret = btrfs_unlink_inode(trans, dest, new_dir,
6931 						 new_dentry->d_inode,
6932 						 new_dentry->d_name.name,
6933 						 new_dentry->d_name.len);
6934 		}
6935 		BUG_ON(ret);
6936 		if (new_inode->i_nlink == 0) {
6937 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
6938 			BUG_ON(ret);
6939 		}
6940 	}
6941 
6942 	ret = btrfs_add_link(trans, new_dir, old_inode,
6943 			     new_dentry->d_name.name,
6944 			     new_dentry->d_name.len, 0, index);
6945 	BUG_ON(ret);
6946 
6947 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
6948 		struct dentry *parent = dget_parent(new_dentry);
6949 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
6950 		dput(parent);
6951 		btrfs_end_log_trans(root);
6952 	}
6953 out_fail:
6954 	btrfs_end_transaction_throttle(trans, root);
6955 
6956 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
6957 		up_read(&root->fs_info->subvol_sem);
6958 
6959 	return ret;
6960 }
6961 
6962 /*
6963  * some fairly slow code that needs optimization. This walks the list
6964  * of all the inodes with pending delalloc and forces them to disk.
6965  */
6966 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
6967 {
6968 	struct list_head *head = &root->fs_info->delalloc_inodes;
6969 	struct btrfs_inode *binode;
6970 	struct inode *inode;
6971 
6972 	if (root->fs_info->sb->s_flags & MS_RDONLY)
6973 		return -EROFS;
6974 
6975 	spin_lock(&root->fs_info->delalloc_lock);
6976 	while (!list_empty(head)) {
6977 		binode = list_entry(head->next, struct btrfs_inode,
6978 				    delalloc_inodes);
6979 		inode = igrab(&binode->vfs_inode);
6980 		if (!inode)
6981 			list_del_init(&binode->delalloc_inodes);
6982 		spin_unlock(&root->fs_info->delalloc_lock);
6983 		if (inode) {
6984 			filemap_flush(inode->i_mapping);
6985 			if (delay_iput)
6986 				btrfs_add_delayed_iput(inode);
6987 			else
6988 				iput(inode);
6989 		}
6990 		cond_resched();
6991 		spin_lock(&root->fs_info->delalloc_lock);
6992 	}
6993 	spin_unlock(&root->fs_info->delalloc_lock);
6994 
6995 	/* the filemap_flush will queue IO into the worker threads, but
6996 	 * we have to make sure the IO is actually started and that
6997 	 * ordered extents get created before we return
6998 	 */
6999 	atomic_inc(&root->fs_info->async_submit_draining);
7000 	while (atomic_read(&root->fs_info->nr_async_submits) ||
7001 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7002 		wait_event(root->fs_info->async_submit_wait,
7003 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
7004 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7005 	}
7006 	atomic_dec(&root->fs_info->async_submit_draining);
7007 	return 0;
7008 }
7009 
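/*
 * flush a single inode from the fs-wide delalloc list.  With sync set we
 * also wait for the ordered IO to finish.  Returns 1 if an inode was
 * found and flushed, 0 if the list was empty.
 */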
7010 int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
7011 				   int sync)
7012 {
7013 	struct btrfs_inode *binode;
7014 	struct inode *inode = NULL;
7015 
7016 	spin_lock(&root->fs_info->delalloc_lock);
7017 	while (!list_empty(&root->fs_info->delalloc_inodes)) {
7018 		binode = list_entry(root->fs_info->delalloc_inodes.next,
7019 				    struct btrfs_inode, delalloc_inodes);
7020 		inode = igrab(&binode->vfs_inode);
7021 		if (inode) {
7022 			list_move_tail(&binode->delalloc_inodes,
7023 				       &root->fs_info->delalloc_inodes);
7024 			break;
7025 		}
7026 
7027 		list_del_init(&binode->delalloc_inodes);
7028 		cond_resched_lock(&root->fs_info->delalloc_lock);
7029 	}
7030 	spin_unlock(&root->fs_info->delalloc_lock);
7031 
7032 	if (inode) {
7033 		if (sync) {
7034 			filemap_write_and_wait(inode->i_mapping);
7035 			/*
7036 			 * We have to do this because compression doesn't
7037 			 * actually set PG_writeback until it submits the pages
7038 			 * for IO, which happens in an async thread, so we could
7039 			 * race and not actually wait for any writeback pages
7040 			 * because they've not been submitted yet.  Technically
7041 			 * this could still be the case for the ordered stuff
7042 			 * since the async thread may not have started to do its
7043 			 * work yet.  If this becomes the case then we need to
7044 			 * figure out a way to make sure that in writepage we
7045 			 * wait for any async pages to be submitted before
7046 			 * returning so that fdatawait does what it's supposed to
7047 			 * do.
7048 			 */
7049 			btrfs_wait_ordered_range(inode, 0, (u64)-1);
7050 		} else {
7051 			filemap_flush(inode->i_mapping);
7052 		}
7053 		if (delay_iput)
7054 			btrfs_add_delayed_iput(inode);
7055 		else
7056 			iput(inode);
7057 		return 1;
7058 	}
7059 	return 0;
7060 }
7061 
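/*
 * create a symlink: the target string is stored as an inline file extent
 * item in the new inode.
 */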
7062 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7063 			 const char *symname)
7064 {
7065 	struct btrfs_trans_handle *trans;
7066 	struct btrfs_root *root = BTRFS_I(dir)->root;
7067 	struct btrfs_path *path;
7068 	struct btrfs_key key;
7069 	struct inode *inode = NULL;
7070 	int err;
7071 	int drop_inode = 0;
7072 	u64 objectid;
7073 	u64 index = 0;
7074 	int name_len;
7075 	int datasize;
7076 	unsigned long ptr;
7077 	struct btrfs_file_extent_item *ei;
7078 	struct extent_buffer *leaf;
7079 	unsigned long nr = 0;
7080 
7081 	name_len = strlen(symname) + 1;
7082 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7083 		return -ENAMETOOLONG;
7084 
7085 	err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
7086 	if (err)
7087 		return err;
7088 	/*
7089 	 * 2 items for inode item and ref
7090 	 * 2 items for dir items
7091 	 * 1 item for xattr if selinux is on
7092 	 */
7093 	trans = btrfs_start_transaction(root, 5);
7094 	if (IS_ERR(trans))
7095 		return PTR_ERR(trans);
7096 
7097 	btrfs_set_trans_block_group(trans, dir);
7098 
7099 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7100 				dentry->d_name.len, dir->i_ino, objectid,
7101 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
7102 				&index);
7103 	err = PTR_ERR(inode);
7104 	if (IS_ERR(inode))
7105 		goto out_unlock;
7106 
7107 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7108 	if (err) {
7109 		drop_inode = 1;
7110 		goto out_unlock;
7111 	}
7112 
7113 	btrfs_set_trans_block_group(trans, inode);
7114 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7115 	if (err)
7116 		drop_inode = 1;
7117 	else {
7118 		inode->i_mapping->a_ops = &btrfs_aops;
7119 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7120 		inode->i_fop = &btrfs_file_operations;
7121 		inode->i_op = &btrfs_file_inode_operations;
7122 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7123 	}
7124 	btrfs_update_inode_block_group(trans, inode);
7125 	btrfs_update_inode_block_group(trans, dir);
7126 	if (drop_inode)
7127 		goto out_unlock;
7128 
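	/* Store the symlink target as an inline file extent at offset 0. */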
7129 	path = btrfs_alloc_path();
7130 	BUG_ON(!path);
7131 	key.objectid = inode->i_ino;
7132 	key.offset = 0;
7133 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7134 	datasize = btrfs_file_extent_calc_inline_size(name_len);
7135 	err = btrfs_insert_empty_item(trans, root, path, &key,
7136 				      datasize);
7137 	if (err) {
		btrfs_free_path(path);
7138 		drop_inode = 1;
7139 		goto out_unlock;
7140 	}
7141 	leaf = path->nodes[0];
7142 	ei = btrfs_item_ptr(leaf, path->slots[0],
7143 			    struct btrfs_file_extent_item);
7144 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
7145 	btrfs_set_file_extent_type(leaf, ei,
7146 				   BTRFS_FILE_EXTENT_INLINE);
7147 	btrfs_set_file_extent_encryption(leaf, ei, 0);
7148 	btrfs_set_file_extent_compression(leaf, ei, 0);
7149 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
7150 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
7151 
7152 	ptr = btrfs_file_extent_inline_start(ei);
7153 	write_extent_buffer(leaf, symname, ptr, name_len);
7154 	btrfs_mark_buffer_dirty(leaf);
7155 	btrfs_free_path(path);
7156 
7157 	inode->i_op = &btrfs_symlink_inode_operations;
7158 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7159 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
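	/*
	 * name_len includes the trailing NUL: the inline item stores it, but
	 * i_size is the length of the target string without it.
	 */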
7160 	inode_set_bytes(inode, name_len);
7161 	btrfs_i_size_write(inode, name_len - 1);
7162 	err = btrfs_update_inode(trans, root, inode);
7163 	if (err)
7164 		drop_inode = 1;
7165 
7166 out_unlock:
7167 	nr = trans->blocks_used;
7168 	btrfs_end_transaction_throttle(trans, root);
7169 	if (drop_inode) {
7170 		inode_dec_link_count(inode);
7171 		iput(inode);
7172 	}
7173 	btrfs_btree_balance_dirty(root, nr);
7174 	return err;
7175 }
7176 
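/*
 * Preallocate extents for the byte range [start, start + num_bytes).
 * Each loop iteration reserves one extent of at least min_size bytes and
 * records it as a BTRFS_FILE_EXTENT_PREALLOC file extent item.  If no
 * transaction is passed in, one is started and ended for every extent.
 */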
7177 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7178 				       u64 start, u64 num_bytes, u64 min_size,
7179 				       loff_t actual_len, u64 *alloc_hint,
7180 				       struct btrfs_trans_handle *trans)
7181 {
7182 	struct btrfs_root *root = BTRFS_I(inode)->root;
7183 	struct btrfs_key ins;
7184 	u64 cur_offset = start;
7185 	u64 i_size;
7186 	int ret = 0;
7187 	bool own_trans = true;
7188 
7189 	if (trans)
7190 		own_trans = false;
7191 	while (num_bytes > 0) {
7192 		if (own_trans) {
7193 			trans = btrfs_start_transaction(root, 3);
7194 			if (IS_ERR(trans)) {
7195 				ret = PTR_ERR(trans);
7196 				break;
7197 			}
7198 		}
7199 
7200 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7201 					   0, *alloc_hint, (u64)-1, &ins, 1);
7202 		if (ret) {
7203 			if (own_trans)
7204 				btrfs_end_transaction(trans, root);
7205 			break;
7206 		}
7207 
7208 		ret = insert_reserved_file_extent(trans, inode,
7209 						  cur_offset, ins.objectid,
7210 						  ins.offset, ins.offset,
7211 						  ins.offset, 0, 0, 0,
7212 						  BTRFS_FILE_EXTENT_PREALLOC);
7213 		BUG_ON(ret);
7214 		btrfs_drop_extent_cache(inode, cur_offset,
7215 					cur_offset + ins.offset - 1, 0);
7216 
7217 		num_bytes -= ins.offset;
7218 		cur_offset += ins.offset;
7219 		*alloc_hint = ins.objectid + ins.offset;
7220 
7221 		inode->i_ctime = CURRENT_TIME;
7222 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
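		/*
		 * Unless the caller asked to keep the size, grow i_size as
		 * preallocated extents extend past the old EOF, but never
		 * beyond actual_len.
		 */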
7223 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7224 		    (actual_len > inode->i_size) &&
7225 		    (cur_offset > inode->i_size)) {
7226 			if (cur_offset > actual_len)
7227 				i_size = actual_len;
7228 			else
7229 				i_size = cur_offset;
7230 			i_size_write(inode, i_size);
7231 			btrfs_ordered_update_i_size(inode, i_size, NULL);
7232 		}
7233 
7234 		ret = btrfs_update_inode(trans, root, inode);
7235 		BUG_ON(ret);
7236 
7237 		if (own_trans)
7238 			btrfs_end_transaction(trans, root);
7239 	}
7240 	return ret;
7241 }
7242 
7243 int btrfs_prealloc_file_range(struct inode *inode, int mode,
7244 			      u64 start, u64 num_bytes, u64 min_size,
7245 			      loff_t actual_len, u64 *alloc_hint)
7246 {
7247 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7248 					   min_size, actual_len, alloc_hint,
7249 					   NULL);
7250 }
7251 
7252 int btrfs_prealloc_file_range_trans(struct inode *inode,
7253 				    struct btrfs_trans_handle *trans, int mode,
7254 				    u64 start, u64 num_bytes, u64 min_size,
7255 				    loff_t actual_len, u64 *alloc_hint)
7256 {
7257 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7258 					   min_size, actual_len, alloc_hint, trans);
7259 }
7260 
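/*
 * Data pages are tracked through the extent_io tree rather than buffer
 * heads, so dirtying is just the plain nobuffers variant.
 */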
7261 static int btrfs_set_page_dirty(struct page *page)
7262 {
7263 	return __set_page_dirty_nobuffers(page);
7264 }
7265 
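/*
 * Deny write access to read-only subvolumes and to inodes flagged
 * read-only; everything else is left to generic_permission().
 */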
7266 static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
7267 {
7268 	struct btrfs_root *root = BTRFS_I(inode)->root;
7269 
7270 	if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
7271 		return -EROFS;
7272 	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
7273 		return -EACCES;
7274 	return generic_permission(inode, mask, flags, btrfs_check_acl);
7275 }
7276 
7277 static const struct inode_operations btrfs_dir_inode_operations = {
7278 	.getattr	= btrfs_getattr,
7279 	.lookup		= btrfs_lookup,
7280 	.create		= btrfs_create,
7281 	.unlink		= btrfs_unlink,
7282 	.link		= btrfs_link,
7283 	.mkdir		= btrfs_mkdir,
7284 	.rmdir		= btrfs_rmdir,
7285 	.rename		= btrfs_rename,
7286 	.symlink	= btrfs_symlink,
7287 	.setattr	= btrfs_setattr,
7288 	.mknod		= btrfs_mknod,
7289 	.setxattr	= btrfs_setxattr,
7290 	.getxattr	= btrfs_getxattr,
7291 	.listxattr	= btrfs_listxattr,
7292 	.removexattr	= btrfs_removexattr,
7293 	.permission	= btrfs_permission,
7294 };
7295 static const struct inode_operations btrfs_dir_ro_inode_operations = {
7296 	.lookup		= btrfs_lookup,
7297 	.permission	= btrfs_permission,
7298 };
7299 
7300 static const struct file_operations btrfs_dir_file_operations = {
7301 	.llseek		= generic_file_llseek,
7302 	.read		= generic_read_dir,
7303 	.readdir	= btrfs_real_readdir,
7304 	.unlocked_ioctl	= btrfs_ioctl,
7305 #ifdef CONFIG_COMPAT
7306 	.compat_ioctl	= btrfs_ioctl,
7307 #endif
7308 	.release        = btrfs_release_file,
7309 	.fsync		= btrfs_sync_file,
7310 };
7311 
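/*
 * Hooks the generic extent_io code calls back into for delalloc, bio
 * submission and page writeback.
 */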
7312 static struct extent_io_ops btrfs_extent_io_ops = {
7313 	.fill_delalloc = run_delalloc_range,
7314 	.submit_bio_hook = btrfs_submit_bio_hook,
7315 	.merge_bio_hook = btrfs_merge_bio_hook,
7316 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7317 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7318 	.writepage_start_hook = btrfs_writepage_start_hook,
7319 	.readpage_io_failed_hook = btrfs_io_failed_hook,
7320 	.set_bit_hook = btrfs_set_bit_hook,
7321 	.clear_bit_hook = btrfs_clear_bit_hook,
7322 	.merge_extent_hook = btrfs_merge_extent_hook,
7323 	.split_extent_hook = btrfs_split_extent_hook,
7324 };
7325 
7326 /*
7327  * btrfs doesn't support the bmap operation because swapfiles
7328  * use bmap to make a mapping of extents in the file.  They assume
7329  * these extents won't change over the life of the file and they
7330  * use the bmap result to do IO directly to the drive.
7331  *
7332  * The btrfs bmap call would return logical addresses that aren't
7333  * suitable for IO, and they will also change frequently as COW
7334  * operations happen.  So, swapfile + btrfs == corruption.
7335  *
7336  * For now we're avoiding this by dropping bmap.
7337  */
7338 static const struct address_space_operations btrfs_aops = {
7339 	.readpage	= btrfs_readpage,
7340 	.writepage	= btrfs_writepage,
7341 	.writepages	= btrfs_writepages,
7342 	.readpages	= btrfs_readpages,
7343 	.sync_page	= block_sync_page,
7344 	.direct_IO	= btrfs_direct_IO,
7345 	.invalidatepage = btrfs_invalidatepage,
7346 	.releasepage	= btrfs_releasepage,
7347 	.set_page_dirty	= btrfs_set_page_dirty,
7348 	.error_remove_page = generic_error_remove_page,
7349 };
7350 
7351 static const struct address_space_operations btrfs_symlink_aops = {
7352 	.readpage	= btrfs_readpage,
7353 	.writepage	= btrfs_writepage,
7354 	.invalidatepage = btrfs_invalidatepage,
7355 	.releasepage	= btrfs_releasepage,
7356 };
7357 
7358 static const struct inode_operations btrfs_file_inode_operations = {
7359 	.truncate	= btrfs_truncate,
7360 	.getattr	= btrfs_getattr,
7361 	.setattr	= btrfs_setattr,
7362 	.setxattr	= btrfs_setxattr,
7363 	.getxattr	= btrfs_getxattr,
7364 	.listxattr      = btrfs_listxattr,
7365 	.removexattr	= btrfs_removexattr,
7366 	.permission	= btrfs_permission,
7367 	.fiemap		= btrfs_fiemap,
7368 };
7369 static const struct inode_operations btrfs_special_inode_operations = {
7370 	.getattr	= btrfs_getattr,
7371 	.setattr	= btrfs_setattr,
7372 	.permission	= btrfs_permission,
7373 	.setxattr	= btrfs_setxattr,
7374 	.getxattr	= btrfs_getxattr,
7375 	.listxattr	= btrfs_listxattr,
7376 	.removexattr	= btrfs_removexattr,
7377 };
7378 static const struct inode_operations btrfs_symlink_inode_operations = {
7379 	.readlink	= generic_readlink,
7380 	.follow_link	= page_follow_link_light,
7381 	.put_link	= page_put_link,
7382 	.getattr	= btrfs_getattr,
7383 	.permission	= btrfs_permission,
7384 	.setxattr	= btrfs_setxattr,
7385 	.getxattr	= btrfs_getxattr,
7386 	.listxattr	= btrfs_listxattr,
7387 	.removexattr	= btrfs_removexattr,
7388 };
7389 
7390 const struct dentry_operations btrfs_dentry_operations = {
7391 	.d_delete	= btrfs_dentry_delete,
7392 };
7393