xref: /openbmc/linux/fs/btrfs/inode.c (revision 7490ca1e)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include <linux/ratelimit.h>
41 #include <linux/mount.h>
42 #include "compat.h"
43 #include "ctree.h"
44 #include "disk-io.h"
45 #include "transaction.h"
46 #include "btrfs_inode.h"
47 #include "ioctl.h"
48 #include "print-tree.h"
49 #include "ordered-data.h"
50 #include "xattr.h"
51 #include "tree-log.h"
52 #include "volumes.h"
53 #include "compression.h"
54 #include "locking.h"
55 #include "free-space-cache.h"
56 #include "inode-map.h"
57 
58 struct btrfs_iget_args {
59 	u64 ino;
60 	struct btrfs_root *root;
61 };
62 
63 static const struct inode_operations btrfs_dir_inode_operations;
64 static const struct inode_operations btrfs_symlink_inode_operations;
65 static const struct inode_operations btrfs_dir_ro_inode_operations;
66 static const struct inode_operations btrfs_special_inode_operations;
67 static const struct inode_operations btrfs_file_inode_operations;
68 static const struct address_space_operations btrfs_aops;
69 static const struct address_space_operations btrfs_symlink_aops;
70 static const struct file_operations btrfs_dir_file_operations;
71 static struct extent_io_ops btrfs_extent_io_ops;
72 
73 static struct kmem_cache *btrfs_inode_cachep;
74 struct kmem_cache *btrfs_trans_handle_cachep;
75 struct kmem_cache *btrfs_transaction_cachep;
76 struct kmem_cache *btrfs_path_cachep;
77 struct kmem_cache *btrfs_free_space_cachep;
78 
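/*
 * Map the file-type bits of an inode mode (i_mode >> S_SHIFT) to the
 * BTRFS_FT_* values that are stored in directory items.
 */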
79 #define S_SHIFT 12
80 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
81 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
82 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
83 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
84 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
85 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
86 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
87 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
88 };
89 
90 static int btrfs_setsize(struct inode *inode, loff_t newsize);
91 static int btrfs_truncate(struct inode *inode);
92 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
93 static noinline int cow_file_range(struct inode *inode,
94 				   struct page *locked_page,
95 				   u64 start, u64 end, int *page_started,
96 				   unsigned long *nr_written, int unlock);
97 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
98 				struct btrfs_root *root, struct inode *inode);
99 
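/*
 * Initialize the security context of a newly created inode: set up the ACLs
 * inherited from the parent directory and then the security xattrs, all
 * inside the transaction that creates the inode.
 */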
100 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
101 				     struct inode *inode,  struct inode *dir,
102 				     const struct qstr *qstr)
103 {
104 	int err;
105 
106 	err = btrfs_init_acl(trans, inode, dir);
107 	if (!err)
108 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
109 	return err;
110 }
111 
112 /*
113  * this does all the hard work for inserting an inline extent into
114  * the btree.  The caller should have done a btrfs_drop_extents so that
115  * no overlapping inline items exist in the btree
116  */
117 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
118 				struct btrfs_root *root, struct inode *inode,
119 				u64 start, size_t size, size_t compressed_size,
120 				int compress_type,
121 				struct page **compressed_pages)
122 {
123 	struct btrfs_key key;
124 	struct btrfs_path *path;
125 	struct extent_buffer *leaf;
126 	struct page *page = NULL;
127 	char *kaddr;
128 	unsigned long ptr;
129 	struct btrfs_file_extent_item *ei;
130 	int err = 0;
131 	int ret;
132 	size_t cur_size = size;
133 	size_t datasize;
134 	unsigned long offset;
135 
136 	if (compressed_size && compressed_pages)
137 		cur_size = compressed_size;
138 
139 	path = btrfs_alloc_path();
140 	if (!path)
141 		return -ENOMEM;
142 
143 	path->leave_spinning = 1;
144 
145 	key.objectid = btrfs_ino(inode);
146 	key.offset = start;
147 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
148 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
149 
150 	inode_add_bytes(inode, size);
151 	ret = btrfs_insert_empty_item(trans, root, path, &key,
152 				      datasize);
154 	if (ret) {
155 		err = ret;
156 		goto fail;
157 	}
158 	leaf = path->nodes[0];
159 	ei = btrfs_item_ptr(leaf, path->slots[0],
160 			    struct btrfs_file_extent_item);
161 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
162 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
163 	btrfs_set_file_extent_encryption(leaf, ei, 0);
164 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
165 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
166 	ptr = btrfs_file_extent_inline_start(ei);
167 
168 	if (compress_type != BTRFS_COMPRESS_NONE) {
169 		struct page *cpage;
170 		int i = 0;
171 		while (compressed_size > 0) {
172 			cpage = compressed_pages[i];
173 			cur_size = min_t(unsigned long, compressed_size,
174 				       PAGE_CACHE_SIZE);
175 
176 			kaddr = kmap_atomic(cpage, KM_USER0);
177 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
178 			kunmap_atomic(kaddr, KM_USER0);
179 
180 			i++;
181 			ptr += cur_size;
182 			compressed_size -= cur_size;
183 		}
184 		btrfs_set_file_extent_compression(leaf, ei,
185 						  compress_type);
186 	} else {
187 		page = find_get_page(inode->i_mapping,
188 				     start >> PAGE_CACHE_SHIFT);
189 		btrfs_set_file_extent_compression(leaf, ei, 0);
190 		kaddr = kmap_atomic(page, KM_USER0);
191 		offset = start & (PAGE_CACHE_SIZE - 1);
192 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
193 		kunmap_atomic(kaddr, KM_USER0);
194 		page_cache_release(page);
195 	}
196 	btrfs_mark_buffer_dirty(leaf);
197 	btrfs_free_path(path);
198 
199 	/*
200 	 * we're an inline extent, so nobody can
201 	 * extend the file past i_size without locking
202 	 * a page we already have locked.
203 	 *
204 	 * We must do any isize and inode updates
205 	 * before we unlock the pages.  Otherwise we
206 	 * could end up racing with unlink.
207 	 */
208 	BTRFS_I(inode)->disk_i_size = inode->i_size;
209 	btrfs_update_inode(trans, root, inode);
210 
211 	return 0;
212 fail:
213 	btrfs_free_path(path);
214 	return err;
215 }
216 
217 
218 /*
219  * conditionally insert an inline extent into the file.  This
220  * does the checks required to make sure the data is small enough
221  * to fit as an inline extent.
222  */
223 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
224 				 struct btrfs_root *root,
225 				 struct inode *inode, u64 start, u64 end,
226 				 size_t compressed_size, int compress_type,
227 				 struct page **compressed_pages)
228 {
229 	u64 isize = i_size_read(inode);
230 	u64 actual_end = min(end + 1, isize);
231 	u64 inline_len = actual_end - start;
232 	u64 aligned_end = (end + root->sectorsize - 1) &
233 			~((u64)root->sectorsize - 1);
234 	u64 hint_byte;
235 	u64 data_len = inline_len;
236 	int ret;
237 
238 	if (compressed_size)
239 		data_len = compressed_size;
240 
241 	if (start > 0 ||
242 	    actual_end >= PAGE_CACHE_SIZE ||
243 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
244 	    (!compressed_size &&
245 	    (actual_end & (root->sectorsize - 1)) == 0) ||
246 	    end + 1 < isize ||
247 	    data_len > root->fs_info->max_inline) {
248 		return 1;
249 	}
250 
251 	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
252 				 &hint_byte, 1);
253 	BUG_ON(ret);
254 
255 	if (isize > actual_end)
256 		inline_len = min_t(u64, isize, actual_end);
257 	ret = insert_inline_extent(trans, root, inode, start,
258 				   inline_len, compressed_size,
259 				   compress_type, compressed_pages);
260 	BUG_ON(ret);
261 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
262 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
263 	return 0;
264 }
265 
266 struct async_extent {
267 	u64 start;
268 	u64 ram_size;
269 	u64 compressed_size;
270 	struct page **pages;
271 	unsigned long nr_pages;
272 	int compress_type;
273 	struct list_head list;
274 };
275 
276 struct async_cow {
277 	struct inode *inode;
278 	struct btrfs_root *root;
279 	struct page *locked_page;
280 	u64 start;
281 	u64 end;
282 	struct list_head extents;
283 	struct btrfs_work work;
284 };
285 
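/*
 * Record a range of the file (optionally with its compressed pages) on the
 * async_cow list so the submission phase can allocate extents and issue the
 * IO later, in the same order the ranges were queued.
 */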
286 static noinline int add_async_extent(struct async_cow *cow,
287 				     u64 start, u64 ram_size,
288 				     u64 compressed_size,
289 				     struct page **pages,
290 				     unsigned long nr_pages,
291 				     int compress_type)
292 {
293 	struct async_extent *async_extent;
294 
295 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
296 	BUG_ON(!async_extent);
297 	async_extent->start = start;
298 	async_extent->ram_size = ram_size;
299 	async_extent->compressed_size = compressed_size;
300 	async_extent->pages = pages;
301 	async_extent->nr_pages = nr_pages;
302 	async_extent->compress_type = compress_type;
303 	list_add_tail(&async_extent->list, &cow->extents);
304 	return 0;
305 }
306 
307 /*
308  * we create compressed extents in two phases.  The first
309  * phase compresses a range of pages that have already been
310  * locked (both pages and state bits are locked).
311  *
312  * This is done inside an ordered work queue, and the compression
313  * is spread across many cpus.  The actual IO submission is step
314  * two, and the ordered work queue takes care of making sure that
315  * happens in the same order things were put onto the queue by
316  * writepages and friends.
317  *
318  * If this code finds it can't get good compression, it puts an
319  * entry onto the work queue to write the uncompressed bytes.  This
320  * makes sure that both compressed inodes and uncompressed inodes
321  * are written in the same order that pdflush sent them down.
322  */
323 static noinline int compress_file_range(struct inode *inode,
324 					struct page *locked_page,
325 					u64 start, u64 end,
326 					struct async_cow *async_cow,
327 					int *num_added)
328 {
329 	struct btrfs_root *root = BTRFS_I(inode)->root;
330 	struct btrfs_trans_handle *trans;
331 	u64 num_bytes;
332 	u64 blocksize = root->sectorsize;
333 	u64 actual_end;
334 	u64 isize = i_size_read(inode);
335 	int ret = 0;
336 	struct page **pages = NULL;
337 	unsigned long nr_pages;
338 	unsigned long nr_pages_ret = 0;
339 	unsigned long total_compressed = 0;
340 	unsigned long total_in = 0;
341 	unsigned long max_compressed = 128 * 1024;
342 	unsigned long max_uncompressed = 128 * 1024;
343 	int i;
344 	int will_compress;
345 	int compress_type = root->fs_info->compress_type;
346 
347 	/* if this is a small write inside eof, kick off a defrag */
348 	if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
349 		btrfs_add_inode_defrag(NULL, inode);
350 
351 	actual_end = min_t(u64, isize, end + 1);
352 again:
353 	will_compress = 0;
354 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
355 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
356 
357 	/*
358 	 * we don't want to send crud past the end of i_size through
359 	 * compression, that's just a waste of CPU time.  So, if the
360 	 * end of the file is before the start of our current
361 	 * requested range of bytes, we bail out to the uncompressed
362 	 * cleanup code that can deal with all of this.
363 	 *
364 	 * It isn't really the fastest way to fix things, but this is a
365 	 * very uncommon corner.
366 	 */
367 	if (actual_end <= start)
368 		goto cleanup_and_bail_uncompressed;
369 
370 	total_compressed = actual_end - start;
371 
372 	/* we want to make sure that amount of ram required to uncompress
373 	 * an extent is reasonable, so we limit the total size in ram
374 	 * of a compressed extent to 128k.  This is a crucial number
375 	 * because it also controls how easily we can spread reads across
376 	 * cpus for decompression.
377 	 *
378 	 * We also want to make sure the amount of IO required to do
379 	 * a random read is reasonably small, so we limit the size of
380 	 * a compressed extent to 128k.
381 	 */
382 	total_compressed = min(total_compressed, max_uncompressed);
383 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
384 	num_bytes = max(blocksize,  num_bytes);
385 	total_in = 0;
386 	ret = 0;
387 
388 	/*
389 	 * we do compression for mount -o compress and when the
390 	 * inode has not been flagged as nocompress.  This flag can
391 	 * change at any time if we discover bad compression ratios.
392 	 */
393 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
394 	    (btrfs_test_opt(root, COMPRESS) ||
395 	     (BTRFS_I(inode)->force_compress) ||
396 	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
397 		WARN_ON(pages);
398 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
399 		if (!pages) {
400 			/* just bail out to the uncompressed code */
401 			goto cont;
402 		}
403 
404 		if (BTRFS_I(inode)->force_compress)
405 			compress_type = BTRFS_I(inode)->force_compress;
406 
407 		ret = btrfs_compress_pages(compress_type,
408 					   inode->i_mapping, start,
409 					   total_compressed, pages,
410 					   nr_pages, &nr_pages_ret,
411 					   &total_in,
412 					   &total_compressed,
413 					   max_compressed);
414 
415 		if (!ret) {
416 			unsigned long offset = total_compressed &
417 				(PAGE_CACHE_SIZE - 1);
418 			struct page *page = pages[nr_pages_ret - 1];
419 			char *kaddr;
420 
421 			/* zero the tail end of the last page, we might be
422 			 * sending it down to disk
423 			 */
424 			if (offset) {
425 				kaddr = kmap_atomic(page, KM_USER0);
426 				memset(kaddr + offset, 0,
427 				       PAGE_CACHE_SIZE - offset);
428 				kunmap_atomic(kaddr, KM_USER0);
429 			}
430 			will_compress = 1;
431 		}
432 	}
433 cont:
434 	if (start == 0) {
435 		trans = btrfs_join_transaction(root);
436 		BUG_ON(IS_ERR(trans));
437 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
438 
439 		/* let's try to make an inline extent */
440 		if (ret || total_in < (actual_end - start)) {
441 			/* we didn't compress the entire range, try
442 			 * to make an uncompressed inline extent.
443 			 */
444 			ret = cow_file_range_inline(trans, root, inode,
445 						    start, end, 0, 0, NULL);
446 		} else {
447 			/* try making a compressed inline extent */
448 			ret = cow_file_range_inline(trans, root, inode,
449 						    start, end,
450 						    total_compressed,
451 						    compress_type, pages);
452 		}
453 		if (ret == 0) {
454 			/*
455 			 * inline extent creation worked, we don't need
456 			 * to create any more async work items.  Unlock
457 			 * and free up our temp pages.
458 			 */
459 			extent_clear_unlock_delalloc(inode,
460 			     &BTRFS_I(inode)->io_tree,
461 			     start, end, NULL,
462 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
463 			     EXTENT_CLEAR_DELALLOC |
464 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
465 
466 			btrfs_end_transaction(trans, root);
467 			goto free_pages_out;
468 		}
469 		btrfs_end_transaction(trans, root);
470 	}
471 
472 	if (will_compress) {
473 		/*
474 		 * we aren't doing an inline extent, so round the compressed size
475 		 * up to a block size boundary so the allocator does sane
476 		 * things
477 		 */
478 		total_compressed = (total_compressed + blocksize - 1) &
479 			~(blocksize - 1);
480 
481 		/*
482 		 * one last check to make sure the compression is really a
483 		 * win, compare the page count read with the blocks on disk
484 		 */
485 		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
486 			~(PAGE_CACHE_SIZE - 1);
487 		if (total_compressed >= total_in) {
488 			will_compress = 0;
489 		} else {
490 			num_bytes = total_in;
491 		}
492 	}
493 	if (!will_compress && pages) {
494 		/*
495 		 * the compression code ran but failed to make things smaller,
496 		 * free any pages it allocated and our page pointer array
497 		 */
498 		for (i = 0; i < nr_pages_ret; i++) {
499 			WARN_ON(pages[i]->mapping);
500 			page_cache_release(pages[i]);
501 		}
502 		kfree(pages);
503 		pages = NULL;
504 		total_compressed = 0;
505 		nr_pages_ret = 0;
506 
507 		/* flag the file so we don't compress in the future */
508 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
509 		    !(BTRFS_I(inode)->force_compress)) {
510 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
511 		}
512 	}
513 	if (will_compress) {
514 		*num_added += 1;
515 
516 		/* the async work queues will take care of doing actual
517 		 * allocation on disk for these compressed pages,
518 		 * and will submit them to the elevator.
519 		 */
520 		add_async_extent(async_cow, start, num_bytes,
521 				 total_compressed, pages, nr_pages_ret,
522 				 compress_type);
523 
524 		if (start + num_bytes < end) {
525 			start += num_bytes;
526 			pages = NULL;
527 			cond_resched();
528 			goto again;
529 		}
530 	} else {
531 cleanup_and_bail_uncompressed:
532 		/*
533 		 * No compression, but we still need to write the pages in
534 		 * the file we've been given so far.  redirty the locked
535 		 * page if it corresponds to our extent and set things up
536 		 * for the async work queue to run cow_file_range to do
537 		 * the normal delalloc dance
538 		 */
539 		if (page_offset(locked_page) >= start &&
540 		    page_offset(locked_page) <= end) {
541 			__set_page_dirty_nobuffers(locked_page);
542 			/* unlocked later on in the async handlers */
543 		}
544 		add_async_extent(async_cow, start, end - start + 1,
545 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
546 		*num_added += 1;
547 	}
548 
549 out:
550 	return 0;
551 
552 free_pages_out:
553 	for (i = 0; i < nr_pages_ret; i++) {
554 		WARN_ON(pages[i]->mapping);
555 		page_cache_release(pages[i]);
556 	}
557 	kfree(pages);
558 
559 	goto out;
560 }
561 
562 /*
563  * phase two of compressed writeback.  This is the ordered portion
564  * of the code, which only gets called in the order the work was
565  * queued.  We walk all the async extents created by compress_file_range
566  * and send them down to the disk.
567  */
568 static noinline int submit_compressed_extents(struct inode *inode,
569 					      struct async_cow *async_cow)
570 {
571 	struct async_extent *async_extent;
572 	u64 alloc_hint = 0;
573 	struct btrfs_trans_handle *trans;
574 	struct btrfs_key ins;
575 	struct extent_map *em;
576 	struct btrfs_root *root = BTRFS_I(inode)->root;
577 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
578 	struct extent_io_tree *io_tree;
579 	int ret = 0;
580 
581 	if (list_empty(&async_cow->extents))
582 		return 0;
583 
584 
585 	while (!list_empty(&async_cow->extents)) {
586 		async_extent = list_entry(async_cow->extents.next,
587 					  struct async_extent, list);
588 		list_del(&async_extent->list);
589 
590 		io_tree = &BTRFS_I(inode)->io_tree;
591 
592 retry:
593 		/* did the compression code fall back to uncompressed IO? */
594 		if (!async_extent->pages) {
595 			int page_started = 0;
596 			unsigned long nr_written = 0;
597 
598 			lock_extent(io_tree, async_extent->start,
599 					 async_extent->start +
600 					 async_extent->ram_size - 1, GFP_NOFS);
601 
602 			/* allocate blocks */
603 			ret = cow_file_range(inode, async_cow->locked_page,
604 					     async_extent->start,
605 					     async_extent->start +
606 					     async_extent->ram_size - 1,
607 					     &page_started, &nr_written, 0);
608 
609 			/*
610 			 * if page_started, cow_file_range inserted an
611 			 * inline extent and took care of all the unlocking
612 			 * and IO for us.  Otherwise, we need to submit
613 			 * all those pages down to the drive.
614 			 */
615 			if (!page_started && !ret)
616 				extent_write_locked_range(io_tree,
617 						  inode, async_extent->start,
618 						  async_extent->start +
619 						  async_extent->ram_size - 1,
620 						  btrfs_get_extent,
621 						  WB_SYNC_ALL);
622 			kfree(async_extent);
623 			cond_resched();
624 			continue;
625 		}
626 
627 		lock_extent(io_tree, async_extent->start,
628 			    async_extent->start + async_extent->ram_size - 1,
629 			    GFP_NOFS);
630 
631 		trans = btrfs_join_transaction(root);
632 		BUG_ON(IS_ERR(trans));
633 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
634 		ret = btrfs_reserve_extent(trans, root,
635 					   async_extent->compressed_size,
636 					   async_extent->compressed_size,
637 					   0, alloc_hint,
638 					   (u64)-1, &ins, 1);
639 		btrfs_end_transaction(trans, root);
640 
641 		if (ret) {
642 			int i;
643 			for (i = 0; i < async_extent->nr_pages; i++) {
644 				WARN_ON(async_extent->pages[i]->mapping);
645 				page_cache_release(async_extent->pages[i]);
646 			}
647 			kfree(async_extent->pages);
648 			async_extent->nr_pages = 0;
649 			async_extent->pages = NULL;
650 			unlock_extent(io_tree, async_extent->start,
651 				      async_extent->start +
652 				      async_extent->ram_size - 1, GFP_NOFS);
653 			goto retry;
654 		}
655 
656 		/*
657 		 * here we're doing allocation and writeback of the
658 		 * compressed pages
659 		 */
660 		btrfs_drop_extent_cache(inode, async_extent->start,
661 					async_extent->start +
662 					async_extent->ram_size - 1, 0);
663 
664 		em = alloc_extent_map();
665 		BUG_ON(!em);
666 		em->start = async_extent->start;
667 		em->len = async_extent->ram_size;
668 		em->orig_start = em->start;
669 
670 		em->block_start = ins.objectid;
671 		em->block_len = ins.offset;
672 		em->bdev = root->fs_info->fs_devices->latest_bdev;
673 		em->compress_type = async_extent->compress_type;
674 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
675 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
676 
677 		while (1) {
678 			write_lock(&em_tree->lock);
679 			ret = add_extent_mapping(em_tree, em);
680 			write_unlock(&em_tree->lock);
681 			if (ret != -EEXIST) {
682 				free_extent_map(em);
683 				break;
684 			}
685 			btrfs_drop_extent_cache(inode, async_extent->start,
686 						async_extent->start +
687 						async_extent->ram_size - 1, 0);
688 		}
689 
690 		ret = btrfs_add_ordered_extent_compress(inode,
691 						async_extent->start,
692 						ins.objectid,
693 						async_extent->ram_size,
694 						ins.offset,
695 						BTRFS_ORDERED_COMPRESSED,
696 						async_extent->compress_type);
697 		BUG_ON(ret);
698 
699 		/*
700 		 * clear dirty, set writeback and unlock the pages.
701 		 */
702 		extent_clear_unlock_delalloc(inode,
703 				&BTRFS_I(inode)->io_tree,
704 				async_extent->start,
705 				async_extent->start +
706 				async_extent->ram_size - 1,
707 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
708 				EXTENT_CLEAR_UNLOCK |
709 				EXTENT_CLEAR_DELALLOC |
710 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
711 
712 		ret = btrfs_submit_compressed_write(inode,
713 				    async_extent->start,
714 				    async_extent->ram_size,
715 				    ins.objectid,
716 				    ins.offset, async_extent->pages,
717 				    async_extent->nr_pages);
718 
719 		BUG_ON(ret);
720 		alloc_hint = ins.objectid + ins.offset;
721 		kfree(async_extent);
722 		cond_resched();
723 	}
724 
725 	return 0;
726 }
727 
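/*
 * Look up an extent map covering this range and use its disk block as an
 * allocation hint; if that mapping has no real block number, fall back to
 * the first mapping in the file, or to no hint at all.
 */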
728 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
729 				      u64 num_bytes)
730 {
731 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
732 	struct extent_map *em;
733 	u64 alloc_hint = 0;
734 
735 	read_lock(&em_tree->lock);
736 	em = search_extent_mapping(em_tree, start, num_bytes);
737 	if (em) {
738 		/*
739 		 * if block start isn't an actual block number then find the
740 		 * first block in this inode and use that as a hint.  If that
741 		 * block is also bogus then just don't worry about it.
742 		 */
743 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
744 			free_extent_map(em);
745 			em = search_extent_mapping(em_tree, 0, 0);
746 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
747 				alloc_hint = em->block_start;
748 			if (em)
749 				free_extent_map(em);
750 		} else {
751 			alloc_hint = em->block_start;
752 			free_extent_map(em);
753 		}
754 	}
755 	read_unlock(&em_tree->lock);
756 
757 	return alloc_hint;
758 }
759 
760 /*
761  * when extent_io.c finds a delayed allocation range in the file,
762  * the call backs end up in this code.  The basic idea is to
763  * allocate extents on disk for the range, and create ordered data structs
764  * in ram to track those extents.
765  *
766  * locked_page is the page that writepage had locked already.  We use
767  * it to make sure we don't do extra locks or unlocks.
768  *
769  * *page_started is set to one if we unlock locked_page and do everything
770  * required to start IO on it.  It may be clean and already done with
771  * IO when we return.
772  */
773 static noinline int cow_file_range(struct inode *inode,
774 				   struct page *locked_page,
775 				   u64 start, u64 end, int *page_started,
776 				   unsigned long *nr_written,
777 				   int unlock)
778 {
779 	struct btrfs_root *root = BTRFS_I(inode)->root;
780 	struct btrfs_trans_handle *trans;
781 	u64 alloc_hint = 0;
782 	u64 num_bytes;
783 	unsigned long ram_size;
784 	u64 disk_num_bytes;
785 	u64 cur_alloc_size;
786 	u64 blocksize = root->sectorsize;
787 	struct btrfs_key ins;
788 	struct extent_map *em;
789 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
790 	int ret = 0;
791 
792 	BUG_ON(btrfs_is_free_space_inode(root, inode));
793 	trans = btrfs_join_transaction(root);
794 	BUG_ON(IS_ERR(trans));
795 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
796 
797 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
798 	num_bytes = max(blocksize,  num_bytes);
799 	disk_num_bytes = num_bytes;
800 	ret = 0;
801 
802 	/* if this is a small write inside eof, kick off defrag */
803 	if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
804 		btrfs_add_inode_defrag(trans, inode);
805 
806 	if (start == 0) {
807 		/* let's try to make an inline extent */
808 		ret = cow_file_range_inline(trans, root, inode,
809 					    start, end, 0, 0, NULL);
810 		if (ret == 0) {
811 			extent_clear_unlock_delalloc(inode,
812 				     &BTRFS_I(inode)->io_tree,
813 				     start, end, NULL,
814 				     EXTENT_CLEAR_UNLOCK_PAGE |
815 				     EXTENT_CLEAR_UNLOCK |
816 				     EXTENT_CLEAR_DELALLOC |
817 				     EXTENT_CLEAR_DIRTY |
818 				     EXTENT_SET_WRITEBACK |
819 				     EXTENT_END_WRITEBACK);
820 
821 			*nr_written = *nr_written +
822 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
823 			*page_started = 1;
824 			ret = 0;
825 			goto out;
826 		}
827 	}
828 
829 	BUG_ON(disk_num_bytes >
830 	       btrfs_super_total_bytes(root->fs_info->super_copy));
831 
832 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
833 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
834 
835 	while (disk_num_bytes > 0) {
836 		unsigned long op;
837 
838 		cur_alloc_size = disk_num_bytes;
839 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
840 					   root->sectorsize, 0, alloc_hint,
841 					   (u64)-1, &ins, 1);
842 		BUG_ON(ret);
843 
844 		em = alloc_extent_map();
845 		BUG_ON(!em);
846 		em->start = start;
847 		em->orig_start = em->start;
848 		ram_size = ins.offset;
849 		em->len = ins.offset;
850 
851 		em->block_start = ins.objectid;
852 		em->block_len = ins.offset;
853 		em->bdev = root->fs_info->fs_devices->latest_bdev;
854 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
855 
856 		while (1) {
857 			write_lock(&em_tree->lock);
858 			ret = add_extent_mapping(em_tree, em);
859 			write_unlock(&em_tree->lock);
860 			if (ret != -EEXIST) {
861 				free_extent_map(em);
862 				break;
863 			}
864 			btrfs_drop_extent_cache(inode, start,
865 						start + ram_size - 1, 0);
866 		}
867 
868 		cur_alloc_size = ins.offset;
869 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
870 					       ram_size, cur_alloc_size, 0);
871 		BUG_ON(ret);
872 
873 		if (root->root_key.objectid ==
874 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
875 			ret = btrfs_reloc_clone_csums(inode, start,
876 						      cur_alloc_size);
877 			BUG_ON(ret);
878 		}
879 
880 		if (disk_num_bytes < cur_alloc_size)
881 			break;
882 
883 		/* we're not doing compressed IO, don't unlock the first
884 		 * page (which the caller expects to stay locked), don't
885 		 * clear any dirty bits and don't set any writeback bits
886 		 *
887 		 * Do set the Private2 bit so we know this page was properly
888 		 * setup for writepage
889 		 */
890 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
891 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
892 			EXTENT_SET_PRIVATE2;
893 
894 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
895 					     start, start + ram_size - 1,
896 					     locked_page, op);
897 		disk_num_bytes -= cur_alloc_size;
898 		num_bytes -= cur_alloc_size;
899 		alloc_hint = ins.objectid + ins.offset;
900 		start += cur_alloc_size;
901 	}
902 out:
903 	ret = 0;
904 	btrfs_end_transaction(trans, root);
905 
906 	return ret;
907 }
908 
909 /*
910  * work queue callback to start compression on a file and pages
911  */
912 static noinline void async_cow_start(struct btrfs_work *work)
913 {
914 	struct async_cow *async_cow;
915 	int num_added = 0;
916 	async_cow = container_of(work, struct async_cow, work);
917 
918 	compress_file_range(async_cow->inode, async_cow->locked_page,
919 			    async_cow->start, async_cow->end, async_cow,
920 			    &num_added);
921 	if (num_added == 0)
922 		async_cow->inode = NULL;
923 }
924 
925 /*
926  * work queue callback to submit previously compressed pages
927  */
928 static noinline void async_cow_submit(struct btrfs_work *work)
929 {
930 	struct async_cow *async_cow;
931 	struct btrfs_root *root;
932 	unsigned long nr_pages;
933 
934 	async_cow = container_of(work, struct async_cow, work);
935 
936 	root = async_cow->root;
937 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
938 		PAGE_CACHE_SHIFT;
939 
940 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
941 
942 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
943 	    5 * 1024 * 1024 &&
944 	    waitqueue_active(&root->fs_info->async_submit_wait))
945 		wake_up(&root->fs_info->async_submit_wait);
946 
947 	if (async_cow->inode)
948 		submit_compressed_extents(async_cow->inode, async_cow);
949 }
950 
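/* final work queue callback, frees the async_cow struct once both phases have run */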
951 static noinline void async_cow_free(struct btrfs_work *work)
952 {
953 	struct async_cow *async_cow;
954 	async_cow = container_of(work, struct async_cow, work);
955 	kfree(async_cow);
956 }
957 
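/*
 * Split a delalloc range into chunks (up to 512k each, unless compression is
 * disabled for the inode), queue each chunk as an async_cow work item on the
 * delalloc workers, and throttle against async_delalloc_pages so writeback
 * does not build up an unbounded backlog of queued pages.
 */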
958 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
959 				u64 start, u64 end, int *page_started,
960 				unsigned long *nr_written)
961 {
962 	struct async_cow *async_cow;
963 	struct btrfs_root *root = BTRFS_I(inode)->root;
964 	unsigned long nr_pages;
965 	u64 cur_end;
966 	int limit = 10 * 1024 * 1024;
967 
968 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
969 			 1, 0, NULL, GFP_NOFS);
970 	while (start < end) {
971 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
972 		BUG_ON(!async_cow);
973 		async_cow->inode = inode;
974 		async_cow->root = root;
975 		async_cow->locked_page = locked_page;
976 		async_cow->start = start;
977 
978 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
979 			cur_end = end;
980 		else
981 			cur_end = min(end, start + 512 * 1024 - 1);
982 
983 		async_cow->end = cur_end;
984 		INIT_LIST_HEAD(&async_cow->extents);
985 
986 		async_cow->work.func = async_cow_start;
987 		async_cow->work.ordered_func = async_cow_submit;
988 		async_cow->work.ordered_free = async_cow_free;
989 		async_cow->work.flags = 0;
990 
991 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
992 			PAGE_CACHE_SHIFT;
993 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
994 
995 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
996 				   &async_cow->work);
997 
998 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
999 			wait_event(root->fs_info->async_submit_wait,
1000 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
1001 			    limit));
1002 		}
1003 
1004 		while (atomic_read(&root->fs_info->async_submit_draining) &&
1005 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
1006 			wait_event(root->fs_info->async_submit_wait,
1007 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
1008 			   0));
1009 		}
1010 
1011 		*nr_written += nr_pages;
1012 		start = cur_end + 1;
1013 	}
1014 	*page_started = 1;
1015 	return 0;
1016 }
1017 
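/*
 * Return 1 if any checksums exist for the given disk byte range, freeing the
 * list entries the lookup returned; return 0 if the range has no csums.
 */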
1018 static noinline int csum_exist_in_range(struct btrfs_root *root,
1019 					u64 bytenr, u64 num_bytes)
1020 {
1021 	int ret;
1022 	struct btrfs_ordered_sum *sums;
1023 	LIST_HEAD(list);
1024 
1025 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1026 				       bytenr + num_bytes - 1, &list, 0);
1027 	if (ret == 0 && list_empty(&list))
1028 		return 0;
1029 
1030 	while (!list_empty(&list)) {
1031 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1032 		list_del(&sums->list);
1033 		kfree(sums);
1034 	}
1035 	return 1;
1036 }
1037 
1038 /*
1039  * the callback for nocow writeback.  This checks for snapshots or COW copies
1040  * of the extents that exist in the file, and COWs the file as required.
1041  *
1042  * If no cow copies or snapshots exist, we write directly to the existing
1043  * blocks on disk
1044  */
1045 static noinline int run_delalloc_nocow(struct inode *inode,
1046 				       struct page *locked_page,
1047 			      u64 start, u64 end, int *page_started, int force,
1048 			      unsigned long *nr_written)
1049 {
1050 	struct btrfs_root *root = BTRFS_I(inode)->root;
1051 	struct btrfs_trans_handle *trans;
1052 	struct extent_buffer *leaf;
1053 	struct btrfs_path *path;
1054 	struct btrfs_file_extent_item *fi;
1055 	struct btrfs_key found_key;
1056 	u64 cow_start;
1057 	u64 cur_offset;
1058 	u64 extent_end;
1059 	u64 extent_offset;
1060 	u64 disk_bytenr;
1061 	u64 num_bytes;
1062 	int extent_type;
1063 	int ret;
1064 	int type;
1065 	int nocow;
1066 	int check_prev = 1;
1067 	bool nolock;
1068 	u64 ino = btrfs_ino(inode);
1069 
1070 	path = btrfs_alloc_path();
1071 	if (!path)
1072 		return -ENOMEM;
1073 
1074 	nolock = btrfs_is_free_space_inode(root, inode);
1075 
1076 	if (nolock)
1077 		trans = btrfs_join_transaction_nolock(root);
1078 	else
1079 		trans = btrfs_join_transaction(root);
1080 
1081 	BUG_ON(IS_ERR(trans));
1082 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1083 
1084 	cow_start = (u64)-1;
1085 	cur_offset = start;
1086 	while (1) {
1087 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
1088 					       cur_offset, 0);
1089 		BUG_ON(ret < 0);
1090 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1091 			leaf = path->nodes[0];
1092 			btrfs_item_key_to_cpu(leaf, &found_key,
1093 					      path->slots[0] - 1);
1094 			if (found_key.objectid == ino &&
1095 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1096 				path->slots[0]--;
1097 		}
1098 		check_prev = 0;
1099 next_slot:
1100 		leaf = path->nodes[0];
1101 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1102 			ret = btrfs_next_leaf(root, path);
1103 			if (ret < 0)
1104 				BUG_ON(1);
1105 			if (ret > 0)
1106 				break;
1107 			leaf = path->nodes[0];
1108 		}
1109 
1110 		nocow = 0;
1111 		disk_bytenr = 0;
1112 		num_bytes = 0;
1113 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1114 
1115 		if (found_key.objectid > ino ||
1116 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1117 		    found_key.offset > end)
1118 			break;
1119 
1120 		if (found_key.offset > cur_offset) {
1121 			extent_end = found_key.offset;
1122 			extent_type = 0;
1123 			goto out_check;
1124 		}
1125 
1126 		fi = btrfs_item_ptr(leaf, path->slots[0],
1127 				    struct btrfs_file_extent_item);
1128 		extent_type = btrfs_file_extent_type(leaf, fi);
1129 
1130 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1131 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1132 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1133 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1134 			extent_end = found_key.offset +
1135 				btrfs_file_extent_num_bytes(leaf, fi);
1136 			if (extent_end <= start) {
1137 				path->slots[0]++;
1138 				goto next_slot;
1139 			}
1140 			if (disk_bytenr == 0)
1141 				goto out_check;
1142 			if (btrfs_file_extent_compression(leaf, fi) ||
1143 			    btrfs_file_extent_encryption(leaf, fi) ||
1144 			    btrfs_file_extent_other_encoding(leaf, fi))
1145 				goto out_check;
1146 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1147 				goto out_check;
1148 			if (btrfs_extent_readonly(root, disk_bytenr))
1149 				goto out_check;
1150 			if (btrfs_cross_ref_exist(trans, root, ino,
1151 						  found_key.offset -
1152 						  extent_offset, disk_bytenr))
1153 				goto out_check;
1154 			disk_bytenr += extent_offset;
1155 			disk_bytenr += cur_offset - found_key.offset;
1156 			num_bytes = min(end + 1, extent_end) - cur_offset;
1157 			/*
1158 			 * force cow if csum exists in the range.
1159 			 * this ensures that csums for a given extent are
1160 			 * either valid or do not exist.
1161 			 */
1162 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1163 				goto out_check;
1164 			nocow = 1;
1165 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1166 			extent_end = found_key.offset +
1167 				btrfs_file_extent_inline_len(leaf, fi);
1168 			extent_end = ALIGN(extent_end, root->sectorsize);
1169 		} else {
1170 			BUG_ON(1);
1171 		}
1172 out_check:
1173 		if (extent_end <= start) {
1174 			path->slots[0]++;
1175 			goto next_slot;
1176 		}
1177 		if (!nocow) {
1178 			if (cow_start == (u64)-1)
1179 				cow_start = cur_offset;
1180 			cur_offset = extent_end;
1181 			if (cur_offset > end)
1182 				break;
1183 			path->slots[0]++;
1184 			goto next_slot;
1185 		}
1186 
1187 		btrfs_release_path(path);
1188 		if (cow_start != (u64)-1) {
1189 			ret = cow_file_range(inode, locked_page, cow_start,
1190 					found_key.offset - 1, page_started,
1191 					nr_written, 1);
1192 			BUG_ON(ret);
1193 			cow_start = (u64)-1;
1194 		}
1195 
1196 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1197 			struct extent_map *em;
1198 			struct extent_map_tree *em_tree;
1199 			em_tree = &BTRFS_I(inode)->extent_tree;
1200 			em = alloc_extent_map();
1201 			BUG_ON(!em);
1202 			em->start = cur_offset;
1203 			em->orig_start = em->start;
1204 			em->len = num_bytes;
1205 			em->block_len = num_bytes;
1206 			em->block_start = disk_bytenr;
1207 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1208 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1209 			while (1) {
1210 				write_lock(&em_tree->lock);
1211 				ret = add_extent_mapping(em_tree, em);
1212 				write_unlock(&em_tree->lock);
1213 				if (ret != -EEXIST) {
1214 					free_extent_map(em);
1215 					break;
1216 				}
1217 				btrfs_drop_extent_cache(inode, em->start,
1218 						em->start + em->len - 1, 0);
1219 			}
1220 			type = BTRFS_ORDERED_PREALLOC;
1221 		} else {
1222 			type = BTRFS_ORDERED_NOCOW;
1223 		}
1224 
1225 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1226 					       num_bytes, num_bytes, type);
1227 		BUG_ON(ret);
1228 
1229 		if (root->root_key.objectid ==
1230 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1231 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1232 						      num_bytes);
1233 			BUG_ON(ret);
1234 		}
1235 
1236 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1237 				cur_offset, cur_offset + num_bytes - 1,
1238 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1239 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1240 				EXTENT_SET_PRIVATE2);
1241 		cur_offset = extent_end;
1242 		if (cur_offset > end)
1243 			break;
1244 	}
1245 	btrfs_release_path(path);
1246 
1247 	if (cur_offset <= end && cow_start == (u64)-1)
1248 		cow_start = cur_offset;
1249 	if (cow_start != (u64)-1) {
1250 		ret = cow_file_range(inode, locked_page, cow_start, end,
1251 				     page_started, nr_written, 1);
1252 		BUG_ON(ret);
1253 	}
1254 
1255 	if (nolock) {
1256 		ret = btrfs_end_transaction_nolock(trans, root);
1257 		BUG_ON(ret);
1258 	} else {
1259 		ret = btrfs_end_transaction(trans, root);
1260 		BUG_ON(ret);
1261 	}
1262 	btrfs_free_path(path);
1263 	return 0;
1264 }
1265 
1266 /*
1267  * extent_io.c call back to do delayed allocation processing
1268  */
1269 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1270 			      u64 start, u64 end, int *page_started,
1271 			      unsigned long *nr_written)
1272 {
1273 	int ret;
1274 	struct btrfs_root *root = BTRFS_I(inode)->root;
1275 
1276 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1277 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1278 					 page_started, 1, nr_written);
1279 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1280 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1281 					 page_started, 0, nr_written);
1282 	else if (!btrfs_test_opt(root, COMPRESS) &&
1283 		 !(BTRFS_I(inode)->force_compress) &&
1284 		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
1285 		ret = cow_file_range(inode, locked_page, start, end,
1286 				      page_started, nr_written, 1);
1287 	else
1288 		ret = cow_file_range_async(inode, locked_page, start, end,
1289 					   page_started, nr_written);
1290 	return ret;
1291 }
1292 
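/*
 * extent_io.c split_extent_hook, used when a delalloc extent state is split
 * in two: the second half becomes another outstanding extent, so bump the
 * per-inode count used for metadata reservations.
 */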
1293 static void btrfs_split_extent_hook(struct inode *inode,
1294 				    struct extent_state *orig, u64 split)
1295 {
1296 	/* not delalloc, ignore it */
1297 	if (!(orig->state & EXTENT_DELALLOC))
1298 		return;
1299 
1300 	spin_lock(&BTRFS_I(inode)->lock);
1301 	BTRFS_I(inode)->outstanding_extents++;
1302 	spin_unlock(&BTRFS_I(inode)->lock);
1303 }
1304 
1305 /*
1306  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1307  * extents so we can keep track of new extents that are just merged onto old
1308  * extents, such as when we are doing sequential writes, so we can properly
1309  * account for the metadata space we'll need.
1310  */
1311 static void btrfs_merge_extent_hook(struct inode *inode,
1312 				    struct extent_state *new,
1313 				    struct extent_state *other)
1314 {
1315 	/* not delalloc, ignore it */
1316 	if (!(other->state & EXTENT_DELALLOC))
1317 		return;
1318 
1319 	spin_lock(&BTRFS_I(inode)->lock);
1320 	BTRFS_I(inode)->outstanding_extents--;
1321 	spin_unlock(&BTRFS_I(inode)->lock);
1322 }
1323 
1324 /*
1325  * extent_io.c set_bit_hook, used to track delayed allocation
1326  * bytes in this file, and to maintain the list of inodes that
1327  * have pending delalloc work to be done.
1328  */
1329 static void btrfs_set_bit_hook(struct inode *inode,
1330 			       struct extent_state *state, int *bits)
1331 {
1332 
1333 	/*
1334 	 * set_bit and clear bit hooks normally require _irqsave/restore
1335 	 * but in this case, we are only testing for the DELALLOC
1336 	 * bit, which is only set or cleared with irqs on
1337 	 */
1338 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1339 		struct btrfs_root *root = BTRFS_I(inode)->root;
1340 		u64 len = state->end + 1 - state->start;
1341 		bool do_list = !btrfs_is_free_space_inode(root, inode);
1342 
1343 		if (*bits & EXTENT_FIRST_DELALLOC) {
1344 			*bits &= ~EXTENT_FIRST_DELALLOC;
1345 		} else {
1346 			spin_lock(&BTRFS_I(inode)->lock);
1347 			BTRFS_I(inode)->outstanding_extents++;
1348 			spin_unlock(&BTRFS_I(inode)->lock);
1349 		}
1350 
1351 		spin_lock(&root->fs_info->delalloc_lock);
1352 		BTRFS_I(inode)->delalloc_bytes += len;
1353 		root->fs_info->delalloc_bytes += len;
1354 		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1355 			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1356 				      &root->fs_info->delalloc_inodes);
1357 		}
1358 		spin_unlock(&root->fs_info->delalloc_lock);
1359 	}
1360 }
1361 
1362 /*
1363  * extent_io.c clear_bit_hook, see set_bit_hook for why
1364  */
1365 static void btrfs_clear_bit_hook(struct inode *inode,
1366 				 struct extent_state *state, int *bits)
1367 {
1368 	/*
1369 	 * set_bit and clear bit hooks normally require _irqsave/restore
1370 	 * but in this case, we are only testing for the DELALLOC
1371 	 * bit, which is only set or cleared with irqs on
1372 	 */
1373 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1374 		struct btrfs_root *root = BTRFS_I(inode)->root;
1375 		u64 len = state->end + 1 - state->start;
1376 		bool do_list = !btrfs_is_free_space_inode(root, inode);
1377 
1378 		if (*bits & EXTENT_FIRST_DELALLOC) {
1379 			*bits &= ~EXTENT_FIRST_DELALLOC;
1380 		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1381 			spin_lock(&BTRFS_I(inode)->lock);
1382 			BTRFS_I(inode)->outstanding_extents--;
1383 			spin_unlock(&BTRFS_I(inode)->lock);
1384 		}
1385 
1386 		if (*bits & EXTENT_DO_ACCOUNTING)
1387 			btrfs_delalloc_release_metadata(inode, len);
1388 
1389 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1390 		    && do_list)
1391 			btrfs_free_reserved_data_space(inode, len);
1392 
1393 		spin_lock(&root->fs_info->delalloc_lock);
1394 		root->fs_info->delalloc_bytes -= len;
1395 		BTRFS_I(inode)->delalloc_bytes -= len;
1396 
1397 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1398 		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1399 			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1400 		}
1401 		spin_unlock(&root->fs_info->delalloc_lock);
1402 	}
1403 }
1404 
1405 /*
1406  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1407  * we don't create bios that span stripes or chunks
1408  */
1409 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1410 			 size_t size, struct bio *bio,
1411 			 unsigned long bio_flags)
1412 {
1413 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1414 	struct btrfs_mapping_tree *map_tree;
1415 	u64 logical = (u64)bio->bi_sector << 9;
1416 	u64 length = 0;
1417 	u64 map_length;
1418 	int ret;
1419 
1420 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1421 		return 0;
1422 
1423 	length = bio->bi_size;
1424 	map_tree = &root->fs_info->mapping_tree;
1425 	map_length = length;
1426 	ret = btrfs_map_block(map_tree, READ, logical,
1427 			      &map_length, NULL, 0);
1428 
1429 	if (map_length < length + size)
1430 		return 1;
1431 	return ret;
1432 }
1433 
1434 /*
1435  * in order to insert checksums into the metadata in large chunks,
1436  * we wait until bio submission time.   All the pages in the bio are
1437  * checksummed and sums are attached onto the ordered extent record.
1438  *
1439  * At IO completion time the csums attached to the ordered extent record
1440  * are inserted into the btree
1441  */
1442 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1443 				    struct bio *bio, int mirror_num,
1444 				    unsigned long bio_flags,
1445 				    u64 bio_offset)
1446 {
1447 	struct btrfs_root *root = BTRFS_I(inode)->root;
1448 	int ret = 0;
1449 
1450 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1451 	BUG_ON(ret);
1452 	return 0;
1453 }
1454 
1455 /*
1456  * in order to insert checksums into the metadata in large chunks,
1457  * we wait until bio submission time.  The checksums were attached to
1458  * the ordered extent record by the async __btrfs_submit_bio_start hook.
1459  *
1460  * Here, in the "done" half of that split, the bio is simply mapped to
1461  * the underlying device and submitted.
1462  */
1463 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1464 			  int mirror_num, unsigned long bio_flags,
1465 			  u64 bio_offset)
1466 {
1467 	struct btrfs_root *root = BTRFS_I(inode)->root;
1468 	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1469 }
1470 
1471 /*
1472  * extent_io.c submission hook. This does the right thing for csum calculation
1473  * on write, or reading the csums from the tree before a read
1474  */
1475 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1476 			  int mirror_num, unsigned long bio_flags,
1477 			  u64 bio_offset)
1478 {
1479 	struct btrfs_root *root = BTRFS_I(inode)->root;
1480 	int ret = 0;
1481 	int skip_sum;
1482 
1483 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1484 
1485 	if (btrfs_is_free_space_inode(root, inode))
1486 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1487 	else
1488 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1489 	BUG_ON(ret);
1490 
1491 	if (!(rw & REQ_WRITE)) {
1492 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1493 			return btrfs_submit_compressed_read(inode, bio,
1494 						    mirror_num, bio_flags);
1495 		} else if (!skip_sum) {
1496 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1497 			if (ret)
1498 				return ret;
1499 		}
1500 		goto mapit;
1501 	} else if (!skip_sum) {
1502 		/* csum items have already been cloned */
1503 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1504 			goto mapit;
1505 		/* we're doing a write, do the async checksumming */
1506 		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1507 				   inode, rw, bio, mirror_num,
1508 				   bio_flags, bio_offset,
1509 				   __btrfs_submit_bio_start,
1510 				   __btrfs_submit_bio_done);
1511 	}
1512 
1513 mapit:
1514 	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1515 }
1516 
1517 /*
1518  * given a list of ordered sums, record them in the inode.  This happens
1519  * at IO completion time based on sums calculated at bio submission time.
1520  */
1521 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1522 			     struct inode *inode, u64 file_offset,
1523 			     struct list_head *list)
1524 {
1525 	struct btrfs_ordered_sum *sum;
1526 
1527 	list_for_each_entry(sum, list, list) {
1528 		btrfs_csum_file_blocks(trans,
1529 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1530 	}
1531 	return 0;
1532 }
1533 
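/*
 * Mark a byte range delalloc in the inode's io tree.  The WARN_ON catches a
 * caller passing an 'end' that is page aligned instead of pointing at the
 * last byte of a page.
 */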
1534 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1535 			      struct extent_state **cached_state)
1536 {
1537 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1538 		WARN_ON(1);
1539 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1540 				   cached_state, GFP_NOFS);
1541 }
1542 
1543 /* see btrfs_writepage_start_hook for details on why this is required */
1544 struct btrfs_writepage_fixup {
1545 	struct page *page;
1546 	struct btrfs_work work;
1547 };
1548 
1549 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1550 {
1551 	struct btrfs_writepage_fixup *fixup;
1552 	struct btrfs_ordered_extent *ordered;
1553 	struct extent_state *cached_state = NULL;
1554 	struct page *page;
1555 	struct inode *inode;
1556 	u64 page_start;
1557 	u64 page_end;
1558 	int ret;
1559 
1560 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1561 	page = fixup->page;
1562 again:
1563 	lock_page(page);
1564 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1565 		ClearPageChecked(page);
1566 		goto out_page;
1567 	}
1568 
1569 	inode = page->mapping->host;
1570 	page_start = page_offset(page);
1571 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1572 
1573 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1574 			 &cached_state, GFP_NOFS);
1575 
1576 	/* already ordered? We're done */
1577 	if (PagePrivate2(page))
1578 		goto out;
1579 
1580 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1581 	if (ordered) {
1582 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1583 				     page_end, &cached_state, GFP_NOFS);
1584 		unlock_page(page);
1585 		btrfs_start_ordered_extent(inode, ordered, 1);
1586 		btrfs_put_ordered_extent(ordered);
1587 		goto again;
1588 	}
1589 
1590 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1591 	if (ret) {
1592 		mapping_set_error(page->mapping, ret);
1593 		end_extent_writepage(page, ret, page_start, page_end);
1594 		ClearPageChecked(page);
1595 		goto out;
1596 	 }
1597 
1598 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1599 	ClearPageChecked(page);
1600 	set_page_dirty(page);
1601 out:
1602 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1603 			     &cached_state, GFP_NOFS);
1604 out_page:
1605 	unlock_page(page);
1606 	page_cache_release(page);
1607 	kfree(fixup);
1608 }
1609 
1610 /*
1611  * There are a few paths in the higher layers of the kernel that directly
1612  * set the page dirty bit without asking the filesystem if it is a
1613  * good idea.  This causes problems because we want to make sure COW
1614  * properly happens and the data=ordered rules are followed.
1615  *
1616  * In our case any range that doesn't have the ORDERED bit set
1617  * hasn't been properly setup for IO.  We kick off an async process
1618  * to fix it up.  The async helper will wait for ordered extents, set
1619  * the delalloc bit and make it safe to write the page.
1620  */
1621 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1622 {
1623 	struct inode *inode = page->mapping->host;
1624 	struct btrfs_writepage_fixup *fixup;
1625 	struct btrfs_root *root = BTRFS_I(inode)->root;
1626 
1627 	/* this page is properly in the ordered list */
1628 	if (TestClearPagePrivate2(page))
1629 		return 0;
1630 
1631 	if (PageChecked(page))
1632 		return -EAGAIN;
1633 
1634 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1635 	if (!fixup)
1636 		return -EAGAIN;
1637 
1638 	SetPageChecked(page);
1639 	page_cache_get(page);
1640 	fixup->work.func = btrfs_writepage_fixup_worker;
1641 	fixup->page = page;
1642 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1643 	return -EBUSY;
1644 }
1645 
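/*
 * Insert a file extent item for an extent that was already allocated and
 * reserved by the delalloc path, then record the backref for the new disk
 * extent via btrfs_alloc_reserved_file_extent.
 */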
1646 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1647 				       struct inode *inode, u64 file_pos,
1648 				       u64 disk_bytenr, u64 disk_num_bytes,
1649 				       u64 num_bytes, u64 ram_bytes,
1650 				       u8 compression, u8 encryption,
1651 				       u16 other_encoding, int extent_type)
1652 {
1653 	struct btrfs_root *root = BTRFS_I(inode)->root;
1654 	struct btrfs_file_extent_item *fi;
1655 	struct btrfs_path *path;
1656 	struct extent_buffer *leaf;
1657 	struct btrfs_key ins;
1658 	u64 hint;
1659 	int ret;
1660 
1661 	path = btrfs_alloc_path();
1662 	if (!path)
1663 		return -ENOMEM;
1664 
1665 	path->leave_spinning = 1;
1666 
1667 	/*
1668 	 * we may be replacing one extent in the tree with another.
1669 	 * The new extent is pinned in the extent map, and we don't want
1670 	 * to drop it from the cache until it is completely in the btree.
1671 	 *
1672 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1673 	 * the caller is expected to unpin it and allow it to be merged
1674 	 * with the others.
1675 	 */
1676 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1677 				 &hint, 0);
1678 	BUG_ON(ret);
1679 
1680 	ins.objectid = btrfs_ino(inode);
1681 	ins.offset = file_pos;
1682 	ins.type = BTRFS_EXTENT_DATA_KEY;
1683 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1684 	BUG_ON(ret);
1685 	leaf = path->nodes[0];
1686 	fi = btrfs_item_ptr(leaf, path->slots[0],
1687 			    struct btrfs_file_extent_item);
1688 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1689 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1690 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1691 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1692 	btrfs_set_file_extent_offset(leaf, fi, 0);
1693 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1694 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1695 	btrfs_set_file_extent_compression(leaf, fi, compression);
1696 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1697 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1698 
1699 	btrfs_unlock_up_safe(path, 1);
1700 	btrfs_set_lock_blocking(leaf);
1701 
1702 	btrfs_mark_buffer_dirty(leaf);
1703 
1704 	inode_add_bytes(inode, num_bytes);
1705 
1706 	ins.objectid = disk_bytenr;
1707 	ins.offset = disk_num_bytes;
1708 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1709 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1710 					root->root_key.objectid,
1711 					btrfs_ino(inode), file_pos, &ins);
1712 	BUG_ON(ret);
1713 	btrfs_free_path(path);
1714 
1715 	return 0;
1716 }
1717 
1724 /* as ordered data IO finishes, this gets called so we can finish
1725  * an ordered extent if the range of bytes in the file it covers is
1726  * fully written.
1727  */
1728 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1729 {
1730 	struct btrfs_root *root = BTRFS_I(inode)->root;
1731 	struct btrfs_trans_handle *trans = NULL;
1732 	struct btrfs_ordered_extent *ordered_extent = NULL;
1733 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1734 	struct extent_state *cached_state = NULL;
1735 	int compress_type = 0;
1736 	int ret;
1737 	bool nolock;
1738 
1739 	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1740 					     end - start + 1);
1741 	if (!ret)
1742 		return 0;
1743 	BUG_ON(!ordered_extent);
1744 
1745 	nolock = btrfs_is_free_space_inode(root, inode);
1746 
1747 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1748 		BUG_ON(!list_empty(&ordered_extent->list));
1749 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1750 		if (!ret) {
1751 			if (nolock)
1752 				trans = btrfs_join_transaction_nolock(root);
1753 			else
1754 				trans = btrfs_join_transaction(root);
1755 			BUG_ON(IS_ERR(trans));
1756 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1757 			ret = btrfs_update_inode_fallback(trans, root, inode);
1758 			BUG_ON(ret);
1759 		}
1760 		goto out;
1761 	}
1762 
1763 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1764 			 ordered_extent->file_offset + ordered_extent->len - 1,
1765 			 0, &cached_state, GFP_NOFS);
1766 
1767 	if (nolock)
1768 		trans = btrfs_join_transaction_nolock(root);
1769 	else
1770 		trans = btrfs_join_transaction(root);
1771 	BUG_ON(IS_ERR(trans));
1772 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1773 
1774 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1775 		compress_type = ordered_extent->compress_type;
1776 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1777 		BUG_ON(compress_type);
1778 		ret = btrfs_mark_extent_written(trans, inode,
1779 						ordered_extent->file_offset,
1780 						ordered_extent->file_offset +
1781 						ordered_extent->len);
1782 		BUG_ON(ret);
1783 	} else {
1784 		BUG_ON(root == root->fs_info->tree_root);
1785 		ret = insert_reserved_file_extent(trans, inode,
1786 						ordered_extent->file_offset,
1787 						ordered_extent->start,
1788 						ordered_extent->disk_len,
1789 						ordered_extent->len,
1790 						ordered_extent->len,
1791 						compress_type, 0, 0,
1792 						BTRFS_FILE_EXTENT_REG);
1793 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1794 				   ordered_extent->file_offset,
1795 				   ordered_extent->len);
1796 		BUG_ON(ret);
1797 	}
1798 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1799 			     ordered_extent->file_offset +
1800 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1801 
1802 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1803 			  &ordered_extent->list);
1804 
1805 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1806 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1807 		ret = btrfs_update_inode_fallback(trans, root, inode);
1808 		BUG_ON(ret);
1809 	}
1810 	ret = 0;
1811 out:
1812 	if (root != root->fs_info->tree_root)
1813 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1814 	if (trans) {
1815 		if (nolock)
1816 			btrfs_end_transaction_nolock(trans, root);
1817 		else
1818 			btrfs_end_transaction(trans, root);
1819 	}
1820 
1821 	/* once for us */
1822 	btrfs_put_ordered_extent(ordered_extent);
1823 	/* once for the tree */
1824 	btrfs_put_ordered_extent(ordered_extent);
1825 
1826 	return 0;
1827 }
1828 
1829 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1830 				struct extent_state *state, int uptodate)
1831 {
1832 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1833 
1834 	ClearPagePrivate2(page);
1835 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
1836 }
1837 
1838 /*
1839  * when reads are done, we need to check csums to verify the data is correct.
1840  * If there's a match, we allow the bio to finish.  If not, the code in
1841  * extent_io.c will try to find good copies for us.
1842  */
1843 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1844 			       struct extent_state *state)
1845 {
1846 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1847 	struct inode *inode = page->mapping->host;
1848 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1849 	char *kaddr;
1850 	u64 private = ~(u32)0;
1851 	int ret;
1852 	struct btrfs_root *root = BTRFS_I(inode)->root;
1853 	u32 csum = ~(u32)0;
1854 
1855 	if (PageChecked(page)) {
1856 		ClearPageChecked(page);
1857 		goto good;
1858 	}
1859 
1860 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1861 		goto good;
1862 
1863 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1864 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1865 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1866 				  GFP_NOFS);
1867 		return 0;
1868 	}
1869 
1870 	if (state && state->start == start) {
1871 		private = state->private;
1872 		ret = 0;
1873 	} else {
1874 		ret = get_state_private(io_tree, start, &private);
1875 	}
1876 	kaddr = kmap_atomic(page, KM_USER0);
1877 	if (ret)
1878 		goto zeroit;
1879 
1880 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1881 	btrfs_csum_final(csum, (char *)&csum);
1882 	if (csum != private)
1883 		goto zeroit;
1884 
1885 	kunmap_atomic(kaddr, KM_USER0);
1886 good:
1887 	return 0;
1888 
1889 zeroit:
1890 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
1891 		       "private %llu\n",
1892 		       (unsigned long long)btrfs_ino(page->mapping->host),
1893 		       (unsigned long long)start, csum,
1894 		       (unsigned long long)private);
1895 	memset(kaddr + offset, 1, end - start + 1);
1896 	flush_dcache_page(page);
1897 	kunmap_atomic(kaddr, KM_USER0);
1898 	if (private == 0)
1899 		return 0;
1900 	return -EIO;
1901 }
1902 
1903 struct delayed_iput {
1904 	struct list_head list;
1905 	struct inode *inode;
1906 };
1907 
1908 void btrfs_add_delayed_iput(struct inode *inode)
1909 {
1910 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1911 	struct delayed_iput *delayed;
1912 
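	/* if ours is not the last reference, just drop it now */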
1913 	if (atomic_add_unless(&inode->i_count, -1, 1))
1914 		return;
1915 
1916 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
1917 	delayed->inode = inode;
1918 
1919 	spin_lock(&fs_info->delayed_iput_lock);
1920 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
1921 	spin_unlock(&fs_info->delayed_iput_lock);
1922 }
1923 
1924 void btrfs_run_delayed_iputs(struct btrfs_root *root)
1925 {
1926 	LIST_HEAD(list);
1927 	struct btrfs_fs_info *fs_info = root->fs_info;
1928 	struct delayed_iput *delayed;
1929 	int empty;
1930 
1931 	spin_lock(&fs_info->delayed_iput_lock);
1932 	empty = list_empty(&fs_info->delayed_iputs);
1933 	spin_unlock(&fs_info->delayed_iput_lock);
1934 	if (empty)
1935 		return;
1936 
1937 	down_read(&root->fs_info->cleanup_work_sem);
1938 	spin_lock(&fs_info->delayed_iput_lock);
1939 	list_splice_init(&fs_info->delayed_iputs, &list);
1940 	spin_unlock(&fs_info->delayed_iput_lock);
1941 
1942 	while (!list_empty(&list)) {
1943 		delayed = list_entry(list.next, struct delayed_iput, list);
1944 		list_del(&delayed->list);
1945 		iput(delayed->inode);
1946 		kfree(delayed);
1947 	}
1948 	up_read(&root->fs_info->cleanup_work_sem);
1949 }
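
/*
 * Minimal usage sketch (illustrative only): a context that must not drop
 * the final reference itself hands it to the delayed list, and a later,
 * safe context such as the cleaner thread drains the list:
 *
 *	btrfs_add_delayed_iput(inode);
 *	...
 *	btrfs_run_delayed_iputs(root);
 */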
1950 
1951 enum btrfs_orphan_cleanup_state {
1952 	ORPHAN_CLEANUP_STARTED	= 1,
1953 	ORPHAN_CLEANUP_DONE	= 2,
1954 };
1955 
1956 /*
1957  * This is called at transaction commit time.  If there are no orphan
1958  * files in the subvolume, it removes the orphan item and frees the
1959  * block_rsv structure.
1960  */
1961 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
1962 			      struct btrfs_root *root)
1963 {
1964 	struct btrfs_block_rsv *block_rsv;
1965 	int ret;
1966 
1967 	if (!list_empty(&root->orphan_list) ||
1968 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
1969 		return;
1970 
1971 	spin_lock(&root->orphan_lock);
1972 	if (!list_empty(&root->orphan_list)) {
1973 		spin_unlock(&root->orphan_lock);
1974 		return;
1975 	}
1976 
1977 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
1978 		spin_unlock(&root->orphan_lock);
1979 		return;
1980 	}
1981 
1982 	block_rsv = root->orphan_block_rsv;
1983 	root->orphan_block_rsv = NULL;
1984 	spin_unlock(&root->orphan_lock);
1985 
1986 	if (root->orphan_item_inserted &&
1987 	    btrfs_root_refs(&root->root_item) > 0) {
1988 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
1989 					    root->root_key.objectid);
1990 		BUG_ON(ret);
1991 		root->orphan_item_inserted = 0;
1992 	}
1993 
1994 	if (block_rsv) {
1995 		WARN_ON(block_rsv->size > 0);
1996 		btrfs_free_block_rsv(root, block_rsv);
1997 	}
1998 }
1999 
2000 /*
2001  * This creates an orphan entry for the given inode in case something goes
2002  * wrong in the middle of an unlink/truncate.
2003  *
2004  * NOTE: the caller of this function should reserve 5 units of metadata
2005  *	 for this function.
2006  */
2007 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2008 {
2009 	struct btrfs_root *root = BTRFS_I(inode)->root;
2010 	struct btrfs_block_rsv *block_rsv = NULL;
2011 	int reserve = 0;
2012 	int insert = 0;
2013 	int ret;
2014 
2015 	if (!root->orphan_block_rsv) {
2016 		block_rsv = btrfs_alloc_block_rsv(root);
2017 		if (!block_rsv)
2018 			return -ENOMEM;
2019 	}
2020 
2021 	spin_lock(&root->orphan_lock);
2022 	if (!root->orphan_block_rsv) {
2023 		root->orphan_block_rsv = block_rsv;
2024 	} else if (block_rsv) {
2025 		btrfs_free_block_rsv(root, block_rsv);
2026 		block_rsv = NULL;
2027 	}
2028 
2029 	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2030 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2031 #if 0
2032 		/*
2033 		 * For proper ENOSPC handling, we should do orphan
2034 		 * cleanup when mounting. But this introduces a backward
2035 		 * compatibility issue.
2036 		 */
2037 		if (!xchg(&root->orphan_item_inserted, 1))
2038 			insert = 2;
2039 		else
2040 			insert = 1;
2041 #endif
2042 		insert = 1;
2043 	}
2044 
2045 	if (!BTRFS_I(inode)->orphan_meta_reserved) {
2046 		BTRFS_I(inode)->orphan_meta_reserved = 1;
2047 		reserve = 1;
2048 	}
2049 	spin_unlock(&root->orphan_lock);
2050 
2051 	/* grab metadata reservation from transaction handle */
2052 	if (reserve) {
2053 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2054 		BUG_ON(ret);
2055 	}
2056 
2057 	/* insert an orphan item to track this unlinked/truncated file */
2058 	if (insert >= 1) {
2059 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2060 		BUG_ON(ret && ret != -EEXIST);
2061 	}
2062 
2063 	/* insert an orphan item to track that the subvolume contains orphan files */
2064 	if (insert >= 2) {
2065 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2066 					       root->root_key.objectid);
2067 		BUG_ON(ret);
2068 	}
2069 	return 0;
2070 }
2071 
2072 /*
2073  * We have done the truncate/delete so we can go ahead and remove the orphan
2074  * item for this particular inode.
2075  */
2076 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2077 {
2078 	struct btrfs_root *root = BTRFS_I(inode)->root;
2079 	int delete_item = 0;
2080 	int release_rsv = 0;
2081 	int ret = 0;
2082 
2083 	spin_lock(&root->orphan_lock);
2084 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2085 		list_del_init(&BTRFS_I(inode)->i_orphan);
2086 		delete_item = 1;
2087 	}
2088 
2089 	if (BTRFS_I(inode)->orphan_meta_reserved) {
2090 		BTRFS_I(inode)->orphan_meta_reserved = 0;
2091 		release_rsv = 1;
2092 	}
2093 	spin_unlock(&root->orphan_lock);
2094 
2095 	if (trans && delete_item) {
2096 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2097 		BUG_ON(ret);
2098 	}
2099 
2100 	if (release_rsv)
2101 		btrfs_orphan_release_metadata(inode);
2102 
2103 	return 0;
2104 }
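
/*
 * Rough sketch of the pairing (compare btrfs_unlink() and btrfs_evict_inode()
 * further down); transaction setup and error handling are elided:
 *
 *	ret = btrfs_orphan_add(trans, inode);
 *	... drop the last link or truncate the data ...
 *	ret = btrfs_orphan_del(trans, inode);
 */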
2105 
2106 /*
2107  * this cleans up any orphans that may be left on the list from the last use
2108  * of this root.
2109  */
2110 int btrfs_orphan_cleanup(struct btrfs_root *root)
2111 {
2112 	struct btrfs_path *path;
2113 	struct extent_buffer *leaf;
2114 	struct btrfs_key key, found_key;
2115 	struct btrfs_trans_handle *trans;
2116 	struct inode *inode;
2117 	u64 last_objectid = 0;
2118 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2119 
2120 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2121 		return 0;
2122 
2123 	path = btrfs_alloc_path();
2124 	if (!path) {
2125 		ret = -ENOMEM;
2126 		goto out;
2127 	}
2128 	path->reada = -1;
2129 
2130 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2131 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2132 	key.offset = (u64)-1;
2133 
2134 	while (1) {
2135 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2136 		if (ret < 0)
2137 			goto out;
2138 
2139 		/*
2140 		 * ret == 0 means we found exactly what we were searching for, which
2141 		 * is weird but possible, so only adjust the path if we didn't
2142 		 * find the key, and check whether what we found matches
2143 		 */
2144 		if (ret > 0) {
2145 			ret = 0;
2146 			if (path->slots[0] == 0)
2147 				break;
2148 			path->slots[0]--;
2149 		}
2150 
2151 		/* pull out the item */
2152 		leaf = path->nodes[0];
2153 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2154 
2155 		/* make sure the item matches what we want */
2156 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2157 			break;
2158 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2159 			break;
2160 
2161 		/* release the path since we're done with it */
2162 		btrfs_release_path(path);
2163 
2164 		/*
2165 		 * this is basically btrfs_lookup, minus the crossing-root
2166 		 * handling.  We store the inode number in the
2167 		 * offset of the orphan item.
2168 		 */
2169 
2170 		if (found_key.offset == last_objectid) {
2171 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
2172 			       "stopping orphan cleanup\n");
2173 			ret = -EINVAL;
2174 			goto out;
2175 		}
2176 
2177 		last_objectid = found_key.offset;
2178 
2179 		found_key.objectid = found_key.offset;
2180 		found_key.type = BTRFS_INODE_ITEM_KEY;
2181 		found_key.offset = 0;
2182 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2183 		ret = PTR_RET(inode);
2184 		if (ret && ret != -ESTALE)
2185 			goto out;
2186 
2187 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
2188 			struct btrfs_root *dead_root;
2189 			struct btrfs_fs_info *fs_info = root->fs_info;
2190 			int is_dead_root = 0;
2191 
2192 			/*
2193 			 * this is an orphan in the tree root. Currently these
2194 			 * could come from 2 sources:
2195 			 *  a) a snapshot deletion in progress
2196 			 *  b) a free space cache inode
2197 			 * We need to distinguish those two, as the snapshot
2198 			 * orphan must not get deleted.
2199 			 * find_dead_roots already ran before us, so if this
2200 			 * is a snapshot deletion, we should find the root
2201 			 * in the dead_roots list
2202 			 */
2203 			spin_lock(&fs_info->trans_lock);
2204 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2205 					    root_list) {
2206 				if (dead_root->root_key.objectid ==
2207 				    found_key.objectid) {
2208 					is_dead_root = 1;
2209 					break;
2210 				}
2211 			}
2212 			spin_unlock(&fs_info->trans_lock);
2213 			if (is_dead_root) {
2214 				/* prevent this orphan from being found again */
2215 				key.offset = found_key.objectid - 1;
2216 				continue;
2217 			}
2218 		}
2219 		/*
2220 		 * Inode is already gone but the orphan item is still there,
2221 		 * kill the orphan item.
2222 		 */
2223 		if (ret == -ESTALE) {
2224 			trans = btrfs_start_transaction(root, 1);
2225 			if (IS_ERR(trans)) {
2226 				ret = PTR_ERR(trans);
2227 				goto out;
2228 			}
2229 			ret = btrfs_del_orphan_item(trans, root,
2230 						    found_key.objectid);
2231 			BUG_ON(ret);
2232 			btrfs_end_transaction(trans, root);
2233 			continue;
2234 		}
2235 
2236 		/*
2237 		 * add this inode to the orphan list so btrfs_orphan_del does
2238 		 * the proper thing when we hit it
2239 		 */
2240 		spin_lock(&root->orphan_lock);
2241 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2242 		spin_unlock(&root->orphan_lock);
2243 
2244 		/* if we have links, this was a truncate, let's do that */
2245 		if (inode->i_nlink) {
2246 			if (!S_ISREG(inode->i_mode)) {
2247 				WARN_ON(1);
2248 				iput(inode);
2249 				continue;
2250 			}
2251 			nr_truncate++;
2252 			ret = btrfs_truncate(inode);
2253 		} else {
2254 			nr_unlink++;
2255 		}
2256 
2257 		/* this will do delete_inode and everything for us */
2258 		iput(inode);
2259 		if (ret)
2260 			goto out;
2261 	}
2262 	/* release the path since we're done with it */
2263 	btrfs_release_path(path);
2264 
2265 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2266 
2267 	if (root->orphan_block_rsv)
2268 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2269 					(u64)-1);
2270 
2271 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2272 		trans = btrfs_join_transaction(root);
2273 		if (!IS_ERR(trans))
2274 			btrfs_end_transaction(trans, root);
2275 	}
2276 
2277 	if (nr_unlink)
2278 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2279 	if (nr_truncate)
2280 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2281 
2282 out:
2283 	if (ret)
2284 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2285 	btrfs_free_path(path);
2286 	return ret;
2287 }
2288 
2289 /*
2290  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2291  * don't find any xattrs, we know there can't be any acls.
2292  *
2293  * slot is the slot the inode is in, objectid is the objectid of the inode
2294  */
2295 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2296 					  int slot, u64 objectid)
2297 {
2298 	u32 nritems = btrfs_header_nritems(leaf);
2299 	struct btrfs_key found_key;
2300 	int scanned = 0;
2301 
2302 	slot++;
2303 	while (slot < nritems) {
2304 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2305 
2306 		/* we found a different objectid, there must not be acls */
2307 		if (found_key.objectid != objectid)
2308 			return 0;
2309 
2310 		/* we found an xattr, assume we've got an acl */
2311 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2312 			return 1;
2313 
2314 		/*
2315 		 * we found a key greater than an xattr key, there can't
2316 		 * be any acls later on
2317 		 */
2318 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2319 			return 0;
2320 
2321 		slot++;
2322 		scanned++;
2323 
2324 		/*
2325 		 * it goes inode, inode backrefs, xattrs, extents,
2326 		 * so if there are a ton of hard links to an inode there can
2327 		 * be a lot of backrefs.  Don't waste time searching too hard,
2328 		 * this is just an optimization
2329 		 */
2330 		if (scanned >= 8)
2331 			break;
2332 	}
2333 	/* we hit the end of the leaf before we found an xattr or
2334 	 * something larger than an xattr.  We have to assume the inode
2335 	 * has acls
2336 	 */
2337 	return 1;
2338 }
2339 
2340 /*
2341  * read an inode from the btree into the in-memory inode
2342  */
2343 static void btrfs_read_locked_inode(struct inode *inode)
2344 {
2345 	struct btrfs_path *path;
2346 	struct extent_buffer *leaf;
2347 	struct btrfs_inode_item *inode_item;
2348 	struct btrfs_timespec *tspec;
2349 	struct btrfs_root *root = BTRFS_I(inode)->root;
2350 	struct btrfs_key location;
2351 	int maybe_acls;
2352 	u32 rdev;
2353 	int ret;
2354 	bool filled = false;
2355 
2356 	ret = btrfs_fill_inode(inode, &rdev);
2357 	if (!ret)
2358 		filled = true;
2359 
2360 	path = btrfs_alloc_path();
2361 	if (!path)
2362 		goto make_bad;
2363 
2364 	path->leave_spinning = 1;
2365 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2366 
2367 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2368 	if (ret)
2369 		goto make_bad;
2370 
2371 	leaf = path->nodes[0];
2372 
2373 	if (filled)
2374 		goto cache_acl;
2375 
2376 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2377 				    struct btrfs_inode_item);
2378 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2379 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2380 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2381 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2382 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2383 
2384 	tspec = btrfs_inode_atime(inode_item);
2385 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2386 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2387 
2388 	tspec = btrfs_inode_mtime(inode_item);
2389 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2390 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2391 
2392 	tspec = btrfs_inode_ctime(inode_item);
2393 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2394 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2395 
2396 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2397 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2398 	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2399 	inode->i_generation = BTRFS_I(inode)->generation;
2400 	inode->i_rdev = 0;
2401 	rdev = btrfs_inode_rdev(leaf, inode_item);
2402 
2403 	BTRFS_I(inode)->index_cnt = (u64)-1;
2404 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2405 cache_acl:
2406 	/*
2407 	 * try to precache a NULL acl entry for files that don't have
2408 	 * any xattrs or acls
2409 	 */
2410 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2411 					   btrfs_ino(inode));
2412 	if (!maybe_acls)
2413 		cache_no_acl(inode);
2414 
2415 	btrfs_free_path(path);
2416 
2417 	switch (inode->i_mode & S_IFMT) {
2418 	case S_IFREG:
2419 		inode->i_mapping->a_ops = &btrfs_aops;
2420 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2421 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2422 		inode->i_fop = &btrfs_file_operations;
2423 		inode->i_op = &btrfs_file_inode_operations;
2424 		break;
2425 	case S_IFDIR:
2426 		inode->i_fop = &btrfs_dir_file_operations;
2427 		if (root == root->fs_info->tree_root)
2428 			inode->i_op = &btrfs_dir_ro_inode_operations;
2429 		else
2430 			inode->i_op = &btrfs_dir_inode_operations;
2431 		break;
2432 	case S_IFLNK:
2433 		inode->i_op = &btrfs_symlink_inode_operations;
2434 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2435 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2436 		break;
2437 	default:
2438 		inode->i_op = &btrfs_special_inode_operations;
2439 		init_special_inode(inode, inode->i_mode, rdev);
2440 		break;
2441 	}
2442 
2443 	btrfs_update_iflags(inode);
2444 	return;
2445 
2446 make_bad:
2447 	btrfs_free_path(path);
2448 	make_bad_inode(inode);
2449 }
2450 
2451 /*
2452  * given a leaf and an inode, copy the inode fields into the leaf
2453  */
2454 static void fill_inode_item(struct btrfs_trans_handle *trans,
2455 			    struct extent_buffer *leaf,
2456 			    struct btrfs_inode_item *item,
2457 			    struct inode *inode)
2458 {
2459 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2460 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2461 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2462 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2463 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2464 
2465 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2466 			       inode->i_atime.tv_sec);
2467 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2468 				inode->i_atime.tv_nsec);
2469 
2470 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2471 			       inode->i_mtime.tv_sec);
2472 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2473 				inode->i_mtime.tv_nsec);
2474 
2475 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2476 			       inode->i_ctime.tv_sec);
2477 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2478 				inode->i_ctime.tv_nsec);
2479 
2480 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2481 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2482 	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2483 	btrfs_set_inode_transid(leaf, item, trans->transid);
2484 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2485 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2486 	btrfs_set_inode_block_group(leaf, item, 0);
2487 }
2488 
2489 /*
2490  * copy everything in the in-memory inode into the btree.
2491  */
2492 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2493 				struct btrfs_root *root, struct inode *inode)
2494 {
2495 	struct btrfs_inode_item *inode_item;
2496 	struct btrfs_path *path;
2497 	struct extent_buffer *leaf;
2498 	int ret;
2499 
2500 	path = btrfs_alloc_path();
2501 	if (!path)
2502 		return -ENOMEM;
2503 
2504 	path->leave_spinning = 1;
2505 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2506 				 1);
2507 	if (ret) {
2508 		if (ret > 0)
2509 			ret = -ENOENT;
2510 		goto failed;
2511 	}
2512 
2513 	btrfs_unlock_up_safe(path, 1);
2514 	leaf = path->nodes[0];
2515 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2516 				    struct btrfs_inode_item);
2517 
2518 	fill_inode_item(trans, leaf, inode_item, inode);
2519 	btrfs_mark_buffer_dirty(leaf);
2520 	btrfs_set_inode_last_trans(trans, inode);
2521 	ret = 0;
2522 failed:
2523 	btrfs_free_path(path);
2524 	return ret;
2525 }
2526 
2527 /*
2528  * copy everything in the in-memory inode into the btree.
2529  */
2530 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2531 				struct btrfs_root *root, struct inode *inode)
2532 {
2533 	int ret;
2534 
2535 	/*
2536 	 * If the inode is a free space inode, we can deadlock during commit
2537 	 * if we put it into the delayed code.
2538 	 *
2539 	 * The data relocation inode should also be directly updated
2540 	 * without delay
2541 	 */
2542 	if (!btrfs_is_free_space_inode(root, inode)
2543 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2544 		ret = btrfs_delayed_update_inode(trans, root, inode);
2545 		if (!ret)
2546 			btrfs_set_inode_last_trans(trans, inode);
2547 		return ret;
2548 	}
2549 
2550 	return btrfs_update_inode_item(trans, root, inode);
2551 }
2552 
2553 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2554 				struct btrfs_root *root, struct inode *inode)
2555 {
2556 	int ret;
2557 
2558 	ret = btrfs_update_inode(trans, root, inode);
2559 	if (ret == -ENOSPC)
2560 		return btrfs_update_inode_item(trans, root, inode);
2561 	return ret;
2562 }
2563 
2564 /*
2565  * unlink helper that gets used here in inode.c and in the tree logging
2566  * recovery code.  It remove a link in a directory with a given name, and
2567  * also drops the back refs in the inode to the directory
2568  */
2569 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2570 				struct btrfs_root *root,
2571 				struct inode *dir, struct inode *inode,
2572 				const char *name, int name_len)
2573 {
2574 	struct btrfs_path *path;
2575 	int ret = 0;
2576 	struct extent_buffer *leaf;
2577 	struct btrfs_dir_item *di;
2578 	struct btrfs_key key;
2579 	u64 index;
2580 	u64 ino = btrfs_ino(inode);
2581 	u64 dir_ino = btrfs_ino(dir);
2582 
2583 	path = btrfs_alloc_path();
2584 	if (!path) {
2585 		ret = -ENOMEM;
2586 		goto out;
2587 	}
2588 
2589 	path->leave_spinning = 1;
2590 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2591 				    name, name_len, -1);
2592 	if (IS_ERR(di)) {
2593 		ret = PTR_ERR(di);
2594 		goto err;
2595 	}
2596 	if (!di) {
2597 		ret = -ENOENT;
2598 		goto err;
2599 	}
2600 	leaf = path->nodes[0];
2601 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2602 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2603 	if (ret)
2604 		goto err;
2605 	btrfs_release_path(path);
2606 
2607 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2608 				  dir_ino, &index);
2609 	if (ret) {
2610 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2611 		       "inode %llu parent %llu\n", name_len, name,
2612 		       (unsigned long long)ino, (unsigned long long)dir_ino);
2613 		goto err;
2614 	}
2615 
2616 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2617 	if (ret)
2618 		goto err;
2619 
2620 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2621 					 inode, dir_ino);
2622 	BUG_ON(ret != 0 && ret != -ENOENT);
2623 
2624 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2625 					   dir, index);
2626 	if (ret == -ENOENT)
2627 		ret = 0;
2628 err:
2629 	btrfs_free_path(path);
2630 	if (ret)
2631 		goto out;
2632 
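	/* each name is counted twice in the dir size: dir item + dir index item */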
2633 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2634 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2635 	btrfs_update_inode(trans, root, dir);
2636 out:
2637 	return ret;
2638 }
2639 
2640 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2641 		       struct btrfs_root *root,
2642 		       struct inode *dir, struct inode *inode,
2643 		       const char *name, int name_len)
2644 {
2645 	int ret;
2646 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2647 	if (!ret) {
2648 		btrfs_drop_nlink(inode);
2649 		ret = btrfs_update_inode(trans, root, inode);
2650 	}
2651 	return ret;
2652 }
2653 
2654 
2655 /* helper to check if there is any shared block in the path */
2656 static int check_path_shared(struct btrfs_root *root,
2657 			     struct btrfs_path *path)
2658 {
2659 	struct extent_buffer *eb;
2660 	int level;
2661 	u64 refs = 1;
2662 
2663 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2664 		int ret;
2665 
2666 		if (!path->nodes[level])
2667 			break;
2668 		eb = path->nodes[level];
2669 		if (!btrfs_block_can_be_shared(root, eb))
2670 			continue;
2671 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2672 					       &refs, NULL);
2673 		if (refs > 1)
2674 			return 1;
2675 	}
2676 	return 0;
2677 }
2678 
2679 /*
2680  * helper to start transaction for unlink and rmdir.
2681  *
2682  * unlink and rmdir are special in btrfs, they do not always free space.
2683  * so in enospc case, we should make sure they will free space before
2684  * allowing them to use the global metadata reservation.
2685  */
2686 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2687 						       struct dentry *dentry)
2688 {
2689 	struct btrfs_trans_handle *trans;
2690 	struct btrfs_root *root = BTRFS_I(dir)->root;
2691 	struct btrfs_path *path;
2692 	struct btrfs_inode_ref *ref;
2693 	struct btrfs_dir_item *di;
2694 	struct inode *inode = dentry->d_inode;
2695 	u64 index;
2696 	int check_link = 1;
2697 	int err = -ENOSPC;
2698 	int ret;
2699 	u64 ino = btrfs_ino(inode);
2700 	u64 dir_ino = btrfs_ino(dir);
2701 
2702 	/*
2703 	 * 1 for the possible orphan item
2704 	 * 1 for the dir item
2705 	 * 1 for the dir index
2706 	 * 1 for the inode ref
2707 	 * 1 for the inode ref in the tree log
2708 	 * 2 for the dir entries in the log
2709 	 * 1 for the inode
2710 	 */
2711 	trans = btrfs_start_transaction(root, 8);
2712 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2713 		return trans;
2714 
2715 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2716 		return ERR_PTR(-ENOSPC);
2717 
2718 	/* check if someone else holds a reference */
2719 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2720 		return ERR_PTR(-ENOSPC);
2721 
2722 	if (atomic_read(&inode->i_count) > 2)
2723 		return ERR_PTR(-ENOSPC);
2724 
2725 	if (xchg(&root->fs_info->enospc_unlink, 1))
2726 		return ERR_PTR(-ENOSPC);
2727 
2728 	path = btrfs_alloc_path();
2729 	if (!path) {
2730 		root->fs_info->enospc_unlink = 0;
2731 		return ERR_PTR(-ENOMEM);
2732 	}
2733 
2734 	/* 1 for the orphan item */
2735 	trans = btrfs_start_transaction(root, 1);
2736 	if (IS_ERR(trans)) {
2737 		btrfs_free_path(path);
2738 		root->fs_info->enospc_unlink = 0;
2739 		return trans;
2740 	}
2741 
2742 	path->skip_locking = 1;
2743 	path->search_commit_root = 1;
2744 
2745 	ret = btrfs_lookup_inode(trans, root, path,
2746 				&BTRFS_I(dir)->location, 0);
2747 	if (ret < 0) {
2748 		err = ret;
2749 		goto out;
2750 	}
2751 	if (ret == 0) {
2752 		if (check_path_shared(root, path))
2753 			goto out;
2754 	} else {
2755 		check_link = 0;
2756 	}
2757 	btrfs_release_path(path);
2758 
2759 	ret = btrfs_lookup_inode(trans, root, path,
2760 				&BTRFS_I(inode)->location, 0);
2761 	if (ret < 0) {
2762 		err = ret;
2763 		goto out;
2764 	}
2765 	if (ret == 0) {
2766 		if (check_path_shared(root, path))
2767 			goto out;
2768 	} else {
2769 		check_link = 0;
2770 	}
2771 	btrfs_release_path(path);
2772 
2773 	if (ret == 0 && S_ISREG(inode->i_mode)) {
2774 		ret = btrfs_lookup_file_extent(trans, root, path,
2775 					       ino, (u64)-1, 0);
2776 		if (ret < 0) {
2777 			err = ret;
2778 			goto out;
2779 		}
2780 		BUG_ON(ret == 0);
2781 		if (check_path_shared(root, path))
2782 			goto out;
2783 		btrfs_release_path(path);
2784 	}
2785 
2786 	if (!check_link) {
2787 		err = 0;
2788 		goto out;
2789 	}
2790 
2791 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2792 				dentry->d_name.name, dentry->d_name.len, 0);
2793 	if (IS_ERR(di)) {
2794 		err = PTR_ERR(di);
2795 		goto out;
2796 	}
2797 	if (di) {
2798 		if (check_path_shared(root, path))
2799 			goto out;
2800 	} else {
2801 		err = 0;
2802 		goto out;
2803 	}
2804 	btrfs_release_path(path);
2805 
2806 	ref = btrfs_lookup_inode_ref(trans, root, path,
2807 				dentry->d_name.name, dentry->d_name.len,
2808 				ino, dir_ino, 0);
2809 	if (IS_ERR(ref)) {
2810 		err = PTR_ERR(ref);
2811 		goto out;
2812 	}
2813 	BUG_ON(!ref);
2814 	if (check_path_shared(root, path))
2815 		goto out;
2816 	index = btrfs_inode_ref_index(path->nodes[0], ref);
2817 	btrfs_release_path(path);
2818 
2819 	/*
2820 	 * This is a commit root search; if we can look up the inode item and
2821 	 * other related items in the commit root, it means the transaction of
2822 	 * dir/file creation has been committed, and the dir index item that we
2823 	 * delayed inserting has also been inserted into the commit root. So
2824 	 * we needn't worry about the delayed insertion of the dir index item
2825 	 * here.
2826 	 */
2827 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2828 				dentry->d_name.name, dentry->d_name.len, 0);
2829 	if (IS_ERR(di)) {
2830 		err = PTR_ERR(di);
2831 		goto out;
2832 	}
2833 	BUG_ON(ret == -ENOENT);
2834 	if (check_path_shared(root, path))
2835 		goto out;
2836 
2837 	err = 0;
2838 out:
2839 	btrfs_free_path(path);
2840 	/* Migrate the orphan reservation over */
2841 	if (!err)
2842 		err = btrfs_block_rsv_migrate(trans->block_rsv,
2843 				&root->fs_info->global_block_rsv,
2844 				trans->bytes_reserved);
2845 
2846 	if (err) {
2847 		btrfs_end_transaction(trans, root);
2848 		root->fs_info->enospc_unlink = 0;
2849 		return ERR_PTR(err);
2850 	}
2851 
2852 	trans->block_rsv = &root->fs_info->global_block_rsv;
2853 	return trans;
2854 }
2855 
2856 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
2857 			       struct btrfs_root *root)
2858 {
2859 	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
2860 		btrfs_block_rsv_release(root, trans->block_rsv,
2861 					trans->bytes_reserved);
2862 		trans->block_rsv = &root->fs_info->trans_block_rsv;
2863 		BUG_ON(!root->fs_info->enospc_unlink);
2864 		root->fs_info->enospc_unlink = 0;
2865 	}
2866 	btrfs_end_transaction(trans, root);
2867 }
2868 
2869 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2870 {
2871 	struct btrfs_root *root = BTRFS_I(dir)->root;
2872 	struct btrfs_trans_handle *trans;
2873 	struct inode *inode = dentry->d_inode;
2874 	int ret;
2875 	unsigned long nr = 0;
2876 
2877 	trans = __unlink_start_trans(dir, dentry);
2878 	if (IS_ERR(trans))
2879 		return PTR_ERR(trans);
2880 
2881 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2882 
2883 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2884 				 dentry->d_name.name, dentry->d_name.len);
2885 	if (ret)
2886 		goto out;
2887 
2888 	if (inode->i_nlink == 0) {
2889 		ret = btrfs_orphan_add(trans, inode);
2890 		if (ret)
2891 			goto out;
2892 	}
2893 
2894 out:
2895 	nr = trans->blocks_used;
2896 	__unlink_end_trans(trans, root);
2897 	btrfs_btree_balance_dirty(root, nr);
2898 	return ret;
2899 }
2900 
2901 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2902 			struct btrfs_root *root,
2903 			struct inode *dir, u64 objectid,
2904 			const char *name, int name_len)
2905 {
2906 	struct btrfs_path *path;
2907 	struct extent_buffer *leaf;
2908 	struct btrfs_dir_item *di;
2909 	struct btrfs_key key;
2910 	u64 index;
2911 	int ret;
2912 	u64 dir_ino = btrfs_ino(dir);
2913 
2914 	path = btrfs_alloc_path();
2915 	if (!path)
2916 		return -ENOMEM;
2917 
2918 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2919 				   name, name_len, -1);
2920 	BUG_ON(IS_ERR_OR_NULL(di));
2921 
2922 	leaf = path->nodes[0];
2923 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2924 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2925 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2926 	BUG_ON(ret);
2927 	btrfs_release_path(path);
2928 
2929 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2930 				 objectid, root->root_key.objectid,
2931 				 dir_ino, &index, name, name_len);
2932 	if (ret < 0) {
2933 		BUG_ON(ret != -ENOENT);
2934 		di = btrfs_search_dir_index_item(root, path, dir_ino,
2935 						 name, name_len);
2936 		BUG_ON(IS_ERR_OR_NULL(di));
2937 
2938 		leaf = path->nodes[0];
2939 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2940 		btrfs_release_path(path);
2941 		index = key.offset;
2942 	}
2943 	btrfs_release_path(path);
2944 
2945 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2946 	BUG_ON(ret);
2947 
2948 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2949 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2950 	ret = btrfs_update_inode(trans, root, dir);
2951 	BUG_ON(ret);
2952 
2953 	btrfs_free_path(path);
2954 	return 0;
2955 }
2956 
2957 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2958 {
2959 	struct inode *inode = dentry->d_inode;
2960 	int err = 0;
2961 	struct btrfs_root *root = BTRFS_I(dir)->root;
2962 	struct btrfs_trans_handle *trans;
2963 	unsigned long nr = 0;
2964 
2965 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2966 	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
2967 		return -ENOTEMPTY;
2968 
2969 	trans = __unlink_start_trans(dir, dentry);
2970 	if (IS_ERR(trans))
2971 		return PTR_ERR(trans);
2972 
2973 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2974 		err = btrfs_unlink_subvol(trans, root, dir,
2975 					  BTRFS_I(inode)->location.objectid,
2976 					  dentry->d_name.name,
2977 					  dentry->d_name.len);
2978 		goto out;
2979 	}
2980 
2981 	err = btrfs_orphan_add(trans, inode);
2982 	if (err)
2983 		goto out;
2984 
2985 	/* now the directory is empty */
2986 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2987 				 dentry->d_name.name, dentry->d_name.len);
2988 	if (!err)
2989 		btrfs_i_size_write(inode, 0);
2990 out:
2991 	nr = trans->blocks_used;
2992 	__unlink_end_trans(trans, root);
2993 	btrfs_btree_balance_dirty(root, nr);
2994 
2995 	return err;
2996 }
2997 
2998 /*
2999  * this can truncate away extent items, csum items and directory items.
3000  * It starts at a high offset and removes keys until it can't find
3001  * any higher than new_size
3002  *
3003  * csum items that cross the new i_size are truncated to the new size
3004  * as well.
3005  *
3006  * min_type is the minimum key type to truncate down to.  If set to 0, this
3007  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3008  */
3009 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3010 			       struct btrfs_root *root,
3011 			       struct inode *inode,
3012 			       u64 new_size, u32 min_type)
3013 {
3014 	struct btrfs_path *path;
3015 	struct extent_buffer *leaf;
3016 	struct btrfs_file_extent_item *fi;
3017 	struct btrfs_key key;
3018 	struct btrfs_key found_key;
3019 	u64 extent_start = 0;
3020 	u64 extent_num_bytes = 0;
3021 	u64 extent_offset = 0;
3022 	u64 item_end = 0;
3023 	u64 mask = root->sectorsize - 1;
3024 	u32 found_type = (u8)-1;
3025 	int found_extent;
3026 	int del_item;
3027 	int pending_del_nr = 0;
3028 	int pending_del_slot = 0;
3029 	int extent_type = -1;
3030 	int ret;
3031 	int err = 0;
3032 	u64 ino = btrfs_ino(inode);
3033 
3034 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3035 
3036 	path = btrfs_alloc_path();
3037 	if (!path)
3038 		return -ENOMEM;
3039 	path->reada = -1;
3040 
3041 	if (root->ref_cows || root == root->fs_info->tree_root)
3042 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3043 
3044 	/*
3045 	 * This function is also used to drop the items in the log tree before
3046 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3047 	 * it is used to drop the logged items. So we shouldn't kill the delayed
3048 	 * items.
3049 	 */
3050 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3051 		btrfs_kill_delayed_inode_items(inode);
3052 
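	/* start from the largest possible key for this inode and walk backwards */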
3053 	key.objectid = ino;
3054 	key.offset = (u64)-1;
3055 	key.type = (u8)-1;
3056 
3057 search_again:
3058 	path->leave_spinning = 1;
3059 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3060 	if (ret < 0) {
3061 		err = ret;
3062 		goto out;
3063 	}
3064 
3065 	if (ret > 0) {
3066 		/* there are no items in the tree for us to truncate, we're
3067 		 * done
3068 		 */
3069 		if (path->slots[0] == 0)
3070 			goto out;
3071 		path->slots[0]--;
3072 	}
3073 
3074 	while (1) {
3075 		fi = NULL;
3076 		leaf = path->nodes[0];
3077 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3078 		found_type = btrfs_key_type(&found_key);
3079 
3080 		if (found_key.objectid != ino)
3081 			break;
3082 
3083 		if (found_type < min_type)
3084 			break;
3085 
3086 		item_end = found_key.offset;
3087 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3088 			fi = btrfs_item_ptr(leaf, path->slots[0],
3089 					    struct btrfs_file_extent_item);
3090 			extent_type = btrfs_file_extent_type(leaf, fi);
3091 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3092 				item_end +=
3093 				    btrfs_file_extent_num_bytes(leaf, fi);
3094 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3095 				item_end += btrfs_file_extent_inline_len(leaf,
3096 									 fi);
3097 			}
3098 			item_end--;
3099 		}
3100 		if (found_type > min_type) {
3101 			del_item = 1;
3102 		} else {
3103 			if (item_end < new_size)
3104 				break;
3105 			if (found_key.offset >= new_size)
3106 				del_item = 1;
3107 			else
3108 				del_item = 0;
3109 		}
3110 		found_extent = 0;
3111 		/* FIXME, shrink the extent if the ref count is only 1 */
3112 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3113 			goto delete;
3114 
3115 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3116 			u64 num_dec;
3117 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3118 			if (!del_item) {
3119 				u64 orig_num_bytes =
3120 					btrfs_file_extent_num_bytes(leaf, fi);
3121 				extent_num_bytes = new_size -
3122 					found_key.offset + root->sectorsize - 1;
3123 				extent_num_bytes = extent_num_bytes &
3124 					~((u64)root->sectorsize - 1);
3125 				btrfs_set_file_extent_num_bytes(leaf, fi,
3126 							 extent_num_bytes);
3127 				num_dec = (orig_num_bytes -
3128 					   extent_num_bytes);
3129 				if (root->ref_cows && extent_start != 0)
3130 					inode_sub_bytes(inode, num_dec);
3131 				btrfs_mark_buffer_dirty(leaf);
3132 			} else {
3133 				extent_num_bytes =
3134 					btrfs_file_extent_disk_num_bytes(leaf,
3135 									 fi);
3136 				extent_offset = found_key.offset -
3137 					btrfs_file_extent_offset(leaf, fi);
3138 
3139 				/* FIXME blocksize != 4096 */
3140 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3141 				if (extent_start != 0) {
3142 					found_extent = 1;
3143 					if (root->ref_cows)
3144 						inode_sub_bytes(inode, num_dec);
3145 				}
3146 			}
3147 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3148 			/*
3149 			 * we can't truncate inline items that have had
3150 			 * special encodings
3151 			 */
3152 			if (!del_item &&
3153 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3154 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3155 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3156 				u32 size = new_size - found_key.offset;
3157 
3158 				if (root->ref_cows) {
3159 					inode_sub_bytes(inode, item_end + 1 -
3160 							new_size);
3161 				}
3162 				size =
3163 				    btrfs_file_extent_calc_inline_size(size);
3164 				ret = btrfs_truncate_item(trans, root, path,
3165 							  size, 1);
3166 			} else if (root->ref_cows) {
3167 				inode_sub_bytes(inode, item_end + 1 -
3168 						found_key.offset);
3169 			}
3170 		}
3171 delete:
3172 		if (del_item) {
3173 			if (!pending_del_nr) {
3174 				/* no pending yet, add ourselves */
3175 				pending_del_slot = path->slots[0];
3176 				pending_del_nr = 1;
3177 			} else if (pending_del_nr &&
3178 				   path->slots[0] + 1 == pending_del_slot) {
3179 				/* hop on the pending chunk */
3180 				pending_del_nr++;
3181 				pending_del_slot = path->slots[0];
3182 			} else {
3183 				BUG();
3184 			}
3185 		} else {
3186 			break;
3187 		}
3188 		if (found_extent && (root->ref_cows ||
3189 				     root == root->fs_info->tree_root)) {
3190 			btrfs_set_path_blocking(path);
3191 			ret = btrfs_free_extent(trans, root, extent_start,
3192 						extent_num_bytes, 0,
3193 						btrfs_header_owner(leaf),
3194 						ino, extent_offset, 0);
3195 			BUG_ON(ret);
3196 		}
3197 
3198 		if (found_type == BTRFS_INODE_ITEM_KEY)
3199 			break;
3200 
3201 		if (path->slots[0] == 0 ||
3202 		    path->slots[0] != pending_del_slot) {
3203 			if (root->ref_cows &&
3204 			    BTRFS_I(inode)->location.objectid !=
3205 						BTRFS_FREE_INO_OBJECTID) {
3206 				err = -EAGAIN;
3207 				goto out;
3208 			}
3209 			if (pending_del_nr) {
3210 				ret = btrfs_del_items(trans, root, path,
3211 						pending_del_slot,
3212 						pending_del_nr);
3213 				BUG_ON(ret);
3214 				pending_del_nr = 0;
3215 			}
3216 			btrfs_release_path(path);
3217 			goto search_again;
3218 		} else {
3219 			path->slots[0]--;
3220 		}
3221 	}
3222 out:
3223 	if (pending_del_nr) {
3224 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3225 				      pending_del_nr);
3226 		BUG_ON(ret);
3227 	}
3228 	btrfs_free_path(path);
3229 	return err;
3230 }
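
/*
 * Illustrative sketch of how callers drive the function above, restarting
 * the transaction whenever -EAGAIN is returned (compare the loop in
 * btrfs_evict_inode() below); "rsv" stands for whatever reservation the
 * caller set up, and error handling is elided:
 *
 *	while (1) {
 *		trans = btrfs_start_transaction(root, 0);
 *		trans->block_rsv = rsv;
 *		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
 *		if (ret != -EAGAIN)
 *			break;
 *		btrfs_end_transaction(trans, root);
 *	}
 */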
3231 
3232 /*
3233  * taken from block_truncate_page, but does cow as it zeros out
3234  * any bytes left in the last page in the file.
3235  */
3236 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3237 {
3238 	struct inode *inode = mapping->host;
3239 	struct btrfs_root *root = BTRFS_I(inode)->root;
3240 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3241 	struct btrfs_ordered_extent *ordered;
3242 	struct extent_state *cached_state = NULL;
3243 	char *kaddr;
3244 	u32 blocksize = root->sectorsize;
3245 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3246 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3247 	struct page *page;
3248 	gfp_t mask = btrfs_alloc_write_mask(mapping);
3249 	int ret = 0;
3250 	u64 page_start;
3251 	u64 page_end;
3252 
3253 	if ((offset & (blocksize - 1)) == 0)
3254 		goto out;
3255 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3256 	if (ret)
3257 		goto out;
3258 
3259 	ret = -ENOMEM;
3260 again:
3261 	page = find_or_create_page(mapping, index, mask);
3262 	if (!page) {
3263 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3264 		goto out;
3265 	}
3266 
3267 	page_start = page_offset(page);
3268 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3269 
3270 	if (!PageUptodate(page)) {
3271 		ret = btrfs_readpage(NULL, page);
3272 		lock_page(page);
3273 		if (page->mapping != mapping) {
3274 			unlock_page(page);
3275 			page_cache_release(page);
3276 			goto again;
3277 		}
3278 		if (!PageUptodate(page)) {
3279 			ret = -EIO;
3280 			goto out_unlock;
3281 		}
3282 	}
3283 	wait_on_page_writeback(page);
3284 
3285 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3286 			 GFP_NOFS);
3287 	set_page_extent_mapped(page);
3288 
3289 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3290 	if (ordered) {
3291 		unlock_extent_cached(io_tree, page_start, page_end,
3292 				     &cached_state, GFP_NOFS);
3293 		unlock_page(page);
3294 		page_cache_release(page);
3295 		btrfs_start_ordered_extent(inode, ordered, 1);
3296 		btrfs_put_ordered_extent(ordered);
3297 		goto again;
3298 	}
3299 
3300 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3301 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3302 			  0, 0, &cached_state, GFP_NOFS);
3303 
3304 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3305 					&cached_state);
3306 	if (ret) {
3307 		unlock_extent_cached(io_tree, page_start, page_end,
3308 				     &cached_state, GFP_NOFS);
3309 		goto out_unlock;
3310 	}
3311 
3312 	ret = 0;
3313 	if (offset != PAGE_CACHE_SIZE) {
3314 		kaddr = kmap(page);
3315 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3316 		flush_dcache_page(page);
3317 		kunmap(page);
3318 	}
3319 	ClearPageChecked(page);
3320 	set_page_dirty(page);
3321 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3322 			     GFP_NOFS);
3323 
3324 out_unlock:
3325 	if (ret)
3326 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3327 	unlock_page(page);
3328 	page_cache_release(page);
3329 out:
3330 	return ret;
3331 }
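
/*
 * Hypothetical caller of the function above: when shrinking a file to an
 * unaligned size, the tail of the final block is zeroed first so stale
 * data beyond the new EOF never becomes visible (the function is a no-op
 * for block-aligned offsets):
 *
 *	ret = btrfs_truncate_page(inode->i_mapping, newsize);
 */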
3332 
3333 /*
3334  * This function puts in dummy file extents for the area we're creating a hole
3335  * for.  So if we are truncating this file to a larger size we need to insert
3336  * these file extents so that btrfs_get_extent will return EXTENT_MAP_HOLE for
3337  * the range between oldsize and size.
3338  */
3339 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3340 {
3341 	struct btrfs_trans_handle *trans;
3342 	struct btrfs_root *root = BTRFS_I(inode)->root;
3343 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3344 	struct extent_map *em = NULL;
3345 	struct extent_state *cached_state = NULL;
3346 	u64 mask = root->sectorsize - 1;
3347 	u64 hole_start = (oldsize + mask) & ~mask;
3348 	u64 block_end = (size + mask) & ~mask;
3349 	u64 last_byte;
3350 	u64 cur_offset;
3351 	u64 hole_size;
3352 	int err = 0;
3353 
3354 	if (size <= hole_start)
3355 		return 0;
3356 
3357 	while (1) {
3358 		struct btrfs_ordered_extent *ordered;
3359 		btrfs_wait_ordered_range(inode, hole_start,
3360 					 block_end - hole_start);
3361 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3362 				 &cached_state, GFP_NOFS);
3363 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3364 		if (!ordered)
3365 			break;
3366 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3367 				     &cached_state, GFP_NOFS);
3368 		btrfs_put_ordered_extent(ordered);
3369 	}
3370 
3371 	cur_offset = hole_start;
3372 	while (1) {
3373 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3374 				block_end - cur_offset, 0);
3375 		BUG_ON(IS_ERR_OR_NULL(em));
3376 		last_byte = min(extent_map_end(em), block_end);
3377 		last_byte = (last_byte + mask) & ~mask;
3378 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3379 			u64 hint_byte = 0;
3380 			hole_size = last_byte - cur_offset;
3381 
3382 			trans = btrfs_start_transaction(root, 3);
3383 			if (IS_ERR(trans)) {
3384 				err = PTR_ERR(trans);
3385 				break;
3386 			}
3387 
3388 			err = btrfs_drop_extents(trans, inode, cur_offset,
3389 						 cur_offset + hole_size,
3390 						 &hint_byte, 1);
3391 			if (err) {
3392 				btrfs_update_inode(trans, root, inode);
3393 				btrfs_end_transaction(trans, root);
3394 				break;
3395 			}
3396 
3397 			err = btrfs_insert_file_extent(trans, root,
3398 					btrfs_ino(inode), cur_offset, 0,
3399 					0, hole_size, 0, hole_size,
3400 					0, 0, 0);
3401 			if (err) {
3402 				btrfs_update_inode(trans, root, inode);
3403 				btrfs_end_transaction(trans, root);
3404 				break;
3405 			}
3406 
3407 			btrfs_drop_extent_cache(inode, hole_start,
3408 					last_byte - 1, 0);
3409 
3410 			btrfs_update_inode(trans, root, inode);
3411 			btrfs_end_transaction(trans, root);
3412 		}
3413 		free_extent_map(em);
3414 		em = NULL;
3415 		cur_offset = last_byte;
3416 		if (cur_offset >= block_end)
3417 			break;
3418 	}
3419 
3420 	free_extent_map(em);
3421 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3422 			     GFP_NOFS);
3423 	return err;
3424 }
3425 
3426 static int btrfs_setsize(struct inode *inode, loff_t newsize)
3427 {
3428 	struct btrfs_root *root = BTRFS_I(inode)->root;
3429 	struct btrfs_trans_handle *trans;
3430 	loff_t oldsize = i_size_read(inode);
3431 	int ret;
3432 
3433 	if (newsize == oldsize)
3434 		return 0;
3435 
3436 	if (newsize > oldsize) {
3437 		truncate_pagecache(inode, oldsize, newsize);
3438 		ret = btrfs_cont_expand(inode, oldsize, newsize);
3439 		if (ret)
3440 			return ret;
3441 
3442 		trans = btrfs_start_transaction(root, 1);
3443 		if (IS_ERR(trans))
3444 			return PTR_ERR(trans);
3445 
3446 		i_size_write(inode, newsize);
3447 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3448 		ret = btrfs_update_inode(trans, root, inode);
3449 		btrfs_end_transaction(trans, root);
3450 	} else {
3451 
3452 		/*
3453 		 * We're truncating a file that used to have good data down to
3454 		 * zero. Make sure it gets into the ordered flush list so that
3455 		 * any new writes get down to disk quickly.
3456 		 */
3457 		if (newsize == 0)
3458 			BTRFS_I(inode)->ordered_data_close = 1;
3459 
3460 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
3461 		truncate_setsize(inode, newsize);
3462 		ret = btrfs_truncate(inode);
3463 	}
3464 
3465 	return ret;
3466 }
3467 
3468 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3469 {
3470 	struct inode *inode = dentry->d_inode;
3471 	struct btrfs_root *root = BTRFS_I(inode)->root;
3472 	int err;
3473 
3474 	if (btrfs_root_readonly(root))
3475 		return -EROFS;
3476 
3477 	err = inode_change_ok(inode, attr);
3478 	if (err)
3479 		return err;
3480 
3481 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3482 		err = btrfs_setsize(inode, attr->ia_size);
3483 		if (err)
3484 			return err;
3485 	}
3486 
3487 	if (attr->ia_valid) {
3488 		setattr_copy(inode, attr);
3489 		err = btrfs_dirty_inode(inode);
3490 
3491 		if (!err && attr->ia_valid & ATTR_MODE)
3492 			err = btrfs_acl_chmod(inode);
3493 	}
3494 
3495 	return err;
3496 }
3497 
3498 void btrfs_evict_inode(struct inode *inode)
3499 {
3500 	struct btrfs_trans_handle *trans;
3501 	struct btrfs_root *root = BTRFS_I(inode)->root;
3502 	struct btrfs_block_rsv *rsv, *global_rsv;
3503 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3504 	unsigned long nr;
3505 	int ret;
3506 
3507 	trace_btrfs_inode_evict(inode);
3508 
3509 	truncate_inode_pages(&inode->i_data, 0);
3510 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3511 			       btrfs_is_free_space_inode(root, inode)))
3512 		goto no_delete;
3513 
3514 	if (is_bad_inode(inode)) {
3515 		btrfs_orphan_del(NULL, inode);
3516 		goto no_delete;
3517 	}
3518 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3519 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3520 
3521 	if (root->fs_info->log_root_recovering) {
3522 		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
3523 		goto no_delete;
3524 	}
3525 
3526 	if (inode->i_nlink > 0) {
3527 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3528 		goto no_delete;
3529 	}
3530 
3531 	rsv = btrfs_alloc_block_rsv(root);
3532 	if (!rsv) {
3533 		btrfs_orphan_del(NULL, inode);
3534 		goto no_delete;
3535 	}
3536 	rsv->size = min_size;
3537 	global_rsv = &root->fs_info->global_block_rsv;
3538 
3539 	btrfs_i_size_write(inode, 0);
3540 
3541 	/*
3542 	 * This is a bit simpler than btrfs_truncate since
3543 	 *
3544 	 * 1) We've already reserved our space for our orphan item in the
3545 	 *    unlink.
3546 	 * 2) We're going to delete the inode item, so we don't need to update
3547 	 *    it at all.
3548 	 *
3549 	 * So we just need to reserve some slack space in case we add bytes when
3550 	 * doing the truncate.
3551 	 */
3552 	while (1) {
3553 		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3554 
3555 		/*
3556 		 * Try and steal from the global reserve since we will
3557 		 * likely not use this space anyway; we want to try as
3558 		 * hard as possible to get this to work.
3559 		 */
3560 		if (ret)
3561 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
3562 
3563 		if (ret) {
3564 			printk(KERN_WARNING "Could not get space for a "
3565 			       "delete, will truncate on mount %d\n", ret);
3566 			btrfs_orphan_del(NULL, inode);
3567 			btrfs_free_block_rsv(root, rsv);
3568 			goto no_delete;
3569 		}
3570 
3571 		trans = btrfs_start_transaction(root, 0);
3572 		if (IS_ERR(trans)) {
3573 			btrfs_orphan_del(NULL, inode);
3574 			btrfs_free_block_rsv(root, rsv);
3575 			goto no_delete;
3576 		}
3577 
3578 		trans->block_rsv = rsv;
3579 
3580 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3581 		if (ret != -EAGAIN)
3582 			break;
3583 
3584 		nr = trans->blocks_used;
3585 		btrfs_end_transaction(trans, root);
3586 		trans = NULL;
3587 		btrfs_btree_balance_dirty(root, nr);
3588 	}
3589 
3590 	btrfs_free_block_rsv(root, rsv);
3591 
3592 	if (ret == 0) {
3593 		trans->block_rsv = root->orphan_block_rsv;
3594 		ret = btrfs_orphan_del(trans, inode);
3595 		BUG_ON(ret);
3596 	}
3597 
3598 	trans->block_rsv = &root->fs_info->trans_block_rsv;
3599 	if (!(root == root->fs_info->tree_root ||
3600 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3601 		btrfs_return_ino(root, btrfs_ino(inode));
3602 
3603 	nr = trans->blocks_used;
3604 	btrfs_end_transaction(trans, root);
3605 	btrfs_btree_balance_dirty(root, nr);
3606 no_delete:
3607 	end_writeback(inode);
3608 	return;
3609 }
3610 
3611 /*
3612  * this returns the key found in the dir entry in the location pointer.
3613  * If no dir entries were found, location->objectid is 0.
3614  */
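/*
 * Editorial sketch (not part of the original source): how a caller is
 * expected to consume this helper, assuming 'dir' and 'dentry' are already in
 * hand.  btrfs_lookup_dentry() below follows the same pattern.
 *
 *	struct btrfs_key location;
 *	int ret = btrfs_inode_by_name(dir, dentry, &location);
 *
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *	if (location.objectid == 0)
 *		return NULL;
 *
 * A zero objectid means "no such entry"; anything else names either an inode
 * item or, for a subvolume, a root item.
 */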
3615 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3616 			       struct btrfs_key *location)
3617 {
3618 	const char *name = dentry->d_name.name;
3619 	int namelen = dentry->d_name.len;
3620 	struct btrfs_dir_item *di;
3621 	struct btrfs_path *path;
3622 	struct btrfs_root *root = BTRFS_I(dir)->root;
3623 	int ret = 0;
3624 
3625 	path = btrfs_alloc_path();
3626 	if (!path)
3627 		return -ENOMEM;
3628 
3629 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3630 				    namelen, 0);
3631 	if (IS_ERR(di))
3632 		ret = PTR_ERR(di);
3633 
3634 	if (IS_ERR_OR_NULL(di))
3635 		goto out_err;
3636 
3637 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3638 out:
3639 	btrfs_free_path(path);
3640 	return ret;
3641 out_err:
3642 	location->objectid = 0;
3643 	goto out;
3644 }
3645 
3646 /*
3647  * when we hit a tree root in a directory, the btrfs part of the inode
3648  * needs to be changed to reflect the root directory of the tree root.  This
3649  * is kind of like crossing a mount point.
3650  */
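/*
 * Editorial note (illustrative, not from the original source): on success the
 * key is rewritten in place.  Roughly, for a directory entry that points at a
 * subvolume:
 *
 *	before:	location = (subvolume objectid, BTRFS_ROOT_ITEM_KEY, offset)
 *	after:	location = (btrfs_root_dirid(&new_root->root_item),
 *			    BTRFS_INODE_ITEM_KEY, 0)
 *
 * and *sub_root is switched to the subvolume's root, so the caller can carry
 * on with a normal btrfs_iget() in the new tree.
 */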
3651 static int fixup_tree_root_location(struct btrfs_root *root,
3652 				    struct inode *dir,
3653 				    struct dentry *dentry,
3654 				    struct btrfs_key *location,
3655 				    struct btrfs_root **sub_root)
3656 {
3657 	struct btrfs_path *path;
3658 	struct btrfs_root *new_root;
3659 	struct btrfs_root_ref *ref;
3660 	struct extent_buffer *leaf;
3661 	int ret;
3662 	int err = 0;
3663 
3664 	path = btrfs_alloc_path();
3665 	if (!path) {
3666 		err = -ENOMEM;
3667 		goto out;
3668 	}
3669 
3670 	err = -ENOENT;
3671 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3672 				  BTRFS_I(dir)->root->root_key.objectid,
3673 				  location->objectid);
3674 	if (ret) {
3675 		if (ret < 0)
3676 			err = ret;
3677 		goto out;
3678 	}
3679 
3680 	leaf = path->nodes[0];
3681 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3682 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3683 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3684 		goto out;
3685 
3686 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3687 				   (unsigned long)(ref + 1),
3688 				   dentry->d_name.len);
3689 	if (ret)
3690 		goto out;
3691 
3692 	btrfs_release_path(path);
3693 
3694 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3695 	if (IS_ERR(new_root)) {
3696 		err = PTR_ERR(new_root);
3697 		goto out;
3698 	}
3699 
3700 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3701 		err = -ENOENT;
3702 		goto out;
3703 	}
3704 
3705 	*sub_root = new_root;
3706 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3707 	location->type = BTRFS_INODE_ITEM_KEY;
3708 	location->offset = 0;
3709 	err = 0;
3710 out:
3711 	btrfs_free_path(path);
3712 	return err;
3713 }
3714 
3715 static void inode_tree_add(struct inode *inode)
3716 {
3717 	struct btrfs_root *root = BTRFS_I(inode)->root;
3718 	struct btrfs_inode *entry;
3719 	struct rb_node **p;
3720 	struct rb_node *parent;
3721 	u64 ino = btrfs_ino(inode);
3722 again:
3723 	p = &root->inode_tree.rb_node;
3724 	parent = NULL;
3725 
3726 	if (inode_unhashed(inode))
3727 		return;
3728 
3729 	spin_lock(&root->inode_lock);
3730 	while (*p) {
3731 		parent = *p;
3732 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3733 
3734 		if (ino < btrfs_ino(&entry->vfs_inode))
3735 			p = &parent->rb_left;
3736 		else if (ino > btrfs_ino(&entry->vfs_inode))
3737 			p = &parent->rb_right;
3738 		else {
3739 			WARN_ON(!(entry->vfs_inode.i_state &
3740 				  (I_WILL_FREE | I_FREEING)));
3741 			rb_erase(parent, &root->inode_tree);
3742 			RB_CLEAR_NODE(parent);
3743 			spin_unlock(&root->inode_lock);
3744 			goto again;
3745 		}
3746 	}
3747 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3748 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3749 	spin_unlock(&root->inode_lock);
3750 }
3751 
3752 static void inode_tree_del(struct inode *inode)
3753 {
3754 	struct btrfs_root *root = BTRFS_I(inode)->root;
3755 	int empty = 0;
3756 
3757 	spin_lock(&root->inode_lock);
3758 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3759 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3760 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3761 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3762 	}
3763 	spin_unlock(&root->inode_lock);
3764 
3765 	/*
3766 	 * The free space cache keeps inodes in the tree root, but the tree root
3767 	 * has a root_refs of 0, so this could end up dropping the tree root as a
3768 	 * snapshot; we need the extra root != root->fs_info->tree_root check to
3769 	 * make sure we don't drop it.
3770 	 */
3771 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
3772 	    root != root->fs_info->tree_root) {
3773 		synchronize_srcu(&root->fs_info->subvol_srcu);
3774 		spin_lock(&root->inode_lock);
3775 		empty = RB_EMPTY_ROOT(&root->inode_tree);
3776 		spin_unlock(&root->inode_lock);
3777 		if (empty)
3778 			btrfs_add_dead_root(root);
3779 	}
3780 }
3781 
3782 int btrfs_invalidate_inodes(struct btrfs_root *root)
3783 {
3784 	struct rb_node *node;
3785 	struct rb_node *prev;
3786 	struct btrfs_inode *entry;
3787 	struct inode *inode;
3788 	u64 objectid = 0;
3789 
3790 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3791 
3792 	spin_lock(&root->inode_lock);
3793 again:
3794 	node = root->inode_tree.rb_node;
3795 	prev = NULL;
3796 	while (node) {
3797 		prev = node;
3798 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3799 
3800 		if (objectid < btrfs_ino(&entry->vfs_inode))
3801 			node = node->rb_left;
3802 		else if (objectid > btrfs_ino(&entry->vfs_inode))
3803 			node = node->rb_right;
3804 		else
3805 			break;
3806 	}
3807 	if (!node) {
3808 		while (prev) {
3809 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
3810 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
3811 				node = prev;
3812 				break;
3813 			}
3814 			prev = rb_next(prev);
3815 		}
3816 	}
3817 	while (node) {
3818 		entry = rb_entry(node, struct btrfs_inode, rb_node);
3819 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
3820 		inode = igrab(&entry->vfs_inode);
3821 		if (inode) {
3822 			spin_unlock(&root->inode_lock);
3823 			if (atomic_read(&inode->i_count) > 1)
3824 				d_prune_aliases(inode);
3825 			/*
3826 			 * btrfs_drop_inode will have it removed from
3827 			 * the inode cache when its usage count
3828 			 * hits zero.
3829 			 */
3830 			iput(inode);
3831 			cond_resched();
3832 			spin_lock(&root->inode_lock);
3833 			goto again;
3834 		}
3835 
3836 		if (cond_resched_lock(&root->inode_lock))
3837 			goto again;
3838 
3839 		node = rb_next(node);
3840 	}
3841 	spin_unlock(&root->inode_lock);
3842 	return 0;
3843 }
3844 
3845 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3846 {
3847 	struct btrfs_iget_args *args = p;
3848 	inode->i_ino = args->ino;
3849 	BTRFS_I(inode)->root = args->root;
3850 	btrfs_set_inode_space_info(args->root, inode);
3851 	return 0;
3852 }
3853 
3854 static int btrfs_find_actor(struct inode *inode, void *opaque)
3855 {
3856 	struct btrfs_iget_args *args = opaque;
3857 	return args->ino == btrfs_ino(inode) &&
3858 		args->root == BTRFS_I(inode)->root;
3859 }
3860 
3861 static struct inode *btrfs_iget_locked(struct super_block *s,
3862 				       u64 objectid,
3863 				       struct btrfs_root *root)
3864 {
3865 	struct inode *inode;
3866 	struct btrfs_iget_args args;
3867 	args.ino = objectid;
3868 	args.root = root;
3869 
3870 	inode = iget5_locked(s, objectid, btrfs_find_actor,
3871 			     btrfs_init_locked_inode,
3872 			     (void *)&args);
3873 	return inode;
3874 }
3875 
3876 /* Get an inode object given its location and corresponding root.
3877  * Returns in *new if the inode was read from disk
3878  */
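/*
 * Editorial sketch (not part of the original source): a typical lookup when
 * only the inode number is known.  'sb', 'root' and 'ino' are hypothetical
 * caller-supplied names:
 *
 *	struct btrfs_key key;
 *	struct inode *inode;
 *
 *	key.objectid = ino;
 *	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 *	key.offset = 0;
 *
 *	inode = btrfs_iget(sb, &key, root, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * Passing a non-NULL last argument additionally reports whether the inode had
 * to be read from disk instead of being found in the inode cache.
 */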
3879 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3880 			 struct btrfs_root *root, int *new)
3881 {
3882 	struct inode *inode;
3883 
3884 	inode = btrfs_iget_locked(s, location->objectid, root);
3885 	if (!inode)
3886 		return ERR_PTR(-ENOMEM);
3887 
3888 	if (inode->i_state & I_NEW) {
3889 		BTRFS_I(inode)->root = root;
3890 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3891 		btrfs_read_locked_inode(inode);
3892 		if (!is_bad_inode(inode)) {
3893 			inode_tree_add(inode);
3894 			unlock_new_inode(inode);
3895 			if (new)
3896 				*new = 1;
3897 		} else {
3898 			unlock_new_inode(inode);
3899 			iput(inode);
3900 			inode = ERR_PTR(-ESTALE);
3901 		}
3902 	}
3903 
3904 	return inode;
3905 }
3906 
3907 static struct inode *new_simple_dir(struct super_block *s,
3908 				    struct btrfs_key *key,
3909 				    struct btrfs_root *root)
3910 {
3911 	struct inode *inode = new_inode(s);
3912 
3913 	if (!inode)
3914 		return ERR_PTR(-ENOMEM);
3915 
3916 	BTRFS_I(inode)->root = root;
3917 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3918 	BTRFS_I(inode)->dummy_inode = 1;
3919 
3920 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3921 	inode->i_op = &simple_dir_inode_operations;
3922 	inode->i_fop = &simple_dir_operations;
3923 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3924 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3925 
3926 	return inode;
3927 }
3928 
3929 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3930 {
3931 	struct inode *inode;
3932 	struct btrfs_root *root = BTRFS_I(dir)->root;
3933 	struct btrfs_root *sub_root = root;
3934 	struct btrfs_key location;
3935 	int index;
3936 	int ret = 0;
3937 
3938 	if (dentry->d_name.len > BTRFS_NAME_LEN)
3939 		return ERR_PTR(-ENAMETOOLONG);
3940 
3941 	if (unlikely(d_need_lookup(dentry))) {
3942 		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
3943 		kfree(dentry->d_fsdata);
3944 		dentry->d_fsdata = NULL;
3945 		/* This thing is hashed, drop it for now */
3946 		d_drop(dentry);
3947 	} else {
3948 		ret = btrfs_inode_by_name(dir, dentry, &location);
3949 	}
3950 
3951 	if (ret < 0)
3952 		return ERR_PTR(ret);
3953 
3954 	if (location.objectid == 0)
3955 		return NULL;
3956 
3957 	if (location.type == BTRFS_INODE_ITEM_KEY) {
3958 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3959 		return inode;
3960 	}
3961 
3962 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3963 
3964 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
3965 	ret = fixup_tree_root_location(root, dir, dentry,
3966 				       &location, &sub_root);
3967 	if (ret < 0) {
3968 		if (ret != -ENOENT)
3969 			inode = ERR_PTR(ret);
3970 		else
3971 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
3972 	} else {
3973 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3974 	}
3975 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3976 
3977 	if (!IS_ERR(inode) && root != sub_root) {
3978 		down_read(&root->fs_info->cleanup_work_sem);
3979 		if (!(inode->i_sb->s_flags & MS_RDONLY))
3980 			ret = btrfs_orphan_cleanup(sub_root);
3981 		up_read(&root->fs_info->cleanup_work_sem);
3982 		if (ret)
3983 			inode = ERR_PTR(ret);
3984 	}
3985 
3986 	return inode;
3987 }
3988 
3989 static int btrfs_dentry_delete(const struct dentry *dentry)
3990 {
3991 	struct btrfs_root *root;
3992 
3993 	if (!dentry->d_inode && !IS_ROOT(dentry))
3994 		dentry = dentry->d_parent;
3995 
3996 	if (dentry->d_inode) {
3997 		root = BTRFS_I(dentry->d_inode)->root;
3998 		if (btrfs_root_refs(&root->root_item) == 0)
3999 			return 1;
4000 	}
4001 	return 0;
4002 }
4003 
4004 static void btrfs_dentry_release(struct dentry *dentry)
4005 {
4006 	if (dentry->d_fsdata)
4007 		kfree(dentry->d_fsdata);
4008 }
4009 
4010 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4011 				   struct nameidata *nd)
4012 {
4013 	struct dentry *ret;
4014 
4015 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4016 	if (unlikely(d_need_lookup(dentry))) {
4017 		spin_lock(&dentry->d_lock);
4018 		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
4019 		spin_unlock(&dentry->d_lock);
4020 	}
4021 	return ret;
4022 }
4023 
4024 unsigned char btrfs_filetype_table[] = {
4025 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4026 };
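/*
 * Editorial note (illustrative): the table above is indexed by the on-disk
 * BTRFS_FT_* value stored in a dir item and yields the d_type handed to
 * filldir, e.g. (assuming the usual BTRFS_FT_* numbering, where
 * BTRFS_FT_REG_FILE is 1 and BTRFS_FT_DIR is 2):
 *
 *	btrfs_filetype_table[BTRFS_FT_REG_FILE] == DT_REG
 *	btrfs_filetype_table[BTRFS_FT_DIR] == DT_DIR
 *
 * btrfs_real_readdir() below uses it as
 *	d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
 */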
4027 
4028 static int btrfs_real_readdir(struct file *filp, void *dirent,
4029 			      filldir_t filldir)
4030 {
4031 	struct inode *inode = filp->f_dentry->d_inode;
4032 	struct btrfs_root *root = BTRFS_I(inode)->root;
4033 	struct btrfs_item *item;
4034 	struct btrfs_dir_item *di;
4035 	struct btrfs_key key;
4036 	struct btrfs_key found_key;
4037 	struct btrfs_path *path;
4038 	struct list_head ins_list;
4039 	struct list_head del_list;
4040 	struct qstr q;
4041 	int ret;
4042 	struct extent_buffer *leaf;
4043 	int slot;
4044 	unsigned char d_type;
4045 	int over = 0;
4046 	u32 di_cur;
4047 	u32 di_total;
4048 	u32 di_len;
4049 	int key_type = BTRFS_DIR_INDEX_KEY;
4050 	char tmp_name[32];
4051 	char *name_ptr;
4052 	int name_len;
4053 	int is_curr = 0;	/* filp->f_pos points to the current index? */
4054 
4055 	/* FIXME, use a real flag for deciding about the key type */
4056 	if (root->fs_info->tree_root == root)
4057 		key_type = BTRFS_DIR_ITEM_KEY;
4058 
4059 	/* special case for "." */
4060 	if (filp->f_pos == 0) {
4061 		over = filldir(dirent, ".", 1,
4062 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
4063 		if (over)
4064 			return 0;
4065 		filp->f_pos = 1;
4066 	}
4067 	/* special case for .., just use the back ref */
4068 	if (filp->f_pos == 1) {
4069 		u64 pino = parent_ino(filp->f_path.dentry);
4070 		over = filldir(dirent, "..", 2,
4071 			       filp->f_pos, pino, DT_DIR);
4072 		if (over)
4073 			return 0;
4074 		filp->f_pos = 2;
4075 	}
4076 	path = btrfs_alloc_path();
4077 	if (!path)
4078 		return -ENOMEM;
4079 
4080 	path->reada = 1;
4081 
4082 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4083 		INIT_LIST_HEAD(&ins_list);
4084 		INIT_LIST_HEAD(&del_list);
4085 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
4086 	}
4087 
4088 	btrfs_set_key_type(&key, key_type);
4089 	key.offset = filp->f_pos;
4090 	key.objectid = btrfs_ino(inode);
4091 
4092 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4093 	if (ret < 0)
4094 		goto err;
4095 
4096 	while (1) {
4097 		leaf = path->nodes[0];
4098 		slot = path->slots[0];
4099 		if (slot >= btrfs_header_nritems(leaf)) {
4100 			ret = btrfs_next_leaf(root, path);
4101 			if (ret < 0)
4102 				goto err;
4103 			else if (ret > 0)
4104 				break;
4105 			continue;
4106 		}
4107 
4108 		item = btrfs_item_nr(leaf, slot);
4109 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4110 
4111 		if (found_key.objectid != key.objectid)
4112 			break;
4113 		if (btrfs_key_type(&found_key) != key_type)
4114 			break;
4115 		if (found_key.offset < filp->f_pos)
4116 			goto next;
4117 		if (key_type == BTRFS_DIR_INDEX_KEY &&
4118 		    btrfs_should_delete_dir_index(&del_list,
4119 						  found_key.offset))
4120 			goto next;
4121 
4122 		filp->f_pos = found_key.offset;
4123 		is_curr = 1;
4124 
4125 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4126 		di_cur = 0;
4127 		di_total = btrfs_item_size(leaf, item);
4128 
4129 		while (di_cur < di_total) {
4130 			struct btrfs_key location;
4131 			struct dentry *tmp;
4132 
4133 			if (verify_dir_item(root, leaf, di))
4134 				break;
4135 
4136 			name_len = btrfs_dir_name_len(leaf, di);
4137 			if (name_len <= sizeof(tmp_name)) {
4138 				name_ptr = tmp_name;
4139 			} else {
4140 				name_ptr = kmalloc(name_len, GFP_NOFS);
4141 				if (!name_ptr) {
4142 					ret = -ENOMEM;
4143 					goto err;
4144 				}
4145 			}
4146 			read_extent_buffer(leaf, name_ptr,
4147 					   (unsigned long)(di + 1), name_len);
4148 
4149 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4150 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4151 
4152 			q.name = name_ptr;
4153 			q.len = name_len;
4154 			q.hash = full_name_hash(q.name, q.len);
4155 			tmp = d_lookup(filp->f_dentry, &q);
4156 			if (!tmp) {
4157 				struct btrfs_key *newkey;
4158 
4159 				newkey = kzalloc(sizeof(struct btrfs_key),
4160 						 GFP_NOFS);
4161 				if (!newkey)
4162 					goto no_dentry;
4163 				tmp = d_alloc(filp->f_dentry, &q);
4164 				if (!tmp) {
4165 					kfree(newkey);
4166 					dput(tmp);
4167 					goto no_dentry;
4168 				}
4169 				memcpy(newkey, &location,
4170 				       sizeof(struct btrfs_key));
4171 				tmp->d_fsdata = newkey;
4172 				tmp->d_flags |= DCACHE_NEED_LOOKUP;
4173 				d_rehash(tmp);
4174 				dput(tmp);
4175 			} else {
4176 				dput(tmp);
4177 			}
4178 no_dentry:
4179 			/* is this a reference to our own snapshot? If so
4180 			 * skip it
4181 			 */
4182 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4183 			    location.objectid == root->root_key.objectid) {
4184 				over = 0;
4185 				goto skip;
4186 			}
4187 			over = filldir(dirent, name_ptr, name_len,
4188 				       found_key.offset, location.objectid,
4189 				       d_type);
4190 
4191 skip:
4192 			if (name_ptr != tmp_name)
4193 				kfree(name_ptr);
4194 
4195 			if (over)
4196 				goto nopos;
4197 			di_len = btrfs_dir_name_len(leaf, di) +
4198 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4199 			di_cur += di_len;
4200 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4201 		}
4202 next:
4203 		path->slots[0]++;
4204 	}
4205 
4206 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4207 		if (is_curr)
4208 			filp->f_pos++;
4209 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4210 						      &ins_list);
4211 		if (ret)
4212 			goto nopos;
4213 	}
4214 
4215 	/* Reached end of directory/root. Bump pos past the last item. */
4216 	if (key_type == BTRFS_DIR_INDEX_KEY)
4217 		/*
4218 		 * 32-bit glibc will use getdents64, but then strtol -
4219 		 * so the last number we can serve is this.
4220 		 */
4221 		filp->f_pos = 0x7fffffff;
4222 	else
4223 		filp->f_pos++;
4224 nopos:
4225 	ret = 0;
4226 err:
4227 	if (key_type == BTRFS_DIR_INDEX_KEY)
4228 		btrfs_put_delayed_items(&ins_list, &del_list);
4229 	btrfs_free_path(path);
4230 	return ret;
4231 }
4232 
4233 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4234 {
4235 	struct btrfs_root *root = BTRFS_I(inode)->root;
4236 	struct btrfs_trans_handle *trans;
4237 	int ret = 0;
4238 	bool nolock = false;
4239 
4240 	if (BTRFS_I(inode)->dummy_inode)
4241 		return 0;
4242 
4243 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
4244 		nolock = true;
4245 
4246 	if (wbc->sync_mode == WB_SYNC_ALL) {
4247 		if (nolock)
4248 			trans = btrfs_join_transaction_nolock(root);
4249 		else
4250 			trans = btrfs_join_transaction(root);
4251 		if (IS_ERR(trans))
4252 			return PTR_ERR(trans);
4253 		if (nolock)
4254 			ret = btrfs_end_transaction_nolock(trans, root);
4255 		else
4256 			ret = btrfs_commit_transaction(trans, root);
4257 	}
4258 	return ret;
4259 }
4260 
4261 /*
4262  * This is somewhat expensive, updating the tree every time the
4263  * inode changes.  But, it is most likely to find the inode in cache.
4264  * FIXME, needs more benchmarking...there are no reasons other than performance
4265  * to keep or drop this code.
4266  */
4267 int btrfs_dirty_inode(struct inode *inode)
4268 {
4269 	struct btrfs_root *root = BTRFS_I(inode)->root;
4270 	struct btrfs_trans_handle *trans;
4271 	int ret;
4272 
4273 	if (BTRFS_I(inode)->dummy_inode)
4274 		return 0;
4275 
4276 	trans = btrfs_join_transaction(root);
4277 	if (IS_ERR(trans))
4278 		return PTR_ERR(trans);
4279 
4280 	ret = btrfs_update_inode(trans, root, inode);
4281 	if (ret && ret == -ENOSPC) {
4282 		/* whoops, let's try again with the full transaction */
4283 		btrfs_end_transaction(trans, root);
4284 		trans = btrfs_start_transaction(root, 1);
4285 		if (IS_ERR(trans))
4286 			return PTR_ERR(trans);
4287 
4288 		ret = btrfs_update_inode(trans, root, inode);
4289 	}
4290 	btrfs_end_transaction(trans, root);
4291 	if (BTRFS_I(inode)->delayed_node)
4292 		btrfs_balance_delayed_items(root);
4293 
4294 	return ret;
4295 }
4296 
4297 /*
4298  * This is a copy of file_update_time.  We need this so we can return error on
4299  * ENOSPC for updating the inode in the case of file write and mmap writes.
4300  */
4301 int btrfs_update_time(struct file *file)
4302 {
4303 	struct inode *inode = file->f_path.dentry->d_inode;
4304 	struct timespec now;
4305 	int ret;
4306 	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
4307 
4308 	/* First try to exhaust all avenues to not sync */
4309 	if (IS_NOCMTIME(inode))
4310 		return 0;
4311 
4312 	now = current_fs_time(inode->i_sb);
4313 	if (!timespec_equal(&inode->i_mtime, &now))
4314 		sync_it = S_MTIME;
4315 
4316 	if (!timespec_equal(&inode->i_ctime, &now))
4317 		sync_it |= S_CTIME;
4318 
4319 	if (IS_I_VERSION(inode))
4320 		sync_it |= S_VERSION;
4321 
4322 	if (!sync_it)
4323 		return 0;
4324 
4325 	/* Finally allowed to write? Takes lock. */
4326 	if (mnt_want_write_file(file))
4327 		return 0;
4328 
4329 	/* Only change inode inside the lock region */
4330 	if (sync_it & S_VERSION)
4331 		inode_inc_iversion(inode);
4332 	if (sync_it & S_CTIME)
4333 		inode->i_ctime = now;
4334 	if (sync_it & S_MTIME)
4335 		inode->i_mtime = now;
4336 	ret = btrfs_dirty_inode(inode);
4337 	if (!ret)
4338 		mark_inode_dirty_sync(inode);
4339 	mnt_drop_write(file->f_path.mnt);
4340 	return ret;
4341 }
4342 
4343 /*
4344  * find the highest existing sequence number in a directory
4345  * and then set the in-memory index_cnt variable to reflect
4346  * free sequence numbers
4347  */
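/*
 * Editorial worked example (not from the original source): the search below
 * positions the path at the highest existing BTRFS_DIR_INDEX_KEY for this
 * directory and sets index_cnt to one past it.  If a directory already holds
 * entries at index offsets 2, 3 and 7, the last key found is
 * (ino, DIR_INDEX, 7) and index_cnt becomes 8, so the next entry created gets
 * index 8.  A directory with no index items at all falls back to
 * index_cnt = 2, since 0 and 1 are reserved for "." and ".." in f_pos terms.
 */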
4348 static int btrfs_set_inode_index_count(struct inode *inode)
4349 {
4350 	struct btrfs_root *root = BTRFS_I(inode)->root;
4351 	struct btrfs_key key, found_key;
4352 	struct btrfs_path *path;
4353 	struct extent_buffer *leaf;
4354 	int ret;
4355 
4356 	key.objectid = btrfs_ino(inode);
4357 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4358 	key.offset = (u64)-1;
4359 
4360 	path = btrfs_alloc_path();
4361 	if (!path)
4362 		return -ENOMEM;
4363 
4364 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4365 	if (ret < 0)
4366 		goto out;
4367 	/* FIXME: we should be able to handle this */
4368 	if (ret == 0)
4369 		goto out;
4370 	ret = 0;
4371 
4372 	/*
4373 	 * MAGIC NUMBER EXPLANATION:
4374 	 * since we search a directory based on f_pos we have to start at 2
4375 	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4376 	 * else has to start at 2
4377 	 */
4378 	if (path->slots[0] == 0) {
4379 		BTRFS_I(inode)->index_cnt = 2;
4380 		goto out;
4381 	}
4382 
4383 	path->slots[0]--;
4384 
4385 	leaf = path->nodes[0];
4386 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4387 
4388 	if (found_key.objectid != btrfs_ino(inode) ||
4389 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4390 		BTRFS_I(inode)->index_cnt = 2;
4391 		goto out;
4392 	}
4393 
4394 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4395 out:
4396 	btrfs_free_path(path);
4397 	return ret;
4398 }
4399 
4400 /*
4401  * helper to find a free sequence number in a given directory.  The current
4402  * code is very simple; later versions will do smarter things in the btree
4403  */
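/*
 * Editorial sketch (illustrative only): the usual calling pattern, as in
 * btrfs_link() below, is to grab an index first and then hand it to
 * btrfs_add_link()/btrfs_add_nondir() for the new directory entry:
 *
 *	u64 index;
 *
 *	err = btrfs_set_inode_index(dir, &index);
 *	if (err)
 *		goto fail;
 *	...
 *	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
 */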
4404 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4405 {
4406 	int ret = 0;
4407 
4408 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4409 		ret = btrfs_inode_delayed_dir_index_count(dir);
4410 		if (ret) {
4411 			ret = btrfs_set_inode_index_count(dir);
4412 			if (ret)
4413 				return ret;
4414 		}
4415 	}
4416 
4417 	*index = BTRFS_I(dir)->index_cnt;
4418 	BTRFS_I(dir)->index_cnt++;
4419 
4420 	return ret;
4421 }
4422 
4423 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4424 				     struct btrfs_root *root,
4425 				     struct inode *dir,
4426 				     const char *name, int name_len,
4427 				     u64 ref_objectid, u64 objectid,
4428 				     umode_t mode, u64 *index)
4429 {
4430 	struct inode *inode;
4431 	struct btrfs_inode_item *inode_item;
4432 	struct btrfs_key *location;
4433 	struct btrfs_path *path;
4434 	struct btrfs_inode_ref *ref;
4435 	struct btrfs_key key[2];
4436 	u32 sizes[2];
4437 	unsigned long ptr;
4438 	int ret;
4439 	int owner;
4440 
4441 	path = btrfs_alloc_path();
4442 	if (!path)
4443 		return ERR_PTR(-ENOMEM);
4444 
4445 	inode = new_inode(root->fs_info->sb);
4446 	if (!inode) {
4447 		btrfs_free_path(path);
4448 		return ERR_PTR(-ENOMEM);
4449 	}
4450 
4451 	/*
4452 	 * we have to initialize this early, so we can reclaim the inode
4453 	 * number if we fail afterwards in this function.
4454 	 */
4455 	inode->i_ino = objectid;
4456 
4457 	if (dir) {
4458 		trace_btrfs_inode_request(dir);
4459 
4460 		ret = btrfs_set_inode_index(dir, index);
4461 		if (ret) {
4462 			btrfs_free_path(path);
4463 			iput(inode);
4464 			return ERR_PTR(ret);
4465 		}
4466 	}
4467 	/*
4468 	 * index_cnt is ignored for everything but a dir,
4469 	 * btrfs_get_inode_index_count has an explanation for the magic
4470 	 * btrfs_set_inode_index_count has an explanation for the magic
4471 	 */
4472 	BTRFS_I(inode)->index_cnt = 2;
4473 	BTRFS_I(inode)->root = root;
4474 	BTRFS_I(inode)->generation = trans->transid;
4475 	inode->i_generation = BTRFS_I(inode)->generation;
4476 	btrfs_set_inode_space_info(root, inode);
4477 
4478 	if (S_ISDIR(mode))
4479 		owner = 0;
4480 	else
4481 		owner = 1;
4482 
4483 	key[0].objectid = objectid;
4484 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4485 	key[0].offset = 0;
4486 
4487 	key[1].objectid = objectid;
4488 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4489 	key[1].offset = ref_objectid;
4490 
4491 	sizes[0] = sizeof(struct btrfs_inode_item);
4492 	sizes[1] = name_len + sizeof(*ref);
4493 
4494 	path->leave_spinning = 1;
4495 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4496 	if (ret != 0)
4497 		goto fail;
4498 
4499 	inode_init_owner(inode, dir, mode);
4500 	inode_set_bytes(inode, 0);
4501 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4502 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4503 				  struct btrfs_inode_item);
4504 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4505 
4506 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4507 			     struct btrfs_inode_ref);
4508 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4509 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4510 	ptr = (unsigned long)(ref + 1);
4511 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4512 
4513 	btrfs_mark_buffer_dirty(path->nodes[0]);
4514 	btrfs_free_path(path);
4515 
4516 	location = &BTRFS_I(inode)->location;
4517 	location->objectid = objectid;
4518 	location->offset = 0;
4519 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4520 
4521 	btrfs_inherit_iflags(inode, dir);
4522 
4523 	if (S_ISREG(mode)) {
4524 		if (btrfs_test_opt(root, NODATASUM))
4525 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4526 		if (btrfs_test_opt(root, NODATACOW) ||
4527 		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4528 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4529 	}
4530 
4531 	insert_inode_hash(inode);
4532 	inode_tree_add(inode);
4533 
4534 	trace_btrfs_inode_new(inode);
4535 	btrfs_set_inode_last_trans(trans, inode);
4536 
4537 	return inode;
4538 fail:
4539 	if (dir)
4540 		BTRFS_I(dir)->index_cnt--;
4541 	btrfs_free_path(path);
4542 	iput(inode);
4543 	return ERR_PTR(ret);
4544 }
4545 
4546 static inline u8 btrfs_inode_type(struct inode *inode)
4547 {
4548 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4549 }
4550 
4551 /*
4552  * utility function to add 'inode' into 'parent_inode' with
4553  * a given name and a given sequence number.
4554  * if 'add_backref' is true, also insert a backref from the
4555  * inode to the parent directory.
4556  */
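/*
 * Editorial sketch (not part of the original source): linking an existing
 * inode under a new name, assuming 'index' came from btrfs_set_inode_index()
 * and 'trans' is a running transaction:
 *
 *	err = btrfs_add_link(trans, dir, inode,
 *			     dentry->d_name.name, dentry->d_name.len,
 *			     1, index);
 *
 * The "1" asks for an INODE_REF back reference as well; a return value of 0
 * means the dir items and the back reference were inserted and the parent's
 * size and timestamps were updated.
 */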
4557 int btrfs_add_link(struct btrfs_trans_handle *trans,
4558 		   struct inode *parent_inode, struct inode *inode,
4559 		   const char *name, int name_len, int add_backref, u64 index)
4560 {
4561 	int ret = 0;
4562 	struct btrfs_key key;
4563 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4564 	u64 ino = btrfs_ino(inode);
4565 	u64 parent_ino = btrfs_ino(parent_inode);
4566 
4567 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4568 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4569 	} else {
4570 		key.objectid = ino;
4571 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4572 		key.offset = 0;
4573 	}
4574 
4575 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4576 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4577 					 key.objectid, root->root_key.objectid,
4578 					 parent_ino, index, name, name_len);
4579 	} else if (add_backref) {
4580 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4581 					     parent_ino, index);
4582 	}
4583 
4584 	if (ret == 0) {
4585 		ret = btrfs_insert_dir_item(trans, root, name, name_len,
4586 					    parent_inode, &key,
4587 					    btrfs_inode_type(inode), index);
4588 		if (ret)
4589 			goto fail_dir_item;
4590 
4591 		btrfs_i_size_write(parent_inode, parent_inode->i_size +
4592 				   name_len * 2);
4593 		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4594 		ret = btrfs_update_inode(trans, root, parent_inode);
4595 	}
4596 	return ret;
4597 
4598 fail_dir_item:
4599 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4600 		u64 local_index;
4601 		int err;
4602 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4603 				 key.objectid, root->root_key.objectid,
4604 				 parent_ino, &local_index, name, name_len);
4605 
4606 	} else if (add_backref) {
4607 		u64 local_index;
4608 		int err;
4609 
4610 		err = btrfs_del_inode_ref(trans, root, name, name_len,
4611 					  ino, parent_ino, &local_index);
4612 	}
4613 	return ret;
4614 }
4615 
4616 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4617 			    struct inode *dir, struct dentry *dentry,
4618 			    struct inode *inode, int backref, u64 index)
4619 {
4620 	int err = btrfs_add_link(trans, dir, inode,
4621 				 dentry->d_name.name, dentry->d_name.len,
4622 				 backref, index);
4623 	if (err > 0)
4624 		err = -EEXIST;
4625 	return err;
4626 }
4627 
4628 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4629 			umode_t mode, dev_t rdev)
4630 {
4631 	struct btrfs_trans_handle *trans;
4632 	struct btrfs_root *root = BTRFS_I(dir)->root;
4633 	struct inode *inode = NULL;
4634 	int err;
4635 	int drop_inode = 0;
4636 	u64 objectid;
4637 	unsigned long nr = 0;
4638 	u64 index = 0;
4639 
4640 	if (!new_valid_dev(rdev))
4641 		return -EINVAL;
4642 
4643 	/*
4644 	 * 2 for inode item and ref
4645 	 * 2 for dir items
4646 	 * 1 for xattr if selinux is on
4647 	 */
4648 	trans = btrfs_start_transaction(root, 5);
4649 	if (IS_ERR(trans))
4650 		return PTR_ERR(trans);
4651 
4652 	err = btrfs_find_free_ino(root, &objectid);
4653 	if (err)
4654 		goto out_unlock;
4655 
4656 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4657 				dentry->d_name.len, btrfs_ino(dir), objectid,
4658 				mode, &index);
4659 	if (IS_ERR(inode)) {
4660 		err = PTR_ERR(inode);
4661 		goto out_unlock;
4662 	}
4663 
4664 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4665 	if (err) {
4666 		drop_inode = 1;
4667 		goto out_unlock;
4668 	}
4669 
4670 	/*
4671 	* If the active LSM wants to access the inode during
4672 	* d_instantiate it needs these. Smack checks to see
4673 	* if the filesystem supports xattrs by looking at the
4674 	* ops vector.
4675 	*/
4676 
4677 	inode->i_op = &btrfs_special_inode_operations;
4678 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4679 	if (err)
4680 		drop_inode = 1;
4681 	else {
4682 		init_special_inode(inode, inode->i_mode, rdev);
4683 		btrfs_update_inode(trans, root, inode);
4684 		d_instantiate(dentry, inode);
4685 	}
4686 out_unlock:
4687 	nr = trans->blocks_used;
4688 	btrfs_end_transaction(trans, root);
4689 	btrfs_btree_balance_dirty(root, nr);
4690 	if (drop_inode) {
4691 		inode_dec_link_count(inode);
4692 		iput(inode);
4693 	}
4694 	return err;
4695 }
4696 
4697 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4698 			umode_t mode, struct nameidata *nd)
4699 {
4700 	struct btrfs_trans_handle *trans;
4701 	struct btrfs_root *root = BTRFS_I(dir)->root;
4702 	struct inode *inode = NULL;
4703 	int drop_inode = 0;
4704 	int err;
4705 	unsigned long nr = 0;
4706 	u64 objectid;
4707 	u64 index = 0;
4708 
4709 	/*
4710 	 * 2 for inode item and ref
4711 	 * 2 for dir items
4712 	 * 1 for xattr if selinux is on
4713 	 */
4714 	trans = btrfs_start_transaction(root, 5);
4715 	if (IS_ERR(trans))
4716 		return PTR_ERR(trans);
4717 
4718 	err = btrfs_find_free_ino(root, &objectid);
4719 	if (err)
4720 		goto out_unlock;
4721 
4722 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4723 				dentry->d_name.len, btrfs_ino(dir), objectid,
4724 				mode, &index);
4725 	if (IS_ERR(inode)) {
4726 		err = PTR_ERR(inode);
4727 		goto out_unlock;
4728 	}
4729 
4730 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4731 	if (err) {
4732 		drop_inode = 1;
4733 		goto out_unlock;
4734 	}
4735 
4736 	/*
4737 	* If the active LSM wants to access the inode during
4738 	* d_instantiate it needs these. Smack checks to see
4739 	* if the filesystem supports xattrs by looking at the
4740 	* ops vector.
4741 	*/
4742 	inode->i_fop = &btrfs_file_operations;
4743 	inode->i_op = &btrfs_file_inode_operations;
4744 
4745 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4746 	if (err)
4747 		drop_inode = 1;
4748 	else {
4749 		inode->i_mapping->a_ops = &btrfs_aops;
4750 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4751 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4752 		d_instantiate(dentry, inode);
4753 	}
4754 out_unlock:
4755 	nr = trans->blocks_used;
4756 	btrfs_end_transaction(trans, root);
4757 	if (drop_inode) {
4758 		inode_dec_link_count(inode);
4759 		iput(inode);
4760 	}
4761 	btrfs_btree_balance_dirty(root, nr);
4762 	return err;
4763 }
4764 
4765 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4766 		      struct dentry *dentry)
4767 {
4768 	struct btrfs_trans_handle *trans;
4769 	struct btrfs_root *root = BTRFS_I(dir)->root;
4770 	struct inode *inode = old_dentry->d_inode;
4771 	u64 index;
4772 	unsigned long nr = 0;
4773 	int err;
4774 	int drop_inode = 0;
4775 
4776 	/* do not allow sys_link's with other subvols of the same device */
4777 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4778 		return -EXDEV;
4779 
4780 	if (inode->i_nlink == ~0U)
4781 		return -EMLINK;
4782 
4783 	err = btrfs_set_inode_index(dir, &index);
4784 	if (err)
4785 		goto fail;
4786 
4787 	/*
4788 	 * 2 items for inode and inode ref
4789 	 * 2 items for dir items
4790 	 * 1 item for parent inode
4791 	 */
4792 	trans = btrfs_start_transaction(root, 5);
4793 	if (IS_ERR(trans)) {
4794 		err = PTR_ERR(trans);
4795 		goto fail;
4796 	}
4797 
4798 	btrfs_inc_nlink(inode);
4799 	inode->i_ctime = CURRENT_TIME;
4800 	ihold(inode);
4801 
4802 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4803 
4804 	if (err) {
4805 		drop_inode = 1;
4806 	} else {
4807 		struct dentry *parent = dentry->d_parent;
4808 		err = btrfs_update_inode(trans, root, inode);
4809 		BUG_ON(err);
4810 		d_instantiate(dentry, inode);
4811 		btrfs_log_new_name(trans, inode, NULL, parent);
4812 	}
4813 
4814 	nr = trans->blocks_used;
4815 	btrfs_end_transaction(trans, root);
4816 fail:
4817 	if (drop_inode) {
4818 		inode_dec_link_count(inode);
4819 		iput(inode);
4820 	}
4821 	btrfs_btree_balance_dirty(root, nr);
4822 	return err;
4823 }
4824 
4825 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
4826 {
4827 	struct inode *inode = NULL;
4828 	struct btrfs_trans_handle *trans;
4829 	struct btrfs_root *root = BTRFS_I(dir)->root;
4830 	int err = 0;
4831 	int drop_on_err = 0;
4832 	u64 objectid = 0;
4833 	u64 index = 0;
4834 	unsigned long nr = 1;
4835 
4836 	/*
4837 	 * 2 items for inode and ref
4838 	 * 2 items for dir items
4839 	 * 1 for xattr if selinux is on
4840 	 */
4841 	trans = btrfs_start_transaction(root, 5);
4842 	if (IS_ERR(trans))
4843 		return PTR_ERR(trans);
4844 
4845 	err = btrfs_find_free_ino(root, &objectid);
4846 	if (err)
4847 		goto out_fail;
4848 
4849 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4850 				dentry->d_name.len, btrfs_ino(dir), objectid,
4851 				S_IFDIR | mode, &index);
4852 	if (IS_ERR(inode)) {
4853 		err = PTR_ERR(inode);
4854 		goto out_fail;
4855 	}
4856 
4857 	drop_on_err = 1;
4858 
4859 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4860 	if (err)
4861 		goto out_fail;
4862 
4863 	inode->i_op = &btrfs_dir_inode_operations;
4864 	inode->i_fop = &btrfs_dir_file_operations;
4865 
4866 	btrfs_i_size_write(inode, 0);
4867 	err = btrfs_update_inode(trans, root, inode);
4868 	if (err)
4869 		goto out_fail;
4870 
4871 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
4872 			     dentry->d_name.len, 0, index);
4873 	if (err)
4874 		goto out_fail;
4875 
4876 	d_instantiate(dentry, inode);
4877 	drop_on_err = 0;
4878 
4879 out_fail:
4880 	nr = trans->blocks_used;
4881 	btrfs_end_transaction(trans, root);
4882 	if (drop_on_err)
4883 		iput(inode);
4884 	btrfs_btree_balance_dirty(root, nr);
4885 	return err;
4886 }
4887 
4888 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
4889  * and an extent that you want to insert, deal with overlap and insert
4890  * the new extent into the tree.
4891  */
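/*
 * Editorial worked example (illustrative): suppose the new extent map 'em'
 * read from disk covers file range [0, 16k), but the caller only needs the
 * part starting at map_start = 8k with map_len = 4k because an overlapping
 * mapping already exists.  start_diff is then 8k, so em is trimmed to
 * [8k, 12k) and, for an uncompressed non-hole extent, block_start is advanced
 * by 8k (and block_len shrunk) before the insert is retried with
 * add_extent_mapping().
 */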
4892 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4893 				struct extent_map *existing,
4894 				struct extent_map *em,
4895 				u64 map_start, u64 map_len)
4896 {
4897 	u64 start_diff;
4898 
4899 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4900 	start_diff = map_start - em->start;
4901 	em->start = map_start;
4902 	em->len = map_len;
4903 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4904 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4905 		em->block_start += start_diff;
4906 		em->block_len -= start_diff;
4907 	}
4908 	return add_extent_mapping(em_tree, em);
4909 }
4910 
4911 static noinline int uncompress_inline(struct btrfs_path *path,
4912 				      struct inode *inode, struct page *page,
4913 				      size_t pg_offset, u64 extent_offset,
4914 				      struct btrfs_file_extent_item *item)
4915 {
4916 	int ret;
4917 	struct extent_buffer *leaf = path->nodes[0];
4918 	char *tmp;
4919 	size_t max_size;
4920 	unsigned long inline_size;
4921 	unsigned long ptr;
4922 	int compress_type;
4923 
4924 	WARN_ON(pg_offset != 0);
4925 	compress_type = btrfs_file_extent_compression(leaf, item);
4926 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
4927 	inline_size = btrfs_file_extent_inline_item_len(leaf,
4928 					btrfs_item_nr(leaf, path->slots[0]));
4929 	tmp = kmalloc(inline_size, GFP_NOFS);
4930 	if (!tmp)
4931 		return -ENOMEM;
4932 	ptr = btrfs_file_extent_inline_start(item);
4933 
4934 	read_extent_buffer(leaf, tmp, ptr, inline_size);
4935 
4936 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4937 	ret = btrfs_decompress(compress_type, tmp, page,
4938 			       extent_offset, inline_size, max_size);
4939 	if (ret) {
4940 		char *kaddr = kmap_atomic(page, KM_USER0);
4941 		unsigned long copy_size = min_t(u64,
4942 				  PAGE_CACHE_SIZE - pg_offset,
4943 				  max_size - extent_offset);
4944 		memset(kaddr + pg_offset, 0, copy_size);
4945 		kunmap_atomic(kaddr, KM_USER0);
4946 	}
4947 	kfree(tmp);
4948 	return 0;
4949 }
4950 
4951 /*
4952  * a bit scary, this does extent mapping from logical file offset to the disk.
4953  * the ugly parts come from merging extents from the disk with the in-ram
4954  * representation.  This gets more complex because of the data=ordered code,
4955  * where the in-ram extents might be locked pending data=ordered completion.
4956  *
4957  * This also copies inline extents directly into the page.
4958  */
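/*
 * Editorial sketch (not from the original source): a read-only caller that
 * just wants to know what backs a file range, with hypothetical 'inode',
 * 'start' and 'len':
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	if (em->block_start == EXTENT_MAP_HOLE)
 *		handle_hole(em);
 *	else if (em->block_start == EXTENT_MAP_INLINE)
 *		handle_inline(em);
 *	else
 *		handle_regular(em);
 *	free_extent_map(em);
 *
 * handle_*() are placeholders; the returned mapping may extend beyond the
 * requested range, so callers clip it to the part they care about.
 */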
4959 
4960 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4961 				    size_t pg_offset, u64 start, u64 len,
4962 				    int create)
4963 {
4964 	int ret;
4965 	int err = 0;
4966 	u64 bytenr;
4967 	u64 extent_start = 0;
4968 	u64 extent_end = 0;
4969 	u64 objectid = btrfs_ino(inode);
4970 	u32 found_type;
4971 	struct btrfs_path *path = NULL;
4972 	struct btrfs_root *root = BTRFS_I(inode)->root;
4973 	struct btrfs_file_extent_item *item;
4974 	struct extent_buffer *leaf;
4975 	struct btrfs_key found_key;
4976 	struct extent_map *em = NULL;
4977 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4978 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4979 	struct btrfs_trans_handle *trans = NULL;
4980 	int compress_type;
4981 
4982 again:
4983 	read_lock(&em_tree->lock);
4984 	em = lookup_extent_mapping(em_tree, start, len);
4985 	if (em)
4986 		em->bdev = root->fs_info->fs_devices->latest_bdev;
4987 	read_unlock(&em_tree->lock);
4988 
4989 	if (em) {
4990 		if (em->start > start || em->start + em->len <= start)
4991 			free_extent_map(em);
4992 		else if (em->block_start == EXTENT_MAP_INLINE && page)
4993 			free_extent_map(em);
4994 		else
4995 			goto out;
4996 	}
4997 	em = alloc_extent_map();
4998 	if (!em) {
4999 		err = -ENOMEM;
5000 		goto out;
5001 	}
5002 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5003 	em->start = EXTENT_MAP_HOLE;
5004 	em->orig_start = EXTENT_MAP_HOLE;
5005 	em->len = (u64)-1;
5006 	em->block_len = (u64)-1;
5007 
5008 	if (!path) {
5009 		path = btrfs_alloc_path();
5010 		if (!path) {
5011 			err = -ENOMEM;
5012 			goto out;
5013 		}
5014 		/*
5015 		 * Chances are we'll be called again, so go ahead and do
5016 		 * readahead
5017 		 */
5018 		path->reada = 1;
5019 	}
5020 
5021 	ret = btrfs_lookup_file_extent(trans, root, path,
5022 				       objectid, start, trans != NULL);
5023 	if (ret < 0) {
5024 		err = ret;
5025 		goto out;
5026 	}
5027 
5028 	if (ret != 0) {
5029 		if (path->slots[0] == 0)
5030 			goto not_found;
5031 		path->slots[0]--;
5032 	}
5033 
5034 	leaf = path->nodes[0];
5035 	item = btrfs_item_ptr(leaf, path->slots[0],
5036 			      struct btrfs_file_extent_item);
5037 	/* are we inside the extent that was found? */
5038 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5039 	found_type = btrfs_key_type(&found_key);
5040 	if (found_key.objectid != objectid ||
5041 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5042 		goto not_found;
5043 	}
5044 
5045 	found_type = btrfs_file_extent_type(leaf, item);
5046 	extent_start = found_key.offset;
5047 	compress_type = btrfs_file_extent_compression(leaf, item);
5048 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5049 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5050 		extent_end = extent_start +
5051 		       btrfs_file_extent_num_bytes(leaf, item);
5052 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5053 		size_t size;
5054 		size = btrfs_file_extent_inline_len(leaf, item);
5055 		extent_end = (extent_start + size + root->sectorsize - 1) &
5056 			~((u64)root->sectorsize - 1);
5057 	}
5058 
5059 	if (start >= extent_end) {
5060 		path->slots[0]++;
5061 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5062 			ret = btrfs_next_leaf(root, path);
5063 			if (ret < 0) {
5064 				err = ret;
5065 				goto out;
5066 			}
5067 			if (ret > 0)
5068 				goto not_found;
5069 			leaf = path->nodes[0];
5070 		}
5071 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5072 		if (found_key.objectid != objectid ||
5073 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5074 			goto not_found;
5075 		if (start + len <= found_key.offset)
5076 			goto not_found;
5077 		em->start = start;
5078 		em->len = found_key.offset - start;
5079 		goto not_found_em;
5080 	}
5081 
5082 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5083 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5084 		em->start = extent_start;
5085 		em->len = extent_end - extent_start;
5086 		em->orig_start = extent_start -
5087 				 btrfs_file_extent_offset(leaf, item);
5088 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5089 		if (bytenr == 0) {
5090 			em->block_start = EXTENT_MAP_HOLE;
5091 			goto insert;
5092 		}
5093 		if (compress_type != BTRFS_COMPRESS_NONE) {
5094 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5095 			em->compress_type = compress_type;
5096 			em->block_start = bytenr;
5097 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
5098 									 item);
5099 		} else {
5100 			bytenr += btrfs_file_extent_offset(leaf, item);
5101 			em->block_start = bytenr;
5102 			em->block_len = em->len;
5103 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5104 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5105 		}
5106 		goto insert;
5107 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5108 		unsigned long ptr;
5109 		char *map;
5110 		size_t size;
5111 		size_t extent_offset;
5112 		size_t copy_size;
5113 
5114 		em->block_start = EXTENT_MAP_INLINE;
5115 		if (!page || create) {
5116 			em->start = extent_start;
5117 			em->len = extent_end - extent_start;
5118 			goto out;
5119 		}
5120 
5121 		size = btrfs_file_extent_inline_len(leaf, item);
5122 		extent_offset = page_offset(page) + pg_offset - extent_start;
5123 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5124 				size - extent_offset);
5125 		em->start = extent_start + extent_offset;
5126 		em->len = (copy_size + root->sectorsize - 1) &
5127 			~((u64)root->sectorsize - 1);
5128 		em->orig_start = EXTENT_MAP_INLINE;
5129 		if (compress_type) {
5130 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5131 			em->compress_type = compress_type;
5132 		}
5133 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5134 		if (create == 0 && !PageUptodate(page)) {
5135 			if (btrfs_file_extent_compression(leaf, item) !=
5136 			    BTRFS_COMPRESS_NONE) {
5137 				ret = uncompress_inline(path, inode, page,
5138 							pg_offset,
5139 							extent_offset, item);
5140 				BUG_ON(ret);
5141 			} else {
5142 				map = kmap(page);
5143 				read_extent_buffer(leaf, map + pg_offset, ptr,
5144 						   copy_size);
5145 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5146 					memset(map + pg_offset + copy_size, 0,
5147 					       PAGE_CACHE_SIZE - pg_offset -
5148 					       copy_size);
5149 				}
5150 				kunmap(page);
5151 			}
5152 			flush_dcache_page(page);
5153 		} else if (create && PageUptodate(page)) {
5154 			BUG();
5155 			if (!trans) {
5156 				kunmap(page);
5157 				free_extent_map(em);
5158 				em = NULL;
5159 
5160 				btrfs_release_path(path);
5161 				trans = btrfs_join_transaction(root);
5162 
5163 				if (IS_ERR(trans))
5164 					return ERR_CAST(trans);
5165 				goto again;
5166 			}
5167 			map = kmap(page);
5168 			write_extent_buffer(leaf, map + pg_offset, ptr,
5169 					    copy_size);
5170 			kunmap(page);
5171 			btrfs_mark_buffer_dirty(leaf);
5172 		}
5173 		set_extent_uptodate(io_tree, em->start,
5174 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5175 		goto insert;
5176 	} else {
5177 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5178 		WARN_ON(1);
5179 	}
5180 not_found:
5181 	em->start = start;
5182 	em->len = len;
5183 not_found_em:
5184 	em->block_start = EXTENT_MAP_HOLE;
5185 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5186 insert:
5187 	btrfs_release_path(path);
5188 	if (em->start > start || extent_map_end(em) <= start) {
5189 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5190 		       "[%llu %llu]\n", (unsigned long long)em->start,
5191 		       (unsigned long long)em->len,
5192 		       (unsigned long long)start,
5193 		       (unsigned long long)len);
5194 		err = -EIO;
5195 		goto out;
5196 	}
5197 
5198 	err = 0;
5199 	write_lock(&em_tree->lock);
5200 	ret = add_extent_mapping(em_tree, em);
5201 	/* it is possible that someone inserted the extent into the tree
5202 	 * while we had the lock dropped.  It is also possible that
5203 	 * an overlapping map exists in the tree
5204 	 */
5205 	if (ret == -EEXIST) {
5206 		struct extent_map *existing;
5207 
5208 		ret = 0;
5209 
5210 		existing = lookup_extent_mapping(em_tree, start, len);
5211 		if (existing && (existing->start > start ||
5212 		    existing->start + existing->len <= start)) {
5213 			free_extent_map(existing);
5214 			existing = NULL;
5215 		}
5216 		if (!existing) {
5217 			existing = lookup_extent_mapping(em_tree, em->start,
5218 							 em->len);
5219 			if (existing) {
5220 				err = merge_extent_mapping(em_tree, existing,
5221 							   em, start,
5222 							   root->sectorsize);
5223 				free_extent_map(existing);
5224 				if (err) {
5225 					free_extent_map(em);
5226 					em = NULL;
5227 				}
5228 			} else {
5229 				err = -EIO;
5230 				free_extent_map(em);
5231 				em = NULL;
5232 			}
5233 		} else {
5234 			free_extent_map(em);
5235 			em = existing;
5236 			err = 0;
5237 		}
5238 	}
5239 	write_unlock(&em_tree->lock);
5240 out:
5241 
5242 	trace_btrfs_get_extent(root, em);
5243 
5244 	if (path)
5245 		btrfs_free_path(path);
5246 	if (trans) {
5247 		ret = btrfs_end_transaction(trans, root);
5248 		if (!err)
5249 			err = ret;
5250 	}
5251 	if (err) {
5252 		free_extent_map(em);
5253 		return ERR_PTR(err);
5254 	}
5255 	return em;
5256 }
5257 
5258 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5259 					   size_t pg_offset, u64 start, u64 len,
5260 					   int create)
5261 {
5262 	struct extent_map *em;
5263 	struct extent_map *hole_em = NULL;
5264 	u64 range_start = start;
5265 	u64 end;
5266 	u64 found;
5267 	u64 found_end;
5268 	int err = 0;
5269 
5270 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5271 	if (IS_ERR(em))
5272 		return em;
5273 	if (em) {
5274 		/*
5275 		 * if our em maps to a hole, there might
5276 		 * actually be delalloc bytes behind it
5277 		 */
5278 		if (em->block_start != EXTENT_MAP_HOLE)
5279 			return em;
5280 		else
5281 			hole_em = em;
5282 	}
5283 
5284 	/* check to see if we've wrapped (len == -1 or similar) */
5285 	end = start + len;
5286 	if (end < start)
5287 		end = (u64)-1;
5288 	else
5289 		end -= 1;
5290 
5291 	em = NULL;
5292 
5293 	/* ok, we didn't find anything, let's look for delalloc */
5294 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5295 				 end, len, EXTENT_DELALLOC, 1);
5296 	found_end = range_start + found;
5297 	if (found_end < range_start)
5298 		found_end = (u64)-1;
5299 
5300 	/*
5301 	 * we didn't find anything useful, return
5302 	 * the original results from get_extent()
5303 	 */
5304 	if (range_start > end || found_end <= start) {
5305 		em = hole_em;
5306 		hole_em = NULL;
5307 		goto out;
5308 	}
5309 
5310 	/* adjust the range_start to make sure it doesn't
5311 	 * go backwards from the start they passed in
5312 	 */
5313 	range_start = max(start,range_start);
5314 	found = found_end - range_start;
5315 
5316 	if (found > 0) {
5317 		u64 hole_start = start;
5318 		u64 hole_len = len;
5319 
5320 		em = alloc_extent_map();
5321 		if (!em) {
5322 			err = -ENOMEM;
5323 			goto out;
5324 		}
5325 		/*
5326 		 * when btrfs_get_extent can't find anything it
5327 		 * returns one huge hole
5328 		 *
5329 		 * make sure what it found really fits our range, and
5330 		 * adjust to make sure it is based on the start from
5331 		 * the caller
5332 		 */
5333 		if (hole_em) {
5334 			u64 calc_end = extent_map_end(hole_em);
5335 
5336 			if (calc_end <= start || (hole_em->start > end)) {
5337 				free_extent_map(hole_em);
5338 				hole_em = NULL;
5339 			} else {
5340 				hole_start = max(hole_em->start, start);
5341 				hole_len = calc_end - hole_start;
5342 			}
5343 		}
5344 		em->bdev = NULL;
5345 		if (hole_em && range_start > hole_start) {
5346 			/* our hole starts before our delalloc, so we
5347 			 * have to return just the parts of the hole
5348 			 * that go until the delalloc starts
5349 			 */
5350 			em->len = min(hole_len,
5351 				      range_start - hole_start);
5352 			em->start = hole_start;
5353 			em->orig_start = hole_start;
5354 			/*
5355 			 * don't adjust block start at all,
5356 			 * it is fixed at EXTENT_MAP_HOLE
5357 			 */
5358 			em->block_start = hole_em->block_start;
5359 			em->block_len = hole_len;
5360 		} else {
5361 			em->start = range_start;
5362 			em->len = found;
5363 			em->orig_start = range_start;
5364 			em->block_start = EXTENT_MAP_DELALLOC;
5365 			em->block_len = found;
5366 		}
5367 	} else if (hole_em) {
5368 		return hole_em;
5369 	}
5370 out:
5371 
5372 	free_extent_map(hole_em);
5373 	if (err) {
5374 		free_extent_map(em);
5375 		return ERR_PTR(err);
5376 	}
5377 	return em;
5378 }
5379 
5380 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5381 						  struct extent_map *em,
5382 						  u64 start, u64 len)
5383 {
5384 	struct btrfs_root *root = BTRFS_I(inode)->root;
5385 	struct btrfs_trans_handle *trans;
5386 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5387 	struct btrfs_key ins;
5388 	u64 alloc_hint;
5389 	int ret;
5390 	bool insert = false;
5391 
5392 	/*
5393 	 * Ok, if the extent map we looked up is a hole and covers the exact
5394 	 * range we want, there is no reason to allocate a new one.  However, if
5395 	 * it is not right, we need to free this one and drop the cache for
5396 	 * our range.
5397 	 */
5398 	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5399 	    em->len != len) {
5400 		free_extent_map(em);
5401 		em = NULL;
5402 		insert = true;
5403 		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5404 	}
5405 
5406 	trans = btrfs_join_transaction(root);
5407 	if (IS_ERR(trans))
5408 		return ERR_CAST(trans);
5409 
5410 	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
5411 		btrfs_add_inode_defrag(trans, inode);
5412 
5413 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5414 
5415 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5416 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5417 				   alloc_hint, (u64)-1, &ins, 1);
5418 	if (ret) {
5419 		em = ERR_PTR(ret);
5420 		goto out;
5421 	}
5422 
5423 	if (!em) {
5424 		em = alloc_extent_map();
5425 		if (!em) {
5426 			em = ERR_PTR(-ENOMEM);
5427 			goto out;
5428 		}
5429 	}
5430 
5431 	em->start = start;
5432 	em->orig_start = em->start;
5433 	em->len = ins.offset;
5434 
5435 	em->block_start = ins.objectid;
5436 	em->block_len = ins.offset;
5437 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5438 
5439 	/*
5440 	 * We need to do this because if we're using the original em we searched
5441 	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5442 	 */
5443 	em->flags = 0;
5444 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5445 
5446 	while (insert) {
5447 		write_lock(&em_tree->lock);
5448 		ret = add_extent_mapping(em_tree, em);
5449 		write_unlock(&em_tree->lock);
5450 		if (ret != -EEXIST)
5451 			break;
5452 		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5453 	}
5454 
5455 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5456 					   ins.offset, ins.offset, 0);
5457 	if (ret) {
5458 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5459 		em = ERR_PTR(ret);
5460 	}
5461 out:
5462 	btrfs_end_transaction(trans, root);
5463 	return em;
5464 }
5465 
5466 /*
5467  * returns 1 when the nocow is safe, < 0 on error, 0 if the
5468  * block must be cow'd
5469  */
5470 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5471 				      struct inode *inode, u64 offset, u64 len)
5472 {
5473 	struct btrfs_path *path;
5474 	int ret;
5475 	struct extent_buffer *leaf;
5476 	struct btrfs_root *root = BTRFS_I(inode)->root;
5477 	struct btrfs_file_extent_item *fi;
5478 	struct btrfs_key key;
5479 	u64 disk_bytenr;
5480 	u64 backref_offset;
5481 	u64 extent_end;
5482 	u64 num_bytes;
5483 	int slot;
5484 	int found_type;
5485 
5486 	path = btrfs_alloc_path();
5487 	if (!path)
5488 		return -ENOMEM;
5489 
5490 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5491 				       offset, 0);
5492 	if (ret < 0)
5493 		goto out;
5494 
5495 	slot = path->slots[0];
5496 	if (ret == 1) {
5497 		if (slot == 0) {
5498 			/* can't find the item, must cow */
5499 			ret = 0;
5500 			goto out;
5501 		}
5502 		slot--;
5503 	}
5504 	ret = 0;
5505 	leaf = path->nodes[0];
5506 	btrfs_item_key_to_cpu(leaf, &key, slot);
5507 	if (key.objectid != btrfs_ino(inode) ||
5508 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5509 		/* not our file or wrong item type, must cow */
5510 		goto out;
5511 	}
5512 
5513 	if (key.offset > offset) {
5514 		/* Wrong offset, must cow */
5515 		goto out;
5516 	}
5517 
5518 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5519 	found_type = btrfs_file_extent_type(leaf, fi);
5520 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5521 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5522 		/* not a regular extent, must cow */
5523 		goto out;
5524 	}
5525 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5526 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5527 
5528 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5529 	if (extent_end < offset + len) {
5530 		/* extent doesn't include our full range, must cow */
5531 		goto out;
5532 	}
5533 
5534 	if (btrfs_extent_readonly(root, disk_bytenr))
5535 		goto out;
5536 
5537 	/*
5538 	 * look for other files referencing this extent; if we
5539 	 * find any we must cow
5540 	 */
5541 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5542 				  key.offset - backref_offset, disk_bytenr))
5543 		goto out;
5544 
5545 	/*
5546 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5547 	 * in this extent we are about to write.  If there
5548 	 * are any csums in that range we have to cow in order
5549 	 * to keep the csums correct
5550 	 */
5551 	disk_bytenr += backref_offset;
5552 	disk_bytenr += offset - key.offset;
5553 	num_bytes = min(offset + len, extent_end) - offset;
5554 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5555 		goto out;
5556 	/*
5557 	 * all of the above have passed, it is safe to overwrite this extent
5558 	 * without cow
5559 	 */
5560 	ret = 1;
5561 out:
5562 	btrfs_free_path(path);
5563 	return ret;
5564 }
5565 
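/*
 * get_block callback used by __blockdev_direct_IO.  Reads map existing
 * extents directly (holes and prealloc just unlock and return); writes reuse
 * nocow/preallocated extents when can_nocow_odirect allows it and otherwise
 * allocate a new extent via btrfs_new_extent_direct.
 */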
5566 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5567 				   struct buffer_head *bh_result, int create)
5568 {
5569 	struct extent_map *em;
5570 	struct btrfs_root *root = BTRFS_I(inode)->root;
5571 	u64 start = iblock << inode->i_blkbits;
5572 	u64 len = bh_result->b_size;
5573 	struct btrfs_trans_handle *trans;
5574 
5575 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5576 	if (IS_ERR(em))
5577 		return PTR_ERR(em);
5578 
5579 	/*
5580 	 * Ok, for INLINE and COMPRESSED extents we need to fall back on buffered
5581 	 * IO.  INLINE is special, and we could probably kludge it in here, but
5582 	 * it's still buffered so for safety let's just fall back to the generic
5583 	 * buffered path.
5584 	 *
5585 	 * For COMPRESSED we _have_ to read the entire extent in so we can
5586 	 * decompress it, so there will be buffering required no matter what we
5587 	 * do, so go ahead and fall back to buffered.
5588 	 *
5589 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
5590 	 * to buffered IO.  Don't blame me, this is the price we pay for using
5591 	 * the generic code.
5592 	 */
5593 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5594 	    em->block_start == EXTENT_MAP_INLINE) {
5595 		free_extent_map(em);
5596 		return -ENOTBLK;
5597 	}
5598 
5599 	/* Just a good old fashioned hole, return */
5600 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5601 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5602 		free_extent_map(em);
5603 		/* DIO will do one hole at a time, so just unlock a sector */
5604 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
5605 			      start + root->sectorsize - 1, GFP_NOFS);
5606 		return 0;
5607 	}
5608 
5609 	/*
5610 	 * We don't allocate a new extent in the following cases
5611 	 *
5612 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
5613 	 * existing extent.
5614 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
5615 	 * just use the extent.
5616 	 *
5617 	 */
5618 	if (!create) {
5619 		len = em->len - (start - em->start);
5620 		goto map;
5621 	}
5622 
5623 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5624 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5625 	     em->block_start != EXTENT_MAP_HOLE)) {
5626 		int type;
5627 		int ret;
5628 		u64 block_start;
5629 
5630 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5631 			type = BTRFS_ORDERED_PREALLOC;
5632 		else
5633 			type = BTRFS_ORDERED_NOCOW;
5634 		len = min(len, em->len - (start - em->start));
5635 		block_start = em->block_start + (start - em->start);
5636 
5637 		/*
5638 		 * we're not going to log anything, but we do need
5639 		 * to make sure the current transaction stays open
5640 		 * while we look for nocow cross refs
5641 		 */
5642 		trans = btrfs_join_transaction(root);
5643 		if (IS_ERR(trans))
5644 			goto must_cow;
5645 
5646 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
5647 			ret = btrfs_add_ordered_extent_dio(inode, start,
5648 					   block_start, len, len, type);
5649 			btrfs_end_transaction(trans, root);
5650 			if (ret) {
5651 				free_extent_map(em);
5652 				return ret;
5653 			}
5654 			goto unlock;
5655 		}
5656 		btrfs_end_transaction(trans, root);
5657 	}
5658 must_cow:
5659 	/*
5660 	 * this will cow the extent, reset the len in case we changed
5661 	 * it above
5662 	 */
5663 	len = bh_result->b_size;
5664 	em = btrfs_new_extent_direct(inode, em, start, len);
5665 	if (IS_ERR(em))
5666 		return PTR_ERR(em);
5667 	len = min(len, em->len - (start - em->start));
5668 unlock:
5669 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5670 			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5671 			  0, NULL, GFP_NOFS);
5672 map:
5673 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5674 		inode->i_blkbits;
5675 	bh_result->b_size = len;
5676 	bh_result->b_bdev = em->bdev;
5677 	set_buffer_mapped(bh_result);
5678 	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5679 		set_buffer_new(bh_result);
5680 
5681 	free_extent_map(em);
5682 
5683 	return 0;
5684 }
5685 
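/* per-dio bookkeeping shared by all of the split bios for one direct IO */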
5686 struct btrfs_dio_private {
5687 	struct inode *inode;
5688 	u64 logical_offset;
5689 	u64 disk_bytenr;
5690 	u64 bytes;
5691 	u32 *csums;
5692 	void *private;
5693 
5694 	/* number of bios pending for this dio */
5695 	atomic_t pending_bios;
5696 
5697 	/* IO errors */
5698 	int errors;
5699 
5700 	struct bio *orig_bio;
5701 };
5702 
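/*
 * Completion handler for direct reads: verify the checksum of every bio_vec
 * against the csums looked up at submit time (unless the inode is
 * NODATASUM), unlock the extent range and complete the bio via dio_end_io().
 */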
5703 static void btrfs_endio_direct_read(struct bio *bio, int err)
5704 {
5705 	struct btrfs_dio_private *dip = bio->bi_private;
5706 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5707 	struct bio_vec *bvec = bio->bi_io_vec;
5708 	struct inode *inode = dip->inode;
5709 	struct btrfs_root *root = BTRFS_I(inode)->root;
5710 	u64 start;
5711 	u32 *private = dip->csums;
5712 
5713 	start = dip->logical_offset;
5714 	do {
5715 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
5716 			struct page *page = bvec->bv_page;
5717 			char *kaddr;
5718 			u32 csum = ~(u32)0;
5719 			unsigned long flags;
5720 
5721 			local_irq_save(flags);
5722 			kaddr = kmap_atomic(page, KM_IRQ0);
5723 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
5724 					       csum, bvec->bv_len);
5725 			btrfs_csum_final(csum, (char *)&csum);
5726 			kunmap_atomic(kaddr, KM_IRQ0);
5727 			local_irq_restore(flags);
5728 
5729 			flush_dcache_page(bvec->bv_page);
5730 			if (csum != *private) {
5731 				printk(KERN_ERR "btrfs csum failed ino %llu off"
5732 				      " %llu csum %u private %u\n",
5733 				      (unsigned long long)btrfs_ino(inode),
5734 				      (unsigned long long)start,
5735 				      csum, *private);
5736 				err = -EIO;
5737 			}
5738 		}
5739 
5740 		start += bvec->bv_len;
5741 		private++;
5742 		bvec++;
5743 	} while (bvec <= bvec_end);
5744 
5745 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5746 		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
5747 	bio->bi_private = dip->private;
5748 
5749 	kfree(dip->csums);
5750 	kfree(dip);
5751 
5752 	/* If we had a csum failure make sure to clear the uptodate flag */
5753 	if (err)
5754 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
5755 	dio_end_io(bio, err);
5756 }
5757 
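/*
 * Completion handler for direct writes: finish the ordered extent(s) covered
 * by this dio, inserting the file extent item (or marking a preallocated
 * extent as written), adding any pending csums and updating the on-disk
 * i_size before completing the bio via dio_end_io().
 */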
5758 static void btrfs_endio_direct_write(struct bio *bio, int err)
5759 {
5760 	struct btrfs_dio_private *dip = bio->bi_private;
5761 	struct inode *inode = dip->inode;
5762 	struct btrfs_root *root = BTRFS_I(inode)->root;
5763 	struct btrfs_trans_handle *trans;
5764 	struct btrfs_ordered_extent *ordered = NULL;
5765 	struct extent_state *cached_state = NULL;
5766 	u64 ordered_offset = dip->logical_offset;
5767 	u64 ordered_bytes = dip->bytes;
5768 	int ret;
5769 
5770 	if (err)
5771 		goto out_done;
5772 again:
5773 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5774 						   &ordered_offset,
5775 						   ordered_bytes);
5776 	if (!ret)
5777 		goto out_test;
5778 
5779 	BUG_ON(!ordered);
5780 
5781 	trans = btrfs_join_transaction(root);
5782 	if (IS_ERR(trans)) {
5783 		err = -ENOMEM;
5784 		goto out;
5785 	}
5786 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5787 
5788 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5789 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5790 		if (!ret)
5791 			err = btrfs_update_inode_fallback(trans, root, inode);
5792 		goto out;
5793 	}
5794 
5795 	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5796 			 ordered->file_offset + ordered->len - 1, 0,
5797 			 &cached_state, GFP_NOFS);
5798 
5799 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5800 		ret = btrfs_mark_extent_written(trans, inode,
5801 						ordered->file_offset,
5802 						ordered->file_offset +
5803 						ordered->len);
5804 		if (ret) {
5805 			err = ret;
5806 			goto out_unlock;
5807 		}
5808 	} else {
5809 		ret = insert_reserved_file_extent(trans, inode,
5810 						  ordered->file_offset,
5811 						  ordered->start,
5812 						  ordered->disk_len,
5813 						  ordered->len,
5814 						  ordered->len,
5815 						  0, 0, 0,
5816 						  BTRFS_FILE_EXTENT_REG);
5817 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
5818 				   ordered->file_offset, ordered->len);
5819 		if (ret) {
5820 			err = ret;
5821 			WARN_ON(1);
5822 			goto out_unlock;
5823 		}
5824 	}
5825 
5826 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5827 	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5828 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5829 		btrfs_update_inode_fallback(trans, root, inode);
5830 	ret = 0;
5831 out_unlock:
5832 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5833 			     ordered->file_offset + ordered->len - 1,
5834 			     &cached_state, GFP_NOFS);
5835 out:
5836 	btrfs_delalloc_release_metadata(inode, ordered->len);
5837 	btrfs_end_transaction(trans, root);
5838 	ordered_offset = ordered->file_offset + ordered->len;
5839 	btrfs_put_ordered_extent(ordered);
5840 	btrfs_put_ordered_extent(ordered);
5841 
5842 out_test:
5843 	/*
5844 	 * our bio might span multiple ordered extents.  If we haven't
5845 	 * completed the accounting for the whole dio, go back and try again
5846 	 */
5847 	if (ordered_offset < dip->logical_offset + dip->bytes) {
5848 		ordered_bytes = dip->logical_offset + dip->bytes -
5849 			ordered_offset;
5850 		goto again;
5851 	}
5852 out_done:
5853 	bio->bi_private = dip->private;
5854 
5855 	kfree(dip->csums);
5856 	kfree(dip);
5857 
5858 	/* If we had an error make sure to clear the uptodate flag */
5859 	if (err)
5860 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
5861 	dio_end_io(bio, err);
5862 }
5863 
5864 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5865 				    struct bio *bio, int mirror_num,
5866 				    unsigned long bio_flags, u64 offset)
5867 {
5868 	int ret;
5869 	struct btrfs_root *root = BTRFS_I(inode)->root;
5870 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
5871 	BUG_ON(ret);
5872 	return 0;
5873 }
5874 
5875 static void btrfs_end_dio_bio(struct bio *bio, int err)
5876 {
5877 	struct btrfs_dio_private *dip = bio->bi_private;
5878 
5879 	if (err) {
5880 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
5881 		      "sector %#Lx len %u err no %d\n",
5882 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
5883 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
5884 		dip->errors = 1;
5885 
5886 		/*
5887 		 * before the atomic variable goes to zero, we must make sure
5888 		 * dip->errors is perceived to be set.
5889 		 */
5890 		smp_mb__before_atomic_dec();
5891 	}
5892 
5893 	/* if there are more bios still pending for this dio, just exit */
5894 	if (!atomic_dec_and_test(&dip->pending_bios))
5895 		goto out;
5896 
5897 	if (dip->errors)
5898 		bio_io_error(dip->orig_bio);
5899 	else {
5900 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
5901 		bio_endio(dip->orig_bio, 0);
5902 	}
5903 out:
5904 	bio_put(bio);
5905 }
5906 
5907 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
5908 				       u64 first_sector, gfp_t gfp_flags)
5909 {
5910 	int nr_vecs = bio_get_nr_vecs(bdev);
5911 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
5912 }
5913 
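/*
 * Submit one piece of a direct IO: hook up the endio workqueue, do the csum
 * work (async or inline for writes, a csum lookup for reads) and then map
 * the bio down to the underlying device.
 */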
5914 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5915 					 int rw, u64 file_offset, int skip_sum,
5916 					 u32 *csums, int async_submit)
5917 {
5918 	int write = rw & REQ_WRITE;
5919 	struct btrfs_root *root = BTRFS_I(inode)->root;
5920 	int ret;
5921 
5922 	bio_get(bio);
5923 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
5924 	if (ret)
5925 		goto err;
5926 
5927 	if (skip_sum)
5928 		goto map;
5929 
5930 	if (write && async_submit) {
5931 		ret = btrfs_wq_submit_bio(root->fs_info,
5932 				   inode, rw, bio, 0, 0,
5933 				   file_offset,
5934 				   __btrfs_submit_bio_start_direct_io,
5935 				   __btrfs_submit_bio_done);
5936 		goto err;
5937 	} else if (write) {
5938 		/*
5939 		 * If we aren't doing async submit, calculate the csum of the
5940 		 * bio now.
5941 		 */
5942 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
5943 		if (ret)
5944 			goto err;
5945 	} else if (!skip_sum) {
5946 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
5947 					  file_offset, csums);
5948 		if (ret)
5949 			goto err;
5950 	}
5951 
5952 map:
5953 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
5954 err:
5955 	bio_put(bio);
5956 	return ret;
5957 }
5958 
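/*
 * Split the original dio bio so that no piece crosses a boundary reported by
 * btrfs_map_block, submitting each piece with __btrfs_submit_dio_bio.
 * dip->pending_bios counts the in-flight pieces so that btrfs_end_dio_bio
 * completes the original bio once the last piece finishes.
 */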
5959 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5960 				    int skip_sum)
5961 {
5962 	struct inode *inode = dip->inode;
5963 	struct btrfs_root *root = BTRFS_I(inode)->root;
5964 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5965 	struct bio *bio;
5966 	struct bio *orig_bio = dip->orig_bio;
5967 	struct bio_vec *bvec = orig_bio->bi_io_vec;
5968 	u64 start_sector = orig_bio->bi_sector;
5969 	u64 file_offset = dip->logical_offset;
5970 	u64 submit_len = 0;
5971 	u64 map_length;
5972 	int nr_pages = 0;
5973 	u32 *csums = dip->csums;
5974 	int ret = 0;
5975 	int async_submit = 0;
5976 	int write = rw & REQ_WRITE;
5977 
5978 	map_length = orig_bio->bi_size;
5979 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5980 			      &map_length, NULL, 0);
5981 	if (ret) {
5982 		bio_put(orig_bio);
5983 		return -EIO;
5984 	}
5985 
5986 	if (map_length >= orig_bio->bi_size) {
5987 		bio = orig_bio;
5988 		goto submit;
5989 	}
5990 
5991 	async_submit = 1;
5992 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5993 	if (!bio)
5994 		return -ENOMEM;
5995 	bio->bi_private = dip;
5996 	bio->bi_end_io = btrfs_end_dio_bio;
5997 	atomic_inc(&dip->pending_bios);
5998 
5999 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6000 		if (unlikely(map_length < submit_len + bvec->bv_len ||
6001 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6002 				 bvec->bv_offset) < bvec->bv_len)) {
6003 			/*
6004 			 * inc the count before we submit the bio so
6005 			 * we know the end IO handler won't happen before
6006 			 * we inc the count. Otherwise, the dip might get freed
6007 			 * before we're done setting it up
6008 			 */
6009 			atomic_inc(&dip->pending_bios);
6010 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
6011 						     file_offset, skip_sum,
6012 						     csums, async_submit);
6013 			if (ret) {
6014 				bio_put(bio);
6015 				atomic_dec(&dip->pending_bios);
6016 				goto out_err;
6017 			}
6018 
6019 			/* Writes use the ordered csums */
6020 			if (!write && !skip_sum)
6021 				csums = csums + nr_pages;
6022 			start_sector += submit_len >> 9;
6023 			file_offset += submit_len;
6024 
6025 			submit_len = 0;
6026 			nr_pages = 0;
6027 
6028 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6029 						  start_sector, GFP_NOFS);
6030 			if (!bio)
6031 				goto out_err;
6032 			bio->bi_private = dip;
6033 			bio->bi_end_io = btrfs_end_dio_bio;
6034 
6035 			map_length = orig_bio->bi_size;
6036 			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6037 					      &map_length, NULL, 0);
6038 			if (ret) {
6039 				bio_put(bio);
6040 				goto out_err;
6041 			}
6042 		} else {
6043 			submit_len += bvec->bv_len;
6044 			nr_pages++;
6045 			bvec++;
6046 		}
6047 	}
6048 
6049 submit:
6050 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6051 				     csums, async_submit);
6052 	if (!ret)
6053 		return 0;
6054 
6055 	bio_put(bio);
6056 out_err:
6057 	dip->errors = 1;
6058 	/*
6059 	 * before the atomic variable goes to zero, we must
6060 	 * make sure dip->errors is perceived to be set.
6061 	 */
6062 	smp_mb__before_atomic_dec();
6063 	if (atomic_dec_and_test(&dip->pending_bios))
6064 		bio_io_error(dip->orig_bio);
6065 
6066 	/* bio_end_io() will handle error, so we needn't return it */
6067 	return 0;
6068 }
6069 
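/*
 * Entry point handed to __blockdev_direct_IO: set up the btrfs_dio_private
 * for this bio (including the csum array for reads), pick the read or write
 * endio handler and submit via btrfs_submit_direct_hook, cleaning up the
 * reserved space and ordered extent for writes on failure.
 */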
6070 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6071 				loff_t file_offset)
6072 {
6073 	struct btrfs_root *root = BTRFS_I(inode)->root;
6074 	struct btrfs_dio_private *dip;
6075 	struct bio_vec *bvec = bio->bi_io_vec;
6076 	int skip_sum;
6077 	int write = rw & REQ_WRITE;
6078 	int ret = 0;
6079 
6080 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6081 
6082 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6083 	if (!dip) {
6084 		ret = -ENOMEM;
6085 		goto free_ordered;
6086 	}
6087 	dip->csums = NULL;
6088 
6089 	/* Writes use the ordered csum stuff, so we don't need dip->csums */
6090 	if (!write && !skip_sum) {
6091 		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6092 		if (!dip->csums) {
6093 			kfree(dip);
6094 			ret = -ENOMEM;
6095 			goto free_ordered;
6096 		}
6097 	}
6098 
6099 	dip->private = bio->bi_private;
6100 	dip->inode = inode;
6101 	dip->logical_offset = file_offset;
6102 
6103 	dip->bytes = 0;
6104 	do {
6105 		dip->bytes += bvec->bv_len;
6106 		bvec++;
6107 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6108 
6109 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6110 	bio->bi_private = dip;
6111 	dip->errors = 0;
6112 	dip->orig_bio = bio;
6113 	atomic_set(&dip->pending_bios, 0);
6114 
6115 	if (write)
6116 		bio->bi_end_io = btrfs_endio_direct_write;
6117 	else
6118 		bio->bi_end_io = btrfs_endio_direct_read;
6119 
6120 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6121 	if (!ret)
6122 		return;
6123 free_ordered:
6124 	/*
6125 	 * If this is a write, we need to clean up the reserved space and kill
6126 	 * the ordered extent.
6127 	 */
6128 	if (write) {
6129 		struct btrfs_ordered_extent *ordered;
6130 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6131 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6132 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6133 			btrfs_free_reserved_extent(root, ordered->start,
6134 						   ordered->disk_len);
6135 		btrfs_put_ordered_extent(ordered);
6136 		btrfs_put_ordered_extent(ordered);
6137 	}
6138 	bio_endio(bio, ret);
6139 }
6140 
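/*
 * Direct IO on btrfs requires the file offset and every iovec to be sector
 * aligned; reads additionally reject duplicate iov_base values, which would
 * otherwise trigger spurious csum errors when reading back.
 */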
6141 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6142 			const struct iovec *iov, loff_t offset,
6143 			unsigned long nr_segs)
6144 {
6145 	int seg;
6146 	int i;
6147 	size_t size;
6148 	unsigned long addr;
6149 	unsigned blocksize_mask = root->sectorsize - 1;
6150 	ssize_t retval = -EINVAL;
6151 	loff_t end = offset;
6152 
6153 	if (offset & blocksize_mask)
6154 		goto out;
6155 
6156 	/* Check the memory alignment.  Blocks cannot straddle pages */
6157 	for (seg = 0; seg < nr_segs; seg++) {
6158 		addr = (unsigned long)iov[seg].iov_base;
6159 		size = iov[seg].iov_len;
6160 		end += size;
6161 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6162 			goto out;
6163 
6164 		/* If this is a write we don't need to check anymore */
6165 		if (rw & WRITE)
6166 			continue;
6167 
6168 		/*
6169 		 * Check to make sure we don't have duplicate iov_base's in this
6170 		 * iovec; if so return -EINVAL, otherwise we'll get csum errors
6171 		 * when reading back.
6172 		 */
6173 		for (i = seg + 1; i < nr_segs; i++) {
6174 			if (iov[seg].iov_base == iov[i].iov_base)
6175 				goto out;
6176 		}
6177 	}
6178 	retval = 0;
6179 out:
6180 	return retval;
6181 }
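
/*
 * ->direct_IO implementation: reserve delalloc space for writes, lock the
 * extent range and wait out any ordered extents in it, then let
 * __blockdev_direct_IO drive btrfs_get_blocks_direct and btrfs_submit_direct,
 * unlocking whatever part of the range no IO was done on.
 */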
6182 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6183 			const struct iovec *iov, loff_t offset,
6184 			unsigned long nr_segs)
6185 {
6186 	struct file *file = iocb->ki_filp;
6187 	struct inode *inode = file->f_mapping->host;
6188 	struct btrfs_ordered_extent *ordered;
6189 	struct extent_state *cached_state = NULL;
6190 	u64 lockstart, lockend;
6191 	ssize_t ret;
6192 	int writing = rw & WRITE;
6193 	int write_bits = 0;
6194 	size_t count = iov_length(iov, nr_segs);
6195 
6196 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6197 			    offset, nr_segs)) {
6198 		return 0;
6199 	}
6200 
6201 	lockstart = offset;
6202 	lockend = offset + count - 1;
6203 
6204 	if (writing) {
6205 		ret = btrfs_delalloc_reserve_space(inode, count);
6206 		if (ret)
6207 			goto out;
6208 	}
6209 
6210 	while (1) {
6211 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6212 				 0, &cached_state, GFP_NOFS);
6213 		/*
6214 		 * We're concerned with the entire range that we're going to be
6215 		 * doing DIO to, so we need to make sure there are no ordered
6216 		 * extents in this range.
6217 		 */
6218 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6219 						     lockend - lockstart + 1);
6220 		if (!ordered)
6221 			break;
6222 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6223 				     &cached_state, GFP_NOFS);
6224 		btrfs_start_ordered_extent(inode, ordered, 1);
6225 		btrfs_put_ordered_extent(ordered);
6226 		cond_resched();
6227 	}
6228 
6229 	/*
6230 	 * we don't use btrfs_set_extent_delalloc because we don't want
6231 	 * the dirty or uptodate bits
6232 	 */
6233 	if (writing) {
6234 		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6235 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6236 				     EXTENT_DELALLOC, 0, NULL, &cached_state,
6237 				     GFP_NOFS);
6238 		if (ret) {
6239 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6240 					 lockend, EXTENT_LOCKED | write_bits,
6241 					 1, 0, &cached_state, GFP_NOFS);
6242 			goto out;
6243 		}
6244 	}
6245 
6246 	free_extent_state(cached_state);
6247 	cached_state = NULL;
6248 
6249 	ret = __blockdev_direct_IO(rw, iocb, inode,
6250 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6251 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6252 		   btrfs_submit_direct, 0);
6253 
6254 	if (ret < 0 && ret != -EIOCBQUEUED) {
6255 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6256 			      offset + iov_length(iov, nr_segs) - 1,
6257 			      EXTENT_LOCKED | write_bits, 1, 0,
6258 			      &cached_state, GFP_NOFS);
6259 	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6260 		/*
6261 		 * We're falling back to buffered, unlock the section we didn't
6262 		 * do IO on.
6263 		 */
6264 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6265 			      offset + iov_length(iov, nr_segs) - 1,
6266 			      EXTENT_LOCKED | write_bits, 1, 0,
6267 			      &cached_state, GFP_NOFS);
6268 	}
6269 out:
6270 	free_extent_state(cached_state);
6271 	return ret;
6272 }
6273 
6274 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6275 		__u64 start, __u64 len)
6276 {
6277 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6278 }
6279 
6280 int btrfs_readpage(struct file *file, struct page *page)
6281 {
6282 	struct extent_io_tree *tree;
6283 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6284 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6285 }
6286 
6287 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6288 {
6289 	struct extent_io_tree *tree;
6290 
6291 
6292 	if (current->flags & PF_MEMALLOC) {
6293 		redirty_page_for_writepage(wbc, page);
6294 		unlock_page(page);
6295 		return 0;
6296 	}
6297 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6298 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6299 }
6300 
6301 int btrfs_writepages(struct address_space *mapping,
6302 		     struct writeback_control *wbc)
6303 {
6304 	struct extent_io_tree *tree;
6305 
6306 	tree = &BTRFS_I(mapping->host)->io_tree;
6307 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6308 }
6309 
6310 static int
6311 btrfs_readpages(struct file *file, struct address_space *mapping,
6312 		struct list_head *pages, unsigned nr_pages)
6313 {
6314 	struct extent_io_tree *tree;
6315 	tree = &BTRFS_I(mapping->host)->io_tree;
6316 	return extent_readpages(tree, mapping, pages, nr_pages,
6317 				btrfs_get_extent);
6318 }
6319 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6320 {
6321 	struct extent_io_tree *tree;
6322 	struct extent_map_tree *map;
6323 	int ret;
6324 
6325 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6326 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6327 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6328 	if (ret == 1) {
6329 		ClearPagePrivate(page);
6330 		set_page_private(page, 0);
6331 		page_cache_release(page);
6332 	}
6333 	return ret;
6334 }
6335 
6336 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6337 {
6338 	if (PageWriteback(page) || PageDirty(page))
6339 		return 0;
6340 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6341 }
6342 
6343 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6344 {
6345 	struct extent_io_tree *tree;
6346 	struct btrfs_ordered_extent *ordered;
6347 	struct extent_state *cached_state = NULL;
6348 	u64 page_start = page_offset(page);
6349 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6350 
6351 
6352 	/*
6353 	 * we have the page locked, so new writeback can't start,
6354 	 * and the dirty bit won't be cleared while we are here.
6355 	 *
6356 	 * Wait for IO on this page so that we can safely clear
6357 	 * the PagePrivate2 bit and do ordered accounting
6358 	 */
6359 	wait_on_page_writeback(page);
6360 
6361 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6362 	if (offset) {
6363 		btrfs_releasepage(page, GFP_NOFS);
6364 		return;
6365 	}
6366 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
6367 			 GFP_NOFS);
6368 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
6369 					   page_offset(page));
6370 	if (ordered) {
6371 		/*
6372 		 * IO on this page will never be started, so we need
6373 		 * to account for any ordered extents now
6374 		 */
6375 		clear_extent_bit(tree, page_start, page_end,
6376 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6377 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
6378 				 &cached_state, GFP_NOFS);
6379 		/*
6380 		 * whoever cleared the private bit is responsible
6381 		 * for the finish_ordered_io
6382 		 */
6383 		if (TestClearPagePrivate2(page)) {
6384 			btrfs_finish_ordered_io(page->mapping->host,
6385 						page_start, page_end);
6386 		}
6387 		btrfs_put_ordered_extent(ordered);
6388 		cached_state = NULL;
6389 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
6390 				 GFP_NOFS);
6391 	}
6392 	clear_extent_bit(tree, page_start, page_end,
6393 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6394 		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
6395 	__btrfs_releasepage(page, GFP_NOFS);
6396 
6397 	ClearPageChecked(page);
6398 	if (PagePrivate(page)) {
6399 		ClearPagePrivate(page);
6400 		set_page_private(page, 0);
6401 		page_cache_release(page);
6402 	}
6403 }
6404 
6405 /*
6406  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6407  * called from a page fault handler when a page is first dirtied. Hence we must
6408  * be careful to check for EOF conditions here. We set the page up correctly
6409  * for a written page which means we get ENOSPC checking when writing into
6410  * holes and correct delalloc and unwritten extent mapping on filesystems that
6411  * support these features.
6412  *
6413  * We are not allowed to take the i_mutex here so we have to play games to
6414  * protect against truncate races as the page could now be beyond EOF.  Because
6415  * vmtruncate() writes the inode size before removing pages, once we have the
6416  * page lock we can determine safely if the page is beyond EOF. If it is not
6417  * beyond EOF, then the page is guaranteed safe against truncation until we
6418  * unlock the page.
6419  */
6420 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6421 {
6422 	struct page *page = vmf->page;
6423 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6424 	struct btrfs_root *root = BTRFS_I(inode)->root;
6425 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6426 	struct btrfs_ordered_extent *ordered;
6427 	struct extent_state *cached_state = NULL;
6428 	char *kaddr;
6429 	unsigned long zero_start;
6430 	loff_t size;
6431 	int ret;
6432 	int reserved = 0;
6433 	u64 page_start;
6434 	u64 page_end;
6435 
6436 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6437 	if (!ret) {
6438 		ret = btrfs_update_time(vma->vm_file);
6439 		reserved = 1;
6440 	}
6441 	if (ret) {
6442 		if (ret == -ENOMEM)
6443 			ret = VM_FAULT_OOM;
6444 		else /* -ENOSPC, -EIO, etc */
6445 			ret = VM_FAULT_SIGBUS;
6446 		if (reserved)
6447 			goto out;
6448 		goto out_noreserve;
6449 	}
6450 
6451 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6452 again:
6453 	lock_page(page);
6454 	size = i_size_read(inode);
6455 	page_start = page_offset(page);
6456 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6457 
6458 	if ((page->mapping != inode->i_mapping) ||
6459 	    (page_start >= size)) {
6460 		/* page got truncated out from underneath us */
6461 		goto out_unlock;
6462 	}
6463 	wait_on_page_writeback(page);
6464 
6465 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
6466 			 GFP_NOFS);
6467 	set_page_extent_mapped(page);
6468 
6469 	/*
6470 	 * we can't set the delalloc bits if there are pending ordered
6471 	 * extents.  Drop our locks and wait for them to finish
6472 	 */
6473 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6474 	if (ordered) {
6475 		unlock_extent_cached(io_tree, page_start, page_end,
6476 				     &cached_state, GFP_NOFS);
6477 		unlock_page(page);
6478 		btrfs_start_ordered_extent(inode, ordered, 1);
6479 		btrfs_put_ordered_extent(ordered);
6480 		goto again;
6481 	}
6482 
6483 	/*
6484 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6485 	 * if it was already dirty, so for space accounting reasons we need to
6486 	 * clear any delalloc bits for the range we are fixing to save.  There
6487 	 * is probably a better way to do this, but for now keep consistent with
6488 	 * prepare_pages in the normal write path.
6489 	 */
6490 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6491 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
6492 			  0, 0, &cached_state, GFP_NOFS);
6493 
6494 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6495 					&cached_state);
6496 	if (ret) {
6497 		unlock_extent_cached(io_tree, page_start, page_end,
6498 				     &cached_state, GFP_NOFS);
6499 		ret = VM_FAULT_SIGBUS;
6500 		goto out_unlock;
6501 	}
6502 	ret = 0;
6503 
6504 	/* page is wholly or partially inside EOF */
6505 	if (page_start + PAGE_CACHE_SIZE > size)
6506 		zero_start = size & ~PAGE_CACHE_MASK;
6507 	else
6508 		zero_start = PAGE_CACHE_SIZE;
6509 
6510 	if (zero_start != PAGE_CACHE_SIZE) {
6511 		kaddr = kmap(page);
6512 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6513 		flush_dcache_page(page);
6514 		kunmap(page);
6515 	}
6516 	ClearPageChecked(page);
6517 	set_page_dirty(page);
6518 	SetPageUptodate(page);
6519 
6520 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6521 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6522 
6523 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6524 
6525 out_unlock:
6526 	if (!ret)
6527 		return VM_FAULT_LOCKED;
6528 	unlock_page(page);
6529 out:
6530 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6531 out_noreserve:
6532 	return ret;
6533 }
6534 
6535 static int btrfs_truncate(struct inode *inode)
6536 {
6537 	struct btrfs_root *root = BTRFS_I(inode)->root;
6538 	struct btrfs_block_rsv *rsv;
6539 	int ret;
6540 	int err = 0;
6541 	struct btrfs_trans_handle *trans;
6542 	unsigned long nr;
6543 	u64 mask = root->sectorsize - 1;
6544 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
6545 
6546 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6547 	if (ret)
6548 		return ret;
6549 
6550 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6551 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6552 
6553 	/*
6554 	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
6555 	 * 3 things going on here
6556 	 *
6557 	 * 1) We need to reserve space for our orphan item and the space to
6558 	 * delete our orphan item.  Lord knows we don't want to have a dangling
6559 	 * orphan item because we didn't reserve space to remove it.
6560 	 *
6561 	 * 2) We need to reserve space to update our inode.
6562 	 *
6563 	 * 3) We need to have something to cache all the space that is going to
6564 	 * be freed up by the truncate operation, but also have some slack
6565 	 * space reserved in case it uses space during the truncate (thank you
6566 	 * very much snapshotting).
6567 	 *
6568 	 * And we need these to all be separate.  The fact is we can use a lot of
6569 	 * space doing the truncate, and we have no earthly idea how much space
6570 	 * we will use, so we need the truncate reservation to be separate so it
6571 	 * doesn't end up using space reserved for updating the inode or
6572 	 * removing the orphan item.  We also need to be able to stop the
6573 	 * transaction and start a new one, which means we need to be able to
6574 	 * update the inode several times, and we have no way of knowing how
6575 	 * many times that will be, so we can't just reserve 1 item for the
6576 	 * entirety of the operation, so that has to be done separately as well.
6577 	 * Then there is the orphan item, which does indeed need to be held on
6578 	 * to for the whole operation, and we need nobody to touch this reserved
6579 	 * space except the orphan code.
6580 	 *
6581 	 * So that leaves us with
6582 	 *
6583 	 * 1) root->orphan_block_rsv - for the orphan deletion.
6584 	 * 2) rsv - for the truncate reservation, which we will steal from the
6585 	 * transaction reservation.
6586 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
6587 	 * updating the inode.
6588 	 */
6589 	rsv = btrfs_alloc_block_rsv(root);
6590 	if (!rsv)
6591 		return -ENOMEM;
6592 	rsv->size = min_size;
6593 
6594 	/*
6595 	 * 1 for the truncate slack space
6596 	 * 1 for the orphan item we're going to add
6597 	 * 1 for the orphan item deletion
6598 	 * 1 for updating the inode.
6599 	 */
6600 	trans = btrfs_start_transaction(root, 4);
6601 	if (IS_ERR(trans)) {
6602 		err = PTR_ERR(trans);
6603 		goto out;
6604 	}
6605 
6606 	/* Migrate the slack space for the truncate to our reserve */
6607 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6608 				      min_size);
6609 	BUG_ON(ret);
6610 
6611 	ret = btrfs_orphan_add(trans, inode);
6612 	if (ret) {
6613 		btrfs_end_transaction(trans, root);
6614 		goto out;
6615 	}
6616 
6617 	/*
6618 	 * setattr is responsible for setting the ordered_data_close flag,
6619 	 * but that is only tested during the last file release.  That
6620 	 * could happen well after the next commit, leaving a great big
6621 	 * window where new writes may get lost if someone chooses to write
6622 	 * to this file after truncating to zero
6623 	 *
6624 	 * The inode doesn't have any dirty data here, and so if we commit
6625 	 * this is a noop.  If someone immediately starts writing to the inode
6626 	 * it is very likely we'll catch some of their writes in this
6627 	 * transaction, and the commit will find this file on the ordered
6628 	 * data list with good things to send down.
6629 	 *
6630 	 * This is a best effort solution, there is still a window where
6631 	 * using truncate to replace the contents of the file will
6632 	 * end up with a zero length file after a crash.
6633 	 */
6634 	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
6635 		btrfs_add_ordered_operation(trans, root, inode);
6636 
6637 	while (1) {
6638 		ret = btrfs_block_rsv_refill(root, rsv, min_size);
6639 		if (ret) {
6640 			/*
6641 			 * This can only happen with the original transaction we
6642 			 * started above, every other time we shouldn't have a
6643 			 * transaction started yet.
6644 			 */
6645 			if (ret == -EAGAIN)
6646 				goto end_trans;
6647 			err = ret;
6648 			break;
6649 		}
6650 
6651 		if (!trans) {
6652 			/* Just need the 1 for updating the inode */
6653 			trans = btrfs_start_transaction(root, 1);
6654 			if (IS_ERR(trans)) {
6655 				ret = err = PTR_ERR(trans);
6656 				trans = NULL;
6657 				break;
6658 			}
6659 		}
6660 
6661 		trans->block_rsv = rsv;
6662 
6663 		ret = btrfs_truncate_inode_items(trans, root, inode,
6664 						 inode->i_size,
6665 						 BTRFS_EXTENT_DATA_KEY);
6666 		if (ret != -EAGAIN) {
6667 			err = ret;
6668 			break;
6669 		}
6670 
6671 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6672 		ret = btrfs_update_inode(trans, root, inode);
6673 		if (ret) {
6674 			err = ret;
6675 			break;
6676 		}
6677 end_trans:
6678 		nr = trans->blocks_used;
6679 		btrfs_end_transaction(trans, root);
6680 		trans = NULL;
6681 		btrfs_btree_balance_dirty(root, nr);
6682 	}
6683 
6684 	if (ret == 0 && inode->i_nlink > 0) {
6685 		trans->block_rsv = root->orphan_block_rsv;
6686 		ret = btrfs_orphan_del(trans, inode);
6687 		if (ret)
6688 			err = ret;
6689 	} else if (ret && inode->i_nlink > 0) {
6690 		/*
6691 		 * Failed to do the truncate, remove us from the in-memory
6692 		 * orphan list.
6693 		 */
6694 		ret = btrfs_orphan_del(NULL, inode);
6695 	}
6696 
6697 	if (trans) {
6698 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6699 		ret = btrfs_update_inode(trans, root, inode);
6700 		if (ret && !err)
6701 			err = ret;
6702 
6703 		nr = trans->blocks_used;
6704 		ret = btrfs_end_transaction(trans, root);
6705 		btrfs_btree_balance_dirty(root, nr);
6706 	}
6707 
6708 out:
6709 	btrfs_free_block_rsv(root, rsv);
6710 
6711 	if (ret && !err)
6712 		err = ret;
6713 
6714 	return err;
6715 }
6716 
6717 /*
6718  * create a new subvolume directory/inode (helper for the ioctl).
6719  */
6720 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6721 			     struct btrfs_root *new_root, u64 new_dirid)
6722 {
6723 	struct inode *inode;
6724 	int err;
6725 	u64 index = 0;
6726 
6727 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
6728 				new_dirid, new_dirid,
6729 				S_IFDIR | (~current_umask() & S_IRWXUGO),
6730 				&index);
6731 	if (IS_ERR(inode))
6732 		return PTR_ERR(inode);
6733 	inode->i_op = &btrfs_dir_inode_operations;
6734 	inode->i_fop = &btrfs_dir_file_operations;
6735 
6736 	set_nlink(inode, 1);
6737 	btrfs_i_size_write(inode, 0);
6738 
6739 	err = btrfs_update_inode(trans, new_root, inode);
6740 	BUG_ON(err);
6741 
6742 	iput(inode);
6743 	return 0;
6744 }
6745 
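/*
 * Allocate a btrfs_inode from the inode slab and initialize the btrfs
 * specific fields, trees and lists before handing the embedded VFS inode
 * back to the core.
 */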
6746 struct inode *btrfs_alloc_inode(struct super_block *sb)
6747 {
6748 	struct btrfs_inode *ei;
6749 	struct inode *inode;
6750 
6751 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
6752 	if (!ei)
6753 		return NULL;
6754 
6755 	ei->root = NULL;
6756 	ei->space_info = NULL;
6757 	ei->generation = 0;
6758 	ei->sequence = 0;
6759 	ei->last_trans = 0;
6760 	ei->last_sub_trans = 0;
6761 	ei->logged_trans = 0;
6762 	ei->delalloc_bytes = 0;
6763 	ei->disk_i_size = 0;
6764 	ei->flags = 0;
6765 	ei->csum_bytes = 0;
6766 	ei->index_cnt = (u64)-1;
6767 	ei->last_unlink_trans = 0;
6768 
6769 	spin_lock_init(&ei->lock);
6770 	ei->outstanding_extents = 0;
6771 	ei->reserved_extents = 0;
6772 
6773 	ei->ordered_data_close = 0;
6774 	ei->orphan_meta_reserved = 0;
6775 	ei->dummy_inode = 0;
6776 	ei->in_defrag = 0;
6777 	ei->delalloc_meta_reserved = 0;
6778 	ei->force_compress = BTRFS_COMPRESS_NONE;
6779 
6780 	ei->delayed_node = NULL;
6781 
6782 	inode = &ei->vfs_inode;
6783 	extent_map_tree_init(&ei->extent_tree);
6784 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
6785 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
6786 	mutex_init(&ei->log_mutex);
6787 	mutex_init(&ei->delalloc_mutex);
6788 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
6789 	INIT_LIST_HEAD(&ei->i_orphan);
6790 	INIT_LIST_HEAD(&ei->delalloc_inodes);
6791 	INIT_LIST_HEAD(&ei->ordered_operations);
6792 	RB_CLEAR_NODE(&ei->rb_node);
6793 
6794 	return inode;
6795 }
6796 
6797 static void btrfs_i_callback(struct rcu_head *head)
6798 {
6799 	struct inode *inode = container_of(head, struct inode, i_rcu);
6800 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
6801 }
6802 
6803 void btrfs_destroy_inode(struct inode *inode)
6804 {
6805 	struct btrfs_ordered_extent *ordered;
6806 	struct btrfs_root *root = BTRFS_I(inode)->root;
6807 
6808 	WARN_ON(!list_empty(&inode->i_dentry));
6809 	WARN_ON(inode->i_data.nrpages);
6810 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
6811 	WARN_ON(BTRFS_I(inode)->reserved_extents);
6812 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
6813 	WARN_ON(BTRFS_I(inode)->csum_bytes);
6814 
6815 	/*
6816 	 * This can happen when we create an inode, but somebody else also
6817 	 * created the same inode and we need to destroy the one we already
6818 	 * created.
6819 	 */
6820 	if (!root)
6821 		goto free;
6822 
6823 	/*
6824 	 * Make sure we're properly removed from the ordered operation
6825 	 * lists.
6826 	 */
6827 	smp_mb();
6828 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
6829 		spin_lock(&root->fs_info->ordered_extent_lock);
6830 		list_del_init(&BTRFS_I(inode)->ordered_operations);
6831 		spin_unlock(&root->fs_info->ordered_extent_lock);
6832 	}
6833 
6834 	spin_lock(&root->orphan_lock);
6835 	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6836 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
6837 		       (unsigned long long)btrfs_ino(inode));
6838 		list_del_init(&BTRFS_I(inode)->i_orphan);
6839 	}
6840 	spin_unlock(&root->orphan_lock);
6841 
6842 	while (1) {
6843 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
6844 		if (!ordered)
6845 			break;
6846 		else {
6847 			printk(KERN_ERR "btrfs found ordered "
6848 			       "extent %llu %llu on inode cleanup\n",
6849 			       (unsigned long long)ordered->file_offset,
6850 			       (unsigned long long)ordered->len);
6851 			btrfs_remove_ordered_extent(inode, ordered);
6852 			btrfs_put_ordered_extent(ordered);
6853 			btrfs_put_ordered_extent(ordered);
6854 		}
6855 	}
6856 	inode_tree_del(inode);
6857 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
6858 free:
6859 	btrfs_remove_delayed_node(inode);
6860 	call_rcu(&inode->i_rcu, btrfs_i_callback);
6861 }
6862 
6863 int btrfs_drop_inode(struct inode *inode)
6864 {
6865 	struct btrfs_root *root = BTRFS_I(inode)->root;
6866 
6867 	if (btrfs_root_refs(&root->root_item) == 0 &&
6868 	    !btrfs_is_free_space_inode(root, inode))
6869 		return 1;
6870 	else
6871 		return generic_drop_inode(inode);
6872 }
6873 
6874 static void init_once(void *foo)
6875 {
6876 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
6877 
6878 	inode_init_once(&ei->vfs_inode);
6879 }
6880 
6881 void btrfs_destroy_cachep(void)
6882 {
6883 	if (btrfs_inode_cachep)
6884 		kmem_cache_destroy(btrfs_inode_cachep);
6885 	if (btrfs_trans_handle_cachep)
6886 		kmem_cache_destroy(btrfs_trans_handle_cachep);
6887 	if (btrfs_transaction_cachep)
6888 		kmem_cache_destroy(btrfs_transaction_cachep);
6889 	if (btrfs_path_cachep)
6890 		kmem_cache_destroy(btrfs_path_cachep);
6891 	if (btrfs_free_space_cachep)
6892 		kmem_cache_destroy(btrfs_free_space_cachep);
6893 }
6894 
6895 int btrfs_init_cachep(void)
6896 {
6897 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
6898 			sizeof(struct btrfs_inode), 0,
6899 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
6900 	if (!btrfs_inode_cachep)
6901 		goto fail;
6902 
6903 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
6904 			sizeof(struct btrfs_trans_handle), 0,
6905 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6906 	if (!btrfs_trans_handle_cachep)
6907 		goto fail;
6908 
6909 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
6910 			sizeof(struct btrfs_transaction), 0,
6911 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6912 	if (!btrfs_transaction_cachep)
6913 		goto fail;
6914 
6915 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
6916 			sizeof(struct btrfs_path), 0,
6917 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6918 	if (!btrfs_path_cachep)
6919 		goto fail;
6920 
6921 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
6922 			sizeof(struct btrfs_free_space), 0,
6923 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6924 	if (!btrfs_free_space_cachep)
6925 		goto fail;
6926 
6927 	return 0;
6928 fail:
6929 	btrfs_destroy_cachep();
6930 	return -ENOMEM;
6931 }
6932 
6933 static int btrfs_getattr(struct vfsmount *mnt,
6934 			 struct dentry *dentry, struct kstat *stat)
6935 {
6936 	struct inode *inode = dentry->d_inode;
6937 	u32 blocksize = inode->i_sb->s_blocksize;
6938 
6939 	generic_fillattr(inode, stat);
6940 	stat->dev = BTRFS_I(inode)->root->anon_dev;
6941 	stat->blksize = PAGE_CACHE_SIZE;
6942 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
6943 		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
6944 	return 0;
6945 }
6946 
6947 /*
6948  * If a file is moved, it will inherit the cow and compression flags of the new
6949  * directory.
6950  */
6951 static void fixup_inode_flags(struct inode *dir, struct inode *inode)
6952 {
6953 	struct btrfs_inode *b_dir = BTRFS_I(dir);
6954 	struct btrfs_inode *b_inode = BTRFS_I(inode);
6955 
6956 	if (b_dir->flags & BTRFS_INODE_NODATACOW)
6957 		b_inode->flags |= BTRFS_INODE_NODATACOW;
6958 	else
6959 		b_inode->flags &= ~BTRFS_INODE_NODATACOW;
6960 
6961 	if (b_dir->flags & BTRFS_INODE_COMPRESS)
6962 		b_inode->flags |= BTRFS_INODE_COMPRESS;
6963 	else
6964 		b_inode->flags &= ~BTRFS_INODE_COMPRESS;
6965 }
6966 
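/*
 * Rename: reserve a worst-case sized transaction, insert the new inode ref
 * and dir index, unlink the old name (or subvolume link), unlink any
 * existing target, then add the new link.  For non-subvolume renames the log
 * is pinned so a crash always sees either the old name or the new one.
 */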
6967 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6968 			   struct inode *new_dir, struct dentry *new_dentry)
6969 {
6970 	struct btrfs_trans_handle *trans;
6971 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
6972 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
6973 	struct inode *new_inode = new_dentry->d_inode;
6974 	struct inode *old_inode = old_dentry->d_inode;
6975 	struct timespec ctime = CURRENT_TIME;
6976 	u64 index = 0;
6977 	u64 root_objectid;
6978 	int ret;
6979 	u64 old_ino = btrfs_ino(old_inode);
6980 
6981 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6982 		return -EPERM;
6983 
6984 	/* we only allow rename subvolume link between subvolumes */
6985 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
6986 		return -EXDEV;
6987 
6988 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
6989 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
6990 		return -ENOTEMPTY;
6991 
6992 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
6993 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
6994 		return -ENOTEMPTY;
6995 	/*
6996 	 * we're using rename to replace one file with another,
6997 	 * and the replacement file is large.  Start IO on it now so
6998 	 * we don't add too much work to the end of the transaction
6999 	 */
7000 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
7001 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
7002 		filemap_flush(old_inode->i_mapping);
7003 
7004 	/* close the racy window with snapshot create/destroy ioctl */
7005 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7006 		down_read(&root->fs_info->subvol_sem);
7007 	/*
7008 	 * We want to reserve the absolute worst case amount of items.  So if
7009 	 * both inodes are subvols and we need to unlink them then that would
7010 	 * require 4 item modifications, but if they are both normal inodes it
7011 	 * would require 5 item modifications, so we'll assume they're normal
7012 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
7013 	 * should cover the worst case number of items we'll modify.
7014 	 */
7015 	trans = btrfs_start_transaction(root, 20);
7016 	if (IS_ERR(trans)) {
7017 		ret = PTR_ERR(trans);
7018 		goto out_notrans;
7019 	}
7020 
7021 	if (dest != root)
7022 		btrfs_record_root_in_trans(trans, dest);
7023 
7024 	ret = btrfs_set_inode_index(new_dir, &index);
7025 	if (ret)
7026 		goto out_fail;
7027 
7028 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7029 		/* force full log commit if subvolume involved. */
7030 		root->fs_info->last_trans_log_full_commit = trans->transid;
7031 	} else {
7032 		ret = btrfs_insert_inode_ref(trans, dest,
7033 					     new_dentry->d_name.name,
7034 					     new_dentry->d_name.len,
7035 					     old_ino,
7036 					     btrfs_ino(new_dir), index);
7037 		if (ret)
7038 			goto out_fail;
7039 		/*
7040 		 * this is an ugly little race, but the rename is required
7041 		 * to make sure that if we crash, the inode is either at the
7042 		 * old name or the new one.  pinning the log transaction lets
7043 		 * us make sure we don't allow a log commit to come in after
7044 		 * we unlink the name but before we add the new name back in.
7045 		 */
7046 		btrfs_pin_log_trans(root);
7047 	}
7048 	/*
7049 	 * make sure the inode gets flushed if it is replacing
7050 	 * something.
7051 	 */
7052 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7053 		btrfs_add_ordered_operation(trans, root, old_inode);
7054 
7055 	old_dir->i_ctime = old_dir->i_mtime = ctime;
7056 	new_dir->i_ctime = new_dir->i_mtime = ctime;
7057 	old_inode->i_ctime = ctime;
7058 
7059 	if (old_dentry->d_parent != new_dentry->d_parent)
7060 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7061 
7062 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7063 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7064 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7065 					old_dentry->d_name.name,
7066 					old_dentry->d_name.len);
7067 	} else {
7068 		ret = __btrfs_unlink_inode(trans, root, old_dir,
7069 					old_dentry->d_inode,
7070 					old_dentry->d_name.name,
7071 					old_dentry->d_name.len);
7072 		if (!ret)
7073 			ret = btrfs_update_inode(trans, root, old_inode);
7074 	}
7075 	BUG_ON(ret);
7076 
7077 	if (new_inode) {
7078 		new_inode->i_ctime = CURRENT_TIME;
7079 		if (unlikely(btrfs_ino(new_inode) ==
7080 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7081 			root_objectid = BTRFS_I(new_inode)->location.objectid;
7082 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
7083 						root_objectid,
7084 						new_dentry->d_name.name,
7085 						new_dentry->d_name.len);
7086 			BUG_ON(new_inode->i_nlink == 0);
7087 		} else {
7088 			ret = btrfs_unlink_inode(trans, dest, new_dir,
7089 						 new_dentry->d_inode,
7090 						 new_dentry->d_name.name,
7091 						 new_dentry->d_name.len);
7092 		}
7093 		BUG_ON(ret);
7094 		if (new_inode->i_nlink == 0) {
7095 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7096 			BUG_ON(ret);
7097 		}
7098 	}
7099 
7100 	fixup_inode_flags(new_dir, old_inode);
7101 
7102 	ret = btrfs_add_link(trans, new_dir, old_inode,
7103 			     new_dentry->d_name.name,
7104 			     new_dentry->d_name.len, 0, index);
7105 	BUG_ON(ret);
7106 
7107 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7108 		struct dentry *parent = new_dentry->d_parent;
7109 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
7110 		btrfs_end_log_trans(root);
7111 	}
7112 out_fail:
7113 	btrfs_end_transaction(trans, root);
7114 out_notrans:
7115 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7116 		up_read(&root->fs_info->subvol_sem);
7117 
7118 	return ret;
7119 }
7120 
7121 /*
7122  * some fairly slow code that needs optimization. This walks the list
7123  * of all the inodes with pending delalloc and forces them to disk.
7124  */
7125 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7126 {
7127 	struct list_head *head = &root->fs_info->delalloc_inodes;
7128 	struct btrfs_inode *binode;
7129 	struct inode *inode;
7130 
7131 	if (root->fs_info->sb->s_flags & MS_RDONLY)
7132 		return -EROFS;
7133 
7134 	spin_lock(&root->fs_info->delalloc_lock);
7135 	while (!list_empty(head)) {
7136 		binode = list_entry(head->next, struct btrfs_inode,
7137 				    delalloc_inodes);
7138 		inode = igrab(&binode->vfs_inode);
7139 		if (!inode)
7140 			list_del_init(&binode->delalloc_inodes);
7141 		spin_unlock(&root->fs_info->delalloc_lock);
7142 		if (inode) {
7143 			filemap_flush(inode->i_mapping);
7144 			if (delay_iput)
7145 				btrfs_add_delayed_iput(inode);
7146 			else
7147 				iput(inode);
7148 		}
7149 		cond_resched();
7150 		spin_lock(&root->fs_info->delalloc_lock);
7151 	}
7152 	spin_unlock(&root->fs_info->delalloc_lock);
7153 
7154 	/* the filemap_flush will queue IO into the worker threads, but
7155 	 * we have to make sure the IO is actually started and that
7156 	 * ordered extents get created before we return
7157 	 */
7158 	atomic_inc(&root->fs_info->async_submit_draining);
7159 	while (atomic_read(&root->fs_info->nr_async_submits) ||
7160 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7161 		wait_event(root->fs_info->async_submit_wait,
7162 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
7163 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7164 	}
7165 	atomic_dec(&root->fs_info->async_submit_draining);
7166 	return 0;
7167 }
7168 
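/*
 * Create a symlink: allocate a new inode, link it into the directory and
 * store the target string as an inline file extent item.
 */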
7169 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7170 			 const char *symname)
7171 {
7172 	struct btrfs_trans_handle *trans;
7173 	struct btrfs_root *root = BTRFS_I(dir)->root;
7174 	struct btrfs_path *path;
7175 	struct btrfs_key key;
7176 	struct inode *inode = NULL;
7177 	int err;
7178 	int drop_inode = 0;
7179 	u64 objectid;
7180 	u64 index = 0;
7181 	int name_len;
7182 	int datasize;
7183 	unsigned long ptr;
7184 	struct btrfs_file_extent_item *ei;
7185 	struct extent_buffer *leaf;
7186 	unsigned long nr = 0;
7187 
7188 	name_len = strlen(symname) + 1;
7189 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7190 		return -ENAMETOOLONG;
7191 
7192 	/*
7193 	 * 2 items for inode item and ref
7194 	 * 2 items for dir items
7195 	 * 1 item for xattr if selinux is on
7196 	 */
7197 	trans = btrfs_start_transaction(root, 5);
7198 	if (IS_ERR(trans))
7199 		return PTR_ERR(trans);
7200 
7201 	err = btrfs_find_free_ino(root, &objectid);
7202 	if (err)
7203 		goto out_unlock;
7204 
7205 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7206 				dentry->d_name.len, btrfs_ino(dir), objectid,
7207 				S_IFLNK|S_IRWXUGO, &index);
7208 	if (IS_ERR(inode)) {
7209 		err = PTR_ERR(inode);
7210 		goto out_unlock;
7211 	}
7212 
7213 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7214 	if (err) {
7215 		drop_inode = 1;
7216 		goto out_unlock;
7217 	}
7218 
7219 	/*
7220 	 * If the active LSM wants to access the inode during
7221 	 * d_instantiate it needs these. Smack checks to see
7222 	 * if the filesystem supports xattrs by looking at the
7223 	 * ops vector.
7224 	 */
7225 	inode->i_fop = &btrfs_file_operations;
7226 	inode->i_op = &btrfs_file_inode_operations;
7227 
7228 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7229 	if (err) {
7230 		drop_inode = 1;
7231 	} else {
7232 		inode->i_mapping->a_ops = &btrfs_aops;
7233 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7234 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7235 	}
7236 	if (drop_inode)
7237 		goto out_unlock;
7238 
7239 	path = btrfs_alloc_path();
7240 	if (!path) {
7241 		err = -ENOMEM;
7242 		drop_inode = 1;
7243 		goto out_unlock;
7244 	}
7245 	key.objectid = btrfs_ino(inode);
7246 	key.offset = 0;
7247 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7248 	datasize = btrfs_file_extent_calc_inline_size(name_len);
7249 	err = btrfs_insert_empty_item(trans, root, path, &key,
7250 				      datasize);
7251 	if (err) {
7252 		drop_inode = 1;
7253 		btrfs_free_path(path);
7254 		goto out_unlock;
7255 	}
7256 	leaf = path->nodes[0];
7257 	ei = btrfs_item_ptr(leaf, path->slots[0],
7258 			    struct btrfs_file_extent_item);
7259 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
7260 	btrfs_set_file_extent_type(leaf, ei,
7261 				   BTRFS_FILE_EXTENT_INLINE);
7262 	btrfs_set_file_extent_encryption(leaf, ei, 0);
7263 	btrfs_set_file_extent_compression(leaf, ei, 0);
7264 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
7265 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
7266 
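	/*
	 * Copy the full NUL-terminated target (name_len = strlen(symname) + 1
	 * bytes) into the inline extent; i_size is set to name_len - 1 below
	 * so the reported link length excludes the trailing NUL.
	 */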
7267 	ptr = btrfs_file_extent_inline_start(ei);
7268 	write_extent_buffer(leaf, symname, ptr, name_len);
7269 	btrfs_mark_buffer_dirty(leaf);
7270 	btrfs_free_path(path);
7271 
7272 	inode->i_op = &btrfs_symlink_inode_operations;
7273 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7274 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7275 	inode_set_bytes(inode, name_len);
7276 	btrfs_i_size_write(inode, name_len - 1);
7277 	err = btrfs_update_inode(trans, root, inode);
7278 	if (err)
7279 		drop_inode = 1;
7280 
7281 out_unlock:
7282 	if (!err)
7283 		d_instantiate(dentry, inode);
7284 	nr = trans->blocks_used;
7285 	btrfs_end_transaction(trans, root);
7286 	if (drop_inode) {
7287 		inode_dec_link_count(inode);
7288 		iput(inode);
7289 	}
7290 	btrfs_btree_balance_dirty(root, nr);
7291 	return err;
7292 }
7293 
7294 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7295 				       u64 start, u64 num_bytes, u64 min_size,
7296 				       loff_t actual_len, u64 *alloc_hint,
7297 				       struct btrfs_trans_handle *trans)
7298 {
7299 	struct btrfs_root *root = BTRFS_I(inode)->root;
7300 	struct btrfs_key ins;
7301 	u64 cur_offset = start;
7302 	u64 i_size;
7303 	int ret = 0;
7304 	bool own_trans = true;
7305 
7306 	if (trans)
7307 		own_trans = false;
7308 	while (num_bytes > 0) {
7309 		if (own_trans) {
7310 			trans = btrfs_start_transaction(root, 3);
7311 			if (IS_ERR(trans)) {
7312 				ret = PTR_ERR(trans);
7313 				break;
7314 			}
7315 		}
7316 
7317 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7318 					   0, *alloc_hint, (u64)-1, &ins, 1);
7319 		if (ret) {
7320 			if (own_trans)
7321 				btrfs_end_transaction(trans, root);
7322 			break;
7323 		}
7324 
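		/*
		 * Record the freshly reserved range as an unwritten PREALLOC
		 * file extent and drop any cached extent mappings that
		 * overlap it.
		 */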
7325 		ret = insert_reserved_file_extent(trans, inode,
7326 						  cur_offset, ins.objectid,
7327 						  ins.offset, ins.offset,
7328 						  ins.offset, 0, 0, 0,
7329 						  BTRFS_FILE_EXTENT_PREALLOC);
7330 		BUG_ON(ret);
7331 		btrfs_drop_extent_cache(inode, cur_offset,
7332 					cur_offset + ins.offset - 1, 0);
7333 
7334 		num_bytes -= ins.offset;
7335 		cur_offset += ins.offset;
7336 		*alloc_hint = ins.objectid + ins.offset;
7337 
7338 		inode->i_ctime = CURRENT_TIME;
7339 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7340 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7341 		    (actual_len > inode->i_size) &&
7342 		    (cur_offset > inode->i_size)) {
7343 			if (cur_offset > actual_len)
7344 				i_size = actual_len;
7345 			else
7346 				i_size = cur_offset;
7347 			i_size_write(inode, i_size);
7348 			btrfs_ordered_update_i_size(inode, i_size, NULL);
7349 		}
7350 
7351 		ret = btrfs_update_inode(trans, root, inode);
7352 		BUG_ON(ret);
7353 
7354 		if (own_trans)
7355 			btrfs_end_transaction(trans, root);
7356 	}
7357 	return ret;
7358 }
7359 
7360 int btrfs_prealloc_file_range(struct inode *inode, int mode,
7361 			      u64 start, u64 num_bytes, u64 min_size,
7362 			      loff_t actual_len, u64 *alloc_hint)
7363 {
7364 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7365 					   min_size, actual_len, alloc_hint,
7366 					   NULL);
7367 }
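
/*
 * Illustrative sketch only: the fallocate() path is assumed to call the
 * wrapper above once per hole it finds, carrying the allocation hint
 * forward between calls, roughly:
 *
 *	u64 alloc_hint = 0;
 *
 *	ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 *					last_byte - cur_offset,
 *					1 << inode->i_blkbits,
 *					offset + len, &alloc_hint);
 *
 * The variable names and the min_size choice here are assumptions, not
 * taken from this file.
 */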
7368 
7369 int btrfs_prealloc_file_range_trans(struct inode *inode,
7370 				    struct btrfs_trans_handle *trans, int mode,
7371 				    u64 start, u64 num_bytes, u64 min_size,
7372 				    loff_t actual_len, u64 *alloc_hint)
7373 {
7374 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7375 					   min_size, actual_len, alloc_hint, trans);
7376 }
7377 
7378 static int btrfs_set_page_dirty(struct page *page)
7379 {
7380 	return __set_page_dirty_nobuffers(page);
7381 }
7382 
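/*
 * Write access is refused early for inodes in read-only subvolumes (-EROFS)
 * and for inodes carrying the btrfs READONLY flag (-EACCES); everything else
 * falls through to the generic mode-bit checks.
 */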
7383 static int btrfs_permission(struct inode *inode, int mask)
7384 {
7385 	struct btrfs_root *root = BTRFS_I(inode)->root;
7386 	umode_t mode = inode->i_mode;
7387 
7388 	if (mask & MAY_WRITE &&
7389 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7390 		if (btrfs_root_readonly(root))
7391 			return -EROFS;
7392 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7393 			return -EACCES;
7394 	}
7395 	return generic_permission(inode, mask);
7396 }
7397 
7398 static const struct inode_operations btrfs_dir_inode_operations = {
7399 	.getattr	= btrfs_getattr,
7400 	.lookup		= btrfs_lookup,
7401 	.create		= btrfs_create,
7402 	.unlink		= btrfs_unlink,
7403 	.link		= btrfs_link,
7404 	.mkdir		= btrfs_mkdir,
7405 	.rmdir		= btrfs_rmdir,
7406 	.rename		= btrfs_rename,
7407 	.symlink	= btrfs_symlink,
7408 	.setattr	= btrfs_setattr,
7409 	.mknod		= btrfs_mknod,
7410 	.setxattr	= btrfs_setxattr,
7411 	.getxattr	= btrfs_getxattr,
7412 	.listxattr	= btrfs_listxattr,
7413 	.removexattr	= btrfs_removexattr,
7414 	.permission	= btrfs_permission,
7415 	.get_acl	= btrfs_get_acl,
7416 };
7417 static const struct inode_operations btrfs_dir_ro_inode_operations = {
7418 	.lookup		= btrfs_lookup,
7419 	.permission	= btrfs_permission,
7420 	.get_acl	= btrfs_get_acl,
7421 };
7422 
7423 static const struct file_operations btrfs_dir_file_operations = {
7424 	.llseek		= generic_file_llseek,
7425 	.read		= generic_read_dir,
7426 	.readdir	= btrfs_real_readdir,
7427 	.unlocked_ioctl	= btrfs_ioctl,
7428 #ifdef CONFIG_COMPAT
7429 	.compat_ioctl	= btrfs_ioctl,
7430 #endif
7431 	.release        = btrfs_release_file,
7432 	.fsync		= btrfs_sync_file,
7433 };
7434 
7435 static struct extent_io_ops btrfs_extent_io_ops = {
7436 	.fill_delalloc = run_delalloc_range,
7437 	.submit_bio_hook = btrfs_submit_bio_hook,
7438 	.merge_bio_hook = btrfs_merge_bio_hook,
7439 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7440 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7441 	.writepage_start_hook = btrfs_writepage_start_hook,
7442 	.set_bit_hook = btrfs_set_bit_hook,
7443 	.clear_bit_hook = btrfs_clear_bit_hook,
7444 	.merge_extent_hook = btrfs_merge_extent_hook,
7445 	.split_extent_hook = btrfs_split_extent_hook,
7446 };
7447 
7448 /*
7449  * btrfs doesn't support the bmap operation because swapfiles
7450  * use bmap to make a mapping of extents in the file.  They assume
7451  * these extents won't change over the life of the file and they
7452  * use the bmap result to do IO directly to the drive.
7453  *
7454  * the btrfs bmap call would return logical addresses that aren't
7455  * suitable for IO, and they will also change frequently as COW
7456  * operations happen.  So, swapfile + btrfs == corruption.
7457  *
7458  * For now we're avoiding this by dropping bmap.
7459  */
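/*
 * Leaving .bmap out of btrfs_aops below is the mechanism for that: callers
 * that require bmap, such as FIBMAP or swapfile activation, are expected to
 * fail cleanly instead of silently corrupting data.
 */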
7460 static const struct address_space_operations btrfs_aops = {
7461 	.readpage	= btrfs_readpage,
7462 	.writepage	= btrfs_writepage,
7463 	.writepages	= btrfs_writepages,
7464 	.readpages	= btrfs_readpages,
7465 	.direct_IO	= btrfs_direct_IO,
7466 	.invalidatepage = btrfs_invalidatepage,
7467 	.releasepage	= btrfs_releasepage,
7468 	.set_page_dirty	= btrfs_set_page_dirty,
7469 	.error_remove_page = generic_error_remove_page,
7470 };
7471 
7472 static const struct address_space_operations btrfs_symlink_aops = {
7473 	.readpage	= btrfs_readpage,
7474 	.writepage	= btrfs_writepage,
7475 	.invalidatepage = btrfs_invalidatepage,
7476 	.releasepage	= btrfs_releasepage,
7477 };
7478 
7479 static const struct inode_operations btrfs_file_inode_operations = {
7480 	.getattr	= btrfs_getattr,
7481 	.setattr	= btrfs_setattr,
7482 	.setxattr	= btrfs_setxattr,
7483 	.getxattr	= btrfs_getxattr,
7484 	.listxattr      = btrfs_listxattr,
7485 	.removexattr	= btrfs_removexattr,
7486 	.permission	= btrfs_permission,
7487 	.fiemap		= btrfs_fiemap,
7488 	.get_acl	= btrfs_get_acl,
7489 };
7490 static const struct inode_operations btrfs_special_inode_operations = {
7491 	.getattr	= btrfs_getattr,
7492 	.setattr	= btrfs_setattr,
7493 	.permission	= btrfs_permission,
7494 	.setxattr	= btrfs_setxattr,
7495 	.getxattr	= btrfs_getxattr,
7496 	.listxattr	= btrfs_listxattr,
7497 	.removexattr	= btrfs_removexattr,
7498 	.get_acl	= btrfs_get_acl,
7499 };
7500 static const struct inode_operations btrfs_symlink_inode_operations = {
7501 	.readlink	= generic_readlink,
7502 	.follow_link	= page_follow_link_light,
7503 	.put_link	= page_put_link,
7504 	.getattr	= btrfs_getattr,
7505 	.setattr	= btrfs_setattr,
7506 	.permission	= btrfs_permission,
7507 	.setxattr	= btrfs_setxattr,
7508 	.getxattr	= btrfs_getxattr,
7509 	.listxattr	= btrfs_listxattr,
7510 	.removexattr	= btrfs_removexattr,
7511 	.get_acl	= btrfs_get_acl,
7512 };
7513 
7514 const struct dentry_operations btrfs_dentry_operations = {
7515 	.d_delete	= btrfs_dentry_delete,
7516 	.d_release	= btrfs_dentry_release,
7517 };
7518