xref: /openbmc/linux/fs/btrfs/inode.c (revision 5104d265)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
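
/*
 * A worked example of the table above (an illustrative note): a
 * directory has (mode & S_IFMT) == S_IFDIR == 0040000, and
 * 0040000 >> S_SHIFT is 4, so btrfs_type_by_mode[S_IFDIR >> S_SHIFT]
 * yields BTRFS_FT_DIR.  The intended lookup is
 * btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT].
 */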

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode,  struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have called btrfs_drop_extents first
 * so that no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
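/*
 * In short, the conditions below only allow an inline extent when the
 * data starts at file offset 0, ends inside the first page, reaches
 * the end of the file, and is smaller than both
 * BTRFS_MAX_INLINE_DATA_SIZE() and the max_inline mount option (a
 * descriptive summary of the check that follows, not a replacement
 * for it).
 */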
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
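
/*
 * Lifecycle of an async_cow (a descriptive note): cow_file_range_async
 * fills one in and queues it on the delalloc workers; async_cow_start
 * runs compress_file_range to build the list of async_extents;
 * async_cow_submit sends them to disk in queue order via
 * submit_compressed_extents; and async_cow_free drops the inode
 * reference and frees the struct.
 */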

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize,  num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
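/*
 * Note on the ENOSPC handling below: if reserving space for a
 * compressed extent fails with -ENOSPC, the compressed pages are freed
 * and async_extent->pages is set to NULL before retrying, so the
 * extent is written out again through the uncompressed cow_file_range
 * path instead.
 */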
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret && ret != -ENOSPC)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;

			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);
				goto retry;
			}
			goto out_free;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = async_extent->ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret)
			goto out_free_reserve;

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		if (ret)
			goto out;
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	kfree(async_extent);
	goto again;
}

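/*
 * pick an allocation hint for a delalloc write: prefer the on-disk
 * location of an extent already mapped in this range (or, failing
 * that, of the first extent in the file), so new allocations tend to
 * land near existing data.  A hint of 0 means "no preference".
 */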
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     struct btrfs_root *root,
				     struct page *locked_page,
				     u64 start, u64 end, int *page_started,
				     unsigned long *nr_written,
				     int unlock)
{
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(inode));

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize,  num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_reserve;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_reserve;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, locked_page,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}

static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	ret = __cow_file_range(trans, inode, root, locked_page, start, end,
			       page_started, nr_written, unlock);

	btrfs_end_transaction(trans, root);

	return ret;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

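/*
 * returns 1 if any checksum items exist for the given byte range and 0
 * otherwise; any sums found by the lookup are freed again.  The nocow
 * path below uses this to force COW so that csums for an extent are
 * either complete or absent, never a stale mix.
 */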
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called back for nocow writeback.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file as
 * required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = __cow_file_range(trans, inode, root, locked_page,
					       cow_start, found_key.offset - 1,
					       page_started, nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->ram_bytes = ram_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em, 1);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = __cow_file_range(trans, inode, root, locked_page,
				       cow_start, end,
				       page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     cur_offset, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
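/*
 * Dispatch summary (a descriptive note): NODATACOW inodes take the
 * nocow path with force=1, so even regular extents are overwritten in
 * place when that is safe; PREALLOC inodes take the nocow path with
 * force=0 and only write in place into preallocated extents; if no
 * compression is requested we run the plain synchronous cow_file_range;
 * otherwise the range is handed to the async compression workers.
 */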
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!btrfs_test_opt(root, COMPRESS) &&
		   !(BTRFS_I(inode)->force_compress) &&
		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}
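
/*
 * A worked example (a descriptive note): when one delalloc extent
 * state is split in two, the file has two outstanding extents where it
 * had one, hence the increment above; btrfs_merge_extent_hook below
 * decrements again when neighbouring delalloc extents rejoin.
 */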

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents, such as when sequential writes merge a new extent onto an old
 * one, so we can properly account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &root->fs_info->delalloc_roots);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
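/*
 * Note (an assumption based on the reservation code elsewhere in
 * btrfs, not on this file alone): EXTENT_FIRST_DELALLOC marks a range
 * whose outstanding extent was already counted when the caller
 * reserved metadata, so these hooks only adjust outstanding_extents
 * for ranges beyond that first one.
 */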
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned long *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state,
				 unsigned long *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list && !(state->state & EXTENT_NORESERVE))
			btrfs_free_reserved_data_space(inode, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	u64 logical = (u64)bio->bi_sector << 9;
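	/* bi_sector above counts 512-byte sectors; << 9 converts to bytes */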
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_length = length;
	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
1717 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1718 			  int mirror_num, unsigned long bio_flags,
1719 			  u64 bio_offset)
1720 {
1721 	struct btrfs_root *root = BTRFS_I(inode)->root;
1722 	int ret;
1723 
1724 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1725 	if (ret)
1726 		bio_endio(bio, ret);
1727 	return ret;
1728 }
1729 
1730 /*
1731  * extent_io.c submission hook. This does the right thing for csum calculation
1732  * on write, or reading the csums from the tree before a read
1733  */
1734 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1735 			  int mirror_num, unsigned long bio_flags,
1736 			  u64 bio_offset)
1737 {
1738 	struct btrfs_root *root = BTRFS_I(inode)->root;
1739 	int ret = 0;
1740 	int skip_sum;
1741 	int metadata = 0;
1742 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1743 
1744 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1745 
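	/*
	 * metadata == 2 routes the endio work for the free space inode to
	 * its own workqueue, keeping it from getting stuck behind regular
	 * data endio work during a commit
	 */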
1746 	if (btrfs_is_free_space_inode(inode))
1747 		metadata = 2;
1748 
1749 	if (!(rw & REQ_WRITE)) {
1750 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1751 		if (ret)
1752 			goto out;
1753 
1754 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1755 			ret = btrfs_submit_compressed_read(inode, bio,
1756 							   mirror_num,
1757 							   bio_flags);
1758 			goto out;
1759 		} else if (!skip_sum) {
1760 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1761 			if (ret)
1762 				goto out;
1763 		}
1764 		goto mapit;
1765 	} else if (async && !skip_sum) {
1766 		/* csum items have already been cloned */
1767 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1768 			goto mapit;
1769 		/* we're doing a write, do the async checksumming */
1770 		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1771 				   inode, rw, bio, mirror_num,
1772 				   bio_flags, bio_offset,
1773 				   __btrfs_submit_bio_start,
1774 				   __btrfs_submit_bio_done);
1775 		goto out;
1776 	} else if (!skip_sum) {
1777 		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1778 		if (ret)
1779 			goto out;
1780 	}
1781 
1782 mapit:
1783 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1784 
1785 out:
1786 	if (ret < 0)
1787 		bio_endio(bio, ret);
1788 	return ret;
1789 }
1790 
1791 /*
1792  * given a list of ordered sums record them in the inode.  This happens
1793  * at IO completion time based on sums calculated at bio submission time.
1794  */
1795 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1796 			     struct inode *inode, u64 file_offset,
1797 			     struct list_head *list)
1798 {
1799 	struct btrfs_ordered_sum *sum;
1800 
1801 	list_for_each_entry(sum, list, list) {
1802 		trans->adding_csums = 1;
1803 		btrfs_csum_file_blocks(trans,
1804 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1805 		trans->adding_csums = 0;
1806 	}
1807 	return 0;
1808 }
1809 
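/*
 * mark a range as delalloc in the io_tree so a later writeback pass will
 * pick it up.  The WARN_ON catches callers that pass a page aligned
 * (exclusive) end instead of the inclusive last byte.
 */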
1810 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1811 			      struct extent_state **cached_state)
1812 {
1813 	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1814 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1815 				   cached_state, GFP_NOFS);
1816 }
1817 
1818 /* see btrfs_writepage_start_hook for details on why this is required */
1819 struct btrfs_writepage_fixup {
1820 	struct page *page;
1821 	struct btrfs_work work;
1822 };
1823 
1824 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1825 {
1826 	struct btrfs_writepage_fixup *fixup;
1827 	struct btrfs_ordered_extent *ordered;
1828 	struct extent_state *cached_state = NULL;
1829 	struct page *page;
1830 	struct inode *inode;
1831 	u64 page_start;
1832 	u64 page_end;
1833 	int ret;
1834 
1835 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1836 	page = fixup->page;
1837 again:
1838 	lock_page(page);
1839 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1840 		ClearPageChecked(page);
1841 		goto out_page;
1842 	}
1843 
1844 	inode = page->mapping->host;
1845 	page_start = page_offset(page);
1846 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1847 
1848 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1849 			 &cached_state);
1850 
1851 	/* already ordered? We're done */
1852 	if (PagePrivate2(page))
1853 		goto out;
1854 
1855 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1856 	if (ordered) {
1857 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1858 				     page_end, &cached_state, GFP_NOFS);
1859 		unlock_page(page);
1860 		btrfs_start_ordered_extent(inode, ordered, 1);
1861 		btrfs_put_ordered_extent(ordered);
1862 		goto again;
1863 	}
1864 
1865 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1866 	if (ret) {
1867 		mapping_set_error(page->mapping, ret);
1868 		end_extent_writepage(page, ret, page_start, page_end);
1869 		ClearPageChecked(page);
1870 		goto out;
1871 	}
1872 
1873 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1874 	ClearPageChecked(page);
1875 	set_page_dirty(page);
1876 out:
1877 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1878 			     &cached_state, GFP_NOFS);
1879 out_page:
1880 	unlock_page(page);
1881 	page_cache_release(page);
1882 	kfree(fixup);
1883 }
1884 
1885 /*
1886  * There are a few paths in the higher layers of the kernel that directly
1887  * set the page dirty bit without asking the filesystem if it is a
1888  * good idea.  This causes problems because we want to make sure COW
1889  * properly happens and the data=ordered rules are followed.
1890  *
1891  * In our case any range that doesn't have the ORDERED bit set
1892  * hasn't been properly setup for IO.  We kick off an async process
1893  * to fix it up.  The async helper will wait for ordered extents, set
1894  * the delalloc bit and make it safe to write the page.
1895  */
1896 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1897 {
1898 	struct inode *inode = page->mapping->host;
1899 	struct btrfs_writepage_fixup *fixup;
1900 	struct btrfs_root *root = BTRFS_I(inode)->root;
1901 
1902 	/* this page is properly in the ordered list */
1903 	if (TestClearPagePrivate2(page))
1904 		return 0;
1905 
1906 	if (PageChecked(page))
1907 		return -EAGAIN;
1908 
1909 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1910 	if (!fixup)
1911 		return -EAGAIN;
1912 
1913 	SetPageChecked(page);
1914 	page_cache_get(page);
1915 	fixup->work.func = btrfs_writepage_fixup_worker;
1916 	fixup->page = page;
1917 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1918 	return -EBUSY;
1919 }
1920 
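/*
 * turn a finished ordered extent into a real file extent item: drop any
 * old extents in the range, insert the new item, charge its bytes to the
 * inode and record the extent reference for the reserved space
 */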
1921 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1922 				       struct inode *inode, u64 file_pos,
1923 				       u64 disk_bytenr, u64 disk_num_bytes,
1924 				       u64 num_bytes, u64 ram_bytes,
1925 				       u8 compression, u8 encryption,
1926 				       u16 other_encoding, int extent_type)
1927 {
1928 	struct btrfs_root *root = BTRFS_I(inode)->root;
1929 	struct btrfs_file_extent_item *fi;
1930 	struct btrfs_path *path;
1931 	struct extent_buffer *leaf;
1932 	struct btrfs_key ins;
1933 	int ret;
1934 
1935 	path = btrfs_alloc_path();
1936 	if (!path)
1937 		return -ENOMEM;
1938 
1939 	path->leave_spinning = 1;
1940 
1941 	/*
1942 	 * we may be replacing one extent in the tree with another.
1943 	 * The new extent is pinned in the extent map, and we don't want
1944 	 * to drop it from the cache until it is completely in the btree.
1945 	 *
1946 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1947 	 * The caller is expected to unpin it and allow it to be merged
1948 	 * with the others.
1949 	 */
1950 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1951 				 file_pos + num_bytes, 0);
1952 	if (ret)
1953 		goto out;
1954 
1955 	ins.objectid = btrfs_ino(inode);
1956 	ins.offset = file_pos;
1957 	ins.type = BTRFS_EXTENT_DATA_KEY;
1958 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1959 	if (ret)
1960 		goto out;
1961 	leaf = path->nodes[0];
1962 	fi = btrfs_item_ptr(leaf, path->slots[0],
1963 			    struct btrfs_file_extent_item);
1964 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1965 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1966 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1967 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1968 	btrfs_set_file_extent_offset(leaf, fi, 0);
1969 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1970 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1971 	btrfs_set_file_extent_compression(leaf, fi, compression);
1972 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1973 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1974 
1975 	btrfs_mark_buffer_dirty(leaf);
1976 	btrfs_release_path(path);
1977 
1978 	inode_add_bytes(inode, num_bytes);
1979 
1980 	ins.objectid = disk_bytenr;
1981 	ins.offset = disk_num_bytes;
1982 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1983 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1984 					root->root_key.objectid,
1985 					btrfs_ino(inode), file_pos, &ins);
1986 out:
1987 	btrfs_free_path(path);
1988 
1989 	return ret;
1990 }
1991 
1992 /* snapshot-aware defrag */
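/*
 * After a defragged range is written out, every reference to the old
 * extents it replaced (including references from other snapshots) is
 * collected and relinked to the new extent so the old space can
 * actually be freed.  An "old" struct describes one extent being
 * replaced, "new" describes the freshly written extent, and each
 * backref is a file extent item that still points at an old extent.
 */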
1993 struct sa_defrag_extent_backref {
1994 	struct rb_node node;
1995 	struct old_sa_defrag_extent *old;
1996 	u64 root_id;
1997 	u64 inum;
1998 	u64 file_pos;
1999 	u64 extent_offset;
2000 	u64 num_bytes;
2001 	u64 generation;
2002 };
2003 
2004 struct old_sa_defrag_extent {
2005 	struct list_head list;
2006 	struct new_sa_defrag_extent *new;
2007 
2008 	u64 extent_offset;
2009 	u64 bytenr;
2010 	u64 offset;
2011 	u64 len;
2012 	int count;
2013 };
2014 
2015 struct new_sa_defrag_extent {
2016 	struct rb_root root;
2017 	struct list_head head;
2018 	struct btrfs_path *path;
2019 	struct inode *inode;
2020 	u64 file_pos;
2021 	u64 len;
2022 	u64 bytenr;
2023 	u64 disk_len;
2024 	u8 compress_type;
2025 };
2026 
2027 static int backref_comp(struct sa_defrag_extent_backref *b1,
2028 			struct sa_defrag_extent_backref *b2)
2029 {
2030 	if (b1->root_id < b2->root_id)
2031 		return -1;
2032 	else if (b1->root_id > b2->root_id)
2033 		return 1;
2034 
2035 	if (b1->inum < b2->inum)
2036 		return -1;
2037 	else if (b1->inum > b2->inum)
2038 		return 1;
2039 
2040 	if (b1->file_pos < b2->file_pos)
2041 		return -1;
2042 	else if (b1->file_pos > b2->file_pos)
2043 		return 1;
2044 
2045 	/*
2046 	 * [------------------------------] ===> (a range of space)
2047 	 *     |<--->|   |<---->| =============> (fs/file tree A)
2048 	 * |<---------------------------->| ===> (fs/file tree B)
2049 	 *
2050 	 * A range of space can refer to two file extents in one tree while
2051 	 * referring to only one file extent in another tree.
2052 	 *
2053 	 * So we may process a disk offset more than once (two extents in A)
2054 	 * that maps to the same extent (one extent in B), and then insert
2055 	 * two identical backrefs (both referring to the extent in B).
2056 	 */
2057 	return 0;
2058 }
2059 
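/*
 * insert a backref into the rb-tree ordered by backref_comp().  Equal
 * keys fall to the right, so duplicate backrefs (see the comment above)
 * are kept rather than rejected.
 */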
2060 static void backref_insert(struct rb_root *root,
2061 			   struct sa_defrag_extent_backref *backref)
2062 {
2063 	struct rb_node **p = &root->rb_node;
2064 	struct rb_node *parent = NULL;
2065 	struct sa_defrag_extent_backref *entry;
2066 	int ret;
2067 
2068 	while (*p) {
2069 		parent = *p;
2070 		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2071 
2072 		ret = backref_comp(backref, entry);
2073 		if (ret < 0)
2074 			p = &(*p)->rb_left;
2075 		else
2076 			p = &(*p)->rb_right;
2077 	}
2078 
2079 	rb_link_node(&backref->node, parent, p);
2080 	rb_insert_color(&backref->node, root);
2081 }
2082 
2083 /*
2084  * Note the backref might have changed, and in this case we just return 0.
2085  */
2086 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2087 				       void *ctx)
2088 {
2089 	struct btrfs_file_extent_item *extent;
2090 	struct btrfs_fs_info *fs_info;
2091 	struct old_sa_defrag_extent *old = ctx;
2092 	struct new_sa_defrag_extent *new = old->new;
2093 	struct btrfs_path *path = new->path;
2094 	struct btrfs_key key;
2095 	struct btrfs_root *root;
2096 	struct sa_defrag_extent_backref *backref;
2097 	struct extent_buffer *leaf;
2098 	struct inode *inode = new->inode;
2099 	int slot;
2100 	int ret;
2101 	u64 extent_offset;
2102 	u64 num_bytes;
2103 
2104 	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2105 	    inum == btrfs_ino(inode))
2106 		return 0;
2107 
2108 	key.objectid = root_id;
2109 	key.type = BTRFS_ROOT_ITEM_KEY;
2110 	key.offset = (u64)-1;
2111 
2112 	fs_info = BTRFS_I(inode)->root->fs_info;
2113 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2114 	if (IS_ERR(root)) {
2115 		if (PTR_ERR(root) == -ENOENT)
2116 			return 0;
2117 		WARN_ON(1);
2118 		pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2119 			 inum, offset, root_id);
2120 		return PTR_ERR(root);
2121 	}
2122 
2123 	key.objectid = inum;
2124 	key.type = BTRFS_EXTENT_DATA_KEY;
2125 	if (offset > (u64)-1 << 32)
2126 		key.offset = 0;
2127 	else
2128 		key.offset = offset;
2129 
2130 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2131 	if (ret < 0) {
2132 		WARN_ON(1);
2133 		return ret;
2134 	}
2135 
2136 	while (1) {
2137 		cond_resched();
2138 
2139 		leaf = path->nodes[0];
2140 		slot = path->slots[0];
2141 
2142 		if (slot >= btrfs_header_nritems(leaf)) {
2143 			ret = btrfs_next_leaf(root, path);
2144 			if (ret < 0) {
2145 				goto out;
2146 			} else if (ret > 0) {
2147 				ret = 0;
2148 				goto out;
2149 			}
2150 			continue;
2151 		}
2152 
2153 		path->slots[0]++;
2154 
2155 		btrfs_item_key_to_cpu(leaf, &key, slot);
2156 
2157 		if (key.objectid > inum)
2158 			goto out;
2159 
2160 		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2161 			continue;
2162 
2163 		extent = btrfs_item_ptr(leaf, slot,
2164 					struct btrfs_file_extent_item);
2165 
2166 		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2167 			continue;
2168 
2169 		extent_offset = btrfs_file_extent_offset(leaf, extent);
2170 		if (key.offset - extent_offset != offset)
2171 			continue;
2172 
2173 		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2174 		if (extent_offset >= old->extent_offset + old->offset +
2175 		    old->len || extent_offset + num_bytes <=
2176 		    old->extent_offset + old->offset)
2177 			continue;
2178 
2179 		break;
2180 	}
2181 
2182 	backref = kmalloc(sizeof(*backref), GFP_NOFS);
2183 	if (!backref) {
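		/*
		 * -ENOENT rather than -ENOMEM so record_extent_backrefs()
		 * treats the allocation failure as a soft error
		 */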
2184 		ret = -ENOENT;
2185 		goto out;
2186 	}
2187 
2188 	backref->root_id = root_id;
2189 	backref->inum = inum;
2190 	backref->file_pos = offset + extent_offset;
2191 	backref->num_bytes = num_bytes;
2192 	backref->extent_offset = extent_offset;
2193 	backref->generation = btrfs_file_extent_generation(leaf, extent);
2194 	backref->old = old;
2195 	backref_insert(&new->root, backref);
2196 	old->count++;
2197 out:
2198 	btrfs_release_path(path);
2199 	WARN_ON(ret);
2200 	return ret;
2201 }
2202 
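/*
 * collect every remaining reference to the old extents via the backref
 * walker; returns false when there is nothing left to relink
 */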
2203 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2204 				   struct new_sa_defrag_extent *new)
2205 {
2206 	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2207 	struct old_sa_defrag_extent *old, *tmp;
2208 	int ret;
2209 
2210 	new->path = path;
2211 
2212 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2213 		ret = iterate_inodes_from_logical(old->bytenr, fs_info,
2214 						  path, record_one_backref,
2215 						  old);
2216 		BUG_ON(ret < 0 && ret != -ENOENT);
2217 
2218 		/* no backref to be processed for this extent */
2219 		if (!old->count) {
2220 			list_del(&old->list);
2221 			kfree(old);
2222 		}
2223 	}
2224 
2225 	if (list_empty(&new->head))
2226 		return false;
2227 
2228 	return true;
2229 }
2230 
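/*
 * two file extent items can only be merged if they point at the same
 * disk extent, are plain REG extents and use no compression, encryption
 * or other encoding
 */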
2231 static int relink_is_mergable(struct extent_buffer *leaf,
2232 			      struct btrfs_file_extent_item *fi,
2233 			      u64 disk_bytenr)
2234 {
2235 	if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
2236 		return 0;
2237 
2238 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2239 		return 0;
2240 
2241 	if (btrfs_file_extent_compression(leaf, fi) ||
2242 	    btrfs_file_extent_encryption(leaf, fi) ||
2243 	    btrfs_file_extent_other_encoding(leaf, fi))
2244 		return 0;
2245 
2246 	return 1;
2247 }
2248 
2249 /*
2250  * Note the backref might have changed, and in this case we just return 0.
2251  */
2252 static noinline int relink_extent_backref(struct btrfs_path *path,
2253 				 struct sa_defrag_extent_backref *prev,
2254 				 struct sa_defrag_extent_backref *backref)
2255 {
2256 	struct btrfs_file_extent_item *extent;
2257 	struct btrfs_file_extent_item *item;
2258 	struct btrfs_ordered_extent *ordered;
2259 	struct btrfs_trans_handle *trans;
2260 	struct btrfs_fs_info *fs_info;
2261 	struct btrfs_root *root;
2262 	struct btrfs_key key;
2263 	struct extent_buffer *leaf;
2264 	struct old_sa_defrag_extent *old = backref->old;
2265 	struct new_sa_defrag_extent *new = old->new;
2266 	struct inode *src_inode = new->inode;
2267 	struct inode *inode;
2268 	struct extent_state *cached = NULL;
2269 	int ret = 0;
2270 	u64 start;
2271 	u64 len;
2272 	u64 lock_start;
2273 	u64 lock_end;
2274 	bool merge = false;
2275 	int index;
2276 
2277 	if (prev && prev->root_id == backref->root_id &&
2278 	    prev->inum == backref->inum &&
2279 	    prev->file_pos + prev->num_bytes == backref->file_pos)
2280 		merge = true;
2281 
2282 	/* step 1: get root */
2283 	key.objectid = backref->root_id;
2284 	key.type = BTRFS_ROOT_ITEM_KEY;
2285 	key.offset = (u64)-1;
2286 
2287 	fs_info = BTRFS_I(src_inode)->root->fs_info;
2288 	index = srcu_read_lock(&fs_info->subvol_srcu);
2289 
2290 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2291 	if (IS_ERR(root)) {
2292 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2293 		if (PTR_ERR(root) == -ENOENT)
2294 			return 0;
2295 		return PTR_ERR(root);
2296 	}
2297 
2298 	/* step 2: get inode */
2299 	key.objectid = backref->inum;
2300 	key.type = BTRFS_INODE_ITEM_KEY;
2301 	key.offset = 0;
2302 
2303 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2304 	if (IS_ERR(inode)) {
2305 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2306 		return 0;
2307 	}
2308 
2309 	srcu_read_unlock(&fs_info->subvol_srcu, index);
2310 
2311 	/* step 3: relink backref */
2312 	lock_start = backref->file_pos;
2313 	lock_end = backref->file_pos + backref->num_bytes - 1;
2314 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2315 			 0, &cached);
2316 
2317 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2318 	if (ordered) {
2319 		btrfs_put_ordered_extent(ordered);
2320 		goto out_unlock;
2321 	}
2322 
2323 	trans = btrfs_join_transaction(root);
2324 	if (IS_ERR(trans)) {
2325 		ret = PTR_ERR(trans);
2326 		goto out_unlock;
2327 	}
2328 
2329 	key.objectid = backref->inum;
2330 	key.type = BTRFS_EXTENT_DATA_KEY;
2331 	key.offset = backref->file_pos;
2332 
2333 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2334 	if (ret < 0) {
2335 		goto out_free_path;
2336 	} else if (ret > 0) {
2337 		ret = 0;
2338 		goto out_free_path;
2339 	}
2340 
2341 	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2342 				struct btrfs_file_extent_item);
2343 
2344 	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2345 	    backref->generation)
2346 		goto out_free_path;
2347 
2348 	btrfs_release_path(path);
2349 
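	/*
	 * only relink the overlap: intersect this backref's range within
	 * the disk extent with the part of the old extent that the new
	 * extent actually replaced
	 */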
2350 	start = backref->file_pos;
2351 	if (backref->extent_offset < old->extent_offset + old->offset)
2352 		start += old->extent_offset + old->offset -
2353 			 backref->extent_offset;
2354 
2355 	len = min(backref->extent_offset + backref->num_bytes,
2356 		  old->extent_offset + old->offset + old->len);
2357 	len -= max(backref->extent_offset, old->extent_offset + old->offset);
2358 
2359 	ret = btrfs_drop_extents(trans, root, inode, start,
2360 				 start + len, 1);
2361 	if (ret)
2362 		goto out_free_path;
2363 again:
2364 	key.objectid = btrfs_ino(inode);
2365 	key.type = BTRFS_EXTENT_DATA_KEY;
2366 	key.offset = start;
2367 
2368 	path->leave_spinning = 1;
2369 	if (merge) {
2370 		struct btrfs_file_extent_item *fi;
2371 		u64 extent_len;
2372 		struct btrfs_key found_key;
2373 
2374 		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
2375 		if (ret < 0)
2376 			goto out_free_path;
2377 
2378 		path->slots[0]--;
2379 		leaf = path->nodes[0];
2380 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2381 
2382 		fi = btrfs_item_ptr(leaf, path->slots[0],
2383 				    struct btrfs_file_extent_item);
2384 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2385 
2386 		if (relink_is_mergable(leaf, fi, new->bytenr) &&
2387 		    extent_len + found_key.offset == start) {
2388 			btrfs_set_file_extent_num_bytes(leaf, fi,
2389 							extent_len + len);
2390 			btrfs_mark_buffer_dirty(leaf);
2391 			inode_add_bytes(inode, len);
2392 
2393 			ret = 1;
2394 			goto out_free_path;
2395 		} else {
2396 			merge = false;
2397 			btrfs_release_path(path);
2398 			goto again;
2399 		}
2400 	}
2401 
2402 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2403 					sizeof(*extent));
2404 	if (ret) {
2405 		btrfs_abort_transaction(trans, root, ret);
2406 		goto out_free_path;
2407 	}
2408 
2409 	leaf = path->nodes[0];
2410 	item = btrfs_item_ptr(leaf, path->slots[0],
2411 				struct btrfs_file_extent_item);
2412 	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2413 	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2414 	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2415 	btrfs_set_file_extent_num_bytes(leaf, item, len);
2416 	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2417 	btrfs_set_file_extent_generation(leaf, item, trans->transid);
2418 	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2419 	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2420 	btrfs_set_file_extent_encryption(leaf, item, 0);
2421 	btrfs_set_file_extent_other_encoding(leaf, item, 0);
2422 
2423 	btrfs_mark_buffer_dirty(leaf);
2424 	inode_add_bytes(inode, len);
2425 	btrfs_release_path(path);
2426 
2427 	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2428 			new->disk_len, 0,
2429 			backref->root_id, backref->inum,
2430 			new->file_pos, 0);	/* start - extent_offset */
2431 	if (ret) {
2432 		btrfs_abort_transaction(trans, root, ret);
2433 		goto out_free_path;
2434 	}
2435 
2436 	ret = 1;
2437 out_free_path:
2438 	btrfs_release_path(path);
2439 	path->leave_spinning = 0;
2440 	btrfs_end_transaction(trans, root);
2441 out_unlock:
2442 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2443 			     &cached, GFP_NOFS);
2444 	iput(inode);
2445 	return ret;
2446 }
2447 
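/*
 * process all recorded backrefs for a defragged extent: walk them in
 * sorted order and relink each one, merging with the previous backref
 * when the two are contiguous in the same file
 */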
2448 static void relink_file_extents(struct new_sa_defrag_extent *new)
2449 {
2450 	struct btrfs_path *path;
2451 	struct old_sa_defrag_extent *old, *tmp;
2452 	struct sa_defrag_extent_backref *backref;
2453 	struct sa_defrag_extent_backref *prev = NULL;
2454 	struct inode *inode;
2455 	struct btrfs_root *root;
2456 	struct rb_node *node;
2457 	int ret;
2458 
2459 	inode = new->inode;
2460 	root = BTRFS_I(inode)->root;
2461 
2462 	path = btrfs_alloc_path();
2463 	if (!path)
2464 		return;
2465 
2466 	if (!record_extent_backrefs(path, new)) {
2467 		btrfs_free_path(path);
2468 		goto out;
2469 	}
2470 	btrfs_release_path(path);
2471 
2472 	while (1) {
2473 		node = rb_first(&new->root);
2474 		if (!node)
2475 			break;
2476 		rb_erase(node, &new->root);
2477 
2478 		backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2479 
2480 		ret = relink_extent_backref(path, prev, backref);
2481 		WARN_ON(ret < 0);
2482 
2483 		kfree(prev);
2484 
2485 		if (ret == 1)
2486 			prev = backref;
2487 		else
2488 			prev = NULL;
2489 		cond_resched();
2490 	}
2491 	kfree(prev);
2492 
2493 	btrfs_free_path(path);
2494 
2495 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2496 		list_del(&old->list);
2497 		kfree(old);
2498 	}
2499 out:
2500 	atomic_dec(&root->fs_info->defrag_running);
2501 	wake_up(&root->fs_info->transaction_wait);
2502 
2503 	kfree(new);
2504 }
2505 
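/*
 * record the file extents the given ordered extent is about to
 * overwrite, so they can be relinked to the new extent once it
 * completes
 */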
2506 static struct new_sa_defrag_extent *
2507 record_old_file_extents(struct inode *inode,
2508 			struct btrfs_ordered_extent *ordered)
2509 {
2510 	struct btrfs_root *root = BTRFS_I(inode)->root;
2511 	struct btrfs_path *path;
2512 	struct btrfs_key key;
2513 	struct old_sa_defrag_extent *old, *tmp;
2514 	struct new_sa_defrag_extent *new;
2515 	int ret;
2516 
2517 	new = kmalloc(sizeof(*new), GFP_NOFS);
2518 	if (!new)
2519 		return NULL;
2520 
2521 	new->inode = inode;
2522 	new->file_pos = ordered->file_offset;
2523 	new->len = ordered->len;
2524 	new->bytenr = ordered->start;
2525 	new->disk_len = ordered->disk_len;
2526 	new->compress_type = ordered->compress_type;
2527 	new->root = RB_ROOT;
2528 	INIT_LIST_HEAD(&new->head);
2529 
2530 	path = btrfs_alloc_path();
2531 	if (!path)
2532 		goto out_kfree;
2533 
2534 	key.objectid = btrfs_ino(inode);
2535 	key.type = BTRFS_EXTENT_DATA_KEY;
2536 	key.offset = new->file_pos;
2537 
2538 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2539 	if (ret < 0)
2540 		goto out_free_path;
2541 	if (ret > 0 && path->slots[0] > 0)
2542 		path->slots[0]--;
2543 
2544 	/* find out all the old extents for the file range */
2545 	while (1) {
2546 		struct btrfs_file_extent_item *extent;
2547 		struct extent_buffer *l;
2548 		int slot;
2549 		u64 num_bytes;
2550 		u64 offset;
2551 		u64 end;
2552 		u64 disk_bytenr;
2553 		u64 extent_offset;
2554 
2555 		l = path->nodes[0];
2556 		slot = path->slots[0];
2557 
2558 		if (slot >= btrfs_header_nritems(l)) {
2559 			ret = btrfs_next_leaf(root, path);
2560 			if (ret < 0)
2561 				goto out_free_list;
2562 			else if (ret > 0)
2563 				break;
2564 			continue;
2565 		}
2566 
2567 		btrfs_item_key_to_cpu(l, &key, slot);
2568 
2569 		if (key.objectid != btrfs_ino(inode))
2570 			break;
2571 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2572 			break;
2573 		if (key.offset >= new->file_pos + new->len)
2574 			break;
2575 
2576 		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2577 
2578 		num_bytes = btrfs_file_extent_num_bytes(l, extent);
2579 		if (key.offset + num_bytes < new->file_pos)
2580 			goto next;
2581 
2582 		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2583 		if (!disk_bytenr)
2584 			goto next;
2585 
2586 		extent_offset = btrfs_file_extent_offset(l, extent);
2587 
2588 		old = kmalloc(sizeof(*old), GFP_NOFS);
2589 		if (!old)
2590 			goto out_free_list;
2591 
2592 		offset = max(new->file_pos, key.offset);
2593 		end = min(new->file_pos + new->len, key.offset + num_bytes);
2594 
2595 		old->bytenr = disk_bytenr;
2596 		old->extent_offset = extent_offset;
2597 		old->offset = offset - key.offset;
2598 		old->len = end - offset;
2599 		old->new = new;
2600 		old->count = 0;
2601 		list_add_tail(&old->list, &new->head);
2602 next:
2603 		path->slots[0]++;
2604 		cond_resched();
2605 	}
2606 
2607 	btrfs_free_path(path);
2608 	atomic_inc(&root->fs_info->defrag_running);
2609 
2610 	return new;
2611 
2612 out_free_list:
2613 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2614 		list_del(&old->list);
2615 		kfree(old);
2616 	}
2617 out_free_path:
2618 	btrfs_free_path(path);
2619 out_kfree:
2620 	kfree(new);
2621 	return NULL;
2622 }
2623 
2624 /*
2625  * As ordered data IO finishes, this gets called so we can finish
2626  * an ordered extent if the range of bytes in the file it covers is
2627  * fully written.
2628  *
2629  * It inserts the file extent item (or marks a preallocated extent
2630  * as written), adds the pending csums to the csum tree, updates
2631  * the on-disk inode, and on failure returns the ordered extent's
2632  * reserved space to the allocator.
2633  */
2634 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2635 {
2636 	struct inode *inode = ordered_extent->inode;
2637 	struct btrfs_root *root = BTRFS_I(inode)->root;
2638 	struct btrfs_trans_handle *trans = NULL;
2639 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2640 	struct extent_state *cached_state = NULL;
2641 	struct new_sa_defrag_extent *new = NULL;
2642 	int compress_type = 0;
2643 	int ret;
2644 	bool nolock;
2645 
2646 	nolock = btrfs_is_free_space_inode(inode);
2647 
2648 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2649 		ret = -EIO;
2650 		goto out;
2651 	}
2652 
2653 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2654 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2655 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2656 		if (nolock)
2657 			trans = btrfs_join_transaction_nolock(root);
2658 		else
2659 			trans = btrfs_join_transaction(root);
2660 		if (IS_ERR(trans)) {
2661 			ret = PTR_ERR(trans);
2662 			trans = NULL;
2663 			goto out;
2664 		}
2665 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2666 		ret = btrfs_update_inode_fallback(trans, root, inode);
2667 		if (ret) /* -ENOMEM or corruption */
2668 			btrfs_abort_transaction(trans, root, ret);
2669 		goto out;
2670 	}
2671 
2672 	lock_extent_bits(io_tree, ordered_extent->file_offset,
2673 			 ordered_extent->file_offset + ordered_extent->len - 1,
2674 			 0, &cached_state);
2675 
2676 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
2677 			ordered_extent->file_offset + ordered_extent->len - 1,
2678 			EXTENT_DEFRAG, 1, cached_state);
2679 	if (ret) {
2680 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2681 		if (last_snapshot >= BTRFS_I(inode)->generation)
2682 			/* the inode is shared */
2683 			new = record_old_file_extents(inode, ordered_extent);
2684 
2685 		clear_extent_bit(io_tree, ordered_extent->file_offset,
2686 			ordered_extent->file_offset + ordered_extent->len - 1,
2687 			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2688 	}
2689 
2690 	if (nolock)
2691 		trans = btrfs_join_transaction_nolock(root);
2692 	else
2693 		trans = btrfs_join_transaction(root);
2694 	if (IS_ERR(trans)) {
2695 		ret = PTR_ERR(trans);
2696 		trans = NULL;
2697 		goto out_unlock;
2698 	}
2699 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2700 
2701 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2702 		compress_type = ordered_extent->compress_type;
2703 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2704 		BUG_ON(compress_type);
2705 		ret = btrfs_mark_extent_written(trans, inode,
2706 						ordered_extent->file_offset,
2707 						ordered_extent->file_offset +
2708 						ordered_extent->len);
2709 	} else {
2710 		BUG_ON(root == root->fs_info->tree_root);
2711 		ret = insert_reserved_file_extent(trans, inode,
2712 						ordered_extent->file_offset,
2713 						ordered_extent->start,
2714 						ordered_extent->disk_len,
2715 						ordered_extent->len,
2716 						ordered_extent->len,
2717 						compress_type, 0, 0,
2718 						BTRFS_FILE_EXTENT_REG);
2719 	}
2720 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2721 			   ordered_extent->file_offset, ordered_extent->len,
2722 			   trans->transid);
2723 	if (ret < 0) {
2724 		btrfs_abort_transaction(trans, root, ret);
2725 		goto out_unlock;
2726 	}
2727 
2728 	add_pending_csums(trans, inode, ordered_extent->file_offset,
2729 			  &ordered_extent->list);
2730 
2731 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2732 	ret = btrfs_update_inode_fallback(trans, root, inode);
2733 	if (ret) { /* -ENOMEM or corruption */
2734 		btrfs_abort_transaction(trans, root, ret);
2735 		goto out_unlock;
2736 	}
2737 	ret = 0;
2738 out_unlock:
2739 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
2740 			     ordered_extent->file_offset +
2741 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
2742 out:
2743 	if (root != root->fs_info->tree_root)
2744 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2745 	if (trans)
2746 		btrfs_end_transaction(trans, root);
2747 
2748 	if (ret) {
2749 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
2750 				      ordered_extent->file_offset +
2751 				      ordered_extent->len - 1, NULL, GFP_NOFS);
2752 
2753 		/*
2754 		 * If the ordered extent had an IOERR or something else went
2755 		 * wrong we need to return the space for this ordered extent
2756 		 * back to the allocator.
2757 		 */
2758 		if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2759 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2760 			btrfs_free_reserved_extent(root, ordered_extent->start,
2761 						   ordered_extent->disk_len);
2762 	}
2763 
2764 
2765 	/*
2766 	 * This needs to be done to make sure anybody waiting knows we are done
2767 	 * updating everything for this ordered extent.
2768 	 */
2769 	btrfs_remove_ordered_extent(inode, ordered_extent);
2770 
2771 	/* for snapshot-aware defrag */
2772 	if (new)
2773 		relink_file_extents(new);
2774 
2775 	/* once for us */
2776 	btrfs_put_ordered_extent(ordered_extent);
2777 	/* once for the tree */
2778 	btrfs_put_ordered_extent(ordered_extent);
2779 
2780 	return ret;
2781 }
2782 
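/* btrfs_work wrapper so ordered extent completion can run from a worker */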
2783 static void finish_ordered_fn(struct btrfs_work *work)
2784 {
2785 	struct btrfs_ordered_extent *ordered_extent;
2786 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2787 	btrfs_finish_ordered_io(ordered_extent);
2788 }
2789 
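/*
 * writeback completion hook: account the finished range against its
 * ordered extent and, once the whole extent is written, queue
 * btrfs_finish_ordered_io() on the matching endio workqueue
 */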
2790 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2791 				struct extent_state *state, int uptodate)
2792 {
2793 	struct inode *inode = page->mapping->host;
2794 	struct btrfs_root *root = BTRFS_I(inode)->root;
2795 	struct btrfs_ordered_extent *ordered_extent = NULL;
2796 	struct btrfs_workers *workers;
2797 
2798 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2799 
2800 	ClearPagePrivate2(page);
2801 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2802 					    end - start + 1, uptodate))
2803 		return 0;
2804 
2805 	ordered_extent->work.func = finish_ordered_fn;
2806 	ordered_extent->work.flags = 0;
2807 
2808 	if (btrfs_is_free_space_inode(inode))
2809 		workers = &root->fs_info->endio_freespace_worker;
2810 	else
2811 		workers = &root->fs_info->endio_write_workers;
2812 	btrfs_queue_worker(workers, &ordered_extent->work);
2813 
2814 	return 0;
2815 }
2816 
2817 /*
2818  * when reads are done, we need to check csums to verify the data is correct
2819  * if there's a match, we allow the bio to finish.  If not, the code in
2820  * extent_io.c will try to find good copies for us.
2821  */
2822 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2823 			       struct extent_state *state, int mirror)
2824 {
2825 	size_t offset = start - page_offset(page);
2826 	struct inode *inode = page->mapping->host;
2827 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2828 	char *kaddr;
2829 	u64 private = ~(u32)0;
2830 	int ret;
2831 	struct btrfs_root *root = BTRFS_I(inode)->root;
2832 	u32 csum = ~(u32)0;
2833 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
2834 	                              DEFAULT_RATELIMIT_BURST);
2835 
2836 	if (PageChecked(page)) {
2837 		ClearPageChecked(page);
2838 		goto good;
2839 	}
2840 
2841 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2842 		goto good;
2843 
2844 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2845 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2846 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2847 				  GFP_NOFS);
2848 		return 0;
2849 	}
2850 
2851 	if (state && state->start == start) {
2852 		private = state->private;
2853 		ret = 0;
2854 	} else {
2855 		ret = get_state_private(io_tree, start, &private);
2856 	}
2857 	kaddr = kmap_atomic(page);
2858 	if (ret)
2859 		goto zeroit;
2860 
2861 	csum = btrfs_csum_data(kaddr + offset, csum,  end - start + 1);
2862 	btrfs_csum_final(csum, (char *)&csum);
2863 	if (csum != private)
2864 		goto zeroit;
2865 
2866 	kunmap_atomic(kaddr);
2867 good:
2868 	return 0;
2869 
2870 zeroit:
2871 	if (__ratelimit(&_rs))
2872 		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u private %llu",
2873 			(unsigned long long)btrfs_ino(page->mapping->host),
2874 			(unsigned long long)start, csum,
2875 			(unsigned long long)private);
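	/*
	 * overwrite the range that failed its checksum so the stale
	 * contents can't be consumed as if they were valid
	 */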
2876 	memset(kaddr + offset, 1, end - start + 1);
2877 	flush_dcache_page(page);
2878 	kunmap_atomic(kaddr);
2879 	if (private == 0)
2880 		return 0;
2881 	return -EIO;
2882 }
2883 
2884 struct delayed_iput {
2885 	struct list_head list;
2886 	struct inode *inode;
2887 };
2888 
2889 /* JDM: If this is fs-wide, why can't we add a pointer to
2890  * btrfs_inode instead and avoid the allocation? */
2891 void btrfs_add_delayed_iput(struct inode *inode)
2892 {
2893 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2894 	struct delayed_iput *delayed;
2895 
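	/*
	 * if ours is not the last reference, drop it right away; only the
	 * final iput needs to be deferred to a safe context
	 */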
2896 	if (atomic_add_unless(&inode->i_count, -1, 1))
2897 		return;
2898 
2899 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2900 	delayed->inode = inode;
2901 
2902 	spin_lock(&fs_info->delayed_iput_lock);
2903 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2904 	spin_unlock(&fs_info->delayed_iput_lock);
2905 }
2906 
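/*
 * run from contexts where iput is safe (e.g. the cleaner thread) to
 * drop the inode references queued up by btrfs_add_delayed_iput()
 */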
2907 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2908 {
2909 	LIST_HEAD(list);
2910 	struct btrfs_fs_info *fs_info = root->fs_info;
2911 	struct delayed_iput *delayed;
2912 	int empty;
2913 
2914 	spin_lock(&fs_info->delayed_iput_lock);
2915 	empty = list_empty(&fs_info->delayed_iputs);
2916 	spin_unlock(&fs_info->delayed_iput_lock);
2917 	if (empty)
2918 		return;
2919 
2920 	spin_lock(&fs_info->delayed_iput_lock);
2921 	list_splice_init(&fs_info->delayed_iputs, &list);
2922 	spin_unlock(&fs_info->delayed_iput_lock);
2923 
2924 	while (!list_empty(&list)) {
2925 		delayed = list_entry(list.next, struct delayed_iput, list);
2926 		list_del(&delayed->list);
2927 		iput(delayed->inode);
2928 		kfree(delayed);
2929 	}
2930 }
2931 
2932 /*
2933  * This is called at transaction commit time. If there are no orphan
2934  * files in the subvolume, it removes the orphan item and frees the
2935  * block_rsv structure.
2936  */
2937 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2938 			      struct btrfs_root *root)
2939 {
2940 	struct btrfs_block_rsv *block_rsv;
2941 	int ret;
2942 
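	/* unlocked fast path; both checks are repeated under orphan_lock */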
2943 	if (atomic_read(&root->orphan_inodes) ||
2944 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2945 		return;
2946 
2947 	spin_lock(&root->orphan_lock);
2948 	if (atomic_read(&root->orphan_inodes)) {
2949 		spin_unlock(&root->orphan_lock);
2950 		return;
2951 	}
2952 
2953 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2954 		spin_unlock(&root->orphan_lock);
2955 		return;
2956 	}
2957 
2958 	block_rsv = root->orphan_block_rsv;
2959 	root->orphan_block_rsv = NULL;
2960 	spin_unlock(&root->orphan_lock);
2961 
2962 	if (root->orphan_item_inserted &&
2963 	    btrfs_root_refs(&root->root_item) > 0) {
2964 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2965 					    root->root_key.objectid);
2966 		BUG_ON(ret);
2967 		root->orphan_item_inserted = 0;
2968 	}
2969 
2970 	if (block_rsv) {
2971 		WARN_ON(block_rsv->size > 0);
2972 		btrfs_free_block_rsv(root, block_rsv);
2973 	}
2974 }
2975 
2976 /*
2977  * This creates an orphan entry for the given inode in case something goes
2978  * wrong in the middle of an unlink/truncate.
2979  *
2980  * NOTE: the caller should reserve 5 units of metadata before calling
2981  *	 this function.
2982  */
2983 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2984 {
2985 	struct btrfs_root *root = BTRFS_I(inode)->root;
2986 	struct btrfs_block_rsv *block_rsv = NULL;
2987 	int reserve = 0;
2988 	int insert = 0;
2989 	int ret;
2990 
2991 	if (!root->orphan_block_rsv) {
2992 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2993 		if (!block_rsv)
2994 			return -ENOMEM;
2995 	}
2996 
2997 	spin_lock(&root->orphan_lock);
2998 	if (!root->orphan_block_rsv) {
2999 		root->orphan_block_rsv = block_rsv;
3000 	} else if (block_rsv) {
3001 		btrfs_free_block_rsv(root, block_rsv);
3002 		block_rsv = NULL;
3003 	}
3004 
3005 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3006 			      &BTRFS_I(inode)->runtime_flags)) {
3007 #if 0
3008 		/*
3009 		 * For proper ENOSPC handling, we should do orphan
3010 		 * cleanup when mounting. But this introduces backward
3011 		 * compatibility issue.
3012 		 */
3013 		if (!xchg(&root->orphan_item_inserted, 1))
3014 			insert = 2;
3015 		else
3016 			insert = 1;
3017 #endif
3018 		insert = 1;
3019 		atomic_inc(&root->orphan_inodes);
3020 	}
3021 
3022 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3023 			      &BTRFS_I(inode)->runtime_flags))
3024 		reserve = 1;
3025 	spin_unlock(&root->orphan_lock);
3026 
3027 	/* grab metadata reservation from transaction handle */
3028 	if (reserve) {
3029 		ret = btrfs_orphan_reserve_metadata(trans, inode);
3030 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3031 	}
3032 
3033 	/* insert an orphan item to track this unlinked/truncated file */
3034 	if (insert >= 1) {
3035 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3036 		if (ret && ret != -EEXIST) {
3037 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3038 				  &BTRFS_I(inode)->runtime_flags);
3039 			btrfs_abort_transaction(trans, root, ret);
3040 			return ret;
3041 		}
3042 		ret = 0;
3043 	}
3044 
3045 	/* insert an orphan item to record that the subvolume contains orphan files */
3046 	if (insert >= 2) {
3047 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3048 					       root->root_key.objectid);
3049 		if (ret && ret != -EEXIST) {
3050 			btrfs_abort_transaction(trans, root, ret);
3051 			return ret;
3052 		}
3053 	}
3054 	return 0;
3055 }
3056 
3057 /*
3058  * We have done the truncate/delete so we can go ahead and remove the orphan
3059  * item for this particular inode.
3060  */
3061 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3062 			    struct inode *inode)
3063 {
3064 	struct btrfs_root *root = BTRFS_I(inode)->root;
3065 	int delete_item = 0;
3066 	int release_rsv = 0;
3067 	int ret = 0;
3068 
3069 	spin_lock(&root->orphan_lock);
3070 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3071 			       &BTRFS_I(inode)->runtime_flags))
3072 		delete_item = 1;
3073 
3074 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3075 			       &BTRFS_I(inode)->runtime_flags))
3076 		release_rsv = 1;
3077 	spin_unlock(&root->orphan_lock);
3078 
3079 	if (trans && delete_item) {
3080 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3081 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3082 	}
3083 
3084 	if (release_rsv) {
3085 		btrfs_orphan_release_metadata(inode);
3086 		atomic_dec(&root->orphan_inodes);
3087 	}
3088 
3089 	return 0;
3090 }
3091 
3092 /*
3093  * this cleans up any orphans that may be left on the list from the last use
3094  * of this root.
3095  */
3096 int btrfs_orphan_cleanup(struct btrfs_root *root)
3097 {
3098 	struct btrfs_path *path;
3099 	struct extent_buffer *leaf;
3100 	struct btrfs_key key, found_key;
3101 	struct btrfs_trans_handle *trans;
3102 	struct inode *inode;
3103 	u64 last_objectid = 0;
3104 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
3105 
3106 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3107 		return 0;
3108 
3109 	path = btrfs_alloc_path();
3110 	if (!path) {
3111 		ret = -ENOMEM;
3112 		goto out;
3113 	}
3114 	path->reada = -1;
3115 
3116 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3117 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3118 	key.offset = (u64)-1;
3119 
3120 	while (1) {
3121 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3122 		if (ret < 0)
3123 			goto out;
3124 
3125 		/*
3126 		 * ret == 0 means we found what we were searching for, which
3127 		 * is weird, but possible, so only screw with path if we didn't
3128 		 * find the key and see if we have stuff that matches
3129 		 */
3130 		if (ret > 0) {
3131 			ret = 0;
3132 			if (path->slots[0] == 0)
3133 				break;
3134 			path->slots[0]--;
3135 		}
3136 
3137 		/* pull out the item */
3138 		leaf = path->nodes[0];
3139 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3140 
3141 		/* make sure the item matches what we want */
3142 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3143 			break;
3144 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3145 			break;
3146 
3147 		/* release the path since we're done with it */
3148 		btrfs_release_path(path);
3149 
3150 		/*
3151 		 * this is where we basically do btrfs_lookup, without
3152 		 * crossing roots.  We store the inode number in the
3153 		 * offset of the orphan item.
3154 		 */
3155 
3156 		if (found_key.offset == last_objectid) {
3157 			btrfs_err(root->fs_info,
3158 				"Error removing orphan entry, stopping orphan cleanup");
3159 			ret = -EINVAL;
3160 			goto out;
3161 		}
3162 
3163 		last_objectid = found_key.offset;
3164 
3165 		found_key.objectid = found_key.offset;
3166 		found_key.type = BTRFS_INODE_ITEM_KEY;
3167 		found_key.offset = 0;
3168 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3169 		ret = PTR_RET(inode);
3170 		if (ret && ret != -ESTALE)
3171 			goto out;
3172 
3173 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
3174 			struct btrfs_root *dead_root;
3175 			struct btrfs_fs_info *fs_info = root->fs_info;
3176 			int is_dead_root = 0;
3177 
3178 			/*
3179 			 * this is an orphan in the tree root. Currently these
3180 			 * could come from 2 sources:
3181 			 *  a) a snapshot deletion in progress
3182 			 *  b) a free space cache inode
3183 			 * We need to distinguish those two, as the snapshot
3184 			 * orphan must not get deleted.
3185 			 * find_dead_roots already ran before us, so if this
3186 			 * is a snapshot deletion, we should find the root
3187 			 * in the dead_roots list
3188 			 */
3189 			spin_lock(&fs_info->trans_lock);
3190 			list_for_each_entry(dead_root, &fs_info->dead_roots,
3191 					    root_list) {
3192 				if (dead_root->root_key.objectid ==
3193 				    found_key.objectid) {
3194 					is_dead_root = 1;
3195 					break;
3196 				}
3197 			}
3198 			spin_unlock(&fs_info->trans_lock);
3199 			if (is_dead_root) {
3200 				/* prevent this orphan from being found again */
3201 				key.offset = found_key.objectid - 1;
3202 				continue;
3203 			}
3204 		}
3205 		/*
3206 		 * Inode is already gone but the orphan item is still there,
3207 		 * kill the orphan item.
3208 		 */
3209 		if (ret == -ESTALE) {
3210 			trans = btrfs_start_transaction(root, 1);
3211 			if (IS_ERR(trans)) {
3212 				ret = PTR_ERR(trans);
3213 				goto out;
3214 			}
3215 			btrfs_debug(root->fs_info, "auto deleting %Lu",
3216 				found_key.objectid);
3217 			ret = btrfs_del_orphan_item(trans, root,
3218 						    found_key.objectid);
3219 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3220 			btrfs_end_transaction(trans, root);
3221 			continue;
3222 		}
3223 
3224 		/*
3225 		 * add this inode to the orphan list so btrfs_orphan_del does
3226 		 * the proper thing when we hit it
3227 		 */
3228 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3229 			&BTRFS_I(inode)->runtime_flags);
3230 		atomic_inc(&root->orphan_inodes);
3231 
3232 		/* if we have links, this was a truncate, let's do that */
3233 		if (inode->i_nlink) {
3234 			if (!S_ISREG(inode->i_mode)) {
3235 				WARN_ON(1);
3236 				iput(inode);
3237 				continue;
3238 			}
3239 			nr_truncate++;
3240 
3241 			/* 1 for the orphan item deletion. */
3242 			trans = btrfs_start_transaction(root, 1);
3243 			if (IS_ERR(trans)) {
3244 				iput(inode);
3245 				ret = PTR_ERR(trans);
3246 				goto out;
3247 			}
3248 			ret = btrfs_orphan_add(trans, inode);
3249 			btrfs_end_transaction(trans, root);
3250 			if (ret) {
3251 				iput(inode);
3252 				goto out;
3253 			}
3254 
3255 			ret = btrfs_truncate(inode);
3256 			if (ret)
3257 				btrfs_orphan_del(NULL, inode);
3258 		} else {
3259 			nr_unlink++;
3260 		}
3261 
3262 		/* this will do delete_inode and everything for us */
3263 		iput(inode);
3264 		if (ret)
3265 			goto out;
3266 	}
3267 	/* release the path since we're done with it */
3268 	btrfs_release_path(path);
3269 
3270 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3271 
3272 	if (root->orphan_block_rsv)
3273 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
3274 					(u64)-1);
3275 
3276 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
3277 		trans = btrfs_join_transaction(root);
3278 		if (!IS_ERR(trans))
3279 			btrfs_end_transaction(trans, root);
3280 	}
3281 
3282 	if (nr_unlink)
3283 		btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3284 	if (nr_truncate)
3285 		btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3286 
3287 out:
3288 	if (ret)
3289 		btrfs_crit(root->fs_info,
3290 			"could not do orphan cleanup %d", ret);
3291 	btrfs_free_path(path);
3292 	return ret;
3293 }
3294 
3295 /*
3296  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3297  * don't find any xattrs, we know there can't be any acls.
3298  *
3299  * slot is the slot the inode is in, objectid is the objectid of the inode
3300  */
3301 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3302 					  int slot, u64 objectid)
3303 {
3304 	u32 nritems = btrfs_header_nritems(leaf);
3305 	struct btrfs_key found_key;
3306 	static u64 xattr_access = 0;
3307 	static u64 xattr_default = 0;
3308 	int scanned = 0;
3309 
3310 	if (!xattr_access) {
3311 		xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3312 					strlen(POSIX_ACL_XATTR_ACCESS));
3313 		xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3314 					strlen(POSIX_ACL_XATTR_DEFAULT));
3315 	}
3316 
3317 	slot++;
3318 	while (slot < nritems) {
3319 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3320 
3321 		/* we found a different objectid, there must not be acls */
3322 		if (found_key.objectid != objectid)
3323 			return 0;
3324 
3325 		/* we found an xattr, assume we've got an acl */
3326 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3327 			if (found_key.offset == xattr_access ||
3328 			    found_key.offset == xattr_default)
3329 				return 1;
3330 		}
3331 
3332 		/*
3333 		 * we found a key greater than an xattr key, there can't
3334 		 * be any acls later on
3335 		 */
3336 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3337 			return 0;
3338 
3339 		slot++;
3340 		scanned++;
3341 
3342 		/*
3343 		 * it goes inode, inode backrefs, xattrs, extents,
3344 		 * so if there are a ton of hard links to an inode there can
3345 		 * be a lot of backrefs.  Don't waste time searching too hard,
3346 		 * this is just an optimization
3347 		 */
3348 		if (scanned >= 8)
3349 			break;
3350 	}
3351 	/* we hit the end of the leaf before we found an xattr or
3352 	 * something larger than an xattr.  We have to assume the inode
3353 	 * has acls
3354 	 */
3355 	return 1;
3356 }
3357 
3358 /*
3359  * read an inode from the btree into the in-memory inode
3360  */
3361 static void btrfs_read_locked_inode(struct inode *inode)
3362 {
3363 	struct btrfs_path *path;
3364 	struct extent_buffer *leaf;
3365 	struct btrfs_inode_item *inode_item;
3366 	struct btrfs_timespec *tspec;
3367 	struct btrfs_root *root = BTRFS_I(inode)->root;
3368 	struct btrfs_key location;
3369 	int maybe_acls;
3370 	u32 rdev;
3371 	int ret;
3372 	bool filled = false;
3373 
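	/* the delayed inode item, if present, already has everything we need */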
3374 	ret = btrfs_fill_inode(inode, &rdev);
3375 	if (!ret)
3376 		filled = true;
3377 
3378 	path = btrfs_alloc_path();
3379 	if (!path)
3380 		goto make_bad;
3381 
3382 	path->leave_spinning = 1;
3383 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3384 
3385 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3386 	if (ret)
3387 		goto make_bad;
3388 
3389 	leaf = path->nodes[0];
3390 
3391 	if (filled)
3392 		goto cache_acl;
3393 
3394 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3395 				    struct btrfs_inode_item);
3396 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3397 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3398 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3399 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3400 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3401 
3402 	tspec = btrfs_inode_atime(inode_item);
3403 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3404 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3405 
3406 	tspec = btrfs_inode_mtime(inode_item);
3407 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3408 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3409 
3410 	tspec = btrfs_inode_ctime(inode_item);
3411 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3412 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3413 
3414 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3415 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3416 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3417 
3418 	/*
3419 	 * If we were modified in the current generation and evicted from memory
3420 	 * and then re-read we need to do a full sync since we don't have any
3421 	 * idea about which extents were modified before we were evicted from
3422 	 * cache.
3423 	 */
3424 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3425 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3426 			&BTRFS_I(inode)->runtime_flags);
3427 
3428 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3429 	inode->i_generation = BTRFS_I(inode)->generation;
3430 	inode->i_rdev = 0;
3431 	rdev = btrfs_inode_rdev(leaf, inode_item);
3432 
3433 	BTRFS_I(inode)->index_cnt = (u64)-1;
3434 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3435 cache_acl:
3436 	/*
3437 	 * try to precache a NULL acl entry for files that don't have
3438 	 * any xattrs or acls
3439 	 */
3440 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3441 					   btrfs_ino(inode));
3442 	if (!maybe_acls)
3443 		cache_no_acl(inode);
3444 
3445 	btrfs_free_path(path);
3446 
3447 	switch (inode->i_mode & S_IFMT) {
3448 	case S_IFREG:
3449 		inode->i_mapping->a_ops = &btrfs_aops;
3450 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3451 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3452 		inode->i_fop = &btrfs_file_operations;
3453 		inode->i_op = &btrfs_file_inode_operations;
3454 		break;
3455 	case S_IFDIR:
3456 		inode->i_fop = &btrfs_dir_file_operations;
3457 		if (root == root->fs_info->tree_root)
3458 			inode->i_op = &btrfs_dir_ro_inode_operations;
3459 		else
3460 			inode->i_op = &btrfs_dir_inode_operations;
3461 		break;
3462 	case S_IFLNK:
3463 		inode->i_op = &btrfs_symlink_inode_operations;
3464 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
3465 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3466 		break;
3467 	default:
3468 		inode->i_op = &btrfs_special_inode_operations;
3469 		init_special_inode(inode, inode->i_mode, rdev);
3470 		break;
3471 	}
3472 
3473 	btrfs_update_iflags(inode);
3474 	return;
3475 
3476 make_bad:
3477 	btrfs_free_path(path);
3478 	make_bad_inode(inode);
3479 }
3480 
3481 /*
3482  * given a leaf and an inode, copy the inode fields into the leaf
3483  */
3484 static void fill_inode_item(struct btrfs_trans_handle *trans,
3485 			    struct extent_buffer *leaf,
3486 			    struct btrfs_inode_item *item,
3487 			    struct inode *inode)
3488 {
3489 	struct btrfs_map_token token;
3490 
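	/*
	 * the map token caches the extent buffer mapping across all of the
	 * setters below so each store doesn't have to remap the leaf
	 */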
3491 	btrfs_init_map_token(&token);
3492 
3493 	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3494 	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3495 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3496 				   &token);
3497 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3498 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3499 
3500 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3501 				     inode->i_atime.tv_sec, &token);
3502 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3503 				      inode->i_atime.tv_nsec, &token);
3504 
3505 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3506 				     inode->i_mtime.tv_sec, &token);
3507 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3508 				      inode->i_mtime.tv_nsec, &token);
3509 
3510 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3511 				     inode->i_ctime.tv_sec, &token);
3512 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3513 				      inode->i_ctime.tv_nsec, &token);
3514 
3515 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3516 				     &token);
3517 	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3518 					 &token);
3519 	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3520 	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3521 	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3522 	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3523 	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3524 }
3525 
3526 /*
3527  * copy everything in the in-memory inode into the btree.
3528  */
3529 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3530 				struct btrfs_root *root, struct inode *inode)
3531 {
3532 	struct btrfs_inode_item *inode_item;
3533 	struct btrfs_path *path;
3534 	struct extent_buffer *leaf;
3535 	int ret;
3536 
3537 	path = btrfs_alloc_path();
3538 	if (!path)
3539 		return -ENOMEM;
3540 
3541 	path->leave_spinning = 1;
3542 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3543 				 1);
3544 	if (ret) {
3545 		if (ret > 0)
3546 			ret = -ENOENT;
3547 		goto failed;
3548 	}
3549 
3550 	btrfs_unlock_up_safe(path, 1);
3551 	leaf = path->nodes[0];
3552 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3553 				    struct btrfs_inode_item);
3554 
3555 	fill_inode_item(trans, leaf, inode_item, inode);
3556 	btrfs_mark_buffer_dirty(leaf);
3557 	btrfs_set_inode_last_trans(trans, inode);
3558 	ret = 0;
3559 failed:
3560 	btrfs_free_path(path);
3561 	return ret;
3562 }
3563 
3564 /*
3565  * copy everything in the in-memory inode into the btree.
3566  */
3567 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3568 				struct btrfs_root *root, struct inode *inode)
3569 {
3570 	int ret;
3571 
3572 	/*
3573 	 * If the inode is a free space inode, we can deadlock during commit
3574 	 * if we put it into the delayed code.
3575 	 *
3576 	 * The data relocation inode should also be directly updated
3577  * without delay.
3578 	 */
3579 	if (!btrfs_is_free_space_inode(inode)
3580 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
3581 		btrfs_update_root_times(trans, root);
3582 
3583 		ret = btrfs_delayed_update_inode(trans, root, inode);
3584 		if (!ret)
3585 			btrfs_set_inode_last_trans(trans, inode);
3586 		return ret;
3587 	}
3588 
3589 	return btrfs_update_inode_item(trans, root, inode);
3590 }
3591 
3592 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3593 					 struct btrfs_root *root,
3594 					 struct inode *inode)
3595 {
3596 	int ret;
3597 
3598 	ret = btrfs_update_inode(trans, root, inode);
3599 	if (ret == -ENOSPC)
3600 		return btrfs_update_inode_item(trans, root, inode);
3601 	return ret;
3602 }
3603 
3604 /*
3605  * unlink helper that gets used here in inode.c and in the tree logging
3606  * recovery code.  It remove a link in a directory with a given name, and
3607  * also drops the back refs in the inode to the directory
3608  */
3609 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3610 				struct btrfs_root *root,
3611 				struct inode *dir, struct inode *inode,
3612 				const char *name, int name_len)
3613 {
3614 	struct btrfs_path *path;
3615 	int ret = 0;
3616 	struct extent_buffer *leaf;
3617 	struct btrfs_dir_item *di;
3618 	struct btrfs_key key;
3619 	u64 index;
3620 	u64 ino = btrfs_ino(inode);
3621 	u64 dir_ino = btrfs_ino(dir);
3622 
3623 	path = btrfs_alloc_path();
3624 	if (!path) {
3625 		ret = -ENOMEM;
3626 		goto out;
3627 	}
3628 
3629 	path->leave_spinning = 1;
3630 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3631 				    name, name_len, -1);
3632 	if (IS_ERR(di)) {
3633 		ret = PTR_ERR(di);
3634 		goto err;
3635 	}
3636 	if (!di) {
3637 		ret = -ENOENT;
3638 		goto err;
3639 	}
3640 	leaf = path->nodes[0];
3641 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3642 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3643 	if (ret)
3644 		goto err;
3645 	btrfs_release_path(path);
3646 
3647 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3648 				  dir_ino, &index);
3649 	if (ret) {
3650 		btrfs_info(root->fs_info,
3651 			"failed to delete reference to %.*s, inode %llu parent %llu",
3652 			name_len, name,
3653 			(unsigned long long)ino, (unsigned long long)dir_ino);
3654 		btrfs_abort_transaction(trans, root, ret);
3655 		goto err;
3656 	}
3657 
3658 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3659 	if (ret) {
3660 		btrfs_abort_transaction(trans, root, ret);
3661 		goto err;
3662 	}
3663 
3664 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3665 					 inode, dir_ino);
3666 	if (ret != 0 && ret != -ENOENT) {
3667 		btrfs_abort_transaction(trans, root, ret);
3668 		goto err;
3669 	}
3670 
3671 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3672 					   dir, index);
3673 	if (ret == -ENOENT)
3674 		ret = 0;
3675 	else if (ret)
3676 		btrfs_abort_transaction(trans, root, ret);
3677 err:
3678 	btrfs_free_path(path);
3679 	if (ret)
3680 		goto out;
3681 
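	/*
	 * Directory i_size in btrfs counts every name twice, once for the
	 * dir item and once for the dir index, hence name_len * 2.
	 */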
3682 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3683 	inode_inc_iversion(inode);
3684 	inode_inc_iversion(dir);
3685 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3686 	ret = btrfs_update_inode(trans, root, dir);
3687 out:
3688 	return ret;
3689 }
3690 
3691 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3692 		       struct btrfs_root *root,
3693 		       struct inode *dir, struct inode *inode,
3694 		       const char *name, int name_len)
3695 {
3696 	int ret;
3697 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3698 	if (!ret) {
3699 		btrfs_drop_nlink(inode);
3700 		ret = btrfs_update_inode(trans, root, inode);
3701 	}
3702 	return ret;
3703 }
3704 
3705 /*
3706  * helper to start transaction for unlink and rmdir.
3707  *
3708  * unlink and rmdir are special in btrfs: they do not always free space, so
3709  * if we cannot make our reservations the normal way, try to see if there is
3710  * enough slack room in the global reserve to migrate; otherwise we cannot
3711  * allow the unlink to occur.
3712  */
3713 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
3714 {
3715 	struct btrfs_trans_handle *trans;
3716 	struct btrfs_root *root = BTRFS_I(dir)->root;
3717 	int ret;
3718 
3719 	/*
3720 	 * 1 for the possible orphan item
3721 	 * 1 for the dir item
3722 	 * 1 for the dir index
3723 	 * 1 for the inode ref
3724 	 * 1 for the inode
3725 	 */
3726 	trans = btrfs_start_transaction(root, 5);
3727 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3728 		return trans;
3729 
3730 	if (PTR_ERR(trans) == -ENOSPC) {
3731 		u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3732 
3733 		trans = btrfs_start_transaction(root, 0);
3734 		if (IS_ERR(trans))
3735 			return trans;
3736 		ret = btrfs_cond_migrate_bytes(root->fs_info,
3737 					       &root->fs_info->trans_block_rsv,
3738 					       num_bytes, 5);
3739 		if (ret) {
3740 			btrfs_end_transaction(trans, root);
3741 			return ERR_PTR(ret);
3742 		}
3743 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3744 		trans->bytes_reserved = num_bytes;
3745 	}
3746 	return trans;
3747 }
3748 
3749 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3750 {
3751 	struct btrfs_root *root = BTRFS_I(dir)->root;
3752 	struct btrfs_trans_handle *trans;
3753 	struct inode *inode = dentry->d_inode;
3754 	int ret;
3755 
3756 	trans = __unlink_start_trans(dir);
3757 	if (IS_ERR(trans))
3758 		return PTR_ERR(trans);
3759 
3760 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3761 
3762 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3763 				 dentry->d_name.name, dentry->d_name.len);
3764 	if (ret)
3765 		goto out;
3766 
3767 	if (inode->i_nlink == 0) {
3768 		ret = btrfs_orphan_add(trans, inode);
3769 		if (ret)
3770 			goto out;
3771 	}
3772 
3773 out:
3774 	btrfs_end_transaction(trans, root);
3775 	btrfs_btree_balance_dirty(root);
3776 	return ret;
3777 }
3778 
3779 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3780 			struct btrfs_root *root,
3781 			struct inode *dir, u64 objectid,
3782 			const char *name, int name_len)
3783 {
3784 	struct btrfs_path *path;
3785 	struct extent_buffer *leaf;
3786 	struct btrfs_dir_item *di;
3787 	struct btrfs_key key;
3788 	u64 index;
3789 	int ret;
3790 	u64 dir_ino = btrfs_ino(dir);
3791 
3792 	path = btrfs_alloc_path();
3793 	if (!path)
3794 		return -ENOMEM;
3795 
3796 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3797 				   name, name_len, -1);
3798 	if (IS_ERR_OR_NULL(di)) {
3799 		if (!di)
3800 			ret = -ENOENT;
3801 		else
3802 			ret = PTR_ERR(di);
3803 		goto out;
3804 	}
3805 
3806 	leaf = path->nodes[0];
3807 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3808 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3809 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3810 	if (ret) {
3811 		btrfs_abort_transaction(trans, root, ret);
3812 		goto out;
3813 	}
3814 	btrfs_release_path(path);
3815 
3816 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3817 				 objectid, root->root_key.objectid,
3818 				 dir_ino, &index, name, name_len);
3819 	if (ret < 0) {
3820 		if (ret != -ENOENT) {
3821 			btrfs_abort_transaction(trans, root, ret);
3822 			goto out;
3823 		}
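		/*
		 * -ENOENT means the root backref was not found; this can
		 * happen e.g. for subvolumes created before root backrefs
		 * were recorded, so fall back to scanning the dir index
		 * item to recover the index.
		 */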
3824 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3825 						 name, name_len);
3826 		if (IS_ERR_OR_NULL(di)) {
3827 			if (!di)
3828 				ret = -ENOENT;
3829 			else
3830 				ret = PTR_ERR(di);
3831 			btrfs_abort_transaction(trans, root, ret);
3832 			goto out;
3833 		}
3834 
3835 		leaf = path->nodes[0];
3836 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3837 		btrfs_release_path(path);
3838 		index = key.offset;
3839 	}
3840 	btrfs_release_path(path);
3841 
3842 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3843 	if (ret) {
3844 		btrfs_abort_transaction(trans, root, ret);
3845 		goto out;
3846 	}
3847 
3848 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3849 	inode_inc_iversion(dir);
3850 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3851 	ret = btrfs_update_inode_fallback(trans, root, dir);
3852 	if (ret)
3853 		btrfs_abort_transaction(trans, root, ret);
3854 out:
3855 	btrfs_free_path(path);
3856 	return ret;
3857 }
3858 
3859 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3860 {
3861 	struct inode *inode = dentry->d_inode;
3862 	int err = 0;
3863 	struct btrfs_root *root = BTRFS_I(dir)->root;
3864 	struct btrfs_trans_handle *trans;
3865 
3866 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3867 		return -ENOTEMPTY;
3868 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3869 		return -EPERM;
3870 
3871 	trans = __unlink_start_trans(dir);
3872 	if (IS_ERR(trans))
3873 		return PTR_ERR(trans);
3874 
3875 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3876 		err = btrfs_unlink_subvol(trans, root, dir,
3877 					  BTRFS_I(inode)->location.objectid,
3878 					  dentry->d_name.name,
3879 					  dentry->d_name.len);
3880 		goto out;
3881 	}
3882 
3883 	err = btrfs_orphan_add(trans, inode);
3884 	if (err)
3885 		goto out;
3886 
3887 	/* now the directory is empty */
3888 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3889 				 dentry->d_name.name, dentry->d_name.len);
3890 	if (!err)
3891 		btrfs_i_size_write(inode, 0);
3892 out:
3893 	btrfs_end_transaction(trans, root);
3894 	btrfs_btree_balance_dirty(root);
3895 
3896 	return err;
3897 }
3898 
3899 /*
3900  * this can truncate away extent items, csum items and directory items.
3901  * It starts at a high offset and removes keys until it can't find
3902  * any higher than new_size
3903  *
3904  * csum items that cross the new i_size are truncated to the new size
3905  * as well.
3906  *
3907  * min_type is the minimum key type to truncate down to.  If set to 0, this
3908  * will kill all the items on this inode, including the INODE_ITEM_KEY.
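 *
 * For example, btrfs_evict_inode() below passes new_size == 0 and
 * min_type == 0 to delete every item belonging to the inode, while a
 * size-changing truncate must pass BTRFS_EXTENT_DATA_KEY (enforced by
 * the BUG_ON below) so the inode item itself survives.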
3909  */
3910 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3911 			       struct btrfs_root *root,
3912 			       struct inode *inode,
3913 			       u64 new_size, u32 min_type)
3914 {
3915 	struct btrfs_path *path;
3916 	struct extent_buffer *leaf;
3917 	struct btrfs_file_extent_item *fi;
3918 	struct btrfs_key key;
3919 	struct btrfs_key found_key;
3920 	u64 extent_start = 0;
3921 	u64 extent_num_bytes = 0;
3922 	u64 extent_offset = 0;
3923 	u64 item_end = 0;
3924 	u32 found_type = (u8)-1;
3925 	int found_extent;
3926 	int del_item;
3927 	int pending_del_nr = 0;
3928 	int pending_del_slot = 0;
3929 	int extent_type = -1;
3930 	int ret;
3931 	int err = 0;
3932 	u64 ino = btrfs_ino(inode);
3933 
3934 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3935 
3936 	path = btrfs_alloc_path();
3937 	if (!path)
3938 		return -ENOMEM;
3939 	path->reada = -1;
3940 
3941 	/*
3942 	 * We want to drop from the next block forward in case this new size is
3943 	 * not block aligned since we will be keeping the last block of the
3944 	 * extent just the way it is.
3945 	 */
3946 	if (root->ref_cows || root == root->fs_info->tree_root)
3947 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
3948 					root->sectorsize), (u64)-1, 0);
3949 
3950 	/*
3951 	 * This function is also used to drop the items in the log tree before
3952 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3953 	 * it is used to drop the logged items. So we shouldn't kill the delayed
3954 	 * items.
3955 	 */
3956 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3957 		btrfs_kill_delayed_inode_items(inode);
3958 
3959 	key.objectid = ino;
3960 	key.offset = (u64)-1;
3961 	key.type = (u8)-1;
3962 
3963 search_again:
3964 	path->leave_spinning = 1;
3965 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3966 	if (ret < 0) {
3967 		err = ret;
3968 		goto out;
3969 	}
3970 
3971 	if (ret > 0) {
3972 		/* there are no items in the tree for us to truncate, we're
3973 		 * done
3974 		 */
3975 		if (path->slots[0] == 0)
3976 			goto out;
3977 		path->slots[0]--;
3978 	}
3979 
3980 	while (1) {
3981 		fi = NULL;
3982 		leaf = path->nodes[0];
3983 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3984 		found_type = btrfs_key_type(&found_key);
3985 
3986 		if (found_key.objectid != ino)
3987 			break;
3988 
3989 		if (found_type < min_type)
3990 			break;
3991 
3992 		item_end = found_key.offset;
3993 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3994 			fi = btrfs_item_ptr(leaf, path->slots[0],
3995 					    struct btrfs_file_extent_item);
3996 			extent_type = btrfs_file_extent_type(leaf, fi);
3997 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3998 				item_end +=
3999 				    btrfs_file_extent_num_bytes(leaf, fi);
4000 			} else {
4001 				item_end += btrfs_file_extent_inline_len(leaf,
4002 									 fi);
4003 			}
4004 			item_end--;
4005 		}
4006 		if (found_type > min_type) {
4007 			del_item = 1;
4008 		} else {
4009 			if (item_end < new_size)
4010 				break;
4011 			if (found_key.offset >= new_size)
4012 				del_item = 1;
4013 			else
4014 				del_item = 0;
4015 		}
4016 		found_extent = 0;
4017 		/* FIXME, shrink the extent if the ref count is only 1 */
4018 		if (found_type != BTRFS_EXTENT_DATA_KEY)
4019 			goto delete;
4020 
4021 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4022 			u64 num_dec;
4023 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4024 			if (!del_item) {
4025 				u64 orig_num_bytes =
4026 					btrfs_file_extent_num_bytes(leaf, fi);
4027 				extent_num_bytes = ALIGN(new_size -
4028 						found_key.offset,
4029 						root->sectorsize);
4030 				btrfs_set_file_extent_num_bytes(leaf, fi,
4031 							 extent_num_bytes);
4032 				num_dec = (orig_num_bytes -
4033 					   extent_num_bytes);
4034 				if (root->ref_cows && extent_start != 0)
4035 					inode_sub_bytes(inode, num_dec);
4036 				btrfs_mark_buffer_dirty(leaf);
4037 			} else {
4038 				extent_num_bytes =
4039 					btrfs_file_extent_disk_num_bytes(leaf,
4040 									 fi);
4041 				extent_offset = found_key.offset -
4042 					btrfs_file_extent_offset(leaf, fi);
4043 
4044 				/* FIXME blocksize != 4096 */
4045 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4046 				if (extent_start != 0) {
4047 					found_extent = 1;
4048 					if (root->ref_cows)
4049 						inode_sub_bytes(inode, num_dec);
4050 				}
4051 			}
4052 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4053 			/*
4054 			 * we can't truncate inline items that have had
4055 			 * special encodings
4056 			 */
4057 			if (!del_item &&
4058 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
4059 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4060 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4061 				u32 size = new_size - found_key.offset;
4062 
4063 				if (root->ref_cows) {
4064 					inode_sub_bytes(inode, item_end + 1 -
4065 							new_size);
4066 				}
4067 				size =
4068 				    btrfs_file_extent_calc_inline_size(size);
4069 				btrfs_truncate_item(root, path, size, 1);
4070 			} else if (root->ref_cows) {
4071 				inode_sub_bytes(inode, item_end + 1 -
4072 						found_key.offset);
4073 			}
4074 		}
4075 delete:
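		/*
		 * Deletions are batched: runs of adjacent leaf slots are
		 * collected in pending_del_slot/pending_del_nr and removed
		 * with a single btrfs_del_items() call further down.
		 */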
4076 		if (del_item) {
4077 			if (!pending_del_nr) {
4078 				/* no pending yet, add ourselves */
4079 				pending_del_slot = path->slots[0];
4080 				pending_del_nr = 1;
4081 			} else if (pending_del_nr &&
4082 				   path->slots[0] + 1 == pending_del_slot) {
4083 				/* hop on the pending chunk */
4084 				pending_del_nr++;
4085 				pending_del_slot = path->slots[0];
4086 			} else {
4087 				BUG();
4088 			}
4089 		} else {
4090 			break;
4091 		}
4092 		if (found_extent && (root->ref_cows ||
4093 				     root == root->fs_info->tree_root)) {
4094 			btrfs_set_path_blocking(path);
4095 			ret = btrfs_free_extent(trans, root, extent_start,
4096 						extent_num_bytes, 0,
4097 						btrfs_header_owner(leaf),
4098 						ino, extent_offset, 0);
4099 			BUG_ON(ret);
4100 		}
4101 
4102 		if (found_type == BTRFS_INODE_ITEM_KEY)
4103 			break;
4104 
4105 		if (path->slots[0] == 0 ||
4106 		    path->slots[0] != pending_del_slot) {
4107 			if (pending_del_nr) {
4108 				ret = btrfs_del_items(trans, root, path,
4109 						pending_del_slot,
4110 						pending_del_nr);
4111 				if (ret) {
4112 					btrfs_abort_transaction(trans,
4113 								root, ret);
4114 					goto error;
4115 				}
4116 				pending_del_nr = 0;
4117 			}
4118 			btrfs_release_path(path);
4119 			goto search_again;
4120 		} else {
4121 			path->slots[0]--;
4122 		}
4123 	}
4124 out:
4125 	if (pending_del_nr) {
4126 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
4127 				      pending_del_nr);
4128 		if (ret)
4129 			btrfs_abort_transaction(trans, root, ret);
4130 	}
4131 error:
4132 	btrfs_free_path(path);
4133 	return err;
4134 }
4135 
4136 /*
4137  * btrfs_truncate_page - read, zero a chunk and write a page
4138  * @inode - inode that we're zeroing
4139  * @from - the offset to start zeroing
4140  * @len - the length to zero, 0 to zero the entire range relative to the
4141  *	offset
4142  * @front - zero up to the offset instead of from the offset on
4143  *
4144  * This will find the page for the "from" offset and cow the page and zero the
4145  * part we want to zero.  This is used with truncate and hole punching.
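 *
 * For example, btrfs_cont_expand() below calls
 *	btrfs_truncate_page(inode, oldsize, 0, 0);
 * to zero from the old EOF to the end of that page before growing i_size.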
4146  */
4147 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4148 			int front)
4149 {
4150 	struct address_space *mapping = inode->i_mapping;
4151 	struct btrfs_root *root = BTRFS_I(inode)->root;
4152 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4153 	struct btrfs_ordered_extent *ordered;
4154 	struct extent_state *cached_state = NULL;
4155 	char *kaddr;
4156 	u32 blocksize = root->sectorsize;
4157 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
4158 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
4159 	struct page *page;
4160 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4161 	int ret = 0;
4162 	u64 page_start;
4163 	u64 page_end;
4164 
4165 	if ((offset & (blocksize - 1)) == 0 &&
4166 	    (!len || ((len & (blocksize - 1)) == 0)))
4167 		goto out;
4168 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4169 	if (ret)
4170 		goto out;
4171 
4172 again:
4173 	page = find_or_create_page(mapping, index, mask);
4174 	if (!page) {
4175 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4176 		ret = -ENOMEM;
4177 		goto out;
4178 	}
4179 
4180 	page_start = page_offset(page);
4181 	page_end = page_start + PAGE_CACHE_SIZE - 1;
4182 
4183 	if (!PageUptodate(page)) {
4184 		ret = btrfs_readpage(NULL, page);
4185 		lock_page(page);
4186 		if (page->mapping != mapping) {
4187 			unlock_page(page);
4188 			page_cache_release(page);
4189 			goto again;
4190 		}
4191 		if (!PageUptodate(page)) {
4192 			ret = -EIO;
4193 			goto out_unlock;
4194 		}
4195 	}
4196 	wait_on_page_writeback(page);
4197 
4198 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4199 	set_page_extent_mapped(page);
4200 
4201 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
4202 	if (ordered) {
4203 		unlock_extent_cached(io_tree, page_start, page_end,
4204 				     &cached_state, GFP_NOFS);
4205 		unlock_page(page);
4206 		page_cache_release(page);
4207 		btrfs_start_ordered_extent(inode, ordered, 1);
4208 		btrfs_put_ordered_extent(ordered);
4209 		goto again;
4210 	}
4211 
4212 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4213 			  EXTENT_DIRTY | EXTENT_DELALLOC |
4214 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4215 			  0, 0, &cached_state, GFP_NOFS);
4216 
4217 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4218 					&cached_state);
4219 	if (ret) {
4220 		unlock_extent_cached(io_tree, page_start, page_end,
4221 				     &cached_state, GFP_NOFS);
4222 		goto out_unlock;
4223 	}
4224 
4225 	if (offset != PAGE_CACHE_SIZE) {
4226 		if (!len)
4227 			len = PAGE_CACHE_SIZE - offset;
4228 		kaddr = kmap(page);
4229 		if (front)
4230 			memset(kaddr, 0, offset);
4231 		else
4232 			memset(kaddr + offset, 0, len);
4233 		flush_dcache_page(page);
4234 		kunmap(page);
4235 	}
4236 	ClearPageChecked(page);
4237 	set_page_dirty(page);
4238 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4239 			     GFP_NOFS);
4240 
4241 out_unlock:
4242 	if (ret)
4243 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4244 	unlock_page(page);
4245 	page_cache_release(page);
4246 out:
4247 	return ret;
4248 }
4249 
4250 /*
4251  * This function puts in dummy file extents for the area we're creating a hole
4252  * for.  So if we are truncating this file to a larger size we need to insert
4253  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4254  * the range between oldsize and size
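 *
 * Each hole is recorded twice: as an on-disk file extent with a disk
 * bytenr of 0, and as an EXTENT_MAP_HOLE entry in the in-memory extent
 * map tree so later lookups don't have to go back to the btree.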
4255  */
4256 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4257 {
4258 	struct btrfs_trans_handle *trans;
4259 	struct btrfs_root *root = BTRFS_I(inode)->root;
4260 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4261 	struct extent_map *em = NULL;
4262 	struct extent_state *cached_state = NULL;
4263 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4264 	u64 hole_start = ALIGN(oldsize, root->sectorsize);
4265 	u64 block_end = ALIGN(size, root->sectorsize);
4266 	u64 last_byte;
4267 	u64 cur_offset;
4268 	u64 hole_size;
4269 	int err = 0;
4270 
4271 	/*
4272 	 * If our size started in the middle of a page we need to zero out the
4273 	 * rest of the page before we expand the i_size, otherwise we could
4274 	 * expose stale data.
4275 	 */
4276 	err = btrfs_truncate_page(inode, oldsize, 0, 0);
4277 	if (err)
4278 		return err;
4279 
4280 	if (size <= hole_start)
4281 		return 0;
4282 
4283 	while (1) {
4284 		struct btrfs_ordered_extent *ordered;
4285 		btrfs_wait_ordered_range(inode, hole_start,
4286 					 block_end - hole_start);
4287 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4288 				 &cached_state);
4289 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
4290 		if (!ordered)
4291 			break;
4292 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
4293 				     &cached_state, GFP_NOFS);
4294 		btrfs_put_ordered_extent(ordered);
4295 	}
4296 
4297 	cur_offset = hole_start;
4298 	while (1) {
4299 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4300 				block_end - cur_offset, 0);
4301 		if (IS_ERR(em)) {
4302 			err = PTR_ERR(em);
4303 			em = NULL;
4304 			break;
4305 		}
4306 		last_byte = min(extent_map_end(em), block_end);
4307 		last_byte = ALIGN(last_byte, root->sectorsize);
4308 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4309 			struct extent_map *hole_em;
4310 			hole_size = last_byte - cur_offset;
4311 
4312 			trans = btrfs_start_transaction(root, 3);
4313 			if (IS_ERR(trans)) {
4314 				err = PTR_ERR(trans);
4315 				break;
4316 			}
4317 
4318 			err = btrfs_drop_extents(trans, root, inode,
4319 						 cur_offset,
4320 						 cur_offset + hole_size, 1);
4321 			if (err) {
4322 				btrfs_abort_transaction(trans, root, err);
4323 				btrfs_end_transaction(trans, root);
4324 				break;
4325 			}
4326 
4327 			err = btrfs_insert_file_extent(trans, root,
4328 					btrfs_ino(inode), cur_offset, 0,
4329 					0, hole_size, 0, hole_size,
4330 					0, 0, 0);
4331 			if (err) {
4332 				btrfs_abort_transaction(trans, root, err);
4333 				btrfs_end_transaction(trans, root);
4334 				break;
4335 			}
4336 
4337 			btrfs_drop_extent_cache(inode, cur_offset,
4338 						cur_offset + hole_size - 1, 0);
4339 			hole_em = alloc_extent_map();
4340 			if (!hole_em) {
4341 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4342 					&BTRFS_I(inode)->runtime_flags);
4343 				goto next;
4344 			}
4345 			hole_em->start = cur_offset;
4346 			hole_em->len = hole_size;
4347 			hole_em->orig_start = cur_offset;
4348 
4349 			hole_em->block_start = EXTENT_MAP_HOLE;
4350 			hole_em->block_len = 0;
4351 			hole_em->orig_block_len = 0;
4352 			hole_em->ram_bytes = hole_size;
4353 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4354 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
4355 			hole_em->generation = trans->transid;
4356 
4357 			while (1) {
4358 				write_lock(&em_tree->lock);
4359 				err = add_extent_mapping(em_tree, hole_em, 1);
4360 				write_unlock(&em_tree->lock);
4361 				if (err != -EEXIST)
4362 					break;
4363 				btrfs_drop_extent_cache(inode, cur_offset,
4364 							cur_offset +
4365 							hole_size - 1, 0);
4366 			}
4367 			free_extent_map(hole_em);
4368 next:
4369 			btrfs_update_inode(trans, root, inode);
4370 			btrfs_end_transaction(trans, root);
4371 		}
4372 		free_extent_map(em);
4373 		em = NULL;
4374 		cur_offset = last_byte;
4375 		if (cur_offset >= block_end)
4376 			break;
4377 	}
4378 
4379 	free_extent_map(em);
4380 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4381 			     GFP_NOFS);
4382 	return err;
4383 }
4384 
4385 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4386 {
4387 	struct btrfs_root *root = BTRFS_I(inode)->root;
4388 	struct btrfs_trans_handle *trans;
4389 	loff_t oldsize = i_size_read(inode);
4390 	loff_t newsize = attr->ia_size;
4391 	int mask = attr->ia_valid;
4392 	int ret;
4393 
4394 	if (newsize == oldsize)
4395 		return 0;
4396 
4397 	/*
4398 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4399 	 * special case where we need to update the times despite not having
4400 	 * these flags set.  For all other operations the VFS set these flags
4401 	 * explicitly if it wants a timestamp update.
4402 	 */
4403 	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
4404 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
4405 
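	/*
	 * Growing is the cheap direction: zero the tail of the old last
	 * page, insert hole extents and bump i_size.  Shrinking goes
	 * through the orphan item machinery so a crash in the middle of
	 * the truncate can be finished off on the next mount.
	 */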
4406 	if (newsize > oldsize) {
4407 		truncate_pagecache(inode, oldsize, newsize);
4408 		ret = btrfs_cont_expand(inode, oldsize, newsize);
4409 		if (ret)
4410 			return ret;
4411 
4412 		trans = btrfs_start_transaction(root, 1);
4413 		if (IS_ERR(trans))
4414 			return PTR_ERR(trans);
4415 
4416 		i_size_write(inode, newsize);
4417 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4418 		ret = btrfs_update_inode(trans, root, inode);
4419 		btrfs_end_transaction(trans, root);
4420 	} else {
4421 
4422 		/*
4423 		 * We're truncating a file that used to have good data down to
4424 		 * zero. Make sure it gets into the ordered flush list so that
4425 		 * any new writes get down to disk quickly.
4426 		 */
4427 		if (newsize == 0)
4428 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4429 				&BTRFS_I(inode)->runtime_flags);
4430 
4431 		/*
4432 		 * 1 for the orphan item we're going to add
4433 		 * 1 for the orphan item deletion.
4434 		 */
4435 		trans = btrfs_start_transaction(root, 2);
4436 		if (IS_ERR(trans))
4437 			return PTR_ERR(trans);
4438 
4439 		/*
4440 		 * We need to do this in case we fail at _any_ point during the
4441 		 * actual truncate.  Once we do the truncate_setsize we could
4442 		 * invalidate pages which forces any outstanding ordered io to
4443 		 * be instantly completed which will give us extents that need
4444 		 * to be truncated.  If we fail to get an orphan inode down we
4445 		 * could have left over extents that were never meant to live,
4446 		 * so we need to guarantee from this point on that everything
4447 		 * will be consistent.
4448 		 */
4449 		ret = btrfs_orphan_add(trans, inode);
4450 		btrfs_end_transaction(trans, root);
4451 		if (ret)
4452 			return ret;
4453 
4454 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
4455 		truncate_setsize(inode, newsize);
4456 
4457 		/* Disable non-locked read DIO to avoid an endless truncate */
4458 		btrfs_inode_block_unlocked_dio(inode);
4459 		inode_dio_wait(inode);
4460 		btrfs_inode_resume_unlocked_dio(inode);
4461 
4462 		ret = btrfs_truncate(inode);
4463 		if (ret && inode->i_nlink)
4464 			btrfs_orphan_del(NULL, inode);
4465 	}
4466 
4467 	return ret;
4468 }
4469 
4470 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4471 {
4472 	struct inode *inode = dentry->d_inode;
4473 	struct btrfs_root *root = BTRFS_I(inode)->root;
4474 	int err;
4475 
4476 	if (btrfs_root_readonly(root))
4477 		return -EROFS;
4478 
4479 	err = inode_change_ok(inode, attr);
4480 	if (err)
4481 		return err;
4482 
4483 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4484 		err = btrfs_setsize(inode, attr);
4485 		if (err)
4486 			return err;
4487 	}
4488 
4489 	if (attr->ia_valid) {
4490 		setattr_copy(inode, attr);
4491 		inode_inc_iversion(inode);
4492 		err = btrfs_dirty_inode(inode);
4493 
4494 		if (!err && attr->ia_valid & ATTR_MODE)
4495 			err = btrfs_acl_chmod(inode);
4496 	}
4497 
4498 	return err;
4499 }
4500 
4501 void btrfs_evict_inode(struct inode *inode)
4502 {
4503 	struct btrfs_trans_handle *trans;
4504 	struct btrfs_root *root = BTRFS_I(inode)->root;
4505 	struct btrfs_block_rsv *rsv, *global_rsv;
4506 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
4507 	int ret;
4508 
4509 	trace_btrfs_inode_evict(inode);
4510 
4511 	truncate_inode_pages(&inode->i_data, 0);
4512 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
4513 			       btrfs_is_free_space_inode(inode)))
4514 		goto no_delete;
4515 
4516 	if (is_bad_inode(inode)) {
4517 		btrfs_orphan_del(NULL, inode);
4518 		goto no_delete;
4519 	}
4520 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4521 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
4522 
4523 	if (root->fs_info->log_root_recovering) {
4524 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4525 				 &BTRFS_I(inode)->runtime_flags));
4526 		goto no_delete;
4527 	}
4528 
4529 	if (inode->i_nlink > 0) {
4530 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
4531 		goto no_delete;
4532 	}
4533 
4534 	ret = btrfs_commit_inode_delayed_inode(inode);
4535 	if (ret) {
4536 		btrfs_orphan_del(NULL, inode);
4537 		goto no_delete;
4538 	}
4539 
4540 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
4541 	if (!rsv) {
4542 		btrfs_orphan_del(NULL, inode);
4543 		goto no_delete;
4544 	}
4545 	rsv->size = min_size;
4546 	rsv->failfast = 1;
4547 	global_rsv = &root->fs_info->global_block_rsv;
4548 
4549 	btrfs_i_size_write(inode, 0);
4550 
4551 	/*
4552 	 * This is a bit simpler than btrfs_truncate since we've already
4553 	 * reserved our space for our orphan item in the unlink, so we just
4554 	 * need to reserve some slack space in case we add bytes and update
4555 	 * inode item when doing the truncate.
4556 	 */
4557 	while (1) {
4558 		ret = btrfs_block_rsv_refill(root, rsv, min_size,
4559 					     BTRFS_RESERVE_FLUSH_LIMIT);
4560 
4561 		/*
4562 		 * Try to steal from the global reserve; since we will
4563 		 * likely not use this space anyway, we want to try as
4564 		 * hard as possible to get this to work.
4565 		 */
4566 		if (ret)
4567 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
4568 
4569 		if (ret) {
4570 			btrfs_warn(root->fs_info,
4571 				"Could not get space for a delete, will truncate on mount %d",
4572 				ret);
4573 			btrfs_orphan_del(NULL, inode);
4574 			btrfs_free_block_rsv(root, rsv);
4575 			goto no_delete;
4576 		}
4577 
4578 		trans = btrfs_join_transaction(root);
4579 		if (IS_ERR(trans)) {
4580 			btrfs_orphan_del(NULL, inode);
4581 			btrfs_free_block_rsv(root, rsv);
4582 			goto no_delete;
4583 		}
4584 
4585 		trans->block_rsv = rsv;
4586 
4587 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4588 		if (ret != -ENOSPC)
4589 			break;
4590 
4591 		trans->block_rsv = &root->fs_info->trans_block_rsv;
4592 		btrfs_end_transaction(trans, root);
4593 		trans = NULL;
4594 		btrfs_btree_balance_dirty(root);
4595 	}
4596 
4597 	btrfs_free_block_rsv(root, rsv);
4598 
4599 	if (ret == 0) {
4600 		trans->block_rsv = root->orphan_block_rsv;
4601 		ret = btrfs_orphan_del(trans, inode);
4602 		BUG_ON(ret);
4603 	}
4604 
4605 	trans->block_rsv = &root->fs_info->trans_block_rsv;
4606 	if (!(root == root->fs_info->tree_root ||
4607 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4608 		btrfs_return_ino(root, btrfs_ino(inode));
4609 
4610 	btrfs_end_transaction(trans, root);
4611 	btrfs_btree_balance_dirty(root);
4612 no_delete:
4613 	btrfs_remove_delayed_node(inode);
4614 	clear_inode(inode);
4615 	return;
4616 }
4617 
4618 /*
4619  * this returns the key found in the dir entry in the location pointer.
4620  * If no dir entries were found, location->objectid is 0.
4621  */
4622 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4623 			       struct btrfs_key *location)
4624 {
4625 	const char *name = dentry->d_name.name;
4626 	int namelen = dentry->d_name.len;
4627 	struct btrfs_dir_item *di;
4628 	struct btrfs_path *path;
4629 	struct btrfs_root *root = BTRFS_I(dir)->root;
4630 	int ret = 0;
4631 
4632 	path = btrfs_alloc_path();
4633 	if (!path)
4634 		return -ENOMEM;
4635 
4636 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4637 				    namelen, 0);
4638 	if (IS_ERR(di))
4639 		ret = PTR_ERR(di);
4640 
4641 	if (IS_ERR_OR_NULL(di))
4642 		goto out_err;
4643 
4644 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4645 out:
4646 	btrfs_free_path(path);
4647 	return ret;
4648 out_err:
4649 	location->objectid = 0;
4650 	goto out;
4651 }
4652 
4653 /*
4654  * when we hit a tree root in a directory, the btrfs part of the inode
4655  * needs to be changed to reflect the root directory of the tree root.  This
4656  * is kind of like crossing a mount point.
4657  */
4658 static int fixup_tree_root_location(struct btrfs_root *root,
4659 				    struct inode *dir,
4660 				    struct dentry *dentry,
4661 				    struct btrfs_key *location,
4662 				    struct btrfs_root **sub_root)
4663 {
4664 	struct btrfs_path *path;
4665 	struct btrfs_root *new_root;
4666 	struct btrfs_root_ref *ref;
4667 	struct extent_buffer *leaf;
4668 	int ret;
4669 	int err = 0;
4670 
4671 	path = btrfs_alloc_path();
4672 	if (!path) {
4673 		err = -ENOMEM;
4674 		goto out;
4675 	}
4676 
4677 	err = -ENOENT;
4678 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
4679 				  BTRFS_I(dir)->root->root_key.objectid,
4680 				  location->objectid);
4681 	if (ret) {
4682 		if (ret < 0)
4683 			err = ret;
4684 		goto out;
4685 	}
4686 
4687 	leaf = path->nodes[0];
4688 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4689 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4690 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4691 		goto out;
4692 
4693 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4694 				   (unsigned long)(ref + 1),
4695 				   dentry->d_name.len);
4696 	if (ret)
4697 		goto out;
4698 
4699 	btrfs_release_path(path);
4700 
4701 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4702 	if (IS_ERR(new_root)) {
4703 		err = PTR_ERR(new_root);
4704 		goto out;
4705 	}
4706 
4707 	*sub_root = new_root;
4708 	location->objectid = btrfs_root_dirid(&new_root->root_item);
4709 	location->type = BTRFS_INODE_ITEM_KEY;
4710 	location->offset = 0;
4711 	err = 0;
4712 out:
4713 	btrfs_free_path(path);
4714 	return err;
4715 }
4716 
4717 static void inode_tree_add(struct inode *inode)
4718 {
4719 	struct btrfs_root *root = BTRFS_I(inode)->root;
4720 	struct btrfs_inode *entry;
4721 	struct rb_node **p;
4722 	struct rb_node *parent;
4723 	u64 ino = btrfs_ino(inode);
4724 
4725 	if (inode_unhashed(inode))
4726 		return;
4727 again:
4728 	parent = NULL;
4729 	spin_lock(&root->inode_lock);
4730 	p = &root->inode_tree.rb_node;
4731 	while (*p) {
4732 		parent = *p;
4733 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4734 
4735 		if (ino < btrfs_ino(&entry->vfs_inode))
4736 			p = &parent->rb_left;
4737 		else if (ino > btrfs_ino(&entry->vfs_inode))
4738 			p = &parent->rb_right;
4739 		else {
4740 			WARN_ON(!(entry->vfs_inode.i_state &
4741 				  (I_WILL_FREE | I_FREEING)));
4742 			rb_erase(parent, &root->inode_tree);
4743 			RB_CLEAR_NODE(parent);
4744 			spin_unlock(&root->inode_lock);
4745 			goto again;
4746 		}
4747 	}
4748 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
4749 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4750 	spin_unlock(&root->inode_lock);
4751 }
4752 
4753 static void inode_tree_del(struct inode *inode)
4754 {
4755 	struct btrfs_root *root = BTRFS_I(inode)->root;
4756 	int empty = 0;
4757 
4758 	spin_lock(&root->inode_lock);
4759 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4760 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4761 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4762 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4763 	}
4764 	spin_unlock(&root->inode_lock);
4765 
4766 	/*
4767 	 * Free space cache has inodes in the tree root, but the tree root has a
4768 	 * root_refs of 0, so this could end up dropping the tree root as a
4769 	 * snapshot, so we need the extra root != root->fs_info->tree_root check to
4770 	 * make sure we don't drop it.
4771 	 */
4772 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4773 	    root != root->fs_info->tree_root) {
4774 		synchronize_srcu(&root->fs_info->subvol_srcu);
4775 		spin_lock(&root->inode_lock);
4776 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4777 		spin_unlock(&root->inode_lock);
4778 		if (empty)
4779 			btrfs_add_dead_root(root);
4780 	}
4781 }
4782 
4783 void btrfs_invalidate_inodes(struct btrfs_root *root)
4784 {
4785 	struct rb_node *node;
4786 	struct rb_node *prev;
4787 	struct btrfs_inode *entry;
4788 	struct inode *inode;
4789 	u64 objectid = 0;
4790 
4791 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4792 
4793 	spin_lock(&root->inode_lock);
4794 again:
4795 	node = root->inode_tree.rb_node;
4796 	prev = NULL;
4797 	while (node) {
4798 		prev = node;
4799 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4800 
4801 		if (objectid < btrfs_ino(&entry->vfs_inode))
4802 			node = node->rb_left;
4803 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4804 			node = node->rb_right;
4805 		else
4806 			break;
4807 	}
4808 	if (!node) {
4809 		while (prev) {
4810 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4811 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4812 				node = prev;
4813 				break;
4814 			}
4815 			prev = rb_next(prev);
4816 		}
4817 	}
4818 	while (node) {
4819 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4820 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4821 		inode = igrab(&entry->vfs_inode);
4822 		if (inode) {
4823 			spin_unlock(&root->inode_lock);
4824 			if (atomic_read(&inode->i_count) > 1)
4825 				d_prune_aliases(inode);
4826 			/*
4827 			 * btrfs_drop_inode will have it removed from
4828 			 * the inode cache when its usage count
4829 			 * hits zero.
4830 			 */
4831 			iput(inode);
4832 			cond_resched();
4833 			spin_lock(&root->inode_lock);
4834 			goto again;
4835 		}
4836 
4837 		if (cond_resched_lock(&root->inode_lock))
4838 			goto again;
4839 
4840 		node = rb_next(node);
4841 	}
4842 	spin_unlock(&root->inode_lock);
4843 }
4844 
4845 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4846 {
4847 	struct btrfs_iget_args *args = p;
4848 	inode->i_ino = args->ino;
4849 	BTRFS_I(inode)->root = args->root;
4850 	return 0;
4851 }
4852 
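/*
 * iget5_locked() hashes only on the objectid, so the actor below must
 * also compare roots to tell apart equal inode numbers that live in
 * different subvolumes.
 */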
4853 static int btrfs_find_actor(struct inode *inode, void *opaque)
4854 {
4855 	struct btrfs_iget_args *args = opaque;
4856 	return args->ino == btrfs_ino(inode) &&
4857 		args->root == BTRFS_I(inode)->root;
4858 }
4859 
4860 static struct inode *btrfs_iget_locked(struct super_block *s,
4861 				       u64 objectid,
4862 				       struct btrfs_root *root)
4863 {
4864 	struct inode *inode;
4865 	struct btrfs_iget_args args;
4866 	args.ino = objectid;
4867 	args.root = root;
4868 
4869 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4870 			     btrfs_init_locked_inode,
4871 			     (void *)&args);
4872 	return inode;
4873 }
4874 
4875 /* Get an inode object given its location and corresponding root.
4876  * Returns in *new if the inode was read from disk.
4877  */
4878 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4879 			 struct btrfs_root *root, int *new)
4880 {
4881 	struct inode *inode;
4882 
4883 	inode = btrfs_iget_locked(s, location->objectid, root);
4884 	if (!inode)
4885 		return ERR_PTR(-ENOMEM);
4886 
4887 	if (inode->i_state & I_NEW) {
4888 		BTRFS_I(inode)->root = root;
4889 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4890 		btrfs_read_locked_inode(inode);
4891 		if (!is_bad_inode(inode)) {
4892 			inode_tree_add(inode);
4893 			unlock_new_inode(inode);
4894 			if (new)
4895 				*new = 1;
4896 		} else {
4897 			unlock_new_inode(inode);
4898 			iput(inode);
4899 			inode = ERR_PTR(-ESTALE);
4900 		}
4901 	}
4902 
4903 	return inode;
4904 }
4905 
4906 static struct inode *new_simple_dir(struct super_block *s,
4907 				    struct btrfs_key *key,
4908 				    struct btrfs_root *root)
4909 {
4910 	struct inode *inode = new_inode(s);
4911 
4912 	if (!inode)
4913 		return ERR_PTR(-ENOMEM);
4914 
4915 	BTRFS_I(inode)->root = root;
4916 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4917 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4918 
4919 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4920 	inode->i_op = &btrfs_dir_ro_inode_operations;
4921 	inode->i_fop = &simple_dir_operations;
4922 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4923 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4924 
4925 	return inode;
4926 }
4927 
4928 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4929 {
4930 	struct inode *inode;
4931 	struct btrfs_root *root = BTRFS_I(dir)->root;
4932 	struct btrfs_root *sub_root = root;
4933 	struct btrfs_key location;
4934 	int index;
4935 	int ret = 0;
4936 
4937 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4938 		return ERR_PTR(-ENAMETOOLONG);
4939 
4940 	ret = btrfs_inode_by_name(dir, dentry, &location);
4941 	if (ret < 0)
4942 		return ERR_PTR(ret);
4943 
4944 	if (location.objectid == 0)
4945 		return NULL;
4946 
4947 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4948 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4949 		return inode;
4950 	}
4951 
4952 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4953 
4954 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4955 	ret = fixup_tree_root_location(root, dir, dentry,
4956 				       &location, &sub_root);
4957 	if (ret < 0) {
4958 		if (ret != -ENOENT)
4959 			inode = ERR_PTR(ret);
4960 		else
4961 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4962 	} else {
4963 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4964 	}
4965 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4966 
4967 	if (!IS_ERR(inode) && root != sub_root) {
4968 		down_read(&root->fs_info->cleanup_work_sem);
4969 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4970 			ret = btrfs_orphan_cleanup(sub_root);
4971 		up_read(&root->fs_info->cleanup_work_sem);
4972 		if (ret) {
4973 			iput(inode);
4974 			inode = ERR_PTR(ret);
4975 		}
4976 	}
4977 
4978 	return inode;
4979 }
4980 
4981 static int btrfs_dentry_delete(const struct dentry *dentry)
4982 {
4983 	struct btrfs_root *root;
4984 	struct inode *inode = dentry->d_inode;
4985 
4986 	if (!inode && !IS_ROOT(dentry))
4987 		inode = dentry->d_parent->d_inode;
4988 
4989 	if (inode) {
4990 		root = BTRFS_I(inode)->root;
4991 		if (btrfs_root_refs(&root->root_item) == 0)
4992 			return 1;
4993 
4994 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4995 			return 1;
4996 	}
4997 	return 0;
4998 }
4999 
5000 static void btrfs_dentry_release(struct dentry *dentry)
5001 {
5002 	if (dentry->d_fsdata)
5003 		kfree(dentry->d_fsdata);
5004 }
5005 
5006 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5007 				   unsigned int flags)
5008 {
5009 	struct dentry *ret;
5010 
5011 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
5012 	return ret;
5013 }
5014 
5015 unsigned char btrfs_filetype_table[] = {
5016 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5017 };
5018 
5019 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5020 {
5021 	struct inode *inode = file_inode(file);
5022 	struct btrfs_root *root = BTRFS_I(inode)->root;
5023 	struct btrfs_item *item;
5024 	struct btrfs_dir_item *di;
5025 	struct btrfs_key key;
5026 	struct btrfs_key found_key;
5027 	struct btrfs_path *path;
5028 	struct list_head ins_list;
5029 	struct list_head del_list;
5030 	int ret;
5031 	struct extent_buffer *leaf;
5032 	int slot;
5033 	unsigned char d_type;
5034 	int over = 0;
5035 	u32 di_cur;
5036 	u32 di_total;
5037 	u32 di_len;
5038 	int key_type = BTRFS_DIR_INDEX_KEY;
5039 	char tmp_name[32];
5040 	char *name_ptr;
5041 	int name_len;
5042 	int is_curr = 0;	/* ctx->pos points to the current index? */
5043 
5044 	/* FIXME, use a real flag for deciding about the key type */
5045 	if (root->fs_info->tree_root == root)
5046 		key_type = BTRFS_DIR_ITEM_KEY;
5047 
5048 	if (!dir_emit_dots(file, ctx))
5049 		return 0;
5050 
5051 	path = btrfs_alloc_path();
5052 	if (!path)
5053 		return -ENOMEM;
5054 
5055 	path->reada = 1;
5056 
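	/*
	 * Dir index items may still be pending in the delayed-items code,
	 * so snapshot them up front: deleted ones are skipped during the
	 * tree walk below and inserted ones are emitted after it.
	 */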
5057 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5058 		INIT_LIST_HEAD(&ins_list);
5059 		INIT_LIST_HEAD(&del_list);
5060 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
5061 	}
5062 
5063 	btrfs_set_key_type(&key, key_type);
5064 	key.offset = ctx->pos;
5065 	key.objectid = btrfs_ino(inode);
5066 
5067 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5068 	if (ret < 0)
5069 		goto err;
5070 
5071 	while (1) {
5072 		leaf = path->nodes[0];
5073 		slot = path->slots[0];
5074 		if (slot >= btrfs_header_nritems(leaf)) {
5075 			ret = btrfs_next_leaf(root, path);
5076 			if (ret < 0)
5077 				goto err;
5078 			else if (ret > 0)
5079 				break;
5080 			continue;
5081 		}
5082 
5083 		item = btrfs_item_nr(leaf, slot);
5084 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5085 
5086 		if (found_key.objectid != key.objectid)
5087 			break;
5088 		if (btrfs_key_type(&found_key) != key_type)
5089 			break;
5090 		if (found_key.offset < ctx->pos)
5091 			goto next;
5092 		if (key_type == BTRFS_DIR_INDEX_KEY &&
5093 		    btrfs_should_delete_dir_index(&del_list,
5094 						  found_key.offset))
5095 			goto next;
5096 
5097 		ctx->pos = found_key.offset;
5098 		is_curr = 1;
5099 
5100 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5101 		di_cur = 0;
5102 		di_total = btrfs_item_size(leaf, item);
5103 
5104 		while (di_cur < di_total) {
5105 			struct btrfs_key location;
5106 
5107 			if (verify_dir_item(root, leaf, di))
5108 				break;
5109 
5110 			name_len = btrfs_dir_name_len(leaf, di);
5111 			if (name_len <= sizeof(tmp_name)) {
5112 				name_ptr = tmp_name;
5113 			} else {
5114 				name_ptr = kmalloc(name_len, GFP_NOFS);
5115 				if (!name_ptr) {
5116 					ret = -ENOMEM;
5117 					goto err;
5118 				}
5119 			}
5120 			read_extent_buffer(leaf, name_ptr,
5121 					   (unsigned long)(di + 1), name_len);
5122 
5123 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5124 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
5125 
5126 
5127 			/* is this a reference to our own snapshot? If so
5128 			 * skip it.
5129 			 *
5130 			 * In contrast to old kernels, we insert the snapshot's
5131 			 * dir item and dir index after it has been created, so
5132 			 * we won't find a reference to our own snapshot. We
5133 			 * still keep the following code for backward
5134 			 * compatibility.
5135 			 */
5136 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
5137 			    location.objectid == root->root_key.objectid) {
5138 				over = 0;
5139 				goto skip;
5140 			}
5141 			over = !dir_emit(ctx, name_ptr, name_len,
5142 				       location.objectid, d_type);
5143 
5144 skip:
5145 			if (name_ptr != tmp_name)
5146 				kfree(name_ptr);
5147 
5148 			if (over)
5149 				goto nopos;
5150 			di_len = btrfs_dir_name_len(leaf, di) +
5151 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5152 			di_cur += di_len;
5153 			di = (struct btrfs_dir_item *)((char *)di + di_len);
5154 		}
5155 next:
5156 		path->slots[0]++;
5157 	}
5158 
5159 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5160 		if (is_curr)
5161 			ctx->pos++;
5162 		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5163 		if (ret)
5164 			goto nopos;
5165 	}
5166 
5167 	/* Reached end of directory/root. Bump pos past the last item. */
5168 	if (key_type == BTRFS_DIR_INDEX_KEY)
5169 		/*
5170 		 * 32-bit glibc will use getdents64, but then strtol -
5171 		 * so the last number we can serve is this.
5172 		 */
5173 		ctx->pos = 0x7fffffff;
5174 	else
5175 		ctx->pos++;
5176 nopos:
5177 	ret = 0;
5178 err:
5179 	if (key_type == BTRFS_DIR_INDEX_KEY)
5180 		btrfs_put_delayed_items(&ins_list, &del_list);
5181 	btrfs_free_path(path);
5182 	return ret;
5183 }
5184 
5185 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5186 {
5187 	struct btrfs_root *root = BTRFS_I(inode)->root;
5188 	struct btrfs_trans_handle *trans;
5189 	int ret = 0;
5190 	bool nolock = false;
5191 
5192 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5193 		return 0;
5194 
5195 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5196 		nolock = true;
5197 
5198 	if (wbc->sync_mode == WB_SYNC_ALL) {
5199 		if (nolock)
5200 			trans = btrfs_join_transaction_nolock(root);
5201 		else
5202 			trans = btrfs_join_transaction(root);
5203 		if (IS_ERR(trans))
5204 			return PTR_ERR(trans);
5205 		ret = btrfs_commit_transaction(trans, root);
5206 	}
5207 	return ret;
5208 }
5209 
5210 /*
5211  * This is somewhat expensive, updating the tree every time the
5212  * inode changes.  But, it is most likely to find the inode in cache.
5213  * FIXME, needs more benchmarking... there are no reasons other than performance
5214  * to keep or drop this code.
5215  */
5216 static int btrfs_dirty_inode(struct inode *inode)
5217 {
5218 	struct btrfs_root *root = BTRFS_I(inode)->root;
5219 	struct btrfs_trans_handle *trans;
5220 	int ret;
5221 
5222 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5223 		return 0;
5224 
5225 	trans = btrfs_join_transaction(root);
5226 	if (IS_ERR(trans))
5227 		return PTR_ERR(trans);
5228 
5229 	ret = btrfs_update_inode(trans, root, inode);
5230 	if (ret == -ENOSPC) {
5231 		/* whoops, let's try again with the full transaction */
5232 		btrfs_end_transaction(trans, root);
5233 		trans = btrfs_start_transaction(root, 1);
5234 		if (IS_ERR(trans))
5235 			return PTR_ERR(trans);
5236 
5237 		ret = btrfs_update_inode(trans, root, inode);
5238 	}
5239 	btrfs_end_transaction(trans, root);
5240 	if (BTRFS_I(inode)->delayed_node)
5241 		btrfs_balance_delayed_items(root);
5242 
5243 	return ret;
5244 }
5245 
5246 /*
5247  * This is a copy of file_update_time.  We need this so we can return error on
5248  * ENOSPC for updating the inode in the case of file write and mmap writes.
5249  */
5250 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5251 			     int flags)
5252 {
5253 	struct btrfs_root *root = BTRFS_I(inode)->root;
5254 
5255 	if (btrfs_root_readonly(root))
5256 		return -EROFS;
5257 
5258 	if (flags & S_VERSION)
5259 		inode_inc_iversion(inode);
5260 	if (flags & S_CTIME)
5261 		inode->i_ctime = *now;
5262 	if (flags & S_MTIME)
5263 		inode->i_mtime = *now;
5264 	if (flags & S_ATIME)
5265 		inode->i_atime = *now;
5266 	return btrfs_dirty_inode(inode);
5267 }
5268 
5269 /*
5270  * find the highest existing sequence number in a directory
5271  * and then set the in-memory index_cnt variable to reflect
5272  * free sequence numbers
5273  */
5274 static int btrfs_set_inode_index_count(struct inode *inode)
5275 {
5276 	struct btrfs_root *root = BTRFS_I(inode)->root;
5277 	struct btrfs_key key, found_key;
5278 	struct btrfs_path *path;
5279 	struct extent_buffer *leaf;
5280 	int ret;
5281 
5282 	key.objectid = btrfs_ino(inode);
5283 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5284 	key.offset = (u64)-1;
5285 
5286 	path = btrfs_alloc_path();
5287 	if (!path)
5288 		return -ENOMEM;
5289 
5290 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5291 	if (ret < 0)
5292 		goto out;
5293 	/* FIXME: we should be able to handle this */
5294 	if (ret == 0)
5295 		goto out;
5296 	ret = 0;
5297 
5298 	/*
5299 	 * MAGIC NUMBER EXPLANATION:
5300 	 * we search a directory based on f_pos, and '.' and '..' occupy the
5301 	 * f_pos values 0 and 1 respectively, so everybody else has to
5302 	 * start at 2
5303 	 */
5304 	if (path->slots[0] == 0) {
5305 		BTRFS_I(inode)->index_cnt = 2;
5306 		goto out;
5307 	}
5308 
5309 	path->slots[0]--;
5310 
5311 	leaf = path->nodes[0];
5312 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5313 
5314 	if (found_key.objectid != btrfs_ino(inode) ||
5315 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5316 		BTRFS_I(inode)->index_cnt = 2;
5317 		goto out;
5318 	}
5319 
5320 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5321 out:
5322 	btrfs_free_path(path);
5323 	return ret;
5324 }
5325 
5326 /*
5327  * helper to find a free sequence number in a given directory.  This current
5328  * code is very simple, later versions will do smarter things in the btree
5329  */
5330 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5331 {
5332 	int ret = 0;
5333 
5334 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5335 		ret = btrfs_inode_delayed_dir_index_count(dir);
5336 		if (ret) {
5337 			ret = btrfs_set_inode_index_count(dir);
5338 			if (ret)
5339 				return ret;
5340 		}
5341 	}
5342 
5343 	*index = BTRFS_I(dir)->index_cnt;
5344 	BTRFS_I(dir)->index_cnt++;
5345 
5346 	return ret;
5347 }
5348 
5349 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5350 				     struct btrfs_root *root,
5351 				     struct inode *dir,
5352 				     const char *name, int name_len,
5353 				     u64 ref_objectid, u64 objectid,
5354 				     umode_t mode, u64 *index)
5355 {
5356 	struct inode *inode;
5357 	struct btrfs_inode_item *inode_item;
5358 	struct btrfs_key *location;
5359 	struct btrfs_path *path;
5360 	struct btrfs_inode_ref *ref;
5361 	struct btrfs_key key[2];
5362 	u32 sizes[2];
5363 	unsigned long ptr;
5364 	int ret;
5365 	int owner;
5366 
5367 	path = btrfs_alloc_path();
5368 	if (!path)
5369 		return ERR_PTR(-ENOMEM);
5370 
5371 	inode = new_inode(root->fs_info->sb);
5372 	if (!inode) {
5373 		btrfs_free_path(path);
5374 		return ERR_PTR(-ENOMEM);
5375 	}
5376 
5377 	/*
5378 	 * we have to initialize this early, so we can reclaim the inode
5379 	 * number if we fail afterwards in this function.
5380 	 */
5381 	inode->i_ino = objectid;
5382 
5383 	if (dir) {
5384 		trace_btrfs_inode_request(dir);
5385 
5386 		ret = btrfs_set_inode_index(dir, index);
5387 		if (ret) {
5388 			btrfs_free_path(path);
5389 			iput(inode);
5390 			return ERR_PTR(ret);
5391 		}
5392 	}
5393 	/*
5394 	 * index_cnt is ignored for everything but a dir,
5395 	 * btrfs_set_inode_index_count has an explanation for the magic
5396 	 * number
5397 	 */
5398 	BTRFS_I(inode)->index_cnt = 2;
5399 	BTRFS_I(inode)->root = root;
5400 	BTRFS_I(inode)->generation = trans->transid;
5401 	inode->i_generation = BTRFS_I(inode)->generation;
5402 
5403 	/*
5404 	 * We could have gotten an inode number from somebody who was fsynced
5405 	 * and then removed in this same transaction, so let's just set full
5406 	 * sync since it will be a full sync anyway and this will blow away the
5407 	 * old info in the log.
5408 	 */
5409 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
5410 
5411 	if (S_ISDIR(mode))
5412 		owner = 0;
5413 	else
5414 		owner = 1;
5415 
5416 	key[0].objectid = objectid;
5417 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
5418 	key[0].offset = 0;
5419 
5420 	/*
5421 	 * Start new inodes with an inode_ref. This is slightly more
5422 	 * efficient for small numbers of hard links since they will
5423 	 * be packed into one item. Extended refs will kick in if we
5424 	 * add more hard links than can fit in the ref item.
5425 	 */
5426 	key[1].objectid = objectid;
5427 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
5428 	key[1].offset = ref_objectid;
5429 
5430 	sizes[0] = sizeof(struct btrfs_inode_item);
5431 	sizes[1] = name_len + sizeof(*ref);
5432 
5433 	path->leave_spinning = 1;
5434 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
5435 	if (ret != 0)
5436 		goto fail;
5437 
5438 	inode_init_owner(inode, dir, mode);
5439 	inode_set_bytes(inode, 0);
5440 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5441 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5442 				  struct btrfs_inode_item);
5443 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
5444 			     sizeof(*inode_item));
5445 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
5446 
5447 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
5448 			     struct btrfs_inode_ref);
5449 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
5450 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
5451 	ptr = (unsigned long)(ref + 1);
5452 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
5453 
5454 	btrfs_mark_buffer_dirty(path->nodes[0]);
5455 	btrfs_free_path(path);
5456 
5457 	location = &BTRFS_I(inode)->location;
5458 	location->objectid = objectid;
5459 	location->offset = 0;
5460 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
5461 
5462 	btrfs_inherit_iflags(inode, dir);
5463 
5464 	if (S_ISREG(mode)) {
5465 		if (btrfs_test_opt(root, NODATASUM))
5466 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5467 		if (btrfs_test_opt(root, NODATACOW))
5468 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
5469 				BTRFS_INODE_NODATASUM;
5470 	}
5471 
5472 	insert_inode_hash(inode);
5473 	inode_tree_add(inode);
5474 
5475 	trace_btrfs_inode_new(inode);
5476 	btrfs_set_inode_last_trans(trans, inode);
5477 
5478 	btrfs_update_root_times(trans, root);
5479 
5480 	return inode;
5481 fail:
5482 	if (dir)
5483 		BTRFS_I(dir)->index_cnt--;
5484 	btrfs_free_path(path);
5485 	iput(inode);
5486 	return ERR_PTR(ret);
5487 }
5488 
5489 static inline u8 btrfs_inode_type(struct inode *inode)
5490 {
5491 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
5492 }
5493 
5494 /*
5495  * utility function to add 'inode' into 'parent_inode' with
5496  * a given name and a given sequence number.
5497  * if 'add_backref' is true, also insert a backref from the
5498  * inode to the parent directory.
5499  */
5500 int btrfs_add_link(struct btrfs_trans_handle *trans,
5501 		   struct inode *parent_inode, struct inode *inode,
5502 		   const char *name, int name_len, int add_backref, u64 index)
5503 {
5504 	int ret = 0;
5505 	struct btrfs_key key;
5506 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
5507 	u64 ino = btrfs_ino(inode);
5508 	u64 parent_ino = btrfs_ino(parent_inode);
5509 
5510 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5511 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
5512 	} else {
5513 		key.objectid = ino;
5514 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
5515 		key.offset = 0;
5516 	}
5517 
5518 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5519 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
5520 					 key.objectid, root->root_key.objectid,
5521 					 parent_ino, index, name, name_len);
5522 	} else if (add_backref) {
5523 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5524 					     parent_ino, index);
5525 	}
5526 
5527 	/* Nothing to clean up yet */
5528 	if (ret)
5529 		return ret;
5530 
5531 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
5532 				    parent_inode, &key,
5533 				    btrfs_inode_type(inode), index);
5534 	if (ret == -EEXIST || ret == -EOVERFLOW)
5535 		goto fail_dir_item;
5536 	else if (ret) {
5537 		btrfs_abort_transaction(trans, root, ret);
5538 		return ret;
5539 	}
5540 
5541 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
5542 			   name_len * 2);
5543 	inode_inc_iversion(parent_inode);
5544 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
5545 	ret = btrfs_update_inode(trans, root, parent_inode);
5546 	if (ret)
5547 		btrfs_abort_transaction(trans, root, ret);
5548 	return ret;
5549 
5550 fail_dir_item:
5551 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5552 		u64 local_index;
5553 		int err;
5554 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
5555 				 key.objectid, root->root_key.objectid,
5556 				 parent_ino, &local_index, name, name_len);
5557 
5558 	} else if (add_backref) {
5559 		u64 local_index;
5560 		int err;
5561 
5562 		err = btrfs_del_inode_ref(trans, root, name, name_len,
5563 					  ino, parent_ino, &local_index);
5564 	}
5565 	return ret;
5566 }
5567 
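/*
 * thin wrapper around btrfs_add_link for non-directories: takes the
 * name from the dentry and maps a positive return value to -EEXIST.
 */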
5568 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
5569 			    struct inode *dir, struct dentry *dentry,
5570 			    struct inode *inode, int backref, u64 index)
5571 {
5572 	int err = btrfs_add_link(trans, dir, inode,
5573 				 dentry->d_name.name, dentry->d_name.len,
5574 				 backref, index);
5575 	if (err > 0)
5576 		err = -EEXIST;
5577 	return err;
5578 }
5579 
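/*
 * .mknod handler: reserves a 5-item transaction (see the comment
 * below), creates the special inode and links it into the directory.
 */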
5580 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
5581 			umode_t mode, dev_t rdev)
5582 {
5583 	struct btrfs_trans_handle *trans;
5584 	struct btrfs_root *root = BTRFS_I(dir)->root;
5585 	struct inode *inode = NULL;
5586 	int err;
5587 	int drop_inode = 0;
5588 	u64 objectid;
5589 	u64 index = 0;
5590 
5591 	if (!new_valid_dev(rdev))
5592 		return -EINVAL;
5593 
5594 	/*
5595 	 * 2 for inode item and ref
5596 	 * 2 for dir items
5597 	 * 1 for xattr if selinux is on
5598 	 */
5599 	trans = btrfs_start_transaction(root, 5);
5600 	if (IS_ERR(trans))
5601 		return PTR_ERR(trans);
5602 
5603 	err = btrfs_find_free_ino(root, &objectid);
5604 	if (err)
5605 		goto out_unlock;
5606 
5607 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5608 				dentry->d_name.len, btrfs_ino(dir), objectid,
5609 				mode, &index);
5610 	if (IS_ERR(inode)) {
5611 		err = PTR_ERR(inode);
5612 		goto out_unlock;
5613 	}
5614 
5615 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5616 	if (err) {
5617 		drop_inode = 1;
5618 		goto out_unlock;
5619 	}
5620 
5621 	/*
5622 	* If the active LSM wants to access the inode during
5623 	* d_instantiate it needs these. Smack checks to see
5624 	* if the filesystem supports xattrs by looking at the
5625 	* ops vector.
5626 	*/
5627 
5628 	inode->i_op = &btrfs_special_inode_operations;
5629 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5630 	if (err)
5631 		drop_inode = 1;
5632 	else {
5633 		init_special_inode(inode, inode->i_mode, rdev);
5634 		btrfs_update_inode(trans, root, inode);
5635 		d_instantiate(dentry, inode);
5636 	}
5637 out_unlock:
5638 	btrfs_end_transaction(trans, root);
5639 	btrfs_btree_balance_dirty(root);
5640 	if (drop_inode) {
5641 		inode_dec_link_count(inode);
5642 		iput(inode);
5643 	}
5644 	return err;
5645 }
5646 
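/*
 * .create handler: same transaction layout as btrfs_mknod, but wires
 * up the regular-file operations and address space before the dentry
 * is instantiated.
 */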
5647 static int btrfs_create(struct inode *dir, struct dentry *dentry,
5648 			umode_t mode, bool excl)
5649 {
5650 	struct btrfs_trans_handle *trans;
5651 	struct btrfs_root *root = BTRFS_I(dir)->root;
5652 	struct inode *inode = NULL;
5653 	int drop_inode_on_err = 0;
5654 	int err;
5655 	u64 objectid;
5656 	u64 index = 0;
5657 
5658 	/*
5659 	 * 2 for inode item and ref
5660 	 * 2 for dir items
5661 	 * 1 for xattr if selinux is on
5662 	 */
5663 	trans = btrfs_start_transaction(root, 5);
5664 	if (IS_ERR(trans))
5665 		return PTR_ERR(trans);
5666 
5667 	err = btrfs_find_free_ino(root, &objectid);
5668 	if (err)
5669 		goto out_unlock;
5670 
5671 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5672 				dentry->d_name.len, btrfs_ino(dir), objectid,
5673 				mode, &index);
5674 	if (IS_ERR(inode)) {
5675 		err = PTR_ERR(inode);
5676 		goto out_unlock;
5677 	}
5678 	drop_inode_on_err = 1;
5679 
5680 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5681 	if (err)
5682 		goto out_unlock;
5683 
5684 	err = btrfs_update_inode(trans, root, inode);
5685 	if (err)
5686 		goto out_unlock;
5687 
5688 	/*
5689 	* If the active LSM wants to access the inode during
5690 	* d_instantiate it needs these. Smack checks to see
5691 	* if the filesystem supports xattrs by looking at the
5692 	* ops vector.
5693 	*/
5694 	inode->i_fop = &btrfs_file_operations;
5695 	inode->i_op = &btrfs_file_inode_operations;
5696 
5697 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5698 	if (err)
5699 		goto out_unlock;
5700 
5701 	inode->i_mapping->a_ops = &btrfs_aops;
5702 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5703 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5704 	d_instantiate(dentry, inode);
5705 
5706 out_unlock:
5707 	btrfs_end_transaction(trans, root);
5708 	if (err && drop_inode_on_err) {
5709 		inode_dec_link_count(inode);
5710 		iput(inode);
5711 	}
5712 	btrfs_btree_balance_dirty(root);
5713 	return err;
5714 }
5715 
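/*
 * .link handler: hard links never cross subvolumes, and the link count
 * is capped at BTRFS_LINK_MAX, so both are checked before starting a
 * transaction.
 */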
5716 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5717 		      struct dentry *dentry)
5718 {
5719 	struct btrfs_trans_handle *trans;
5720 	struct btrfs_root *root = BTRFS_I(dir)->root;
5721 	struct inode *inode = old_dentry->d_inode;
5722 	u64 index;
5723 	int err;
5724 	int drop_inode = 0;
5725 
5726 	/* do not allow sys_link across subvolumes of the same device */
5727 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5728 		return -EXDEV;
5729 
5730 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5731 		return -EMLINK;
5732 
5733 	err = btrfs_set_inode_index(dir, &index);
5734 	if (err)
5735 		goto fail;
5736 
5737 	/*
5738 	 * 2 items for inode and inode ref
5739 	 * 2 items for dir items
5740 	 * 1 item for parent inode
5741 	 */
5742 	trans = btrfs_start_transaction(root, 5);
5743 	if (IS_ERR(trans)) {
5744 		err = PTR_ERR(trans);
5745 		goto fail;
5746 	}
5747 
5748 	btrfs_inc_nlink(inode);
5749 	inode_inc_iversion(inode);
5750 	inode->i_ctime = CURRENT_TIME;
5751 	ihold(inode);
5752 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
5753 
5754 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5755 
5756 	if (err) {
5757 		drop_inode = 1;
5758 	} else {
5759 		struct dentry *parent = dentry->d_parent;
5760 		err = btrfs_update_inode(trans, root, inode);
5761 		if (err)
5762 			goto fail;
5763 		d_instantiate(dentry, inode);
5764 		btrfs_log_new_name(trans, inode, NULL, parent);
5765 	}
5766 
5767 	btrfs_end_transaction(trans, root);
5768 fail:
5769 	if (drop_inode) {
5770 		inode_dec_link_count(inode);
5771 		iput(inode);
5772 	}
5773 	btrfs_btree_balance_dirty(root);
5774 	return err;
5775 }
5776 
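/*
 * .mkdir handler: creates the directory inode with an i_size of 0;
 * drop_on_err releases the half-built inode if any later step fails.
 */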
5777 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5778 {
5779 	struct inode *inode = NULL;
5780 	struct btrfs_trans_handle *trans;
5781 	struct btrfs_root *root = BTRFS_I(dir)->root;
5782 	int err = 0;
5783 	int drop_on_err = 0;
5784 	u64 objectid = 0;
5785 	u64 index = 0;
5786 
5787 	/*
5788 	 * 2 items for inode and ref
5789 	 * 2 items for dir items
5790 	 * 1 for xattr if selinux is on
5791 	 */
5792 	trans = btrfs_start_transaction(root, 5);
5793 	if (IS_ERR(trans))
5794 		return PTR_ERR(trans);
5795 
5796 	err = btrfs_find_free_ino(root, &objectid);
5797 	if (err)
5798 		goto out_fail;
5799 
5800 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5801 				dentry->d_name.len, btrfs_ino(dir), objectid,
5802 				S_IFDIR | mode, &index);
5803 	if (IS_ERR(inode)) {
5804 		err = PTR_ERR(inode);
5805 		goto out_fail;
5806 	}
5807 
5808 	drop_on_err = 1;
5809 
5810 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5811 	if (err)
5812 		goto out_fail;
5813 
5814 	inode->i_op = &btrfs_dir_inode_operations;
5815 	inode->i_fop = &btrfs_dir_file_operations;
5816 
5817 	btrfs_i_size_write(inode, 0);
5818 	err = btrfs_update_inode(trans, root, inode);
5819 	if (err)
5820 		goto out_fail;
5821 
5822 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5823 			     dentry->d_name.len, 0, index);
5824 	if (err)
5825 		goto out_fail;
5826 
5827 	d_instantiate(dentry, inode);
5828 	drop_on_err = 0;
5829 
5830 out_fail:
5831 	btrfs_end_transaction(trans, root);
5832 	if (drop_on_err)
5833 		iput(inode);
5834 	btrfs_btree_balance_dirty(root);
5835 	return err;
5836 }
5837 
5838 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5839  * and an extent that you want to insert, deal with overlap and insert
5840  * the new extent into the tree.
5841  */
5842 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5843 				struct extent_map *existing,
5844 				struct extent_map *em,
5845 				u64 map_start, u64 map_len)
5846 {
5847 	u64 start_diff;
5848 
5849 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5850 	start_diff = map_start - em->start;
5851 	em->start = map_start;
5852 	em->len = map_len;
5853 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5854 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5855 		em->block_start += start_diff;
5856 		em->block_len -= start_diff;
5857 	}
5858 	return add_extent_mapping(em_tree, em, 0);
5859 }
5860 
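/*
 * copy a compressed inline extent into the given page.  If
 * decompression fails the remainder of the page is zeroed and the
 * error is swallowed rather than returned to the caller.
 */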
5861 static noinline int uncompress_inline(struct btrfs_path *path,
5862 				      struct inode *inode, struct page *page,
5863 				      size_t pg_offset, u64 extent_offset,
5864 				      struct btrfs_file_extent_item *item)
5865 {
5866 	int ret;
5867 	struct extent_buffer *leaf = path->nodes[0];
5868 	char *tmp;
5869 	size_t max_size;
5870 	unsigned long inline_size;
5871 	unsigned long ptr;
5872 	int compress_type;
5873 
5874 	WARN_ON(pg_offset != 0);
5875 	compress_type = btrfs_file_extent_compression(leaf, item);
5876 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5877 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5878 					btrfs_item_nr(leaf, path->slots[0]));
5879 	tmp = kmalloc(inline_size, GFP_NOFS);
5880 	if (!tmp)
5881 		return -ENOMEM;
5882 	ptr = btrfs_file_extent_inline_start(item);
5883 
5884 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5885 
5886 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5887 	ret = btrfs_decompress(compress_type, tmp, page,
5888 			       extent_offset, inline_size, max_size);
5889 	if (ret) {
5890 		char *kaddr = kmap_atomic(page);
5891 		unsigned long copy_size = min_t(u64,
5892 				  PAGE_CACHE_SIZE - pg_offset,
5893 				  max_size - extent_offset);
5894 		memset(kaddr + pg_offset, 0, copy_size);
5895 		kunmap_atomic(kaddr);
5896 	}
5897 	kfree(tmp);
5898 	return 0;
5899 }
5900 
5901 /*
5902  * a bit scary, this does extent mapping from logical file offset to the disk.
5903  * the ugly parts come from merging extents from the disk with the in-ram
5904  * representation.  This gets more complex because of the data=ordered code,
5905  * where the in-ram extents might be locked pending data=ordered completion.
5906  *
5907  * This also copies inline extents directly into the page.
5908  */
5909 
5910 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5911 				    size_t pg_offset, u64 start, u64 len,
5912 				    int create)
5913 {
5914 	int ret;
5915 	int err = 0;
5916 	u64 bytenr;
5917 	u64 extent_start = 0;
5918 	u64 extent_end = 0;
5919 	u64 objectid = btrfs_ino(inode);
5920 	u32 found_type;
5921 	struct btrfs_path *path = NULL;
5922 	struct btrfs_root *root = BTRFS_I(inode)->root;
5923 	struct btrfs_file_extent_item *item;
5924 	struct extent_buffer *leaf;
5925 	struct btrfs_key found_key;
5926 	struct extent_map *em = NULL;
5927 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5928 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5929 	struct btrfs_trans_handle *trans = NULL;
5930 	int compress_type;
5931 
5932 again:
5933 	read_lock(&em_tree->lock);
5934 	em = lookup_extent_mapping(em_tree, start, len);
5935 	if (em)
5936 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5937 	read_unlock(&em_tree->lock);
5938 
5939 	if (em) {
5940 		if (em->start > start || em->start + em->len <= start)
5941 			free_extent_map(em);
5942 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5943 			free_extent_map(em);
5944 		else
5945 			goto out;
5946 	}
5947 	em = alloc_extent_map();
5948 	if (!em) {
5949 		err = -ENOMEM;
5950 		goto out;
5951 	}
5952 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5953 	em->start = EXTENT_MAP_HOLE;
5954 	em->orig_start = EXTENT_MAP_HOLE;
5955 	em->len = (u64)-1;
5956 	em->block_len = (u64)-1;
5957 
5958 	if (!path) {
5959 		path = btrfs_alloc_path();
5960 		if (!path) {
5961 			err = -ENOMEM;
5962 			goto out;
5963 		}
5964 		/*
5965 		 * Chances are we'll be called again, so go ahead and do
5966 		 * readahead
5967 		 */
5968 		path->reada = 1;
5969 	}
5970 
5971 	ret = btrfs_lookup_file_extent(trans, root, path,
5972 				       objectid, start, trans != NULL);
5973 	if (ret < 0) {
5974 		err = ret;
5975 		goto out;
5976 	}
5977 
5978 	if (ret != 0) {
5979 		if (path->slots[0] == 0)
5980 			goto not_found;
5981 		path->slots[0]--;
5982 	}
5983 
5984 	leaf = path->nodes[0];
5985 	item = btrfs_item_ptr(leaf, path->slots[0],
5986 			      struct btrfs_file_extent_item);
5987 	/* are we inside the extent that was found? */
5988 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5989 	found_type = btrfs_key_type(&found_key);
5990 	if (found_key.objectid != objectid ||
5991 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5992 		goto not_found;
5993 	}
5994 
5995 	found_type = btrfs_file_extent_type(leaf, item);
5996 	extent_start = found_key.offset;
5997 	compress_type = btrfs_file_extent_compression(leaf, item);
5998 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5999 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6000 		extent_end = extent_start +
6001 		       btrfs_file_extent_num_bytes(leaf, item);
6002 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6003 		size_t size;
6004 		size = btrfs_file_extent_inline_len(leaf, item);
6005 		extent_end = ALIGN(extent_start + size, root->sectorsize);
6006 	}
6007 
6008 	if (start >= extent_end) {
6009 		path->slots[0]++;
6010 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6011 			ret = btrfs_next_leaf(root, path);
6012 			if (ret < 0) {
6013 				err = ret;
6014 				goto out;
6015 			}
6016 			if (ret > 0)
6017 				goto not_found;
6018 			leaf = path->nodes[0];
6019 		}
6020 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6021 		if (found_key.objectid != objectid ||
6022 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6023 			goto not_found;
6024 		if (start + len <= found_key.offset)
6025 			goto not_found;
6026 		em->start = start;
6027 		em->orig_start = start;
6028 		em->len = found_key.offset - start;
6029 		goto not_found_em;
6030 	}
6031 
6032 	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
6033 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6034 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6035 		em->start = extent_start;
6036 		em->len = extent_end - extent_start;
6037 		em->orig_start = extent_start -
6038 				 btrfs_file_extent_offset(leaf, item);
6039 		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
6040 								      item);
6041 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
6042 		if (bytenr == 0) {
6043 			em->block_start = EXTENT_MAP_HOLE;
6044 			goto insert;
6045 		}
6046 		if (compress_type != BTRFS_COMPRESS_NONE) {
6047 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6048 			em->compress_type = compress_type;
6049 			em->block_start = bytenr;
6050 			em->block_len = em->orig_block_len;
6051 		} else {
6052 			bytenr += btrfs_file_extent_offset(leaf, item);
6053 			em->block_start = bytenr;
6054 			em->block_len = em->len;
6055 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
6056 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6057 		}
6058 		goto insert;
6059 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6060 		unsigned long ptr;
6061 		char *map;
6062 		size_t size;
6063 		size_t extent_offset;
6064 		size_t copy_size;
6065 
6066 		em->block_start = EXTENT_MAP_INLINE;
6067 		if (!page || create) {
6068 			em->start = extent_start;
6069 			em->len = extent_end - extent_start;
6070 			goto out;
6071 		}
6072 
6073 		size = btrfs_file_extent_inline_len(leaf, item);
6074 		extent_offset = page_offset(page) + pg_offset - extent_start;
6075 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6076 				size - extent_offset);
6077 		em->start = extent_start + extent_offset;
6078 		em->len = ALIGN(copy_size, root->sectorsize);
6079 		em->orig_block_len = em->len;
6080 		em->orig_start = em->start;
6081 		if (compress_type) {
6082 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6083 			em->compress_type = compress_type;
6084 		}
6085 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6086 		if (create == 0 && !PageUptodate(page)) {
6087 			if (btrfs_file_extent_compression(leaf, item) !=
6088 			    BTRFS_COMPRESS_NONE) {
6089 				ret = uncompress_inline(path, inode, page,
6090 							pg_offset,
6091 							extent_offset, item);
6092 				BUG_ON(ret); /* -ENOMEM */
6093 			} else {
6094 				map = kmap(page);
6095 				read_extent_buffer(leaf, map + pg_offset, ptr,
6096 						   copy_size);
6097 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6098 					memset(map + pg_offset + copy_size, 0,
6099 					       PAGE_CACHE_SIZE - pg_offset -
6100 					       copy_size);
6101 				}
6102 				kunmap(page);
6103 			}
6104 			flush_dcache_page(page);
6105 		} else if (create && PageUptodate(page)) {
6106 			BUG();
6107 			if (!trans) {
6108 				kunmap(page);
6109 				free_extent_map(em);
6110 				em = NULL;
6111 
6112 				btrfs_release_path(path);
6113 				trans = btrfs_join_transaction(root);
6114 
6115 				if (IS_ERR(trans))
6116 					return ERR_CAST(trans);
6117 				goto again;
6118 			}
6119 			map = kmap(page);
6120 			write_extent_buffer(leaf, map + pg_offset, ptr,
6121 					    copy_size);
6122 			kunmap(page);
6123 			btrfs_mark_buffer_dirty(leaf);
6124 		}
6125 		set_extent_uptodate(io_tree, em->start,
6126 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6127 		goto insert;
6128 	} else {
6129 		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
6130 	}
6131 not_found:
6132 	em->start = start;
6133 	em->orig_start = start;
6134 	em->len = len;
6135 not_found_em:
6136 	em->block_start = EXTENT_MAP_HOLE;
6137 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6138 insert:
6139 	btrfs_release_path(path);
6140 	if (em->start > start || extent_map_end(em) <= start) {
6141 		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6142 			(unsigned long long)em->start,
6143 			(unsigned long long)em->len,
6144 			(unsigned long long)start,
6145 			(unsigned long long)len);
6146 		err = -EIO;
6147 		goto out;
6148 	}
6149 
6150 	err = 0;
6151 	write_lock(&em_tree->lock);
6152 	ret = add_extent_mapping(em_tree, em, 0);
6153 	/* it is possible that someone inserted the extent into the tree
6154 	 * while we had the lock dropped.  It is also possible that
6155 	 * an overlapping map exists in the tree
6156 	 */
6157 	if (ret == -EEXIST) {
6158 		struct extent_map *existing;
6159 
6160 		ret = 0;
6161 
6162 		existing = lookup_extent_mapping(em_tree, start, len);
6163 		if (existing && (existing->start > start ||
6164 		    existing->start + existing->len <= start)) {
6165 			free_extent_map(existing);
6166 			existing = NULL;
6167 		}
6168 		if (!existing) {
6169 			existing = lookup_extent_mapping(em_tree, em->start,
6170 							 em->len);
6171 			if (existing) {
6172 				err = merge_extent_mapping(em_tree, existing,
6173 							   em, start,
6174 							   root->sectorsize);
6175 				free_extent_map(existing);
6176 				if (err) {
6177 					free_extent_map(em);
6178 					em = NULL;
6179 				}
6180 			} else {
6181 				err = -EIO;
6182 				free_extent_map(em);
6183 				em = NULL;
6184 			}
6185 		} else {
6186 			free_extent_map(em);
6187 			em = existing;
6188 			err = 0;
6189 		}
6190 	}
6191 	write_unlock(&em_tree->lock);
6192 out:
6193 
6194 	if (em)
6195 		trace_btrfs_get_extent(root, em);
6196 
6197 	if (path)
6198 		btrfs_free_path(path);
6199 	if (trans) {
6200 		ret = btrfs_end_transaction(trans, root);
6201 		if (!err)
6202 			err = ret;
6203 	}
6204 	if (err) {
6205 		free_extent_map(em);
6206 		return ERR_PTR(err);
6207 	}
6208 	BUG_ON(!em); /* Error is always set */
6209 	return em;
6210 }
6211 
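/*
 * fiemap flavour of btrfs_get_extent: a hole or prealloc extent can
 * hide delalloc bytes, so re-check the io_tree and fabricate an
 * EXTENT_MAP_DELALLOC mapping when delalloc is found in the range.
 */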
6212 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6213 					   size_t pg_offset, u64 start, u64 len,
6214 					   int create)
6215 {
6216 	struct extent_map *em;
6217 	struct extent_map *hole_em = NULL;
6218 	u64 range_start = start;
6219 	u64 end;
6220 	u64 found;
6221 	u64 found_end;
6222 	int err = 0;
6223 
6224 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6225 	if (IS_ERR(em))
6226 		return em;
6227 	if (em) {
6228 		/*
6229 		 * if our em maps to
6230 		 * -  a hole or
6231 		 * -  a pre-alloc extent,
6232 		 * there might actually be delalloc bytes behind it.
6233 		 */
6234 		if (em->block_start != EXTENT_MAP_HOLE &&
6235 		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6236 			return em;
6237 		else
6238 			hole_em = em;
6239 	}
6240 
6241 	/* check to see if we've wrapped (len == -1 or similar) */
6242 	end = start + len;
6243 	if (end < start)
6244 		end = (u64)-1;
6245 	else
6246 		end -= 1;
6247 
6248 	em = NULL;
6249 
6250 	/* ok, we didn't find anything, let's look for delalloc */
6251 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
6252 				 end, len, EXTENT_DELALLOC, 1);
6253 	found_end = range_start + found;
6254 	if (found_end < range_start)
6255 		found_end = (u64)-1;
6256 
6257 	/*
6258 	 * we didn't find anything useful, return
6259 	 * the original results from get_extent()
6260 	 */
6261 	if (range_start > end || found_end <= start) {
6262 		em = hole_em;
6263 		hole_em = NULL;
6264 		goto out;
6265 	}
6266 
6267 	/* adjust the range_start to make sure it doesn't
6268 	 * go backwards from the start they passed in
6269 	 */
6270 	range_start = max(start, range_start);
6271 	found = found_end - range_start;
6272 
6273 	if (found > 0) {
6274 		u64 hole_start = start;
6275 		u64 hole_len = len;
6276 
6277 		em = alloc_extent_map();
6278 		if (!em) {
6279 			err = -ENOMEM;
6280 			goto out;
6281 		}
6282 		/*
6283 		 * when btrfs_get_extent can't find anything it
6284 		 * returns one huge hole
6285 		 *
6286 		 * make sure what it found really fits our range, and
6287 		 * adjust to make sure it is based on the start from
6288 		 * the caller
6289 		 */
6290 		if (hole_em) {
6291 			u64 calc_end = extent_map_end(hole_em);
6292 
6293 			if (calc_end <= start || (hole_em->start > end)) {
6294 				free_extent_map(hole_em);
6295 				hole_em = NULL;
6296 			} else {
6297 				hole_start = max(hole_em->start, start);
6298 				hole_len = calc_end - hole_start;
6299 			}
6300 		}
6301 		em->bdev = NULL;
6302 		if (hole_em && range_start > hole_start) {
6303 			/* our hole starts before our delalloc, so we
6304 			 * have to return just the parts of the hole
6305 			 * that go until the delalloc starts
6306 			 */
6307 			em->len = min(hole_len,
6308 				      range_start - hole_start);
6309 			em->start = hole_start;
6310 			em->orig_start = hole_start;
6311 			/*
6312 			 * don't adjust block start at all,
6313 			 * it is fixed at EXTENT_MAP_HOLE
6314 			 */
6315 			em->block_start = hole_em->block_start;
6316 			em->block_len = hole_len;
6317 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
6318 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6319 		} else {
6320 			em->start = range_start;
6321 			em->len = found;
6322 			em->orig_start = range_start;
6323 			em->block_start = EXTENT_MAP_DELALLOC;
6324 			em->block_len = found;
6325 		}
6326 	} else if (hole_em) {
6327 		return hole_em;
6328 	}
6329 out:
6330 
6331 	free_extent_map(hole_em);
6332 	if (err) {
6333 		free_extent_map(em);
6334 		return ERR_PTR(err);
6335 	}
6336 	return em;
6337 }
6338 
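/*
 * allocate a fresh extent for a DIO write: reserve the space, pin the
 * new mapping in the extent tree and queue an ordered extent so the
 * file metadata is updated once the IO completes.
 */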
6339 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6340 						  u64 start, u64 len)
6341 {
6342 	struct btrfs_root *root = BTRFS_I(inode)->root;
6343 	struct btrfs_trans_handle *trans;
6344 	struct extent_map *em;
6345 	struct btrfs_key ins;
6346 	u64 alloc_hint;
6347 	int ret;
6348 
6349 	trans = btrfs_join_transaction(root);
6350 	if (IS_ERR(trans))
6351 		return ERR_CAST(trans);
6352 
6353 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
6354 
6355 	alloc_hint = get_extent_allocation_hint(inode, start, len);
6356 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
6357 				   alloc_hint, &ins, 1);
6358 	if (ret) {
6359 		em = ERR_PTR(ret);
6360 		goto out;
6361 	}
6362 
6363 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
6364 			      ins.offset, ins.offset, ins.offset, 0);
6365 	if (IS_ERR(em))
6366 		goto out;
6367 
6368 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6369 					   ins.offset, ins.offset, 0);
6370 	if (ret) {
6371 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6372 		em = ERR_PTR(ret);
6373 	}
6374 out:
6375 	btrfs_end_transaction(trans, root);
6376 	return em;
6377 }
6378 
6379 /*
6380  * returns 1 when the nocow is safe, < 0 on error, 0 if the
6381  * block must be cow'd
6382  */
6383 noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
6384 			      struct inode *inode, u64 offset, u64 *len,
6385 			      u64 *orig_start, u64 *orig_block_len,
6386 			      u64 *ram_bytes)
6387 {
6388 	struct btrfs_path *path;
6389 	int ret;
6390 	struct extent_buffer *leaf;
6391 	struct btrfs_root *root = BTRFS_I(inode)->root;
6392 	struct btrfs_file_extent_item *fi;
6393 	struct btrfs_key key;
6394 	u64 disk_bytenr;
6395 	u64 backref_offset;
6396 	u64 extent_end;
6397 	u64 num_bytes;
6398 	int slot;
6399 	int found_type;
6400 	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
6401 	path = btrfs_alloc_path();
6402 	if (!path)
6403 		return -ENOMEM;
6404 
6405 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
6406 				       offset, 0);
6407 	if (ret < 0)
6408 		goto out;
6409 
6410 	slot = path->slots[0];
6411 	if (ret == 1) {
6412 		if (slot == 0) {
6413 			/* can't find the item, must cow */
6414 			ret = 0;
6415 			goto out;
6416 		}
6417 		slot--;
6418 	}
6419 	ret = 0;
6420 	leaf = path->nodes[0];
6421 	btrfs_item_key_to_cpu(leaf, &key, slot);
6422 	if (key.objectid != btrfs_ino(inode) ||
6423 	    key.type != BTRFS_EXTENT_DATA_KEY) {
6424 		/* not our file or wrong item type, must cow */
6425 		goto out;
6426 	}
6427 
6428 	if (key.offset > offset) {
6429 		/* Wrong offset, must cow */
6430 		goto out;
6431 	}
6432 
6433 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6434 	found_type = btrfs_file_extent_type(leaf, fi);
6435 	if (found_type != BTRFS_FILE_EXTENT_REG &&
6436 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
6437 		/* not a regular extent, must cow */
6438 		goto out;
6439 	}
6440 
6441 	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
6442 		goto out;
6443 
6444 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6445 	if (disk_bytenr == 0)
6446 		goto out;
6447 
6448 	if (btrfs_file_extent_compression(leaf, fi) ||
6449 	    btrfs_file_extent_encryption(leaf, fi) ||
6450 	    btrfs_file_extent_other_encoding(leaf, fi))
6451 		goto out;
6452 
6453 	backref_offset = btrfs_file_extent_offset(leaf, fi);
6454 
6455 	if (orig_start) {
6456 		*orig_start = key.offset - backref_offset;
6457 		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
6458 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6459 	}
6460 
6461 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
6462 
6463 	if (btrfs_extent_readonly(root, disk_bytenr))
6464 		goto out;
6465 
6466 	/*
6467 	 * look for other files referencing this extent, if we
6468 	 * find any we must cow
6469 	 */
6470 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
6471 				  key.offset - backref_offset, disk_bytenr))
6472 		goto out;
6473 
6474 	/*
6475 	 * adjust disk_bytenr and num_bytes to cover just the bytes
6476 	 * in this extent we are about to write.  If there
6477 	 * are any csums in that range we have to cow in order
6478 	 * to keep the csums correct
6479 	 */
6480 	disk_bytenr += backref_offset;
6481 	disk_bytenr += offset - key.offset;
6482 	num_bytes = min(offset + *len, extent_end) - offset;
6483 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
6484 		goto out;
6485 	/*
6486 	 * all of the above have passed, it is safe to overwrite this extent
6487 	 * without cow
6488 	 */
6489 	*len = num_bytes;
6490 	ret = 1;
6491 out:
6492 	btrfs_free_path(path);
6493 	return ret;
6494 }
6495 
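/*
 * lock the extent range for DIO, looping until no ordered extents or
 * uptodate buffered pages remain in the range.  A nonzero return means
 * writeback or invalidation failed and the caller should fall back to
 * buffered IO.
 */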
6496 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
6497 			      struct extent_state **cached_state, int writing)
6498 {
6499 	struct btrfs_ordered_extent *ordered;
6500 	int ret = 0;
6501 
6502 	while (1) {
6503 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6504 				 0, cached_state);
6505 		/*
6506 		 * We're concerned with the entire range that we're going to be
6507 		 * doing DIO to, so we need to make sure there are no ordered
6508 		 * extents in this range.
6509 		 */
6510 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6511 						     lockend - lockstart + 1);
6512 
6513 		/*
6514 		 * We need to make sure there are no buffered pages in this
6515 		 * range either, we could have raced between the invalidate in
6516 		 * generic_file_direct_write and locking the extent.  The
6517 		 * invalidate needs to happen so that reads after a write do not
6518 		 * get stale data.
6519 		 */
6520 		if (!ordered && (!writing ||
6521 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
6522 				    lockstart, lockend, EXTENT_UPTODATE, 0,
6523 				    *cached_state)))
6524 			break;
6525 
6526 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6527 				     cached_state, GFP_NOFS);
6528 
6529 		if (ordered) {
6530 			btrfs_start_ordered_extent(inode, ordered, 1);
6531 			btrfs_put_ordered_extent(ordered);
6532 		} else {
6533 			/* Screw you mmap */
6534 			ret = filemap_write_and_wait_range(inode->i_mapping,
6535 							   lockstart,
6536 							   lockend);
6537 			if (ret)
6538 				break;
6539 
6540 			/*
6541 			 * If we found a page that couldn't be invalidated just
6542 			 * fall back to buffered.
6543 			 */
6544 			ret = invalidate_inode_pages2_range(inode->i_mapping,
6545 					lockstart >> PAGE_CACHE_SHIFT,
6546 					lockend >> PAGE_CACHE_SHIFT);
6547 			if (ret)
6548 				break;
6549 		}
6550 
6551 		cond_resched();
6552 	}
6553 
6554 	return ret;
6555 }
6556 
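/*
 * build an extent_map for a newly allocated DIO extent, flag it PINNED
 * (and FILLING for prealloc) and insert it, dropping any cached
 * mappings that overlap the range until the insert succeeds.
 */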
6557 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
6558 					   u64 len, u64 orig_start,
6559 					   u64 block_start, u64 block_len,
6560 					   u64 orig_block_len, u64 ram_bytes,
6561 					   int type)
6562 {
6563 	struct extent_map_tree *em_tree;
6564 	struct extent_map *em;
6565 	struct btrfs_root *root = BTRFS_I(inode)->root;
6566 	int ret;
6567 
6568 	em_tree = &BTRFS_I(inode)->extent_tree;
6569 	em = alloc_extent_map();
6570 	if (!em)
6571 		return ERR_PTR(-ENOMEM);
6572 
6573 	em->start = start;
6574 	em->orig_start = orig_start;
6575 	em->mod_start = start;
6576 	em->mod_len = len;
6577 	em->len = len;
6578 	em->block_len = block_len;
6579 	em->block_start = block_start;
6580 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6581 	em->orig_block_len = orig_block_len;
6582 	em->ram_bytes = ram_bytes;
6583 	em->generation = -1;
6584 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6585 	if (type == BTRFS_ORDERED_PREALLOC)
6586 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
6587 
6588 	do {
6589 		btrfs_drop_extent_cache(inode, em->start,
6590 				em->start + em->len - 1, 0);
6591 		write_lock(&em_tree->lock);
6592 		ret = add_extent_mapping(em_tree, em, 1);
6593 		write_unlock(&em_tree->lock);
6594 	} while (ret == -EEXIST);
6595 
6596 	if (ret) {
6597 		free_extent_map(em);
6598 		return ERR_PTR(ret);
6599 	}
6600 
6601 	return em;
6602 }
6603 
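/*
 * get_block_t callback for O_DIRECT: map the requested range, reusing
 * the existing extent when nocow is safe and allocating a new one
 * otherwise, then fill in the buffer_head for fs/direct-io.c.
 */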
6605 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
6606 				   struct buffer_head *bh_result, int create)
6607 {
6608 	struct extent_map *em;
6609 	struct btrfs_root *root = BTRFS_I(inode)->root;
6610 	struct extent_state *cached_state = NULL;
6611 	u64 start = iblock << inode->i_blkbits;
6612 	u64 lockstart, lockend;
6613 	u64 len = bh_result->b_size;
6614 	struct btrfs_trans_handle *trans;
6615 	int unlock_bits = EXTENT_LOCKED;
6616 	int ret = 0;
6617 
6618 	if (create)
6619 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
6620 	else
6621 		len = min_t(u64, len, root->sectorsize);
6622 
6623 	lockstart = start;
6624 	lockend = start + len - 1;
6625 
6626 	/*
6627 	 * If this errors out it's because we couldn't invalidate pagecache for
6628 	 * this range and we need to fall back to buffered.
6629 	 */
6630 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6631 		return -ENOTBLK;
6632 
6633 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
6634 	if (IS_ERR(em)) {
6635 		ret = PTR_ERR(em);
6636 		goto unlock_err;
6637 	}
6638 
6639 	/*
6640 	 * Ok for INLINE and COMPRESSED extents we need to fall back on buffered
6641 	 * io.  INLINE is special, and we could probably kludge it in here, but
6642 	 * it's still buffered so for safety let's just fall back to the generic
6643 	 * buffered path.
6644 	 *
6645 	 * For COMPRESSED we _have_ to read the entire extent in so we can
6646 	 * decompress it, so there will be buffering required no matter what we
6647 	 * do, so go ahead and fall back to buffered.
6648 	 *
6649 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
6650 	 * to buffered IO.  Don't blame me, this is the price we pay for using
6651 	 * the generic code.
6652 	 */
6653 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6654 	    em->block_start == EXTENT_MAP_INLINE) {
6655 		free_extent_map(em);
6656 		ret = -ENOTBLK;
6657 		goto unlock_err;
6658 	}
6659 
6660 	/* Just a good old fashioned hole, return */
6661 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6662 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6663 		free_extent_map(em);
6664 		goto unlock_err;
6665 	}
6666 
6667 	/*
6668 	 * We don't allocate a new extent in the following cases
6669 	 *
6670 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6671 	 * existing extent.
6672 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6673 	 * just use the extent.
6674 	 *
6675 	 */
6676 	if (!create) {
6677 		len = min(len, em->len - (start - em->start));
6678 		lockstart = start + len;
6679 		goto unlock;
6680 	}
6681 
6682 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6683 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6684 	     em->block_start != EXTENT_MAP_HOLE)) {
6685 		int type;
6686 		int ret;
6687 		u64 block_start, orig_start, orig_block_len, ram_bytes;
6688 
6689 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6690 			type = BTRFS_ORDERED_PREALLOC;
6691 		else
6692 			type = BTRFS_ORDERED_NOCOW;
6693 		len = min(len, em->len - (start - em->start));
6694 		block_start = em->block_start + (start - em->start);
6695 
6696 		/*
6697 		 * we're not going to log anything, but we do need
6698 		 * to make sure the current transaction stays open
6699 		 * while we look for nocow cross refs
6700 		 */
6701 		trans = btrfs_join_transaction(root);
6702 		if (IS_ERR(trans))
6703 			goto must_cow;
6704 
6705 		if (can_nocow_extent(trans, inode, start, &len, &orig_start,
6706 				     &orig_block_len, &ram_bytes) == 1) {
6707 			if (type == BTRFS_ORDERED_PREALLOC) {
6708 				free_extent_map(em);
6709 				em = create_pinned_em(inode, start, len,
6710 						       orig_start,
6711 						       block_start, len,
6712 						       orig_block_len,
6713 						       ram_bytes, type);
6714 				if (IS_ERR(em)) {
6715 					btrfs_end_transaction(trans, root);
6716 					goto unlock_err;
6717 				}
6718 			}
6719 
6720 			ret = btrfs_add_ordered_extent_dio(inode, start,
6721 					   block_start, len, len, type);
6722 			btrfs_end_transaction(trans, root);
6723 			if (ret) {
6724 				free_extent_map(em);
6725 				goto unlock_err;
6726 			}
6727 			goto unlock;
6728 		}
6729 		btrfs_end_transaction(trans, root);
6730 	}
6731 must_cow:
6732 	/*
6733 	 * this will cow the extent, reset the len in case we changed
6734 	 * it above
6735 	 */
6736 	len = bh_result->b_size;
6737 	free_extent_map(em);
6738 	em = btrfs_new_extent_direct(inode, start, len);
6739 	if (IS_ERR(em)) {
6740 		ret = PTR_ERR(em);
6741 		goto unlock_err;
6742 	}
6743 	len = min(len, em->len - (start - em->start));
6744 unlock:
6745 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6746 		inode->i_blkbits;
6747 	bh_result->b_size = len;
6748 	bh_result->b_bdev = em->bdev;
6749 	set_buffer_mapped(bh_result);
6750 	if (create) {
6751 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6752 			set_buffer_new(bh_result);
6753 
6754 		/*
6755 		 * Need to update the i_size under the extent lock so buffered
6756 		 * readers will get the updated i_size when we unlock.
6757 		 */
6758 		if (start + len > i_size_read(inode))
6759 			i_size_write(inode, start + len);
6760 
6761 		spin_lock(&BTRFS_I(inode)->lock);
6762 		BTRFS_I(inode)->outstanding_extents++;
6763 		spin_unlock(&BTRFS_I(inode)->lock);
6764 
6765 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6766 				     lockstart + len - 1, EXTENT_DELALLOC, NULL,
6767 				     &cached_state, GFP_NOFS);
6768 		BUG_ON(ret);
6769 	}
6770 
6771 	/*
6772 	 * In the case of write we need to clear and unlock the entire range,
6773 	 * in the case of read we need to unlock only the end area that we
6774 	 * aren't using if there is any left over space.
6775 	 */
6776 	if (lockstart < lockend) {
6777 		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6778 				 lockend, unlock_bits, 1, 0,
6779 				 &cached_state, GFP_NOFS);
6780 	} else {
6781 		free_extent_state(cached_state);
6782 	}
6783 
6784 	free_extent_map(em);
6785 
6786 	return 0;
6787 
6788 unlock_err:
6789 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6790 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6791 	return ret;
6792 }
6793 
6794 struct btrfs_dio_private {
6795 	struct inode *inode;
6796 	u64 logical_offset;
6797 	u64 disk_bytenr;
6798 	u64 bytes;
6799 	void *private;
6800 
6801 	/* number of bios pending for this dio */
6802 	atomic_t pending_bios;
6803 
6804 	/* IO errors */
6805 	int errors;
6806 
6807 	/* orig_bio is our btrfs_io_bio */
6808 	struct bio *orig_bio;
6809 
6810 	/* dio_bio came from fs/direct-io.c */
6811 	struct bio *dio_bio;
6812 };
6813 
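/*
 * end_io for DIO reads: verify the csum of each bvec against the
 * private state stored in the io_tree, then unlock the extent range
 * and complete the original dio_bio.
 */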
6814 static void btrfs_endio_direct_read(struct bio *bio, int err)
6815 {
6816 	struct btrfs_dio_private *dip = bio->bi_private;
6817 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6818 	struct bio_vec *bvec = bio->bi_io_vec;
6819 	struct inode *inode = dip->inode;
6820 	struct btrfs_root *root = BTRFS_I(inode)->root;
6821 	struct bio *dio_bio;
6822 	u64 start;
6823 
6824 	start = dip->logical_offset;
6825 	do {
6826 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6827 			struct page *page = bvec->bv_page;
6828 			char *kaddr;
6829 			u32 csum = ~(u32)0;
6830 			u64 private = ~(u32)0;
6831 			unsigned long flags;
6832 
6833 			if (get_state_private(&BTRFS_I(inode)->io_tree,
6834 					      start, &private))
6835 				goto failed;
6836 			local_irq_save(flags);
6837 			kaddr = kmap_atomic(page);
6838 			csum = btrfs_csum_data(kaddr + bvec->bv_offset,
6839 					       csum, bvec->bv_len);
6840 			btrfs_csum_final(csum, (char *)&csum);
6841 			kunmap_atomic(kaddr);
6842 			local_irq_restore(flags);
6843 
6844 			flush_dcache_page(bvec->bv_page);
6845 			if (csum != private) {
6846 failed:
6847 				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u private %u",
6848 					(unsigned long long)btrfs_ino(inode),
6849 					(unsigned long long)start,
6850 					csum, (unsigned)private);
6851 				err = -EIO;
6852 			}
6853 		}
6854 
6855 		start += bvec->bv_len;
6856 		bvec++;
6857 	} while (bvec <= bvec_end);
6858 
6859 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6860 		      dip->logical_offset + dip->bytes - 1);
6861 	dio_bio = dip->dio_bio;
6862 
6863 	kfree(dip);
6864 
6865 	/* If we had a csum failure make sure to clear the uptodate flag */
6866 	if (err)
6867 		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6868 	dio_end_io(dio_bio, err);
6869 	bio_put(bio);
6870 }
6871 
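/*
 * end_io for DIO writes: a single dio may span several ordered
 * extents, so keep completing ordered extents until the whole range
 * has been accounted for.
 */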
6872 static void btrfs_endio_direct_write(struct bio *bio, int err)
6873 {
6874 	struct btrfs_dio_private *dip = bio->bi_private;
6875 	struct inode *inode = dip->inode;
6876 	struct btrfs_root *root = BTRFS_I(inode)->root;
6877 	struct btrfs_ordered_extent *ordered = NULL;
6878 	u64 ordered_offset = dip->logical_offset;
6879 	u64 ordered_bytes = dip->bytes;
6880 	struct bio *dio_bio;
6881 	int ret;
6882 
6883 	if (err)
6884 		goto out_done;
6885 again:
6886 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6887 						   &ordered_offset,
6888 						   ordered_bytes, !err);
6889 	if (!ret)
6890 		goto out_test;
6891 
6892 	ordered->work.func = finish_ordered_fn;
6893 	ordered->work.flags = 0;
6894 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6895 			   &ordered->work);
6896 out_test:
6897 	/*
6898 	 * our bio might span multiple ordered extents.  If we haven't
6899 	 * completed the accounting for the whole dio, go back and try again
6900 	 */
6901 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6902 		ordered_bytes = dip->logical_offset + dip->bytes -
6903 			ordered_offset;
6904 		ordered = NULL;
6905 		goto again;
6906 	}
6907 out_done:
6908 	dio_bio = dip->dio_bio;
6909 
6910 	kfree(dip);
6911 
6912 	/* If we had an error make sure to clear the uptodate flag */
6913 	if (err)
6914 		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6915 	dio_end_io(dio_bio, err);
6916 	bio_put(bio);
6917 }
6918 
6919 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6920 				    struct bio *bio, int mirror_num,
6921 				    unsigned long bio_flags, u64 offset)
6922 {
6923 	int ret;
6924 	struct btrfs_root *root = BTRFS_I(inode)->root;
6925 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6926 	BUG_ON(ret); /* -ENOMEM */
6927 	return 0;
6928 }
6929 
6930 static void btrfs_end_dio_bio(struct bio *bio, int err)
6931 {
6932 	struct btrfs_dio_private *dip = bio->bi_private;
6933 
6934 	if (err) {
6935 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6936 		      "sector %#Lx len %u err no %d\n",
6937 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
6938 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6939 		dip->errors = 1;
6940 
6941 		/*
6942 		 * before the atomic variable goes to zero, we must make sure
6943 		 * dip->errors is perceived to be set.
6944 		 */
6945 		smp_mb__before_atomic_dec();
6946 	}
6947 
6948 	/* if there are more bios still pending for this dio, just exit */
6949 	if (!atomic_dec_and_test(&dip->pending_bios))
6950 		goto out;
6951 
6952 	if (dip->errors) {
6953 		bio_io_error(dip->orig_bio);
6954 	} else {
6955 		set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
6956 		bio_endio(dip->orig_bio, 0);
6957 	}
6958 out:
6959 	bio_put(bio);
6960 }
6961 
6962 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6963 				       u64 first_sector, gfp_t gfp_flags)
6964 {
6965 	int nr_vecs = bio_get_nr_vecs(bdev);
6966 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6967 }
6968 
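/*
 * send one piece of the dio down the stack: reads get a workqueue
 * end_io hook, writes either csum inline or defer csumming to the
 * async submit workers, and everything funnels into btrfs_map_bio.
 */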
6969 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6970 					 int rw, u64 file_offset, int skip_sum,
6971 					 int async_submit)
6972 {
6973 	int write = rw & REQ_WRITE;
6974 	struct btrfs_root *root = BTRFS_I(inode)->root;
6975 	int ret;
6976 
6977 	if (async_submit)
6978 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
6979 
6980 	bio_get(bio);
6981 
6982 	if (!write) {
6983 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6984 		if (ret)
6985 			goto err;
6986 	}
6987 
6988 	if (skip_sum)
6989 		goto map;
6990 
6991 	if (write && async_submit) {
6992 		ret = btrfs_wq_submit_bio(root->fs_info,
6993 				   inode, rw, bio, 0, 0,
6994 				   file_offset,
6995 				   __btrfs_submit_bio_start_direct_io,
6996 				   __btrfs_submit_bio_done);
6997 		goto err;
6998 	} else if (write) {
6999 		/*
7000 		 * If we aren't doing async submit, calculate the csum of the
7001 		 * bio now.
7002 		 */
7003 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
7004 		if (ret)
7005 			goto err;
7006 	} else if (!skip_sum) {
7007 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
7008 		if (ret)
7009 			goto err;
7010 	}
7011 
7012 map:
7013 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
7014 err:
7015 	bio_put(bio);
7016 	return ret;
7017 }
7018 
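/*
 * split the original dio bio into bios that each fit within a single
 * chunk mapping, submitting one as soon as it fills up.  pending_bios
 * stays elevated while bios are being built so the end_io handler
 * can't free the dip under us.
 */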
7019 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7020 				    int skip_sum)
7021 {
7022 	struct inode *inode = dip->inode;
7023 	struct btrfs_root *root = BTRFS_I(inode)->root;
7024 	struct bio *bio;
7025 	struct bio *orig_bio = dip->orig_bio;
7026 	struct bio_vec *bvec = orig_bio->bi_io_vec;
7027 	u64 start_sector = orig_bio->bi_sector;
7028 	u64 file_offset = dip->logical_offset;
7029 	u64 submit_len = 0;
7030 	u64 map_length;
7031 	int nr_pages = 0;
7032 	int ret = 0;
7033 	int async_submit = 0;
7034 
7035 	map_length = orig_bio->bi_size;
7036 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
7037 			      &map_length, NULL, 0);
7038 	if (ret) {
7039 		bio_put(orig_bio);
7040 		return -EIO;
7041 	}
7042 	if (map_length >= orig_bio->bi_size) {
7043 		bio = orig_bio;
7044 		goto submit;
7045 	}
7046 
7047 	/* async crcs make it difficult to collect full stripe writes. */
7048 	if (btrfs_get_alloc_profile(root, 1) &
7049 	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7050 		async_submit = 0;
7051 	else
7052 		async_submit = 1;
7053 
7054 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
7055 	if (!bio)
7056 		return -ENOMEM;
7057 	bio->bi_private = dip;
7058 	bio->bi_end_io = btrfs_end_dio_bio;
7059 	atomic_inc(&dip->pending_bios);
7060 
7061 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
7062 		if (unlikely(map_length < submit_len + bvec->bv_len ||
7063 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
7064 				 bvec->bv_offset) < bvec->bv_len)) {
7065 			/*
7066 			 * inc the count before we submit the bio so the
7067 			 * end IO handler can't run before we're done
7068 			 * setting it up.  Otherwise the dip might get
7069 			 * freed before we finish with it
7070 			 */
7071 			atomic_inc(&dip->pending_bios);
7072 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
7073 						     file_offset, skip_sum,
7074 						     async_submit);
7075 			if (ret) {
7076 				bio_put(bio);
7077 				atomic_dec(&dip->pending_bios);
7078 				goto out_err;
7079 			}
7080 
7081 			start_sector += submit_len >> 9;
7082 			file_offset += submit_len;
7083 
7084 			submit_len = 0;
7085 			nr_pages = 0;
7086 
7087 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
7088 						  start_sector, GFP_NOFS);
7089 			if (!bio)
7090 				goto out_err;
7091 			bio->bi_private = dip;
7092 			bio->bi_end_io = btrfs_end_dio_bio;
7093 
7094 			map_length = orig_bio->bi_size;
7095 			ret = btrfs_map_block(root->fs_info, rw,
7096 					      start_sector << 9,
7097 					      &map_length, NULL, 0);
7098 			if (ret) {
7099 				bio_put(bio);
7100 				goto out_err;
7101 			}
7102 		} else {
7103 			submit_len += bvec->bv_len;
7104 			nr_pages++;
7105 			bvec++;
7106 		}
7107 	}
7108 
7109 submit:
7110 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
7111 				     async_submit);
7112 	if (!ret)
7113 		return 0;
7114 
7115 	bio_put(bio);
7116 out_err:
7117 	dip->errors = 1;
7118 	/*
7119 	 * before the atomic variable goes to zero, we must
7120 	 * make sure dip->errors is perceived to be set.
7121 	 */
7122 	smp_mb__before_atomic_dec();
7123 	if (atomic_dec_and_test(&dip->pending_bios))
7124 		bio_io_error(dip->orig_bio);
7125 
7126 	/* bio_end_io() will handle the error, so we needn't return it */
7127 	return 0;
7128 }
7129 
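/*
 * entry point from the generic DIO code: clone the dio_bio, wrap it in
 * a btrfs_dio_private and hand it to btrfs_submit_direct_hook; on
 * setup failure the ordered extent created for a write is cleaned up
 * here.
 */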
7130 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7131 				struct inode *inode, loff_t file_offset)
7132 {
7133 	struct btrfs_root *root = BTRFS_I(inode)->root;
7134 	struct btrfs_dio_private *dip;
7135 	struct bio *io_bio;
7136 	int skip_sum;
7137 	int write = rw & REQ_WRITE;
7138 	int ret = 0;
7139 
7140 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7141 
7142 	io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
7143 
7144 	if (!io_bio) {
7145 		ret = -ENOMEM;
7146 		goto free_ordered;
7147 	}
7148 
7149 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
7150 	if (!dip) {
7151 		ret = -ENOMEM;
7152 		goto free_io_bio;
7153 	}
7154 
7155 	dip->private = dio_bio->bi_private;
7156 	dip->inode = inode;
7157 	dip->logical_offset = file_offset;
7158 	dip->bytes = dio_bio->bi_size;
7159 	dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
7160 	io_bio->bi_private = dip;
7161 	dip->errors = 0;
7162 	dip->orig_bio = io_bio;
7163 	dip->dio_bio = dio_bio;
7164 	atomic_set(&dip->pending_bios, 0);
7165 
7166 	if (write)
7167 		io_bio->bi_end_io = btrfs_endio_direct_write;
7168 	else
7169 		io_bio->bi_end_io = btrfs_endio_direct_read;
7170 
7171 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7172 	if (!ret)
7173 		return;
7174 
7175 free_io_bio:
7176 	bio_put(io_bio);
7177 
7178 free_ordered:
7179 	/*
7180 	 * If this is a write, we need to clean up the reserved space and kill
7181 	 * the ordered extent.
7182 	 */
7183 	if (write) {
7184 		struct btrfs_ordered_extent *ordered;
7185 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
7186 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7187 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7188 			btrfs_free_reserved_extent(root, ordered->start,
7189 						   ordered->disk_len);
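		/*
		 * one put for the lookup reference above, and one to drop
		 * the ordered extent itself, since it will never be
		 * completed by the usual endio path
		 */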
7190 		btrfs_put_ordered_extent(ordered);
7191 		btrfs_put_ordered_extent(ordered);
7192 	}
7193 	bio_endio(dio_bio, ret);
7194 }
7195 
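/*
 * validate O_DIRECT alignment: the offset and every iovec must be
 * sectorsize aligned, and reads must not repeat an iov_base, which
 * would otherwise trip the csum verification on the way back.
 */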
7196 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7197 			const struct iovec *iov, loff_t offset,
7198 			unsigned long nr_segs)
7199 {
7200 	int seg;
7201 	int i;
7202 	size_t size;
7203 	unsigned long addr;
7204 	unsigned blocksize_mask = root->sectorsize - 1;
7205 	ssize_t retval = -EINVAL;
7206 	loff_t end = offset;
7207 
7208 	if (offset & blocksize_mask)
7209 		goto out;
7210 
7211 	/* Check the memory alignment.  Blocks cannot straddle pages */
7212 	for (seg = 0; seg < nr_segs; seg++) {
7213 		addr = (unsigned long)iov[seg].iov_base;
7214 		size = iov[seg].iov_len;
7215 		end += size;
7216 		if ((addr & blocksize_mask) || (size & blocksize_mask))
7217 			goto out;
7218 
7219 		/* If this is a write we don't need to check anymore */
7220 		if (rw & WRITE)
7221 			continue;
7222 
7223 		/*
7224 		 * Check to make sure we don't have duplicate iov_base's in this
7225 		 * iovec, if so return EINVAL, otherwise we'll get csum errors
7226 		 * when reading back.
7227 		 */
7228 		for (i = seg + 1; i < nr_segs; i++) {
7229 			if (iov[seg].iov_base == iov[i].iov_base)
7230 				goto out;
7231 		}
7232 	}
7233 	retval = 0;
7234 out:
7235 	return retval;
7236 }
7237 
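/*
 * top-level .direct_IO: wait out ordered extents (compressed writes
 * need more than filemap_write_and_wait_range), reserve delalloc space
 * for writes and give back whatever __blockdev_direct_IO didn't use.
 */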
7238 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
7239 			const struct iovec *iov, loff_t offset,
7240 			unsigned long nr_segs)
7241 {
7242 	struct file *file = iocb->ki_filp;
7243 	struct inode *inode = file->f_mapping->host;
7244 	size_t count = 0;
7245 	int flags = 0;
7246 	bool wakeup = true;
7247 	bool relock = false;
7248 	ssize_t ret;
7249 
7250 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
7251 			    offset, nr_segs))
7252 		return 0;
7253 
7254 	atomic_inc(&inode->i_dio_count);
7255 	smp_mb__after_atomic_inc();
7256 
7257 	/*
7258 	 * The generic stuff only does filemap_write_and_wait_range, which isn't
7259 	 * enough if we've written compressed pages to this area, so we need to
7260 	 * call btrfs_wait_ordered_range to make absolutely sure that any
7261 	 * outstanding dirty pages are on disk.
7262 	 */
7263 	count = iov_length(iov, nr_segs);
7264 	btrfs_wait_ordered_range(inode, offset, count);
7265 
7266 	if (rw & WRITE) {
7267 		/*
7268 		 * If the write DIO is beyond the EOF, we need to update
7269 		 * the isize, but it is protected by i_mutex.  So we can
7270 		 * not unlock the i_mutex in this case.
7271 		 */
7272 		if (offset + count <= inode->i_size) {
7273 			mutex_unlock(&inode->i_mutex);
7274 			relock = true;
7275 		}
7276 		ret = btrfs_delalloc_reserve_space(inode, count);
7277 		if (ret)
7278 			goto out;
7279 	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
7280 				     &BTRFS_I(inode)->runtime_flags))) {
7281 		inode_dio_done(inode);
7282 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
7283 		wakeup = false;
7284 	}
7285 
7286 	ret = __blockdev_direct_IO(rw, iocb, inode,
7287 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
7288 			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
7289 			btrfs_submit_direct, flags);
7290 	if (rw & WRITE) {
7291 		if (ret < 0 && ret != -EIOCBQUEUED)
7292 			btrfs_delalloc_release_space(inode, count);
7293 		else if (ret >= 0 && (size_t)ret < count)
7294 			btrfs_delalloc_release_space(inode,
7295 						     count - (size_t)ret);
7296 		else
7297 			btrfs_delalloc_release_metadata(inode, 0);
7298 	}
7299 out:
7300 	if (wakeup)
7301 		inode_dio_done(inode);
7302 	if (relock)
7303 		mutex_lock(&inode->i_mutex);
7304 
7305 	return ret;
7306 }
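
/*
 * Worked example for the release logic above (illustrative): a 1MiB DIO
 * write reserves 1MiB of delalloc space up front.  If
 * __blockdev_direct_IO() comes back short with ret == 512KiB, only
 * count - ret == 512KiB of the reservation is released here; on a full
 * 1MiB completion just btrfs_delalloc_release_metadata(inode, 0) runs,
 * leaving the data reservation to the ordered extent that was created.
 */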
7307 
7308 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
7309 
7310 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7311 		__u64 start, __u64 len)
7312 {
7313 	int	ret;
7314 
7315 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7316 	if (ret)
7317 		return ret;
7318 
7319 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
7320 }
7321 
7322 int btrfs_readpage(struct file *file, struct page *page)
7323 {
7324 	struct extent_io_tree *tree;
7325 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7326 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
7327 }
7328 
7329 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
7330 {
7331 	struct extent_io_tree *tree;
7332 
7333 
7334 	if (current->flags & PF_MEMALLOC) {
7335 		redirty_page_for_writepage(wbc, page);
7336 		unlock_page(page);
7337 		return 0;
7338 	}
7339 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7340 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
7341 }
7342 
7343 static int btrfs_writepages(struct address_space *mapping,
7344 			    struct writeback_control *wbc)
7345 {
7346 	struct extent_io_tree *tree;
7347 
7348 	tree = &BTRFS_I(mapping->host)->io_tree;
7349 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7350 }
7351 
7352 static int
7353 btrfs_readpages(struct file *file, struct address_space *mapping,
7354 		struct list_head *pages, unsigned nr_pages)
7355 {
7356 	struct extent_io_tree *tree;
7357 	tree = &BTRFS_I(mapping->host)->io_tree;
7358 	return extent_readpages(tree, mapping, pages, nr_pages,
7359 				btrfs_get_extent);
7360 }
7361 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7362 {
7363 	struct extent_io_tree *tree;
7364 	struct extent_map_tree *map;
7365 	int ret;
7366 
7367 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7368 	map = &BTRFS_I(page->mapping->host)->extent_tree;
7369 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
7370 	if (ret == 1) {
7371 		ClearPagePrivate(page);
7372 		set_page_private(page, 0);
7373 		page_cache_release(page);
7374 	}
7375 	return ret;
7376 }
7377 
7378 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7379 {
7380 	if (PageWriteback(page) || PageDirty(page))
7381 		return 0;
7382 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
7383 }
7384 
7385 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
7386 				 unsigned int length)
7387 {
7388 	struct inode *inode = page->mapping->host;
7389 	struct extent_io_tree *tree;
7390 	struct btrfs_ordered_extent *ordered;
7391 	struct extent_state *cached_state = NULL;
7392 	u64 page_start = page_offset(page);
7393 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
7394 
7395 	/*
7396 	 * we have the page locked, so new writeback can't start,
7397 	 * and the dirty bit won't be cleared while we are here.
7398 	 *
7399 	 * Wait for IO on this page so that we can safely clear
7400 	 * the PagePrivate2 bit and do ordered accounting
7401 	 */
7402 	wait_on_page_writeback(page);
7403 
7404 	tree = &BTRFS_I(inode)->io_tree;
7405 	if (offset) {
7406 		btrfs_releasepage(page, GFP_NOFS);
7407 		return;
7408 	}
7409 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7410 	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
7411 	if (ordered) {
7412 		/*
7413 		 * IO on this page will never be started, so we need
7414 		 * to account for any ordered extents now
7415 		 */
7416 		clear_extent_bit(tree, page_start, page_end,
7417 				 EXTENT_DIRTY | EXTENT_DELALLOC |
7418 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7419 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
7420 		/*
7421 		 * whoever cleared the private bit is responsible
7422 		 * for the finish_ordered_io
7423 		 */
7424 		if (TestClearPagePrivate2(page) &&
7425 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
7426 						   PAGE_CACHE_SIZE, 1)) {
7427 			btrfs_finish_ordered_io(ordered);
7428 		}
7429 		btrfs_put_ordered_extent(ordered);
7430 		cached_state = NULL;
7431 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7432 	}
7433 	clear_extent_bit(tree, page_start, page_end,
7434 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
7435 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
7436 		 &cached_state, GFP_NOFS);
7437 	__btrfs_releasepage(page, GFP_NOFS);
7438 
7439 	ClearPageChecked(page);
7440 	if (PagePrivate(page)) {
7441 		ClearPagePrivate(page);
7442 		set_page_private(page, 0);
7443 		page_cache_release(page);
7444 	}
7445 }
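
/*
 * Note on the offset check above (illustrative): a partial-page
 * invalidate (offset != 0) only attempts btrfs_releasepage(); the
 * delalloc/ordered teardown and extent-state clearing run solely for
 * whole-page invalidates where offset == 0.
 */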
7446 
7447 /*
7448  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7449  * called from a page fault handler when a page is first dirtied. Hence we must
7450  * be careful to check for EOF conditions here. We set the page up correctly
7451  * for a written page which means we get ENOSPC checking when writing into
7452  * holes and correct delalloc and unwritten extent mapping on filesystems that
7453  * support these features.
7454  *
7455  * We are not allowed to take the i_mutex here so we have to play games to
7456  * protect against truncate races as the page could now be beyond EOF.  Because
7457  * vmtruncate() writes the inode size before removing pages, once we have the
7458  * page lock we can determine safely if the page is beyond EOF. If it is not
7459  * beyond EOF, then the page is guaranteed safe against truncation until we
7460  * unlock the page.
7461  */
7462 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
7463 {
7464 	struct page *page = vmf->page;
7465 	struct inode *inode = file_inode(vma->vm_file);
7466 	struct btrfs_root *root = BTRFS_I(inode)->root;
7467 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7468 	struct btrfs_ordered_extent *ordered;
7469 	struct extent_state *cached_state = NULL;
7470 	char *kaddr;
7471 	unsigned long zero_start;
7472 	loff_t size;
7473 	int ret;
7474 	int reserved = 0;
7475 	u64 page_start;
7476 	u64 page_end;
7477 
7478 	sb_start_pagefault(inode->i_sb);
7479 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
7480 	if (!ret) {
7481 		ret = file_update_time(vma->vm_file);
7482 		reserved = 1;
7483 	}
7484 	if (ret) {
7485 		if (ret == -ENOMEM)
7486 			ret = VM_FAULT_OOM;
7487 		else /* -ENOSPC, -EIO, etc */
7488 			ret = VM_FAULT_SIGBUS;
7489 		if (reserved)
7490 			goto out;
7491 		goto out_noreserve;
7492 	}
7493 
7494 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
7495 again:
7496 	lock_page(page);
7497 	size = i_size_read(inode);
7498 	page_start = page_offset(page);
7499 	page_end = page_start + PAGE_CACHE_SIZE - 1;
7500 
7501 	if ((page->mapping != inode->i_mapping) ||
7502 	    (page_start >= size)) {
7503 		/* page got truncated out from underneath us */
7504 		goto out_unlock;
7505 	}
7506 	wait_on_page_writeback(page);
7507 
7508 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
7509 	set_page_extent_mapped(page);
7510 
7511 	/*
7512 	 * we can't set the delalloc bits if there are pending ordered
7513 	 * extents.  Drop our locks and wait for them to finish
7514 	 */
7515 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
7516 	if (ordered) {
7517 		unlock_extent_cached(io_tree, page_start, page_end,
7518 				     &cached_state, GFP_NOFS);
7519 		unlock_page(page);
7520 		btrfs_start_ordered_extent(inode, ordered, 1);
7521 		btrfs_put_ordered_extent(ordered);
7522 		goto again;
7523 	}
7524 
7525 	/*
7526 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
7527 	 * if it was already dirty, so for space accounting reasons we need to
7528 	 * clear any delalloc bits for the range we are about to save.  There
7529 	 * is probably a better way to do this, but for now keep consistent with
7530 	 * prepare_pages in the normal write path.
7531 	 */
7532 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
7533 			  EXTENT_DIRTY | EXTENT_DELALLOC |
7534 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
7535 			  0, 0, &cached_state, GFP_NOFS);
7536 
7537 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7538 					&cached_state);
7539 	if (ret) {
7540 		unlock_extent_cached(io_tree, page_start, page_end,
7541 				     &cached_state, GFP_NOFS);
7542 		ret = VM_FAULT_SIGBUS;
7543 		goto out_unlock;
7544 	}
7545 	ret = 0;
7546 
7547 	/* page is wholly or partially inside EOF */
7548 	if (page_start + PAGE_CACHE_SIZE > size)
7549 		zero_start = size & ~PAGE_CACHE_MASK;
7550 	else
7551 		zero_start = PAGE_CACHE_SIZE;
7552 
7553 	if (zero_start != PAGE_CACHE_SIZE) {
7554 		kaddr = kmap(page);
7555 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7556 		flush_dcache_page(page);
7557 		kunmap(page);
7558 	}
7559 	ClearPageChecked(page);
7560 	set_page_dirty(page);
7561 	SetPageUptodate(page);
7562 
7563 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
7564 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
7565 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
7566 
7567 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
7568 
7569 out_unlock:
7570 	if (!ret) {
7571 		sb_end_pagefault(inode->i_sb);
7572 		return VM_FAULT_LOCKED;
7573 	}
7574 	unlock_page(page);
7575 out:
7576 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
7577 out_noreserve:
7578 	sb_end_pagefault(inode->i_sb);
7579 	return ret;
7580 }
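
/*
 * Worked example of the EOF zeroing above (illustrative): with a
 * 4096-byte PAGE_CACHE_SIZE and i_size == 10000, the page covering
 * offsets 8192..12287 straddles EOF, so zero_start = 10000 &
 * ~PAGE_CACHE_MASK = 1808 and bytes 1808..4095 of the page are zeroed;
 * a page wholly inside EOF gets zero_start == PAGE_CACHE_SIZE and is
 * left untouched.
 */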
7581 
7582 static int btrfs_truncate(struct inode *inode)
7583 {
7584 	struct btrfs_root *root = BTRFS_I(inode)->root;
7585 	struct btrfs_block_rsv *rsv;
7586 	int ret = 0;
7587 	int err = 0;
7588 	struct btrfs_trans_handle *trans;
7589 	u64 mask = root->sectorsize - 1;
7590 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
7591 
7592 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
7593 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
7594 
7595 	/*
7596 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
7597 	 * 3 things going on here
7598 	 *
7599 	 * 1) We need to reserve space for our orphan item and the space to
7600 	 * delete our orphan item.  Lord knows we don't want to have a dangling
7601 	 * orphan item because we didn't reserve space to remove it.
7602 	 *
7603 	 * 2) We need to reserve space to update our inode.
7604 	 *
7605 	 * 3) We need to have something to cache all the space that is going to
7606 	 * be freed up by the truncate operation, but also have some slack
7607 	 * space reserved in case it uses space during the truncate (thank you
7608 	 * very much snapshotting).
7609 	 *
7610 	 * And we need these to all be separate.  The fact is we can use a lot of
7611 	 * space doing the truncate, and we have no earthly idea how much space
7612 	 * we will use, so we need the truncate reservation to be separate so it
7613 	 * doesn't end up using space reserved for updating the inode or
7614 	 * removing the orphan item.  We also need to be able to stop the
7615 	 * transaction and start a new one, which means we need to be able to
7616 	 * update the inode several times, and we have no way of knowing how
7617 	 * many times that will be, so we can't just reserve 1 item for the
7618 	 * entirety of the operation, so that has to be done separately as well.
7619 	 * Then there is the orphan item, which does indeed need to be held on
7620 	 * to for the whole operation, and we need nobody to touch this reserved
7621 	 * space except the orphan code.
7622 	 *
7623 	 * So that leaves us with
7624 	 *
7625 	 * 1) root->orphan_block_rsv - for the orphan deletion.
7626 	 * 2) rsv - for the truncate reservation, which we will steal from the
7627 	 * transaction reservation.
7628 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
7629 	 * updating the inode.
7630 	 */
7631 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
7632 	if (!rsv)
7633 		return -ENOMEM;
7634 	rsv->size = min_size;
7635 	rsv->failfast = 1;
7636 
7637 	/*
7638 	 * 1 for the truncate slack space
7639 	 * 1 for updating the inode.
7640 	 */
7641 	trans = btrfs_start_transaction(root, 2);
7642 	if (IS_ERR(trans)) {
7643 		err = PTR_ERR(trans);
7644 		goto out;
7645 	}
7646 
7647 	/* Migrate the slack space for the truncate to our reserve */
7648 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7649 				      min_size);
7650 	BUG_ON(ret);
7651 
7652 	/*
7653 	 * setattr is responsible for setting the ordered_data_close flag,
7654 	 * but that is only tested during the last file release.  That
7655 	 * could happen well after the next commit, leaving a great big
7656 	 * window where new writes may get lost if someone chooses to write
7657 	 * to this file after truncating to zero
7658 	 *
7659 	 * The inode doesn't have any dirty data here, and so if we commit
7660 	 * this is a noop.  If someone immediately starts writing to the inode
7661 	 * it is very likely we'll catch some of their writes in this
7662 	 * transaction, and the commit will find this file on the ordered
7663 	 * data list with good things to send down.
7664 	 *
7665 	 * This is a best effort solution, there is still a window where
7666 	 * using truncate to replace the contents of the file will
7667 	 * end up with a zero length file after a crash.
7668 	 */
7669 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7670 					   &BTRFS_I(inode)->runtime_flags))
7671 		btrfs_add_ordered_operation(trans, root, inode);
7672 
7673 	/*
7674 	 * So if we truncate and then write and fsync we normally would just
7675 	 * write the extents that changed, which is a problem if we need to
7676 	 * first truncate that entire inode.  So set this flag so we write out
7677 	 * all of the extents in the inode to the sync log so we're completely
7678 	 * safe.
7679 	 */
7680 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
7681 	trans->block_rsv = rsv;
7682 
7683 	while (1) {
7684 		ret = btrfs_truncate_inode_items(trans, root, inode,
7685 						 inode->i_size,
7686 						 BTRFS_EXTENT_DATA_KEY);
7687 		if (ret != -ENOSPC) {
7688 			err = ret;
7689 			break;
7690 		}
7691 
7692 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7693 		ret = btrfs_update_inode(trans, root, inode);
7694 		if (ret) {
7695 			err = ret;
7696 			break;
7697 		}
7698 
7699 		btrfs_end_transaction(trans, root);
7700 		btrfs_btree_balance_dirty(root);
7701 
7702 		trans = btrfs_start_transaction(root, 2);
7703 		if (IS_ERR(trans)) {
7704 			ret = err = PTR_ERR(trans);
7705 			trans = NULL;
7706 			break;
7707 		}
7708 
7709 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7710 					      rsv, min_size);
7711 		BUG_ON(ret);	/* shouldn't happen */
7712 		trans->block_rsv = rsv;
7713 	}
7714 
7715 	if (ret == 0 && inode->i_nlink > 0) {
7716 		trans->block_rsv = root->orphan_block_rsv;
7717 		ret = btrfs_orphan_del(trans, inode);
7718 		if (ret)
7719 			err = ret;
7720 	}
7721 
7722 	if (trans) {
7723 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7724 		ret = btrfs_update_inode(trans, root, inode);
7725 		if (ret && !err)
7726 			err = ret;
7727 
7728 		ret = btrfs_end_transaction(trans, root);
7729 		btrfs_btree_balance_dirty(root);
7730 	}
7731 
7732 out:
7733 	btrfs_free_block_rsv(root, rsv);
7734 
7735 	if (ret && !err)
7736 		err = ret;
7737 
7738 	return err;
7739 }
7740 
7741 /*
7742  * create a new subvolume directory/inode (helper for the ioctl).
7743  */
7744 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7745 			     struct btrfs_root *new_root, u64 new_dirid)
7746 {
7747 	struct inode *inode;
7748 	int err;
7749 	u64 index = 0;
7750 
7751 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7752 				new_dirid, new_dirid,
7753 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7754 				&index);
7755 	if (IS_ERR(inode))
7756 		return PTR_ERR(inode);
7757 	inode->i_op = &btrfs_dir_inode_operations;
7758 	inode->i_fop = &btrfs_dir_file_operations;
7759 
7760 	set_nlink(inode, 1);
7761 	btrfs_i_size_write(inode, 0);
7762 
7763 	err = btrfs_update_inode(trans, new_root, inode);
7764 
7765 	iput(inode);
7766 	return err;
7767 }
7768 
7769 struct inode *btrfs_alloc_inode(struct super_block *sb)
7770 {
7771 	struct btrfs_inode *ei;
7772 	struct inode *inode;
7773 
7774 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7775 	if (!ei)
7776 		return NULL;
7777 
7778 	ei->root = NULL;
7779 	ei->generation = 0;
7780 	ei->last_trans = 0;
7781 	ei->last_sub_trans = 0;
7782 	ei->logged_trans = 0;
7783 	ei->delalloc_bytes = 0;
7784 	ei->disk_i_size = 0;
7785 	ei->flags = 0;
7786 	ei->csum_bytes = 0;
7787 	ei->index_cnt = (u64)-1;
7788 	ei->last_unlink_trans = 0;
7789 	ei->last_log_commit = 0;
7790 
7791 	spin_lock_init(&ei->lock);
7792 	ei->outstanding_extents = 0;
7793 	ei->reserved_extents = 0;
7794 
7795 	ei->runtime_flags = 0;
7796 	ei->force_compress = BTRFS_COMPRESS_NONE;
7797 
7798 	ei->delayed_node = NULL;
7799 
7800 	inode = &ei->vfs_inode;
7801 	extent_map_tree_init(&ei->extent_tree);
7802 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7803 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7804 	ei->io_tree.track_uptodate = 1;
7805 	ei->io_failure_tree.track_uptodate = 1;
7806 	atomic_set(&ei->sync_writers, 0);
7807 	mutex_init(&ei->log_mutex);
7808 	mutex_init(&ei->delalloc_mutex);
7809 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7810 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7811 	INIT_LIST_HEAD(&ei->ordered_operations);
7812 	RB_CLEAR_NODE(&ei->rb_node);
7813 
7814 	return inode;
7815 }
7816 
7817 static void btrfs_i_callback(struct rcu_head *head)
7818 {
7819 	struct inode *inode = container_of(head, struct inode, i_rcu);
7820 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7821 }
7822 
7823 void btrfs_destroy_inode(struct inode *inode)
7824 {
7825 	struct btrfs_ordered_extent *ordered;
7826 	struct btrfs_root *root = BTRFS_I(inode)->root;
7827 
7828 	WARN_ON(!hlist_empty(&inode->i_dentry));
7829 	WARN_ON(inode->i_data.nrpages);
7830 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7831 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7832 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7833 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7834 
7835 	/*
7836 	 * This can happen when we create an inode, but somebody else also
7837 	 * created the same inode and we need to destroy the one we already
7838 	 * created.
7839 	 */
7840 	if (!root)
7841 		goto free;
7842 
7843 	/*
7844 	 * Make sure we're properly removed from the ordered operation
7845 	 * lists.
7846 	 */
7847 	smp_mb();
7848 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7849 		spin_lock(&root->fs_info->ordered_root_lock);
7850 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7851 		spin_unlock(&root->fs_info->ordered_root_lock);
7852 	}
7853 
7854 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7855 		     &BTRFS_I(inode)->runtime_flags)) {
7856 		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
7857 			(unsigned long long)btrfs_ino(inode));
7858 		atomic_dec(&root->orphan_inodes);
7859 	}
7860 
7861 	while (1) {
7862 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7863 		if (!ordered)
7864 			break;
7865 		else {
7866 			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
7867 				(unsigned long long)ordered->file_offset,
7868 				(unsigned long long)ordered->len);
7869 			btrfs_remove_ordered_extent(inode, ordered);
7870 			btrfs_put_ordered_extent(ordered);
7871 			btrfs_put_ordered_extent(ordered);
7872 		}
7873 	}
7874 	inode_tree_del(inode);
7875 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7876 free:
7877 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7878 }
7879 
7880 int btrfs_drop_inode(struct inode *inode)
7881 {
7882 	struct btrfs_root *root = BTRFS_I(inode)->root;
7883 
7884 	if (root == NULL)
7885 		return 1;
7886 
7887 	/* the snap/subvol tree is on deleting */
7888 	if (btrfs_root_refs(&root->root_item) == 0 &&
7889 	    root != root->fs_info->tree_root)
7890 		return 1;
7891 	else
7892 		return generic_drop_inode(inode);
7893 }
7894 
7895 static void init_once(void *foo)
7896 {
7897 	struct btrfs_inode *ei = (struct btrfs_inode *)foo;
7898 
7899 	inode_init_once(&ei->vfs_inode);
7900 }
7901 
7902 void btrfs_destroy_cachep(void)
7903 {
7904 	/*
7905 	 * Make sure all delayed rcu free inodes are flushed before we
7906 	 * destroy cache.
7907 	 */
7908 	rcu_barrier();
7909 	if (btrfs_inode_cachep)
7910 		kmem_cache_destroy(btrfs_inode_cachep);
7911 	if (btrfs_trans_handle_cachep)
7912 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7913 	if (btrfs_transaction_cachep)
7914 		kmem_cache_destroy(btrfs_transaction_cachep);
7915 	if (btrfs_path_cachep)
7916 		kmem_cache_destroy(btrfs_path_cachep);
7917 	if (btrfs_free_space_cachep)
7918 		kmem_cache_destroy(btrfs_free_space_cachep);
7919 	if (btrfs_delalloc_work_cachep)
7920 		kmem_cache_destroy(btrfs_delalloc_work_cachep);
7921 }
7922 
7923 int btrfs_init_cachep(void)
7924 {
7925 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7926 			sizeof(struct btrfs_inode), 0,
7927 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7928 	if (!btrfs_inode_cachep)
7929 		goto fail;
7930 
7931 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
7932 			sizeof(struct btrfs_trans_handle), 0,
7933 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7934 	if (!btrfs_trans_handle_cachep)
7935 		goto fail;
7936 
7937 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
7938 			sizeof(struct btrfs_transaction), 0,
7939 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7940 	if (!btrfs_transaction_cachep)
7941 		goto fail;
7942 
7943 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
7944 			sizeof(struct btrfs_path), 0,
7945 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7946 	if (!btrfs_path_cachep)
7947 		goto fail;
7948 
7949 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
7950 			sizeof(struct btrfs_free_space), 0,
7951 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7952 	if (!btrfs_free_space_cachep)
7953 		goto fail;
7954 
7955 	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
7956 			sizeof(struct btrfs_delalloc_work), 0,
7957 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
7958 			NULL);
7959 	if (!btrfs_delalloc_work_cachep)
7960 		goto fail;
7961 
7962 	return 0;
7963 fail:
7964 	btrfs_destroy_cachep();
7965 	return -ENOMEM;
7966 }
7967 
7968 static int btrfs_getattr(struct vfsmount *mnt,
7969 			 struct dentry *dentry, struct kstat *stat)
7970 {
7971 	u64 delalloc_bytes;
7972 	struct inode *inode = dentry->d_inode;
7973 	u32 blocksize = inode->i_sb->s_blocksize;
7974 
7975 	generic_fillattr(inode, stat);
7976 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7977 	stat->blksize = PAGE_CACHE_SIZE;
7978 
7979 	spin_lock(&BTRFS_I(inode)->lock);
7980 	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
7981 	spin_unlock(&BTRFS_I(inode)->lock);
7982 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7983 			ALIGN(delalloc_bytes, blocksize)) >> 9;
7984 	return 0;
7985 }
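
/*
 * Worked example (illustrative): with a 4096-byte blocksize, 5000 bytes
 * on disk and 100 bytes of outstanding delalloc, stat->blocks comes to
 * (ALIGN(5000, 4096) + ALIGN(100, 4096)) >> 9 = (8192 + 4096) / 512 =
 * 24 sectors, so pending delalloc is reported before it reaches disk.
 */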
7986 
7987 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7988 			   struct inode *new_dir, struct dentry *new_dentry)
7989 {
7990 	struct btrfs_trans_handle *trans;
7991 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7992 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7993 	struct inode *new_inode = new_dentry->d_inode;
7994 	struct inode *old_inode = old_dentry->d_inode;
7995 	struct timespec ctime = CURRENT_TIME;
7996 	u64 index = 0;
7997 	u64 root_objectid;
7998 	int ret;
7999 	u64 old_ino = btrfs_ino(old_inode);
8000 
8001 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8002 		return -EPERM;
8003 
8004 	/* we only allow rename subvolume link between subvolumes */
8005 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8006 		return -EXDEV;
8007 
8008 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8009 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
8010 		return -ENOTEMPTY;
8011 
8012 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8013 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8014 		return -ENOTEMPTY;
8015 
8016 
8017 	/* check for collisions, even if the name isn't there */
8018 	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
8019 			     new_dentry->d_name.name,
8020 			     new_dentry->d_name.len);
8021 
8022 	if (ret) {
8023 		if (ret == -EEXIST) {
8024 			/* we shouldn't get -EEXIST
8025 			 * without a new_inode */
8026 			if (!new_inode) {
8027 				WARN_ON(1);
8028 				return ret;
8029 			}
8030 		} else {
8031 			/* maybe -EOVERFLOW */
8032 			return ret;
8033 		}
8034 	}
8035 	ret = 0;
8036 
8037 	/*
8038 	 * we're using rename to replace one file with another,
8039 	 * and the replacement file is large.  Start IO on it now so
8040 	 * we don't add too much work to the end of the transaction
8041 	 */
8042 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
8043 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
8044 		filemap_flush(old_inode->i_mapping);
8045 
8046 	/* close the racy window with snapshot create/destroy ioctl */
8047 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8048 		down_read(&root->fs_info->subvol_sem);
8049 	/*
8050 	 * We want to reserve the absolute worst case amount of items.  So if
8051 	 * both inodes are subvols and we need to unlink them then that would
8052 	 * require 4 item modifications, but if they are both normal inodes it
8053 	 * would require 5 item modifications, so we'll assume they're normal
8054 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
8055 	 * should cover the worst case number of items we'll modify.
8056 	 */
8057 	trans = btrfs_start_transaction(root, 11);
8058 	if (IS_ERR(trans)) {
8059 		ret = PTR_ERR(trans);
8060 		goto out_notrans;
8061 	}
8062 
8063 	if (dest != root)
8064 		btrfs_record_root_in_trans(trans, dest);
8065 
8066 	ret = btrfs_set_inode_index(new_dir, &index);
8067 	if (ret)
8068 		goto out_fail;
8069 
8070 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8071 		/* force a full log commit if a subvolume is involved. */
8072 		root->fs_info->last_trans_log_full_commit = trans->transid;
8073 	} else {
8074 		ret = btrfs_insert_inode_ref(trans, dest,
8075 					     new_dentry->d_name.name,
8076 					     new_dentry->d_name.len,
8077 					     old_ino,
8078 					     btrfs_ino(new_dir), index);
8079 		if (ret)
8080 			goto out_fail;
8081 		/*
8082 		 * this is an ugly little race, but the rename is required
8083 		 * to make sure that if we crash, the inode is either at the
8084 		 * old name or the new one.  pinning the log transaction lets
8085 		 * us make sure we don't allow a log commit to come in after
8086 		 * we unlink the name but before we add the new name back in.
8087 		 */
8088 		btrfs_pin_log_trans(root);
8089 	}
8090 	/*
8091 	 * make sure the inode gets flushed if it is replacing
8092 	 * something.
8093 	 */
8094 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
8095 		btrfs_add_ordered_operation(trans, root, old_inode);
8096 
8097 	inode_inc_iversion(old_dir);
8098 	inode_inc_iversion(new_dir);
8099 	inode_inc_iversion(old_inode);
8100 	old_dir->i_ctime = old_dir->i_mtime = ctime;
8101 	new_dir->i_ctime = new_dir->i_mtime = ctime;
8102 	old_inode->i_ctime = ctime;
8103 
8104 	if (old_dentry->d_parent != new_dentry->d_parent)
8105 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
8106 
8107 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8108 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
8109 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
8110 					old_dentry->d_name.name,
8111 					old_dentry->d_name.len);
8112 	} else {
8113 		ret = __btrfs_unlink_inode(trans, root, old_dir,
8114 					old_dentry->d_inode,
8115 					old_dentry->d_name.name,
8116 					old_dentry->d_name.len);
8117 		if (!ret)
8118 			ret = btrfs_update_inode(trans, root, old_inode);
8119 	}
8120 	if (ret) {
8121 		btrfs_abort_transaction(trans, root, ret);
8122 		goto out_fail;
8123 	}
8124 
8125 	if (new_inode) {
8126 		inode_inc_iversion(new_inode);
8127 		new_inode->i_ctime = CURRENT_TIME;
8128 		if (unlikely(btrfs_ino(new_inode) ==
8129 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8130 			root_objectid = BTRFS_I(new_inode)->location.objectid;
8131 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
8132 						root_objectid,
8133 						new_dentry->d_name.name,
8134 						new_dentry->d_name.len);
8135 			BUG_ON(new_inode->i_nlink == 0);
8136 		} else {
8137 			ret = btrfs_unlink_inode(trans, dest, new_dir,
8138 						 new_dentry->d_inode,
8139 						 new_dentry->d_name.name,
8140 						 new_dentry->d_name.len);
8141 		}
8142 		if (!ret && new_inode->i_nlink == 0) {
8143 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
8144 			BUG_ON(ret);
8145 		}
8146 		if (ret) {
8147 			btrfs_abort_transaction(trans, root, ret);
8148 			goto out_fail;
8149 		}
8150 	}
8151 
8152 	ret = btrfs_add_link(trans, new_dir, old_inode,
8153 			     new_dentry->d_name.name,
8154 			     new_dentry->d_name.len, 0, index);
8155 	if (ret) {
8156 		btrfs_abort_transaction(trans, root, ret);
8157 		goto out_fail;
8158 	}
8159 
8160 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8161 		struct dentry *parent = new_dentry->d_parent;
8162 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
8163 		btrfs_end_log_trans(root);
8164 	}
8165 out_fail:
8166 	btrfs_end_transaction(trans, root);
8167 out_notrans:
8168 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8169 		up_read(&root->fs_info->subvol_sem);
8170 
8171 	return ret;
8172 }
8173 
8174 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8175 {
8176 	struct btrfs_delalloc_work *delalloc_work;
8177 
8178 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8179 				     work);
8180 	if (delalloc_work->wait)
8181 		btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
8182 	else
8183 		filemap_flush(delalloc_work->inode->i_mapping);
8184 
8185 	if (delalloc_work->delay_iput)
8186 		btrfs_add_delayed_iput(delalloc_work->inode);
8187 	else
8188 		iput(delalloc_work->inode);
8189 	complete(&delalloc_work->completion);
8190 }
8191 
8192 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8193 						    int wait, int delay_iput)
8194 {
8195 	struct btrfs_delalloc_work *work;
8196 
8197 	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8198 	if (!work)
8199 		return NULL;
8200 
8201 	init_completion(&work->completion);
8202 	INIT_LIST_HEAD(&work->list);
8203 	work->inode = inode;
8204 	work->wait = wait;
8205 	work->delay_iput = delay_iput;
8206 	work->work.func = btrfs_run_delalloc_work;
8207 
8208 	return work;
8209 }
8210 
8211 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
8212 {
8213 	wait_for_completion(&work->completion);
8214 	kmem_cache_free(btrfs_delalloc_work_cachep, work);
8215 }
8216 
8217 /*
8218  * some fairly slow code that needs optimization. This walks the list
8219  * of all the inodes with pending delalloc and forces them to disk.
8220  */
8221 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8222 {
8223 	struct btrfs_inode *binode;
8224 	struct inode *inode;
8225 	struct btrfs_delalloc_work *work, *next;
8226 	struct list_head works;
8227 	struct list_head splice;
8228 	int ret = 0;
8229 
8230 	INIT_LIST_HEAD(&works);
8231 	INIT_LIST_HEAD(&splice);
8232 
8233 	spin_lock(&root->delalloc_lock);
8234 	list_splice_init(&root->delalloc_inodes, &splice);
8235 	while (!list_empty(&splice)) {
8236 		binode = list_entry(splice.next, struct btrfs_inode,
8237 				    delalloc_inodes);
8238 
8239 		list_move_tail(&binode->delalloc_inodes,
8240 			       &root->delalloc_inodes);
8241 		inode = igrab(&binode->vfs_inode);
8242 		if (!inode) {
8243 			cond_resched_lock(&root->delalloc_lock);
8244 			continue;
8245 		}
8246 		spin_unlock(&root->delalloc_lock);
8247 
8248 		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8249 		if (unlikely(!work)) {
8250 			ret = -ENOMEM;
8251 			goto out;
8252 		}
8253 		list_add_tail(&work->list, &works);
8254 		btrfs_queue_worker(&root->fs_info->flush_workers,
8255 				   &work->work);
8256 
8257 		cond_resched();
8258 		spin_lock(&root->delalloc_lock);
8259 	}
8260 	spin_unlock(&root->delalloc_lock);
8261 
8262 	list_for_each_entry_safe(work, next, &works, list) {
8263 		list_del_init(&work->list);
8264 		btrfs_wait_and_free_delalloc_work(work);
8265 	}
8266 	return 0;
8267 out:
8268 	list_for_each_entry_safe(work, next, &works, list) {
8269 		list_del_init(&work->list);
8270 		btrfs_wait_and_free_delalloc_work(work);
8271 	}
8272 
8273 	if (!list_empty_careful(&splice)) {
8274 		spin_lock(&root->delalloc_lock);
8275 		list_splice_tail(&splice, &root->delalloc_inodes);
8276 		spin_unlock(&root->delalloc_lock);
8277 	}
8278 	return ret;
8279 }
8280 
8281 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8282 {
8283 	int ret;
8284 
8285 	if (root->fs_info->sb->s_flags & MS_RDONLY)
8286 		return -EROFS;
8287 
8288 	ret = __start_delalloc_inodes(root, delay_iput);
8289 	/*
8290 	 * the filemap_flush will queue IO into the worker threads, but
8291 	 * we have to make sure the IO is actually started and that
8292 	 * ordered extents get created before we return
8293 	 */
8294 	atomic_inc(&root->fs_info->async_submit_draining);
8295 	while (atomic_read(&root->fs_info->nr_async_submits) ||
8296 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
8297 		wait_event(root->fs_info->async_submit_wait,
8298 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
8299 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
8300 	}
8301 	atomic_dec(&root->fs_info->async_submit_draining);
8302 	return ret;
8303 }
8304 
8305 int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info,
8306 				    int delay_iput)
8307 {
8308 	struct btrfs_root *root;
8309 	struct list_head splice;
8310 	int ret;
8311 
8312 	if (fs_info->sb->s_flags & MS_RDONLY)
8313 		return -EROFS;
8314 
8315 	INIT_LIST_HEAD(&splice);
8316 
8317 	spin_lock(&fs_info->delalloc_root_lock);
8318 	list_splice_init(&fs_info->delalloc_roots, &splice);
8319 	while (!list_empty(&splice)) {
8320 		root = list_first_entry(&splice, struct btrfs_root,
8321 					delalloc_root);
8322 		root = btrfs_grab_fs_root(root);
8323 		BUG_ON(!root);
8324 		list_move_tail(&root->delalloc_root,
8325 			       &fs_info->delalloc_roots);
8326 		spin_unlock(&fs_info->delalloc_root_lock);
8327 
8328 		ret = __start_delalloc_inodes(root, delay_iput);
8329 		btrfs_put_fs_root(root);
8330 		if (ret)
8331 			goto out;
8332 
8333 		spin_lock(&fs_info->delalloc_root_lock);
8334 	}
8335 	spin_unlock(&fs_info->delalloc_root_lock);
8336 
8337 	atomic_inc(&fs_info->async_submit_draining);
8338 	while (atomic_read(&fs_info->nr_async_submits) ||
8339 	      atomic_read(&fs_info->async_delalloc_pages)) {
8340 		wait_event(fs_info->async_submit_wait,
8341 		   (atomic_read(&fs_info->nr_async_submits) == 0 &&
8342 		    atomic_read(&fs_info->async_delalloc_pages) == 0));
8343 	}
8344 	atomic_dec(&fs_info->async_submit_draining);
8345 	return 0;
8346 out:
8347 	if (!list_empty_careful(&splice)) {
8348 		spin_lock(&fs_info->delalloc_root_lock);
8349 		list_splice_tail(&splice, &fs_info->delalloc_roots);
8350 		spin_unlock(&fs_info->delalloc_root_lock);
8351 	}
8352 	return ret;
8353 }
8354 
8355 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8356 			 const char *symname)
8357 {
8358 	struct btrfs_trans_handle *trans;
8359 	struct btrfs_root *root = BTRFS_I(dir)->root;
8360 	struct btrfs_path *path;
8361 	struct btrfs_key key;
8362 	struct inode *inode = NULL;
8363 	int err;
8364 	int drop_inode = 0;
8365 	u64 objectid;
8366 	u64 index = 0;
8367 	int name_len;
8368 	int datasize;
8369 	unsigned long ptr;
8370 	struct btrfs_file_extent_item *ei;
8371 	struct extent_buffer *leaf;
8372 
8373 	name_len = strlen(symname) + 1;
8374 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
8375 		return -ENAMETOOLONG;
8376 
8377 	/*
8378 	 * 2 items for inode item and ref
8379 	 * 2 items for dir items
8380 	 * 1 item for xattr if selinux is on
8381 	 */
8382 	trans = btrfs_start_transaction(root, 5);
8383 	if (IS_ERR(trans))
8384 		return PTR_ERR(trans);
8385 
8386 	err = btrfs_find_free_ino(root, &objectid);
8387 	if (err)
8388 		goto out_unlock;
8389 
8390 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
8391 				dentry->d_name.len, btrfs_ino(dir), objectid,
8392 				S_IFLNK|S_IRWXUGO, &index);
8393 	if (IS_ERR(inode)) {
8394 		err = PTR_ERR(inode);
8395 		goto out_unlock;
8396 	}
8397 
8398 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
8399 	if (err) {
8400 		drop_inode = 1;
8401 		goto out_unlock;
8402 	}
8403 
8404 	/*
8405 	 * If the active LSM wants to access the inode during
8406 	 * d_instantiate it needs these. Smack checks to see
8407 	 * if the filesystem supports xattrs by looking at the
8408 	 * ops vector.
8409 	 */
8410 	inode->i_fop = &btrfs_file_operations;
8411 	inode->i_op = &btrfs_file_inode_operations;
8412 
8413 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
8414 	if (err)
8415 		drop_inode = 1;
8416 	else {
8417 		inode->i_mapping->a_ops = &btrfs_aops;
8418 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8419 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
8420 	}
8421 	if (drop_inode)
8422 		goto out_unlock;
8423 
8424 	path = btrfs_alloc_path();
8425 	if (!path) {
8426 		err = -ENOMEM;
8427 		drop_inode = 1;
8428 		goto out_unlock;
8429 	}
8430 	key.objectid = btrfs_ino(inode);
8431 	key.offset = 0;
8432 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
8433 	datasize = btrfs_file_extent_calc_inline_size(name_len);
8434 	err = btrfs_insert_empty_item(trans, root, path, &key,
8435 				      datasize);
8436 	if (err) {
8437 		drop_inode = 1;
8438 		btrfs_free_path(path);
8439 		goto out_unlock;
8440 	}
8441 	leaf = path->nodes[0];
8442 	ei = btrfs_item_ptr(leaf, path->slots[0],
8443 			    struct btrfs_file_extent_item);
8444 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8445 	btrfs_set_file_extent_type(leaf, ei,
8446 				   BTRFS_FILE_EXTENT_INLINE);
8447 	btrfs_set_file_extent_encryption(leaf, ei, 0);
8448 	btrfs_set_file_extent_compression(leaf, ei, 0);
8449 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8450 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8451 
8452 	ptr = btrfs_file_extent_inline_start(ei);
8453 	write_extent_buffer(leaf, symname, ptr, name_len);
8454 	btrfs_mark_buffer_dirty(leaf);
8455 	btrfs_free_path(path);
8456 
8457 	inode->i_op = &btrfs_symlink_inode_operations;
8458 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
8459 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8460 	inode_set_bytes(inode, name_len);
8461 	btrfs_i_size_write(inode, name_len - 1);
8462 	err = btrfs_update_inode(trans, root, inode);
8463 	if (err)
8464 		drop_inode = 1;
8465 
8466 out_unlock:
8467 	if (!err)
8468 		d_instantiate(dentry, inode);
8469 	btrfs_end_transaction(trans, root);
8470 	if (drop_inode) {
8471 		inode_dec_link_count(inode);
8472 		iput(inode);
8473 	}
8474 	btrfs_btree_balance_dirty(root);
8475 	return err;
8476 }
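
/*
 * Sizing note for the inline extent above (illustrative): for symname
 * "target", name_len = strlen("target") + 1 = 7, so seven bytes
 * (including the trailing NUL) are written inline, inode_set_bytes()
 * records 7, and i_size is set to name_len - 1 == 6, the length that
 * readlink(2) reports.
 */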
8477 
8478 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8479 				       u64 start, u64 num_bytes, u64 min_size,
8480 				       loff_t actual_len, u64 *alloc_hint,
8481 				       struct btrfs_trans_handle *trans)
8482 {
8483 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8484 	struct extent_map *em;
8485 	struct btrfs_root *root = BTRFS_I(inode)->root;
8486 	struct btrfs_key ins;
8487 	u64 cur_offset = start;
8488 	u64 i_size;
8489 	u64 cur_bytes;
8490 	int ret = 0;
8491 	bool own_trans = true;
8492 
8493 	if (trans)
8494 		own_trans = false;
8495 	while (num_bytes > 0) {
8496 		if (own_trans) {
8497 			trans = btrfs_start_transaction(root, 3);
8498 			if (IS_ERR(trans)) {
8499 				ret = PTR_ERR(trans);
8500 				break;
8501 			}
8502 		}
8503 
8504 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
8505 		cur_bytes = max(cur_bytes, min_size);
8506 		ret = btrfs_reserve_extent(trans, root, cur_bytes,
8507 					   min_size, 0, *alloc_hint, &ins, 1);
8508 		if (ret) {
8509 			if (own_trans)
8510 				btrfs_end_transaction(trans, root);
8511 			break;
8512 		}
8513 
8514 		ret = insert_reserved_file_extent(trans, inode,
8515 						  cur_offset, ins.objectid,
8516 						  ins.offset, ins.offset,
8517 						  ins.offset, 0, 0, 0,
8518 						  BTRFS_FILE_EXTENT_PREALLOC);
8519 		if (ret) {
8520 			btrfs_abort_transaction(trans, root, ret);
8521 			if (own_trans)
8522 				btrfs_end_transaction(trans, root);
8523 			break;
8524 		}
8525 		btrfs_drop_extent_cache(inode, cur_offset,
8526 					cur_offset + ins.offset - 1, 0);
8527 
8528 		em = alloc_extent_map();
8529 		if (!em) {
8530 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
8531 				&BTRFS_I(inode)->runtime_flags);
8532 			goto next;
8533 		}
8534 
8535 		em->start = cur_offset;
8536 		em->orig_start = cur_offset;
8537 		em->len = ins.offset;
8538 		em->block_start = ins.objectid;
8539 		em->block_len = ins.offset;
8540 		em->orig_block_len = ins.offset;
8541 		em->ram_bytes = ins.offset;
8542 		em->bdev = root->fs_info->fs_devices->latest_bdev;
8543 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
8544 		em->generation = trans->transid;
8545 
8546 		while (1) {
8547 			write_lock(&em_tree->lock);
8548 			ret = add_extent_mapping(em_tree, em, 1);
8549 			write_unlock(&em_tree->lock);
8550 			if (ret != -EEXIST)
8551 				break;
8552 			btrfs_drop_extent_cache(inode, cur_offset,
8553 						cur_offset + ins.offset - 1,
8554 						0);
8555 		}
8556 		free_extent_map(em);
8557 next:
8558 		num_bytes -= ins.offset;
8559 		cur_offset += ins.offset;
8560 		*alloc_hint = ins.objectid + ins.offset;
8561 
8562 		inode_inc_iversion(inode);
8563 		inode->i_ctime = CURRENT_TIME;
8564 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8565 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8566 		    (actual_len > inode->i_size) &&
8567 		    (cur_offset > inode->i_size)) {
8568 			if (cur_offset > actual_len)
8569 				i_size = actual_len;
8570 			else
8571 				i_size = cur_offset;
8572 			i_size_write(inode, i_size);
8573 			btrfs_ordered_update_i_size(inode, i_size, NULL);
8574 		}
8575 
8576 		ret = btrfs_update_inode(trans, root, inode);
8577 
8578 		if (ret) {
8579 			btrfs_abort_transaction(trans, root, ret);
8580 			if (own_trans)
8581 				btrfs_end_transaction(trans, root);
8582 			break;
8583 		}
8584 
8585 		if (own_trans)
8586 			btrfs_end_transaction(trans, root);
8587 	}
8588 	return ret;
8589 }
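
/*
 * Chunking example (illustrative): preallocating 600MiB with a 64KiB
 * min_size proceeds in passes of cur_bytes = min(num_bytes, 256MiB), so
 * no single transaction reserves more than 256MiB.
 * btrfs_reserve_extent() may hand back anything between min_size and
 * cur_bytes, and the loop advances by ins.offset until num_bytes is
 * exhausted.
 */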
8590 
8591 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8592 			      u64 start, u64 num_bytes, u64 min_size,
8593 			      loff_t actual_len, u64 *alloc_hint)
8594 {
8595 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8596 					   min_size, actual_len, alloc_hint,
8597 					   NULL);
8598 }
8599 
8600 int btrfs_prealloc_file_range_trans(struct inode *inode,
8601 				    struct btrfs_trans_handle *trans, int mode,
8602 				    u64 start, u64 num_bytes, u64 min_size,
8603 				    loff_t actual_len, u64 *alloc_hint)
8604 {
8605 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8606 					   min_size, actual_len, alloc_hint, trans);
8607 }
8608 
8609 static int btrfs_set_page_dirty(struct page *page)
8610 {
8611 	return __set_page_dirty_nobuffers(page);
8612 }
8613 
8614 static int btrfs_permission(struct inode *inode, int mask)
8615 {
8616 	struct btrfs_root *root = BTRFS_I(inode)->root;
8617 	umode_t mode = inode->i_mode;
8618 
8619 	if (mask & MAY_WRITE &&
8620 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8621 		if (btrfs_root_readonly(root))
8622 			return -EROFS;
8623 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8624 			return -EACCES;
8625 	}
8626 	return generic_permission(inode, mask);
8627 }
8628 
8629 static const struct inode_operations btrfs_dir_inode_operations = {
8630 	.getattr	= btrfs_getattr,
8631 	.lookup		= btrfs_lookup,
8632 	.create		= btrfs_create,
8633 	.unlink		= btrfs_unlink,
8634 	.link		= btrfs_link,
8635 	.mkdir		= btrfs_mkdir,
8636 	.rmdir		= btrfs_rmdir,
8637 	.rename		= btrfs_rename,
8638 	.symlink	= btrfs_symlink,
8639 	.setattr	= btrfs_setattr,
8640 	.mknod		= btrfs_mknod,
8641 	.setxattr	= btrfs_setxattr,
8642 	.getxattr	= btrfs_getxattr,
8643 	.listxattr	= btrfs_listxattr,
8644 	.removexattr	= btrfs_removexattr,
8645 	.permission	= btrfs_permission,
8646 	.get_acl	= btrfs_get_acl,
8647 };
8648 static const struct inode_operations btrfs_dir_ro_inode_operations = {
8649 	.lookup		= btrfs_lookup,
8650 	.permission	= btrfs_permission,
8651 	.get_acl	= btrfs_get_acl,
8652 };
8653 
8654 static const struct file_operations btrfs_dir_file_operations = {
8655 	.llseek		= generic_file_llseek,
8656 	.read		= generic_read_dir,
8657 	.iterate	= btrfs_real_readdir,
8658 	.unlocked_ioctl	= btrfs_ioctl,
8659 #ifdef CONFIG_COMPAT
8660 	.compat_ioctl	= btrfs_ioctl,
8661 #endif
8662 	.release        = btrfs_release_file,
8663 	.fsync		= btrfs_sync_file,
8664 };
8665 
8666 static struct extent_io_ops btrfs_extent_io_ops = {
8667 	.fill_delalloc = run_delalloc_range,
8668 	.submit_bio_hook = btrfs_submit_bio_hook,
8669 	.merge_bio_hook = btrfs_merge_bio_hook,
8670 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
8671 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
8672 	.writepage_start_hook = btrfs_writepage_start_hook,
8673 	.set_bit_hook = btrfs_set_bit_hook,
8674 	.clear_bit_hook = btrfs_clear_bit_hook,
8675 	.merge_extent_hook = btrfs_merge_extent_hook,
8676 	.split_extent_hook = btrfs_split_extent_hook,
8677 };
8678 
8679 /*
8680  * btrfs doesn't support the bmap operation because swapfiles
8681  * use bmap to make a mapping of extents in the file.  They assume
8682  * these extents won't change over the life of the file and they
8683  * use the bmap result to do IO directly to the drive.
8684  *
8685  * the btrfs bmap call would return logical addresses that aren't
8686  * suitable for IO and they will also change frequently as COW
8687  * operations happen.  So, swapfile + btrfs == corruption.
8688  *
8689  * For now we're avoiding this by dropping bmap.
8690  */
8691 static const struct address_space_operations btrfs_aops = {
8692 	.readpage	= btrfs_readpage,
8693 	.writepage	= btrfs_writepage,
8694 	.writepages	= btrfs_writepages,
8695 	.readpages	= btrfs_readpages,
8696 	.direct_IO	= btrfs_direct_IO,
8697 	.invalidatepage = btrfs_invalidatepage,
8698 	.releasepage	= btrfs_releasepage,
8699 	.set_page_dirty	= btrfs_set_page_dirty,
8700 	.error_remove_page = generic_error_remove_page,
8701 };
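
/*
 * Userspace sketch of the consequence (illustrative, assumed behavior):
 * with no .bmap in btrfs_aops, the FIBMAP ioctl is expected to fail
 * instead of handing out a block number that COW would soon invalidate:
 *
 *	int blk = 0;
 *	if (ioctl(fd, FIBMAP, &blk) < 0)
 *		perror("FIBMAP");	(fails with -EINVAL on btrfs)
 *
 * and in-kernel bmap() users such as swapfile activation are expected
 * to bail out the same way.
 */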
8702 
8703 static const struct address_space_operations btrfs_symlink_aops = {
8704 	.readpage	= btrfs_readpage,
8705 	.writepage	= btrfs_writepage,
8706 	.invalidatepage = btrfs_invalidatepage,
8707 	.releasepage	= btrfs_releasepage,
8708 };
8709 
8710 static const struct inode_operations btrfs_file_inode_operations = {
8711 	.getattr	= btrfs_getattr,
8712 	.setattr	= btrfs_setattr,
8713 	.setxattr	= btrfs_setxattr,
8714 	.getxattr	= btrfs_getxattr,
8715 	.listxattr      = btrfs_listxattr,
8716 	.removexattr	= btrfs_removexattr,
8717 	.permission	= btrfs_permission,
8718 	.fiemap		= btrfs_fiemap,
8719 	.get_acl	= btrfs_get_acl,
8720 	.update_time	= btrfs_update_time,
8721 };
8722 static const struct inode_operations btrfs_special_inode_operations = {
8723 	.getattr	= btrfs_getattr,
8724 	.setattr	= btrfs_setattr,
8725 	.permission	= btrfs_permission,
8726 	.setxattr	= btrfs_setxattr,
8727 	.getxattr	= btrfs_getxattr,
8728 	.listxattr	= btrfs_listxattr,
8729 	.removexattr	= btrfs_removexattr,
8730 	.get_acl	= btrfs_get_acl,
8731 	.update_time	= btrfs_update_time,
8732 };
8733 static const struct inode_operations btrfs_symlink_inode_operations = {
8734 	.readlink	= generic_readlink,
8735 	.follow_link	= page_follow_link_light,
8736 	.put_link	= page_put_link,
8737 	.getattr	= btrfs_getattr,
8738 	.setattr	= btrfs_setattr,
8739 	.permission	= btrfs_permission,
8740 	.setxattr	= btrfs_setxattr,
8741 	.getxattr	= btrfs_getxattr,
8742 	.listxattr	= btrfs_listxattr,
8743 	.removexattr	= btrfs_removexattr,
8744 	.get_acl	= btrfs_get_acl,
8745 	.update_time	= btrfs_update_time,
8746 };
8747 
8748 const struct dentry_operations btrfs_dentry_operations = {
8749 	.d_delete	= btrfs_dentry_delete,
8750 	.d_release	= btrfs_dentry_release,
8751 };
8752