xref: /openbmc/linux/fs/btrfs/file.c (revision 840ef8b7)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid when the defrag was added; we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

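/*
 * Defrag records live in fs_info->defrag_inodes, an rbtree ordered by
 * (root objectid, inode number); this comparator defines that order.
 */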
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/* Put a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag, then evict the inode from
		 * memory and re-read it, the new in-memory inode won't have
		 * IN_DEFRAG set.  In that case we may find an existing
		 * defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
void btrfs_requeue_inode_defrag(struct inode *inode,
				struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * the records together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

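/*
 * Each pass defrags at most this many units of the file (see
 * btrfs_defrag_file(); the unit is most likely pages).  A full batch
 * means there is probably more work, so the inode gets requeued rather
 * than monopolizing the defrag thread.
 */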
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

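	/*
	 * Hold the subvol SRCU read lock so the root we look up can't be
	 * torn down (e.g. by snapshot/subvolume deletion) before we've
	 * grabbed a reference to the inode.
	 */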
	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}
	if (btrfs_root_refs(&inode_root->root_item) == 0) {
		ret = -ENOENT;
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
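		/*
		 * Nothing at or after (root_objectid, first_ino).  If we
		 * were partway through the tree, wrap around once to pick
		 * up records inserted behind us; otherwise the tree is
		 * empty and we are done.
		 */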
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* Simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

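		/*
		 * A short copy leaves us mid-page: keep the offset and
		 * retry the rest of this page.  Otherwise move on to the
		 * start of the next prepared page.
		 */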
		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

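	/*
	 * Delalloc bookkeeping works at sector granularity, so round the
	 * start down and the length up to sector boundaries before
	 * tagging the range.
	 */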
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->orig_block_len = max(split->block_len,
						    em->orig_block_len);
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;
			split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = em->orig_start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

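	/*
	 * Nothing is expected to live beyond the on-disk i_size, so a
	 * drop range starting past it shouldn't need to split items;
	 * searching without modification intent avoids needless COW
	 * (our reading of how modify_tree is consumed below).
	 */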
	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}

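/*
 * Check whether the file extent item at @slot can be merged with the
 * caller's extent: it must be a regular, uncompressed, unencrypted
 * extent backed by @bytenr whose offset lines up with @orig_offset,
 * and it must match any bound already fixed in *start/*end.  On
 * success *start/*end are set to the item's range.
 */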
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

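	/*
	 * Split the prealloc extent item at @split: the first pass
	 * separates [key.offset, start); if the written range also ends
	 * before the extent does, a second pass splits again at @end,
	 * leaving up to three pieces.
	 */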
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file_inode(file);
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
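	/*
	 * When writing inside i_size, lock the extent range and make sure
	 * no ordered extent (writeback completion pending) overlaps it.
	 * If one does, back out completely, wait for it to finish and
	 * retry, since its completion can update extents under us.
	 */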
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				  0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

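	/*
	 * Size the pages array to cover the whole iov, capped at one page
	 * worth of pointers and at this task's remaining dirty-throttling
	 * headroom, with a floor of 8 so small batches still progress.
	 */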
	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to set up the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}

static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

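	/*
	 * Try the direct I/O path first.  If it comes up short (the dio
	 * code can bail partway through, e.g. on ranges it can't handle
	 * directly), finish through the buffered path, then write that
	 * tail back and invalidate its page cache so later direct reads
	 * don't see stale pages.
	 */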
	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 *
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We need to block on a committing transaction to keep us from
		 * throwing an ordered operation on to the list and causing
		 * something like sync to deadlock trying to flush out this
		 * inode.
		 */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	bool full_sync = 0;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the flushing can be done by multiple
	 * tasks concurrently, which improves performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again so that no dirty pages in the
	 * range are left behind.
	 */
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	if (full_sync)
		btrfs_wait_ordered_range(inode, start, end - start + 1);
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok, we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			/*
			 * If we didn't already wait for ordered extents we need
			 * to do that now.
			 */
			if (!full_sync)
				btrfs_wait_ordered_range(inode, start,
							 end - start + 1);
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0) {
				ret = btrfs_end_transaction(trans, root);
			} else {
				if (!full_sync)
					btrfs_wait_ordered_range(inode, start,
								 end -
								 start + 1);
				ret = btrfs_commit_transaction(trans, root);
			}
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

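/*
 * Check whether the item at @slot is a hole (a regular file extent
 * item with a disk_bytenr of zero) immediately adjacent to the range
 * [start, end), i.e. one a new hole there could be merged with.
 */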
1816 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1817 			  int slot, u64 start, u64 end)
1818 {
1819 	struct btrfs_file_extent_item *fi;
1820 	struct btrfs_key key;
1821 
1822 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1823 		return 0;
1824 
1825 	btrfs_item_key_to_cpu(leaf, &key, slot);
1826 	if (key.objectid != btrfs_ino(inode) ||
1827 	    key.type != BTRFS_EXTENT_DATA_KEY)
1828 		return 0;
1829 
1830 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1831 
1832 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1833 		return 0;
1834 
1835 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
1836 		return 0;
1837 
1838 	if (key.offset == end)
1839 		return 1;
1840 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1841 		return 1;
1842 	return 0;
1843 }
1844 
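/*
 * Record a hole covering [offset, end) after the file extents there have
 * been dropped.  Try to extend an adjacent hole item first; failing
 * that, insert a fresh zero-bytenr file extent item.  Finally replace
 * the cached extent mapping for the range so readers see the hole.
 */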
1845 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1846 		      struct btrfs_path *path, u64 offset, u64 end)
1847 {
1848 	struct btrfs_root *root = BTRFS_I(inode)->root;
1849 	struct extent_buffer *leaf;
1850 	struct btrfs_file_extent_item *fi;
1851 	struct extent_map *hole_em;
1852 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1853 	struct btrfs_key key;
1854 	int ret;
1855 
1856 	key.objectid = btrfs_ino(inode);
1857 	key.type = BTRFS_EXTENT_DATA_KEY;
1858 	key.offset = offset;
1859 
1861 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1862 	if (ret < 0)
1863 		return ret;
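	/*
	 * ret == 0 would mean an extent item already sits exactly at
	 * @offset, which must not happen when filling a freshly punched
	 * hole.
	 */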
1864 	BUG_ON(!ret);
1865 
1866 	leaf = path->nodes[0];
1867 	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1868 		u64 num_bytes;
1869 
1870 		path->slots[0]--;
1871 		fi = btrfs_item_ptr(leaf, path->slots[0],
1872 				    struct btrfs_file_extent_item);
1873 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1874 			end - offset;
1875 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1876 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1877 		btrfs_set_file_extent_offset(leaf, fi, 0);
1878 		btrfs_mark_buffer_dirty(leaf);
1879 		goto out;
1880 	}
1881 
1882 	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1883 		u64 num_bytes;
1884 
1885 		path->slots[0]++;
1886 		key.offset = offset;
1887 		btrfs_set_item_key_safe(trans, root, path, &key);
1888 		fi = btrfs_item_ptr(leaf, path->slots[0],
1889 				    struct btrfs_file_extent_item);
1890 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1891 			offset;
1892 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1893 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1894 		btrfs_set_file_extent_offset(leaf, fi, 0);
1895 		btrfs_mark_buffer_dirty(leaf);
1896 		goto out;
1897 	}
1898 	btrfs_release_path(path);
1899 
1900 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1901 				       0, 0, end - offset, 0, end - offset,
1902 				       0, 0, 0);
1903 	if (ret)
1904 		return ret;
1905 
1906 out:
1907 	btrfs_release_path(path);
1908 
1909 	hole_em = alloc_extent_map();
1910 	if (!hole_em) {
1911 		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1912 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1913 			&BTRFS_I(inode)->runtime_flags);
1914 	} else {
1915 		hole_em->start = offset;
1916 		hole_em->len = end - offset;
1917 		hole_em->orig_start = offset;
1918 
1919 		hole_em->block_start = EXTENT_MAP_HOLE;
1920 		hole_em->block_len = 0;
1921 		hole_em->orig_block_len = 0;
1922 		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1923 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
1924 		hole_em->generation = trans->transid;
1925 
1926 		do {
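		/*
		 * A concurrent reader can repopulate the extent map tree
		 * between our drop and add, making add_extent_mapping()
		 * fail with -EEXIST, so drop and retry until the hole
		 * mapping sticks.
		 */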
1927 			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1928 			write_lock(&em_tree->lock);
1929 			ret = add_extent_mapping(em_tree, hole_em);
1930 			if (!ret)
1931 				list_move(&hole_em->list,
1932 					  &em_tree->modified_extents);
1933 			write_unlock(&em_tree->lock);
1934 		} while (ret == -EEXIST);
1935 		free_extent_map(hole_em);
1936 		if (ret)
1937 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1938 				&BTRFS_I(inode)->runtime_flags);
1939 	}
1940 
1941 	return 0;
1942 }
1943 
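/*
 * Punch a hole over [offset, offset + len).  Partial pages at either end
 * are zeroed in place; for the block-aligned middle of the range we lock
 * the extent range, drop the file extents a chunk at a time (restarting
 * the transaction whenever a chunk exhausts its reservation) and
 * back-fill each dropped chunk with hole extents via fill_holes().
 */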
1944 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1945 {
1946 	struct btrfs_root *root = BTRFS_I(inode)->root;
1947 	struct extent_state *cached_state = NULL;
1948 	struct btrfs_path *path;
1949 	struct btrfs_block_rsv *rsv;
1950 	struct btrfs_trans_handle *trans;
1951 	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
1952 	u64 lockend = round_down(offset + len,
1953 				 BTRFS_I(inode)->root->sectorsize) - 1;
1954 	u64 cur_offset = lockstart;
1955 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1956 	u64 drop_end;
1957 	int ret = 0;
1958 	int err = 0;
1959 	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
1960 			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
1961 
1962 	btrfs_wait_ordered_range(inode, offset, len);
1963 
1964 	mutex_lock(&inode->i_mutex);
1965 	/*
1966 	 * We needn't truncate any page which is beyond the end of the file
1967 	 * because we are sure there is no data there.
1968 	 */
1969 	/*
1970 	 * Only do this if the range sits within a single page and doesn't
1971 	 * cover the entire page.
1972 	 */
1973 	if (same_page && len < PAGE_CACHE_SIZE) {
1974 		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
1975 			ret = btrfs_truncate_page(inode, offset, len, 0);
1976 		mutex_unlock(&inode->i_mutex);
1977 		return ret;
1978 	}
1979 
1980 	/* zero back part of the first page */
1981 	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
1982 		ret = btrfs_truncate_page(inode, offset, 0, 0);
1983 		if (ret) {
1984 			mutex_unlock(&inode->i_mutex);
1985 			return ret;
1986 		}
1987 	}
1988 
1989 	/* zero the front end of the last page */
1990 	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
1991 		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1992 		if (ret) {
1993 			mutex_unlock(&inode->i_mutex);
1994 			return ret;
1995 		}
1996 	}
1997 
1998 	if (lockend < lockstart) {
1999 		mutex_unlock(&inode->i_mutex);
2000 		return 0;
2001 	}
2002 
2003 	while (1) {
2004 		struct btrfs_ordered_extent *ordered;
2005 
2006 		truncate_pagecache_range(inode, lockstart, lockend);
2007 
2008 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2009 				 0, &cached_state);
2010 		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2011 
2012 		/*
2013 		 * We need to make sure we have no ordered extents in this range
2014 		 * and that nobody raced in and read a page in this range; if
2015 		 * they did, we need to try again.
2016 		 */
2017 		if ((!ordered ||
2018 		    (ordered->file_offset + ordered->len < lockstart ||
2019 		     ordered->file_offset > lockend)) &&
2020 		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
2021 				     lockend, EXTENT_UPTODATE, 0,
2022 				     cached_state)) {
2023 			if (ordered)
2024 				btrfs_put_ordered_extent(ordered);
2025 			break;
2026 		}
2027 		if (ordered)
2028 			btrfs_put_ordered_extent(ordered);
2029 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2030 				     lockend, &cached_state, GFP_NOFS);
2031 		btrfs_wait_ordered_range(inode, lockstart,
2032 					 lockend - lockstart + 1);
2033 	}
2034 
2035 	path = btrfs_alloc_path();
2036 	if (!path) {
2037 		ret = -ENOMEM;
2038 		goto out;
2039 	}
2040 
2041 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2042 	if (!rsv) {
2043 		ret = -ENOMEM;
2044 		goto out_free;
2045 	}
2046 	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2047 	rsv->failfast = 1;
2048 
2049 	/*
2050 	 * 1 - updating the inode
2051 	 * 1 - removing the extents in the range
2052 	 * 1 - adding the hole extent
2053 	 */
2054 	trans = btrfs_start_transaction(root, 3);
2055 	if (IS_ERR(trans)) {
2056 		err = PTR_ERR(trans);
2057 		goto out_free;
2058 	}
2059 
2060 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2061 				      min_size);
2062 	BUG_ON(ret);
2063 	trans->block_rsv = rsv;
2064 
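	/*
	 * __btrfs_drop_extents() stops with -ENOSPC once the reserved
	 * space for this chunk is used up and reports in @drop_end how
	 * far it got.  Fill that much of the hole, commit the progress
	 * and restart with a fresh transaction and reservation; any
	 * other return value ends the loop.
	 */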
2065 	while (cur_offset < lockend) {
2066 		ret = __btrfs_drop_extents(trans, root, inode, path,
2067 					   cur_offset, lockend + 1,
2068 					   &drop_end, 1);
2069 		if (ret != -ENOSPC)
2070 			break;
2071 
2072 		trans->block_rsv = &root->fs_info->trans_block_rsv;
2073 
2074 		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2075 		if (ret) {
2076 			err = ret;
2077 			break;
2078 		}
2079 
2080 		cur_offset = drop_end;
2081 
2082 		ret = btrfs_update_inode(trans, root, inode);
2083 		if (ret) {
2084 			err = ret;
2085 			break;
2086 		}
2087 
2088 		btrfs_end_transaction(trans, root);
2089 		btrfs_btree_balance_dirty(root);
2090 
2091 		trans = btrfs_start_transaction(root, 3);
2092 		if (IS_ERR(trans)) {
2093 			ret = PTR_ERR(trans);
2094 			trans = NULL;
2095 			break;
2096 		}
2097 
2098 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2099 					      rsv, min_size);
2100 		BUG_ON(ret);	/* shouldn't happen */
2101 		trans->block_rsv = rsv;
2102 	}
2103 
2104 	if (ret) {
2105 		err = ret;
2106 		goto out_trans;
2107 	}
2108 
2109 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2110 	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2111 	if (ret) {
2112 		err = ret;
2113 		goto out_trans;
2114 	}
2115 
2116 out_trans:
2117 	if (!trans)
2118 		goto out_free;
2119 
2120 	inode_inc_iversion(inode);
2121 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2122 
2123 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2124 	ret = btrfs_update_inode(trans, root, inode);
2125 	btrfs_end_transaction(trans, root);
2126 	btrfs_btree_balance_dirty(root);
2127 out_free:
2128 	btrfs_free_path(path);
2129 	btrfs_free_block_rsv(root, rsv);
2130 out:
2131 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2132 			     &cached_state, GFP_NOFS);
2133 	mutex_unlock(&inode->i_mutex);
2134 	if (ret && !err)
2135 		err = ret;
2136 	return err;
2137 }
2138 
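/*
 * Entry point for fallocate(2).  An illustrative userspace call that
 * ends up in btrfs_punch_hole() above:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 *
 * punches out [4096, 12288) without changing i_size.  The VFS insists
 * on KEEP_SIZE alongside PUNCH_HOLE, and anything outside the two flags
 * tested below is rejected with -EOPNOTSUPP.
 */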
2139 static long btrfs_fallocate(struct file *file, int mode,
2140 			    loff_t offset, loff_t len)
2141 {
2142 	struct inode *inode = file_inode(file);
2143 	struct extent_state *cached_state = NULL;
2144 	u64 cur_offset;
2145 	u64 last_byte;
2146 	u64 alloc_start;
2147 	u64 alloc_end;
2148 	u64 alloc_hint = 0;
2149 	u64 locked_end;
2150 	struct extent_map *em;
2151 	int blocksize = BTRFS_I(inode)->root->sectorsize;
2152 	int ret;
2153 
2154 	alloc_start = round_down(offset, blocksize);
2155 	alloc_end = round_up(offset + len, blocksize);
2156 
2157 	/* Make sure we aren't being given some crap mode */
2158 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2159 		return -EOPNOTSUPP;
2160 
2161 	if (mode & FALLOC_FL_PUNCH_HOLE)
2162 		return btrfs_punch_hole(inode, offset, len);
2163 
2164 	/*
2165 	 * Make sure we have enough space before we do the
2166 	 * allocation.
2167 	 */
2168 	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2169 	if (ret)
2170 		return ret;
2171 
2172 	/*
2173 	 * wait for ordered IO before we have any locks.  We'll loop again
2174 	 * below with the locks held.
2175 	 */
2176 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2177 
2178 	mutex_lock(&inode->i_mutex);
2179 	ret = inode_newsize_ok(inode, alloc_end);
2180 	if (ret)
2181 		goto out;
2182 
2183 	if (alloc_start > inode->i_size) {
2184 		ret = btrfs_cont_expand(inode, i_size_read(inode),
2185 					alloc_start);
2186 		if (ret)
2187 			goto out;
2188 	}
2189 
2190 	locked_end = alloc_end - 1;
2191 	while (1) {
2192 		struct btrfs_ordered_extent *ordered;
2193 
2194 		/* the extent lock is ordered inside the running
2195 		 * transaction
2196 		 */
2197 		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2198 				 locked_end, 0, &cached_state);
2199 		ordered = btrfs_lookup_first_ordered_extent(inode,
2200 							    alloc_end - 1);
2201 		if (ordered &&
2202 		    ordered->file_offset + ordered->len > alloc_start &&
2203 		    ordered->file_offset < alloc_end) {
2204 			btrfs_put_ordered_extent(ordered);
2205 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2206 					     alloc_start, locked_end,
2207 					     &cached_state, GFP_NOFS);
2208 			/*
2209 			 * we can't wait on the range with the transaction
2210 			 * running or with the extent lock held
2211 			 */
2212 			btrfs_wait_ordered_range(inode, alloc_start,
2213 						 alloc_end - alloc_start);
2214 		} else {
2215 			if (ordered)
2216 				btrfs_put_ordered_extent(ordered);
2217 			break;
2218 		}
2219 	}
2220 
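	/*
	 * Walk the extent maps across [alloc_start, alloc_end).  Holes,
	 * and anything past i_size that isn't already preallocated, get
	 * real preallocated extents; ranges that are already backed only
	 * need an i_size update when the file grew and KEEP_SIZE wasn't
	 * set.
	 */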
2221 	cur_offset = alloc_start;
2222 	while (1) {
2223 		u64 actual_end;
2224 
2225 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2226 				      alloc_end - cur_offset, 0);
2227 		if (IS_ERR_OR_NULL(em)) {
2228 			if (!em)
2229 				ret = -ENOMEM;
2230 			else
2231 				ret = PTR_ERR(em);
2232 			break;
2233 		}
2234 		last_byte = min(extent_map_end(em), alloc_end);
2235 		actual_end = min_t(u64, extent_map_end(em), offset + len);
2236 		last_byte = ALIGN(last_byte, blocksize);
2237 
2238 		if (em->block_start == EXTENT_MAP_HOLE ||
2239 		    (cur_offset >= inode->i_size &&
2240 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2241 			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2242 							last_byte - cur_offset,
2243 							1 << inode->i_blkbits,
2244 							offset + len,
2245 							&alloc_hint);
2246 
2247 			if (ret < 0) {
2248 				free_extent_map(em);
2249 				break;
2250 			}
2251 		} else if (actual_end > inode->i_size &&
2252 			   !(mode & FALLOC_FL_KEEP_SIZE)) {
2253 			/*
2254 			 * We didn't need to allocate any more space, but we
2255 			 * still extended the size of the file so we need to
2256 			 * update i_size.
2257 			 */
2258 			inode->i_ctime = CURRENT_TIME;
2259 			i_size_write(inode, actual_end);
2260 			btrfs_ordered_update_i_size(inode, actual_end, NULL);
2261 		}
2262 		free_extent_map(em);
2263 
2264 		cur_offset = last_byte;
2265 		if (cur_offset >= alloc_end) {
2266 			ret = 0;
2267 			break;
2268 		}
2269 	}
2270 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2271 			     &cached_state, GFP_NOFS);
2272 out:
2273 	mutex_unlock(&inode->i_mutex);
2274 	/* Let go of our reservation. */
2275 	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2276 	return ret;
2277 }
2278 
2279 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2280 {
2281 	struct btrfs_root *root = BTRFS_I(inode)->root;
2282 	struct extent_map *em;
2283 	struct extent_state *cached_state = NULL;
2284 	u64 lockstart = *offset;
2285 	u64 lockend = i_size_read(inode);
2286 	u64 start = *offset;
2287 	u64 orig_start = *offset;
2288 	u64 len = i_size_read(inode);
2289 	u64 last_end = 0;
2290 	int ret = 0;
2291 
2292 	lockend = max_t(u64, root->sectorsize, lockend);
2293 	if (lockend <= lockstart)
2294 		lockend = lockstart + root->sectorsize;
2295 
2296 	lockend--;
2297 	len = lockend - lockstart + 1;
2298 
2299 	len = max_t(u64, len, root->sectorsize);
2300 	if (inode->i_size == 0)
2301 		return -ENXIO;
2302 
2303 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2304 			 &cached_state);
2305 
2306 	/*
2307 	 * Delalloc is such a pain.  If we have a hole and we have pending
2308 	 * delalloc for a portion of the hole we will get back a hole that
2309 	 * exists for the entire range since it hasn't actually been written
2310 	 * yet.  So to take care of this case we need to look for an extent just
2311 	 * before the position we want in case there is outstanding delalloc
2312 	 * going on here.
2313 	 */
2314 	if (whence == SEEK_HOLE && start != 0) {
2315 		if (start <= root->sectorsize)
2316 			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2317 						     root->sectorsize, 0);
2318 		else
2319 			em = btrfs_get_extent_fiemap(inode, NULL, 0,
2320 						     start - root->sectorsize,
2321 						     root->sectorsize, 0);
2322 		if (IS_ERR(em)) {
2323 			ret = PTR_ERR(em);
2324 			goto out;
2325 		}
2326 		last_end = em->start + em->len;
2327 		if (em->block_start == EXTENT_MAP_DELALLOC)
2328 			last_end = min_t(u64, last_end, inode->i_size);
2329 		free_extent_map(em);
2330 	}
2331 
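	/*
	 * Walk the extent maps forward from @start.  For SEEK_HOLE we
	 * stop at the first hole mapping; for SEEK_DATA we skip holes
	 * and preallocated extents and stop at the first mapping with
	 * real (or delalloc) data, treating delalloc past i_size as no
	 * data at all.  A vacant mapping means we walked past the last
	 * extent, which is -ENXIO.
	 */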
2332 	while (1) {
2333 		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2334 		if (IS_ERR(em)) {
2335 			ret = PTR_ERR(em);
2336 			break;
2337 		}
2338 
2339 		if (em->block_start == EXTENT_MAP_HOLE) {
2340 			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2341 				if (last_end <= orig_start) {
2342 					free_extent_map(em);
2343 					ret = -ENXIO;
2344 					break;
2345 				}
2346 			}
2347 
2348 			if (whence == SEEK_HOLE) {
2349 				*offset = start;
2350 				free_extent_map(em);
2351 				break;
2352 			}
2353 		} else {
2354 			if (whence == SEEK_DATA) {
2355 				if (em->block_start == EXTENT_MAP_DELALLOC) {
2356 					if (start >= inode->i_size) {
2357 						free_extent_map(em);
2358 						ret = -ENXIO;
2359 						break;
2360 					}
2361 				}
2362 
2363 				if (!test_bit(EXTENT_FLAG_PREALLOC,
2364 					      &em->flags)) {
2365 					*offset = start;
2366 					free_extent_map(em);
2367 					break;
2368 				}
2369 			}
2370 		}
2371 
2372 		start = em->start + em->len;
2373 		last_end = em->start + em->len;
2374 
2375 		if (em->block_start == EXTENT_MAP_DELALLOC)
2376 			last_end = min_t(u64, last_end, inode->i_size);
2377 
2378 		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2379 			free_extent_map(em);
2380 			ret = -ENXIO;
2381 			break;
2382 		}
2383 		free_extent_map(em);
2384 		cond_resched();
2385 	}
2386 	if (!ret)
2387 		*offset = min(*offset, inode->i_size);
2388 out:
2389 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2390 			     &cached_state, GFP_NOFS);
2391 	return ret;
2392 }
2393 
2394 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2395 {
2396 	struct inode *inode = file->f_mapping->host;
2397 	int ret;
2398 
2399 	mutex_lock(&inode->i_mutex);
2400 	switch (whence) {
2401 	case SEEK_END:
2402 	case SEEK_CUR:
2403 		offset = generic_file_llseek(file, offset, whence);
2404 		goto out;
2405 	case SEEK_DATA:
2406 	case SEEK_HOLE:
2407 		if (offset >= i_size_read(inode)) {
2408 			mutex_unlock(&inode->i_mutex);
2409 			return -ENXIO;
2410 		}
2411 
2412 		ret = find_desired_extent(inode, &offset, whence);
2413 		if (ret) {
2414 			mutex_unlock(&inode->i_mutex);
2415 			return ret;
2416 		}
2417 	}
2418 
2419 	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2420 		offset = -EINVAL;
2421 		goto out;
2422 	}
2423 	if (offset > inode->i_sb->s_maxbytes) {
2424 		offset = -EINVAL;
2425 		goto out;
2426 	}
2427 
2428 	/* Special lock needed here? */
2429 	if (offset != file->f_pos) {
2430 		file->f_pos = offset;
2431 		file->f_version = 0;
2432 	}
2433 out:
2434 	mutex_unlock(&inode->i_mutex);
2435 	return offset;
2436 }
2437 
2438 const struct file_operations btrfs_file_operations = {
2439 	.llseek		= btrfs_file_llseek,
2440 	.read		= do_sync_read,
2441 	.write		= do_sync_write,
2442 	.aio_read       = generic_file_aio_read,
2443 	.splice_read	= generic_file_splice_read,
2444 	.aio_write	= btrfs_file_aio_write,
2445 	.mmap		= btrfs_file_mmap,
2446 	.open		= generic_file_open,
2447 	.release	= btrfs_release_file,
2448 	.fsync		= btrfs_sync_file,
2449 	.fallocate	= btrfs_fallocate,
2450 	.unlocked_ioctl	= btrfs_ioctl,
2451 #ifdef CONFIG_COMPAT
2452 	.compat_ioctl	= btrfs_ioctl,
2453 #endif
2454 };
2455 
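/*
 * The inode_defrag cache is created once at module init and destroyed at
 * module exit; the auto defrag code allocates its per-inode records from
 * it elsewhere in this file.
 */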
2456 void btrfs_auto_defrag_exit(void)
2457 {
2458 	if (btrfs_inode_defrag_cachep)
2459 		kmem_cache_destroy(btrfs_inode_defrag_cachep);
2460 }
2461 
2462 int btrfs_auto_defrag_init(void)
2463 {
2464 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2465 					sizeof(struct inode_defrag), 0,
2466 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2467 					NULL);
2468 	if (!btrfs_inode_defrag_cachep)
2469 		return -ENOMEM;
2470 
2471 	return 0;
2472 }
2473