/* xref: /openbmc/linux/fs/btrfs/file.c (revision d0b73b48) */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled, we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

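/*
 * Order defrag records by root objectid first, then by inode number.
 * This is the comparator for the fs_info->defrag_inodes rb-tree.
 */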
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/* Insert a record for an inode into the defrag tree.  The lock
 * must already be held.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, -EEXIST is returned and the
 * record you passed in is left for the caller to free.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

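	/*
	 * Standard rb-tree insertion walk: find the slot for this
	 * (root, ino) pair, merging into an existing record if one is
	 * already queued for the same inode.
	 */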
	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is
 * enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and then the inode is evicted
		 * from memory and re-read, the new in-memory inode won't have
		 * the flag set, so we may find an existing record for it in
		 * the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
void btrfs_requeue_inode_defrag(struct inode *inode,
				struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

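	/*
	 * No exact match: if the closest record sorts before the requested
	 * (root, ino), step to the next record so we always return the
	 * first record at or after the cursor.
	 */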
	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

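/*
 * Defrag at most this many pages per inode in one pass, then requeue the
 * inode, so a single huge file can't monopolize the defrag thread.
 */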
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

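	/*
	 * subvol_srcu keeps the subvolume root from going away while we
	 * look it up and grab the inode.
	 */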
	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}
	if (btrfs_root_refs(&inode_root->root_item) == 0) {
		ret = -ENOENT;
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
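		/*
		 * Nothing found at or after the cursor: wrap around once to
		 * pick up records inserted behind us, then stop once the
		 * tree is empty.
		 */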
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

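	/*
	 * Walk the prepared pages, copying at most one page per iteration;
	 * 'offset' is the byte offset into the current page.
	 */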
	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

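	/*
	 * Round start_pos down and num_bytes up to sector boundaries;
	 * delalloc state is tracked at sector granularity.
	 */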
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

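	/*
	 * end == (u64)-1 means drop everything from start to the end of
	 * the file, so there is no need to test extents against the end
	 * of the range below (testend = 0).
	 */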
	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->orig_block_len = max(split->block_len,
						    em->orig_block_len);
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;
			split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = em->orig_start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

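	/*
	 * Walk all file extent items that overlap [start, end), truncating,
	 * splitting or deleting them as the diagrams below describe.
	 * Deletions are batched in (del_slot, del_nr) and flushed with
	 * btrfs_del_items().
	 */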
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

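	/*
	 * The written range still lies strictly inside the prealloc extent
	 * and couldn't be merged with a neighbour: split off one or two
	 * pieces so that [start, end) gets its own file extent item.
	 */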
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value;
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
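	/*
	 * If the range overlaps i_size, make sure no ordered extent
	 * (in-flight writeback) still covers it; if one does, drop the
	 * pages, wait for it to finish and retry.
	 */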
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				  0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;

}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

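	/*
	 * Size the page-pointer array: enough pages to cover the whole iov,
	 * but capped at one page worth of pointers and at the task's
	 * dirty-page throttling budget, with a floor of 8.
	 */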
	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}

static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 *
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait for them to
	 * complete outside of the ->i_mutex, so that several tasks can
	 * flush dirty pages concurrently, which improves performance.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * Flush the dirty pages once more so that no dirty pages in the
	 * range are left behind.
	 */
	atomic_inc(&root->log_batch);
	btrfs_wait_ordered_range(inode, start, end - start + 1);
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * OK, we haven't committed the transaction yet; let's do a commit.
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}

static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

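	/*
	 * The extents covering [offset, end) have just been dropped, so no
	 * item should be keyed exactly at 'offset'; the search is expected
	 * to return > 0, which the BUG_ON below asserts.
	 */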
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

1829 	leaf = path->nodes[0];
1830 	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1831 		u64 num_bytes;
1832 
1833 		path->slots[0]--;
1834 		fi = btrfs_item_ptr(leaf, path->slots[0],
1835 				    struct btrfs_file_extent_item);
1836 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1837 			end - offset;
1838 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1839 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1840 		btrfs_set_file_extent_offset(leaf, fi, 0);
1841 		btrfs_mark_buffer_dirty(leaf);
1842 		goto out;
1843 	}
1844 
1845 	if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
1846 		u64 num_bytes;
1847 
1848 		path->slots[0]++;
1849 		key.offset = offset;
1850 		btrfs_set_item_key_safe(trans, root, path, &key);
1851 		fi = btrfs_item_ptr(leaf, path->slots[0],
1852 				    struct btrfs_file_extent_item);
1853 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1854 			offset;
1855 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1856 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1857 		btrfs_set_file_extent_offset(leaf, fi, 0);
1858 		btrfs_mark_buffer_dirty(leaf);
1859 		goto out;
1860 	}
1861 	btrfs_release_path(path);
1862 
1863 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1864 				       0, 0, end - offset, 0, end - offset,
1865 				       0, 0, 0);
1866 	if (ret)
1867 		return ret;
1868 
1869 out:
1870 	btrfs_release_path(path);
1871 
1872 	hole_em = alloc_extent_map();
1873 	if (!hole_em) {
1874 		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1875 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1876 			&BTRFS_I(inode)->runtime_flags);
1877 	} else {
1878 		hole_em->start = offset;
1879 		hole_em->len = end - offset;
1880 		hole_em->orig_start = offset;
1881 
1882 		hole_em->block_start = EXTENT_MAP_HOLE;
1883 		hole_em->block_len = 0;
1884 		hole_em->orig_block_len = 0;
1885 		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1886 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
1887 		hole_em->generation = trans->transid;
1888 
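		/*
		 * Someone can race in and add an overlapping map between
		 * our drop and our insert, so keep dropping the cached
		 * range until add_extent_mapping() stops saying -EEXIST.
		 */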
1889 		do {
1890 			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1891 			write_lock(&em_tree->lock);
1892 			ret = add_extent_mapping(em_tree, hole_em);
1893 			if (!ret)
1894 				list_move(&hole_em->list,
1895 					  &em_tree->modified_extents);
1896 			write_unlock(&em_tree->lock);
1897 		} while (ret == -EEXIST);
1898 		free_extent_map(hole_em);
1899 		if (ret)
1900 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1901 				&BTRFS_I(inode)->runtime_flags);
1902 	}
1903 
1904 	return 0;
1905 }
1906 
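/*
 * Punch a hole over [offset, offset + len): zero any partial pages at
 * the edges, then, for the sector-aligned middle of the range, take
 * the extent lock, drop the file extents a chunk at a time (starting a
 * fresh transaction whenever the reserved space runs out), and plug
 * what was dropped with hole extents via fill_holes().
 */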
1907 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1908 {
1909 	struct btrfs_root *root = BTRFS_I(inode)->root;
1910 	struct extent_state *cached_state = NULL;
1911 	struct btrfs_path *path;
1912 	struct btrfs_block_rsv *rsv;
1913 	struct btrfs_trans_handle *trans;
1914 	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
1915 	u64 lockend = round_down(offset + len,
1916 				 BTRFS_I(inode)->root->sectorsize) - 1;
1917 	u64 cur_offset = lockstart;
1918 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1919 	u64 drop_end;
1920 	int ret = 0;
1921 	int err = 0;
1922 	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
1923 			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
1924 
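	/*
	 * Only whole sectors can have their extents dropped; partial
	 * sectors at either end are zeroed in the page cache instead.
	 * Purely as an example, with 4K sectors and pages and a large
	 * enough file, punching offset=1000 len=12000 gives
	 * lockstart=4096 and lockend=12287, while bytes 1000-4095 and
	 * 12288-12999 are zeroed by btrfs_truncate_page() below.
	 */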
1925 	btrfs_wait_ordered_range(inode, offset, len);
1926 
1927 	mutex_lock(&inode->i_mutex);
1928 	/*
1929 	 * We needn't truncate any page which is beyond the end of the file
1930 	 * because we are sure there is no data there.
1931 	 *
1932 	 * If the whole range lives inside a single page and doesn't cover
1933 	 * that entire page, zeroing the partial page in place is all we
1934 	 * have to do.
1935 	 */
1936 	if (same_page && len < PAGE_CACHE_SIZE) {
1937 		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
1938 			ret = btrfs_truncate_page(inode, offset, len, 0);
1939 		mutex_unlock(&inode->i_mutex);
1940 		return ret;
1941 	}
1942 
1943 	/* zero the back part of the first page */
1944 	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
1945 		ret = btrfs_truncate_page(inode, offset, 0, 0);
1946 		if (ret) {
1947 			mutex_unlock(&inode->i_mutex);
1948 			return ret;
1949 		}
1950 	}
1951 
1952 	/* zero the front end of the last page */
1953 	/* zero the front part of the last page */
1954 		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1955 		if (ret) {
1956 			mutex_unlock(&inode->i_mutex);
1957 			return ret;
1958 		}
1959 	}
1960 
1961 	if (lockend < lockstart) {
1962 		mutex_unlock(&inode->i_mutex);
1963 		return 0;
1964 	}
1965 
1966 	while (1) {
1967 		struct btrfs_ordered_extent *ordered;
1968 
1969 		truncate_pagecache_range(inode, lockstart, lockend);
1970 
1971 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1972 				 0, &cached_state);
1973 		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1974 
1975 		/*
1976 		 * We need to make sure there are no ordered extents in this
1977 		 * range, and that nobody raced in and read a page in this
1978 		 * range; if either happened we need to try again.
1979 		 */
1980 		if ((!ordered ||
1981 		    (ordered->file_offset + ordered->len < lockstart ||
1982 		     ordered->file_offset > lockend)) &&
1983 		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1984 				     lockend, EXTENT_UPTODATE, 0,
1985 				     cached_state)) {
1986 			if (ordered)
1987 				btrfs_put_ordered_extent(ordered);
1988 			break;
1989 		}
1990 		if (ordered)
1991 			btrfs_put_ordered_extent(ordered);
1992 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1993 				     lockend, &cached_state, GFP_NOFS);
1994 		btrfs_wait_ordered_range(inode, lockstart,
1995 					 lockend - lockstart + 1);
1996 	}
1997 
1998 	path = btrfs_alloc_path();
1999 	if (!path) {
2000 		ret = -ENOMEM;
2001 		goto out;
2002 	}
2003 
2004 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2005 	if (!rsv) {
2006 		ret = -ENOMEM;
2007 		goto out_free;
2008 	}
2009 	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2010 	rsv->failfast = 1;
2011 
2012 	/*
2013 	 * 1 - update the inode
2014 	 * 1 - remove the extents in the range
2015 	 * 1 - add the hole extent
2016 	 */
2017 	trans = btrfs_start_transaction(root, 3);
2018 	if (IS_ERR(trans)) {
2019 		err = PTR_ERR(trans);
2020 		goto out_free;
2021 	}
2022 
2023 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2024 				      min_size);
2025 	BUG_ON(ret);
2026 	trans->block_rsv = rsv;
2027 
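	/*
	 * __btrfs_drop_extents() stops early and returns -ENOSPC once it
	 * has used up the space reserved in this transaction.  Each time
	 * that happens, fill holes for the part that was dropped, close
	 * the transaction and open a new one with a refilled reservation,
	 * then carry on from drop_end.
	 */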
2028 	while (cur_offset < lockend) {
2029 		ret = __btrfs_drop_extents(trans, root, inode, path,
2030 					   cur_offset, lockend + 1,
2031 					   &drop_end, 1);
2032 		if (ret != -ENOSPC)
2033 			break;
2034 
2035 		trans->block_rsv = &root->fs_info->trans_block_rsv;
2036 
2037 		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2038 		if (ret) {
2039 			err = ret;
2040 			break;
2041 		}
2042 
2043 		cur_offset = drop_end;
2044 
2045 		ret = btrfs_update_inode(trans, root, inode);
2046 		if (ret) {
2047 			err = ret;
2048 			break;
2049 		}
2050 
2051 		btrfs_end_transaction(trans, root);
2052 		btrfs_btree_balance_dirty(root);
2053 
2054 		trans = btrfs_start_transaction(root, 3);
2055 		if (IS_ERR(trans)) {
2056 			ret = PTR_ERR(trans);
2057 			trans = NULL;
2058 			break;
2059 		}
2060 
2061 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2062 					      rsv, min_size);
2063 		BUG_ON(ret);	/* shouldn't happen */
2064 		trans->block_rsv = rsv;
2065 	}
2066 
2067 	if (ret) {
2068 		err = ret;
2069 		goto out_trans;
2070 	}
2071 
2072 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2073 	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2074 	if (ret) {
2075 		err = ret;
2076 		goto out_trans;
2077 	}
2078 
2079 out_trans:
2080 	if (!trans)
2081 		goto out_free;
2082 
2083 	inode_inc_iversion(inode);
2084 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2085 
2086 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2087 	ret = btrfs_update_inode(trans, root, inode);
2088 	btrfs_end_transaction(trans, root);
2089 	btrfs_btree_balance_dirty(root);
2090 out_free:
2091 	btrfs_free_path(path);
2092 	btrfs_free_block_rsv(root, rsv);
2093 out:
2094 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2095 			     &cached_state, GFP_NOFS);
2096 	mutex_unlock(&inode->i_mutex);
2097 	if (ret && !err)
2098 		err = ret;
2099 	return err;
2100 }
2101 
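/*
 * fallocate(2) entry point: preallocate [offset, offset + len), or
 * punch a hole in that range when FALLOC_FL_PUNCH_HOLE is set.  An
 * illustrative userspace sketch (fd being any open file on btrfs):
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *		reserves 1MiB at offset 0 without changing i_size
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 *		gives it back; the VFS enforces that PUNCH_HOLE is always
 *		paired with KEEP_SIZE before we are ever called
 */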
2102 static long btrfs_fallocate(struct file *file, int mode,
2103 			    loff_t offset, loff_t len)
2104 {
2105 	struct inode *inode = file->f_path.dentry->d_inode;
2106 	struct extent_state *cached_state = NULL;
2107 	u64 cur_offset;
2108 	u64 last_byte;
2109 	u64 alloc_start;
2110 	u64 alloc_end;
2111 	u64 alloc_hint = 0;
2112 	u64 locked_end;
2113 	struct extent_map *em;
2114 	int blocksize = BTRFS_I(inode)->root->sectorsize;
2115 	int ret;
2116 
2117 	alloc_start = round_down(offset, blocksize);
2118 	alloc_end = round_up(offset + len, blocksize);
2119 
2120 	/* Make sure we aren't being given some crap mode */
2121 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2122 		return -EOPNOTSUPP;
2123 
2124 	if (mode & FALLOC_FL_PUNCH_HOLE)
2125 		return btrfs_punch_hole(inode, offset, len);
2126 
2127 	/*
2128 	 * Make sure we have enough space before we do the
2129 	 * allocation.
2130 	 */
2131 	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2132 	if (ret)
2133 		return ret;
2134 
2135 	/*
2136 	 * Wait for ordered IO before we take any locks.  We'll loop again
2137 	 * below with the locks held.
2138 	 */
2139 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2140 
2141 	mutex_lock(&inode->i_mutex);
2142 	ret = inode_newsize_ok(inode, alloc_end);
2143 	if (ret)
2144 		goto out;
2145 
2146 	if (alloc_start > inode->i_size) {
2147 		ret = btrfs_cont_expand(inode, i_size_read(inode),
2148 					alloc_start);
2149 		if (ret)
2150 			goto out;
2151 	}
2152 
2153 	locked_end = alloc_end - 1;
2154 	while (1) {
2155 		struct btrfs_ordered_extent *ordered;
2156 
2157 		/* the extent lock is ordered inside the running
2158 		 * transaction
2159 		 */
2160 		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2161 				 locked_end, 0, &cached_state);
2162 		ordered = btrfs_lookup_first_ordered_extent(inode,
2163 							    alloc_end - 1);
2164 		if (ordered &&
2165 		    ordered->file_offset + ordered->len > alloc_start &&
2166 		    ordered->file_offset < alloc_end) {
2167 			btrfs_put_ordered_extent(ordered);
2168 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2169 					     alloc_start, locked_end,
2170 					     &cached_state, GFP_NOFS);
2171 			/*
2172 			 * we can't wait on the range with the transaction
2173 			 * running or with the extent lock held
2174 			 */
2175 			btrfs_wait_ordered_range(inode, alloc_start,
2176 						 alloc_end - alloc_start);
2177 		} else {
2178 			if (ordered)
2179 				btrfs_put_ordered_extent(ordered);
2180 			break;
2181 		}
2182 	}
2183 
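	/*
	 * Walk the extent maps across the whole range: holes (and, past
	 * EOF, anything not already preallocated) get a prealloc extent,
	 * while ranges that are already backed may still need an i_size
	 * update when we are extending the file without KEEP_SIZE.
	 */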
2184 	cur_offset = alloc_start;
2185 	while (1) {
2186 		u64 actual_end;
2187 
2188 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2189 				      alloc_end - cur_offset, 0);
2190 		if (IS_ERR_OR_NULL(em)) {
2191 			if (!em)
2192 				ret = -ENOMEM;
2193 			else
2194 				ret = PTR_ERR(em);
2195 			break;
2196 		}
2197 		last_byte = min(extent_map_end(em), alloc_end);
2198 		actual_end = min_t(u64, extent_map_end(em), offset + len);
2199 		last_byte = ALIGN(last_byte, blocksize);
2200 
2201 		if (em->block_start == EXTENT_MAP_HOLE ||
2202 		    (cur_offset >= inode->i_size &&
2203 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2204 			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2205 							last_byte - cur_offset,
2206 							1 << inode->i_blkbits,
2207 							offset + len,
2208 							&alloc_hint);
2209 
2210 			if (ret < 0) {
2211 				free_extent_map(em);
2212 				break;
2213 			}
2214 		} else if (actual_end > inode->i_size &&
2215 			   !(mode & FALLOC_FL_KEEP_SIZE)) {
2216 			/*
2217 			 * We didn't need to allocate any more space, but we
2218 			 * still extended the size of the file so we need to
2219 			 * update i_size.
2220 			 */
2221 			inode->i_ctime = CURRENT_TIME;
2222 			i_size_write(inode, actual_end);
2223 			btrfs_ordered_update_i_size(inode, actual_end, NULL);
2224 		}
2225 		free_extent_map(em);
2226 
2227 		cur_offset = last_byte;
2228 		if (cur_offset >= alloc_end) {
2229 			ret = 0;
2230 			break;
2231 		}
2232 	}
2233 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2234 			     &cached_state, GFP_NOFS);
2235 out:
2236 	mutex_unlock(&inode->i_mutex);
2237 	/* Let go of our reservation. */
2238 	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2239 	return ret;
2240 }
2241 
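/*
 * Core of SEEK_DATA/SEEK_HOLE: walk the extent maps starting at
 * *offset and advance it to the first data extent or the first hole,
 * returning -ENXIO once the walk runs off the end of the file.
 */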
2242 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2243 {
2244 	struct btrfs_root *root = BTRFS_I(inode)->root;
2245 	struct extent_map *em;
2246 	struct extent_state *cached_state = NULL;
2247 	u64 lockstart = *offset;
2248 	u64 lockend = i_size_read(inode);
2249 	u64 start = *offset;
2250 	u64 orig_start = *offset;
2251 	u64 len = i_size_read(inode);
2252 	u64 last_end = 0;
2253 	int ret = 0;
2254 
2255 	lockend = max_t(u64, root->sectorsize, lockend);
2256 	if (lockend <= lockstart)
2257 		lockend = lockstart + root->sectorsize;
2258 
2259 	lockend--;
2260 	len = lockend - lockstart + 1;
2261 
2262 	len = max_t(u64, len, root->sectorsize);
2263 	if (inode->i_size == 0)
2264 		return -ENXIO;
2265 
2266 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2267 			 &cached_state);
2268 
2269 	/*
2270 	 * Delalloc is such a pain.  If we have a hole and we have pending
2271 	 * delalloc for a portion of the hole, we will get back a hole that
2272 	 * covers the entire range, since none of it has actually been
2273 	 * written yet.  To handle this case we look for an extent just
2274 	 * before the position we want, in case there is outstanding
2275 	 * delalloc going on there.
2276 	 */
2277 	if (whence == SEEK_HOLE && start != 0) {
2278 		if (start <= root->sectorsize)
2279 			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2280 						     root->sectorsize, 0);
2281 		else
2282 			em = btrfs_get_extent_fiemap(inode, NULL, 0,
2283 						     start - root->sectorsize,
2284 						     root->sectorsize, 0);
2285 		if (IS_ERR(em)) {
2286 			ret = PTR_ERR(em);
2287 			goto out;
2288 		}
2289 		last_end = em->start + em->len;
2290 		if (em->block_start == EXTENT_MAP_DELALLOC)
2291 			last_end = min_t(u64, last_end, inode->i_size);
2292 		free_extent_map(em);
2293 	}
2294 
2295 	while (1) {
2296 		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2297 		if (IS_ERR(em)) {
2298 			ret = PTR_ERR(em);
2299 			break;
2300 		}
2301 
2302 		if (em->block_start == EXTENT_MAP_HOLE) {
2303 			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2304 				if (last_end <= orig_start) {
2305 					free_extent_map(em);
2306 					ret = -ENXIO;
2307 					break;
2308 				}
2309 			}
2310 
2311 			if (whence == SEEK_HOLE) {
2312 				*offset = start;
2313 				free_extent_map(em);
2314 				break;
2315 			}
2316 		} else {
2317 			if (whence == SEEK_DATA) {
2318 				if (em->block_start == EXTENT_MAP_DELALLOC) {
2319 					if (start >= inode->i_size) {
2320 						free_extent_map(em);
2321 						ret = -ENXIO;
2322 						break;
2323 					}
2324 				}
2325 
2326 				if (!test_bit(EXTENT_FLAG_PREALLOC,
2327 					      &em->flags)) {
2328 					*offset = start;
2329 					free_extent_map(em);
2330 					break;
2331 				}
2332 			}
2333 		}
2334 
2335 		start = em->start + em->len;
2336 		last_end = em->start + em->len;
2337 
2338 		if (em->block_start == EXTENT_MAP_DELALLOC)
2339 			last_end = min_t(u64, last_end, inode->i_size);
2340 
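		/*
		 * A vacant map comes back when the lookup found no extent
		 * item covering the range, which here means we have walked
		 * past the last extent of the file.
		 */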
2341 		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2342 			free_extent_map(em);
2343 			ret = -ENXIO;
2344 			break;
2345 		}
2346 		free_extent_map(em);
2347 		cond_resched();
2348 	}
2349 	if (!ret)
2350 		*offset = min(*offset, inode->i_size);
2351 out:
2352 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2353 			     &cached_state, GFP_NOFS);
2354 	return ret;
2355 }
2356 
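/*
 * llseek with SEEK_DATA/SEEK_HOLE support.  An illustrative userspace
 * sketch of the contract:
 *
 *	off_t data = lseek(fd, pos, SEEK_DATA);
 *		first byte of data at or after pos
 *	off_t hole = lseek(fd, pos, SEEK_HOLE);
 *		first hole at or after pos
 *
 * Both fail with ENXIO once pos is at or past the end of the file.
 */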
2357 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2358 {
2359 	struct inode *inode = file->f_mapping->host;
2360 	int ret;
2361 
2362 	mutex_lock(&inode->i_mutex);
2363 	switch (whence) {
2364 	case SEEK_END:
2365 	case SEEK_CUR:
2366 		offset = generic_file_llseek(file, offset, whence);
2367 		goto out;
2368 	case SEEK_DATA:
2369 	case SEEK_HOLE:
2370 		if (offset >= i_size_read(inode)) {
2371 			mutex_unlock(&inode->i_mutex);
2372 			return -ENXIO;
2373 		}
2374 
2375 		ret = find_desired_extent(inode, &offset, whence);
2376 		if (ret) {
2377 			mutex_unlock(&inode->i_mutex);
2378 			return ret;
2379 		}
2380 	}
2381 
2382 	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2383 		offset = -EINVAL;
2384 		goto out;
2385 	}
2386 	if (offset > inode->i_sb->s_maxbytes) {
2387 		offset = -EINVAL;
2388 		goto out;
2389 	}
2390 
2391 	/* Special lock needed here? */
2392 	if (offset != file->f_pos) {
2393 		file->f_pos = offset;
2394 		file->f_version = 0;
2395 	}
2396 out:
2397 	mutex_unlock(&inode->i_mutex);
2398 	return offset;
2399 }
2400 
2401 const struct file_operations btrfs_file_operations = {
2402 	.llseek		= btrfs_file_llseek,
2403 	.read		= do_sync_read,
2404 	.write		= do_sync_write,
2405 	.aio_read       = generic_file_aio_read,
2406 	.splice_read	= generic_file_splice_read,
2407 	.aio_write	= btrfs_file_aio_write,
2408 	.mmap		= btrfs_file_mmap,
2409 	.open		= generic_file_open,
2410 	.release	= btrfs_release_file,
2411 	.fsync		= btrfs_sync_file,
2412 	.fallocate	= btrfs_fallocate,
2413 	.unlocked_ioctl	= btrfs_ioctl,
2414 #ifdef CONFIG_COMPAT
2415 	.compat_ioctl	= btrfs_ioctl,
2416 #endif
2417 };
2418 
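/*
 * Init/exit helpers for the inode_defrag allocation cache; called from
 * the module setup and teardown paths.
 */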
2419 void btrfs_auto_defrag_exit(void)
2420 {
2421 	if (btrfs_inode_defrag_cachep)
2422 		kmem_cache_destroy(btrfs_inode_defrag_cachep);
2423 }
2424 
2425 int btrfs_auto_defrag_init(void)
2426 {
2427 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2428 					sizeof(struct inode_defrag), 0,
2429 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2430 					NULL);
2431 	if (!btrfs_inode_defrag_cachep)
2432 		return -ENOMEM;
2433 
2434 	return 0;
2435 }
2436