xref: /openbmc/linux/fs/btrfs/ordered-data.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

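/*
 * entry_end returns the first byte past this entry, clamped to (u64)-1 on
 * overflow.  For example, file_offset == 4096 with len == (u64)-1 would
 * otherwise wrap around to 4095 and break every end-of-entry comparison.
 */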
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/*
 * returns NULL if the insertion worked, or the existing node that
 * overlaps the given file offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = inode;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	spin_lock(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);
	spin_unlock(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1);
}
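
/*
 * Illustrative sketch, not part of this file: a COW write path that has
 * just reserved a disk extent would record it roughly like this before the
 * pages are written.  `cow_start', `ram_size' and `ins' are hypothetical
 * locals; type 0 means a plain write with no special type bit set:
 *
 *	ret = btrfs_add_ordered_extent(inode, cow_start, ins.objectid,
 *				       ram_size, ins.offset, 0);
 *	BUG_ON(ret);
 */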

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock(&tree->lock);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
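
/*
 * Illustrative sketch, not part of this file: because *file_offset is
 * advanced past each range it accounts, a completion handler whose IO
 * covered [start, start + bytes) can walk the whole range.  This assumes
 * every byte of the range is covered by some ordered extent, as the write
 * path arranges; `offset' and `ordered' are hypothetical locals:
 *
 *	u64 offset = start;
 *
 *	while (offset < start + bytes) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&offset, start + bytes - offset)) {
 *			// `ordered' is fully written; finish it, then drop
 *			// the reference taken via the `cached' argument
 *			btrfs_put_ordered_extent(ordered);
 *		}
 *	}
 */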

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree lock
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove the inode from the
	 * list of ordered operations
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	ret = __btrfs_remove_ordered_extent(inode, entry);
	spin_unlock(&tree->lock);
	wake_up(&entry->wait);

	return ret;
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
				   ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}
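
/*
 * Illustrative sketch, not part of this file: callers that need every
 * dirty byte of an inode on disk wait on the whole file, exactly as
 * btrfs_run_ordered_operations() and btrfs_add_ordered_operation() do:
 *
 *	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 */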

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
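
/*
 * Illustrative sketch, not part of this file: the reference taken by the
 * lookup must be dropped with btrfs_put_ordered_extent() once the caller
 * is done with the entry:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		// page is covered by in-flight ordered IO; deal with it
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */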

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
			   EXTENT_DELALLOC, 0, NULL)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
		node = rb_prev(node);
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
	i_size_test = 0;
	if (node) {
		/*
		 * see if we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > offset)
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size;
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
}
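
/*
 * Illustrative sketch, not part of this file: the accounting and i_size
 * helpers above combine in a typical ordered-extent completion path.
 * Checksum/extent-item insertion is elided; note that passing `ordered'
 * to btrfs_ordered_update_i_size() also removes it from the tree and
 * wakes any waiters:
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, start, len)) {
 *		// insert checksums and file extent items here ...
 *		btrfs_ordered_update_i_size(inode, 0, ordered);
 *		btrfs_put_ordered_extent(ordered);	// ref from dec_test
 *		btrfs_put_ordered_extent(ordered);	// ref held by the tree
 *	}
 */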

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
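
/*
 * Illustrative sketch, not part of this file: a read-side checksum lookup
 * can fall back to the ordered tree when the csum btree has no entry yet.
 * `offset' and `disk_bytenr' are hypothetical locals; *sum receives the
 * checksum for one sector on success (return value 0):
 *
 *	u32 sum;
 *
 *	if (btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum) == 0) {
 *		// use `sum' instead of searching the csum tree
 *	}
 */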

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}
964