xref: /openbmc/linux/fs/btrfs/ordered-data.c (revision 7dd65feb)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/gfp.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/writeback.h>
23 #include <linux/pagevec.h>
24 #include "ctree.h"
25 #include "transaction.h"
26 #include "btrfs_inode.h"
27 #include "extent_io.h"
28 
29 static u64 entry_end(struct btrfs_ordered_extent *entry)
30 {
31 	if (entry->file_offset + entry->len < entry->file_offset)
32 		return (u64)-1;
33 	return entry->file_offset + entry->len;
34 }
35 
36 /* returns NULL if the insertion worked, or the node already in the tree
37  * that covers the given file_offset
38  */
39 static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
40 				   struct rb_node *node)
41 {
42 	struct rb_node **p = &root->rb_node;
43 	struct rb_node *parent = NULL;
44 	struct btrfs_ordered_extent *entry;
45 
46 	while (*p) {
47 		parent = *p;
48 		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
49 
50 		if (file_offset < entry->file_offset)
51 			p = &(*p)->rb_left;
52 		else if (file_offset >= entry_end(entry))
53 			p = &(*p)->rb_right;
54 		else
55 			return parent;
56 	}
57 
58 	rb_link_node(node, parent, p);
59 	rb_insert_color(node, root);
60 	return NULL;
61 }
62 
63 /*
64  * look for a given offset in the tree; if no entry contains it, return
65  * NULL and store the closest entry below the offset in *prev_ret
66  */
67 static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
68 				     struct rb_node **prev_ret)
69 {
70 	struct rb_node *n = root->rb_node;
71 	struct rb_node *prev = NULL;
72 	struct rb_node *test;
73 	struct btrfs_ordered_extent *entry;
74 	struct btrfs_ordered_extent *prev_entry = NULL;
75 
76 	while (n) {
77 		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
78 		prev = n;
79 		prev_entry = entry;
80 
81 		if (file_offset < entry->file_offset)
82 			n = n->rb_left;
83 		else if (file_offset >= entry_end(entry))
84 			n = n->rb_right;
85 		else
86 			return n;
87 	}
88 	if (!prev_ret)
89 		return NULL;
90 
91 	while (prev && file_offset >= entry_end(prev_entry)) {
92 		test = rb_next(prev);
93 		if (!test)
94 			break;
95 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
96 				      rb_node);
97 		if (file_offset < entry_end(prev_entry))
98 			break;
99 
100 		prev = test;
101 	}
102 	if (prev)
103 		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
104 				      rb_node);
105 	while (prev && file_offset < entry_end(prev_entry)) {
106 		test = rb_prev(prev);
107 		if (!test)
108 			break;
109 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
110 				      rb_node);
111 		prev = test;
112 	}
113 	*prev_ret = prev;
114 	return NULL;
115 }
116 
117 /*
118  * helper to check if a given offset is inside a given entry
119  */
120 static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
121 {
122 	if (file_offset < entry->file_offset ||
123 	    entry->file_offset + entry->len <= file_offset)
124 		return 0;
125 	return 1;
126 }
127 
128 /*
129  * find the first ordered struct that covers this offset, otherwise
130  * the closest one below this offset
131  */
132 static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
133 					  u64 file_offset)
134 {
135 	struct rb_root *root = &tree->tree;
136 	struct rb_node *prev;
137 	struct rb_node *ret;
138 	struct btrfs_ordered_extent *entry;
139 
140 	if (tree->last) {
141 		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
142 				 rb_node);
143 		if (offset_in_entry(entry, file_offset))
144 			return tree->last;
145 	}
146 	ret = __tree_search(root, file_offset, &prev);
147 	if (!ret)
148 		ret = prev;
149 	if (ret)
150 		tree->last = ret;
151 	return ret;
152 }
153 
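/*
 * Editor's note, not part of the original file: a small worked example of
 * the lookup semantics above, assuming two ordered extents covering
 * [0, 16384) and [32768, 49152):
 *
 *	tree_search(tree, 40960) returns the [32768, 49152) node, because
 *	the offset falls inside that entry.
 *
 *	tree_search(tree, 24576) finds no entry containing the offset, so
 *	__tree_search() stores the closest lower entry and the caller gets
 *	the [0, 16384) node back instead.
 */
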
154 /* allocate and add a new ordered_extent into the per-inode tree.
155  * file_offset is the logical offset in the file
156  *
157  * start is the byte offset on disk of an extent already reserved in the
158  * extent allocation tree
159  *
160  * len is the length of the extent
161  *
162  * The tree is given a single reference on the ordered extent that was
163  * inserted.
164  */
165 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
166 			     u64 start, u64 len, u64 disk_len, int type)
167 {
168 	struct btrfs_ordered_inode_tree *tree;
169 	struct rb_node *node;
170 	struct btrfs_ordered_extent *entry;
171 
172 	tree = &BTRFS_I(inode)->ordered_tree;
173 	entry = kzalloc(sizeof(*entry), GFP_NOFS);
174 	if (!entry)
175 		return -ENOMEM;
176 
177 	mutex_lock(&tree->mutex);
178 	entry->file_offset = file_offset;
179 	entry->start = start;
180 	entry->len = len;
181 	entry->disk_len = disk_len;
182 	entry->bytes_left = len;
183 	entry->inode = inode;
184 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
185 		set_bit(type, &entry->flags);
186 
187 	/* one ref for the tree */
188 	atomic_set(&entry->refs, 1);
189 	init_waitqueue_head(&entry->wait);
190 	INIT_LIST_HEAD(&entry->list);
191 	INIT_LIST_HEAD(&entry->root_extent_list);
192 
193 	node = tree_insert(&tree->tree, file_offset,
194 			   &entry->rb_node);
195 	BUG_ON(node);
196 
197 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
198 	list_add_tail(&entry->root_extent_list,
199 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
200 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
201 
202 	mutex_unlock(&tree->mutex);
203 	BUG_ON(node);
204 	return 0;
205 }
206 
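/*
 * Editor's sketch, not part of the original file: roughly how a delalloc
 * write path could create an ordered extent once disk space is known.  The
 * helper name, offsets and lengths are hypothetical; here the data is being
 * written into a preallocated extent, so BTRFS_ORDERED_PREALLOC is passed
 * as the type and ends up set in entry->flags.
 */
static int example_queue_prealloc_write(struct inode *inode, u64 file_offset,
					u64 disk_start, u64 num_bytes)
{
	int ret;

	/* one ordered extent covering [file_offset, file_offset + num_bytes) */
	ret = btrfs_add_ordered_extent(inode, file_offset, disk_start,
				       num_bytes, num_bytes,
				       BTRFS_ORDERED_PREALLOC);
	if (ret)
		return ret;

	/*
	 * writeback completion must eventually account every byte of this
	 * range via btrfs_dec_test_ordered_pending()
	 */
	return 0;
}
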
207 /*
208  * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
209  * when an ordered extent is finished.  If the list covers more than one
210  * ordered extent, it is split across multiples.
211  */
212 int btrfs_add_ordered_sum(struct inode *inode,
213 			  struct btrfs_ordered_extent *entry,
214 			  struct btrfs_ordered_sum *sum)
215 {
216 	struct btrfs_ordered_inode_tree *tree;
217 
218 	tree = &BTRFS_I(inode)->ordered_tree;
219 	mutex_lock(&tree->mutex);
220 	list_add_tail(&sum->list, &entry->list);
221 	mutex_unlock(&tree->mutex);
222 	return 0;
223 }
224 
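/*
 * Editor's sketch, not part of the original file: attaching a checksum list
 * to an ordered extent.  The single-sector allocation below is a
 * simplification and the helper name is hypothetical; the real csum code in
 * file-item.c sizes and fills these structures for a whole bio.
 */
static int example_attach_one_csum(struct inode *inode,
				   struct btrfs_ordered_extent *ordered,
				   u64 disk_bytenr, u32 csum)
{
	struct btrfs_ordered_sum *sum;

	/* room for the header plus one btrfs_sector_sum */
	sum = kzalloc(sizeof(*sum) + sizeof(struct btrfs_sector_sum),
		      GFP_NOFS);
	if (!sum)
		return -ENOMEM;

	sum->bytenr = disk_bytenr;
	sum->len = BTRFS_I(inode)->root->sectorsize;
	sum->sums[0].bytenr = disk_bytenr;
	sum->sums[0].sum = csum;

	return btrfs_add_ordered_sum(inode, ordered, sum);
}
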
225 /*
226  * this is used to account for finished IO across a given range
227  * of the file.  The IO should not span ordered extents.  If
228  * a given ordered_extent is completely done, 1 is returned, otherwise
229  * 0.
230  *
231  * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
232  * to make sure this function only returns 1 once for a given ordered extent.
233  */
234 int btrfs_dec_test_ordered_pending(struct inode *inode,
235 				   u64 file_offset, u64 io_size)
236 {
237 	struct btrfs_ordered_inode_tree *tree;
238 	struct rb_node *node;
239 	struct btrfs_ordered_extent *entry;
240 	int ret;
241 
242 	tree = &BTRFS_I(inode)->ordered_tree;
243 	mutex_lock(&tree->mutex);
244 	node = tree_search(tree, file_offset);
245 	if (!node) {
246 		ret = 1;
247 		goto out;
248 	}
249 
250 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
251 	if (!offset_in_entry(entry, file_offset)) {
252 		ret = 1;
253 		goto out;
254 	}
255 
256 	if (io_size > entry->bytes_left) {
257 		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
258 		       (unsigned long long)entry->bytes_left,
259 		       (unsigned long long)io_size);
260 	}
261 	entry->bytes_left -= io_size;
262 	if (entry->bytes_left == 0)
263 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
264 	else
265 		ret = 1;
266 out:
267 	mutex_unlock(&tree->mutex);
268 	return ret == 0;
269 }
270 
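/*
 * Editor's sketch, not part of the original file: how a writeback completion
 * handler might account finished IO.  The function name and the way the
 * range is obtained are hypothetical; the return value is 1 only for the
 * call that retires the last outstanding byte of the ordered extent.
 */
static int example_account_finished_io(struct inode *inode, u64 start, u64 end)
{
	/* [start, end] is an inclusive range of bytes that just hit disk */
	return btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
}
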
271 /*
272  * used to drop a reference on an ordered extent.  This will free
273  * the extent if the last reference is dropped
274  */
275 int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
276 {
277 	struct list_head *cur;
278 	struct btrfs_ordered_sum *sum;
279 
280 	if (atomic_dec_and_test(&entry->refs)) {
281 		while (!list_empty(&entry->list)) {
282 			cur = entry->list.next;
283 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
284 			list_del(&sum->list);
285 			kfree(sum);
286 		}
287 		kfree(entry);
288 	}
289 	return 0;
290 }
291 
292 /*
293  * remove an ordered extent from the tree.  No references are dropped
294  * and you must wake_up entry->wait.  You must hold the tree mutex
295  * while you call this function.
296  */
297 static int __btrfs_remove_ordered_extent(struct inode *inode,
298 				struct btrfs_ordered_extent *entry)
299 {
300 	struct btrfs_ordered_inode_tree *tree;
301 	struct rb_node *node;
302 
303 	tree = &BTRFS_I(inode)->ordered_tree;
304 	node = &entry->rb_node;
305 	rb_erase(node, &tree->tree);
306 	tree->last = NULL;
307 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
308 
309 	spin_lock(&BTRFS_I(inode)->accounting_lock);
310 	BTRFS_I(inode)->outstanding_extents--;
311 	spin_unlock(&BTRFS_I(inode)->accounting_lock);
312 	btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
313 					      inode, 1);
314 
315 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
316 	list_del_init(&entry->root_extent_list);
317 
318 	/*
319 	 * we have no more ordered extents for this inode and
320 	 * no dirty pages.  We can safely remove the inode from the
321 	 * list of ordered operations
322 	 */
323 	if (RB_EMPTY_ROOT(&tree->tree) &&
324 	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
325 		list_del_init(&BTRFS_I(inode)->ordered_operations);
326 	}
327 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
328 
329 	return 0;
330 }
331 
332 /*
333  * remove an ordered extent from the tree.  No references are dropped
334  * but any waiters are woken.
335  */
336 int btrfs_remove_ordered_extent(struct inode *inode,
337 				struct btrfs_ordered_extent *entry)
338 {
339 	struct btrfs_ordered_inode_tree *tree;
340 	int ret;
341 
342 	tree = &BTRFS_I(inode)->ordered_tree;
343 	mutex_lock(&tree->mutex);
344 	ret = __btrfs_remove_ordered_extent(inode, entry);
345 	mutex_unlock(&tree->mutex);
346 	wake_up(&entry->wait);
347 
348 	return ret;
349 }
350 
351 /*
352  * wait for all the ordered extents in a root.  This is done when balancing
353  * space between drives.
354  */
355 int btrfs_wait_ordered_extents(struct btrfs_root *root,
356 			       int nocow_only, int delay_iput)
357 {
358 	struct list_head splice;
359 	struct list_head *cur;
360 	struct btrfs_ordered_extent *ordered;
361 	struct inode *inode;
362 
363 	INIT_LIST_HEAD(&splice);
364 
365 	spin_lock(&root->fs_info->ordered_extent_lock);
366 	list_splice_init(&root->fs_info->ordered_extents, &splice);
367 	while (!list_empty(&splice)) {
368 		cur = splice.next;
369 		ordered = list_entry(cur, struct btrfs_ordered_extent,
370 				     root_extent_list);
371 		if (nocow_only &&
372 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
373 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
374 			list_move(&ordered->root_extent_list,
375 				  &root->fs_info->ordered_extents);
376 			cond_resched_lock(&root->fs_info->ordered_extent_lock);
377 			continue;
378 		}
379 
380 		list_del_init(&ordered->root_extent_list);
381 		atomic_inc(&ordered->refs);
382 
383 		/*
384 		 * the inode may be getting freed (in sys_unlink path).
385 		 */
386 		inode = igrab(ordered->inode);
387 
388 		spin_unlock(&root->fs_info->ordered_extent_lock);
389 
390 		if (inode) {
391 			btrfs_start_ordered_extent(inode, ordered, 1);
392 			btrfs_put_ordered_extent(ordered);
393 			if (delay_iput)
394 				btrfs_add_delayed_iput(inode);
395 			else
396 				iput(inode);
397 		} else {
398 			btrfs_put_ordered_extent(ordered);
399 		}
400 
401 		spin_lock(&root->fs_info->ordered_extent_lock);
402 	}
403 	spin_unlock(&root->fs_info->ordered_extent_lock);
404 	return 0;
405 }
406 
407 /*
408  * this is used during transaction commit to write all the inodes
409  * added to the ordered operation list.  These files must be fully on
410  * disk before the transaction commits.
411  *
412  * we have two modes here: one just starts the IO via filemap_flush,
413  * and the other waits for all of the IO.  When we wait, we make an
414  * extra check that the ordered operation list really is empty
415  * before we return
416  */
417 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
418 {
419 	struct btrfs_inode *btrfs_inode;
420 	struct inode *inode;
421 	struct list_head splice;
422 
423 	INIT_LIST_HEAD(&splice);
424 
425 	mutex_lock(&root->fs_info->ordered_operations_mutex);
426 	spin_lock(&root->fs_info->ordered_extent_lock);
427 again:
428 	list_splice_init(&root->fs_info->ordered_operations, &splice);
429 
430 	while (!list_empty(&splice)) {
431 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
432 				   ordered_operations);
433 
434 		inode = &btrfs_inode->vfs_inode;
435 
436 		list_del_init(&btrfs_inode->ordered_operations);
437 
438 		/*
439 		 * the inode may be getting freed (in sys_unlink path).
440 		 */
441 		inode = igrab(inode);
442 
443 		if (!wait && inode) {
444 			list_add_tail(&BTRFS_I(inode)->ordered_operations,
445 			      &root->fs_info->ordered_operations);
446 		}
447 		spin_unlock(&root->fs_info->ordered_extent_lock);
448 
449 		if (inode) {
450 			if (wait)
451 				btrfs_wait_ordered_range(inode, 0, (u64)-1);
452 			else
453 				filemap_flush(inode->i_mapping);
454 			btrfs_add_delayed_iput(inode);
455 		}
456 
457 		cond_resched();
458 		spin_lock(&root->fs_info->ordered_extent_lock);
459 	}
460 	if (wait && !list_empty(&root->fs_info->ordered_operations))
461 		goto again;
462 
463 	spin_unlock(&root->fs_info->ordered_extent_lock);
464 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
465 
466 	return 0;
467 }
468 
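/*
 * Editor's sketch, not part of the original file: the two modes described
 * above, as a commit path might use them (the real callers live in
 * transaction.c; this pairing is illustrative only).
 */
static void example_flush_ordered_operations(struct btrfs_root *root)
{
	/* first pass: just get the IO going via filemap_flush */
	btrfs_run_ordered_operations(root, 0);

	/* final pass: block until every queued inode is fully on disk */
	btrfs_run_ordered_operations(root, 1);
}
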
469 /*
470  * Used to start IO or wait for a given ordered extent to finish.
471  *
472  * If wait is one, this effectively waits on page writeback for all the pages
473  * in the extent, and it waits on the IO completion code to insert
474  * metadata into the btree corresponding to the extent
475  */
476 void btrfs_start_ordered_extent(struct inode *inode,
477 				       struct btrfs_ordered_extent *entry,
478 				       int wait)
479 {
480 	u64 start = entry->file_offset;
481 	u64 end = start + entry->len - 1;
482 
483 	/*
484 	 * pages in the range can be dirty, clean or writeback.  We
485 	 * start IO on any dirty ones so the wait doesn't stall waiting
486 	 * for pdflush to find them
487 	 */
488 	filemap_fdatawrite_range(inode->i_mapping, start, end);
489 	if (wait) {
490 		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
491 						 &entry->flags));
492 	}
493 }
494 
495 /*
496  * Used to wait on ordered extents across a large range of bytes.
497  */
498 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
499 {
500 	u64 end;
501 	u64 orig_end;
502 	u64 wait_end;
503 	struct btrfs_ordered_extent *ordered;
504 	int found;
505 
506 	if (start + len < start) {
507 		orig_end = INT_LIMIT(loff_t);
508 	} else {
509 		orig_end = start + len - 1;
510 		if (orig_end > INT_LIMIT(loff_t))
511 			orig_end = INT_LIMIT(loff_t);
512 	}
513 	wait_end = orig_end;
514 again:
515 	/* start IO across the range first to instantiate any delalloc
516 	 * extents
517 	 */
518 	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
519 
520 	/* The compression code will leave pages locked but return from
521 	 * writepage without setting the page writeback.  Starting again
522 	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
523 	 */
524 	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
525 
526 	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
527 
528 	end = orig_end;
529 	found = 0;
530 	while (1) {
531 		ordered = btrfs_lookup_first_ordered_extent(inode, end);
532 		if (!ordered)
533 			break;
534 		if (ordered->file_offset > orig_end) {
535 			btrfs_put_ordered_extent(ordered);
536 			break;
537 		}
538 		if (ordered->file_offset + ordered->len < start) {
539 			btrfs_put_ordered_extent(ordered);
540 			break;
541 		}
542 		found++;
543 		btrfs_start_ordered_extent(inode, ordered, 1);
544 		end = ordered->file_offset;
545 		btrfs_put_ordered_extent(ordered);
546 		if (end == 0 || end == start)
547 			break;
548 		end--;
549 	}
550 	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
551 			   EXTENT_DELALLOC, 0, NULL)) {
552 		schedule_timeout(1);
553 		goto again;
554 	}
555 	return 0;
556 }
557 
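/*
 * Editor's sketch, not part of the original file: waiting for ordered
 * extents behind a synchronous write.  The helper name and offsets are
 * hypothetical; passing 0 and (u64)-1, as the callers above do, covers the
 * whole file instead.
 */
static int example_wait_for_sync_write(struct inode *inode, loff_t pos,
				       size_t count)
{
	/* block until every ordered extent touching [pos, pos + count) is done */
	return btrfs_wait_ordered_range(inode, (u64)pos, (u64)count);
}
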
558 /*
559  * find an ordered extent corresponding to file_offset.  return NULL if
560  * nothing is found, otherwise take a reference on the extent and return it
561  */
562 struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
563 							 u64 file_offset)
564 {
565 	struct btrfs_ordered_inode_tree *tree;
566 	struct rb_node *node;
567 	struct btrfs_ordered_extent *entry = NULL;
568 
569 	tree = &BTRFS_I(inode)->ordered_tree;
570 	mutex_lock(&tree->mutex);
571 	node = tree_search(tree, file_offset);
572 	if (!node)
573 		goto out;
574 
575 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
576 	if (!offset_in_entry(entry, file_offset))
577 		entry = NULL;
578 	if (entry)
579 		atomic_inc(&entry->refs);
580 out:
581 	mutex_unlock(&tree->mutex);
582 	return entry;
583 }
584 
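/*
 * Editor's sketch, not part of the original file: the reference discipline
 * for lookups.  Every successful lookup takes a reference that the caller
 * must drop with btrfs_put_ordered_extent() once it is done with the entry.
 */
static int example_offset_is_ordered(struct inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	btrfs_put_ordered_extent(ordered);
	return 1;
}
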
585 /*
586  * lookup and return the ordered extent covering 'file_offset', or the
587  * closest one before it.  NULL is returned if none is found
588  */
589 struct btrfs_ordered_extent *
590 btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
591 {
592 	struct btrfs_ordered_inode_tree *tree;
593 	struct rb_node *node;
594 	struct btrfs_ordered_extent *entry = NULL;
595 
596 	tree = &BTRFS_I(inode)->ordered_tree;
597 	mutex_lock(&tree->mutex);
598 	node = tree_search(tree, file_offset);
599 	if (!node)
600 		goto out;
601 
602 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
603 	atomic_inc(&entry->refs);
604 out:
605 	mutex_unlock(&tree->mutex);
606 	return entry;
607 }
608 
609 /*
610  * After an extent is done, call this to conditionally update the on disk
611  * i_size.  i_size is updated to cover any fully written part of the file.
612  */
613 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
614 				struct btrfs_ordered_extent *ordered)
615 {
616 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
617 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
618 	u64 disk_i_size;
619 	u64 new_i_size;
620 	u64 i_size_test;
621 	u64 i_size = i_size_read(inode);
622 	struct rb_node *node;
623 	struct rb_node *prev = NULL;
624 	struct btrfs_ordered_extent *test;
625 	int ret = 1;
626 
627 	if (ordered)
628 		offset = entry_end(ordered);
629 
630 	mutex_lock(&tree->mutex);
631 	disk_i_size = BTRFS_I(inode)->disk_i_size;
632 
633 	/* truncate file */
634 	if (disk_i_size > i_size) {
635 		BTRFS_I(inode)->disk_i_size = i_size;
636 		ret = 0;
637 		goto out;
638 	}
639 
640 	/*
641 	 * if the disk i_size is already at the inode->i_size, or
642 	 * this ordered extent is inside the disk i_size, we're done
643 	 */
644 	if (disk_i_size == i_size || offset <= disk_i_size) {
645 		goto out;
646 	}
647 
648 	/*
649 	 * we can't update the disk_i_size if there are delalloc bytes
650 	 * between disk_i_size and this ordered extent
651 	 */
652 	if (test_range_bit(io_tree, disk_i_size, offset - 1,
653 			   EXTENT_DELALLOC, 0, NULL)) {
654 		goto out;
655 	}
656 	/*
657 	 * walk backward from this ordered extent to disk_i_size.
658 	 * if we find an ordered extent then we can't update disk i_size
659 	 * yet
660 	 */
661 	if (ordered) {
662 		node = rb_prev(&ordered->rb_node);
663 	} else {
664 		prev = tree_search(tree, offset);
665 		/*
666 		 * we insert file extents without involving the ordered struct,
667 		 * so there should be no ordered struct covering this offset
668 		 */
669 		if (prev) {
670 			test = rb_entry(prev, struct btrfs_ordered_extent,
671 					rb_node);
672 			BUG_ON(offset_in_entry(test, offset));
673 		}
674 		node = prev;
675 	}
676 	while (node) {
677 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
678 		if (test->file_offset + test->len <= disk_i_size)
679 			break;
680 		if (test->file_offset >= i_size)
681 			break;
682 		if (test->file_offset >= disk_i_size)
683 			goto out;
684 		node = rb_prev(node);
685 	}
686 	new_i_size = min_t(u64, offset, i_size);
687 
688 	/*
689 	 * at this point, we know we can safely update i_size to at least
690 	 * the offset from this ordered extent.  But, we need to
691 	 * walk forward and see if ios from higher up in the file have
692 	 * walk forward and see if IOs from higher up in the file have
693 	 */
694 	if (ordered) {
695 		node = rb_next(&ordered->rb_node);
696 	} else {
697 		if (prev)
698 			node = rb_next(prev);
699 		else
700 			node = rb_first(&tree->tree);
701 	}
702 	i_size_test = 0;
703 	if (node) {
704 		/*
705 		 * do we have an area where IO might have finished
706 		 * between our ordered extent and the next one?
707 		 */
708 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
709 		if (test->file_offset > offset)
710 			i_size_test = test->file_offset;
711 	} else {
712 		i_size_test = i_size;
713 	}
714 
715 	/*
716 	 * i_size_test is the end of a region after this ordered
717 	 * extent where there are no ordered extents.  As long as there
718 	 * are no delalloc bytes in this area, it is safe to update
719 	 * disk_i_size to the end of the region.
720 	 */
721 	if (i_size_test > offset &&
722 	    !test_range_bit(io_tree, offset, i_size_test - 1,
723 			    EXTENT_DELALLOC, 0, NULL)) {
724 		new_i_size = min_t(u64, i_size_test, i_size);
725 	}
726 	BTRFS_I(inode)->disk_i_size = new_i_size;
727 	ret = 0;
728 out:
729 	/*
730 	 * we need to remove the ordered extent with the tree lock held
731 	 * so that other people calling this function don't find our fully
732 	 * processed ordered entry and skip updating the i_size
733 	 */
734 	if (ordered)
735 		__btrfs_remove_ordered_extent(inode, ordered);
736 	mutex_unlock(&tree->mutex);
737 	if (ordered)
738 		wake_up(&ordered->wait);
739 	return ret;
740 }
741 
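/*
 * Editor's note, not part of the original file: a worked example of the
 * logic above, with hypothetical numbers.  Say i_size is 16384, disk_i_size
 * is 0, the ordered extent for [0, 4096) just finished, and the only other
 * pending ordered extent starts at 8192.  The backward walk finds nothing
 * unfinished below 4096, the forward walk sets i_size_test to 8192, and if
 * there is no delalloc in [4096, 8191] the on-disk i_size jumps straight to
 * 8192 rather than stopping at 4096.
 */
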
742 /*
743  * search the ordered extents for one corresponding to 'offset' and
744  * try to find a checksum.  This is used because we allow pages to
745  * be reclaimed before their checksum is actually put into the btree
746  */
747 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
748 			   u32 *sum)
749 {
750 	struct btrfs_ordered_sum *ordered_sum;
751 	struct btrfs_sector_sum *sector_sums;
752 	struct btrfs_ordered_extent *ordered;
753 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
754 	unsigned long num_sectors;
755 	unsigned long i;
756 	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
757 	int ret = 1;
758 
759 	ordered = btrfs_lookup_ordered_extent(inode, offset);
760 	if (!ordered)
761 		return 1;
762 
763 	mutex_lock(&tree->mutex);
764 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
765 		if (disk_bytenr >= ordered_sum->bytenr) {
766 			num_sectors = ordered_sum->len / sectorsize;
767 			sector_sums = ordered_sum->sums;
768 			for (i = 0; i < num_sectors; i++) {
769 				if (sector_sums[i].bytenr == disk_bytenr) {
770 					*sum = sector_sums[i].sum;
771 					ret = 0;
772 					goto out;
773 				}
774 			}
775 		}
776 	}
777 out:
778 	mutex_unlock(&tree->mutex);
779 	btrfs_put_ordered_extent(ordered);
780 	return ret;
781 }
782 
783 
784 /*
785  * add a given inode to the list of inodes that must be fully on
786  * disk before a transaction commit finishes.
787  *
788  * This basically gives us the ext3 style data=ordered mode, and it is mostly
789  * used to make sure renamed files are fully on disk.
790  *
791  * It is a noop if the inode is already fully on disk.
792  *
793  * If trans is not null, we'll do a friendly check for a transaction that
794  * is already flushing things and force the IO down ourselves.
795  */
796 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
797 				struct btrfs_root *root,
798 				struct inode *inode)
799 {
800 	u64 last_mod;
801 
802 	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
803 
804 	/*
805 	 * if this file hasn't been changed since the last transaction
806 	 * commit, we can safely return without doing anything
807 	 */
808 	if (last_mod < root->fs_info->last_trans_committed)
809 		return 0;
810 
811 	/*
812 	 * the transaction is already committing.  Just start the IO and
813 	 * don't bother with all of this list nonsense
814 	 */
815 	if (trans && root->fs_info->running_transaction->blocked) {
816 		btrfs_wait_ordered_range(inode, 0, (u64)-1);
817 		return 0;
818 	}
819 
820 	spin_lock(&root->fs_info->ordered_extent_lock);
821 	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
822 		list_add_tail(&BTRFS_I(inode)->ordered_operations,
823 			      &root->fs_info->ordered_operations);
824 	}
825 	spin_unlock(&root->fs_info->ordered_extent_lock);
826 
827 	return 0;
828 }
829
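
/*
 * Editor's sketch, not part of the original file: the rename case mentioned
 * in the comment above.  Once the rename has been recorded in the
 * transaction, queueing the inode here makes sure its data reaches disk
 * before that transaction commits.
 */
static void example_order_renamed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode)
{
	btrfs_add_ordered_operation(trans, root, inode);
}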