/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

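/*
 * entry_end() returns the first byte past the range covered by an ordered
 * extent.  The sum is clamped to (u64)-1 so that a range whose end would
 * wrap past the top of the address space still sorts after every other
 * offset: file_offset == 4096 and len == 4096 gives 8192, i.e. the extent
 * covers bytes [4096, 8192).
 */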
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the node already present in
 * the tree that covers the given offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found, return
 * the entry with the closest smaller offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
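
/*
 * Worked example for the two helpers above: an entry with
 * file_offset == 4096 and len == 4096 covers bytes [4096, 8192).
 * offset_in_entry() accepts offsets 4096..8191, and range_overlaps()
 * accepts any range intersecting that interval; both treat the end
 * offset as exclusive.
 */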

/*
 * find the first ordered struct that has this offset, otherwise the
 * first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
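
/*
 * Hypothetical caller sketch (illustrative only; file_pos and ram_size
 * are assumed names): a COW write path that has just reserved an extent
 * 'ins' in the allocation tree would record the pending write before
 * submitting bios, using type 0 for a plain write:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_pos, ins.objectid,
 *				       ram_size, ins.offset, 0);
 *	if (ret)
 *		goto out_reserve;
 */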

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			"bad ordered accounting left %llu size %llu",
			entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
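
/*
 * Hypothetical caller sketch (illustrative only): a write-completion path
 * that finished io_size bytes at 'cur' could retire the covering ordered
 * extent like this, relying on *file_offset ('cur') being advanced past
 * the accounted range before the next call:
 *
 *	if (btrfs_dec_test_first_ordered_pending(inode, &ordered, &cur,
 *						 io_size, uptodate)) {
 *		// fully written: finish it, then drop the ref taken for us
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */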

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		spin_lock(&root->fs_info->ordered_root_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root (at most nr of them when
 * nr != -1).  This is done when balancing space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
				   ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}
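
/*
 * Hypothetical usage sketch (illustrative only): a commit path would
 * typically make two passes over the list, first kicking off the IO and
 * later waiting on it:
 *
 *	ret = btrfs_run_ordered_operations(trans, root, 0);	// start IO
 *	...
 *	ret = btrfs_run_ordered_operations(trans, root, 1);	// wait on it
 */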

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}
	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}
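
/*
 * Hypothetical usage sketch (illustrative only): callers that need the
 * whole file stable on disk pass a length that covers everything:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * start + len wraps in that case, so the overflow check above clamps the
 * end of the walk to INT_LIMIT(loff_t).
 */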

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
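/*
 * Example of the ordering handled below: with disk_i_size at 0 and two
 * ordered extents A [0, 4K) and B [4K, 8K), B may finish first.
 * disk_i_size cannot jump to 8K while A is still outstanding, so B's end
 * offset is parked in A's ->outstanding_isize and disk_i_size only moves
 * (all the way to 8K) once A completes.
 */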
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving the ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * as-yet-unhandled i_size; otherwise we will not
			 * know the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
						ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
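
/*
 * Hypothetical caller sketch (illustrative only): a read-side checksum
 * lookup can consult the in-flight ordered extents before falling back
 * to the on-disk csum tree:
 *
 *	count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
 *				       csum, nblocks);
 *
 * 'count' checksums were copied out; any remainder must come from the
 * csum tree.
 */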

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod <= root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}