xref: /openbmc/linux/fs/btrfs/disk-io.c (revision e8e0929d)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include "compat.h"
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "volumes.h"
36 #include "print-tree.h"
37 #include "async-thread.h"
38 #include "locking.h"
39 #include "tree-log.h"
40 #include "free-space-cache.h"
41 
42 static struct extent_io_ops btree_extent_io_ops;
43 static void end_workqueue_fn(struct btrfs_work *work);
44 static void free_fs_root(struct btrfs_root *root);
45 
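/* hands each mounted filesystem a unique id for its "btrfs-%d" bdi name */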
46 static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
47 
48 /*
49  * end_io_wq structs are used to do processing in task context when an IO is
50  * complete.  This is used during reads to verify checksums, and it is used
51  * by writes to insert metadata for new file extents after IO is complete.
52  */
53 struct end_io_wq {
54 	struct bio *bio;
55 	bio_end_io_t *end_io;
56 	void *private;
57 	struct btrfs_fs_info *info;
58 	int error;
59 	int metadata;
60 	struct list_head list;
61 	struct btrfs_work work;
62 };
63 
64 /*
65  * async submit bios are used to offload expensive checksumming
66  * onto the worker threads.  They checksum file and metadata bios
67  * just before they are sent down the IO stack.
68  */
69 struct async_submit_bio {
70 	struct inode *inode;
71 	struct bio *bio;
72 	struct list_head list;
73 	extent_submit_bio_hook_t *submit_bio_start;
74 	extent_submit_bio_hook_t *submit_bio_done;
75 	int rw;
76 	int mirror_num;
77 	unsigned long bio_flags;
78 	struct btrfs_work work;
79 };
80 
81 /* These are used to set the lockdep class on the extent buffer locks.
82  * The class is set by the readpage_end_io_hook after the buffer has
83  * passed csum validation but before the pages are unlocked.
84  *
85  * The lockdep class is also set by btrfs_init_new_buffer on freshly
86  * allocated blocks.
87  *
88  * The class is based on the level in the tree block, which allows lockdep
89  * to know that lower nodes nest inside the locks of higher nodes.
90  *
91  * We also add a check to make sure the highest level of the tree is
92  * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
93  * code needs updating as well.
94  */
95 #ifdef CONFIG_DEBUG_LOCK_ALLOC
96 # if BTRFS_MAX_LEVEL != 8
97 #  error "BTRFS_MAX_LEVEL changed, update the lockdep class table below"
98 # endif
99 static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
100 static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
101 	/* leaf */
102 	"btrfs-extent-00",
103 	"btrfs-extent-01",
104 	"btrfs-extent-02",
105 	"btrfs-extent-03",
106 	"btrfs-extent-04",
107 	"btrfs-extent-05",
108 	"btrfs-extent-06",
109 	"btrfs-extent-07",
110 	/* highest possible level */
111 	"btrfs-extent-08",
112 };
113 #endif
114 
115 /*
116  * extents on the btree inode are pretty simple: there's one extent
117  * that covers the entire device
118  */
119 static struct extent_map *btree_get_extent(struct inode *inode,
120 		struct page *page, size_t page_offset, u64 start, u64 len,
121 		int create)
122 {
123 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
124 	struct extent_map *em;
125 	int ret;
126 
127 	read_lock(&em_tree->lock);
128 	em = lookup_extent_mapping(em_tree, start, len);
129 	if (em) {
130 		em->bdev =
131 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
132 		read_unlock(&em_tree->lock);
133 		goto out;
134 	}
135 	read_unlock(&em_tree->lock);
136 
137 	em = alloc_extent_map(GFP_NOFS);
138 	if (!em) {
139 		em = ERR_PTR(-ENOMEM);
140 		goto out;
141 	}
142 	em->start = 0;
143 	em->len = (u64)-1;
144 	em->block_len = (u64)-1;
145 	em->block_start = 0;
146 	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
147 
148 	write_lock(&em_tree->lock);
149 	ret = add_extent_mapping(em_tree, em);
150 	if (ret == -EEXIST) {
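		/* a mapping already covers this range, usually because
		 * another task raced in and added one; drop ours and try
		 * to reuse the existing mapping
		 */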
151 		u64 failed_start = em->start;
152 		u64 failed_len = em->len;
153 
154 		free_extent_map(em);
155 		em = lookup_extent_mapping(em_tree, start, len);
156 		if (em) {
157 			ret = 0;
158 		} else {
159 			em = lookup_extent_mapping(em_tree, failed_start,
160 						   failed_len);
161 			ret = -EIO;
162 		}
163 	} else if (ret) {
164 		free_extent_map(em);
165 		em = NULL;
166 	}
167 	write_unlock(&em_tree->lock);
168 
169 	if (ret)
170 		em = ERR_PTR(ret);
171 out:
172 	return em;
173 }
174 
175 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
176 {
177 	return crc32c(seed, data, len);
178 }
179 
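/*
 * crc32c needs a final bit inversion; the result is stored in the
 * little-endian byte order used by the on-disk csum field
 */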
180 void btrfs_csum_final(u32 crc, char *result)
181 {
182 	*(__le32 *)result = ~cpu_to_le32(crc);
183 }
184 
185 /*
186  * compute the csum for a btree block, and either verify it or write it
187  * into the csum field of the block.
188  */
189 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
190 			   int verify)
191 {
192 	u16 csum_size =
193 		btrfs_super_csum_size(&root->fs_info->super_copy);
194 	char *result = NULL;
195 	unsigned long len;
196 	unsigned long cur_len;
197 	unsigned long offset = BTRFS_CSUM_SIZE;
198 	char *map_token = NULL;
199 	char *kaddr;
200 	unsigned long map_start;
201 	unsigned long map_len;
202 	int err;
203 	u32 crc = ~(u32)0;
204 	unsigned long inline_result;
205 
206 	len = buf->len - offset;
207 	while (len > 0) {
208 		err = map_private_extent_buffer(buf, offset, 32,
209 					&map_token, &kaddr,
210 					&map_start, &map_len, KM_USER0);
211 		if (err)
212 			return 1;
213 		cur_len = min(len, map_len - (offset - map_start));
214 		crc = btrfs_csum_data(root, kaddr + offset - map_start,
215 				      crc, cur_len);
216 		len -= cur_len;
217 		offset += cur_len;
218 		unmap_extent_buffer(buf, map_token, KM_USER0);
219 	}
220 	if (csum_size > sizeof(inline_result)) {
221 		result = kzalloc(csum_size, GFP_NOFS);
222 		if (!result)
223 			return 1;
224 	} else {
225 		result = (char *)&inline_result;
226 	}
227 
228 	btrfs_csum_final(crc, result);
229 
230 	if (verify) {
231 		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
232 			u32 val;
233 			u32 found = 0;
234 			memcpy(&found, result, csum_size);
235 
236 			read_extent_buffer(buf, &val, 0, csum_size);
237 			if (printk_ratelimit()) {
238 				printk(KERN_INFO "btrfs: %s checksum verify "
239 				       "failed on %llu wanted %X found %X "
240 				       "level %d\n",
241 				       root->fs_info->sb->s_id,
242 				       (unsigned long long)buf->start, val, found,
243 				       btrfs_header_level(buf));
244 			}
245 			if (result != (char *)&inline_result)
246 				kfree(result);
247 			return 1;
248 		}
249 	} else {
250 		write_extent_buffer(buf, result, 0, csum_size);
251 	}
252 	if (result != (char *)&inline_result)
253 		kfree(result);
254 	return 0;
255 }
256 
257 /*
258  * we can't consider a given block up to date unless the transid of the
259  * block matches the transid in the parent node's pointer.  This is how we
260  * detect blocks that either didn't get written at all or got written
261  * in the wrong place.
262  */
263 static int verify_parent_transid(struct extent_io_tree *io_tree,
264 				 struct extent_buffer *eb, u64 parent_transid)
265 {
266 	int ret;
267 
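	/* a parent_transid of zero means the caller has nothing to check
	 * against, so take the block as it is
	 */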
268 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
269 		return 0;
270 
271 	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
272 	if (extent_buffer_uptodate(io_tree, eb) &&
273 	    btrfs_header_generation(eb) == parent_transid) {
274 		ret = 0;
275 		goto out;
276 	}
277 	if (printk_ratelimit()) {
278 		printk(KERN_INFO "parent transid verify failed on %llu wanted %llu "
279 		       "found %llu\n",
280 		       (unsigned long long)eb->start,
281 		       (unsigned long long)parent_transid,
282 		       (unsigned long long)btrfs_header_generation(eb));
283 	}
284 	ret = 1;
285 	clear_extent_buffer_uptodate(io_tree, eb);
286 out:
287 	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
288 		      GFP_NOFS);
289 	return ret;
290 }
291 
292 /*
293  * helper to read a given tree block, doing retries as required when
294  * the checksums don't match and we have alternate mirrors to try.
295  */
296 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
297 					  struct extent_buffer *eb,
298 					  u64 start, u64 parent_transid)
299 {
300 	struct extent_io_tree *io_tree;
301 	int ret;
302 	int num_copies = 0;
303 	int mirror_num = 0;
304 
305 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
306 	while (1) {
307 		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
308 					       btree_get_extent, mirror_num);
309 		if (!ret &&
310 		    !verify_parent_transid(io_tree, eb, parent_transid))
311 			return ret;
312 
313 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
314 					      eb->start, eb->len);
315 		if (num_copies == 1)
316 			return ret;
317 
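		/* this copy failed the csum or transid checks; move on to
		 * the next mirror if the chunk layout provides one
		 */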
318 		mirror_num++;
319 		if (mirror_num > num_copies)
320 			return ret;
321 	}
322 	return -EIO;
323 }
324 
325 /*
326  * checksum a dirty tree block before IO.  This has extra checks to make sure
327  * we only fill in the checksum field in the first page of a multi-page block
328  */
329 
330 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
331 {
332 	struct extent_io_tree *tree;
333 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
334 	u64 found_start;
335 	int found_level;
336 	unsigned long len;
337 	struct extent_buffer *eb;
338 	int ret;
339 
340 	tree = &BTRFS_I(page->mapping->host)->io_tree;
341 
342 	if (page->private == EXTENT_PAGE_PRIVATE)
343 		goto out;
344 	if (!page->private)
345 		goto out;
346 	len = page->private >> 2;
347 	WARN_ON(len == 0);
348 
349 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
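	/* pull in any pages past the first so the whole block is in
	 * memory before the checksum is computed over it
	 */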
350 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
351 					     btrfs_header_generation(eb));
352 	BUG_ON(ret);
353 	found_start = btrfs_header_bytenr(eb);
354 	if (found_start != start) {
355 		WARN_ON(1);
356 		goto err;
357 	}
358 	if (eb->first_page != page) {
359 		WARN_ON(1);
360 		goto err;
361 	}
362 	if (!PageUptodate(page)) {
363 		WARN_ON(1);
364 		goto err;
365 	}
366 	found_level = btrfs_header_level(eb);
367 
368 	csum_tree_block(root, eb, 0);
369 err:
370 	free_extent_buffer(eb);
371 out:
372 	return 0;
373 }
374 
375 static int check_tree_block_fsid(struct btrfs_root *root,
376 				 struct extent_buffer *eb)
377 {
378 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
379 	u8 fsid[BTRFS_UUID_SIZE];
380 	int ret = 1;
381 
382 	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
383 			   BTRFS_FSID_SIZE);
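	/* the block is acceptable if its fsid matches this filesystem or
	 * any seed filesystem it was sprouted from
	 */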
384 	while (fs_devices) {
385 		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
386 			ret = 0;
387 			break;
388 		}
389 		fs_devices = fs_devices->seed;
390 	}
391 	return ret;
392 }
393 
394 #ifdef CONFIG_DEBUG_LOCK_ALLOC
395 void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
396 {
397 	lockdep_set_class_and_name(&eb->lock,
398 			   &btrfs_eb_class[level],
399 			   btrfs_eb_name[level]);
400 }
401 #endif
402 
403 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
404 			       struct extent_state *state)
405 {
406 	struct extent_io_tree *tree;
407 	u64 found_start;
408 	int found_level;
409 	unsigned long len;
410 	struct extent_buffer *eb;
411 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
412 	int ret = 0;
413 
414 	tree = &BTRFS_I(page->mapping->host)->io_tree;
415 	if (page->private == EXTENT_PAGE_PRIVATE)
416 		goto out;
417 	if (!page->private)
418 		goto out;
419 
420 	len = page->private >> 2;
421 	WARN_ON(len == 0);
422 
423 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
424 
425 	found_start = btrfs_header_bytenr(eb);
426 	if (found_start != start) {
427 		if (printk_ratelimit()) {
428 			printk(KERN_INFO "btrfs bad tree block start "
429 			       "%llu %llu\n",
430 			       (unsigned long long)found_start,
431 			       (unsigned long long)eb->start);
432 		}
433 		ret = -EIO;
434 		goto err;
435 	}
436 	if (eb->first_page != page) {
437 		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
438 		       eb->first_page->index, page->index);
439 		WARN_ON(1);
440 		ret = -EIO;
441 		goto err;
442 	}
443 	if (check_tree_block_fsid(root, eb)) {
444 		if (printk_ratelimit()) {
445 			printk(KERN_INFO "btrfs bad fsid on block %llu\n",
446 			       (unsigned long long)eb->start);
447 		}
448 		ret = -EIO;
449 		goto err;
450 	}
451 	found_level = btrfs_header_level(eb);
452 
453 	btrfs_set_buffer_lockdep_class(eb, found_level);
454 
455 	ret = csum_tree_block(root, eb, 1);
456 	if (ret)
457 		ret = -EIO;
458 
459 	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
460 	end = eb->start + end - 1;
461 err:
462 	free_extent_buffer(eb);
463 out:
464 	return ret;
465 }
466 
467 static void end_workqueue_bio(struct bio *bio, int err)
468 {
469 	struct end_io_wq *end_io_wq = bio->bi_private;
470 	struct btrfs_fs_info *fs_info;
471 
472 	fs_info = end_io_wq->info;
473 	end_io_wq->error = err;
474 	end_io_wq->work.func = end_workqueue_fn;
475 	end_io_wq->work.flags = 0;
476 
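	/* hand the completion to the worker pool that matches this bio:
	 * writes vs reads, and metadata vs data
	 */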
477 	if (bio->bi_rw & (1 << BIO_RW)) {
478 		if (end_io_wq->metadata)
479 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
480 					   &end_io_wq->work);
481 		else
482 			btrfs_queue_worker(&fs_info->endio_write_workers,
483 					   &end_io_wq->work);
484 	} else {
485 		if (end_io_wq->metadata)
486 			btrfs_queue_worker(&fs_info->endio_meta_workers,
487 					   &end_io_wq->work);
488 		else
489 			btrfs_queue_worker(&fs_info->endio_workers,
490 					   &end_io_wq->work);
491 	}
492 }
493 
494 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
495 			int metadata)
496 {
497 	struct end_io_wq *end_io_wq;
498 	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
499 	if (!end_io_wq)
500 		return -ENOMEM;
501 
502 	end_io_wq->private = bio->bi_private;
503 	end_io_wq->end_io = bio->bi_end_io;
504 	end_io_wq->info = info;
505 	end_io_wq->error = 0;
506 	end_io_wq->bio = bio;
507 	end_io_wq->metadata = metadata;
508 
509 	bio->bi_private = end_io_wq;
510 	bio->bi_end_io = end_workqueue_bio;
511 	return 0;
512 }
513 
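/*
 * async bio submission is throttled once this many bios are in flight;
 * the limit scales with the worker pool size and the open device count
 */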
514 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
515 {
516 	unsigned long limit = min_t(unsigned long,
517 				    info->workers.max_workers,
518 				    info->fs_devices->open_devices);
519 	return 256 * limit;
520 }
521 
522 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
523 {
524 	return atomic_read(&info->nr_async_bios) >
525 		btrfs_async_submit_limit(info);
526 }
527 
528 static void run_one_async_start(struct btrfs_work *work)
529 {
530 	struct btrfs_fs_info *fs_info;
531 	struct async_submit_bio *async;
532 
533 	async = container_of(work, struct async_submit_bio, work);
534 	fs_info = BTRFS_I(async->inode)->root->fs_info;
535 	async->submit_bio_start(async->inode, async->rw, async->bio,
536 			       async->mirror_num, async->bio_flags);
537 }
538 
539 static void run_one_async_done(struct btrfs_work *work)
540 {
541 	struct btrfs_fs_info *fs_info;
542 	struct async_submit_bio *async;
543 	int limit;
544 
545 	async = container_of(work, struct async_submit_bio, work);
546 	fs_info = BTRFS_I(async->inode)->root->fs_info;
547 
548 	limit = btrfs_async_submit_limit(fs_info);
549 	limit = limit * 2 / 3;
550 
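	/* only wake throttled submitters once we drop below 2/3 of the
	 * limit, which gives the queue a little hysteresis
	 */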
551 	atomic_dec(&fs_info->nr_async_submits);
552 
553 	if (atomic_read(&fs_info->nr_async_submits) < limit &&
554 	    waitqueue_active(&fs_info->async_submit_wait))
555 		wake_up(&fs_info->async_submit_wait);
556 
557 	async->submit_bio_done(async->inode, async->rw, async->bio,
558 			       async->mirror_num, async->bio_flags);
559 }
560 
561 static void run_one_async_free(struct btrfs_work *work)
562 {
563 	struct async_submit_bio *async;
564 
565 	async = container_of(work, struct async_submit_bio, work);
566 	kfree(async);
567 }
568 
569 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
570 			int rw, struct bio *bio, int mirror_num,
571 			unsigned long bio_flags,
572 			extent_submit_bio_hook_t *submit_bio_start,
573 			extent_submit_bio_hook_t *submit_bio_done)
574 {
575 	struct async_submit_bio *async;
576 
577 	async = kmalloc(sizeof(*async), GFP_NOFS);
578 	if (!async)
579 		return -ENOMEM;
580 
581 	async->inode = inode;
582 	async->rw = rw;
583 	async->bio = bio;
584 	async->mirror_num = mirror_num;
585 	async->submit_bio_start = submit_bio_start;
586 	async->submit_bio_done = submit_bio_done;
587 
588 	async->work.func = run_one_async_start;
589 	async->work.ordered_func = run_one_async_done;
590 	async->work.ordered_free = run_one_async_free;
591 
592 	async->work.flags = 0;
593 	async->bio_flags = bio_flags;
594 
595 	atomic_inc(&fs_info->nr_async_submits);
596 
597 	if (rw & (1 << BIO_RW_SYNCIO))
598 		btrfs_set_work_high_prio(&async->work);
599 
600 	btrfs_queue_worker(&fs_info->workers, &async->work);
601 
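	/* if someone is draining the async queue, help out by waiting
	 * until all pending submits (including this one) are flushed
	 */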
602 	while (atomic_read(&fs_info->async_submit_draining) &&
603 	      atomic_read(&fs_info->nr_async_submits)) {
604 		wait_event(fs_info->async_submit_wait,
605 			   (atomic_read(&fs_info->nr_async_submits) == 0));
606 	}
607 
608 	return 0;
609 }
610 
611 static int btree_csum_one_bio(struct bio *bio)
612 {
613 	struct bio_vec *bvec = bio->bi_io_vec;
614 	int bio_index = 0;
615 	struct btrfs_root *root;
616 
617 	WARN_ON(bio->bi_vcnt <= 0);
618 	while (bio_index < bio->bi_vcnt) {
619 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
620 		csum_dirty_buffer(root, bvec->bv_page);
621 		bio_index++;
622 		bvec++;
623 	}
624 	return 0;
625 }
626 
627 static int __btree_submit_bio_start(struct inode *inode, int rw,
628 				    struct bio *bio, int mirror_num,
629 				    unsigned long bio_flags)
630 {
631 	/*
632 	 * when we're called for a write, we're already in the async
633 	 * submission context.  Just checksum the bio here
634 	 */
635 	btree_csum_one_bio(bio);
636 	return 0;
637 }
638 
639 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
640 				 int mirror_num, unsigned long bio_flags)
641 {
642 	/*
643 	 * when we're called for a write, we're already in the async
644 	 * submission context.  Just jump into btrfs_map_bio
645 	 */
646 	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
647 }
648 
649 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
650 				 int mirror_num, unsigned long bio_flags)
651 {
652 	int ret;
653 
654 	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
655 					  bio, 1);
656 	BUG_ON(ret);
657 
658 	if (!(rw & (1 << BIO_RW))) {
659 		/*
660 		 * called for a read, do the setup so that checksum validation
661 		 * can happen in the async kernel threads
662 		 */
663 		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
664 				     mirror_num, 0);
665 	}
666 
667 	/*
668 	 * kthread helpers are used to submit writes so that checksumming
669 	 * can happen in parallel across all CPUs
670 	 */
671 	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
672 				   inode, rw, bio, mirror_num, 0,
673 				   __btree_submit_bio_start,
674 				   __btree_submit_bio_done);
675 }
676 
677 static int btree_writepage(struct page *page, struct writeback_control *wbc)
678 {
679 	struct extent_io_tree *tree;
680 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
681 	struct extent_buffer *eb;
682 	int was_dirty;
683 
684 	tree = &BTRFS_I(page->mapping->host)->io_tree;
685 	if (!(current->flags & PF_MEMALLOC)) {
686 		return extent_write_full_page(tree, page,
687 					      btree_get_extent, wbc);
688 	}
689 
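	/* we are in direct reclaim: writing btree pages from here could
	 * deadlock on tree locks, so redirty the page and let regular
	 * writeback pick it up later
	 */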
690 	redirty_page_for_writepage(wbc, page);
691 	eb = btrfs_find_tree_block(root, page_offset(page),
692 				      PAGE_CACHE_SIZE);
693 	WARN_ON(!eb);
694 
695 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
696 	if (!was_dirty) {
697 		spin_lock(&root->fs_info->delalloc_lock);
698 		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
699 		spin_unlock(&root->fs_info->delalloc_lock);
700 	}
701 	free_extent_buffer(eb);
702 
703 	unlock_page(page);
704 	return 0;
705 }
706 
707 static int btree_writepages(struct address_space *mapping,
708 			    struct writeback_control *wbc)
709 {
710 	struct extent_io_tree *tree;
711 	tree = &BTRFS_I(mapping->host)->io_tree;
712 	if (wbc->sync_mode == WB_SYNC_NONE) {
713 		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
714 		u64 num_dirty;
715 		unsigned long thresh = 32 * 1024 * 1024;
716 
717 		if (wbc->for_kupdate)
718 			return 0;
719 
720 		/* this is a bit racy, but that's ok */
721 		num_dirty = root->fs_info->dirty_metadata_bytes;
722 		if (num_dirty < thresh)
723 			return 0;
724 	}
725 	return extent_writepages(tree, mapping, btree_get_extent, wbc);
726 }
727 
728 static int btree_readpage(struct file *file, struct page *page)
729 {
730 	struct extent_io_tree *tree;
731 	tree = &BTRFS_I(page->mapping->host)->io_tree;
732 	return extent_read_full_page(tree, page, btree_get_extent);
733 }
734 
735 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
736 {
737 	struct extent_io_tree *tree;
738 	struct extent_map_tree *map;
739 	int ret;
740 
741 	if (PageWriteback(page) || PageDirty(page))
742 		return 0;
743 
744 	tree = &BTRFS_I(page->mapping->host)->io_tree;
745 	map = &BTRFS_I(page->mapping->host)->extent_tree;
746 
747 	ret = try_release_extent_state(map, tree, page, gfp_flags);
748 	if (!ret)
749 		return 0;
750 
751 	ret = try_release_extent_buffer(tree, page);
752 	if (ret == 1) {
753 		ClearPagePrivate(page);
754 		set_page_private(page, 0);
755 		page_cache_release(page);
756 	}
757 
758 	return ret;
759 }
760 
761 static void btree_invalidatepage(struct page *page, unsigned long offset)
762 {
763 	struct extent_io_tree *tree;
764 	tree = &BTRFS_I(page->mapping->host)->io_tree;
765 	extent_invalidatepage(tree, page, offset);
766 	btree_releasepage(page, GFP_NOFS);
767 	if (PagePrivate(page)) {
768 		printk(KERN_WARNING "btrfs warning page private not zero "
769 		       "on page %llu\n", (unsigned long long)page_offset(page));
770 		ClearPagePrivate(page);
771 		set_page_private(page, 0);
772 		page_cache_release(page);
773 	}
774 }
775 
776 static const struct address_space_operations btree_aops = {
777 	.readpage	= btree_readpage,
778 	.writepage	= btree_writepage,
779 	.writepages	= btree_writepages,
780 	.releasepage	= btree_releasepage,
781 	.invalidatepage = btree_invalidatepage,
782 	.sync_page	= block_sync_page,
783 };
784 
785 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
786 			 u64 parent_transid)
787 {
788 	struct extent_buffer *buf = NULL;
789 	struct inode *btree_inode = root->fs_info->btree_inode;
790 	int ret = 0;
791 
792 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
793 	if (!buf)
794 		return 0;
795 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
796 				 buf, 0, 0, btree_get_extent, 0);
797 	free_extent_buffer(buf);
798 	return ret;
799 }
800 
801 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
802 					    u64 bytenr, u32 blocksize)
803 {
804 	struct inode *btree_inode = root->fs_info->btree_inode;
805 	struct extent_buffer *eb;
806 	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
807 				bytenr, blocksize, GFP_NOFS);
808 	return eb;
809 }
810 
811 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
812 						 u64 bytenr, u32 blocksize)
813 {
814 	struct inode *btree_inode = root->fs_info->btree_inode;
815 	struct extent_buffer *eb;
816 
817 	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
818 				 bytenr, blocksize, NULL, GFP_NOFS);
819 	return eb;
820 }
821 
822 
823 int btrfs_write_tree_block(struct extent_buffer *buf)
824 {
825 	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
826 					buf->start + buf->len - 1);
827 }
828 
829 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
830 {
831 	return filemap_fdatawait_range(buf->first_page->mapping,
832 				       buf->start, buf->start + buf->len - 1);
833 }
834 
835 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
836 				      u32 blocksize, u64 parent_transid)
837 {
838 	struct extent_buffer *buf = NULL;
839 	struct inode *btree_inode = root->fs_info->btree_inode;
840 	struct extent_io_tree *io_tree;
841 	int ret;
842 
843 	io_tree = &BTRFS_I(btree_inode)->io_tree;
844 
845 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
846 	if (!buf)
847 		return NULL;
848 
849 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
850 
851 	if (ret == 0)
852 		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
853 	return buf;
854 
855 }
856 
857 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
858 		     struct extent_buffer *buf)
859 {
860 	struct inode *btree_inode = root->fs_info->btree_inode;
861 	if (btrfs_header_generation(buf) ==
862 	    root->fs_info->running_transaction->transid) {
863 		btrfs_assert_tree_locked(buf);
864 
865 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
866 			spin_lock(&root->fs_info->delalloc_lock);
867 			if (root->fs_info->dirty_metadata_bytes >= buf->len)
868 				root->fs_info->dirty_metadata_bytes -= buf->len;
869 			else
870 				WARN_ON(1);
871 			spin_unlock(&root->fs_info->delalloc_lock);
872 		}
873 
874 		/* ugh, clear_extent_buffer_dirty needs to lock the page */
875 		btrfs_set_lock_blocking(buf);
876 		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
877 					  buf);
878 	}
879 	return 0;
880 }
881 
882 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
883 			u32 stripesize, struct btrfs_root *root,
884 			struct btrfs_fs_info *fs_info,
885 			u64 objectid)
886 {
887 	root->node = NULL;
888 	root->commit_root = NULL;
889 	root->sectorsize = sectorsize;
890 	root->nodesize = nodesize;
891 	root->leafsize = leafsize;
892 	root->stripesize = stripesize;
893 	root->ref_cows = 0;
894 	root->track_dirty = 0;
895 
896 	root->fs_info = fs_info;
897 	root->objectid = objectid;
898 	root->last_trans = 0;
899 	root->highest_objectid = 0;
900 	root->name = NULL;
901 	root->in_sysfs = 0;
902 	root->inode_tree.rb_node = NULL;
903 
904 	INIT_LIST_HEAD(&root->dirty_list);
905 	INIT_LIST_HEAD(&root->orphan_list);
906 	INIT_LIST_HEAD(&root->root_list);
907 	spin_lock_init(&root->node_lock);
908 	spin_lock_init(&root->list_lock);
909 	spin_lock_init(&root->inode_lock);
910 	mutex_init(&root->objectid_mutex);
911 	mutex_init(&root->log_mutex);
912 	init_waitqueue_head(&root->log_writer_wait);
913 	init_waitqueue_head(&root->log_commit_wait[0]);
914 	init_waitqueue_head(&root->log_commit_wait[1]);
915 	atomic_set(&root->log_commit[0], 0);
916 	atomic_set(&root->log_commit[1], 0);
917 	atomic_set(&root->log_writers, 0);
918 	root->log_batch = 0;
919 	root->log_transid = 0;
920 	extent_io_tree_init(&root->dirty_log_pages,
921 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
922 
923 	memset(&root->root_key, 0, sizeof(root->root_key));
924 	memset(&root->root_item, 0, sizeof(root->root_item));
925 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
926 	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
927 	root->defrag_trans_start = fs_info->generation;
928 	init_completion(&root->kobj_unregister);
929 	root->defrag_running = 0;
930 	root->defrag_level = 0;
931 	root->root_key.objectid = objectid;
932 	root->anon_super.s_root = NULL;
933 	root->anon_super.s_dev = 0;
934 	INIT_LIST_HEAD(&root->anon_super.s_list);
935 	INIT_LIST_HEAD(&root->anon_super.s_instances);
936 	init_rwsem(&root->anon_super.s_umount);
937 
938 	return 0;
939 }
940 
941 static int find_and_setup_root(struct btrfs_root *tree_root,
942 			       struct btrfs_fs_info *fs_info,
943 			       u64 objectid,
944 			       struct btrfs_root *root)
945 {
946 	int ret;
947 	u32 blocksize;
948 	u64 generation;
949 
950 	__setup_root(tree_root->nodesize, tree_root->leafsize,
951 		     tree_root->sectorsize, tree_root->stripesize,
952 		     root, fs_info, objectid);
953 	ret = btrfs_find_last_root(tree_root, objectid,
954 				   &root->root_item, &root->root_key);
955 	if (ret > 0)
956 		return -ENOENT;
957 	BUG_ON(ret);
958 
959 	generation = btrfs_root_generation(&root->root_item);
960 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
961 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
962 				     blocksize, generation);
963 	BUG_ON(!root->node);
964 	root->commit_root = btrfs_root_node(root);
965 	return 0;
966 }
967 
968 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
969 			     struct btrfs_fs_info *fs_info)
970 {
971 	struct extent_buffer *eb;
972 	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
973 	u64 start = 0;
974 	u64 end = 0;
975 	int ret;
976 
977 	if (!log_root_tree)
978 		return 0;
979 
980 	while (1) {
981 		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
982 				    0, &start, &end, EXTENT_DIRTY);
983 		if (ret)
984 			break;
985 
986 		clear_extent_dirty(&log_root_tree->dirty_log_pages,
987 				   start, end, GFP_NOFS);
988 	}
989 	eb = fs_info->log_root_tree->node;
990 
991 	WARN_ON(btrfs_header_level(eb) != 0);
992 	WARN_ON(btrfs_header_nritems(eb) != 0);
993 
994 	ret = btrfs_free_reserved_extent(fs_info->tree_root,
995 				eb->start, eb->len);
996 	BUG_ON(ret);
997 
998 	free_extent_buffer(eb);
999 	kfree(fs_info->log_root_tree);
1000 	fs_info->log_root_tree = NULL;
1001 	return 0;
1002 }
1003 
1004 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1005 					 struct btrfs_fs_info *fs_info)
1006 {
1007 	struct btrfs_root *root;
1008 	struct btrfs_root *tree_root = fs_info->tree_root;
1009 	struct extent_buffer *leaf;
1010 
1011 	root = kzalloc(sizeof(*root), GFP_NOFS);
1012 	if (!root)
1013 		return ERR_PTR(-ENOMEM);
1014 
1015 	__setup_root(tree_root->nodesize, tree_root->leafsize,
1016 		     tree_root->sectorsize, tree_root->stripesize,
1017 		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1018 
1019 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1020 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1021 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1022 	/*
1023 	 * log trees do not get reference counted because they go away
1024 	 * before a real commit is actually done.  They do store pointers
1025 	 * to file data extents, and those reference counts still get
1026 	 * updated (along with back refs to the log tree).
1027 	 */
1028 	root->ref_cows = 0;
1029 
1030 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1031 				      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
1032 	if (IS_ERR(leaf)) {
1033 		kfree(root);
1034 		return ERR_CAST(leaf);
1035 	}
1036 
1037 	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1038 	btrfs_set_header_bytenr(leaf, leaf->start);
1039 	btrfs_set_header_generation(leaf, trans->transid);
1040 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1041 	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1042 	root->node = leaf;
1043 
1044 	write_extent_buffer(root->node, root->fs_info->fsid,
1045 			    (unsigned long)btrfs_header_fsid(root->node),
1046 			    BTRFS_FSID_SIZE);
1047 	btrfs_mark_buffer_dirty(root->node);
1048 	btrfs_tree_unlock(root->node);
1049 	return root;
1050 }
1051 
1052 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1053 			     struct btrfs_fs_info *fs_info)
1054 {
1055 	struct btrfs_root *log_root;
1056 
1057 	log_root = alloc_log_tree(trans, fs_info);
1058 	if (IS_ERR(log_root))
1059 		return PTR_ERR(log_root);
1060 	WARN_ON(fs_info->log_root_tree);
1061 	fs_info->log_root_tree = log_root;
1062 	return 0;
1063 }
1064 
1065 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1066 		       struct btrfs_root *root)
1067 {
1068 	struct btrfs_root *log_root;
1069 	struct btrfs_inode_item *inode_item;
1070 
1071 	log_root = alloc_log_tree(trans, root->fs_info);
1072 	if (IS_ERR(log_root))
1073 		return PTR_ERR(log_root);
1074 
1075 	log_root->last_trans = trans->transid;
1076 	log_root->root_key.offset = root->root_key.objectid;
1077 
1078 	inode_item = &log_root->root_item.inode;
1079 	inode_item->generation = cpu_to_le64(1);
1080 	inode_item->size = cpu_to_le64(3);
1081 	inode_item->nlink = cpu_to_le32(1);
1082 	inode_item->nbytes = cpu_to_le64(root->leafsize);
1083 	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1084 
1085 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1086 
1087 	WARN_ON(root->log_root);
1088 	root->log_root = log_root;
1089 	root->log_transid = 0;
1090 	return 0;
1091 }
1092 
1093 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1094 					       struct btrfs_key *location)
1095 {
1096 	struct btrfs_root *root;
1097 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1098 	struct btrfs_path *path;
1099 	struct extent_buffer *l;
1100 	u64 generation;
1101 	u32 blocksize;
1102 	int ret = 0;
1103 
1104 	root = kzalloc(sizeof(*root), GFP_NOFS);
1105 	if (!root)
1106 		return ERR_PTR(-ENOMEM);
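	/* an offset of (u64)-1 asks for the most recent version of this
	 * root, resolved via the last matching root item
	 */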
1107 	if (location->offset == (u64)-1) {
1108 		ret = find_and_setup_root(tree_root, fs_info,
1109 					  location->objectid, root);
1110 		if (ret) {
1111 			kfree(root);
1112 			return ERR_PTR(ret);
1113 		}
1114 		goto out;
1115 	}
1116 
1117 	__setup_root(tree_root->nodesize, tree_root->leafsize,
1118 		     tree_root->sectorsize, tree_root->stripesize,
1119 		     root, fs_info, location->objectid);
1120 
1121 	path = btrfs_alloc_path();
1122 	BUG_ON(!path);
1123 	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1124 	if (ret == 0) {
1125 		l = path->nodes[0];
1126 		read_extent_buffer(l, &root->root_item,
1127 				btrfs_item_ptr_offset(l, path->slots[0]),
1128 				sizeof(root->root_item));
1129 		memcpy(&root->root_key, location, sizeof(*location));
1130 	}
1131 	btrfs_free_path(path);
1132 	if (ret) {
1133 		if (ret > 0)
1134 			ret = -ENOENT;
1135 		return ERR_PTR(ret);
1136 	}
1137 
1138 	generation = btrfs_root_generation(&root->root_item);
1139 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1140 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1141 				     blocksize, generation);
1142 	root->commit_root = btrfs_root_node(root);
1143 	BUG_ON(!root->node);
1144 out:
1145 	if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
1146 		root->ref_cows = 1;
1147 
1148 	return root;
1149 }
1150 
1151 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1152 					u64 root_objectid)
1153 {
1154 	struct btrfs_root *root;
1155 
1156 	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
1157 		return fs_info->tree_root;
1158 	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
1159 		return fs_info->extent_root;
1160 
1161 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1162 				 (unsigned long)root_objectid);
1163 	return root;
1164 }
1165 
1166 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1167 					      struct btrfs_key *location)
1168 {
1169 	struct btrfs_root *root;
1170 	int ret;
1171 
1172 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1173 		return fs_info->tree_root;
1174 	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1175 		return fs_info->extent_root;
1176 	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1177 		return fs_info->chunk_root;
1178 	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1179 		return fs_info->dev_root;
1180 	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1181 		return fs_info->csum_root;
1182 again:
1183 	spin_lock(&fs_info->fs_roots_radix_lock);
1184 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1185 				 (unsigned long)location->objectid);
1186 	spin_unlock(&fs_info->fs_roots_radix_lock);
1187 	if (root)
1188 		return root;
1189 
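	/* an orphan item means this root is queued for deletion; don't
	 * resurrect it here
	 */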
1190 	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1191 	if (ret == 0)
1192 		ret = -ENOENT;
1193 	if (ret < 0)
1194 		return ERR_PTR(ret);
1195 
1196 	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1197 	if (IS_ERR(root))
1198 		return root;
1199 
1200 	WARN_ON(btrfs_root_refs(&root->root_item) == 0);
1201 	set_anon_super(&root->anon_super, NULL);
1202 
1203 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1204 	if (ret)
1205 		goto fail;
1206 
1207 	spin_lock(&fs_info->fs_roots_radix_lock);
1208 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1209 				(unsigned long)root->root_key.objectid,
1210 				root);
1211 	if (ret == 0)
1212 		root->in_radix = 1;
1213 	spin_unlock(&fs_info->fs_roots_radix_lock);
1214 	radix_tree_preload_end();
1215 	if (ret) {
1216 		if (ret == -EEXIST) {
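			/* we lost a race with a concurrent lookup; free our
			 * copy and retry with the root already in the tree
			 */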
1217 			free_fs_root(root);
1218 			goto again;
1219 		}
1220 		goto fail;
1221 	}
1222 
1223 	ret = btrfs_find_dead_roots(fs_info->tree_root,
1224 				    root->root_key.objectid);
1225 	WARN_ON(ret);
1226 
1227 	if (!(fs_info->sb->s_flags & MS_RDONLY))
1228 		btrfs_orphan_cleanup(root);
1229 
1230 	return root;
1231 fail:
1232 	free_fs_root(root);
1233 	return ERR_PTR(ret);
1234 }
1235 
1236 struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1237 				      struct btrfs_key *location,
1238 				      const char *name, int namelen)
1239 {
1240 	return btrfs_read_fs_root_no_name(fs_info, location);
1241 #if 0
1242 	struct btrfs_root *root;
1243 	int ret;
1244 
1245 	root = btrfs_read_fs_root_no_name(fs_info, location);
1246 	if (!root)
1247 		return NULL;
1248 
1249 	if (root->in_sysfs)
1250 		return root;
1251 
1252 	ret = btrfs_set_root_name(root, name, namelen);
1253 	if (ret) {
1254 		free_extent_buffer(root->node);
1255 		kfree(root);
1256 		return ERR_PTR(ret);
1257 	}
1258 
1259 	ret = btrfs_sysfs_add_root(root);
1260 	if (ret) {
1261 		free_extent_buffer(root->node);
1262 		kfree(root->name);
1263 		kfree(root);
1264 		return ERR_PTR(ret);
1265 	}
1266 	root->in_sysfs = 1;
1267 	return root;
1268 #endif
1269 }
1270 
1271 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1272 {
1273 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1274 	int ret = 0;
1275 	struct btrfs_device *device;
1276 	struct backing_dev_info *bdi;
1277 
1278 	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1279 		if (!device->bdev)
1280 			continue;
1281 		bdi = blk_get_backing_dev_info(device->bdev);
1282 		if (bdi && bdi_congested(bdi, bdi_bits)) {
1283 			ret = 1;
1284 			break;
1285 		}
1286 	}
1287 	return ret;
1288 }
1289 
1290 /*
1291  * this unplugs every device on the box, and it is only used when page
1292  * is null
1293  */
1294 static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1295 {
1296 	struct btrfs_device *device;
1297 	struct btrfs_fs_info *info;
1298 
1299 	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1300 	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1301 		if (!device->bdev)
1302 			continue;
1303 
1304 		bdi = blk_get_backing_dev_info(device->bdev);
1305 		if (bdi->unplug_io_fn)
1306 			bdi->unplug_io_fn(bdi, page);
1307 	}
1308 }
1309 
1310 static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1311 {
1312 	struct inode *inode;
1313 	struct extent_map_tree *em_tree;
1314 	struct extent_map *em;
1315 	struct address_space *mapping;
1316 	u64 offset;
1317 
1318 	/* the generic O_DIRECT read code does this */
1319 	if (1 || !page) {
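		/* the per-extent unplug logic below is intentionally
		 * disabled ("1 ||"); every device is always unplugged
		 */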
1320 		__unplug_io_fn(bdi, page);
1321 		return;
1322 	}
1323 
1324 	/*
1325 	 * page->mapping may change at any time.  Get a consistent copy
1326 	 * and use that for everything below
1327 	 */
1328 	smp_mb();
1329 	mapping = page->mapping;
1330 	if (!mapping)
1331 		return;
1332 
1333 	inode = mapping->host;
1334 
1335 	/*
1336 	 * don't do the expensive searching for a small number of
1337 	 * devices
1338 	 */
1339 	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
1340 		__unplug_io_fn(bdi, page);
1341 		return;
1342 	}
1343 
1344 	offset = page_offset(page);
1345 
1346 	em_tree = &BTRFS_I(inode)->extent_tree;
1347 	read_lock(&em_tree->lock);
1348 	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1349 	read_unlock(&em_tree->lock);
1350 	if (!em) {
1351 		__unplug_io_fn(bdi, page);
1352 		return;
1353 	}
1354 
1355 	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1356 		free_extent_map(em);
1357 		__unplug_io_fn(bdi, page);
1358 		return;
1359 	}
1360 	offset = offset - em->start;
1361 	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1362 			  em->block_start + offset, page);
1363 	free_extent_map(em);
1364 }
1365 
1366 /*
1367  * If this fails, caller must call bdi_destroy() to get rid of the
1368  * bdi again.
1369  */
1370 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1371 {
1372 	int err;
1373 
1374 	bdi->name = "btrfs";
1375 	bdi->capabilities = BDI_CAP_MAP_COPY;
1376 	err = bdi_init(bdi);
1377 	if (err)
1378 		return err;
1379 
1380 	err = bdi_register(bdi, NULL, "btrfs-%d",
1381 				atomic_inc_return(&btrfs_bdi_num));
1382 	if (err) {
1383 		bdi_destroy(bdi);
1384 		return err;
1385 	}
1386 
1387 	bdi->ra_pages	= default_backing_dev_info.ra_pages;
1388 	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
1389 	bdi->unplug_io_data	= info;
1390 	bdi->congested_fn	= btrfs_congested_fn;
1391 	bdi->congested_data	= info;
1392 	return 0;
1393 }
1394 
1395 static int bio_ready_for_csum(struct bio *bio)
1396 {
1397 	u64 length = 0;
1398 	u64 buf_len = 0;
1399 	u64 start = 0;
1400 	struct page *page;
1401 	struct extent_io_tree *io_tree = NULL;
1402 	struct btrfs_fs_info *info = NULL;
1403 	struct bio_vec *bvec;
1404 	int i;
1405 	int ret;
1406 
1407 	bio_for_each_segment(bvec, bio, i) {
1408 		page = bvec->bv_page;
1409 		if (page->private == EXTENT_PAGE_PRIVATE) {
1410 			length += bvec->bv_len;
1411 			continue;
1412 		}
1413 		if (!page->private) {
1414 			length += bvec->bv_len;
1415 			continue;
1416 		}
1417 		length = bvec->bv_len;
1418 		buf_len = page->private >> 2;
1419 		start = page_offset(page) + bvec->bv_offset;
1420 		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1421 		info = BTRFS_I(page->mapping->host)->root->fs_info;
1422 	}
1423 	/* are we fully contained in this bio? */
1424 	if (buf_len <= length)
1425 		return 1;
1426 
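	/* the tree block spills past this bio, so it is only ready if the
	 * rest of its range is already up to date in the page cache
	 */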
1427 	ret = extent_range_uptodate(io_tree, start + length,
1428 				    start + buf_len - 1);
1429 	return ret;
1430 }
1431 
1432 /*
1433  * called by the kthread helper functions to finally call the bio end_io
1434  * functions.  This is where read checksum verification actually happens
1435  */
1436 static void end_workqueue_fn(struct btrfs_work *work)
1437 {
1438 	struct bio *bio;
1439 	struct end_io_wq *end_io_wq;
1440 	struct btrfs_fs_info *fs_info;
1441 	int error;
1442 
1443 	end_io_wq = container_of(work, struct end_io_wq, work);
1444 	bio = end_io_wq->bio;
1445 	fs_info = end_io_wq->info;
1446 
1447 	/* metadata bio reads are special because the whole tree block must
1448 	 * be checksummed at once.  This makes sure the entire block is in
1449 	 * RAM and up to date before trying to verify things.  For
1450 	 * blocksize <= pagesize, it is basically a noop
1451 	 */
1452 	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
1453 	    !bio_ready_for_csum(bio)) {
1454 		btrfs_queue_worker(&fs_info->endio_meta_workers,
1455 				   &end_io_wq->work);
1456 		return;
1457 	}
1458 	error = end_io_wq->error;
1459 	bio->bi_private = end_io_wq->private;
1460 	bio->bi_end_io = end_io_wq->end_io;
1461 	kfree(end_io_wq);
1462 	bio_endio(bio, error);
1463 }
1464 
1465 static int cleaner_kthread(void *arg)
1466 {
1467 	struct btrfs_root *root = arg;
1468 
1469 	do {
1470 		smp_mb();
1471 		if (root->fs_info->closing)
1472 			break;
1473 
1474 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1475 
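		/* skip this cycle rather than block if someone else is
		 * holding the cleaner_mutex
		 */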
1476 		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1477 		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
1478 			btrfs_clean_old_snapshots(root);
1479 			mutex_unlock(&root->fs_info->cleaner_mutex);
1480 		}
1481 
1482 		if (freezing(current)) {
1483 			refrigerator();
1484 		} else {
1485 			smp_mb();
1486 			if (root->fs_info->closing)
1487 				break;
1488 			set_current_state(TASK_INTERRUPTIBLE);
1489 			schedule();
1490 			__set_current_state(TASK_RUNNING);
1491 		}
1492 	} while (!kthread_should_stop());
1493 	return 0;
1494 }
1495 
1496 static int transaction_kthread(void *arg)
1497 {
1498 	struct btrfs_root *root = arg;
1499 	struct btrfs_trans_handle *trans;
1500 	struct btrfs_transaction *cur;
1501 	unsigned long now;
1502 	unsigned long delay;
1503 	int ret;
1504 
1505 	do {
1506 		smp_mb();
1507 		if (root->fs_info->closing)
1508 			break;
1509 
1510 		delay = HZ * 30;
1511 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1512 		mutex_lock(&root->fs_info->transaction_kthread_mutex);
1513 
1514 		mutex_lock(&root->fs_info->trans_mutex);
1515 		cur = root->fs_info->running_transaction;
1516 		if (!cur) {
1517 			mutex_unlock(&root->fs_info->trans_mutex);
1518 			goto sleep;
1519 		}
1520 
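		/* commit once the running transaction is at least 30
		 * seconds old; otherwise check back in 5 seconds
		 */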
1521 		now = get_seconds();
1522 		if (now < cur->start_time || now - cur->start_time < 30) {
1523 			mutex_unlock(&root->fs_info->trans_mutex);
1524 			delay = HZ * 5;
1525 			goto sleep;
1526 		}
1527 		mutex_unlock(&root->fs_info->trans_mutex);
1528 		trans = btrfs_start_transaction(root, 1);
1529 		ret = btrfs_commit_transaction(trans, root);
1530 
1531 sleep:
1532 		wake_up_process(root->fs_info->cleaner_kthread);
1533 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1534 
1535 		if (freezing(current)) {
1536 			refrigerator();
1537 		} else {
1538 			if (root->fs_info->closing)
1539 				break;
1540 			set_current_state(TASK_INTERRUPTIBLE);
1541 			schedule_timeout(delay);
1542 			__set_current_state(TASK_RUNNING);
1543 		}
1544 	} while (!kthread_should_stop());
1545 	return 0;
1546 }
1547 
1548 struct btrfs_root *open_ctree(struct super_block *sb,
1549 			      struct btrfs_fs_devices *fs_devices,
1550 			      char *options)
1551 {
1552 	u32 sectorsize;
1553 	u32 nodesize;
1554 	u32 leafsize;
1555 	u32 blocksize;
1556 	u32 stripesize;
1557 	u64 generation;
1558 	u64 features;
1559 	struct btrfs_key location;
1560 	struct buffer_head *bh;
1561 	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1562 						 GFP_NOFS);
1563 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1564 						 GFP_NOFS);
1565 	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1566 					       GFP_NOFS);
1567 	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1568 						GFP_NOFS);
1569 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1570 						GFP_NOFS);
1571 	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1572 					      GFP_NOFS);
1573 	struct btrfs_root *log_tree_root;
1574 
1575 	int ret;
1576 	int err = -EINVAL;
1577 
1578 	struct btrfs_super_block *disk_super;
1579 
1580 	if (!extent_root || !tree_root || !fs_info ||
1581 	    !chunk_root || !dev_root || !csum_root) {
1582 		err = -ENOMEM;
1583 		goto fail;
1584 	}
1585 
1586 	ret = init_srcu_struct(&fs_info->subvol_srcu);
1587 	if (ret) {
1588 		err = ret;
1589 		goto fail;
1590 	}
1591 
1592 	ret = setup_bdi(fs_info, &fs_info->bdi);
1593 	if (ret) {
1594 		err = ret;
1595 		goto fail_srcu;
1596 	}
1597 
1598 	fs_info->btree_inode = new_inode(sb);
1599 	if (!fs_info->btree_inode) {
1600 		err = -ENOMEM;
1601 		goto fail_bdi;
1602 	}
1603 
1604 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1605 	INIT_LIST_HEAD(&fs_info->trans_list);
1606 	INIT_LIST_HEAD(&fs_info->dead_roots);
1607 	INIT_LIST_HEAD(&fs_info->hashers);
1608 	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1609 	INIT_LIST_HEAD(&fs_info->ordered_operations);
1610 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
1611 	spin_lock_init(&fs_info->delalloc_lock);
1612 	spin_lock_init(&fs_info->new_trans_lock);
1613 	spin_lock_init(&fs_info->ref_cache_lock);
1614 	spin_lock_init(&fs_info->fs_roots_radix_lock);
1615 
1616 	init_completion(&fs_info->kobj_unregister);
1617 	fs_info->tree_root = tree_root;
1618 	fs_info->extent_root = extent_root;
1619 	fs_info->csum_root = csum_root;
1620 	fs_info->chunk_root = chunk_root;
1621 	fs_info->dev_root = dev_root;
1622 	fs_info->fs_devices = fs_devices;
1623 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1624 	INIT_LIST_HEAD(&fs_info->space_info);
1625 	btrfs_mapping_init(&fs_info->mapping_tree);
1626 	atomic_set(&fs_info->nr_async_submits, 0);
1627 	atomic_set(&fs_info->async_delalloc_pages, 0);
1628 	atomic_set(&fs_info->async_submit_draining, 0);
1629 	atomic_set(&fs_info->nr_async_bios, 0);
1630 	fs_info->sb = sb;
1631 	fs_info->max_extent = (u64)-1;
1632 	fs_info->max_inline = 8192 * 1024;
1633 	fs_info->metadata_ratio = 0;
1634 
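	/* size the default worker pools to a couple more threads than
	 * there are CPUs, capped at 8
	 */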
1635 	fs_info->thread_pool_size = min_t(unsigned long,
1636 					  num_online_cpus() + 2, 8);
1637 
1638 	INIT_LIST_HEAD(&fs_info->ordered_extents);
1639 	spin_lock_init(&fs_info->ordered_extent_lock);
1640 
1641 	sb->s_blocksize = 4096;
1642 	sb->s_blocksize_bits = blksize_bits(4096);
1643 	sb->s_bdi = &fs_info->bdi;
1644 
1645 	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1646 	fs_info->btree_inode->i_nlink = 1;
1647 	/*
1648 	 * we set the i_size on the btree inode to the largest possible offset.
1649 	 * the real end of the address space is determined by all of
1650 	 * the devices in the system
1651 	 */
1652 	fs_info->btree_inode->i_size = OFFSET_MAX;
1653 	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1654 	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1655 
1656 	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1657 	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1658 			     fs_info->btree_inode->i_mapping,
1659 			     GFP_NOFS);
1660 	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1661 			     GFP_NOFS);
1662 
1663 	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1664 
1665 	BTRFS_I(fs_info->btree_inode)->root = tree_root;
1666 	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1667 	       sizeof(struct btrfs_key));
1668 	BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1669 	insert_inode_hash(fs_info->btree_inode);
1670 
1671 	spin_lock_init(&fs_info->block_group_cache_lock);
1672 	fs_info->block_group_cache_tree.rb_node = NULL;
1673 
1674 	extent_io_tree_init(&fs_info->freed_extents[0],
1675 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
1676 	extent_io_tree_init(&fs_info->freed_extents[1],
1677 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
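	/* pinned extents are tracked in whichever of the two freed_extents
	 * trees pinned_extents currently points at
	 */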
1678 	fs_info->pinned_extents = &fs_info->freed_extents[0];
1679 	fs_info->do_barriers = 1;
1680 
1681 
1682 	mutex_init(&fs_info->trans_mutex);
1683 	mutex_init(&fs_info->ordered_operations_mutex);
1684 	mutex_init(&fs_info->tree_log_mutex);
1685 	mutex_init(&fs_info->chunk_mutex);
1686 	mutex_init(&fs_info->transaction_kthread_mutex);
1687 	mutex_init(&fs_info->cleaner_mutex);
1688 	mutex_init(&fs_info->volume_mutex);
1689 	init_rwsem(&fs_info->extent_commit_sem);
1690 	init_rwsem(&fs_info->subvol_sem);
1691 
1692 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1693 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1694 
1695 	init_waitqueue_head(&fs_info->transaction_throttle);
1696 	init_waitqueue_head(&fs_info->transaction_wait);
1697 	init_waitqueue_head(&fs_info->async_submit_wait);
1698 
1699 	__setup_root(4096, 4096, 4096, 4096, tree_root,
1700 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
1701 
1702 
1703 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1704 	if (!bh)
1705 		goto fail_iput;
1706 
1707 	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1708 	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1709 	       sizeof(fs_info->super_for_commit));
1710 	brelse(bh);
1711 
1712 	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1713 
1714 	disk_super = &fs_info->super_copy;
1715 	if (!btrfs_super_root(disk_super))
1716 		goto fail_iput;
1717 
1718 	ret = btrfs_parse_options(tree_root, options);
1719 	if (ret) {
1720 		err = ret;
1721 		goto fail_iput;
1722 	}
1723 
1724 	features = btrfs_super_incompat_flags(disk_super) &
1725 		~BTRFS_FEATURE_INCOMPAT_SUPP;
1726 	if (features) {
1727 		printk(KERN_ERR "BTRFS: couldn't mount because of "
1728 		       "unsupported optional features (%Lx).\n",
1729 		       (unsigned long long)features);
1730 		err = -EINVAL;
1731 		goto fail_iput;
1732 	}
1733 
1734 	features = btrfs_super_incompat_flags(disk_super);
1735 	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
1736 		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1737 		btrfs_set_super_incompat_flags(disk_super, features);
1738 	}
1739 
1740 	features = btrfs_super_compat_ro_flags(disk_super) &
1741 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
1742 	if (!(sb->s_flags & MS_RDONLY) && features) {
1743 		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1744 		       "unsupported option features (%Lx).\n",
1745 		       (unsigned long long)features);
1746 		err = -EINVAL;
1747 		goto fail_iput;
1748 	}
1749 	printk(KERN_INFO "thread pool is %d\n", fs_info->thread_pool_size);
1750 	/*
1751 	 * we need to start all the end_io workers up front because the
1752 	 * queue work function gets called at interrupt time, and so it
1753 	 * cannot dynamically grow.
1754 	 */
1755 	btrfs_init_workers(&fs_info->workers, "worker",
1756 			   fs_info->thread_pool_size);
1757 
1758 	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1759 			   fs_info->thread_pool_size);
1760 
1761 	btrfs_init_workers(&fs_info->submit_workers, "submit",
1762 			   min_t(u64, fs_devices->num_devices,
1763 			   fs_info->thread_pool_size));
1764 
1765 	/* a higher idle thresh on the submit workers makes it much more
1766 	 * likely that bios will be sent down in a sane order to the
1767 	 * devices
1768 	 */
1769 	fs_info->submit_workers.idle_thresh = 64;
1770 
1771 	fs_info->workers.idle_thresh = 16;
1772 	fs_info->workers.ordered = 1;
1773 
1774 	fs_info->delalloc_workers.idle_thresh = 2;
1775 	fs_info->delalloc_workers.ordered = 1;
1776 
1777 	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1778 	btrfs_init_workers(&fs_info->endio_workers, "endio",
1779 			   fs_info->thread_pool_size);
1780 	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1781 			   fs_info->thread_pool_size);
1782 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
1783 			   "endio-meta-write", fs_info->thread_pool_size);
1784 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1785 			   fs_info->thread_pool_size);
1786 
1787 	/*
1788 	 * endios are largely parallel and should have a very
1789 	 * low idle thresh
1790 	 */
1791 	fs_info->endio_workers.idle_thresh = 4;
1792 	fs_info->endio_meta_workers.idle_thresh = 4;
1793 
1794 	fs_info->endio_write_workers.idle_thresh = 2;
1795 	fs_info->endio_meta_write_workers.idle_thresh = 2;
1796 
1797 	fs_info->endio_workers.atomic_worker_start = 1;
1798 	fs_info->endio_meta_workers.atomic_worker_start = 1;
1799 	fs_info->endio_write_workers.atomic_worker_start = 1;
1800 	fs_info->endio_meta_write_workers.atomic_worker_start = 1;
1801 
1802 	btrfs_start_workers(&fs_info->workers, 1);
1803 	btrfs_start_workers(&fs_info->submit_workers, 1);
1804 	btrfs_start_workers(&fs_info->delalloc_workers, 1);
1805 	btrfs_start_workers(&fs_info->fixup_workers, 1);
1806 	btrfs_start_workers(&fs_info->endio_workers, 1);
1807 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1808 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1809 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
1810 
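	/* scale readahead with the number of devices, but never let it
	 * drop below 4MB worth of pages
	 */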
1811 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1812 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1813 				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1814 
1815 	nodesize = btrfs_super_nodesize(disk_super);
1816 	leafsize = btrfs_super_leafsize(disk_super);
1817 	sectorsize = btrfs_super_sectorsize(disk_super);
1818 	stripesize = btrfs_super_stripesize(disk_super);
1819 	tree_root->nodesize = nodesize;
1820 	tree_root->leafsize = leafsize;
1821 	tree_root->sectorsize = sectorsize;
1822 	tree_root->stripesize = stripesize;
1823 
1824 	sb->s_blocksize = sectorsize;
1825 	sb->s_blocksize_bits = blksize_bits(sectorsize);
1826 
1827 	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1828 		    sizeof(disk_super->magic))) {
1829 		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1830 		goto fail_sb_buffer;
1831 	}
1832 
1833 	mutex_lock(&fs_info->chunk_mutex);
1834 	ret = btrfs_read_sys_array(tree_root);
1835 	mutex_unlock(&fs_info->chunk_mutex);
1836 	if (ret) {
1837 		printk(KERN_WARNING "btrfs: failed to read the system "
1838 		       "array on %s\n", sb->s_id);
1839 		goto fail_sb_buffer;
1840 	}
1841 
1842 	blocksize = btrfs_level_size(tree_root,
1843 				     btrfs_super_chunk_root_level(disk_super));
1844 	generation = btrfs_super_chunk_root_generation(disk_super);
1845 
1846 	__setup_root(nodesize, leafsize, sectorsize, stripesize,
1847 		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1848 
1849 	chunk_root->node = read_tree_block(chunk_root,
1850 					   btrfs_super_chunk_root(disk_super),
1851 					   blocksize, generation);
1852 	BUG_ON(!chunk_root->node);
1853 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1854 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1855 		       sb->s_id);
1856 		goto fail_chunk_root;
1857 	}
1858 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1859 	chunk_root->commit_root = btrfs_root_node(chunk_root);
1860 
1861 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1862 	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1863 	   BTRFS_UUID_SIZE);
1864 
1865 	mutex_lock(&fs_info->chunk_mutex);
1866 	ret = btrfs_read_chunk_tree(chunk_root);
1867 	mutex_unlock(&fs_info->chunk_mutex);
1868 	if (ret) {
1869 		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1870 		       sb->s_id);
1871 		goto fail_chunk_root;
1872 	}
1873 
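	/*
	 * the chunk tree must be read before any of the other trees: it
	 * holds the logical->physical mapping that read_tree_block needs
	 * in order to find those trees on disk.
	 */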
1874 	btrfs_close_extra_devices(fs_devices);
1875 
1876 	blocksize = btrfs_level_size(tree_root,
1877 				     btrfs_super_root_level(disk_super));
1878 	generation = btrfs_super_generation(disk_super);
1879 
1880 	tree_root->node = read_tree_block(tree_root,
1881 					  btrfs_super_root(disk_super),
1882 					  blocksize, generation);
1883 	if (!tree_root->node)
1884 		goto fail_chunk_root;
1885 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1886 		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1887 		       sb->s_id);
1888 		goto fail_tree_root;
1889 	}
1890 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1891 	tree_root->commit_root = btrfs_root_node(tree_root);
1892 
1893 	ret = find_and_setup_root(tree_root, fs_info,
1894 				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1895 	if (ret)
1896 		goto fail_tree_root;
1897 	extent_root->track_dirty = 1;
1898 
1899 	ret = find_and_setup_root(tree_root, fs_info,
1900 				  BTRFS_DEV_TREE_OBJECTID, dev_root);
1901 	if (ret)
1902 		goto fail_extent_root;
1903 	dev_root->track_dirty = 1;
1904 
1905 	ret = find_and_setup_root(tree_root, fs_info,
1906 				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
1907 	if (ret)
1908 		goto fail_dev_root;
1909 
1910 	csum_root->track_dirty = 1;
1911 
1912 	btrfs_read_block_groups(extent_root);
1913 
1914 	fs_info->generation = generation;
1915 	fs_info->last_trans_committed = generation;
1916 	fs_info->data_alloc_profile = (u64)-1;
1917 	fs_info->metadata_alloc_profile = (u64)-1;
1918 	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1919 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1920 					       "btrfs-cleaner");
1921 	if (IS_ERR(fs_info->cleaner_kthread))
1922 		goto fail_csum_root;
1923 
1924 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
1925 						   tree_root,
1926 						   "btrfs-transaction");
1927 	if (IS_ERR(fs_info->transaction_kthread))
1928 		goto fail_cleaner;
1929 
1930 	if (!btrfs_test_opt(tree_root, SSD) &&
1931 	    !btrfs_test_opt(tree_root, NOSSD) &&
1932 	    !fs_info->fs_devices->rotating) {
1933 		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
1934 		       "mode\n");
1935 		btrfs_set_opt(fs_info->mount_opt, SSD);
1936 	}
1937 
1938 	if (btrfs_super_log_root(disk_super) != 0) {
1939 		u64 bytenr = btrfs_super_log_root(disk_super);
1940 
1941 		if (fs_devices->rw_devices == 0) {
1942 			printk(KERN_WARNING "Btrfs log replay required "
1943 			       "on RO media\n");
1944 			err = -EIO;
1945 			goto fail_trans_kthread;
1946 		}
1947 		blocksize =
1948 		     btrfs_level_size(tree_root,
1949 				      btrfs_super_log_root_level(disk_super));
1950 
1951 		log_tree_root = kzalloc(sizeof(struct btrfs_root),
1952 					GFP_NOFS);
		if (!log_tree_root) {
			err = -ENOMEM;
			goto fail_trans_kthread;
		}
1953 
1954 		__setup_root(nodesize, leafsize, sectorsize, stripesize,
1955 			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1956 
1957 		log_tree_root->node = read_tree_block(tree_root, bytenr,
1958 						      blocksize,
1959 						      generation + 1);
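		/*
		 * the log tree was written while the next transaction was
		 * still open, so its blocks carry the super's
		 * generation + 1
		 */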
1960 		ret = btrfs_recover_log_trees(log_tree_root);
1961 		BUG_ON(ret);
1962 
1963 		if (sb->s_flags & MS_RDONLY) {
1964 			ret =  btrfs_commit_super(tree_root);
1965 			BUG_ON(ret);
1966 		}
1967 	}
1968 
1969 	ret = btrfs_find_orphan_roots(tree_root);
1970 	BUG_ON(ret);
1971 
1972 	if (!(sb->s_flags & MS_RDONLY)) {
1973 		ret = btrfs_recover_relocation(tree_root);
1974 		BUG_ON(ret);
1975 	}
1976 
1977 	location.objectid = BTRFS_FS_TREE_OBJECTID;
1978 	location.type = BTRFS_ROOT_ITEM_KEY;
1979 	location.offset = (u64)-1;
1980 
1981 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1982 	if (!fs_info->fs_root)
1983 		goto fail_trans_kthread;
1984 
1985 	return tree_root;
1986 
1987 fail_trans_kthread:
1988 	kthread_stop(fs_info->transaction_kthread);
1989 fail_cleaner:
1990 	kthread_stop(fs_info->cleaner_kthread);
1991 
1992 	/*
1993 	 * make sure we're done with the btree inode before we stop our
1994 	 * kthreads
1995 	 */
1996 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1997 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1998 
1999 fail_csum_root:
2000 	free_extent_buffer(csum_root->node);
2001 	free_extent_buffer(csum_root->commit_root);
2002 fail_dev_root:
2003 	free_extent_buffer(dev_root->node);
2004 	free_extent_buffer(dev_root->commit_root);
2005 fail_extent_root:
2006 	free_extent_buffer(extent_root->node);
2007 	free_extent_buffer(extent_root->commit_root);
2008 fail_tree_root:
2009 	free_extent_buffer(tree_root->node);
2010 	free_extent_buffer(tree_root->commit_root);
2011 fail_chunk_root:
2012 	free_extent_buffer(chunk_root->node);
2013 	free_extent_buffer(chunk_root->commit_root);
2014 fail_sb_buffer:
2015 	btrfs_stop_workers(&fs_info->fixup_workers);
2016 	btrfs_stop_workers(&fs_info->delalloc_workers);
2017 	btrfs_stop_workers(&fs_info->workers);
2018 	btrfs_stop_workers(&fs_info->endio_workers);
2019 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2020 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2021 	btrfs_stop_workers(&fs_info->endio_write_workers);
2022 	btrfs_stop_workers(&fs_info->submit_workers);
2023 fail_iput:
2024 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2025 	iput(fs_info->btree_inode);
2026 
2027 	btrfs_close_devices(fs_info->fs_devices);
2028 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2029 fail_bdi:
2030 	bdi_destroy(&fs_info->bdi);
2031 fail_srcu:
2032 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2033 fail:
2034 	kfree(extent_root);
2035 	kfree(tree_root);
2036 	kfree(fs_info);
2037 	kfree(chunk_root);
2038 	kfree(dev_root);
2039 	kfree(csum_root);
2040 	return ERR_PTR(err);
2041 }
2042 
2043 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2044 {
2045 	char b[BDEVNAME_SIZE];
2046 
2047 	if (uptodate) {
2048 		set_buffer_uptodate(bh);
2049 	} else {
2050 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
2051 			printk(KERN_WARNING "lost page write due to "
2052 					"I/O error on %s\n",
2053 				       bdevname(bh->b_bdev, b));
2054 		}
2055 		/* note, we don't set_buffer_write_io_error because we have
2056 		 * our own ways of dealing with the IO errors
2057 		 */
2058 		clear_buffer_uptodate(bh);
2059 	}
2060 	unlock_buffer(bh);
2061 	put_bh(bh);
2062 }
2063 
2064 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2065 {
2066 	struct buffer_head *bh;
2067 	struct buffer_head *latest = NULL;
2068 	struct btrfs_super_block *super;
2069 	int i;
2070 	u64 transid = 0;
2071 	u64 bytenr;
2072 
2073 	/* we would like to check all the supers, but that would make
2074 	 * a btrfs mount succeed after a mkfs from a different FS.
2075 	 * So, we need to add a special mount option to scan for
2076 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2077 	 */
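	/* for reference: btrfs_sb_offset() puts the primary super at 64K
	 * and the mirror copies at 64M and 256G, so mirrors that fall
	 * past the end of a small device are skipped by the size check
	 * below
	 */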
2078 	for (i = 0; i < 1; i++) {
2079 		bytenr = btrfs_sb_offset(i);
2080 		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2081 			break;
2082 		bh = __bread(bdev, bytenr / 4096, 4096);
2083 		if (!bh)
2084 			continue;
2085 
2086 		super = (struct btrfs_super_block *)bh->b_data;
2087 		if (btrfs_super_bytenr(super) != bytenr ||
2088 		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
2089 			    sizeof(super->magic))) {
2090 			brelse(bh);
2091 			continue;
2092 		}
2093 
2094 		if (!latest || btrfs_super_generation(super) > transid) {
2095 			brelse(latest);
2096 			latest = bh;
2097 			transid = btrfs_super_generation(super);
2098 		} else {
2099 			brelse(bh);
2100 		}
2101 	}
2102 	return latest;
2103 }
2104 
2105 /*
2106  * this should be called twice, once with wait == 0 and
2107  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2108  * we write are pinned.
2109  *
2110  * They are released when wait == 1 is done.
2111  * max_mirrors must be the same for both runs, and it indicates how
2112  * many supers on this one device should be written.
2113  *
2114  * max_mirrors == 0 means to write them all.
2115  */
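/*
 * a sketch of the two runs, as driven by write_all_supers() below:
 *
 *	write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */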
2116 static int write_dev_supers(struct btrfs_device *device,
2117 			    struct btrfs_super_block *sb,
2118 			    int do_barriers, int wait, int max_mirrors)
2119 {
2120 	struct buffer_head *bh;
2121 	int i;
2122 	int ret;
2123 	int errors = 0;
2124 	u32 crc;
2125 	u64 bytenr;
2126 	int last_barrier = 0;
2127 
2128 	if (max_mirrors == 0)
2129 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2130 
2131 	/* make sure only the last submit_bh does a barrier */
2132 	if (do_barriers) {
2133 		for (i = 0; i < max_mirrors; i++) {
2134 			bytenr = btrfs_sb_offset(i);
2135 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2136 			    device->total_bytes)
2137 				break;
2138 			last_barrier = i;
2139 		}
2140 	}
2141 
2142 	for (i = 0; i < max_mirrors; i++) {
2143 		bytenr = btrfs_sb_offset(i);
2144 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2145 			break;
2146 
2147 		if (wait) {
2148 			bh = __find_get_block(device->bdev, bytenr / 4096,
2149 					      BTRFS_SUPER_INFO_SIZE);
2150 			BUG_ON(!bh);
2151 			wait_on_buffer(bh);
2152 			if (!buffer_uptodate(bh))
2153 				errors++;
2154 
2155 			/* drop our reference */
2156 			brelse(bh);
2157 
2158 			/* drop the reference from the wait == 0 run */
2159 			brelse(bh);
2160 			continue;
2161 		} else {
2162 			btrfs_set_super_bytenr(sb, bytenr);
2163 
2164 			crc = ~(u32)0;
2165 			crc = btrfs_csum_data(NULL, (char *)sb +
2166 					      BTRFS_CSUM_SIZE, crc,
2167 					      BTRFS_SUPER_INFO_SIZE -
2168 					      BTRFS_CSUM_SIZE);
2169 			btrfs_csum_final(crc, sb->csum);
2170 
2171 			/*
2172 			 * one reference for us from __getblk; it is
2173 			 * dropped again by the wait == 1 run of this function
2174 			 */
2175 			bh = __getblk(device->bdev, bytenr / 4096,
2176 				      BTRFS_SUPER_INFO_SIZE);
2177 			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2178 
2179 			/* one reference for submit_bh */
2180 			get_bh(bh);
2181 
2182 			set_buffer_uptodate(bh);
2183 			lock_buffer(bh);
2184 			bh->b_end_io = btrfs_end_buffer_write_sync;
2185 		}
2186 
2187 		if (i == last_barrier && do_barriers && device->barriers) {
2188 			ret = submit_bh(WRITE_BARRIER, bh);
2189 			if (ret == -EOPNOTSUPP) {
2190 				printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
2191 				       device->name);
2192 				set_buffer_uptodate(bh);
2193 				device->barriers = 0;
2194 				/* one reference for submit_bh */
2195 				get_bh(bh);
2196 				lock_buffer(bh);
2197 				ret = submit_bh(WRITE_SYNC, bh);
2198 			}
2199 		} else {
2200 			ret = submit_bh(WRITE_SYNC, bh);
2201 		}
2202 
2203 		if (ret)
2204 			errors++;
2205 	}
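	/* treat the write as successful as long as at least one of the
	 * copies attempted made it down without error
	 */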
2206 	return errors < i ? 0 : -1;
2207 }
2208 
2209 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2210 {
2211 	struct list_head *head;
2212 	struct btrfs_device *dev;
2213 	struct btrfs_super_block *sb;
2214 	struct btrfs_dev_item *dev_item;
2215 	int ret;
2216 	int do_barriers;
2217 	int max_errors;
2218 	int total_errors = 0;
2219 	u64 flags;
2220 
2221 	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2222 	do_barriers = !btrfs_test_opt(root, NOBARRIER);
2223 
2224 	sb = &root->fs_info->super_for_commit;
2225 	dev_item = &sb->dev_item;
2226 
2227 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2228 	head = &root->fs_info->fs_devices->devices;
2229 	list_for_each_entry(dev, head, dev_list) {
2230 		if (!dev->bdev) {
2231 			total_errors++;
2232 			continue;
2233 		}
2234 		if (!dev->in_fs_metadata || !dev->writeable)
2235 			continue;
2236 
2237 		btrfs_set_stack_device_generation(dev_item, 0);
2238 		btrfs_set_stack_device_type(dev_item, dev->type);
2239 		btrfs_set_stack_device_id(dev_item, dev->devid);
2240 		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2241 		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2242 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2243 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2244 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2245 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2246 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2247 
2248 		flags = btrfs_super_flags(sb);
2249 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2250 
2251 		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2252 		if (ret)
2253 			total_errors++;
2254 	}
2255 	if (total_errors > max_errors) {
2256 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2257 		       total_errors);
2258 		BUG();
2259 	}
2260 
2261 	total_errors = 0;
2262 	list_for_each_entry(dev, head, dev_list) {
2263 		if (!dev->bdev)
2264 			continue;
2265 		if (!dev->in_fs_metadata || !dev->writeable)
2266 			continue;
2267 
2268 		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2269 		if (ret)
2270 			total_errors++;
2271 	}
2272 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2273 	if (total_errors > max_errors) {
2274 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2275 		       total_errors);
2276 		BUG();
2277 	}
2278 	return 0;
2279 }
2280 
2281 int write_ctree_super(struct btrfs_trans_handle *trans,
2282 		      struct btrfs_root *root, int max_mirrors)
2283 {
2284 	int ret;
2285 
2286 	ret = write_all_supers(root, max_mirrors);
2287 	return ret;
2288 }
2289 
2290 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2291 {
2292 	spin_lock(&fs_info->fs_roots_radix_lock);
2293 	radix_tree_delete(&fs_info->fs_roots_radix,
2294 			  (unsigned long)root->root_key.objectid);
2295 	spin_unlock(&fs_info->fs_roots_radix_lock);
2296 
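	/*
	 * if the root is gone from the tree of tree roots, wait for any
	 * srcu-protected readers still looking the subvolume up before
	 * tearing it down
	 */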
2297 	if (btrfs_root_refs(&root->root_item) == 0)
2298 		synchronize_srcu(&fs_info->subvol_srcu);
2299 
2300 	free_fs_root(root);
2301 	return 0;
2302 }
2303 
2304 static void free_fs_root(struct btrfs_root *root)
2305 {
2306 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2307 	if (root->anon_super.s_dev) {
2308 		down_write(&root->anon_super.s_umount);
2309 		kill_anon_super(&root->anon_super);
2310 	}
2311 	free_extent_buffer(root->node);
2312 	free_extent_buffer(root->commit_root);
2313 	kfree(root->name);
2314 	kfree(root);
2315 }
2316 
2317 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2318 {
2319 	int ret;
2320 	struct btrfs_root *gang[8];
2321 	int i;
2322 
2323 	while (!list_empty(&fs_info->dead_roots)) {
2324 		gang[0] = list_entry(fs_info->dead_roots.next,
2325 				     struct btrfs_root, root_list);
2326 		list_del(&gang[0]->root_list);
2327 
2328 		if (gang[0]->in_radix) {
2329 			btrfs_free_fs_root(fs_info, gang[0]);
2330 		} else {
2331 			free_extent_buffer(gang[0]->node);
2332 			free_extent_buffer(gang[0]->commit_root);
2333 			kfree(gang[0]);
2334 		}
2335 	}
2336 
2337 	while (1) {
2338 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2339 					     (void **)gang, 0,
2340 					     ARRAY_SIZE(gang));
2341 		if (!ret)
2342 			break;
2343 		for (i = 0; i < ret; i++)
2344 			btrfs_free_fs_root(fs_info, gang[i]);
2345 	}
2346 	return 0;
2347 }
2348 
2349 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2350 {
2351 	u64 root_objectid = 0;
2352 	struct btrfs_root *gang[8];
2353 	int i;
2354 	int ret;
2355 
2356 	while (1) {
2357 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2358 					     (void **)gang, root_objectid,
2359 					     ARRAY_SIZE(gang));
2360 		if (!ret)
2361 			break;
2362 
2363 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
2364 		for (i = 0; i < ret; i++) {
2365 			root_objectid = gang[i]->root_key.objectid;
2366 			btrfs_orphan_cleanup(gang[i]);
2367 		}
2368 		root_objectid++;
2369 	}
2370 	return 0;
2371 }
2372 
2373 int btrfs_commit_super(struct btrfs_root *root)
2374 {
2375 	struct btrfs_trans_handle *trans;
2376 	int ret;
2377 
2378 	mutex_lock(&root->fs_info->cleaner_mutex);
2379 	btrfs_clean_old_snapshots(root);
2380 	mutex_unlock(&root->fs_info->cleaner_mutex);
2381 	trans = btrfs_start_transaction(root, 1);
2382 	ret = btrfs_commit_transaction(trans, root);
2383 	BUG_ON(ret);
2384 	/* run commit again to drop the original snapshot */
2385 	trans = btrfs_start_transaction(root, 1);
2386 	btrfs_commit_transaction(trans, root);
2387 	ret = btrfs_write_and_wait_transaction(NULL, root);
2388 	BUG_ON(ret);
2389 
2390 	ret = write_ctree_super(NULL, root, 0);
2391 	return ret;
2392 }
2393 
2394 int close_ctree(struct btrfs_root *root)
2395 {
2396 	struct btrfs_fs_info *fs_info = root->fs_info;
2397 	int ret;
2398 
2399 	fs_info->closing = 1;
2400 	smp_mb();
2401 
2402 	kthread_stop(root->fs_info->transaction_kthread);
2403 	kthread_stop(root->fs_info->cleaner_kthread);
2404 
2405 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2406 		ret =  btrfs_commit_super(root);
2407 		if (ret)
2408 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2409 	}
2410 
2411 	fs_info->closing = 2;
2412 	smp_mb();
2413 
2414 	if (fs_info->delalloc_bytes) {
2415 		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2416 		       (unsigned long long)fs_info->delalloc_bytes);
2417 	}
2418 	if (fs_info->total_ref_cache_size) {
2419 		printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2420 		       (unsigned long long)fs_info->total_ref_cache_size);
2421 	}
2422 
2423 	free_extent_buffer(fs_info->extent_root->node);
2424 	free_extent_buffer(fs_info->extent_root->commit_root);
2425 	free_extent_buffer(fs_info->tree_root->node);
2426 	free_extent_buffer(fs_info->tree_root->commit_root);
2427 	free_extent_buffer(root->fs_info->chunk_root->node);
2428 	free_extent_buffer(root->fs_info->chunk_root->commit_root);
2429 	free_extent_buffer(root->fs_info->dev_root->node);
2430 	free_extent_buffer(root->fs_info->dev_root->commit_root);
2431 	free_extent_buffer(root->fs_info->csum_root->node);
2432 	free_extent_buffer(root->fs_info->csum_root->commit_root);
2433 
2434 	btrfs_free_block_groups(root->fs_info);
2435 
2436 	del_fs_roots(fs_info);
2437 
2438 	iput(fs_info->btree_inode);
2439 
2440 	btrfs_stop_workers(&fs_info->fixup_workers);
2441 	btrfs_stop_workers(&fs_info->delalloc_workers);
2442 	btrfs_stop_workers(&fs_info->workers);
2443 	btrfs_stop_workers(&fs_info->endio_workers);
2444 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2445 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2446 	btrfs_stop_workers(&fs_info->endio_write_workers);
2447 	btrfs_stop_workers(&fs_info->submit_workers);
2448 
2449 	btrfs_close_devices(fs_info->fs_devices);
2450 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2451 
2452 	bdi_destroy(&fs_info->bdi);
2453 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2454 
2455 	kfree(fs_info->extent_root);
2456 	kfree(fs_info->tree_root);
2457 	kfree(fs_info->chunk_root);
2458 	kfree(fs_info->dev_root);
2459 	kfree(fs_info->csum_root);
2460 	return 0;
2461 }
2462 
2463 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2464 {
2465 	int ret;
2466 	struct inode *btree_inode = buf->first_page->mapping->host;
2467 
2468 	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
2469 	if (!ret)
2470 		return ret;
2471 
2472 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2473 				    parent_transid);
2474 	return !ret;
2475 }
2476 
2477 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2478 {
2479 	struct inode *btree_inode = buf->first_page->mapping->host;
2480 	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2481 					  buf);
2482 }
2483 
2484 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2485 {
2486 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2487 	u64 transid = btrfs_header_generation(buf);
2488 	struct inode *btree_inode = root->fs_info->btree_inode;
2489 	int was_dirty;
2490 
2491 	btrfs_assert_tree_locked(buf);
2492 	if (transid != root->fs_info->generation) {
2493 		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2494 		       "found %llu running %llu\n",
2495 			(unsigned long long)buf->start,
2496 			(unsigned long long)transid,
2497 			(unsigned long long)root->fs_info->generation);
2498 		WARN_ON(1);
2499 	}
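	/*
	 * account newly dirtied metadata; btree_lock_page_hook() drops
	 * this again once the buffer goes out for writeback
	 */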
2500 	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2501 					    buf);
2502 	if (!was_dirty) {
2503 		spin_lock(&root->fs_info->delalloc_lock);
2504 		root->fs_info->dirty_metadata_bytes += buf->len;
2505 		spin_unlock(&root->fs_info->delalloc_lock);
2506 	}
2507 }
2508 
2509 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2510 {
2511 	/*
2512 	 * looks as though older kernels can get into trouble with
2513 	 * this code; they end up stuck in balance_dirty_pages() forever
2514 	 */
2515 	u64 num_dirty;
2516 	unsigned long thresh = 32 * 1024 * 1024;
2517 
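	/*
	 * tasks in direct reclaim (PF_MEMALLOC) must not recurse back
	 * into the dirty page balancing below
	 */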
2518 	if (current->flags & PF_MEMALLOC)
2519 		return;
2520 
2521 	num_dirty = root->fs_info->dirty_metadata_bytes;
2522 
2523 	if (num_dirty > thresh) {
2524 		balance_dirty_pages_ratelimited_nr(
2525 				   root->fs_info->btree_inode->i_mapping, 1);
2526 	}
2527 	return;
2528 }
2529 
2530 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2531 {
2532 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2533 	int ret;
2534 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2535 	if (ret == 0)
2536 		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2537 	return ret;
2538 }
2539 
2540 int btree_lock_page_hook(struct page *page)
2541 {
2542 	struct inode *inode = page->mapping->host;
2543 	struct btrfs_root *root = BTRFS_I(inode)->root;
2544 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2545 	struct extent_buffer *eb;
2546 	unsigned long len;
2547 	u64 bytenr = page_offset(page);
2548 
2549 	if (page->private == EXTENT_PAGE_PRIVATE)
2550 		goto out;
2551 
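	/* on the first page of an extent buffer, page->private holds the
	 * buffer length shifted up by two, with the low bits used as
	 * flags (the encoding comes from extent_io.c)
	 */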
2552 	len = page->private >> 2;
2553 	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2554 	if (!eb)
2555 		goto out;
2556 
2557 	btrfs_tree_lock(eb);
2558 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2559 
2560 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2561 		spin_lock(&root->fs_info->delalloc_lock);
2562 		if (root->fs_info->dirty_metadata_bytes >= eb->len)
2563 			root->fs_info->dirty_metadata_bytes -= eb->len;
2564 		else
2565 			WARN_ON(1);
2566 		spin_unlock(&root->fs_info->delalloc_lock);
2567 	}
2568 
2569 	btrfs_tree_unlock(eb);
2570 	free_extent_buffer(eb);
2571 out:
2572 	lock_page(page);
2573 	return 0;
2574 }
2575 
2576 static struct extent_io_ops btree_extent_io_ops = {
2577 	.write_cache_pages_lock_hook = btree_lock_page_hook,
2578 	.readpage_end_io_hook = btree_readpage_end_io_hook,
2579 	.submit_bio_hook = btree_submit_bio_hook,
2580 	/* note we're sharing with inode.c for the merge bio hook */
2581 	.merge_bio_hook = btrfs_merge_bio_hook,
2582 };
2583