xref: /openbmc/linux/fs/btrfs/disk-io.c (revision b6dcefde)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include "compat.h"
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "volumes.h"
36 #include "print-tree.h"
37 #include "async-thread.h"
38 #include "locking.h"
39 #include "tree-log.h"
40 #include "free-space-cache.h"
41 
42 static struct extent_io_ops btree_extent_io_ops;
43 static void end_workqueue_fn(struct btrfs_work *work);
44 static void free_fs_root(struct btrfs_root *root);
45 
46 static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
47 
48 /*
49  * end_io_wq structs are used to do processing in task context when an IO is
50  * complete.  This is used during reads to verify checksums, and it is used
51  * by writes to insert metadata for new file extents after IO is complete.
52  */
53 struct end_io_wq {
54 	struct bio *bio;
55 	bio_end_io_t *end_io;
56 	void *private;
57 	struct btrfs_fs_info *info;
58 	int error;
59 	int metadata;
60 	struct list_head list;
61 	struct btrfs_work work;
62 };
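
/*
 * End-to-end flow (see btrfs_bio_wq_end_io, end_workqueue_bio and
 * end_workqueue_fn below): the bio's original bi_end_io/bi_private are
 * saved here, bi_end_io is redirected to end_workqueue_bio, which picks
 * a worker pool based on the IO direction and the metadata flag, and
 * end_workqueue_fn finally restores the saved fields and calls
 * bio_endio from task context.
 */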
63 
64 /*
65  * async submit bios are used to offload expensive checksumming
66  * onto the worker threads.  They checksum file and metadata bios
67  * just before they are sent down the IO stack.
68  */
69 struct async_submit_bio {
70 	struct inode *inode;
71 	struct bio *bio;
72 	struct list_head list;
73 	extent_submit_bio_hook_t *submit_bio_start;
74 	extent_submit_bio_hook_t *submit_bio_done;
75 	int rw;
76 	int mirror_num;
77 	unsigned long bio_flags;
78 	struct btrfs_work work;
79 };
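
/*
 * These run as ordered work items (see btrfs_wq_submit_bio below):
 * work.func = run_one_async_start does the checksumming in parallel,
 * work.ordered_func = run_one_async_done submits bios in the order
 * they were queued, and work.ordered_free = run_one_async_free frees
 * the struct.
 */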
80 
81 /* These are used to set the lockdep class on the extent buffer locks.
82  * The class is set by the readpage_end_io_hook after the buffer has
83  * passed csum validation but before the pages are unlocked.
84  *
85  * The lockdep class is also set by btrfs_init_new_buffer on freshly
86  * allocated blocks.
87  *
88  * The class is based on the level in the tree block, which allows lockdep
89  * to know that lower nodes nest inside the locks of higher nodes.
90  *
91  * We also add a check to make sure the highest level of the tree is
92  * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
93  * code needs update as well.
94  */
95 #ifdef CONFIG_DEBUG_LOCK_ALLOC
96 # if BTRFS_MAX_LEVEL != 8
97 #  error "BTRFS_MAX_LEVEL is expected to be 8; update btrfs_eb_name below"
98 # endif
99 static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
100 static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
101 	/* leaf */
102 	"btrfs-extent-00",
103 	"btrfs-extent-01",
104 	"btrfs-extent-02",
105 	"btrfs-extent-03",
106 	"btrfs-extent-04",
107 	"btrfs-extent-05",
108 	"btrfs-extent-06",
109 	"btrfs-extent-07",
110 	/* highest possible level */
111 	"btrfs-extent-08",
112 };
113 #endif
114 
115 /*
116  * extents on the btree inode are pretty simple: there's one extent
117  * that covers the entire device
118  */
119 static struct extent_map *btree_get_extent(struct inode *inode,
120 		struct page *page, size_t page_offset, u64 start, u64 len,
121 		int create)
122 {
123 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
124 	struct extent_map *em;
125 	int ret;
126 
127 	read_lock(&em_tree->lock);
128 	em = lookup_extent_mapping(em_tree, start, len);
129 	if (em) {
130 		em->bdev =
131 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
132 		read_unlock(&em_tree->lock);
133 		goto out;
134 	}
135 	read_unlock(&em_tree->lock);
136 
137 	em = alloc_extent_map(GFP_NOFS);
138 	if (!em) {
139 		em = ERR_PTR(-ENOMEM);
140 		goto out;
141 	}
142 	em->start = 0;
143 	em->len = (u64)-1;
144 	em->block_len = (u64)-1;
145 	em->block_start = 0;
146 	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
147 
148 	write_lock(&em_tree->lock);
149 	ret = add_extent_mapping(em_tree, em);
150 	if (ret == -EEXIST) {
151 		u64 failed_start = em->start;
152 		u64 failed_len = em->len;
153 
154 		free_extent_map(em);
155 		em = lookup_extent_mapping(em_tree, start, len);
156 		if (em) {
157 			ret = 0;
158 		} else {
159 			em = lookup_extent_mapping(em_tree, failed_start,
160 						   failed_len);
161 			ret = -EIO;
162 		}
163 	} else if (ret) {
164 		free_extent_map(em);
165 		em = NULL;
166 	}
167 	write_unlock(&em_tree->lock);
168 
169 	if (ret)
170 		em = ERR_PTR(ret);
171 out:
172 	return em;
173 }
174 
175 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
176 {
177 	return crc32c(seed, data, len);
178 }
179 
180 void btrfs_csum_final(u32 crc, char *result)
181 {
182 	*(__le32 *)result = ~cpu_to_le32(crc);
183 }
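
/*
 * Illustrative sketch of how the two helpers above pair up, mirroring
 * csum_tree_block() below ("data" and "result" are hypothetical
 * buffers):
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(root, data, crc, len);
 *	btrfs_csum_final(crc, result);
 *
 * after which result holds ~crc stored as a __le32.
 */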
184 
185 /*
186  * compute the csum for a btree block, and either verify it or write it
187  * into the csum field of the block.
188  */
189 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
190 			   int verify)
191 {
192 	u16 csum_size =
193 		btrfs_super_csum_size(&root->fs_info->super_copy);
194 	char *result = NULL;
195 	unsigned long len;
196 	unsigned long cur_len;
197 	unsigned long offset = BTRFS_CSUM_SIZE;
198 	char *map_token = NULL;
199 	char *kaddr;
200 	unsigned long map_start;
201 	unsigned long map_len;
202 	int err;
203 	u32 crc = ~(u32)0;
204 	unsigned long inline_result;
205 
206 	len = buf->len - offset;
207 	while (len > 0) {
208 		err = map_private_extent_buffer(buf, offset, 32,
209 					&map_token, &kaddr,
210 					&map_start, &map_len, KM_USER0);
211 		if (err)
212 			return 1;
213 		cur_len = min(len, map_len - (offset - map_start));
214 		crc = btrfs_csum_data(root, kaddr + offset - map_start,
215 				      crc, cur_len);
216 		len -= cur_len;
217 		offset += cur_len;
218 		unmap_extent_buffer(buf, map_token, KM_USER0);
219 	}
220 	if (csum_size > sizeof(inline_result)) {
221 		result = kzalloc(csum_size, GFP_NOFS);
222 		if (!result)
223 			return 1;
224 	} else {
225 		result = (char *)&inline_result;
226 	}
227 
228 	btrfs_csum_final(crc, result);
229 
230 	if (verify) {
231 		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
232 			u32 val;
233 			u32 found = 0;
234 			memcpy(&found, result, csum_size);
235 
236 			read_extent_buffer(buf, &val, 0, csum_size);
237 			if (printk_ratelimit()) {
238 				printk(KERN_INFO "btrfs: %s checksum verify "
239 				       "failed on %llu wanted %X found %X "
240 				       "level %d\n",
241 				       root->fs_info->sb->s_id,
242 				       (unsigned long long)buf->start, val, found,
243 				       btrfs_header_level(buf));
244 			}
245 			if (result != (char *)&inline_result)
246 				kfree(result);
247 			return 1;
248 		}
249 	} else {
250 		write_extent_buffer(buf, result, 0, csum_size);
251 	}
252 	if (result != (char *)&inline_result)
253 		kfree(result);
254 	return 0;
255 }
256 
257 /*
258  * we can't consider a given block up to date unless the transid of the
259  * block matches the transid in the parent node's pointer.  This is how we
260  * detect blocks that either didn't get written at all or got written
261  * in the wrong place.
262  */
263 static int verify_parent_transid(struct extent_io_tree *io_tree,
264 				 struct extent_buffer *eb, u64 parent_transid)
265 {
266 	int ret;
267 
268 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
269 		return 0;
270 
271 	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
272 	if (extent_buffer_uptodate(io_tree, eb) &&
273 	    btrfs_header_generation(eb) == parent_transid) {
274 		ret = 0;
275 		goto out;
276 	}
277 	if (printk_ratelimit()) {
278 		printk(KERN_INFO "parent transid verify failed on %llu wanted %llu "
279 		       "found %llu\n",
280 		       (unsigned long long)eb->start,
281 		       (unsigned long long)parent_transid,
282 		       (unsigned long long)btrfs_header_generation(eb));
283 	}
284 	ret = 1;
285 	clear_extent_buffer_uptodate(io_tree, eb);
286 out:
287 	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
288 		      GFP_NOFS);
289 	return ret;
290 }
291 
292 /*
293  * helper to read a given tree block, doing retries as required when
294  * the checksums don't match and we have alternate mirrors to try.
295  */
296 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
297 					  struct extent_buffer *eb,
298 					  u64 start, u64 parent_transid)
299 {
300 	struct extent_io_tree *io_tree;
301 	int ret;
302 	int num_copies = 0;
303 	int mirror_num = 0;
304 
305 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
306 	while (1) {
307 		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
308 					       btree_get_extent, mirror_num);
309 		if (!ret &&
310 		    !verify_parent_transid(io_tree, eb, parent_transid))
311 			return ret;
312 
313 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
314 					      eb->start, eb->len);
315 		if (num_copies == 1)
316 			return ret;
317 
318 		mirror_num++;
319 		if (mirror_num > num_copies)
320 			return ret;
321 	}
322 	return -EIO;
323 }
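
/*
 * A note on the retry loop above: mirror_num 0 lets the lower layers
 * pick a copy; after a checksum or transid failure the mirrors
 * 1..num_copies are tried explicitly, and the result of the last
 * attempt is returned once they are exhausted (the trailing
 * return -EIO is unreachable).
 */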
324 
325 /*
326  * checksum a dirty tree block before IO.  This has extra checks to make sure
327  * we only fill in the checksum field in the first page of a multi-page block
328  */
329 
330 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
331 {
332 	struct extent_io_tree *tree;
333 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
334 	u64 found_start;
335 	int found_level;
336 	unsigned long len;
337 	struct extent_buffer *eb;
338 	int ret;
339 
340 	tree = &BTRFS_I(page->mapping->host)->io_tree;
341 
342 	if (page->private == EXTENT_PAGE_PRIVATE)
343 		goto out;
344 	if (!page->private)
345 		goto out;
346 	len = page->private >> 2;
347 	WARN_ON(len == 0);
348 
349 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
350 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
351 					     btrfs_header_generation(eb));
352 	BUG_ON(ret);
353 	found_start = btrfs_header_bytenr(eb);
354 	if (found_start != start) {
355 		WARN_ON(1);
356 		goto err;
357 	}
358 	if (eb->first_page != page) {
359 		WARN_ON(1);
360 		goto err;
361 	}
362 	if (!PageUptodate(page)) {
363 		WARN_ON(1);
364 		goto err;
365 	}
366 	found_level = btrfs_header_level(eb);
367 
368 	csum_tree_block(root, eb, 0);
369 err:
370 	free_extent_buffer(eb);
371 out:
372 	return 0;
373 }
374 
375 static int check_tree_block_fsid(struct btrfs_root *root,
376 				 struct extent_buffer *eb)
377 {
378 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
379 	u8 fsid[BTRFS_UUID_SIZE];
380 	int ret = 1;
381 
382 	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
383 			   BTRFS_FSID_SIZE);
384 	while (fs_devices) {
385 		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
386 			ret = 0;
387 			break;
388 		}
389 		fs_devices = fs_devices->seed;
390 	}
391 	return ret;
392 }
393 
394 #ifdef CONFIG_DEBUG_LOCK_ALLOC
395 void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
396 {
397 	lockdep_set_class_and_name(&eb->lock,
398 			   &btrfs_eb_class[level],
399 			   btrfs_eb_name[level]);
400 }
401 #endif
402 
403 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
404 			       struct extent_state *state)
405 {
406 	struct extent_io_tree *tree;
407 	u64 found_start;
408 	int found_level;
409 	unsigned long len;
410 	struct extent_buffer *eb;
411 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
412 	int ret = 0;
413 
414 	tree = &BTRFS_I(page->mapping->host)->io_tree;
415 	if (page->private == EXTENT_PAGE_PRIVATE)
416 		goto out;
417 	if (!page->private)
418 		goto out;
419 
420 	len = page->private >> 2;
421 	WARN_ON(len == 0);
422 
423 	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
424 
425 	found_start = btrfs_header_bytenr(eb);
426 	if (found_start != start) {
427 		if (printk_ratelimit()) {
428 			printk(KERN_INFO "btrfs bad tree block start "
429 			       "%llu %llu\n",
430 			       (unsigned long long)found_start,
431 			       (unsigned long long)eb->start);
432 		}
433 		ret = -EIO;
434 		goto err;
435 	}
436 	if (eb->first_page != page) {
437 		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
438 		       eb->first_page->index, page->index);
439 		WARN_ON(1);
440 		ret = -EIO;
441 		goto err;
442 	}
443 	if (check_tree_block_fsid(root, eb)) {
444 		if (printk_ratelimit()) {
445 			printk(KERN_INFO "btrfs bad fsid on block %llu\n",
446 			       (unsigned long long)eb->start);
447 		}
448 		ret = -EIO;
449 		goto err;
450 	}
451 	found_level = btrfs_header_level(eb);
452 
453 	btrfs_set_buffer_lockdep_class(eb, found_level);
454 
455 	ret = csum_tree_block(root, eb, 1);
456 	if (ret)
457 		ret = -EIO;
458 
459 	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
460 	end = eb->start + end - 1;
461 err:
462 	free_extent_buffer(eb);
463 out:
464 	return ret;
465 }
466 
467 static void end_workqueue_bio(struct bio *bio, int err)
468 {
469 	struct end_io_wq *end_io_wq = bio->bi_private;
470 	struct btrfs_fs_info *fs_info;
471 
472 	fs_info = end_io_wq->info;
473 	end_io_wq->error = err;
474 	end_io_wq->work.func = end_workqueue_fn;
475 	end_io_wq->work.flags = 0;
476 
477 	if (bio->bi_rw & (1 << BIO_RW)) {
478 		if (end_io_wq->metadata)
479 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
480 					   &end_io_wq->work);
481 		else
482 			btrfs_queue_worker(&fs_info->endio_write_workers,
483 					   &end_io_wq->work);
484 	} else {
485 		if (end_io_wq->metadata)
486 			btrfs_queue_worker(&fs_info->endio_meta_workers,
487 					   &end_io_wq->work);
488 		else
489 			btrfs_queue_worker(&fs_info->endio_workers,
490 					   &end_io_wq->work);
491 	}
492 }
493 
494 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
495 			int metadata)
496 {
497 	struct end_io_wq *end_io_wq;
498 	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
499 	if (!end_io_wq)
500 		return -ENOMEM;
501 
502 	end_io_wq->private = bio->bi_private;
503 	end_io_wq->end_io = bio->bi_end_io;
504 	end_io_wq->info = info;
505 	end_io_wq->error = 0;
506 	end_io_wq->bio = bio;
507 	end_io_wq->metadata = metadata;
508 
509 	bio->bi_private = end_io_wq;
510 	bio->bi_end_io = end_workqueue_bio;
511 	return 0;
512 }
513 
514 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
515 {
516 	unsigned long limit = min_t(unsigned long,
517 				    info->workers.max_workers,
518 				    info->fs_devices->open_devices);
519 	return 256 * limit;
520 }
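
/*
 * Worked example: with max_workers == 8 and 4 open devices the async
 * submit limit is 256 * min(8, 4) = 1024 in-flight bios, which is the
 * threshold btrfs_congested_async() below checks against.
 */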
521 
522 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
523 {
524 	return atomic_read(&info->nr_async_bios) >
525 		btrfs_async_submit_limit(info);
526 }
527 
528 static void run_one_async_start(struct btrfs_work *work)
529 {
530 	struct btrfs_fs_info *fs_info;
531 	struct async_submit_bio *async;
532 
533 	async = container_of(work, struct async_submit_bio, work);
534 	fs_info = BTRFS_I(async->inode)->root->fs_info;
535 	async->submit_bio_start(async->inode, async->rw, async->bio,
536 			       async->mirror_num, async->bio_flags);
537 }
538 
539 static void run_one_async_done(struct btrfs_work *work)
540 {
541 	struct btrfs_fs_info *fs_info;
542 	struct async_submit_bio *async;
543 	int limit;
544 
545 	async = container_of(work, struct async_submit_bio, work);
546 	fs_info = BTRFS_I(async->inode)->root->fs_info;
547 
548 	limit = btrfs_async_submit_limit(fs_info);
549 	limit = limit * 2 / 3;
550 
551 	atomic_dec(&fs_info->nr_async_submits);
552 
553 	if (atomic_read(&fs_info->nr_async_submits) < limit &&
554 	    waitqueue_active(&fs_info->async_submit_wait))
555 		wake_up(&fs_info->async_submit_wait);
556 
557 	async->submit_bio_done(async->inode, async->rw, async->bio,
558 			       async->mirror_num, async->bio_flags);
559 }
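
/*
 * The 2/3 factor above adds hysteresis: throttled submitters are only
 * woken once the in-flight count has fallen to two thirds of the
 * limit, presumably to avoid a wakeup on every single completion.
 */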
560 
561 static void run_one_async_free(struct btrfs_work *work)
562 {
563 	struct async_submit_bio *async;
564 
565 	async = container_of(work, struct async_submit_bio, work);
566 	kfree(async);
567 }
568 
569 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
570 			int rw, struct bio *bio, int mirror_num,
571 			unsigned long bio_flags,
572 			extent_submit_bio_hook_t *submit_bio_start,
573 			extent_submit_bio_hook_t *submit_bio_done)
574 {
575 	struct async_submit_bio *async;
576 
577 	async = kmalloc(sizeof(*async), GFP_NOFS);
578 	if (!async)
579 		return -ENOMEM;
580 
581 	async->inode = inode;
582 	async->rw = rw;
583 	async->bio = bio;
584 	async->mirror_num = mirror_num;
585 	async->submit_bio_start = submit_bio_start;
586 	async->submit_bio_done = submit_bio_done;
587 
588 	async->work.func = run_one_async_start;
589 	async->work.ordered_func = run_one_async_done;
590 	async->work.ordered_free = run_one_async_free;
591 
592 	async->work.flags = 0;
593 	async->bio_flags = bio_flags;
594 
595 	atomic_inc(&fs_info->nr_async_submits);
596 
597 	if (rw & (1 << BIO_RW_SYNCIO))
598 		btrfs_set_work_high_prio(&async->work);
599 
600 	btrfs_queue_worker(&fs_info->workers, &async->work);
601 
602 	while (atomic_read(&fs_info->async_submit_draining) &&
603 	      atomic_read(&fs_info->nr_async_submits)) {
604 		wait_event(fs_info->async_submit_wait,
605 			   (atomic_read(&fs_info->nr_async_submits) == 0));
606 	}
607 
608 	return 0;
609 }
610 
611 static int btree_csum_one_bio(struct bio *bio)
612 {
613 	struct bio_vec *bvec = bio->bi_io_vec;
614 	int bio_index = 0;
615 	struct btrfs_root *root;
616 
617 	WARN_ON(bio->bi_vcnt <= 0);
618 	while (bio_index < bio->bi_vcnt) {
619 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
620 		csum_dirty_buffer(root, bvec->bv_page);
621 		bio_index++;
622 		bvec++;
623 	}
624 	return 0;
625 }
626 
627 static int __btree_submit_bio_start(struct inode *inode, int rw,
628 				    struct bio *bio, int mirror_num,
629 				    unsigned long bio_flags)
630 {
631 	/*
632 	 * when we're called for a write, we're already in the async
633 	 * submission context.  Just checksum the bio here; the submit happens later in __btree_submit_bio_done
634 	 */
635 	btree_csum_one_bio(bio);
636 	return 0;
637 }
638 
639 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
640 				 int mirror_num, unsigned long bio_flags)
641 {
642 	/*
643 	 * when we're called for a write, we're already in the async
644 	 * submission context.  Just jump into btrfs_map_bio
645 	 */
646 	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
647 }
648 
649 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
650 				 int mirror_num, unsigned long bio_flags)
651 {
652 	int ret;
653 
654 	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
655 					  bio, 1);
656 	BUG_ON(ret);
657 
658 	if (!(rw & (1 << BIO_RW))) {
659 		/*
660 		 * called for a read, do the setup so that checksum validation
661 		 * can happen in the async kernel threads
662 		 */
663 		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
664 				     mirror_num, 0);
665 	}
666 
667 	/*
668 	 * kthread helpers are used to submit writes so that checksumming
669 	 * can happen in parallel across all CPUs
670 	 */
671 	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
672 				   inode, rw, bio, mirror_num, 0,
673 				   __btree_submit_bio_start,
674 				   __btree_submit_bio_done);
675 }
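
/*
 * To summarize the hook above: reads are mapped immediately and their
 * checksums are verified later in the endio workers (set up by
 * btrfs_bio_wq_end_io), while writes go through btrfs_wq_submit_bio so
 * that __btree_submit_bio_start can checksum in parallel across CPUs
 * before __btree_submit_bio_done maps the bio.
 */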
676 
677 static int btree_writepage(struct page *page, struct writeback_control *wbc)
678 {
679 	struct extent_io_tree *tree;
680 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
681 	struct extent_buffer *eb;
682 	int was_dirty;
683 
684 	tree = &BTRFS_I(page->mapping->host)->io_tree;
685 	if (!(current->flags & PF_MEMALLOC)) {
686 		return extent_write_full_page(tree, page,
687 					      btree_get_extent, wbc);
688 	}
689 
690 	redirty_page_for_writepage(wbc, page);
691 	eb = btrfs_find_tree_block(root, page_offset(page),
692 				      PAGE_CACHE_SIZE);
693 	WARN_ON(!eb);
694 
695 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
696 	if (!was_dirty) {
697 		spin_lock(&root->fs_info->delalloc_lock);
698 		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
699 		spin_unlock(&root->fs_info->delalloc_lock);
700 	}
701 	free_extent_buffer(eb);
702 
703 	unlock_page(page);
704 	return 0;
705 }
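
/*
 * A note on the PF_MEMALLOC branch above: when entered from memory
 * reclaim the page is redirtied instead of written, presumably because
 * btree writeback takes tree locks and allocates memory, neither of
 * which is safe from reclaim context.  The test_and_set_bit keeps
 * dirty_metadata_bytes from being bumped twice for an already dirty
 * buffer.
 */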
706 
707 static int btree_writepages(struct address_space *mapping,
708 			    struct writeback_control *wbc)
709 {
710 	struct extent_io_tree *tree;
711 	tree = &BTRFS_I(mapping->host)->io_tree;
712 	if (wbc->sync_mode == WB_SYNC_NONE) {
713 		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
714 		u64 num_dirty;
715 		unsigned long thresh = 32 * 1024 * 1024;
716 
717 		if (wbc->for_kupdate)
718 			return 0;
719 
720 		/* this is a bit racy, but that's ok */
721 		num_dirty = root->fs_info->dirty_metadata_bytes;
722 		if (num_dirty < thresh)
723 			return 0;
724 	}
725 	return extent_writepages(tree, mapping, btree_get_extent, wbc);
726 }
727 
728 static int btree_readpage(struct file *file, struct page *page)
729 {
730 	struct extent_io_tree *tree;
731 	tree = &BTRFS_I(page->mapping->host)->io_tree;
732 	return extent_read_full_page(tree, page, btree_get_extent);
733 }
734 
735 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
736 {
737 	struct extent_io_tree *tree;
738 	struct extent_map_tree *map;
739 	int ret;
740 
741 	if (PageWriteback(page) || PageDirty(page))
742 		return 0;
743 
744 	tree = &BTRFS_I(page->mapping->host)->io_tree;
745 	map = &BTRFS_I(page->mapping->host)->extent_tree;
746 
747 	ret = try_release_extent_state(map, tree, page, gfp_flags);
748 	if (!ret)
749 		return 0;
750 
751 	ret = try_release_extent_buffer(tree, page);
752 	if (ret == 1) {
753 		ClearPagePrivate(page);
754 		set_page_private(page, 0);
755 		page_cache_release(page);
756 	}
757 
758 	return ret;
759 }
760 
761 static void btree_invalidatepage(struct page *page, unsigned long offset)
762 {
763 	struct extent_io_tree *tree;
764 	tree = &BTRFS_I(page->mapping->host)->io_tree;
765 	extent_invalidatepage(tree, page, offset);
766 	btree_releasepage(page, GFP_NOFS);
767 	if (PagePrivate(page)) {
768 		printk(KERN_WARNING "btrfs warning page private not zero "
769 		       "on page %llu\n", (unsigned long long)page_offset(page));
770 		ClearPagePrivate(page);
771 		set_page_private(page, 0);
772 		page_cache_release(page);
773 	}
774 }
775 
776 static const struct address_space_operations btree_aops = {
777 	.readpage	= btree_readpage,
778 	.writepage	= btree_writepage,
779 	.writepages	= btree_writepages,
780 	.releasepage	= btree_releasepage,
781 	.invalidatepage = btree_invalidatepage,
782 	.sync_page	= block_sync_page,
783 };
784 
785 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
786 			 u64 parent_transid)
787 {
788 	struct extent_buffer *buf = NULL;
789 	struct inode *btree_inode = root->fs_info->btree_inode;
790 	int ret = 0;
791 
792 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
793 	if (!buf)
794 		return 0;
795 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
796 				 buf, 0, 0, btree_get_extent, 0);
797 	free_extent_buffer(buf);
798 	return ret;
799 }
800 
801 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
802 					    u64 bytenr, u32 blocksize)
803 {
804 	struct inode *btree_inode = root->fs_info->btree_inode;
805 	struct extent_buffer *eb;
806 	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
807 				bytenr, blocksize, GFP_NOFS);
808 	return eb;
809 }
810 
811 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
812 						 u64 bytenr, u32 blocksize)
813 {
814 	struct inode *btree_inode = root->fs_info->btree_inode;
815 	struct extent_buffer *eb;
816 
817 	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
818 				 bytenr, blocksize, NULL, GFP_NOFS);
819 	return eb;
820 }
821 
822 
823 int btrfs_write_tree_block(struct extent_buffer *buf)
824 {
825 	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
826 					buf->start + buf->len - 1);
827 }
828 
829 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
830 {
831 	return filemap_fdatawait_range(buf->first_page->mapping,
832 				       buf->start, buf->start + buf->len - 1);
833 }
834 
835 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
836 				      u32 blocksize, u64 parent_transid)
837 {
838 	struct extent_buffer *buf = NULL;
839 	struct inode *btree_inode = root->fs_info->btree_inode;
840 	struct extent_io_tree *io_tree;
841 	int ret;
842 
843 	io_tree = &BTRFS_I(btree_inode)->io_tree;
844 
845 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
846 	if (!buf)
847 		return NULL;
848 
849 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
850 
851 	if (ret == 0)
852 		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
853 	return buf;
854 
855 }
856 
857 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
858 		     struct extent_buffer *buf)
859 {
860 	struct inode *btree_inode = root->fs_info->btree_inode;
861 	if (btrfs_header_generation(buf) ==
862 	    root->fs_info->running_transaction->transid) {
863 		btrfs_assert_tree_locked(buf);
864 
865 		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
866 			spin_lock(&root->fs_info->delalloc_lock);
867 			if (root->fs_info->dirty_metadata_bytes >= buf->len)
868 				root->fs_info->dirty_metadata_bytes -= buf->len;
869 			else
870 				WARN_ON(1);
871 			spin_unlock(&root->fs_info->delalloc_lock);
872 		}
873 
874 		/* ugh, clear_extent_buffer_dirty needs to lock the page */
875 		btrfs_set_lock_blocking(buf);
876 		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
877 					  buf);
878 	}
879 	return 0;
880 }
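
/*
 * dirty_metadata_bytes accounting: btree_writepage() above adds
 * PAGE_CACHE_SIZE when it redirties a clean buffer, and
 * clean_tree_block() subtracts buf->len when the dirty bit is cleared;
 * the WARN_ON fires rather than letting the counter underflow.
 */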
881 
882 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
883 			u32 stripesize, struct btrfs_root *root,
884 			struct btrfs_fs_info *fs_info,
885 			u64 objectid)
886 {
887 	root->node = NULL;
888 	root->commit_root = NULL;
889 	root->sectorsize = sectorsize;
890 	root->nodesize = nodesize;
891 	root->leafsize = leafsize;
892 	root->stripesize = stripesize;
893 	root->ref_cows = 0;
894 	root->track_dirty = 0;
895 	root->in_radix = 0;
896 	root->clean_orphans = 0;
897 
898 	root->fs_info = fs_info;
899 	root->objectid = objectid;
900 	root->last_trans = 0;
901 	root->highest_objectid = 0;
902 	root->name = NULL;
903 	root->in_sysfs = 0;
904 	root->inode_tree.rb_node = NULL;
905 
906 	INIT_LIST_HEAD(&root->dirty_list);
907 	INIT_LIST_HEAD(&root->orphan_list);
908 	INIT_LIST_HEAD(&root->root_list);
909 	spin_lock_init(&root->node_lock);
910 	spin_lock_init(&root->list_lock);
911 	spin_lock_init(&root->inode_lock);
912 	mutex_init(&root->objectid_mutex);
913 	mutex_init(&root->log_mutex);
914 	init_waitqueue_head(&root->log_writer_wait);
915 	init_waitqueue_head(&root->log_commit_wait[0]);
916 	init_waitqueue_head(&root->log_commit_wait[1]);
917 	atomic_set(&root->log_commit[0], 0);
918 	atomic_set(&root->log_commit[1], 0);
919 	atomic_set(&root->log_writers, 0);
920 	root->log_batch = 0;
921 	root->log_transid = 0;
922 	root->last_log_commit = 0;
923 	extent_io_tree_init(&root->dirty_log_pages,
924 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
925 
926 	memset(&root->root_key, 0, sizeof(root->root_key));
927 	memset(&root->root_item, 0, sizeof(root->root_item));
928 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
929 	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
930 	root->defrag_trans_start = fs_info->generation;
931 	init_completion(&root->kobj_unregister);
932 	root->defrag_running = 0;
933 	root->root_key.objectid = objectid;
934 	root->anon_super.s_root = NULL;
935 	root->anon_super.s_dev = 0;
936 	INIT_LIST_HEAD(&root->anon_super.s_list);
937 	INIT_LIST_HEAD(&root->anon_super.s_instances);
938 	init_rwsem(&root->anon_super.s_umount);
939 
940 	return 0;
941 }
942 
943 static int find_and_setup_root(struct btrfs_root *tree_root,
944 			       struct btrfs_fs_info *fs_info,
945 			       u64 objectid,
946 			       struct btrfs_root *root)
947 {
948 	int ret;
949 	u32 blocksize;
950 	u64 generation;
951 
952 	__setup_root(tree_root->nodesize, tree_root->leafsize,
953 		     tree_root->sectorsize, tree_root->stripesize,
954 		     root, fs_info, objectid);
955 	ret = btrfs_find_last_root(tree_root, objectid,
956 				   &root->root_item, &root->root_key);
957 	if (ret > 0)
958 		return -ENOENT;
959 	BUG_ON(ret);
960 
961 	generation = btrfs_root_generation(&root->root_item);
962 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
963 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
964 				     blocksize, generation);
965 	BUG_ON(!root->node);
966 	root->commit_root = btrfs_root_node(root);
967 	return 0;
968 }
969 
970 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
971 			     struct btrfs_fs_info *fs_info)
972 {
973 	struct extent_buffer *eb;
974 	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
975 	u64 start = 0;
976 	u64 end = 0;
977 	int ret;
978 
979 	if (!log_root_tree)
980 		return 0;
981 
982 	while (1) {
983 		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
984 				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
985 		if (ret)
986 			break;
987 
988 		clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
989 				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
990 	}
991 	eb = fs_info->log_root_tree->node;
992 
993 	WARN_ON(btrfs_header_level(eb) != 0);
994 	WARN_ON(btrfs_header_nritems(eb) != 0);
995 
996 	ret = btrfs_free_reserved_extent(fs_info->tree_root,
997 				eb->start, eb->len);
998 	BUG_ON(ret);
999 
1000 	free_extent_buffer(eb);
1001 	kfree(fs_info->log_root_tree);
1002 	fs_info->log_root_tree = NULL;
1003 	return 0;
1004 }
1005 
1006 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1007 					 struct btrfs_fs_info *fs_info)
1008 {
1009 	struct btrfs_root *root;
1010 	struct btrfs_root *tree_root = fs_info->tree_root;
1011 	struct extent_buffer *leaf;
1012 
1013 	root = kzalloc(sizeof(*root), GFP_NOFS);
1014 	if (!root)
1015 		return ERR_PTR(-ENOMEM);
1016 
1017 	__setup_root(tree_root->nodesize, tree_root->leafsize,
1018 		     tree_root->sectorsize, tree_root->stripesize,
1019 		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1020 
1021 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1022 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1023 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1024 	/*
1025 	 * log trees do not get reference counted because they go away
1026 	 * before a real commit is actually done.  They do store pointers
1027 	 * to file data extents, and those reference counts still get
1028 	 * updated (along with back refs to the log tree).
1029 	 */
1030 	root->ref_cows = 0;
1031 
1032 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1033 				      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
1034 	if (IS_ERR(leaf)) {
1035 		kfree(root);
1036 		return ERR_CAST(leaf);
1037 	}
1038 
1039 	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1040 	btrfs_set_header_bytenr(leaf, leaf->start);
1041 	btrfs_set_header_generation(leaf, trans->transid);
1042 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1043 	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1044 	root->node = leaf;
1045 
1046 	write_extent_buffer(root->node, root->fs_info->fsid,
1047 			    (unsigned long)btrfs_header_fsid(root->node),
1048 			    BTRFS_FSID_SIZE);
1049 	btrfs_mark_buffer_dirty(root->node);
1050 	btrfs_tree_unlock(root->node);
1051 	return root;
1052 }
1053 
1054 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1055 			     struct btrfs_fs_info *fs_info)
1056 {
1057 	struct btrfs_root *log_root;
1058 
1059 	log_root = alloc_log_tree(trans, fs_info);
1060 	if (IS_ERR(log_root))
1061 		return PTR_ERR(log_root);
1062 	WARN_ON(fs_info->log_root_tree);
1063 	fs_info->log_root_tree = log_root;
1064 	return 0;
1065 }
1066 
1067 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1068 		       struct btrfs_root *root)
1069 {
1070 	struct btrfs_root *log_root;
1071 	struct btrfs_inode_item *inode_item;
1072 
1073 	log_root = alloc_log_tree(trans, root->fs_info);
1074 	if (IS_ERR(log_root))
1075 		return PTR_ERR(log_root);
1076 
1077 	log_root->last_trans = trans->transid;
1078 	log_root->root_key.offset = root->root_key.objectid;
1079 
1080 	inode_item = &log_root->root_item.inode;
1081 	inode_item->generation = cpu_to_le64(1);
1082 	inode_item->size = cpu_to_le64(3);
1083 	inode_item->nlink = cpu_to_le32(1);
1084 	inode_item->nbytes = cpu_to_le64(root->leafsize);
1085 	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1086 
1087 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1088 
1089 	WARN_ON(root->log_root);
1090 	root->log_root = log_root;
1091 	root->log_transid = 0;
1092 	root->last_log_commit = 0;
1093 	return 0;
1094 }
1095 
1096 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1097 					       struct btrfs_key *location)
1098 {
1099 	struct btrfs_root *root;
1100 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1101 	struct btrfs_path *path;
1102 	struct extent_buffer *l;
1103 	u64 generation;
1104 	u32 blocksize;
1105 	int ret = 0;
1106 
1107 	root = kzalloc(sizeof(*root), GFP_NOFS);
1108 	if (!root)
1109 		return ERR_PTR(-ENOMEM);
1110 	if (location->offset == (u64)-1) {
1111 		ret = find_and_setup_root(tree_root, fs_info,
1112 					  location->objectid, root);
1113 		if (ret) {
1114 			kfree(root);
1115 			return ERR_PTR(ret);
1116 		}
1117 		goto out;
1118 	}
1119 
1120 	__setup_root(tree_root->nodesize, tree_root->leafsize,
1121 		     tree_root->sectorsize, tree_root->stripesize,
1122 		     root, fs_info, location->objectid);
1123 
1124 	path = btrfs_alloc_path();
1125 	BUG_ON(!path);
1126 	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1127 	if (ret == 0) {
1128 		l = path->nodes[0];
1129 		read_extent_buffer(l, &root->root_item,
1130 				btrfs_item_ptr_offset(l, path->slots[0]),
1131 				sizeof(root->root_item));
1132 		memcpy(&root->root_key, location, sizeof(*location));
1133 	}
1134 	btrfs_free_path(path);
1135 	if (ret) {
1136 		if (ret > 0)
1137 			ret = -ENOENT;
1138 		return ERR_PTR(ret);
1139 	}
1140 
1141 	generation = btrfs_root_generation(&root->root_item);
1142 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1143 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1144 				     blocksize, generation);
1145 	root->commit_root = btrfs_root_node(root);
1146 	BUG_ON(!root->node);
1147 out:
1148 	if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
1149 		root->ref_cows = 1;
1150 
1151 	return root;
1152 }
1153 
1154 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1155 					u64 root_objectid)
1156 {
1157 	struct btrfs_root *root;
1158 
1159 	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
1160 		return fs_info->tree_root;
1161 	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
1162 		return fs_info->extent_root;
1163 
1164 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1165 				 (unsigned long)root_objectid);
1166 	return root;
1167 }
1168 
1169 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1170 					      struct btrfs_key *location)
1171 {
1172 	struct btrfs_root *root;
1173 	int ret;
1174 
1175 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1176 		return fs_info->tree_root;
1177 	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1178 		return fs_info->extent_root;
1179 	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1180 		return fs_info->chunk_root;
1181 	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1182 		return fs_info->dev_root;
1183 	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1184 		return fs_info->csum_root;
1185 again:
1186 	spin_lock(&fs_info->fs_roots_radix_lock);
1187 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1188 				 (unsigned long)location->objectid);
1189 	spin_unlock(&fs_info->fs_roots_radix_lock);
1190 	if (root)
1191 		return root;
1192 
1193 	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1194 	if (ret == 0)
1195 		ret = -ENOENT;
1196 	if (ret < 0)
1197 		return ERR_PTR(ret);
1198 
1199 	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1200 	if (IS_ERR(root))
1201 		return root;
1202 
1203 	WARN_ON(btrfs_root_refs(&root->root_item) == 0);
1204 	set_anon_super(&root->anon_super, NULL);
1205 
1206 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1207 	if (ret)
1208 		goto fail;
1209 
1210 	spin_lock(&fs_info->fs_roots_radix_lock);
1211 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1212 				(unsigned long)root->root_key.objectid,
1213 				root);
1214 	if (ret == 0) {
1215 		root->in_radix = 1;
1216 		root->clean_orphans = 1;
1217 	}
1218 	spin_unlock(&fs_info->fs_roots_radix_lock);
1219 	radix_tree_preload_end();
1220 	if (ret) {
1221 		if (ret == -EEXIST) {
1222 			free_fs_root(root);
1223 			goto again;
1224 		}
1225 		goto fail;
1226 	}
1227 
1228 	ret = btrfs_find_dead_roots(fs_info->tree_root,
1229 				    root->root_key.objectid);
1230 	WARN_ON(ret);
1231 	return root;
1232 fail:
1233 	free_fs_root(root);
1234 	return ERR_PTR(ret);
1235 }
1236 
1237 struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1238 				      struct btrfs_key *location,
1239 				      const char *name, int namelen)
1240 {
1241 	return btrfs_read_fs_root_no_name(fs_info, location);
1242 #if 0
1243 	struct btrfs_root *root;
1244 	int ret;
1245 
1246 	root = btrfs_read_fs_root_no_name(fs_info, location);
1247 	if (!root)
1248 		return NULL;
1249 
1250 	if (root->in_sysfs)
1251 		return root;
1252 
1253 	ret = btrfs_set_root_name(root, name, namelen);
1254 	if (ret) {
1255 		free_extent_buffer(root->node);
1256 		kfree(root);
1257 		return ERR_PTR(ret);
1258 	}
1259 
1260 	ret = btrfs_sysfs_add_root(root);
1261 	if (ret) {
1262 		free_extent_buffer(root->node);
1263 		kfree(root->name);
1264 		kfree(root);
1265 		return ERR_PTR(ret);
1266 	}
1267 	root->in_sysfs = 1;
1268 	return root;
1269 #endif
1270 }
1271 
1272 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1273 {
1274 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1275 	int ret = 0;
1276 	struct btrfs_device *device;
1277 	struct backing_dev_info *bdi;
1278 
1279 	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1280 		if (!device->bdev)
1281 			continue;
1282 		bdi = blk_get_backing_dev_info(device->bdev);
1283 		if (bdi && bdi_congested(bdi, bdi_bits)) {
1284 			ret = 1;
1285 			break;
1286 		}
1287 	}
1288 	return ret;
1289 }
1290 
1291 /*
1292  * this unplugs every device on the box, and it is only used when page
1293  * is null
1294  */
1295 static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1296 {
1297 	struct btrfs_device *device;
1298 	struct btrfs_fs_info *info;
1299 
1300 	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1301 	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1302 		if (!device->bdev)
1303 			continue;
1304 
1305 		bdi = blk_get_backing_dev_info(device->bdev);
1306 		if (bdi->unplug_io_fn)
1307 			bdi->unplug_io_fn(bdi, page);
1308 	}
1309 }
1310 
1311 static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1312 {
1313 	struct inode *inode;
1314 	struct extent_map_tree *em_tree;
1315 	struct extent_map *em;
1316 	struct address_space *mapping;
1317 	u64 offset;
1318 
1319 	/* the generic O_DIRECT read code does this; note the "1 ||" below makes the fallback path unconditional */
1320 	if (1 || !page) {
1321 		__unplug_io_fn(bdi, page);
1322 		return;
1323 	}
1324 
1325 	/*
1326 	 * page->mapping may change at any time.  Get a consistent copy
1327 	 * and use that for everything below
1328 	 */
1329 	smp_mb();
1330 	mapping = page->mapping;
1331 	if (!mapping)
1332 		return;
1333 
1334 	inode = mapping->host;
1335 
1336 	/*
1337 	 * don't do the expensive searching for a small number of
1338 	 * devices
1339 	 */
1340 	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
1341 		__unplug_io_fn(bdi, page);
1342 		return;
1343 	}
1344 
1345 	offset = page_offset(page);
1346 
1347 	em_tree = &BTRFS_I(inode)->extent_tree;
1348 	read_lock(&em_tree->lock);
1349 	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1350 	read_unlock(&em_tree->lock);
1351 	if (!em) {
1352 		__unplug_io_fn(bdi, page);
1353 		return;
1354 	}
1355 
1356 	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1357 		free_extent_map(em);
1358 		__unplug_io_fn(bdi, page);
1359 		return;
1360 	}
1361 	offset = offset - em->start;
1362 	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1363 			  em->block_start + offset, page);
1364 	free_extent_map(em);
1365 }
1366 
1367 /*
1368  * If this fails, caller must call bdi_destroy() to get rid of the
1369  * bdi again.
1370  */
1371 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1372 {
1373 	int err;
1374 
1375 	bdi->name = "btrfs";
1376 	bdi->capabilities = BDI_CAP_MAP_COPY;
1377 	err = bdi_init(bdi);
1378 	if (err)
1379 		return err;
1380 
1381 	err = bdi_register(bdi, NULL, "btrfs-%d",
1382 				atomic_inc_return(&btrfs_bdi_num));
1383 	if (err) {
1384 		bdi_destroy(bdi);
1385 		return err;
1386 	}
1387 
1388 	bdi->ra_pages	= default_backing_dev_info.ra_pages;
1389 	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
1390 	bdi->unplug_io_data	= info;
1391 	bdi->congested_fn	= btrfs_congested_fn;
1392 	bdi->congested_data	= info;
1393 	return 0;
1394 }
1395 
1396 static int bio_ready_for_csum(struct bio *bio)
1397 {
1398 	u64 length = 0;
1399 	u64 buf_len = 0;
1400 	u64 start = 0;
1401 	struct page *page;
1402 	struct extent_io_tree *io_tree = NULL;
1403 	struct btrfs_fs_info *info = NULL;
1404 	struct bio_vec *bvec;
1405 	int i;
1406 	int ret;
1407 
1408 	bio_for_each_segment(bvec, bio, i) {
1409 		page = bvec->bv_page;
1410 		if (page->private == EXTENT_PAGE_PRIVATE) {
1411 			length += bvec->bv_len;
1412 			continue;
1413 		}
1414 		if (!page->private) {
1415 			length += bvec->bv_len;
1416 			continue;
1417 		}
1418 		length = bvec->bv_len;
1419 		buf_len = page->private >> 2;
1420 		start = page_offset(page) + bvec->bv_offset;
1421 		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1422 		info = BTRFS_I(page->mapping->host)->root->fs_info;
1423 	}
1424 	/* are we fully contained in this bio? */
1425 	if (buf_len <= length)
1426 		return 1;
1427 
1428 	ret = extent_range_uptodate(io_tree, start + length,
1429 				    start + buf_len - 1);
1430 	return ret;
1431 }
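
/*
 * After the loop above, buf_len/start/length describe the last bvec
 * that belongs to an extent buffer: the block is ready for csumming
 * when it is fully contained in this bio, or when the rest of its
 * range is already uptodate in the io_tree.
 */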
1432 
1433 /*
1434  * called by the kthread helper functions to finally call the bio end_io
1435  * functions.  This is where read checksum verification actually happens
1436  */
1437 static void end_workqueue_fn(struct btrfs_work *work)
1438 {
1439 	struct bio *bio;
1440 	struct end_io_wq *end_io_wq;
1441 	struct btrfs_fs_info *fs_info;
1442 	int error;
1443 
1444 	end_io_wq = container_of(work, struct end_io_wq, work);
1445 	bio = end_io_wq->bio;
1446 	fs_info = end_io_wq->info;
1447 
1448 	/* metadata bio reads are special because the whole tree block must
1449 	 * be checksummed at once.  This makes sure the entire block is in
1450 	 * ram and up to date before trying to verify things.  For
1451 	 * blocksize <= pagesize, it is basically a noop
1452 	 */
1453 	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
1454 	    !bio_ready_for_csum(bio)) {
1455 		btrfs_queue_worker(&fs_info->endio_meta_workers,
1456 				   &end_io_wq->work);
1457 		return;
1458 	}
1459 	error = end_io_wq->error;
1460 	bio->bi_private = end_io_wq->private;
1461 	bio->bi_end_io = end_io_wq->end_io;
1462 	kfree(end_io_wq);
1463 	bio_endio(bio, error);
1464 }
1465 
1466 static int cleaner_kthread(void *arg)
1467 {
1468 	struct btrfs_root *root = arg;
1469 
1470 	do {
1471 		smp_mb();
1472 		if (root->fs_info->closing)
1473 			break;
1474 
1475 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1476 
1477 		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1478 		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
1479 			btrfs_run_delayed_iputs(root);
1480 			btrfs_clean_old_snapshots(root);
1481 			mutex_unlock(&root->fs_info->cleaner_mutex);
1482 		}
1483 
1484 		if (freezing(current)) {
1485 			refrigerator();
1486 		} else {
1487 			smp_mb();
1488 			if (root->fs_info->closing)
1489 				break;
1490 			set_current_state(TASK_INTERRUPTIBLE);
1491 			schedule();
1492 			__set_current_state(TASK_RUNNING);
1493 		}
1494 	} while (!kthread_should_stop());
1495 	return 0;
1496 }
1497 
1498 static int transaction_kthread(void *arg)
1499 {
1500 	struct btrfs_root *root = arg;
1501 	struct btrfs_trans_handle *trans;
1502 	struct btrfs_transaction *cur;
1503 	unsigned long now;
1504 	unsigned long delay;
1505 	int ret;
1506 
1507 	do {
1508 		smp_mb();
1509 		if (root->fs_info->closing)
1510 			break;
1511 
1512 		delay = HZ * 30;
1513 		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1514 		mutex_lock(&root->fs_info->transaction_kthread_mutex);
1515 
1516 		mutex_lock(&root->fs_info->trans_mutex);
1517 		cur = root->fs_info->running_transaction;
1518 		if (!cur) {
1519 			mutex_unlock(&root->fs_info->trans_mutex);
1520 			goto sleep;
1521 		}
1522 
1523 		now = get_seconds();
1524 		if (now < cur->start_time || now - cur->start_time < 30) {
1525 			mutex_unlock(&root->fs_info->trans_mutex);
1526 			delay = HZ * 5;
1527 			goto sleep;
1528 		}
1529 		mutex_unlock(&root->fs_info->trans_mutex);
1530 		trans = btrfs_start_transaction(root, 1);
1531 		ret = btrfs_commit_transaction(trans, root);
1532 
1533 sleep:
1534 		wake_up_process(root->fs_info->cleaner_kthread);
1535 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1536 
1537 		if (freezing(current)) {
1538 			refrigerator();
1539 		} else {
1540 			if (root->fs_info->closing)
1541 				break;
1542 			set_current_state(TASK_INTERRUPTIBLE);
1543 			schedule_timeout(delay);
1544 			__set_current_state(TASK_RUNNING);
1545 		}
1546 	} while (!kthread_should_stop());
1547 	return 0;
1548 }
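
/*
 * Timing summary for the kthread above: it normally wakes every 30
 * seconds (HZ * 30) and commits the running transaction, but if the
 * transaction is younger than 30 seconds it re-checks after 5 seconds
 * (HZ * 5) instead of committing early.
 */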
1549 
1550 struct btrfs_root *open_ctree(struct super_block *sb,
1551 			      struct btrfs_fs_devices *fs_devices,
1552 			      char *options)
1553 {
1554 	u32 sectorsize;
1555 	u32 nodesize;
1556 	u32 leafsize;
1557 	u32 blocksize;
1558 	u32 stripesize;
1559 	u64 generation;
1560 	u64 features;
1561 	struct btrfs_key location;
1562 	struct buffer_head *bh;
1563 	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1564 						 GFP_NOFS);
1565 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1566 						 GFP_NOFS);
1567 	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1568 					       GFP_NOFS);
1569 	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1570 						GFP_NOFS);
1571 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1572 						GFP_NOFS);
1573 	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1574 					      GFP_NOFS);
1575 	struct btrfs_root *log_tree_root;
1576 
1577 	int ret;
1578 	int err = -EINVAL;
1579 
1580 	struct btrfs_super_block *disk_super;
1581 
1582 	if (!extent_root || !tree_root || !fs_info ||
1583 	    !chunk_root || !dev_root || !csum_root) {
1584 		err = -ENOMEM;
1585 		goto fail;
1586 	}
1587 
1588 	ret = init_srcu_struct(&fs_info->subvol_srcu);
1589 	if (ret) {
1590 		err = ret;
1591 		goto fail;
1592 	}
1593 
1594 	ret = setup_bdi(fs_info, &fs_info->bdi);
1595 	if (ret) {
1596 		err = ret;
1597 		goto fail_srcu;
1598 	}
1599 
1600 	fs_info->btree_inode = new_inode(sb);
1601 	if (!fs_info->btree_inode) {
1602 		err = -ENOMEM;
1603 		goto fail_bdi;
1604 	}
1605 
1606 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1607 	INIT_LIST_HEAD(&fs_info->trans_list);
1608 	INIT_LIST_HEAD(&fs_info->dead_roots);
1609 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
1610 	INIT_LIST_HEAD(&fs_info->hashers);
1611 	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1612 	INIT_LIST_HEAD(&fs_info->ordered_operations);
1613 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
1614 	spin_lock_init(&fs_info->delalloc_lock);
1615 	spin_lock_init(&fs_info->new_trans_lock);
1616 	spin_lock_init(&fs_info->ref_cache_lock);
1617 	spin_lock_init(&fs_info->fs_roots_radix_lock);
1618 	spin_lock_init(&fs_info->delayed_iput_lock);
1619 
1620 	init_completion(&fs_info->kobj_unregister);
1621 	fs_info->tree_root = tree_root;
1622 	fs_info->extent_root = extent_root;
1623 	fs_info->csum_root = csum_root;
1624 	fs_info->chunk_root = chunk_root;
1625 	fs_info->dev_root = dev_root;
1626 	fs_info->fs_devices = fs_devices;
1627 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1628 	INIT_LIST_HEAD(&fs_info->space_info);
1629 	btrfs_mapping_init(&fs_info->mapping_tree);
1630 	atomic_set(&fs_info->nr_async_submits, 0);
1631 	atomic_set(&fs_info->async_delalloc_pages, 0);
1632 	atomic_set(&fs_info->async_submit_draining, 0);
1633 	atomic_set(&fs_info->nr_async_bios, 0);
1634 	fs_info->sb = sb;
1635 	fs_info->max_extent = (u64)-1;
1636 	fs_info->max_inline = 8192 * 1024;
1637 	fs_info->metadata_ratio = 0;
1638 
1639 	fs_info->thread_pool_size = min_t(unsigned long,
1640 					  num_online_cpus() + 2, 8);
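	/* e.g. a dual-cpu box gets min(2 + 2, 8) = 4 pool threads */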
1641 
1642 	INIT_LIST_HEAD(&fs_info->ordered_extents);
1643 	spin_lock_init(&fs_info->ordered_extent_lock);
1644 
1645 	sb->s_blocksize = 4096;
1646 	sb->s_blocksize_bits = blksize_bits(4096);
1647 	sb->s_bdi = &fs_info->bdi;
1648 
1649 	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1650 	fs_info->btree_inode->i_nlink = 1;
1651 	/*
1652 	 * we set the i_size on the btree inode to the max possible offset.
1653 	 * the real end of the address space is determined by all of
1654 	 * the devices in the system
1655 	 */
1656 	fs_info->btree_inode->i_size = OFFSET_MAX;
1657 	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1658 	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1659 
1660 	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1661 	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1662 			     fs_info->btree_inode->i_mapping,
1663 			     GFP_NOFS);
1664 	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1665 			     GFP_NOFS);
1666 
1667 	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1668 
1669 	BTRFS_I(fs_info->btree_inode)->root = tree_root;
1670 	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1671 	       sizeof(struct btrfs_key));
1672 	BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1673 	insert_inode_hash(fs_info->btree_inode);
1674 
1675 	spin_lock_init(&fs_info->block_group_cache_lock);
1676 	fs_info->block_group_cache_tree.rb_node = NULL;
1677 
1678 	extent_io_tree_init(&fs_info->freed_extents[0],
1679 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
1680 	extent_io_tree_init(&fs_info->freed_extents[1],
1681 			     fs_info->btree_inode->i_mapping, GFP_NOFS);
1682 	fs_info->pinned_extents = &fs_info->freed_extents[0];
1683 	fs_info->do_barriers = 1;
1684 
1685 
1686 	mutex_init(&fs_info->trans_mutex);
1687 	mutex_init(&fs_info->ordered_operations_mutex);
1688 	mutex_init(&fs_info->tree_log_mutex);
1689 	mutex_init(&fs_info->chunk_mutex);
1690 	mutex_init(&fs_info->transaction_kthread_mutex);
1691 	mutex_init(&fs_info->cleaner_mutex);
1692 	mutex_init(&fs_info->volume_mutex);
1693 	init_rwsem(&fs_info->extent_commit_sem);
1694 	init_rwsem(&fs_info->cleanup_work_sem);
1695 	init_rwsem(&fs_info->subvol_sem);
1696 
1697 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1698 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1699 
1700 	init_waitqueue_head(&fs_info->transaction_throttle);
1701 	init_waitqueue_head(&fs_info->transaction_wait);
1702 	init_waitqueue_head(&fs_info->async_submit_wait);
1703 
1704 	__setup_root(4096, 4096, 4096, 4096, tree_root,
1705 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
1706 
1707 
1708 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1709 	if (!bh)
1710 		goto fail_iput;
1711 
1712 	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1713 	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1714 	       sizeof(fs_info->super_for_commit));
1715 	brelse(bh);
1716 
1717 	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1718 
1719 	disk_super = &fs_info->super_copy;
1720 	if (!btrfs_super_root(disk_super))
1721 		goto fail_iput;
1722 
1723 	ret = btrfs_parse_options(tree_root, options);
1724 	if (ret) {
1725 		err = ret;
1726 		goto fail_iput;
1727 	}
1728 
1729 	features = btrfs_super_incompat_flags(disk_super) &
1730 		~BTRFS_FEATURE_INCOMPAT_SUPP;
1731 	if (features) {
1732 		printk(KERN_ERR "BTRFS: couldn't mount because of "
1733 		       "unsupported optional features (%Lx).\n",
1734 		       (unsigned long long)features);
1735 		err = -EINVAL;
1736 		goto fail_iput;
1737 	}
1738 
1739 	features = btrfs_super_incompat_flags(disk_super);
1740 	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
1741 		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1742 		btrfs_set_super_incompat_flags(disk_super, features);
1743 	}
1744 
1745 	features = btrfs_super_compat_ro_flags(disk_super) &
1746 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
1747 	if (!(sb->s_flags & MS_RDONLY) && features) {
1748 		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1749 		       "unsupported optional features (%Lx).\n",
1750 		       (unsigned long long)features);
1751 		err = -EINVAL;
1752 		goto fail_iput;
1753 	}
1754 
1755 	btrfs_init_workers(&fs_info->generic_worker,
1756 			   "genwork", 1, NULL);
1757 
1758 	btrfs_init_workers(&fs_info->workers, "worker",
1759 			   fs_info->thread_pool_size,
1760 			   &fs_info->generic_worker);
1761 
1762 	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1763 			   fs_info->thread_pool_size,
1764 			   &fs_info->generic_worker);
1765 
1766 	btrfs_init_workers(&fs_info->submit_workers, "submit",
1767 			   min_t(u64, fs_devices->num_devices,
1768 			   fs_info->thread_pool_size),
1769 			   &fs_info->generic_worker);
1770 	btrfs_init_workers(&fs_info->enospc_workers, "enospc",
1771 			   fs_info->thread_pool_size,
1772 			   &fs_info->generic_worker);
1773 
1774 	/* a higher idle thresh on the submit workers makes it much more
1775 	 * likely that bios will be send down in a sane order to the
1776 	 * devices
1777 	 */
1778 	fs_info->submit_workers.idle_thresh = 64;
1779 
1780 	fs_info->workers.idle_thresh = 16;
1781 	fs_info->workers.ordered = 1;
1782 
1783 	fs_info->delalloc_workers.idle_thresh = 2;
1784 	fs_info->delalloc_workers.ordered = 1;
1785 
1786 	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
1787 			   &fs_info->generic_worker);
1788 	btrfs_init_workers(&fs_info->endio_workers, "endio",
1789 			   fs_info->thread_pool_size,
1790 			   &fs_info->generic_worker);
1791 	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1792 			   fs_info->thread_pool_size,
1793 			   &fs_info->generic_worker);
1794 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
1795 			   "endio-meta-write", fs_info->thread_pool_size,
1796 			   &fs_info->generic_worker);
1797 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1798 			   fs_info->thread_pool_size,
1799 			   &fs_info->generic_worker);
1800 
1801 	/*
1802 	 * endios are largely parallel and should have a very
1803 	 * low idle thresh
1804 	 */
1805 	fs_info->endio_workers.idle_thresh = 4;
1806 	fs_info->endio_meta_workers.idle_thresh = 4;
1807 
1808 	fs_info->endio_write_workers.idle_thresh = 2;
1809 	fs_info->endio_meta_write_workers.idle_thresh = 2;
1810 
1811 	btrfs_start_workers(&fs_info->workers, 1);
1812 	btrfs_start_workers(&fs_info->generic_worker, 1);
1813 	btrfs_start_workers(&fs_info->submit_workers, 1);
1814 	btrfs_start_workers(&fs_info->delalloc_workers, 1);
1815 	btrfs_start_workers(&fs_info->fixup_workers, 1);
1816 	btrfs_start_workers(&fs_info->endio_workers, 1);
1817 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1818 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1819 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
1820 	btrfs_start_workers(&fs_info->enospc_workers, 1);
1821 
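	/*
	 * scale the readahead window with the number of devices, but
	 * never let it drop below 4MB worth of pages
	 */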
1822 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1823 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1824 				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1825 
1826 	nodesize = btrfs_super_nodesize(disk_super);
1827 	leafsize = btrfs_super_leafsize(disk_super);
1828 	sectorsize = btrfs_super_sectorsize(disk_super);
1829 	stripesize = btrfs_super_stripesize(disk_super);
1830 	tree_root->nodesize = nodesize;
1831 	tree_root->leafsize = leafsize;
1832 	tree_root->sectorsize = sectorsize;
1833 	tree_root->stripesize = stripesize;
1834 
1835 	sb->s_blocksize = sectorsize;
1836 	sb->s_blocksize_bits = blksize_bits(sectorsize);
1837 
1838 	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1839 		    sizeof(disk_super->magic))) {
1840 		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1841 		goto fail_sb_buffer;
1842 	}
1843 
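	/*
	 * the sys_chunk_array embedded in the super block holds just
	 * enough chunk mappings to let us read the chunk tree itself
	 */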
1844 	mutex_lock(&fs_info->chunk_mutex);
1845 	ret = btrfs_read_sys_array(tree_root);
1846 	mutex_unlock(&fs_info->chunk_mutex);
1847 	if (ret) {
1848 		printk(KERN_WARNING "btrfs: failed to read the system "
1849 		       "array on %s\n", sb->s_id);
1850 		goto fail_sb_buffer;
1851 	}
1852 
1853 	blocksize = btrfs_level_size(tree_root,
1854 				     btrfs_super_chunk_root_level(disk_super));
1855 	generation = btrfs_super_chunk_root_generation(disk_super);
1856 
1857 	__setup_root(nodesize, leafsize, sectorsize, stripesize,
1858 		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1859 
1860 	chunk_root->node = read_tree_block(chunk_root,
1861 					   btrfs_super_chunk_root(disk_super),
1862 					   blocksize, generation);
1863 	BUG_ON(!chunk_root->node);
1864 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1865 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1866 		       sb->s_id);
1867 		goto fail_chunk_root;
1868 	}
1869 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1870 	chunk_root->commit_root = btrfs_root_node(chunk_root);
1871 
1872 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1873 	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1874 	   BTRFS_UUID_SIZE);
1875 
1876 	mutex_lock(&fs_info->chunk_mutex);
1877 	ret = btrfs_read_chunk_tree(chunk_root);
1878 	mutex_unlock(&fs_info->chunk_mutex);
1879 	if (ret) {
1880 		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1881 		       sb->s_id);
1882 		goto fail_chunk_root;
1883 	}
1884 
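	/*
	 * now that the chunk tree is loaded we know which of the
	 * scanned devices belong to this fs, so drop the rest
	 */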
1885 	btrfs_close_extra_devices(fs_devices);
1886 
1887 	blocksize = btrfs_level_size(tree_root,
1888 				     btrfs_super_root_level(disk_super));
1889 	generation = btrfs_super_generation(disk_super);
1890 
1891 	tree_root->node = read_tree_block(tree_root,
1892 					  btrfs_super_root(disk_super),
1893 					  blocksize, generation);
1894 	if (!tree_root->node)
1895 		goto fail_chunk_root;
1896 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1897 		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1898 		       sb->s_id);
1899 		goto fail_tree_root;
1900 	}
1901 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1902 	tree_root->commit_root = btrfs_root_node(tree_root);
1903 
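	/*
	 * the extent, device and csum trees are found via root items
	 * in the tree root.  track_dirty tells the commit code to keep
	 * their root items up to date as their nodes are cowed.
	 */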
1904 	ret = find_and_setup_root(tree_root, fs_info,
1905 				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1906 	if (ret)
1907 		goto fail_tree_root;
1908 	extent_root->track_dirty = 1;
1909 
1910 	ret = find_and_setup_root(tree_root, fs_info,
1911 				  BTRFS_DEV_TREE_OBJECTID, dev_root);
1912 	if (ret)
1913 		goto fail_extent_root;
1914 	dev_root->track_dirty = 1;
1915 
1916 	ret = find_and_setup_root(tree_root, fs_info,
1917 				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
1918 	if (ret)
1919 		goto fail_dev_root;
1920 
1921 	csum_root->track_dirty = 1;
1922 
1923 	btrfs_read_block_groups(extent_root);
1924 
1925 	fs_info->generation = generation;
1926 	fs_info->last_trans_committed = generation;
1927 	fs_info->data_alloc_profile = (u64)-1;
1928 	fs_info->metadata_alloc_profile = (u64)-1;
1929 	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1930 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1931 					       "btrfs-cleaner");
1932 	if (IS_ERR(fs_info->cleaner_kthread))
1933 		goto fail_csum_root;
1934 
1935 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
1936 						   tree_root,
1937 						   "btrfs-transaction");
1938 	if (IS_ERR(fs_info->transaction_kthread))
1939 		goto fail_cleaner;
1940 
1941 	if (!btrfs_test_opt(tree_root, SSD) &&
1942 	    !btrfs_test_opt(tree_root, NOSSD) &&
1943 	    !fs_info->fs_devices->rotating) {
1944 		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
1945 		       "mode\n");
1946 		btrfs_set_opt(fs_info->mount_opt, SSD);
1947 	}
1948 
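	/*
	 * a non-zero log root in the super block means the fs went
	 * down with a dirty tree-log; replay it before going any
	 * further
	 */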
1949 	if (btrfs_super_log_root(disk_super) != 0) {
1950 		u64 bytenr = btrfs_super_log_root(disk_super);
1951 
1952 		if (fs_devices->rw_devices == 0) {
1953 			printk(KERN_WARNING "Btrfs log replay required "
1954 			       "on RO media\n");
1955 			err = -EIO;
1956 			goto fail_trans_kthread;
1957 		}
1958 		blocksize =
1959 		     btrfs_level_size(tree_root,
1960 				      btrfs_super_log_root_level(disk_super));
1961 
1962 		log_tree_root = kzalloc(sizeof(struct btrfs_root),
1963 					GFP_NOFS);
1964 
1965 		__setup_root(nodesize, leafsize, sectorsize, stripesize,
1966 			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1967 
1968 		log_tree_root->node = read_tree_block(tree_root, bytenr,
1969 						      blocksize,
1970 						      generation + 1);
1971 		ret = btrfs_recover_log_trees(log_tree_root);
1972 		BUG_ON(ret);
1973 
1974 		if (sb->s_flags & MS_RDONLY) {
1975 			ret =  btrfs_commit_super(tree_root);
1976 			BUG_ON(ret);
1977 		}
1978 	}
1979 
1980 	ret = btrfs_find_orphan_roots(tree_root);
1981 	BUG_ON(ret);
1982 
1983 	if (!(sb->s_flags & MS_RDONLY)) {
1984 		ret = btrfs_recover_relocation(tree_root);
1985 		if (ret < 0) {
1986 			printk(KERN_WARNING
1987 			       "btrfs: failed to recover relocation\n");
1988 			err = -EINVAL;
1989 			goto fail_trans_kthread;
1990 		}
1991 	}
1992 
1993 	location.objectid = BTRFS_FS_TREE_OBJECTID;
1994 	location.type = BTRFS_ROOT_ITEM_KEY;
1995 	location.offset = (u64)-1;
1996 
1997 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1998 	if (!fs_info->fs_root)
1999 		goto fail_trans_kthread;
2000 
2001 	if (!(sb->s_flags & MS_RDONLY)) {
2002 		down_read(&fs_info->cleanup_work_sem);
2003 		btrfs_orphan_cleanup(fs_info->fs_root);
2004 		up_read(&fs_info->cleanup_work_sem);
2005 	}
2006 
2007 	return tree_root;
2008 
2009 fail_trans_kthread:
2010 	kthread_stop(fs_info->transaction_kthread);
2011 fail_cleaner:
2012 	kthread_stop(fs_info->cleaner_kthread);
2013 
2014 	/*
2015 	 * make sure we're done with the btree inode before we stop our
2016 	 * kthreads
2017 	 */
2018 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2019 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2020 
2021 fail_csum_root:
2022 	free_extent_buffer(csum_root->node);
2023 	free_extent_buffer(csum_root->commit_root);
2024 fail_dev_root:
2025 	free_extent_buffer(dev_root->node);
2026 	free_extent_buffer(dev_root->commit_root);
2027 fail_extent_root:
2028 	free_extent_buffer(extent_root->node);
2029 	free_extent_buffer(extent_root->commit_root);
2030 fail_tree_root:
2031 	free_extent_buffer(tree_root->node);
2032 	free_extent_buffer(tree_root->commit_root);
2033 fail_chunk_root:
2034 	free_extent_buffer(chunk_root->node);
2035 	free_extent_buffer(chunk_root->commit_root);
2036 fail_sb_buffer:
2037 	btrfs_stop_workers(&fs_info->generic_worker);
2038 	btrfs_stop_workers(&fs_info->fixup_workers);
2039 	btrfs_stop_workers(&fs_info->delalloc_workers);
2040 	btrfs_stop_workers(&fs_info->workers);
2041 	btrfs_stop_workers(&fs_info->endio_workers);
2042 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2043 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2044 	btrfs_stop_workers(&fs_info->endio_write_workers);
2045 	btrfs_stop_workers(&fs_info->submit_workers);
2046 	btrfs_stop_workers(&fs_info->enospc_workers);
2047 fail_iput:
2048 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2049 	iput(fs_info->btree_inode);
2050 
2051 	btrfs_close_devices(fs_info->fs_devices);
2052 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2053 fail_bdi:
2054 	bdi_destroy(&fs_info->bdi);
2055 fail_srcu:
2056 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2057 fail:
2058 	kfree(extent_root);
2059 	kfree(tree_root);
2060 	kfree(fs_info);
2061 	kfree(chunk_root);
2062 	kfree(dev_root);
2063 	kfree(csum_root);
2064 	return ERR_PTR(err);
2065 }
2066 
2067 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2068 {
2069 	char b[BDEVNAME_SIZE];
2070 
2071 	if (uptodate) {
2072 		set_buffer_uptodate(bh);
2073 	} else {
2074 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
2075 			printk(KERN_WARNING "lost page write due to "
2076 					"I/O error on %s\n",
2077 				       bdevname(bh->b_bdev, b));
2078 		}
2079 		/* note, we don't set_buffer_write_io_error because we have
2080 		 * our own ways of dealing with the IO errors
2081 		 */
2082 		clear_buffer_uptodate(bh);
2083 	}
2084 	unlock_buffer(bh);
2085 	put_bh(bh);
2086 }
2087 
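/*
 * find the super block copy with the highest generation on this
 * device.  The primary super sits at a fixed 64k offset; the mirror
 * copies sit at the larger offsets returned by btrfs_sb_offset().
 */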
2088 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2089 {
2090 	struct buffer_head *bh;
2091 	struct buffer_head *latest = NULL;
2092 	struct btrfs_super_block *super;
2093 	int i;
2094 	u64 transid = 0;
2095 	u64 bytenr;
2096 
2097 	/* we would like to check all the supers, but that would make
2098 	 * a btrfs mount succeed after a mkfs from a different FS.
2099 	 * So, for now we check only the first super; scanning the
2100 	 * later copies (up to BTRFS_SUPER_MIRROR_MAX) needs a mount option.
2101 	 */
2102 	for (i = 0; i < 1; i++) {
2103 		bytenr = btrfs_sb_offset(i);
2104 		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2105 			break;
2106 		bh = __bread(bdev, bytenr / 4096, 4096);
2107 		if (!bh)
2108 			continue;
2109 
2110 		super = (struct btrfs_super_block *)bh->b_data;
2111 		if (btrfs_super_bytenr(super) != bytenr ||
2112 		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
2113 			    sizeof(super->magic))) {
2114 			brelse(bh);
2115 			continue;
2116 		}
2117 
2118 		if (!latest || btrfs_super_generation(super) > transid) {
2119 			brelse(latest);
2120 			latest = bh;
2121 			transid = btrfs_super_generation(super);
2122 		} else {
2123 			brelse(bh);
2124 		}
2125 	}
2126 	return latest;
2127 }
2128 
2129 /*
2130  * this should be called twice, once with wait == 0 and
2131  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2132  * we write are pinned.
2133  *
2134  * They are released when wait == 1 is done.
2135  * max_mirrors must be the same for both runs, and it indicates how
2136  * many supers on this one device should be written.
2137  *
2138  * max_mirrors == 0 means to write them all.
2139  */
2140 static int write_dev_supers(struct btrfs_device *device,
2141 			    struct btrfs_super_block *sb,
2142 			    int do_barriers, int wait, int max_mirrors)
2143 {
2144 	struct buffer_head *bh;
2145 	int i;
2146 	int ret;
2147 	int errors = 0;
2148 	u32 crc;
2149 	u64 bytenr;
2150 	int last_barrier = 0;
2151 
2152 	if (max_mirrors == 0)
2153 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2154 
2155 	/* make sure only the last submit_bh does a barrier */
2156 	if (do_barriers) {
2157 		for (i = 0; i < max_mirrors; i++) {
2158 			bytenr = btrfs_sb_offset(i);
2159 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2160 			    device->total_bytes)
2161 				break;
2162 			last_barrier = i;
2163 		}
2164 	}
2165 
2166 	for (i = 0; i < max_mirrors; i++) {
2167 		bytenr = btrfs_sb_offset(i);
2168 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2169 			break;
2170 
2171 		if (wait) {
2172 			bh = __find_get_block(device->bdev, bytenr / 4096,
2173 					      BTRFS_SUPER_INFO_SIZE);
2174 			BUG_ON(!bh);
2175 			wait_on_buffer(bh);
2176 			if (!buffer_uptodate(bh))
2177 				errors++;
2178 
2179 			/* drop our reference */
2180 			brelse(bh);
2181 
2182 			/* drop the reference from the wait == 0 run */
2183 			brelse(bh);
2184 			continue;
2185 		} else {
2186 			btrfs_set_super_bytenr(sb, bytenr);
2187 
2188 			crc = ~(u32)0;
2189 			crc = btrfs_csum_data(NULL, (char *)sb +
2190 					      BTRFS_CSUM_SIZE, crc,
2191 					      BTRFS_SUPER_INFO_SIZE -
2192 					      BTRFS_CSUM_SIZE);
2193 			btrfs_csum_final(crc, sb->csum);
2194 
2195 			/*
2196 			 * one reference for us, and we leave it for the
2197 			 * caller
2198 			 */
2199 			bh = __getblk(device->bdev, bytenr / 4096,
2200 				      BTRFS_SUPER_INFO_SIZE);
2201 			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2202 
2203 			/* one reference for submit_bh */
2204 			get_bh(bh);
2205 
2206 			set_buffer_uptodate(bh);
2207 			lock_buffer(bh);
2208 			bh->b_end_io = btrfs_end_buffer_write_sync;
2209 		}
2210 
2211 		if (i == last_barrier && do_barriers && device->barriers) {
2212 			ret = submit_bh(WRITE_BARRIER, bh);
2213 			if (ret == -EOPNOTSUPP) {
2214 				printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
2215 				       device->name);
2216 				set_buffer_uptodate(bh);
2217 				device->barriers = 0;
2218 				/* one reference for submit_bh */
2219 				get_bh(bh);
2220 				lock_buffer(bh);
2221 				ret = submit_bh(WRITE_SYNC, bh);
2222 			}
2223 		} else {
2224 			ret = submit_bh(WRITE_SYNC, bh);
2225 		}
2226 
2227 		if (ret)
2228 			errors++;
2229 	}
2230 	return errors < i ? 0 : -1;
2231 }
2232 
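/*
 * write the super block to every writeable device in the fs, first
 * submitting all the copies (wait == 0) and then collecting them
 * (wait == 1).  Up to num_devices - 1 failed devices are tolerated.
 */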
2233 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2234 {
2235 	struct list_head *head;
2236 	struct btrfs_device *dev;
2237 	struct btrfs_super_block *sb;
2238 	struct btrfs_dev_item *dev_item;
2239 	int ret;
2240 	int do_barriers;
2241 	int max_errors;
2242 	int total_errors = 0;
2243 	u64 flags;
2244 
2245 	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2246 	do_barriers = !btrfs_test_opt(root, NOBARRIER);
2247 
2248 	sb = &root->fs_info->super_for_commit;
2249 	dev_item = &sb->dev_item;
2250 
2251 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2252 	head = &root->fs_info->fs_devices->devices;
2253 	list_for_each_entry(dev, head, dev_list) {
2254 		if (!dev->bdev) {
2255 			total_errors++;
2256 			continue;
2257 		}
2258 		if (!dev->in_fs_metadata || !dev->writeable)
2259 			continue;
2260 
2261 		btrfs_set_stack_device_generation(dev_item, 0);
2262 		btrfs_set_stack_device_type(dev_item, dev->type);
2263 		btrfs_set_stack_device_id(dev_item, dev->devid);
2264 		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2265 		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2266 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2267 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2268 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2269 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2270 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2271 
2272 		flags = btrfs_super_flags(sb);
2273 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2274 
2275 		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2276 		if (ret)
2277 			total_errors++;
2278 	}
2279 	if (total_errors > max_errors) {
2280 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2281 		       total_errors);
2282 		BUG();
2283 	}
2284 
2285 	total_errors = 0;
2286 	list_for_each_entry(dev, head, dev_list) {
2287 		if (!dev->bdev)
2288 			continue;
2289 		if (!dev->in_fs_metadata || !dev->writeable)
2290 			continue;
2291 
2292 		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2293 		if (ret)
2294 			total_errors++;
2295 	}
2296 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2297 	if (total_errors > max_errors) {
2298 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2299 		       total_errors);
2300 		BUG();
2301 	}
2302 	return 0;
2303 }
2304 
2305 int write_ctree_super(struct btrfs_trans_handle *trans,
2306 		      struct btrfs_root *root, int max_mirrors)
2307 {
2308 	int ret;
2309 
2310 	ret = write_all_supers(root, max_mirrors);
2311 	return ret;
2312 }
2313 
2314 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2315 {
2316 	spin_lock(&fs_info->fs_roots_radix_lock);
2317 	radix_tree_delete(&fs_info->fs_roots_radix,
2318 			  (unsigned long)root->root_key.objectid);
2319 	spin_unlock(&fs_info->fs_roots_radix_lock);
2320 
2321 	if (btrfs_root_refs(&root->root_item) == 0)
2322 		synchronize_srcu(&fs_info->subvol_srcu);
2323 
2324 	free_fs_root(root);
2325 	return 0;
2326 }
2327 
2328 static void free_fs_root(struct btrfs_root *root)
2329 {
2330 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2331 	if (root->anon_super.s_dev) {
2332 		down_write(&root->anon_super.s_umount);
2333 		kill_anon_super(&root->anon_super);
2334 	}
2335 	free_extent_buffer(root->node);
2336 	free_extent_buffer(root->commit_root);
2337 	kfree(root->name);
2338 	kfree(root);
2339 }
2340 
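/*
 * drop every fs root we still track: first the dead_roots list,
 * then whatever is left in the radix tree
 */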
2341 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2342 {
2343 	int ret;
2344 	struct btrfs_root *gang[8];
2345 	int i;
2346 
2347 	while (!list_empty(&fs_info->dead_roots)) {
2348 		gang[0] = list_entry(fs_info->dead_roots.next,
2349 				     struct btrfs_root, root_list);
2350 		list_del(&gang[0]->root_list);
2351 
2352 		if (gang[0]->in_radix) {
2353 			btrfs_free_fs_root(fs_info, gang[0]);
2354 		} else {
2355 			free_extent_buffer(gang[0]->node);
2356 			free_extent_buffer(gang[0]->commit_root);
2357 			kfree(gang[0]);
2358 		}
2359 	}
2360 
2361 	while (1) {
2362 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2363 					     (void **)gang, 0,
2364 					     ARRAY_SIZE(gang));
2365 		if (!ret)
2366 			break;
2367 		for (i = 0; i < ret; i++)
2368 			btrfs_free_fs_root(fs_info, gang[i]);
2369 	}
2370 	return 0;
2371 }
2372 
2373 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2374 {
2375 	u64 root_objectid = 0;
2376 	struct btrfs_root *gang[8];
2377 	int i;
2378 	int ret;
2379 
2380 	while (1) {
2381 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2382 					     (void **)gang, root_objectid,
2383 					     ARRAY_SIZE(gang));
2384 		if (!ret)
2385 			break;
2386 
2387 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
2388 		for (i = 0; i < ret; i++) {
2389 			root_objectid = gang[i]->root_key.objectid;
2390 			btrfs_orphan_cleanup(gang[i]);
2391 		}
2392 		root_objectid++;
2393 	}
2394 	return 0;
2395 }
2396 
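/*
 * commit the running transaction and write out the super blocks;
 * used for a clean shutdown and after read-only log replay
 */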
2397 int btrfs_commit_super(struct btrfs_root *root)
2398 {
2399 	struct btrfs_trans_handle *trans;
2400 	int ret;
2401 
2402 	mutex_lock(&root->fs_info->cleaner_mutex);
2403 	btrfs_run_delayed_iputs(root);
2404 	btrfs_clean_old_snapshots(root);
2405 	mutex_unlock(&root->fs_info->cleaner_mutex);
2406 
2407 	/* wait until the ongoing cleanup work is done */
2408 	down_write(&root->fs_info->cleanup_work_sem);
2409 	up_write(&root->fs_info->cleanup_work_sem);
2410 
2411 	trans = btrfs_start_transaction(root, 1);
2412 	ret = btrfs_commit_transaction(trans, root);
2413 	BUG_ON(ret);
2414 	/* run commit again to drop the original snapshot */
2415 	trans = btrfs_start_transaction(root, 1);
2416 	btrfs_commit_transaction(trans, root);
2417 	ret = btrfs_write_and_wait_transaction(NULL, root);
2418 	BUG_ON(ret);
2419 
2420 	ret = write_ctree_super(NULL, root, 0);
2421 	return ret;
2422 }
2423 
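/*
 * tear the filesystem down: stop the helper kthreads, commit if we
 * are read-write, then free the tree roots and worker pools
 */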
2424 int close_ctree(struct btrfs_root *root)
2425 {
2426 	struct btrfs_fs_info *fs_info = root->fs_info;
2427 	int ret;
2428 
2429 	fs_info->closing = 1;
2430 	smp_mb();
2431 
2432 	kthread_stop(root->fs_info->transaction_kthread);
2433 	kthread_stop(root->fs_info->cleaner_kthread);
2434 
2435 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2436 		ret =  btrfs_commit_super(root);
2437 		if (ret)
2438 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2439 	}
2440 
2441 	fs_info->closing = 2;
2442 	smp_mb();
2443 
2444 	if (fs_info->delalloc_bytes) {
2445 		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2446 		       (unsigned long long)fs_info->delalloc_bytes);
2447 	}
2448 	if (fs_info->total_ref_cache_size) {
2449 		printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2450 		       (unsigned long long)fs_info->total_ref_cache_size);
2451 	}
2452 
2453 	free_extent_buffer(fs_info->extent_root->node);
2454 	free_extent_buffer(fs_info->extent_root->commit_root);
2455 	free_extent_buffer(fs_info->tree_root->node);
2456 	free_extent_buffer(fs_info->tree_root->commit_root);
2457 	free_extent_buffer(root->fs_info->chunk_root->node);
2458 	free_extent_buffer(root->fs_info->chunk_root->commit_root);
2459 	free_extent_buffer(root->fs_info->dev_root->node);
2460 	free_extent_buffer(root->fs_info->dev_root->commit_root);
2461 	free_extent_buffer(root->fs_info->csum_root->node);
2462 	free_extent_buffer(root->fs_info->csum_root->commit_root);
2463 
2464 	btrfs_free_block_groups(root->fs_info);
2465 
2466 	del_fs_roots(fs_info);
2467 
2468 	iput(fs_info->btree_inode);
2469 
2470 	btrfs_stop_workers(&fs_info->generic_worker);
2471 	btrfs_stop_workers(&fs_info->fixup_workers);
2472 	btrfs_stop_workers(&fs_info->delalloc_workers);
2473 	btrfs_stop_workers(&fs_info->workers);
2474 	btrfs_stop_workers(&fs_info->endio_workers);
2475 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2476 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2477 	btrfs_stop_workers(&fs_info->endio_write_workers);
2478 	btrfs_stop_workers(&fs_info->submit_workers);
2479 	btrfs_stop_workers(&fs_info->enospc_workers);
2480 
2481 	btrfs_close_devices(fs_info->fs_devices);
2482 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2483 
2484 	bdi_destroy(&fs_info->bdi);
2485 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2486 
2487 	kfree(fs_info->extent_root);
2488 	kfree(fs_info->tree_root);
2489 	kfree(fs_info->chunk_root);
2490 	kfree(fs_info->dev_root);
2491 	kfree(fs_info->csum_root);
2492 	return 0;
2493 }
2494 
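/*
 * returns non-zero only if the buffer is up to date and its
 * generation matches parent_transid
 */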
2495 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2496 {
2497 	int ret;
2498 	struct inode *btree_inode = buf->first_page->mapping->host;
2499 
2500 	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
2501 	if (!ret)
2502 		return ret;
2503 
2504 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2505 				    parent_transid);
2506 	return !ret;
2507 }
2508 
2509 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2510 {
2511 	struct inode *btree_inode = buf->first_page->mapping->host;
2512 	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2513 					  buf);
2514 }
2515 
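/*
 * dirty a metadata buffer.  The caller must hold the tree lock, and
 * the buffer is expected to belong to the running transaction; a
 * generation mismatch only triggers a warning.
 */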
2516 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2517 {
2518 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2519 	u64 transid = btrfs_header_generation(buf);
2520 	struct inode *btree_inode = root->fs_info->btree_inode;
2521 	int was_dirty;
2522 
2523 	btrfs_assert_tree_locked(buf);
2524 	if (transid != root->fs_info->generation) {
2525 		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2526 		       "found %llu running %llu\n",
2527 			(unsigned long long)buf->start,
2528 			(unsigned long long)transid,
2529 			(unsigned long long)root->fs_info->generation);
2530 		WARN_ON(1);
2531 	}
2532 	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2533 					    buf);
2534 	if (!was_dirty) {
2535 		spin_lock(&root->fs_info->delalloc_lock);
2536 		root->fs_info->dirty_metadata_bytes += buf->len;
2537 		spin_unlock(&root->fs_info->delalloc_lock);
2538 	}
2539 }
2540 
2541 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2542 {
2543 	/*
2544 	 * looks as though older kernels can get into trouble with
2545 	 * this code; they end up stuck in balance_dirty_pages forever
2546 	 */
2547 	u64 num_dirty;
2548 	unsigned long thresh = 32 * 1024 * 1024;
2549 
2550 	if (current->flags & PF_MEMALLOC)
2551 		return;
2552 
2553 	num_dirty = root->fs_info->dirty_metadata_bytes;
2554 
2555 	if (num_dirty > thresh) {
2556 		balance_dirty_pages_ratelimited_nr(
2557 				   root->fs_info->btree_inode->i_mapping, 1);
2558 	}
2559 	return;
2560 }
2561 
2562 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2563 {
2564 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2565 	int ret;
2566 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2567 	if (ret == 0)
2568 		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2569 	return ret;
2570 }
2571 
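/*
 * called from the extent_io writepage path: lock the extent buffer
 * behind @page before the page itself is locked, and pull its bytes
 * back out of dirty_metadata_bytes if the buffer was dirty
 */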
2572 int btree_lock_page_hook(struct page *page)
2573 {
2574 	struct inode *inode = page->mapping->host;
2575 	struct btrfs_root *root = BTRFS_I(inode)->root;
2576 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2577 	struct extent_buffer *eb;
2578 	unsigned long len;
2579 	u64 bytenr = page_offset(page);
2580 
2581 	if (page->private == EXTENT_PAGE_PRIVATE)
2582 		goto out;
2583 
2584 	len = page->private >> 2;
2585 	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2586 	if (!eb)
2587 		goto out;
2588 
2589 	btrfs_tree_lock(eb);
2590 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2591 
2592 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2593 		spin_lock(&root->fs_info->delalloc_lock);
2594 		if (root->fs_info->dirty_metadata_bytes >= eb->len)
2595 			root->fs_info->dirty_metadata_bytes -= eb->len;
2596 		else
2597 			WARN_ON(1);
2598 		spin_unlock(&root->fs_info->delalloc_lock);
2599 	}
2600 
2601 	btrfs_tree_unlock(eb);
2602 	free_extent_buffer(eb);
2603 out:
2604 	lock_page(page);
2605 	return 0;
2606 }
2607 
2608 static struct extent_io_ops btree_extent_io_ops = {
2609 	.write_cache_pages_lock_hook = btree_lock_page_hook,
2610 	.readpage_end_io_hook = btree_readpage_end_io_hook,
2611 	.submit_bio_hook = btree_submit_bio_hook,
2612 	/* note we're sharing with inode.c for the merge bio hook */
2613 	.merge_bio_hook = btrfs_merge_bio_hook,
2614 };
2615