xref: /openbmc/linux/fs/btrfs/volumes.c (revision 089a49b6)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "compat.h"
32 #include "ctree.h"
33 #include "extent_map.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "print-tree.h"
37 #include "volumes.h"
38 #include "raid56.h"
39 #include "async-thread.h"
40 #include "check-integrity.h"
41 #include "rcu-string.h"
42 #include "math.h"
43 #include "dev-replace.h"
44 
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46 				struct btrfs_root *root,
47 				struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
50 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
51 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
52 
53 static DEFINE_MUTEX(uuid_mutex);
54 static LIST_HEAD(fs_uuids);
55 
56 static void lock_chunks(struct btrfs_root *root)
57 {
58 	mutex_lock(&root->fs_info->chunk_mutex);
59 }
60 
61 static void unlock_chunks(struct btrfs_root *root)
62 {
63 	mutex_unlock(&root->fs_info->chunk_mutex);
64 }
65 
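/*
 * Allocate an empty struct btrfs_fs_devices with its lists and mutex
 * initialized; the caller fills in the fsid.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure.
 */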
66 static struct btrfs_fs_devices *__alloc_fs_devices(void)
67 {
68 	struct btrfs_fs_devices *fs_devs;
69 
70 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
71 	if (!fs_devs)
72 		return ERR_PTR(-ENOMEM);
73 
74 	mutex_init(&fs_devs->device_list_mutex);
75 
76 	INIT_LIST_HEAD(&fs_devs->devices);
77 	INIT_LIST_HEAD(&fs_devs->alloc_list);
78 	INIT_LIST_HEAD(&fs_devs->list);
79 
80 	return fs_devs;
81 }
82 
83 /**
84  * alloc_fs_devices - allocate struct btrfs_fs_devices
85  * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
86  *		generated.
87  *
88  * Return: a pointer to a new &struct btrfs_fs_devices on success;
89  * ERR_PTR() on error.  Returned struct is not linked onto any lists and
90  * can be destroyed with kfree() right away.
91  */
92 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
93 {
94 	struct btrfs_fs_devices *fs_devs;
95 
96 	fs_devs = __alloc_fs_devices();
97 	if (IS_ERR(fs_devs))
98 		return fs_devs;
99 
100 	if (fsid)
101 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
102 	else
103 		generate_random_uuid(fs_devs->fsid);
104 
105 	return fs_devs;
106 }
107 
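/*
 * Tear down an fs_devices that is no longer opened: unlink and free every
 * device on it, including the rcu-protected name strings, then free the
 * fs_devices itself.
 */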
108 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
109 {
110 	struct btrfs_device *device;
111 	WARN_ON(fs_devices->opened);
112 	while (!list_empty(&fs_devices->devices)) {
113 		device = list_entry(fs_devices->devices.next,
114 				    struct btrfs_device, dev_list);
115 		list_del(&device->dev_list);
116 		rcu_string_free(device->name);
117 		kfree(device);
118 	}
119 	kfree(fs_devices);
120 }
121 
122 static void btrfs_kobject_uevent(struct block_device *bdev,
123 				 enum kobject_action action)
124 {
125 	int ret;
126 
127 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
128 	if (ret)
129 		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
130 			action,
131 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
132 			&disk_to_dev(bdev->bd_disk)->kobj);
133 }
134 
135 void btrfs_cleanup_fs_uuids(void)
136 {
137 	struct btrfs_fs_devices *fs_devices;
138 
139 	while (!list_empty(&fs_uuids)) {
140 		fs_devices = list_entry(fs_uuids.next,
141 					struct btrfs_fs_devices, list);
142 		list_del(&fs_devices->list);
143 		free_fs_devices(fs_devices);
144 	}
145 }
146 
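/*
 * Allocate an empty struct btrfs_device and initialize its list heads,
 * io_lock and readahead state.  The devid and uuid are filled in later
 * by btrfs_alloc_device().
 */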
147 static struct btrfs_device *__alloc_device(void)
148 {
149 	struct btrfs_device *dev;
150 
151 	dev = kzalloc(sizeof(*dev), GFP_NOFS);
152 	if (!dev)
153 		return ERR_PTR(-ENOMEM);
154 
155 	INIT_LIST_HEAD(&dev->dev_list);
156 	INIT_LIST_HEAD(&dev->dev_alloc_list);
157 
158 	spin_lock_init(&dev->io_lock);
159 
160 	spin_lock_init(&dev->reada_lock);
161 	atomic_set(&dev->reada_in_flight, 0);
162 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
163 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
164 
165 	return dev;
166 }
167 
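/*
 * Return the device with the given devid (and uuid, when one is supplied)
 * from a device list, or NULL if there is no match.
 */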
168 static noinline struct btrfs_device *__find_device(struct list_head *head,
169 						   u64 devid, u8 *uuid)
170 {
171 	struct btrfs_device *dev;
172 
173 	list_for_each_entry(dev, head, dev_list) {
174 		if (dev->devid == devid &&
175 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
176 			return dev;
177 		}
178 	}
179 	return NULL;
180 }
181 
182 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
183 {
184 	struct btrfs_fs_devices *fs_devices;
185 
186 	list_for_each_entry(fs_devices, &fs_uuids, list) {
187 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
188 			return fs_devices;
189 	}
190 	return NULL;
191 }
192 
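/*
 * Open the block device at @device_path and read its btrfs super block.
 * On success, *bdev and *bh are handed to the caller; on failure, both
 * are cleared and an error is returned.
 */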
193 static int
194 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
195 		      int flush, struct block_device **bdev,
196 		      struct buffer_head **bh)
197 {
198 	int ret;
199 
200 	*bdev = blkdev_get_by_path(device_path, flags, holder);
201 
202 	if (IS_ERR(*bdev)) {
203 		ret = PTR_ERR(*bdev);
204 		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
205 		goto error;
206 	}
207 
208 	if (flush)
209 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
210 	ret = set_blocksize(*bdev, 4096);
211 	if (ret) {
212 		blkdev_put(*bdev, flags);
213 		goto error;
214 	}
215 	invalidate_bdev(*bdev);
216 	*bh = btrfs_read_dev_super(*bdev);
217 	if (!*bh) {
218 		ret = -EINVAL;
219 		blkdev_put(*bdev, flags);
220 		goto error;
221 	}
222 
223 	return 0;
224 
225 error:
226 	*bdev = NULL;
227 	*bh = NULL;
228 	return ret;
229 }
230 
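/*
 * Splice a chain of bios back onto the front of the pending list so they
 * are retried before anything that was queued in the meantime.
 */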
231 static void requeue_list(struct btrfs_pending_bios *pending_bios,
232 			struct bio *head, struct bio *tail)
233 {
234 
235 	struct bio *old_head;
236 
237 	old_head = pending_bios->head;
238 	pending_bios->head = head;
239 	if (pending_bios->tail)
240 		tail->bi_next = old_head;
241 	else
242 		pending_bios->tail = tail;
243 }
244 
245 /*
246  * we try to collect pending bios for a device so we don't get a large
247  * number of procs sending bios down to the same device.  This greatly
248  * improves the scheduler's ability to collect and merge the bios.
249  *
250  * But, it also turns into a long list of bios to process and that is sure
251  * to eventually make the worker thread block.  The solution here is to
252  * make some progress and then put this work struct back at the end of
253  * the list if the block device is congested.  This way, multiple devices
254  * can make progress from a single worker thread.
255  */
256 static noinline void run_scheduled_bios(struct btrfs_device *device)
257 {
258 	struct bio *pending;
259 	struct backing_dev_info *bdi;
260 	struct btrfs_fs_info *fs_info;
261 	struct btrfs_pending_bios *pending_bios;
262 	struct bio *tail;
263 	struct bio *cur;
264 	int again = 0;
265 	unsigned long num_run;
266 	unsigned long batch_run = 0;
267 	unsigned long limit;
268 	unsigned long last_waited = 0;
269 	int force_reg = 0;
270 	int sync_pending = 0;
271 	struct blk_plug plug;
272 
273 	/*
274 	 * this function runs all the bios we've collected for
275 	 * a particular device.  We don't want to wander off to
276 	 * another device without first sending all of these down.
277 	 * So, set up a plug here and finish it off before we return.
278 	 */
279 	blk_start_plug(&plug);
280 
281 	bdi = blk_get_backing_dev_info(device->bdev);
282 	fs_info = device->dev_root->fs_info;
283 	limit = btrfs_async_submit_limit(fs_info);
284 	limit = limit * 2 / 3;
285 
286 loop:
287 	spin_lock(&device->io_lock);
288 
289 loop_lock:
290 	num_run = 0;
291 
292 	/* take all the bios off the list at once and process them
293 	 * later on (without the lock held).  But, remember the
294 	 * tail and other pointers so the bios can be properly reinserted
295 	 * into the list if we hit congestion
296 	 */
297 	if (!force_reg && device->pending_sync_bios.head) {
298 		pending_bios = &device->pending_sync_bios;
299 		force_reg = 1;
300 	} else {
301 		pending_bios = &device->pending_bios;
302 		force_reg = 0;
303 	}
304 
305 	pending = pending_bios->head;
306 	tail = pending_bios->tail;
307 	WARN_ON(pending && !tail);
308 
309 	/*
310 	 * if pending was null this time around, no bios need processing
311 	 * at all and we can stop.  Otherwise it'll loop back up again
312 	 * and do an additional check so no bios are missed.
313 	 *
314 	 * device->running_pending is used to synchronize with the
315 	 * schedule_bio code.
316 	 */
317 	if (device->pending_sync_bios.head == NULL &&
318 	    device->pending_bios.head == NULL) {
319 		again = 0;
320 		device->running_pending = 0;
321 	} else {
322 		again = 1;
323 		device->running_pending = 1;
324 	}
325 
326 	pending_bios->head = NULL;
327 	pending_bios->tail = NULL;
328 
329 	spin_unlock(&device->io_lock);
330 
331 	while (pending) {
332 
333 		rmb();
334 		/* we want to work on both lists, but do more bios on the
335 		 * sync list than the regular list
336 		 */
337 		if ((num_run > 32 &&
338 		    pending_bios != &device->pending_sync_bios &&
339 		    device->pending_sync_bios.head) ||
340 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
341 		    device->pending_bios.head)) {
342 			spin_lock(&device->io_lock);
343 			requeue_list(pending_bios, pending, tail);
344 			goto loop_lock;
345 		}
346 
347 		cur = pending;
348 		pending = pending->bi_next;
349 		cur->bi_next = NULL;
350 
351 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
352 		    waitqueue_active(&fs_info->async_submit_wait))
353 			wake_up(&fs_info->async_submit_wait);
354 
355 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
356 
357 		/*
358 		 * if we're doing the sync list, record that our
359 		 * plug has some sync requests on it
360 		 *
361 		 * If we're doing the regular list and there are
362 		 * sync requests sitting around, unplug before
363 		 * we add more
364 		 */
365 		if (pending_bios == &device->pending_sync_bios) {
366 			sync_pending = 1;
367 		} else if (sync_pending) {
368 			blk_finish_plug(&plug);
369 			blk_start_plug(&plug);
370 			sync_pending = 0;
371 		}
372 
373 		btrfsic_submit_bio(cur->bi_rw, cur);
374 		num_run++;
375 		batch_run++;
376 		if (need_resched())
377 			cond_resched();
378 
379 		/*
380 		 * we made progress, there is more work to do and the bdi
381 		 * is now congested.  Back off and let other work structs
382 		 * run instead
383 		 */
384 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
385 		    fs_info->fs_devices->open_devices > 1) {
386 			struct io_context *ioc;
387 
388 			ioc = current->io_context;
389 
390 			/*
391 			 * the main goal here is that we don't want to
392 			 * block if we're going to be able to submit
393 			 * more requests without blocking.
394 			 *
395 			 * This code does two great things: it pokes into
396 			 * the elevator code from a filesystem _and_
397 			 * it makes assumptions about how batching works.
398 			 */
399 			if (ioc && ioc->nr_batch_requests > 0 &&
400 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
401 			    (last_waited == 0 ||
402 			     ioc->last_waited == last_waited)) {
403 				/*
404 				 * we want to go through our batch of
405 				 * requests and stop.  So, we copy out
406 				 * the ioc->last_waited time and test
407 				 * against it before looping
408 				 */
409 				last_waited = ioc->last_waited;
410 				if (need_resched())
411 					cond_resched();
412 				continue;
413 			}
414 			spin_lock(&device->io_lock);
415 			requeue_list(pending_bios, pending, tail);
416 			device->running_pending = 1;
417 
418 			spin_unlock(&device->io_lock);
419 			btrfs_requeue_work(&device->work);
420 			goto done;
421 		}
422 		/* unplug every 64 requests just for good measure */
423 		if (batch_run % 64 == 0) {
424 			blk_finish_plug(&plug);
425 			blk_start_plug(&plug);
426 			sync_pending = 0;
427 		}
428 	}
429 
430 	cond_resched();
431 	if (again)
432 		goto loop;
433 
434 	spin_lock(&device->io_lock);
435 	if (device->pending_bios.head || device->pending_sync_bios.head)
436 		goto loop_lock;
437 	spin_unlock(&device->io_lock);
438 
439 done:
440 	blk_finish_plug(&plug);
441 }
442 
443 static void pending_bios_fn(struct btrfs_work *work)
444 {
445 	struct btrfs_device *device;
446 
447 	device = container_of(work, struct btrfs_device, work);
448 	run_scheduled_bios(device);
449 }
450 
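/*
 * Track a freshly scanned device: create the fs_devices for its fsid if
 * this is the first device we have seen for it, add the device if it is
 * new, or just refresh the stored path if the device was already known.
 */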
451 static noinline int device_list_add(const char *path,
452 			   struct btrfs_super_block *disk_super,
453 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
454 {
455 	struct btrfs_device *device;
456 	struct btrfs_fs_devices *fs_devices;
457 	struct rcu_string *name;
458 	u64 found_transid = btrfs_super_generation(disk_super);
459 
460 	fs_devices = find_fsid(disk_super->fsid);
461 	if (!fs_devices) {
462 		fs_devices = alloc_fs_devices(disk_super->fsid);
463 		if (IS_ERR(fs_devices))
464 			return PTR_ERR(fs_devices);
465 
466 		list_add(&fs_devices->list, &fs_uuids);
467 		fs_devices->latest_devid = devid;
468 		fs_devices->latest_trans = found_transid;
469 
470 		device = NULL;
471 	} else {
472 		device = __find_device(&fs_devices->devices, devid,
473 				       disk_super->dev_item.uuid);
474 	}
475 	if (!device) {
476 		if (fs_devices->opened)
477 			return -EBUSY;
478 
479 		device = btrfs_alloc_device(NULL, &devid,
480 					    disk_super->dev_item.uuid);
481 		if (IS_ERR(device)) {
482 			/* we can safely leave the fs_devices entry around */
483 			return PTR_ERR(device);
484 		}
485 
486 		name = rcu_string_strdup(path, GFP_NOFS);
487 		if (!name) {
488 			kfree(device);
489 			return -ENOMEM;
490 		}
491 		rcu_assign_pointer(device->name, name);
492 
493 		mutex_lock(&fs_devices->device_list_mutex);
494 		list_add_rcu(&device->dev_list, &fs_devices->devices);
495 		fs_devices->num_devices++;
496 		mutex_unlock(&fs_devices->device_list_mutex);
497 
498 		device->fs_devices = fs_devices;
499 	} else if (!device->name || strcmp(device->name->str, path)) {
500 		name = rcu_string_strdup(path, GFP_NOFS);
501 		if (!name)
502 			return -ENOMEM;
503 		rcu_string_free(device->name);
504 		rcu_assign_pointer(device->name, name);
505 		if (device->missing) {
506 			fs_devices->missing_devices--;
507 			device->missing = 0;
508 		}
509 	}
510 
511 	if (found_transid > fs_devices->latest_trans) {
512 		fs_devices->latest_devid = devid;
513 		fs_devices->latest_trans = found_transid;
514 	}
515 	*fs_devices_ret = fs_devices;
516 	return 0;
517 }
518 
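/*
 * Make a private copy of an fs_devices and all of the devices on it.
 * Returns an ERR_PTR on allocation failure.
 */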
519 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
520 {
521 	struct btrfs_fs_devices *fs_devices;
522 	struct btrfs_device *device;
523 	struct btrfs_device *orig_dev;
524 
525 	fs_devices = alloc_fs_devices(orig->fsid);
526 	if (IS_ERR(fs_devices))
527 		return fs_devices;
528 
529 	fs_devices->latest_devid = orig->latest_devid;
530 	fs_devices->latest_trans = orig->latest_trans;
531 	fs_devices->total_devices = orig->total_devices;
532 
533 	/* The volume lock is held, so it is safe to get the devices. */
534 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
535 		struct rcu_string *name;
536 
537 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
538 					    orig_dev->uuid);
539 		if (IS_ERR(device))
540 			goto error;
541 
542 		/*
543 		 * This is ok to do without the rcu read lock held because we hold the
544 		 * uuid mutex so nothing we touch in here is going to disappear.
545 		 */
546 		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
547 		if (!name) {
548 			kfree(device);
549 			goto error;
550 		}
551 		rcu_assign_pointer(device->name, name);
552 
553 		list_add(&device->dev_list, &fs_devices->devices);
554 		device->fs_devices = fs_devices;
555 		fs_devices->num_devices++;
556 	}
557 	return fs_devices;
558 error:
559 	free_fs_devices(fs_devices);
560 	return ERR_PTR(-ENOMEM);
561 }
562 
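/*
 * Release scanned devices that are not part of the mounted filesystem.
 * A stale dev-replace target is kept during step 0 and only dropped in
 * step 1, after the dev-replace state has been read from the device tree
 * and we know whether the replace is really active.
 */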
563 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
564 			       struct btrfs_fs_devices *fs_devices, int step)
565 {
566 	struct btrfs_device *device, *next;
567 
568 	struct block_device *latest_bdev = NULL;
569 	u64 latest_devid = 0;
570 	u64 latest_transid = 0;
571 
572 	mutex_lock(&uuid_mutex);
573 again:
574 	/* This is the initialized path; it is safe to release the devices. */
575 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
576 		if (device->in_fs_metadata) {
577 			if (!device->is_tgtdev_for_dev_replace &&
578 			    (!latest_transid ||
579 			     device->generation > latest_transid)) {
580 				latest_devid = device->devid;
581 				latest_transid = device->generation;
582 				latest_bdev = device->bdev;
583 			}
584 			continue;
585 		}
586 
587 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
588 			/*
589 			 * In the first step, keep the device which has
590 			 * the correct fsid and the devid that is used
591 			 * for the dev_replace procedure.
592 			 * In the second step, the dev_replace state is
593 			 * read from the device tree and it is known
594 			 * whether the procedure is really active or
595 			 * not, which means whether this device is
596 			 * used or whether it should be removed.
597 			 */
598 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
599 				continue;
600 			}
601 		}
602 		if (device->bdev) {
603 			blkdev_put(device->bdev, device->mode);
604 			device->bdev = NULL;
605 			fs_devices->open_devices--;
606 		}
607 		if (device->writeable) {
608 			list_del_init(&device->dev_alloc_list);
609 			device->writeable = 0;
610 			if (!device->is_tgtdev_for_dev_replace)
611 				fs_devices->rw_devices--;
612 		}
613 		list_del_init(&device->dev_list);
614 		fs_devices->num_devices--;
615 		rcu_string_free(device->name);
616 		kfree(device);
617 	}
618 
619 	if (fs_devices->seed) {
620 		fs_devices = fs_devices->seed;
621 		goto again;
622 	}
623 
624 	fs_devices->latest_bdev = latest_bdev;
625 	fs_devices->latest_devid = latest_devid;
626 	fs_devices->latest_trans = latest_transid;
627 
628 	mutex_unlock(&uuid_mutex);
629 }
630 
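/*
 * Device freeing is split in two stages: the RCU callback free_device()
 * only schedules __free_device() on a workqueue, because blkdev_put()
 * may sleep and must not run in the atomic RCU callback context.
 */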
631 static void __free_device(struct work_struct *work)
632 {
633 	struct btrfs_device *device;
634 
635 	device = container_of(work, struct btrfs_device, rcu_work);
636 
637 	if (device->bdev)
638 		blkdev_put(device->bdev, device->mode);
639 
640 	rcu_string_free(device->name);
641 	kfree(device);
642 }
643 
644 static void free_device(struct rcu_head *head)
645 {
646 	struct btrfs_device *device;
647 
648 	device = container_of(head, struct btrfs_device, rcu);
649 
650 	INIT_WORK(&device->rcu_work, __free_device);
651 	schedule_work(&device->rcu_work);
652 }
653 
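/*
 * Drop one open count; when it reaches zero, close every block device.
 * Each btrfs_device is swapped for a fresh copy via list_replace_rcu()
 * and freed through call_rcu(), so lockless readers of the device list
 * stay safe.
 */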
654 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
655 {
656 	struct btrfs_device *device;
657 
658 	if (--fs_devices->opened > 0)
659 		return 0;
660 
661 	mutex_lock(&fs_devices->device_list_mutex);
662 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
663 		struct btrfs_device *new_device;
664 		struct rcu_string *name;
665 
666 		if (device->bdev)
667 			fs_devices->open_devices--;
668 
669 		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
670 			list_del_init(&device->dev_alloc_list);
671 			fs_devices->rw_devices--;
672 		}
673 
674 		if (device->can_discard)
675 			fs_devices->num_can_discard--;
676 		if (device->missing)
677 			fs_devices->missing_devices--;
678 
679 		new_device = btrfs_alloc_device(NULL, &device->devid,
680 						device->uuid);
681 		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
682 
683 		/* Safe because we are under uuid_mutex */
684 		if (device->name) {
685 			name = rcu_string_strdup(device->name->str, GFP_NOFS);
686 			BUG_ON(!name); /* -ENOMEM */
687 			rcu_assign_pointer(new_device->name, name);
688 		}
689 
690 		list_replace_rcu(&device->dev_list, &new_device->dev_list);
691 		new_device->fs_devices = device->fs_devices;
692 
693 		call_rcu(&device->rcu, free_device);
694 	}
695 	mutex_unlock(&fs_devices->device_list_mutex);
696 
697 	WARN_ON(fs_devices->open_devices);
698 	WARN_ON(fs_devices->rw_devices);
699 	fs_devices->opened = 0;
700 	fs_devices->seeding = 0;
701 
702 	return 0;
703 }
704 
705 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
706 {
707 	struct btrfs_fs_devices *seed_devices = NULL;
708 	int ret;
709 
710 	mutex_lock(&uuid_mutex);
711 	ret = __btrfs_close_devices(fs_devices);
712 	if (!fs_devices->opened) {
713 		seed_devices = fs_devices->seed;
714 		fs_devices->seed = NULL;
715 	}
716 	mutex_unlock(&uuid_mutex);
717 
718 	while (seed_devices) {
719 		fs_devices = seed_devices;
720 		seed_devices = fs_devices->seed;
721 		__btrfs_close_devices(fs_devices);
722 		free_fs_devices(fs_devices);
723 	}
724 	/*
725 	 * Wait for rcu kworkers under __btrfs_close_devices
726 	 * to finish all blkdev_puts so the device is really
727 	 * free when umount is done.
728 	 */
729 	rcu_barrier();
730 	return ret;
731 }
732 
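/*
 * Open every device on the list, verify the super block still matches the
 * cached devid and uuid, and remember the device with the highest
 * generation as latest_bdev.  Devices that fail to open are skipped;
 * -EINVAL is returned only if nothing could be opened.
 */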
733 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
734 				fmode_t flags, void *holder)
735 {
736 	struct request_queue *q;
737 	struct block_device *bdev;
738 	struct list_head *head = &fs_devices->devices;
739 	struct btrfs_device *device;
740 	struct block_device *latest_bdev = NULL;
741 	struct buffer_head *bh;
742 	struct btrfs_super_block *disk_super;
743 	u64 latest_devid = 0;
744 	u64 latest_transid = 0;
745 	u64 devid;
746 	int seeding = 1;
747 	int ret = 0;
748 
749 	flags |= FMODE_EXCL;
750 
751 	list_for_each_entry(device, head, dev_list) {
752 		if (device->bdev)
753 			continue;
754 		if (!device->name)
755 			continue;
756 
757 		/* Just open everything we can; ignore failures here */
758 		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
759 					    &bdev, &bh))
760 			continue;
761 
762 		disk_super = (struct btrfs_super_block *)bh->b_data;
763 		devid = btrfs_stack_device_id(&disk_super->dev_item);
764 		if (devid != device->devid)
765 			goto error_brelse;
766 
767 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
768 			   BTRFS_UUID_SIZE))
769 			goto error_brelse;
770 
771 		device->generation = btrfs_super_generation(disk_super);
772 		if (!latest_transid || device->generation > latest_transid) {
773 			latest_devid = devid;
774 			latest_transid = device->generation;
775 			latest_bdev = bdev;
776 		}
777 
778 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
779 			device->writeable = 0;
780 		} else {
781 			device->writeable = !bdev_read_only(bdev);
782 			seeding = 0;
783 		}
784 
785 		q = bdev_get_queue(bdev);
786 		if (blk_queue_discard(q)) {
787 			device->can_discard = 1;
788 			fs_devices->num_can_discard++;
789 		}
790 
791 		device->bdev = bdev;
792 		device->in_fs_metadata = 0;
793 		device->mode = flags;
794 
795 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
796 			fs_devices->rotating = 1;
797 
798 		fs_devices->open_devices++;
799 		if (device->writeable &&
800 		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
801 			fs_devices->rw_devices++;
802 			list_add(&device->dev_alloc_list,
803 				 &fs_devices->alloc_list);
804 		}
805 		brelse(bh);
806 		continue;
807 
808 error_brelse:
809 		brelse(bh);
810 		blkdev_put(bdev, flags);
811 		continue;
812 	}
813 	if (fs_devices->open_devices == 0) {
814 		ret = -EINVAL;
815 		goto out;
816 	}
817 	fs_devices->seeding = seeding;
818 	fs_devices->opened = 1;
819 	fs_devices->latest_bdev = latest_bdev;
820 	fs_devices->latest_devid = latest_devid;
821 	fs_devices->latest_trans = latest_transid;
822 	fs_devices->total_rw_bytes = 0;
823 out:
824 	return ret;
825 }
826 
827 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
828 		       fmode_t flags, void *holder)
829 {
830 	int ret;
831 
832 	mutex_lock(&uuid_mutex);
833 	if (fs_devices->opened) {
834 		fs_devices->opened++;
835 		ret = 0;
836 	} else {
837 		ret = __btrfs_open_devices(fs_devices, flags, holder);
838 	}
839 	mutex_unlock(&uuid_mutex);
840 	return ret;
841 }
842 
843 /*
844  * Look for a btrfs signature on a device. This may be called out of the mount path
845  * and we are not allowed to call set_blocksize during the scan. The superblock
846  * is read via the pagecache.
847  */
848 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
849 			  struct btrfs_fs_devices **fs_devices_ret)
850 {
851 	struct btrfs_super_block *disk_super;
852 	struct block_device *bdev;
853 	struct page *page;
854 	void *p;
855 	int ret = -EINVAL;
856 	u64 devid;
857 	u64 transid;
858 	u64 total_devices;
859 	u64 bytenr;
860 	pgoff_t index;
861 
862 	/*
863 	 * we would like to check all the supers, but that would make
864 	 * a btrfs mount succeed after a mkfs from a different FS.
865 	 * So, we need to add a special mount option to scan for
866 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
867 	 */
868 	bytenr = btrfs_sb_offset(0);
869 	flags |= FMODE_EXCL;
870 	mutex_lock(&uuid_mutex);
871 
872 	bdev = blkdev_get_by_path(path, flags, holder);
873 
874 	if (IS_ERR(bdev)) {
875 		ret = PTR_ERR(bdev);
876 		goto error;
877 	}
878 
879 	/* make sure our super fits in the device */
880 	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
881 		goto error_bdev_put;
882 
883 	/* make sure our super fits in the page */
884 	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
885 		goto error_bdev_put;
886 
887 	/* make sure our super doesn't straddle pages on disk */
888 	index = bytenr >> PAGE_CACHE_SHIFT;
889 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
890 		goto error_bdev_put;
891 
892 	/* pull in the page with our super */
893 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
894 				   index, GFP_NOFS);
895 
896 	if (IS_ERR_OR_NULL(page))
897 		goto error_bdev_put;
898 
899 	p = kmap(page);
900 
901 	/* align our pointer to the offset of the super block */
902 	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
903 
904 	if (btrfs_super_bytenr(disk_super) != bytenr ||
905 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
906 		goto error_unmap;
907 
908 	devid = btrfs_stack_device_id(&disk_super->dev_item);
909 	transid = btrfs_super_generation(disk_super);
910 	total_devices = btrfs_super_num_devices(disk_super);
911 
912 	if (disk_super->label[0]) {
913 		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
914 			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
915 		printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
916 	} else {
917 		printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
918 	}
919 
920 	printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
921 
922 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
923 	if (!ret && fs_devices_ret)
924 		(*fs_devices_ret)->total_devices = total_devices;
925 
926 error_unmap:
927 	kunmap(page);
928 	page_cache_release(page);
929 
930 error_bdev_put:
931 	blkdev_put(bdev, flags);
932 error:
933 	mutex_unlock(&uuid_mutex);
934 	return ret;
935 }
936 
937 /* helper to account for the used device space in the range [start, end] */
938 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
939 				   u64 end, u64 *length)
940 {
941 	struct btrfs_key key;
942 	struct btrfs_root *root = device->dev_root;
943 	struct btrfs_dev_extent *dev_extent;
944 	struct btrfs_path *path;
945 	u64 extent_end;
946 	int ret;
947 	int slot;
948 	struct extent_buffer *l;
949 
950 	*length = 0;
951 
952 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
953 		return 0;
954 
955 	path = btrfs_alloc_path();
956 	if (!path)
957 		return -ENOMEM;
958 	path->reada = 2;
959 
960 	key.objectid = device->devid;
961 	key.offset = start;
962 	key.type = BTRFS_DEV_EXTENT_KEY;
963 
964 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
965 	if (ret < 0)
966 		goto out;
967 	if (ret > 0) {
968 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
969 		if (ret < 0)
970 			goto out;
971 	}
972 
973 	while (1) {
974 		l = path->nodes[0];
975 		slot = path->slots[0];
976 		if (slot >= btrfs_header_nritems(l)) {
977 			ret = btrfs_next_leaf(root, path);
978 			if (ret == 0)
979 				continue;
980 			if (ret < 0)
981 				goto out;
982 
983 			break;
984 		}
985 		btrfs_item_key_to_cpu(l, &key, slot);
986 
987 		if (key.objectid < device->devid)
988 			goto next;
989 
990 		if (key.objectid > device->devid)
991 			break;
992 
993 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
994 			goto next;
995 
996 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
997 		extent_end = key.offset + btrfs_dev_extent_length(l,
998 								  dev_extent);
999 		if (key.offset <= start && extent_end > end) {
1000 			*length = end - start + 1;
1001 			break;
1002 		} else if (key.offset <= start && extent_end > start)
1003 			*length += extent_end - start;
1004 		else if (key.offset > start && extent_end <= end)
1005 			*length += extent_end - key.offset;
1006 		else if (key.offset > start && key.offset <= end) {
1007 			*length += end - key.offset + 1;
1008 			break;
1009 		} else if (key.offset > end)
1010 			break;
1011 
1012 next:
1013 		path->slots[0]++;
1014 	}
1015 	ret = 0;
1016 out:
1017 	btrfs_free_path(path);
1018 	return ret;
1019 }
1020 
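/*
 * Check whether [*start, *start + len) collides with a chunk allocated
 * earlier in this transaction but not yet committed to the device tree.
 * If it does, advance *start past the pending stripe and return 1.
 */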
1021 static int contains_pending_extent(struct btrfs_trans_handle *trans,
1022 				   struct btrfs_device *device,
1023 				   u64 *start, u64 len)
1024 {
1025 	struct extent_map *em;
1026 	int ret = 0;
1027 
1028 	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
1029 		struct map_lookup *map;
1030 		int i;
1031 
1032 		map = (struct map_lookup *)em->bdev;
1033 		for (i = 0; i < map->num_stripes; i++) {
1034 			if (map->stripes[i].dev != device)
1035 				continue;
1036 			if (map->stripes[i].physical >= *start + len ||
1037 			    map->stripes[i].physical + em->orig_block_len <=
1038 			    *start)
1039 				continue;
1040 			*start = map->stripes[i].physical +
1041 				em->orig_block_len;
1042 			ret = 1;
1043 		}
1044 	}
1045 
1046 	return ret;
1047 }
1048 
1049 
1050 /*
1051  * find_free_dev_extent - find free space in the specified device
1052  * @device:	the device which we search the free space in
1053  * @num_bytes:	the size of the free space that we need
1054  * @start:	store the start of the free space.
1055  * @len:	the size of the free space that we find, or the size of the max
1056  * 		free space if we don't find suitable free space
1057  *
1058  * this uses a pretty simple search; the expectation is that it is
1059  * called very infrequently and that a given device has a small number
1060  * of extents
1061  *
1062  * @start is used to store the start of the free space if we find it. But if we
1063  * don't find suitable free space, it will be used to store the start position
1064  * of the max free space.
1065  *
1066  * @len is used to store the size of the free space that we find.
1067  * But if we don't find suitable free space, it is used to store the size of
1068  * the max free space.
1069  */
1070 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1071 			 struct btrfs_device *device, u64 num_bytes,
1072 			 u64 *start, u64 *len)
1073 {
1074 	struct btrfs_key key;
1075 	struct btrfs_root *root = device->dev_root;
1076 	struct btrfs_dev_extent *dev_extent;
1077 	struct btrfs_path *path;
1078 	u64 hole_size;
1079 	u64 max_hole_start;
1080 	u64 max_hole_size;
1081 	u64 extent_end;
1082 	u64 search_start;
1083 	u64 search_end = device->total_bytes;
1084 	int ret;
1085 	int slot;
1086 	struct extent_buffer *l;
1087 
1088 	/* FIXME use last free of some kind */
1089 
1090 	/* we don't want to overwrite the superblock on the drive,
1091 	 * so we make sure to start at an offset of at least 1MB
1092 	 */
1093 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1094 
1095 	path = btrfs_alloc_path();
1096 	if (!path)
1097 		return -ENOMEM;
1098 again:
1099 	max_hole_start = search_start;
1100 	max_hole_size = 0;
1101 	hole_size = 0;
1102 
1103 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1104 		ret = -ENOSPC;
1105 		goto out;
1106 	}
1107 
1108 	path->reada = 2;
1109 	path->search_commit_root = 1;
1110 	path->skip_locking = 1;
1111 
1112 	key.objectid = device->devid;
1113 	key.offset = search_start;
1114 	key.type = BTRFS_DEV_EXTENT_KEY;
1115 
1116 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1117 	if (ret < 0)
1118 		goto out;
1119 	if (ret > 0) {
1120 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1121 		if (ret < 0)
1122 			goto out;
1123 	}
1124 
1125 	while (1) {
1126 		l = path->nodes[0];
1127 		slot = path->slots[0];
1128 		if (slot >= btrfs_header_nritems(l)) {
1129 			ret = btrfs_next_leaf(root, path);
1130 			if (ret == 0)
1131 				continue;
1132 			if (ret < 0)
1133 				goto out;
1134 
1135 			break;
1136 		}
1137 		btrfs_item_key_to_cpu(l, &key, slot);
1138 
1139 		if (key.objectid < device->devid)
1140 			goto next;
1141 
1142 		if (key.objectid > device->devid)
1143 			break;
1144 
1145 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1146 			goto next;
1147 
1148 		if (key.offset > search_start) {
1149 			hole_size = key.offset - search_start;
1150 
1151 			/*
1152 			 * Have to check before we set max_hole_start, otherwise
1153 			 * we could end up sending back this offset anyway.
1154 			 */
1155 			if (contains_pending_extent(trans, device,
1156 						    &search_start,
1157 						    hole_size))
1158 				hole_size = 0;
1159 
1160 			if (hole_size > max_hole_size) {
1161 				max_hole_start = search_start;
1162 				max_hole_size = hole_size;
1163 			}
1164 
1165 			/*
1166 			 * If this free space is greater than what we need,
1167 			 * it must be the max free space that we have found
1168 			 * so far, so max_hole_start must point to the start
1169 			 * of this free space and the length of this free space
1170 			 * is stored in max_hole_size. Thus, we return
1171 			 * max_hole_start and max_hole_size and go back to the
1172 			 * caller.
1173 			 */
1174 			if (hole_size >= num_bytes) {
1175 				ret = 0;
1176 				goto out;
1177 			}
1178 		}
1179 
1180 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1181 		extent_end = key.offset + btrfs_dev_extent_length(l,
1182 								  dev_extent);
1183 		if (extent_end > search_start)
1184 			search_start = extent_end;
1185 next:
1186 		path->slots[0]++;
1187 		cond_resched();
1188 	}
1189 
1190 	/*
1191 	 * At this point, search_start should be the end of
1192 	 * allocated dev extents, and when shrinking the device,
1193 	 * search_end may be smaller than search_start.
1194 	 */
1195 	if (search_end > search_start)
1196 		hole_size = search_end - search_start;
1197 
1198 	if (hole_size > max_hole_size) {
1199 		max_hole_start = search_start;
1200 		max_hole_size = hole_size;
1201 	}
1202 
1203 	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
1204 		btrfs_release_path(path);
1205 		goto again;
1206 	}
1207 
1208 	/* See above. */
1209 	if (hole_size < num_bytes)
1210 		ret = -ENOSPC;
1211 	else
1212 		ret = 0;
1213 
1214 out:
1215 	btrfs_free_path(path);
1216 	*start = max_hole_start;
1217 	if (len)
1218 		*len = max_hole_size;
1219 	return ret;
1220 }
1221 
1222 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1223 			  struct btrfs_device *device,
1224 			  u64 start)
1225 {
1226 	int ret;
1227 	struct btrfs_path *path;
1228 	struct btrfs_root *root = device->dev_root;
1229 	struct btrfs_key key;
1230 	struct btrfs_key found_key;
1231 	struct extent_buffer *leaf = NULL;
1232 	struct btrfs_dev_extent *extent = NULL;
1233 
1234 	path = btrfs_alloc_path();
1235 	if (!path)
1236 		return -ENOMEM;
1237 
1238 	key.objectid = device->devid;
1239 	key.offset = start;
1240 	key.type = BTRFS_DEV_EXTENT_KEY;
1241 again:
1242 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1243 	if (ret > 0) {
1244 		ret = btrfs_previous_item(root, path, key.objectid,
1245 					  BTRFS_DEV_EXTENT_KEY);
1246 		if (ret)
1247 			goto out;
1248 		leaf = path->nodes[0];
1249 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1250 		extent = btrfs_item_ptr(leaf, path->slots[0],
1251 					struct btrfs_dev_extent);
1252 		BUG_ON(found_key.offset > start || found_key.offset +
1253 		       btrfs_dev_extent_length(leaf, extent) < start);
1254 		key = found_key;
1255 		btrfs_release_path(path);
1256 		goto again;
1257 	} else if (ret == 0) {
1258 		leaf = path->nodes[0];
1259 		extent = btrfs_item_ptr(leaf, path->slots[0],
1260 					struct btrfs_dev_extent);
1261 	} else {
1262 		btrfs_error(root->fs_info, ret, "Slot search failed");
1263 		goto out;
1264 	}
1265 
1266 	if (device->bytes_used > 0) {
1267 		u64 len = btrfs_dev_extent_length(leaf, extent);
1268 		device->bytes_used -= len;
1269 		spin_lock(&root->fs_info->free_chunk_lock);
1270 		root->fs_info->free_chunk_space += len;
1271 		spin_unlock(&root->fs_info->free_chunk_lock);
1272 	}
1273 	ret = btrfs_del_item(trans, root, path);
1274 	if (ret) {
1275 		btrfs_error(root->fs_info, ret,
1276 			    "Failed to remove dev extent item");
1277 	}
1278 out:
1279 	btrfs_free_path(path);
1280 	return ret;
1281 }
1282 
1283 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1284 				  struct btrfs_device *device,
1285 				  u64 chunk_tree, u64 chunk_objectid,
1286 				  u64 chunk_offset, u64 start, u64 num_bytes)
1287 {
1288 	int ret;
1289 	struct btrfs_path *path;
1290 	struct btrfs_root *root = device->dev_root;
1291 	struct btrfs_dev_extent *extent;
1292 	struct extent_buffer *leaf;
1293 	struct btrfs_key key;
1294 
1295 	WARN_ON(!device->in_fs_metadata);
1296 	WARN_ON(device->is_tgtdev_for_dev_replace);
1297 	path = btrfs_alloc_path();
1298 	if (!path)
1299 		return -ENOMEM;
1300 
1301 	key.objectid = device->devid;
1302 	key.offset = start;
1303 	key.type = BTRFS_DEV_EXTENT_KEY;
1304 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1305 				      sizeof(*extent));
1306 	if (ret)
1307 		goto out;
1308 
1309 	leaf = path->nodes[0];
1310 	extent = btrfs_item_ptr(leaf, path->slots[0],
1311 				struct btrfs_dev_extent);
1312 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1313 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1314 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1315 
1316 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1317 		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1318 
1319 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1320 	btrfs_mark_buffer_dirty(leaf);
1321 out:
1322 	btrfs_free_path(path);
1323 	return ret;
1324 }
1325 
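/*
 * Return the logical offset just past the last mapped chunk; that is
 * where the next chunk can start.
 */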
1326 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1327 {
1328 	struct extent_map_tree *em_tree;
1329 	struct extent_map *em;
1330 	struct rb_node *n;
1331 	u64 ret = 0;
1332 
1333 	em_tree = &fs_info->mapping_tree.map_tree;
1334 	read_lock(&em_tree->lock);
1335 	n = rb_last(&em_tree->map);
1336 	if (n) {
1337 		em = rb_entry(n, struct extent_map, rb_node);
1338 		ret = em->start + em->len;
1339 	}
1340 	read_unlock(&em_tree->lock);
1341 
1342 	return ret;
1343 }
1344 
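/*
 * Look up the highest devid in the chunk tree and return one past it in
 * *devid_ret, or 1 if no device item exists yet.
 */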
1345 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1346 				    u64 *devid_ret)
1347 {
1348 	int ret;
1349 	struct btrfs_key key;
1350 	struct btrfs_key found_key;
1351 	struct btrfs_path *path;
1352 
1353 	path = btrfs_alloc_path();
1354 	if (!path)
1355 		return -ENOMEM;
1356 
1357 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1358 	key.type = BTRFS_DEV_ITEM_KEY;
1359 	key.offset = (u64)-1;
1360 
1361 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1362 	if (ret < 0)
1363 		goto error;
1364 
1365 	BUG_ON(ret == 0); /* Corruption */
1366 
1367 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1368 				  BTRFS_DEV_ITEMS_OBJECTID,
1369 				  BTRFS_DEV_ITEM_KEY);
1370 	if (ret) {
1371 		*devid_ret = 1;
1372 	} else {
1373 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1374 				      path->slots[0]);
1375 		*devid_ret = found_key.offset + 1;
1376 	}
1377 	ret = 0;
1378 error:
1379 	btrfs_free_path(path);
1380 	return ret;
1381 }
1382 
1383 /*
1384  * the device information is stored in the chunk root;
1385  * the btrfs_device struct should be fully filled in
1386  */
1387 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1388 			    struct btrfs_root *root,
1389 			    struct btrfs_device *device)
1390 {
1391 	int ret;
1392 	struct btrfs_path *path;
1393 	struct btrfs_dev_item *dev_item;
1394 	struct extent_buffer *leaf;
1395 	struct btrfs_key key;
1396 	unsigned long ptr;
1397 
1398 	root = root->fs_info->chunk_root;
1399 
1400 	path = btrfs_alloc_path();
1401 	if (!path)
1402 		return -ENOMEM;
1403 
1404 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1405 	key.type = BTRFS_DEV_ITEM_KEY;
1406 	key.offset = device->devid;
1407 
1408 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1409 				      sizeof(*dev_item));
1410 	if (ret)
1411 		goto out;
1412 
1413 	leaf = path->nodes[0];
1414 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1415 
1416 	btrfs_set_device_id(leaf, dev_item, device->devid);
1417 	btrfs_set_device_generation(leaf, dev_item, 0);
1418 	btrfs_set_device_type(leaf, dev_item, device->type);
1419 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1420 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1421 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1422 	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1423 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1424 	btrfs_set_device_group(leaf, dev_item, 0);
1425 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1426 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1427 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1428 
1429 	ptr = btrfs_device_uuid(dev_item);
1430 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1431 	ptr = btrfs_device_fsid(dev_item);
1432 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1433 	btrfs_mark_buffer_dirty(leaf);
1434 
1435 	ret = 0;
1436 out:
1437 	btrfs_free_path(path);
1438 	return ret;
1439 }
1440 
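/*
 * Delete the device item for @device from the chunk tree, using its own
 * short transaction.
 */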
1441 static int btrfs_rm_dev_item(struct btrfs_root *root,
1442 			     struct btrfs_device *device)
1443 {
1444 	int ret;
1445 	struct btrfs_path *path;
1446 	struct btrfs_key key;
1447 	struct btrfs_trans_handle *trans;
1448 
1449 	root = root->fs_info->chunk_root;
1450 
1451 	path = btrfs_alloc_path();
1452 	if (!path)
1453 		return -ENOMEM;
1454 
1455 	trans = btrfs_start_transaction(root, 0);
1456 	if (IS_ERR(trans)) {
1457 		btrfs_free_path(path);
1458 		return PTR_ERR(trans);
1459 	}
1460 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1461 	key.type = BTRFS_DEV_ITEM_KEY;
1462 	key.offset = device->devid;
1463 	lock_chunks(root);
1464 
1465 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1466 	if (ret < 0)
1467 		goto out;
1468 
1469 	if (ret > 0) {
1470 		ret = -ENOENT;
1471 		goto out;
1472 	}
1473 
1474 	ret = btrfs_del_item(trans, root, path);
1475 	if (ret)
1476 		goto out;
1477 out:
1478 	btrfs_free_path(path);
1479 	unlock_chunks(root);
1480 	btrfs_commit_transaction(trans, root);
1481 	return ret;
1482 }
1483 
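/*
 * Remove a device from a mounted filesystem: check that the remaining
 * devices still satisfy the RAID profile minimums, migrate all of the
 * device's chunks away via btrfs_shrink_device(device, 0), delete its
 * device item and finally wipe the btrfs magic from its super block.
 */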
1484 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1485 {
1486 	struct btrfs_device *device;
1487 	struct btrfs_device *next_device;
1488 	struct block_device *bdev;
1489 	struct buffer_head *bh = NULL;
1490 	struct btrfs_super_block *disk_super;
1491 	struct btrfs_fs_devices *cur_devices;
1492 	u64 all_avail;
1493 	u64 devid;
1494 	u64 num_devices;
1495 	u8 *dev_uuid;
1496 	unsigned seq;
1497 	int ret = 0;
1498 	bool clear_super = false;
1499 
1500 	mutex_lock(&uuid_mutex);
1501 
1502 	do {
1503 		seq = read_seqbegin(&root->fs_info->profiles_lock);
1504 
1505 		all_avail = root->fs_info->avail_data_alloc_bits |
1506 			    root->fs_info->avail_system_alloc_bits |
1507 			    root->fs_info->avail_metadata_alloc_bits;
1508 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1509 
1510 	num_devices = root->fs_info->fs_devices->num_devices;
1511 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1512 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1513 		WARN_ON(num_devices < 1);
1514 		num_devices--;
1515 	}
1516 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1517 
1518 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1519 		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1520 		goto out;
1521 	}
1522 
1523 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1524 		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1525 		goto out;
1526 	}
1527 
1528 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1529 	    root->fs_info->fs_devices->rw_devices <= 2) {
1530 		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1531 		goto out;
1532 	}
1533 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1534 	    root->fs_info->fs_devices->rw_devices <= 3) {
1535 		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1536 		goto out;
1537 	}
1538 
1539 	if (strcmp(device_path, "missing") == 0) {
1540 		struct list_head *devices;
1541 		struct btrfs_device *tmp;
1542 
1543 		device = NULL;
1544 		devices = &root->fs_info->fs_devices->devices;
1545 		/*
1546 		 * It is safe to read the devices since the volume_mutex
1547 		 * is held.
1548 		 */
1549 		list_for_each_entry(tmp, devices, dev_list) {
1550 			if (tmp->in_fs_metadata &&
1551 			    !tmp->is_tgtdev_for_dev_replace &&
1552 			    !tmp->bdev) {
1553 				device = tmp;
1554 				break;
1555 			}
1556 		}
1557 		bdev = NULL;
1558 		bh = NULL;
1559 		disk_super = NULL;
1560 		if (!device) {
1561 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1562 			goto out;
1563 		}
1564 	} else {
1565 		ret = btrfs_get_bdev_and_sb(device_path,
1566 					    FMODE_WRITE | FMODE_EXCL,
1567 					    root->fs_info->bdev_holder, 0,
1568 					    &bdev, &bh);
1569 		if (ret)
1570 			goto out;
1571 		disk_super = (struct btrfs_super_block *)bh->b_data;
1572 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1573 		dev_uuid = disk_super->dev_item.uuid;
1574 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1575 					   disk_super->fsid);
1576 		if (!device) {
1577 			ret = -ENOENT;
1578 			goto error_brelse;
1579 		}
1580 	}
1581 
1582 	if (device->is_tgtdev_for_dev_replace) {
1583 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1584 		goto error_brelse;
1585 	}
1586 
1587 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1588 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1589 		goto error_brelse;
1590 	}
1591 
1592 	if (device->writeable) {
1593 		lock_chunks(root);
1594 		list_del_init(&device->dev_alloc_list);
1595 		unlock_chunks(root);
1596 		root->fs_info->fs_devices->rw_devices--;
1597 		clear_super = true;
1598 	}
1599 
1600 	mutex_unlock(&uuid_mutex);
1601 	ret = btrfs_shrink_device(device, 0);
1602 	mutex_lock(&uuid_mutex);
1603 	if (ret)
1604 		goto error_undo;
1605 
1606 	/*
1607 	 * TODO: the superblock still includes this device in its num_devices
1608 	 * counter although write_all_supers() is not locked out. This
1609 	 * could give a filesystem state which requires a degraded mount.
1610 	 */
1611 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1612 	if (ret)
1613 		goto error_undo;
1614 
1615 	spin_lock(&root->fs_info->free_chunk_lock);
1616 	root->fs_info->free_chunk_space = device->total_bytes -
1617 		device->bytes_used;
1618 	spin_unlock(&root->fs_info->free_chunk_lock);
1619 
1620 	device->in_fs_metadata = 0;
1621 	btrfs_scrub_cancel_dev(root->fs_info, device);
1622 
1623 	/*
1624 	 * the device list mutex makes sure that we don't change
1625 	 * the device list while someone else is writing out all
1626 	 * the device supers. Whoever is writing all supers, should
1627 	 * lock the device list mutex before getting the number of
1628 	 * devices in the super block (super_copy). Conversely,
1629 	 * whoever updates the number of devices in the super block
1630 	 * (super_copy) should hold the device list mutex.
1631 	 */
1632 
1633 	cur_devices = device->fs_devices;
1634 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1635 	list_del_rcu(&device->dev_list);
1636 
1637 	device->fs_devices->num_devices--;
1638 	device->fs_devices->total_devices--;
1639 
1640 	if (device->missing)
1641 		root->fs_info->fs_devices->missing_devices--;
1642 
1643 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1644 				 struct btrfs_device, dev_list);
1645 	if (device->bdev == root->fs_info->sb->s_bdev)
1646 		root->fs_info->sb->s_bdev = next_device->bdev;
1647 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1648 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1649 
1650 	if (device->bdev)
1651 		device->fs_devices->open_devices--;
1652 
1653 	call_rcu(&device->rcu, free_device);
1654 
1655 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1656 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1657 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1658 
1659 	if (cur_devices->open_devices == 0) {
1660 		struct btrfs_fs_devices *fs_devices;
1661 		fs_devices = root->fs_info->fs_devices;
1662 		while (fs_devices) {
1663 			if (fs_devices->seed == cur_devices)
1664 				break;
1665 			fs_devices = fs_devices->seed;
1666 		}
1667 		fs_devices->seed = cur_devices->seed;
1668 		cur_devices->seed = NULL;
1669 		lock_chunks(root);
1670 		__btrfs_close_devices(cur_devices);
1671 		unlock_chunks(root);
1672 		free_fs_devices(cur_devices);
1673 	}
1674 
1675 	root->fs_info->num_tolerated_disk_barrier_failures =
1676 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1677 
1678 	/*
1679 	 * at this point, the device is zero sized.  We want to
1680 	 * remove it from the devices list and zero out the old super
1681 	 */
1682 	if (clear_super && disk_super) {
1683 		/* make sure this device isn't detected as part of
1684 		 * the FS anymore
1685 		 */
1686 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1687 		set_buffer_dirty(bh);
1688 		sync_dirty_buffer(bh);
1689 	}
1690 
1691 	ret = 0;
1692 
1693 	/* Notify udev that device has changed */
1694 	if (bdev)
1695 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1696 
1697 error_brelse:
1698 	brelse(bh);
1699 	if (bdev)
1700 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1701 out:
1702 	mutex_unlock(&uuid_mutex);
1703 	return ret;
1704 error_undo:
1705 	if (device->writeable) {
1706 		lock_chunks(root);
1707 		list_add(&device->dev_alloc_list,
1708 			 &root->fs_info->fs_devices->alloc_list);
1709 		unlock_chunks(root);
1710 		root->fs_info->fs_devices->rw_devices++;
1711 	}
1712 	goto error_brelse;
1713 }
1714 
1715 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1716 				 struct btrfs_device *srcdev)
1717 {
1718 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1719 	list_del_rcu(&srcdev->dev_list);
1720 	list_del_rcu(&srcdev->dev_alloc_list);
1721 	fs_info->fs_devices->num_devices--;
1722 	if (srcdev->missing) {
1723 		fs_info->fs_devices->missing_devices--;
1724 		fs_info->fs_devices->rw_devices++;
1725 	}
1726 	if (srcdev->can_discard)
1727 		fs_info->fs_devices->num_can_discard--;
1728 	if (srcdev->bdev)
1729 		fs_info->fs_devices->open_devices--;
1730 
1731 	call_rcu(&srcdev->rcu, free_device);
1732 }
1733 
1734 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1735 				      struct btrfs_device *tgtdev)
1736 {
1737 	struct btrfs_device *next_device;
1738 
1739 	WARN_ON(!tgtdev);
1740 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1741 	if (tgtdev->bdev) {
1742 		btrfs_scratch_superblock(tgtdev);
1743 		fs_info->fs_devices->open_devices--;
1744 	}
1745 	fs_info->fs_devices->num_devices--;
1746 	if (tgtdev->can_discard)
1747 		fs_info->fs_devices->num_can_discard++;
1748 
1749 	next_device = list_entry(fs_info->fs_devices->devices.next,
1750 				 struct btrfs_device, dev_list);
1751 	if (tgtdev->bdev == fs_info->sb->s_bdev)
1752 		fs_info->sb->s_bdev = next_device->bdev;
1753 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1754 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1755 	list_del_rcu(&tgtdev->dev_list);
1756 
1757 	call_rcu(&tgtdev->rcu, free_device);
1758 
1759 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1760 }
1761 
1762 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1763 				     struct btrfs_device **device)
1764 {
1765 	int ret = 0;
1766 	struct btrfs_super_block *disk_super;
1767 	u64 devid;
1768 	u8 *dev_uuid;
1769 	struct block_device *bdev;
1770 	struct buffer_head *bh;
1771 
1772 	*device = NULL;
1773 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1774 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1775 	if (ret)
1776 		return ret;
1777 	disk_super = (struct btrfs_super_block *)bh->b_data;
1778 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1779 	dev_uuid = disk_super->dev_item.uuid;
1780 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1781 				    disk_super->fsid);
1782 	brelse(bh);
1783 	if (!*device)
1784 		ret = -ENOENT;
1785 	blkdev_put(bdev, FMODE_READ);
1786 	return ret;
1787 }
1788 
1789 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1790 					 char *device_path,
1791 					 struct btrfs_device **device)
1792 {
1793 	*device = NULL;
1794 	if (strcmp(device_path, "missing") == 0) {
1795 		struct list_head *devices;
1796 		struct btrfs_device *tmp;
1797 
1798 		devices = &root->fs_info->fs_devices->devices;
1799 		/*
1800 		 * It is safe to read the devices since the volume_mutex
1801 		 * is held by the caller.
1802 		 */
1803 		list_for_each_entry(tmp, devices, dev_list) {
1804 			if (tmp->in_fs_metadata && !tmp->bdev) {
1805 				*device = tmp;
1806 				break;
1807 			}
1808 		}
1809 
1810 		if (!*device) {
1811 			pr_err("btrfs: no missing device found\n");
1812 			return -ENOENT;
1813 		}
1814 
1815 		return 0;
1816 	} else {
1817 		return btrfs_find_device_by_path(root, device_path, device);
1818 	}
1819 }
1820 
1821 /*
1822  * does all the dirty work required for changing the file system's UUID.
1823  */
1824 static int btrfs_prepare_sprout(struct btrfs_root *root)
1825 {
1826 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1827 	struct btrfs_fs_devices *old_devices;
1828 	struct btrfs_fs_devices *seed_devices;
1829 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1830 	struct btrfs_device *device;
1831 	u64 super_flags;
1832 
1833 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1834 	if (!fs_devices->seeding)
1835 		return -EINVAL;
1836 
1837 	seed_devices = __alloc_fs_devices();
1838 	if (IS_ERR(seed_devices))
1839 		return PTR_ERR(seed_devices);
1840 
1841 	old_devices = clone_fs_devices(fs_devices);
1842 	if (IS_ERR(old_devices)) {
1843 		kfree(seed_devices);
1844 		return PTR_ERR(old_devices);
1845 	}
1846 
1847 	list_add(&old_devices->list, &fs_uuids);
1848 
1849 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1850 	seed_devices->opened = 1;
1851 	INIT_LIST_HEAD(&seed_devices->devices);
1852 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1853 	mutex_init(&seed_devices->device_list_mutex);
1854 
1855 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1856 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1857 			      synchronize_rcu);
1858 
1859 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1860 	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1861 		device->fs_devices = seed_devices;
1862 	}
1863 
1864 	fs_devices->seeding = 0;
1865 	fs_devices->num_devices = 0;
1866 	fs_devices->open_devices = 0;
1867 	fs_devices->total_devices = 0;
1868 	fs_devices->seed = seed_devices;
1869 
1870 	generate_random_uuid(fs_devices->fsid);
1871 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1872 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1873 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1874 
1875 	super_flags = btrfs_super_flags(disk_super) &
1876 		      ~BTRFS_SUPER_FLAG_SEEDING;
1877 	btrfs_set_super_flags(disk_super, super_flags);
1878 
1879 	return 0;
1880 }
1881 
1882 /*
1883  * Store the expected generation for seed devices in device items.
1884  */
1885 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1886 			       struct btrfs_root *root)
1887 {
1888 	struct btrfs_path *path;
1889 	struct extent_buffer *leaf;
1890 	struct btrfs_dev_item *dev_item;
1891 	struct btrfs_device *device;
1892 	struct btrfs_key key;
1893 	u8 fs_uuid[BTRFS_UUID_SIZE];
1894 	u8 dev_uuid[BTRFS_UUID_SIZE];
1895 	u64 devid;
1896 	int ret;
1897 
1898 	path = btrfs_alloc_path();
1899 	if (!path)
1900 		return -ENOMEM;
1901 
1902 	root = root->fs_info->chunk_root;
1903 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1904 	key.offset = 0;
1905 	key.type = BTRFS_DEV_ITEM_KEY;
1906 
1907 	while (1) {
1908 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1909 		if (ret < 0)
1910 			goto error;
1911 
1912 		leaf = path->nodes[0];
1913 next_slot:
1914 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1915 			ret = btrfs_next_leaf(root, path);
1916 			if (ret > 0)
1917 				break;
1918 			if (ret < 0)
1919 				goto error;
1920 			leaf = path->nodes[0];
1921 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1922 			btrfs_release_path(path);
1923 			continue;
1924 		}
1925 
1926 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1927 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1928 		    key.type != BTRFS_DEV_ITEM_KEY)
1929 			break;
1930 
1931 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1932 					  struct btrfs_dev_item);
1933 		devid = btrfs_device_id(leaf, dev_item);
1934 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
1935 				   BTRFS_UUID_SIZE);
1936 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
1937 				   BTRFS_UUID_SIZE);
1938 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1939 					   fs_uuid);
1940 		BUG_ON(!device); /* Logic error */
1941 
1942 		if (device->fs_devices->seeding) {
1943 			btrfs_set_device_generation(leaf, dev_item,
1944 						    device->generation);
1945 			btrfs_mark_buffer_dirty(leaf);
1946 		}
1947 
1948 		path->slots[0]++;
1949 		goto next_slot;
1950 	}
1951 	ret = 0;
1952 error:
1953 	btrfs_free_path(path);
1954 	return ret;
1955 }
1956 
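/*
 * Add a new device to a mounted filesystem: open it exclusively, link it
 * into the per-FS device lists and account it in the superblock totals.
 * If the mounted filesystem is a seed, this also sprouts a new writable
 * filesystem on top of it via btrfs_prepare_sprout() and
 * btrfs_finish_sprout().
 */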
1957 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1958 {
1959 	struct request_queue *q;
1960 	struct btrfs_trans_handle *trans;
1961 	struct btrfs_device *device;
1962 	struct block_device *bdev;
1963 	struct list_head *devices;
1964 	struct super_block *sb = root->fs_info->sb;
1965 	struct rcu_string *name;
1966 	u64 total_bytes;
1967 	int seeding_dev = 0;
1968 	int ret = 0;
1969 
1970 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1971 		return -EROFS;
1972 
1973 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1974 				  root->fs_info->bdev_holder);
1975 	if (IS_ERR(bdev))
1976 		return PTR_ERR(bdev);
1977 
1978 	if (root->fs_info->fs_devices->seeding) {
1979 		seeding_dev = 1;
1980 		down_write(&sb->s_umount);
1981 		mutex_lock(&uuid_mutex);
1982 	}
1983 
1984 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1985 
1986 	devices = &root->fs_info->fs_devices->devices;
1987 
1988 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1989 	list_for_each_entry(device, devices, dev_list) {
1990 		if (device->bdev == bdev) {
1991 			ret = -EEXIST;
1992 			mutex_unlock(
1993 				&root->fs_info->fs_devices->device_list_mutex);
1994 			goto error;
1995 		}
1996 	}
1997 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1998 
1999 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2000 	if (IS_ERR(device)) {
2001 		/* we can safely leave the fs_devices entry around */
2002 		ret = PTR_ERR(device);
2003 		goto error;
2004 	}
2005 
2006 	name = rcu_string_strdup(device_path, GFP_NOFS);
2007 	if (!name) {
2008 		kfree(device);
2009 		ret = -ENOMEM;
2010 		goto error;
2011 	}
2012 	rcu_assign_pointer(device->name, name);
2013 
2014 	trans = btrfs_start_transaction(root, 0);
2015 	if (IS_ERR(trans)) {
2016 		rcu_string_free(device->name);
2017 		kfree(device);
2018 		ret = PTR_ERR(trans);
2019 		goto error;
2020 	}
2021 
2022 	lock_chunks(root);
2023 
2024 	q = bdev_get_queue(bdev);
2025 	if (blk_queue_discard(q))
2026 		device->can_discard = 1;
2027 	device->writeable = 1;
2028 	device->generation = trans->transid;
2029 	device->io_width = root->sectorsize;
2030 	device->io_align = root->sectorsize;
2031 	device->sector_size = root->sectorsize;
2032 	device->total_bytes = i_size_read(bdev->bd_inode);
2033 	device->disk_total_bytes = device->total_bytes;
2034 	device->dev_root = root->fs_info->dev_root;
2035 	device->bdev = bdev;
2036 	device->in_fs_metadata = 1;
2037 	device->is_tgtdev_for_dev_replace = 0;
2038 	device->mode = FMODE_EXCL;
2039 	set_blocksize(device->bdev, 4096);
2040 
2041 	if (seeding_dev) {
2042 		sb->s_flags &= ~MS_RDONLY;
2043 		ret = btrfs_prepare_sprout(root);
2044 		BUG_ON(ret); /* -ENOMEM */
2045 	}
2046 
2047 	device->fs_devices = root->fs_info->fs_devices;
2048 
2049 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2050 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2051 	list_add(&device->dev_alloc_list,
2052 		 &root->fs_info->fs_devices->alloc_list);
2053 	root->fs_info->fs_devices->num_devices++;
2054 	root->fs_info->fs_devices->open_devices++;
2055 	root->fs_info->fs_devices->rw_devices++;
2056 	root->fs_info->fs_devices->total_devices++;
2057 	if (device->can_discard)
2058 		root->fs_info->fs_devices->num_can_discard++;
2059 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2060 
2061 	spin_lock(&root->fs_info->free_chunk_lock);
2062 	root->fs_info->free_chunk_space += device->total_bytes;
2063 	spin_unlock(&root->fs_info->free_chunk_lock);
2064 
2065 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2066 		root->fs_info->fs_devices->rotating = 1;
2067 
2068 	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2069 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2070 				    total_bytes + device->total_bytes);
2071 
2072 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2073 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2074 				    total_bytes + 1);
2075 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2076 
2077 	if (seeding_dev) {
2078 		ret = init_first_rw_device(trans, root, device);
2079 		if (ret) {
2080 			btrfs_abort_transaction(trans, root, ret);
2081 			goto error_trans;
2082 		}
2083 		ret = btrfs_finish_sprout(trans, root);
2084 		if (ret) {
2085 			btrfs_abort_transaction(trans, root, ret);
2086 			goto error_trans;
2087 		}
2088 	} else {
2089 		ret = btrfs_add_device(trans, root, device);
2090 		if (ret) {
2091 			btrfs_abort_transaction(trans, root, ret);
2092 			goto error_trans;
2093 		}
2094 	}
2095 
2096 	/*
2097 	 * we've got more storage, clear any full flags on the space
2098 	 * infos
2099 	 */
2100 	btrfs_clear_space_info_full(root->fs_info);
2101 
2102 	unlock_chunks(root);
2103 	root->fs_info->num_tolerated_disk_barrier_failures =
2104 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2105 	ret = btrfs_commit_transaction(trans, root);
2106 
2107 	if (seeding_dev) {
2108 		mutex_unlock(&uuid_mutex);
2109 		up_write(&sb->s_umount);
2110 
2111 		if (ret) /* transaction commit */
2112 			return ret;
2113 
2114 		ret = btrfs_relocate_sys_chunks(root);
2115 		if (ret < 0)
2116 			btrfs_error(root->fs_info, ret,
2117 				    "Failed to relocate sys chunks after "
2118 				    "device initialization. This can be fixed "
2119 				    "using the \"btrfs balance\" command.");
2120 		trans = btrfs_attach_transaction(root);
2121 		if (IS_ERR(trans)) {
2122 			if (PTR_ERR(trans) == -ENOENT)
2123 				return 0;
2124 			return PTR_ERR(trans);
2125 		}
2126 		ret = btrfs_commit_transaction(trans, root);
2127 	}
2128 
2129 	return ret;
2130 
2131 error_trans:
2132 	unlock_chunks(root);
2133 	btrfs_end_transaction(trans, root);
2134 	rcu_string_free(device->name);
2135 	kfree(device);
2136 error:
2137 	blkdev_put(bdev, FMODE_EXCL);
2138 	if (seeding_dev) {
2139 		mutex_unlock(&uuid_mutex);
2140 		up_write(&sb->s_umount);
2141 	}
2142 	return ret;
2143 }
2144 
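/*
 * Initialize the target device of a device-replace operation.  Unlike
 * btrfs_init_new_device() the target gets the reserved devid
 * BTRFS_DEV_REPLACE_DEVID, is not put on the alloc_list and gets no
 * device item in the chunk tree; it is populated by the dev-replace
 * copy machinery instead.
 */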
2145 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2146 				  struct btrfs_device **device_out)
2147 {
2148 	struct request_queue *q;
2149 	struct btrfs_device *device;
2150 	struct block_device *bdev;
2151 	struct btrfs_fs_info *fs_info = root->fs_info;
2152 	struct list_head *devices;
2153 	struct rcu_string *name;
2154 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2155 	int ret = 0;
2156 
2157 	*device_out = NULL;
2158 	if (fs_info->fs_devices->seeding)
2159 		return -EINVAL;
2160 
2161 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2162 				  fs_info->bdev_holder);
2163 	if (IS_ERR(bdev))
2164 		return PTR_ERR(bdev);
2165 
2166 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2167 
2168 	devices = &fs_info->fs_devices->devices;
2169 	list_for_each_entry(device, devices, dev_list) {
2170 		if (device->bdev == bdev) {
2171 			ret = -EEXIST;
2172 			goto error;
2173 		}
2174 	}
2175 
2176 	device = btrfs_alloc_device(NULL, &devid, NULL);
2177 	if (IS_ERR(device)) {
2178 		ret = PTR_ERR(device);
2179 		goto error;
2180 	}
2181 
2182 	name = rcu_string_strdup(device_path, GFP_NOFS);
2183 	if (!name) {
2184 		kfree(device);
2185 		ret = -ENOMEM;
2186 		goto error;
2187 	}
2188 	rcu_assign_pointer(device->name, name);
2189 
2190 	q = bdev_get_queue(bdev);
2191 	if (blk_queue_discard(q))
2192 		device->can_discard = 1;
2193 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2194 	device->writeable = 1;
2195 	device->generation = 0;
2196 	device->io_width = root->sectorsize;
2197 	device->io_align = root->sectorsize;
2198 	device->sector_size = root->sectorsize;
2199 	device->total_bytes = i_size_read(bdev->bd_inode);
2200 	device->disk_total_bytes = device->total_bytes;
2201 	device->dev_root = fs_info->dev_root;
2202 	device->bdev = bdev;
2203 	device->in_fs_metadata = 1;
2204 	device->is_tgtdev_for_dev_replace = 1;
2205 	device->mode = FMODE_EXCL;
2206 	set_blocksize(device->bdev, 4096);
2207 	device->fs_devices = fs_info->fs_devices;
2208 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2209 	fs_info->fs_devices->num_devices++;
2210 	fs_info->fs_devices->open_devices++;
2211 	if (device->can_discard)
2212 		fs_info->fs_devices->num_can_discard++;
2213 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2214 
2215 	*device_out = device;
2216 	return ret;
2217 
2218 error:
2219 	blkdev_put(bdev, FMODE_EXCL);
2220 	return ret;
2221 }
2222 
2223 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2224 					      struct btrfs_device *tgtdev)
2225 {
2226 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2227 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2228 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2229 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2230 	tgtdev->dev_root = fs_info->dev_root;
2231 	tgtdev->in_fs_metadata = 1;
2232 }
2233 
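/*
 * Write the in-memory state of @device back into its DEV_ITEM in the
 * chunk tree.  Note that the on-disk size is taken from
 * disk_total_bytes, which is what lets btrfs_shrink_device() publish
 * the new size only after all extents beyond it have been relocated.
 */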
2234 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2235 					struct btrfs_device *device)
2236 {
2237 	int ret;
2238 	struct btrfs_path *path;
2239 	struct btrfs_root *root;
2240 	struct btrfs_dev_item *dev_item;
2241 	struct extent_buffer *leaf;
2242 	struct btrfs_key key;
2243 
2244 	root = device->dev_root->fs_info->chunk_root;
2245 
2246 	path = btrfs_alloc_path();
2247 	if (!path)
2248 		return -ENOMEM;
2249 
2250 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2251 	key.type = BTRFS_DEV_ITEM_KEY;
2252 	key.offset = device->devid;
2253 
2254 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2255 	if (ret < 0)
2256 		goto out;
2257 
2258 	if (ret > 0) {
2259 		ret = -ENOENT;
2260 		goto out;
2261 	}
2262 
2263 	leaf = path->nodes[0];
2264 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2265 
2266 	btrfs_set_device_id(leaf, dev_item, device->devid);
2267 	btrfs_set_device_type(leaf, dev_item, device->type);
2268 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2269 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2270 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2271 	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2272 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2273 	btrfs_mark_buffer_dirty(leaf);
2274 
2275 out:
2276 	btrfs_free_path(path);
2277 	return ret;
2278 }
2279 
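/*
 * Grow @device to @new_size and bump the superblock total_bytes by the
 * difference.  The caller must hold the chunk mutex; btrfs_grow_device()
 * below is the locked wrapper.
 */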
2280 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2281 		      struct btrfs_device *device, u64 new_size)
2282 {
2283 	struct btrfs_super_block *super_copy =
2284 		device->dev_root->fs_info->super_copy;
2285 	u64 old_total = btrfs_super_total_bytes(super_copy);
2286 	u64 diff = new_size - device->total_bytes;
2287 
2288 	if (!device->writeable)
2289 		return -EACCES;
2290 	if (new_size <= device->total_bytes ||
2291 	    device->is_tgtdev_for_dev_replace)
2292 		return -EINVAL;
2293 
2294 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2295 	device->fs_devices->total_rw_bytes += diff;
2296 
2297 	device->total_bytes = new_size;
2298 	device->disk_total_bytes = new_size;
2299 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2300 
2301 	return btrfs_update_device(trans, device);
2302 }
2303 
2304 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2305 		      struct btrfs_device *device, u64 new_size)
2306 {
2307 	int ret;
2308 	lock_chunks(device->dev_root);
2309 	ret = __btrfs_grow_device(trans, device, new_size);
2310 	unlock_chunks(device->dev_root);
2311 	return ret;
2312 }
2313 
2314 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2315 			    struct btrfs_root *root,
2316 			    u64 chunk_tree, u64 chunk_objectid,
2317 			    u64 chunk_offset)
2318 {
2319 	int ret;
2320 	struct btrfs_path *path;
2321 	struct btrfs_key key;
2322 
2323 	root = root->fs_info->chunk_root;
2324 	path = btrfs_alloc_path();
2325 	if (!path)
2326 		return -ENOMEM;
2327 
2328 	key.objectid = chunk_objectid;
2329 	key.offset = chunk_offset;
2330 	key.type = BTRFS_CHUNK_ITEM_KEY;
2331 
2332 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2333 	if (ret < 0)
2334 		goto out;
2335 	else if (ret > 0) { /* Logic error or corruption */
2336 		btrfs_error(root->fs_info, -ENOENT,
2337 			    "Failed lookup while freeing chunk.");
2338 		ret = -ENOENT;
2339 		goto out;
2340 	}
2341 
2342 	ret = btrfs_del_item(trans, root, path);
2343 	if (ret < 0)
2344 		btrfs_error(root->fs_info, ret,
2345 			    "Failed to delete chunk item.");
2346 out:
2347 	btrfs_free_path(path);
2348 	return ret;
2349 }
2350 
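/*
 * Remove a chunk from the superblock's sys_chunk_array.  The array is a
 * packed sequence of (struct btrfs_disk_key, struct btrfs_chunk with
 * stripes) pairs, so deletion is a memmove() of everything behind the
 * matching entry plus a shrink of the stored array size.
 */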
2351 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2352 			       u64 chunk_offset)
2353 {
2354 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2355 	struct btrfs_disk_key *disk_key;
2356 	struct btrfs_chunk *chunk;
2357 	u8 *ptr;
2358 	int ret = 0;
2359 	u32 num_stripes;
2360 	u32 array_size;
2361 	u32 len = 0;
2362 	u32 cur;
2363 	struct btrfs_key key;
2364 
2365 	array_size = btrfs_super_sys_array_size(super_copy);
2366 
2367 	ptr = super_copy->sys_chunk_array;
2368 	cur = 0;
2369 
2370 	while (cur < array_size) {
2371 		disk_key = (struct btrfs_disk_key *)ptr;
2372 		btrfs_disk_key_to_cpu(&key, disk_key);
2373 
2374 		len = sizeof(*disk_key);
2375 
2376 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2377 			chunk = (struct btrfs_chunk *)(ptr + len);
2378 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2379 			len += btrfs_chunk_item_size(num_stripes);
2380 		} else {
2381 			ret = -EIO;
2382 			break;
2383 		}
2384 		if (key.objectid == chunk_objectid &&
2385 		    key.offset == chunk_offset) {
2386 			memmove(ptr, ptr + len, array_size - (cur + len));
2387 			array_size -= len;
2388 			btrfs_set_super_sys_array_size(super_copy, array_size);
2389 		} else {
2390 			ptr += len;
2391 			cur += len;
2392 		}
2393 	}
2394 	return ret;
2395 }
2396 
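/*
 * Relocate and delete a chunk: first move all extents out of the block
 * group, then in one transaction drop the device extents, the chunk
 * item (plus its sys_chunk_array copy for SYSTEM chunks), the block
 * group and the extent mapping.
 */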
2397 static int btrfs_relocate_chunk(struct btrfs_root *root,
2398 			 u64 chunk_tree, u64 chunk_objectid,
2399 			 u64 chunk_offset)
2400 {
2401 	struct extent_map_tree *em_tree;
2402 	struct btrfs_root *extent_root;
2403 	struct btrfs_trans_handle *trans;
2404 	struct extent_map *em;
2405 	struct map_lookup *map;
2406 	int ret;
2407 	int i;
2408 
2409 	root = root->fs_info->chunk_root;
2410 	extent_root = root->fs_info->extent_root;
2411 	em_tree = &root->fs_info->mapping_tree.map_tree;
2412 
2413 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2414 	if (ret)
2415 		return -ENOSPC;
2416 
2417 	/* step one, relocate all the extents inside this chunk */
2418 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2419 	if (ret)
2420 		return ret;
2421 
2422 	trans = btrfs_start_transaction(root, 0);
2423 	if (IS_ERR(trans)) {
2424 		ret = PTR_ERR(trans);
2425 		btrfs_std_error(root->fs_info, ret);
2426 		return ret;
2427 	}
2428 
2429 	lock_chunks(root);
2430 
2431 	/*
2432 	 * step two, delete the device extents and the
2433 	 * chunk tree entries
2434 	 */
2435 	read_lock(&em_tree->lock);
2436 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2437 	read_unlock(&em_tree->lock);
2438 
2439 	BUG_ON(!em || em->start > chunk_offset ||
2440 	       em->start + em->len < chunk_offset);
2441 	map = (struct map_lookup *)em->bdev;
2442 
2443 	for (i = 0; i < map->num_stripes; i++) {
2444 		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2445 					    map->stripes[i].physical);
2446 		BUG_ON(ret);
2447 
2448 		if (map->stripes[i].dev) {
2449 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2450 			BUG_ON(ret);
2451 		}
2452 	}
2453 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2454 			       chunk_offset);
2455 
2456 	BUG_ON(ret);
2457 
2458 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2459 
2460 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2461 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2462 		BUG_ON(ret);
2463 	}
2464 
2465 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2466 	BUG_ON(ret);
2467 
2468 	write_lock(&em_tree->lock);
2469 	remove_extent_mapping(em_tree, em);
2470 	write_unlock(&em_tree->lock);
2471 
2472 	kfree(map);
2473 	em->bdev = NULL;
2474 
2475 	/* once for the tree */
2476 	free_extent_map(em);
2477 	/* once for us */
2478 	free_extent_map(em);
2479 
2480 	unlock_chunks(root);
2481 	btrfs_end_transaction(trans, root);
2482 	return 0;
2483 }
2484 
2485 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2486 {
2487 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2488 	struct btrfs_path *path;
2489 	struct extent_buffer *leaf;
2490 	struct btrfs_chunk *chunk;
2491 	struct btrfs_key key;
2492 	struct btrfs_key found_key;
2493 	u64 chunk_tree = chunk_root->root_key.objectid;
2494 	u64 chunk_type;
2495 	bool retried = false;
2496 	int failed = 0;
2497 	int ret;
2498 
2499 	path = btrfs_alloc_path();
2500 	if (!path)
2501 		return -ENOMEM;
2502 
2503 again:
2504 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2505 	key.offset = (u64)-1;
2506 	key.type = BTRFS_CHUNK_ITEM_KEY;
2507 
2508 	while (1) {
2509 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2510 		if (ret < 0)
2511 			goto error;
2512 		BUG_ON(ret == 0); /* Corruption */
2513 
2514 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2515 					  key.type);
2516 		if (ret < 0)
2517 			goto error;
2518 		if (ret > 0)
2519 			break;
2520 
2521 		leaf = path->nodes[0];
2522 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2523 
2524 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2525 				       struct btrfs_chunk);
2526 		chunk_type = btrfs_chunk_type(leaf, chunk);
2527 		btrfs_release_path(path);
2528 
2529 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2530 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2531 						   found_key.objectid,
2532 						   found_key.offset);
2533 			if (ret == -ENOSPC)
2534 				failed++;
2535 			else if (ret)
2536 				BUG();
2537 		}
2538 
2539 		if (found_key.offset == 0)
2540 			break;
2541 		key.offset = found_key.offset - 1;
2542 	}
2543 	ret = 0;
2544 	if (failed && !retried) {
2545 		failed = 0;
2546 		retried = true;
2547 		goto again;
2548 	} else if (failed && retried) {
2549 		WARN_ON(1);
2550 		ret = -ENOSPC;
2551 	}
2552 error:
2553 	btrfs_free_path(path);
2554 	return ret;
2555 }
2556 
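/*
 * Persist the balance control as the BALANCE_ITEM in the tree root so
 * an interrupted balance can be picked up again on the next mount, see
 * btrfs_recover_balance().
 */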
2557 static int insert_balance_item(struct btrfs_root *root,
2558 			       struct btrfs_balance_control *bctl)
2559 {
2560 	struct btrfs_trans_handle *trans;
2561 	struct btrfs_balance_item *item;
2562 	struct btrfs_disk_balance_args disk_bargs;
2563 	struct btrfs_path *path;
2564 	struct extent_buffer *leaf;
2565 	struct btrfs_key key;
2566 	int ret, err;
2567 
2568 	path = btrfs_alloc_path();
2569 	if (!path)
2570 		return -ENOMEM;
2571 
2572 	trans = btrfs_start_transaction(root, 0);
2573 	if (IS_ERR(trans)) {
2574 		btrfs_free_path(path);
2575 		return PTR_ERR(trans);
2576 	}
2577 
2578 	key.objectid = BTRFS_BALANCE_OBJECTID;
2579 	key.type = BTRFS_BALANCE_ITEM_KEY;
2580 	key.offset = 0;
2581 
2582 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2583 				      sizeof(*item));
2584 	if (ret)
2585 		goto out;
2586 
2587 	leaf = path->nodes[0];
2588 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2589 
2590 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2591 
2592 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2593 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2594 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2595 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2596 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2597 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2598 
2599 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2600 
2601 	btrfs_mark_buffer_dirty(leaf);
2602 out:
2603 	btrfs_free_path(path);
2604 	err = btrfs_commit_transaction(trans, root);
2605 	if (err && !ret)
2606 		ret = err;
2607 	return ret;
2608 }
2609 
2610 static int del_balance_item(struct btrfs_root *root)
2611 {
2612 	struct btrfs_trans_handle *trans;
2613 	struct btrfs_path *path;
2614 	struct btrfs_key key;
2615 	int ret, err;
2616 
2617 	path = btrfs_alloc_path();
2618 	if (!path)
2619 		return -ENOMEM;
2620 
2621 	trans = btrfs_start_transaction(root, 0);
2622 	if (IS_ERR(trans)) {
2623 		btrfs_free_path(path);
2624 		return PTR_ERR(trans);
2625 	}
2626 
2627 	key.objectid = BTRFS_BALANCE_OBJECTID;
2628 	key.type = BTRFS_BALANCE_ITEM_KEY;
2629 	key.offset = 0;
2630 
2631 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2632 	if (ret < 0)
2633 		goto out;
2634 	if (ret > 0) {
2635 		ret = -ENOENT;
2636 		goto out;
2637 	}
2638 
2639 	ret = btrfs_del_item(trans, root, path);
2640 out:
2641 	btrfs_free_path(path);
2642 	err = btrfs_commit_transaction(trans, root);
2643 	if (err && !ret)
2644 		ret = err;
2645 	return ret;
2646 }
2647 
2648 /*
2649  * This is a heuristic used to reduce the number of chunks balanced on
2650  * resume after balance was interrupted.
2651  */
2652 static void update_balance_args(struct btrfs_balance_control *bctl)
2653 {
2654 	/*
2655 	 * Turn on soft mode for chunk types that were being converted.
2656 	 */
2657 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2658 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2659 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2660 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2661 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2662 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2663 
2664 	/*
2665 	 * Turn on the usage filter if it is not already in use.  The idea is
2666 	 * that chunks that we have already balanced should be
2667 	 * reasonably full.  Don't do it for chunks that are being
2668 	 * converted - that will keep us from relocating unconverted
2669 	 * (albeit full) chunks.
2670 	 */
2671 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2672 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2673 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2674 		bctl->data.usage = 90;
2675 	}
2676 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2677 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2678 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2679 		bctl->sys.usage = 90;
2680 	}
2681 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2682 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2683 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2684 		bctl->meta.usage = 90;
2685 	}
2686 }
2687 
2688 /*
2689  * Should be called with both balance and volume mutexes held to
2690  * serialize other volume operations (add_dev/rm_dev/resize) with
2691  * restriper.  Same goes for unset_balance_control.
2692  */
2693 static void set_balance_control(struct btrfs_balance_control *bctl)
2694 {
2695 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2696 
2697 	BUG_ON(fs_info->balance_ctl);
2698 
2699 	spin_lock(&fs_info->balance_lock);
2700 	fs_info->balance_ctl = bctl;
2701 	spin_unlock(&fs_info->balance_lock);
2702 }
2703 
2704 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2705 {
2706 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2707 
2708 	BUG_ON(!fs_info->balance_ctl);
2709 
2710 	spin_lock(&fs_info->balance_lock);
2711 	fs_info->balance_ctl = NULL;
2712 	spin_unlock(&fs_info->balance_lock);
2713 
2714 	kfree(bctl);
2715 }
2716 
2717 /*
2718  * Balance filters.  Return 1 if chunk should be filtered out
2719  * (should not be balanced).
2720  */
2721 static int chunk_profiles_filter(u64 chunk_type,
2722 				 struct btrfs_balance_args *bargs)
2723 {
2724 	chunk_type = chunk_to_extended(chunk_type) &
2725 				BTRFS_EXTENDED_PROFILE_MASK;
2726 
2727 	if (bargs->profiles & chunk_type)
2728 		return 0;
2729 
2730 	return 1;
2731 }
2732 
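/*
 * Usage filter: return 1 (filter out, i.e. do not balance) when a chunk
 * is at least as full as the user-supplied threshold.  div_factor_fine()
 * scales the chunk size by bargs->usage percent, so e.g. usage=90 on a
 * 1 GiB chunk gives a threshold of roughly 922 MiB; chunks used less
 * than that are balanced.
 */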
2733 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2734 			      struct btrfs_balance_args *bargs)
2735 {
2736 	struct btrfs_block_group_cache *cache;
2737 	u64 chunk_used, user_thresh;
2738 	int ret = 1;
2739 
2740 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2741 	chunk_used = btrfs_block_group_used(&cache->item);
2742 
2743 	if (bargs->usage == 0)
2744 		user_thresh = 1;
2745 	else if (bargs->usage > 100)
2746 		user_thresh = cache->key.offset;
2747 	else
2748 		user_thresh = div_factor_fine(cache->key.offset,
2749 					      bargs->usage);
2750 
2751 	if (chunk_used < user_thresh)
2752 		ret = 0;
2753 
2754 	btrfs_put_block_group(cache);
2755 	return ret;
2756 }
2757 
2758 static int chunk_devid_filter(struct extent_buffer *leaf,
2759 			      struct btrfs_chunk *chunk,
2760 			      struct btrfs_balance_args *bargs)
2761 {
2762 	struct btrfs_stripe *stripe;
2763 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2764 	int i;
2765 
2766 	for (i = 0; i < num_stripes; i++) {
2767 		stripe = btrfs_stripe_nr(chunk, i);
2768 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2769 			return 0;
2770 	}
2771 
2772 	return 1;
2773 }
2774 
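/*
 * The drange filter maps a chunk's logical length to the byte range it
 * occupies on each member device: stripe_length = chunk_length / factor,
 * where factor is the number of data-bearing stripes.  E.g. a 4-stripe
 * RAID10 chunk of 2 GiB has factor 2, so each device holds a 1 GiB
 * stripe; the chunk is balanced if a stripe on bargs->devid intersects
 * [pstart, pend).
 */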
2775 /* [pstart, pend) */
2776 static int chunk_drange_filter(struct extent_buffer *leaf,
2777 			       struct btrfs_chunk *chunk,
2778 			       u64 chunk_offset,
2779 			       struct btrfs_balance_args *bargs)
2780 {
2781 	struct btrfs_stripe *stripe;
2782 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2783 	u64 stripe_offset;
2784 	u64 stripe_length;
2785 	int factor;
2786 	int i;
2787 
2788 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2789 		return 0;
2790 
2791 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2792 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2793 		factor = num_stripes / 2;
2794 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2795 		factor = num_stripes - 1;
2796 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2797 		factor = num_stripes - 2;
2798 	} else {
2799 		factor = num_stripes;
2800 	}
2801 
2802 	for (i = 0; i < num_stripes; i++) {
2803 		stripe = btrfs_stripe_nr(chunk, i);
2804 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2805 			continue;
2806 
2807 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2808 		stripe_length = btrfs_chunk_length(leaf, chunk);
2809 		do_div(stripe_length, factor);
2810 
2811 		if (stripe_offset < bargs->pend &&
2812 		    stripe_offset + stripe_length > bargs->pstart)
2813 			return 0;
2814 	}
2815 
2816 	return 1;
2817 }
2818 
2819 /* [vstart, vend) */
2820 static int chunk_vrange_filter(struct extent_buffer *leaf,
2821 			       struct btrfs_chunk *chunk,
2822 			       u64 chunk_offset,
2823 			       struct btrfs_balance_args *bargs)
2824 {
2825 	if (chunk_offset < bargs->vend &&
2826 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2827 		/* at least part of the chunk is inside this vrange */
2828 		return 0;
2829 
2830 	return 1;
2831 }
2832 
2833 static int chunk_soft_convert_filter(u64 chunk_type,
2834 				     struct btrfs_balance_args *bargs)
2835 {
2836 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2837 		return 0;
2838 
2839 	chunk_type = chunk_to_extended(chunk_type) &
2840 				BTRFS_EXTENDED_PROFILE_MASK;
2841 
2842 	if (bargs->target == chunk_type)
2843 		return 1;
2844 
2845 	return 0;
2846 }
2847 
2848 static int should_balance_chunk(struct btrfs_root *root,
2849 				struct extent_buffer *leaf,
2850 				struct btrfs_chunk *chunk, u64 chunk_offset)
2851 {
2852 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2853 	struct btrfs_balance_args *bargs = NULL;
2854 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2855 
2856 	/* type filter */
2857 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2858 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2859 		return 0;
2860 	}
2861 
2862 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2863 		bargs = &bctl->data;
2864 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2865 		bargs = &bctl->sys;
2866 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2867 		bargs = &bctl->meta;
2868 
2869 	/* profiles filter */
2870 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2871 	    chunk_profiles_filter(chunk_type, bargs)) {
2872 		return 0;
2873 	}
2874 
2875 	/* usage filter */
2876 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2877 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2878 		return 0;
2879 	}
2880 
2881 	/* devid filter */
2882 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2883 	    chunk_devid_filter(leaf, chunk, bargs)) {
2884 		return 0;
2885 	}
2886 
2887 	/* drange filter, makes sense only with devid filter */
2888 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2889 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2890 		return 0;
2891 	}
2892 
2893 	/* vrange filter */
2894 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2895 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2896 		return 0;
2897 	}
2898 
2899 	/* soft profile changing mode */
2900 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2901 	    chunk_soft_convert_filter(chunk_type, bargs)) {
2902 		return 0;
2903 	}
2904 
2905 	return 1;
2906 }
2907 
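/*
 * Main balance loop.  Two passes are made over the chunk tree: a
 * counting pass that only fills bctl->stat.expected, then a pass that
 * actually relocates the chunks matching the filters.  Keys are walked
 * from high offset to low so relocation does not disturb the iteration.
 */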
2908 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2909 {
2910 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2911 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2912 	struct btrfs_root *dev_root = fs_info->dev_root;
2913 	struct list_head *devices;
2914 	struct btrfs_device *device;
2915 	u64 old_size;
2916 	u64 size_to_free;
2917 	struct btrfs_chunk *chunk;
2918 	struct btrfs_path *path;
2919 	struct btrfs_key key;
2920 	struct btrfs_key found_key;
2921 	struct btrfs_trans_handle *trans;
2922 	struct extent_buffer *leaf;
2923 	int slot;
2924 	int ret;
2925 	int enospc_errors = 0;
2926 	bool counting = true;
2927 
2928 	/* step one, make some room on all the devices */
2929 	devices = &fs_info->fs_devices->devices;
2930 	list_for_each_entry(device, devices, dev_list) {
2931 		old_size = device->total_bytes;
2932 		size_to_free = div_factor(old_size, 1);
2933 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2934 		if (!device->writeable ||
2935 		    device->total_bytes - device->bytes_used > size_to_free ||
2936 		    device->is_tgtdev_for_dev_replace)
2937 			continue;
2938 
2939 		ret = btrfs_shrink_device(device, old_size - size_to_free);
2940 		if (ret == -ENOSPC)
2941 			break;
2942 		BUG_ON(ret);
2943 
2944 		trans = btrfs_start_transaction(dev_root, 0);
2945 		BUG_ON(IS_ERR(trans));
2946 
2947 		ret = btrfs_grow_device(trans, device, old_size);
2948 		BUG_ON(ret);
2949 
2950 		btrfs_end_transaction(trans, dev_root);
2951 	}
2952 
2953 	/* step two, relocate all the chunks */
2954 	path = btrfs_alloc_path();
2955 	if (!path) {
2956 		ret = -ENOMEM;
2957 		goto error;
2958 	}
2959 
2960 	/* zero out stat counters */
2961 	spin_lock(&fs_info->balance_lock);
2962 	memset(&bctl->stat, 0, sizeof(bctl->stat));
2963 	spin_unlock(&fs_info->balance_lock);
2964 again:
2965 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2966 	key.offset = (u64)-1;
2967 	key.type = BTRFS_CHUNK_ITEM_KEY;
2968 
2969 	while (1) {
2970 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2971 		    atomic_read(&fs_info->balance_cancel_req)) {
2972 			ret = -ECANCELED;
2973 			goto error;
2974 		}
2975 
2976 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2977 		if (ret < 0)
2978 			goto error;
2979 
2980 		/*
2981 		 * this shouldn't happen, it means the last relocate
2982 		 * failed
2983 		 */
2984 		if (ret == 0)
2985 			BUG(); /* FIXME break ? */
2986 
2987 		ret = btrfs_previous_item(chunk_root, path, 0,
2988 					  BTRFS_CHUNK_ITEM_KEY);
2989 		if (ret) {
2990 			ret = 0;
2991 			break;
2992 		}
2993 
2994 		leaf = path->nodes[0];
2995 		slot = path->slots[0];
2996 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2997 
2998 		if (found_key.objectid != key.objectid)
2999 			break;
3000 
3001 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3002 
3003 		if (!counting) {
3004 			spin_lock(&fs_info->balance_lock);
3005 			bctl->stat.considered++;
3006 			spin_unlock(&fs_info->balance_lock);
3007 		}
3008 
3009 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3010 					   found_key.offset);
3011 		btrfs_release_path(path);
3012 		if (!ret)
3013 			goto loop;
3014 
3015 		if (counting) {
3016 			spin_lock(&fs_info->balance_lock);
3017 			bctl->stat.expected++;
3018 			spin_unlock(&fs_info->balance_lock);
3019 			goto loop;
3020 		}
3021 
3022 		ret = btrfs_relocate_chunk(chunk_root,
3023 					   chunk_root->root_key.objectid,
3024 					   found_key.objectid,
3025 					   found_key.offset);
3026 		if (ret && ret != -ENOSPC)
3027 			goto error;
3028 		if (ret == -ENOSPC) {
3029 			enospc_errors++;
3030 		} else {
3031 			spin_lock(&fs_info->balance_lock);
3032 			bctl->stat.completed++;
3033 			spin_unlock(&fs_info->balance_lock);
3034 		}
3035 loop:
3036 		if (found_key.offset == 0)
3037 			break;
3038 		key.offset = found_key.offset - 1;
3039 	}
3040 
3041 	if (counting) {
3042 		btrfs_release_path(path);
3043 		counting = false;
3044 		goto again;
3045 	}
3046 error:
3047 	btrfs_free_path(path);
3048 	if (enospc_errors) {
3049 		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3050 		       enospc_errors);
3051 		if (!ret)
3052 			ret = -ENOSPC;
3053 	}
3054 
3055 	return ret;
3056 }
3057 
3058 /**
3059  * alloc_profile_is_valid - see if a given profile is valid and reduced
3060  * @flags: profile to validate
3061  * @extended: if true @flags is treated as an extended profile
3062  */
3063 static int alloc_profile_is_valid(u64 flags, int extended)
3064 {
3065 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3066 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3067 
3068 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3069 
3070 	/* 1) check that all other bits are zeroed */
3071 	if (flags & ~mask)
3072 		return 0;
3073 
3074 	/* 2) see if profile is reduced */
3075 	if (flags == 0)
3076 		return !extended; /* "0" is valid for usual profiles */
3077 
3078 	/* true if exactly one bit set */
3079 	return (flags & (flags - 1)) == 0;
3080 }
3081 
3082 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3083 {
3084 	/* cancel requested || normal exit path */
3085 	return atomic_read(&fs_info->balance_cancel_req) ||
3086 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3087 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3088 }
3089 
3090 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3091 {
3092 	int ret;
3093 
3094 	unset_balance_control(fs_info);
3095 	ret = del_balance_item(fs_info->tree_root);
3096 	if (ret)
3097 		btrfs_std_error(fs_info, ret);
3098 
3099 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3100 }
3101 
3102 /*
3103  * Should be called with both balance and volume mutexes held
3104  */
3105 int btrfs_balance(struct btrfs_balance_control *bctl,
3106 		  struct btrfs_ioctl_balance_args *bargs)
3107 {
3108 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3109 	u64 allowed;
3110 	int mixed = 0;
3111 	int ret;
3112 	u64 num_devices;
3113 	unsigned seq;
3114 
3115 	if (btrfs_fs_closing(fs_info) ||
3116 	    atomic_read(&fs_info->balance_pause_req) ||
3117 	    atomic_read(&fs_info->balance_cancel_req)) {
3118 		ret = -EINVAL;
3119 		goto out;
3120 	}
3121 
3122 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3123 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3124 		mixed = 1;
3125 
3126 	/*
3127 	 * In the case of mixed groups, both data and meta should be picked,
3128 	 * and identical options should be given for both of them.
3129 	 */
3130 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3131 	if (mixed && (bctl->flags & allowed)) {
3132 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3133 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3134 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3135 			printk(KERN_ERR "btrfs: with mixed groups data and "
3136 			       "metadata balance options must be the same\n");
3137 			ret = -EINVAL;
3138 			goto out;
3139 		}
3140 	}
3141 
3142 	num_devices = fs_info->fs_devices->num_devices;
3143 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3144 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3145 		BUG_ON(num_devices < 1);
3146 		num_devices--;
3147 	}
3148 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3149 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3150 	if (num_devices == 1)
3151 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3152 	else if (num_devices > 1)
3153 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3154 	if (num_devices > 2)
3155 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3156 	if (num_devices > 3)
3157 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3158 			    BTRFS_BLOCK_GROUP_RAID6);
3159 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3160 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3161 	     (bctl->data.target & ~allowed))) {
3162 		printk(KERN_ERR "btrfs: unable to start balance with target "
3163 		       "data profile %llu\n",
3164 		       bctl->data.target);
3165 		ret = -EINVAL;
3166 		goto out;
3167 	}
3168 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3169 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3170 	     (bctl->meta.target & ~allowed))) {
3171 		printk(KERN_ERR "btrfs: unable to start balance with target "
3172 		       "metadata profile %llu\n",
3173 		       bctl->meta.target);
3174 		ret = -EINVAL;
3175 		goto out;
3176 	}
3177 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3178 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3179 	     (bctl->sys.target & ~allowed))) {
3180 		printk(KERN_ERR "btrfs: unable to start balance with target "
3181 		       "system profile %llu\n",
3182 		       bctl->sys.target);
3183 		ret = -EINVAL;
3184 		goto out;
3185 	}
3186 
3187 	/* allow dup'ed data chunks only in mixed mode */
3188 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3189 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3190 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3191 		ret = -EINVAL;
3192 		goto out;
3193 	}
3194 
3195 	/* allow reducing meta or sys integrity only if force is set */
3196 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3197 			BTRFS_BLOCK_GROUP_RAID10 |
3198 			BTRFS_BLOCK_GROUP_RAID5 |
3199 			BTRFS_BLOCK_GROUP_RAID6;
3200 	do {
3201 		seq = read_seqbegin(&fs_info->profiles_lock);
3202 
3203 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3204 		     (fs_info->avail_system_alloc_bits & allowed) &&
3205 		     !(bctl->sys.target & allowed)) ||
3206 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3207 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3208 		     !(bctl->meta.target & allowed))) {
3209 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3210 				printk(KERN_INFO "btrfs: force reducing metadata "
3211 				       "integrity\n");
3212 			} else {
3213 				printk(KERN_ERR "btrfs: balance will reduce metadata "
3214 				       "integrity, use force if you want this\n");
3215 				ret = -EINVAL;
3216 				goto out;
3217 			}
3218 		}
3219 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3220 
3221 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3222 		int num_tolerated_disk_barrier_failures;
3223 		u64 target = bctl->sys.target;
3224 
3225 		num_tolerated_disk_barrier_failures =
3226 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3227 		if (num_tolerated_disk_barrier_failures > 0 &&
3228 		    (target &
3229 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3230 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3231 			num_tolerated_disk_barrier_failures = 0;
3232 		else if (num_tolerated_disk_barrier_failures > 1 &&
3233 			 (target &
3234 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3235 			num_tolerated_disk_barrier_failures = 1;
3236 
3237 		fs_info->num_tolerated_disk_barrier_failures =
3238 			num_tolerated_disk_barrier_failures;
3239 	}
3240 
3241 	ret = insert_balance_item(fs_info->tree_root, bctl);
3242 	if (ret && ret != -EEXIST)
3243 		goto out;
3244 
3245 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3246 		BUG_ON(ret == -EEXIST);
3247 		set_balance_control(bctl);
3248 	} else {
3249 		BUG_ON(ret != -EEXIST);
3250 		spin_lock(&fs_info->balance_lock);
3251 		update_balance_args(bctl);
3252 		spin_unlock(&fs_info->balance_lock);
3253 	}
3254 
3255 	atomic_inc(&fs_info->balance_running);
3256 	mutex_unlock(&fs_info->balance_mutex);
3257 
3258 	ret = __btrfs_balance(fs_info);
3259 
3260 	mutex_lock(&fs_info->balance_mutex);
3261 	atomic_dec(&fs_info->balance_running);
3262 
3263 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3264 		fs_info->num_tolerated_disk_barrier_failures =
3265 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3266 	}
3267 
3268 	if (bargs) {
3269 		memset(bargs, 0, sizeof(*bargs));
3270 		update_ioctl_balance_args(fs_info, 0, bargs);
3271 	}
3272 
3273 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3274 	    balance_need_close(fs_info)) {
3275 		__cancel_balance(fs_info);
3276 	}
3277 
3278 	wake_up(&fs_info->balance_wait_q);
3279 
3280 	return ret;
3281 out:
3282 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3283 		__cancel_balance(fs_info);
3284 	else {
3285 		kfree(bctl);
3286 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3287 	}
3288 	return ret;
3289 }
3290 
3291 static int balance_kthread(void *data)
3292 {
3293 	struct btrfs_fs_info *fs_info = data;
3294 	int ret = 0;
3295 
3296 	mutex_lock(&fs_info->volume_mutex);
3297 	mutex_lock(&fs_info->balance_mutex);
3298 
3299 	if (fs_info->balance_ctl) {
3300 		printk(KERN_INFO "btrfs: continuing balance\n");
3301 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3302 	}
3303 
3304 	mutex_unlock(&fs_info->balance_mutex);
3305 	mutex_unlock(&fs_info->volume_mutex);
3306 
3307 	return ret;
3308 }
3309 
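/*
 * Kick off an async resume of an interrupted balance at mount time,
 * unless the skip_balance mount option was given.
 */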
3310 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3311 {
3312 	struct task_struct *tsk;
3313 
3314 	spin_lock(&fs_info->balance_lock);
3315 	if (!fs_info->balance_ctl) {
3316 		spin_unlock(&fs_info->balance_lock);
3317 		return 0;
3318 	}
3319 	spin_unlock(&fs_info->balance_lock);
3320 
3321 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3322 		printk(KERN_INFO "btrfs: force skipping balance\n");
3323 		return 0;
3324 	}
3325 
3326 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3327 	return PTR_ERR_OR_ZERO(tsk);
3328 }
3329 
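/*
 * Read the BALANCE_ITEM written by insert_balance_item() and
 * re-establish the in-memory balance control with BTRFS_BALANCE_RESUME
 * set, so that the resume kthread can pick it up.
 */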
3330 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3331 {
3332 	struct btrfs_balance_control *bctl;
3333 	struct btrfs_balance_item *item;
3334 	struct btrfs_disk_balance_args disk_bargs;
3335 	struct btrfs_path *path;
3336 	struct extent_buffer *leaf;
3337 	struct btrfs_key key;
3338 	int ret;
3339 
3340 	path = btrfs_alloc_path();
3341 	if (!path)
3342 		return -ENOMEM;
3343 
3344 	key.objectid = BTRFS_BALANCE_OBJECTID;
3345 	key.type = BTRFS_BALANCE_ITEM_KEY;
3346 	key.offset = 0;
3347 
3348 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3349 	if (ret < 0)
3350 		goto out;
3351 	if (ret > 0) { /* ret = -ENOENT; */
3352 		ret = 0;
3353 		goto out;
3354 	}
3355 
3356 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3357 	if (!bctl) {
3358 		ret = -ENOMEM;
3359 		goto out;
3360 	}
3361 
3362 	leaf = path->nodes[0];
3363 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3364 
3365 	bctl->fs_info = fs_info;
3366 	bctl->flags = btrfs_balance_flags(leaf, item);
3367 	bctl->flags |= BTRFS_BALANCE_RESUME;
3368 
3369 	btrfs_balance_data(leaf, item, &disk_bargs);
3370 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3371 	btrfs_balance_meta(leaf, item, &disk_bargs);
3372 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3373 	btrfs_balance_sys(leaf, item, &disk_bargs);
3374 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3375 
3376 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3377 
3378 	mutex_lock(&fs_info->volume_mutex);
3379 	mutex_lock(&fs_info->balance_mutex);
3380 
3381 	set_balance_control(bctl);
3382 
3383 	mutex_unlock(&fs_info->balance_mutex);
3384 	mutex_unlock(&fs_info->volume_mutex);
3385 out:
3386 	btrfs_free_path(path);
3387 	return ret;
3388 }
3389 
3390 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3391 {
3392 	int ret = 0;
3393 
3394 	mutex_lock(&fs_info->balance_mutex);
3395 	if (!fs_info->balance_ctl) {
3396 		mutex_unlock(&fs_info->balance_mutex);
3397 		return -ENOTCONN;
3398 	}
3399 
3400 	if (atomic_read(&fs_info->balance_running)) {
3401 		atomic_inc(&fs_info->balance_pause_req);
3402 		mutex_unlock(&fs_info->balance_mutex);
3403 
3404 		wait_event(fs_info->balance_wait_q,
3405 			   atomic_read(&fs_info->balance_running) == 0);
3406 
3407 		mutex_lock(&fs_info->balance_mutex);
3408 		/* we are good with balance_ctl ripped off from under us */
3409 		BUG_ON(atomic_read(&fs_info->balance_running));
3410 		atomic_dec(&fs_info->balance_pause_req);
3411 	} else {
3412 		ret = -ENOTCONN;
3413 	}
3414 
3415 	mutex_unlock(&fs_info->balance_mutex);
3416 	return ret;
3417 }
3418 
3419 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3420 {
3421 	mutex_lock(&fs_info->balance_mutex);
3422 	if (!fs_info->balance_ctl) {
3423 		mutex_unlock(&fs_info->balance_mutex);
3424 		return -ENOTCONN;
3425 	}
3426 
3427 	atomic_inc(&fs_info->balance_cancel_req);
3428 	/*
3429 	 * If we are running, just wait and return; the balance item is
3430 	 * deleted in btrfs_balance() in this case.
3431 	 */
3432 	if (atomic_read(&fs_info->balance_running)) {
3433 		mutex_unlock(&fs_info->balance_mutex);
3434 		wait_event(fs_info->balance_wait_q,
3435 			   atomic_read(&fs_info->balance_running) == 0);
3436 		mutex_lock(&fs_info->balance_mutex);
3437 	} else {
3438 		/* __cancel_balance needs volume_mutex */
3439 		mutex_unlock(&fs_info->balance_mutex);
3440 		mutex_lock(&fs_info->volume_mutex);
3441 		mutex_lock(&fs_info->balance_mutex);
3442 
3443 		if (fs_info->balance_ctl)
3444 			__cancel_balance(fs_info);
3445 
3446 		mutex_unlock(&fs_info->volume_mutex);
3447 	}
3448 
3449 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3450 	atomic_dec(&fs_info->balance_cancel_req);
3451 	mutex_unlock(&fs_info->balance_mutex);
3452 	return 0;
3453 }
3454 
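/*
 * Walk all ROOT_ITEMs in the tree root and add any missing uuid tree
 * entries (subvolume uuid and received_uuid).  A transaction is started
 * lazily, only once there is actually something to insert.
 */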
3455 static int btrfs_uuid_scan_kthread(void *data)
3456 {
3457 	struct btrfs_fs_info *fs_info = data;
3458 	struct btrfs_root *root = fs_info->tree_root;
3459 	struct btrfs_key key;
3460 	struct btrfs_key max_key;
3461 	struct btrfs_path *path = NULL;
3462 	int ret = 0;
3463 	struct extent_buffer *eb;
3464 	int slot;
3465 	struct btrfs_root_item root_item;
3466 	u32 item_size;
3467 	struct btrfs_trans_handle *trans = NULL;
3468 
3469 	path = btrfs_alloc_path();
3470 	if (!path) {
3471 		ret = -ENOMEM;
3472 		goto out;
3473 	}
3474 
3475 	key.objectid = 0;
3476 	key.type = BTRFS_ROOT_ITEM_KEY;
3477 	key.offset = 0;
3478 
3479 	max_key.objectid = (u64)-1;
3480 	max_key.type = BTRFS_ROOT_ITEM_KEY;
3481 	max_key.offset = (u64)-1;
3482 
3483 	path->keep_locks = 1;
3484 
3485 	while (1) {
3486 		ret = btrfs_search_forward(root, &key, &max_key, path, 0);
3487 		if (ret) {
3488 			if (ret > 0)
3489 				ret = 0;
3490 			break;
3491 		}
3492 
3493 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3494 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3495 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3496 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3497 			goto skip;
3498 
3499 		eb = path->nodes[0];
3500 		slot = path->slots[0];
3501 		item_size = btrfs_item_size_nr(eb, slot);
3502 		if (item_size < sizeof(root_item))
3503 			goto skip;
3504 
3505 		read_extent_buffer(eb, &root_item,
3506 				   btrfs_item_ptr_offset(eb, slot),
3507 				   (int)sizeof(root_item));
3508 		if (btrfs_root_refs(&root_item) == 0)
3509 			goto skip;
3510 
3511 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3512 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3513 			if (trans)
3514 				goto update_tree;
3515 
3516 			btrfs_release_path(path);
3517 			/*
3518 			 * 1 - subvol uuid item
3519 			 * 1 - received_subvol uuid item
3520 			 */
3521 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3522 			if (IS_ERR(trans)) {
3523 				ret = PTR_ERR(trans);
3524 				break;
3525 			}
3526 			continue;
3527 		} else {
3528 			goto skip;
3529 		}
3530 update_tree:
3531 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3532 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3533 						  root_item.uuid,
3534 						  BTRFS_UUID_KEY_SUBVOL,
3535 						  key.objectid);
3536 			if (ret < 0) {
3537 				pr_warn("btrfs: uuid_tree_add failed %d\n",
3538 					ret);
3539 				break;
3540 			}
3541 		}
3542 
3543 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3544 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3545 						  root_item.received_uuid,
3546 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3547 						  key.objectid);
3548 			if (ret < 0) {
3549 				pr_warn("btrfs: uuid_tree_add failed %d\n",
3550 					ret);
3551 				break;
3552 			}
3553 		}
3554 
3555 skip:
3556 		if (trans) {
3557 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3558 			trans = NULL;
3559 			if (ret)
3560 				break;
3561 		}
3562 
3563 		btrfs_release_path(path);
3564 		if (key.offset < (u64)-1) {
3565 			key.offset++;
3566 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3567 			key.offset = 0;
3568 			key.type = BTRFS_ROOT_ITEM_KEY;
3569 		} else if (key.objectid < (u64)-1) {
3570 			key.offset = 0;
3571 			key.type = BTRFS_ROOT_ITEM_KEY;
3572 			key.objectid++;
3573 		} else {
3574 			break;
3575 		}
3576 		cond_resched();
3577 	}
3578 
3579 out:
3580 	btrfs_free_path(path);
3581 	if (trans && !IS_ERR(trans))
3582 		btrfs_end_transaction(trans, fs_info->uuid_root);
3583 	if (ret)
3584 		pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
3585 	else
3586 		fs_info->update_uuid_tree_gen = 1;
3587 	up(&fs_info->uuid_tree_rescan_sem);
3588 	return 0;
3589 }
3590 
3591 /*
3592  * Callback for btrfs_uuid_tree_iterate().
3593  * returns:
3594  * 0	check succeeded, the entry is not outdated.
3595  * < 0	if an error occurred.
3596  * > 0	if the check failed, which means the caller shall remove the entry.
3597  */
3598 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3599 				       u8 *uuid, u8 type, u64 subid)
3600 {
3601 	struct btrfs_key key;
3602 	int ret = 0;
3603 	struct btrfs_root *subvol_root;
3604 
3605 	if (type != BTRFS_UUID_KEY_SUBVOL &&
3606 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3607 		goto out;
3608 
3609 	key.objectid = subid;
3610 	key.type = BTRFS_ROOT_ITEM_KEY;
3611 	key.offset = (u64)-1;
3612 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3613 	if (IS_ERR(subvol_root)) {
3614 		ret = PTR_ERR(subvol_root);
3615 		if (ret == -ENOENT)
3616 			ret = 1;
3617 		goto out;
3618 	}
3619 
3620 	switch (type) {
3621 	case BTRFS_UUID_KEY_SUBVOL:
3622 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3623 			ret = 1;
3624 		break;
3625 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3626 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3627 			   BTRFS_UUID_SIZE))
3628 			ret = 1;
3629 		break;
3630 	}
3631 
3632 out:
3633 	return ret;
3634 }
3635 
3636 static int btrfs_uuid_rescan_kthread(void *data)
3637 {
3638 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3639 	int ret;
3640 
3641 	/*
3642 	 * 1st step is to iterate through the existing UUID tree and
3643 	 * to delete all entries that contain outdated data.
3644 	 * 2nd step is to add all missing entries to the UUID tree.
3645 	 */
3646 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3647 	if (ret < 0) {
3648 		pr_warn("btrfs: iterating uuid_tree failed %d\n", ret);
3649 		up(&fs_info->uuid_tree_rescan_sem);
3650 		return ret;
3651 	}
3652 	return btrfs_uuid_scan_kthread(data);
3653 }
3654 
3655 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3656 {
3657 	struct btrfs_trans_handle *trans;
3658 	struct btrfs_root *tree_root = fs_info->tree_root;
3659 	struct btrfs_root *uuid_root;
3660 	struct task_struct *task;
3661 	int ret;
3662 
3663 	/*
3664 	 * 1 - root node
3665 	 * 1 - root item
3666 	 */
3667 	trans = btrfs_start_transaction(tree_root, 2);
3668 	if (IS_ERR(trans))
3669 		return PTR_ERR(trans);
3670 
3671 	uuid_root = btrfs_create_tree(trans, fs_info,
3672 				      BTRFS_UUID_TREE_OBJECTID);
3673 	if (IS_ERR(uuid_root)) {
3674 		btrfs_abort_transaction(trans, tree_root,
3675 					PTR_ERR(uuid_root));
3676 		return PTR_ERR(uuid_root);
3677 	}
3678 
3679 	fs_info->uuid_root = uuid_root;
3680 
3681 	ret = btrfs_commit_transaction(trans, tree_root);
3682 	if (ret)
3683 		return ret;
3684 
3685 	down(&fs_info->uuid_tree_rescan_sem);
3686 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3687 	if (IS_ERR(task)) {
3688 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3689 		pr_warn("btrfs: failed to start uuid_scan task\n");
3690 		up(&fs_info->uuid_tree_rescan_sem);
3691 		return PTR_ERR(task);
3692 	}
3693 
3694 	return 0;
3695 }
3696 
3697 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3698 {
3699 	struct task_struct *task;
3700 
3701 	down(&fs_info->uuid_tree_rescan_sem);
3702 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3703 	if (IS_ERR(task)) {
3704 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3705 		pr_warn("btrfs: failed to start uuid_rescan task\n");
3706 		up(&fs_info->uuid_tree_rescan_sem);
3707 		return PTR_ERR(task);
3708 	}
3709 
3710 	return 0;
3711 }
3712 
3713 /*
3714  * Shrinking a device means finding all of the device extents past
3715  * the new size, and then following the back refs to the chunks.
3716  * The chunk relocation code actually frees the device extents.
3717  */
3718 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3719 {
3720 	struct btrfs_trans_handle *trans;
3721 	struct btrfs_root *root = device->dev_root;
3722 	struct btrfs_dev_extent *dev_extent = NULL;
3723 	struct btrfs_path *path;
3724 	u64 length;
3725 	u64 chunk_tree;
3726 	u64 chunk_objectid;
3727 	u64 chunk_offset;
3728 	int ret;
3729 	int slot;
3730 	int failed = 0;
3731 	bool retried = false;
3732 	struct extent_buffer *l;
3733 	struct btrfs_key key;
3734 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3735 	u64 old_total = btrfs_super_total_bytes(super_copy);
3736 	u64 old_size = device->total_bytes;
3737 	u64 diff = device->total_bytes - new_size;
3738 
3739 	if (device->is_tgtdev_for_dev_replace)
3740 		return -EINVAL;
3741 
3742 	path = btrfs_alloc_path();
3743 	if (!path)
3744 		return -ENOMEM;
3745 
3746 	path->reada = 2;
3747 
3748 	lock_chunks(root);
3749 
3750 	device->total_bytes = new_size;
3751 	if (device->writeable) {
3752 		device->fs_devices->total_rw_bytes -= diff;
3753 		spin_lock(&root->fs_info->free_chunk_lock);
3754 		root->fs_info->free_chunk_space -= diff;
3755 		spin_unlock(&root->fs_info->free_chunk_lock);
3756 	}
3757 	unlock_chunks(root);
3758 
3759 again:
3760 	key.objectid = device->devid;
3761 	key.offset = (u64)-1;
3762 	key.type = BTRFS_DEV_EXTENT_KEY;
3763 
3764 	do {
3765 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3766 		if (ret < 0)
3767 			goto done;
3768 
3769 		ret = btrfs_previous_item(root, path, 0, key.type);
3770 		if (ret < 0)
3771 			goto done;
3772 		if (ret) {
3773 			ret = 0;
3774 			btrfs_release_path(path);
3775 			break;
3776 		}
3777 
3778 		l = path->nodes[0];
3779 		slot = path->slots[0];
3780 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3781 
3782 		if (key.objectid != device->devid) {
3783 			btrfs_release_path(path);
3784 			break;
3785 		}
3786 
3787 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3788 		length = btrfs_dev_extent_length(l, dev_extent);
3789 
3790 		if (key.offset + length <= new_size) {
3791 			btrfs_release_path(path);
3792 			break;
3793 		}
3794 
3795 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3796 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3797 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3798 		btrfs_release_path(path);
3799 
3800 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3801 					   chunk_offset);
3802 		if (ret && ret != -ENOSPC)
3803 			goto done;
3804 		if (ret == -ENOSPC)
3805 			failed++;
3806 	} while (key.offset-- > 0);
3807 
3808 	if (failed && !retried) {
3809 		failed = 0;
3810 		retried = true;
3811 		goto again;
3812 	} else if (failed && retried) {
3813 		ret = -ENOSPC;
3814 		lock_chunks(root);
3815 
3816 		device->total_bytes = old_size;
3817 		if (device->writeable)
3818 			device->fs_devices->total_rw_bytes += diff;
3819 		spin_lock(&root->fs_info->free_chunk_lock);
3820 		root->fs_info->free_chunk_space += diff;
3821 		spin_unlock(&root->fs_info->free_chunk_lock);
3822 		unlock_chunks(root);
3823 		goto done;
3824 	}
3825 
3826 	/* Shrinking succeeded; otherwise we would have jumped to "done". */
3827 	trans = btrfs_start_transaction(root, 0);
3828 	if (IS_ERR(trans)) {
3829 		ret = PTR_ERR(trans);
3830 		goto done;
3831 	}
3832 
3833 	lock_chunks(root);
3834 
3835 	device->disk_total_bytes = new_size;
3836 	/* Now btrfs_update_device() will change the on-disk size. */
3837 	ret = btrfs_update_device(trans, device);
3838 	if (ret) {
3839 		unlock_chunks(root);
3840 		btrfs_end_transaction(trans, root);
3841 		goto done;
3842 	}
3843 	WARN_ON(diff > old_total);
3844 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3845 	unlock_chunks(root);
3846 	btrfs_end_transaction(trans, root);
3847 done:
3848 	btrfs_free_path(path);
3849 	return ret;
3850 }
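
/*
 * Editor's note, not in the original file: the loop above visits dev
 * extents highest-offset-first.  Searching for (devid, DEV_EXTENT_KEY,
 * (u64)-1) lands just past the device's last extent, btrfs_previous_item()
 * steps back onto it, and the key.offset-- in the loop condition keeps the
 * walk moving backwards even when btrfs_relocate_chunk() returns -ENOSPC.
 */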
3851 
3852 static int btrfs_add_system_chunk(struct btrfs_root *root,
3853 			   struct btrfs_key *key,
3854 			   struct btrfs_chunk *chunk, int item_size)
3855 {
3856 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3857 	struct btrfs_disk_key disk_key;
3858 	u32 array_size;
3859 	u8 *ptr;
3860 
3861 	array_size = btrfs_super_sys_array_size(super_copy);
3862 	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3863 		return -EFBIG;
3864 
3865 	ptr = super_copy->sys_chunk_array + array_size;
3866 	btrfs_cpu_key_to_disk(&disk_key, key);
3867 	memcpy(ptr, &disk_key, sizeof(disk_key));
3868 	ptr += sizeof(disk_key);
3869 	memcpy(ptr, chunk, item_size);
3870 	item_size += sizeof(disk_key);
3871 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3872 	return 0;
3873 }
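
/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the layout btrfs_add_system_chunk() produces: sys_chunk_array is a
 * packed sequence of (struct btrfs_disk_key, struct btrfs_chunk) pairs,
 * where each chunk item's size depends on its stripe count.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_walk_sys_chunk_array(
					struct btrfs_super_block *sb)
{
	u8 *ptr = sb->sys_chunk_array;
	u8 *end = ptr + btrfs_super_sys_array_size(sb);

	while (ptr < end) {
		struct btrfs_chunk *chunk;

		/* each entry starts with the key the chunk lives under */
		ptr += sizeof(struct btrfs_disk_key);
		chunk = (struct btrfs_chunk *)ptr;
		/* ... inspect the chunk here ... */
		ptr += btrfs_chunk_item_size(
				btrfs_stack_chunk_num_stripes(chunk));
	}
}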
3874 
3875 /*
3876  * sort the devices in descending order by max_avail, then by total_avail
3877  */
3878 static int btrfs_cmp_device_info(const void *a, const void *b)
3879 {
3880 	const struct btrfs_device_info *di_a = a;
3881 	const struct btrfs_device_info *di_b = b;
3882 
3883 	if (di_a->max_avail > di_b->max_avail)
3884 		return -1;
3885 	if (di_a->max_avail < di_b->max_avail)
3886 		return 1;
3887 	if (di_a->total_avail > di_b->total_avail)
3888 		return -1;
3889 	if (di_a->total_avail < di_b->total_avail)
3890 		return 1;
3891 	return 0;
3892 }
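
/*
 * Editor's note (hypothetical values): with the comparator above, devices
 * holding (max_avail, total_avail) of (1G, 2G), (4G, 8G) and (4G, 3G) sort
 * to (4G, 8G), (4G, 3G), (1G, 2G): largest usable hole first, ties broken
 * by total free space.
 */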
3893 
3894 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3895 	[BTRFS_RAID_RAID10] = {
3896 		.sub_stripes	= 2,
3897 		.dev_stripes	= 1,
3898 		.devs_max	= 0,	/* 0 == as many as possible */
3899 		.devs_min	= 4,
3900 		.devs_increment	= 2,
3901 		.ncopies	= 2,
3902 	},
3903 	[BTRFS_RAID_RAID1] = {
3904 		.sub_stripes	= 1,
3905 		.dev_stripes	= 1,
3906 		.devs_max	= 2,
3907 		.devs_min	= 2,
3908 		.devs_increment	= 2,
3909 		.ncopies	= 2,
3910 	},
3911 	[BTRFS_RAID_DUP] = {
3912 		.sub_stripes	= 1,
3913 		.dev_stripes	= 2,
3914 		.devs_max	= 1,
3915 		.devs_min	= 1,
3916 		.devs_increment	= 1,
3917 		.ncopies	= 2,
3918 	},
3919 	[BTRFS_RAID_RAID0] = {
3920 		.sub_stripes	= 1,
3921 		.dev_stripes	= 1,
3922 		.devs_max	= 0,
3923 		.devs_min	= 2,
3924 		.devs_increment	= 1,
3925 		.ncopies	= 1,
3926 	},
3927 	[BTRFS_RAID_SINGLE] = {
3928 		.sub_stripes	= 1,
3929 		.dev_stripes	= 1,
3930 		.devs_max	= 1,
3931 		.devs_min	= 1,
3932 		.devs_increment	= 1,
3933 		.ncopies	= 1,
3934 	},
3935 	[BTRFS_RAID_RAID5] = {
3936 		.sub_stripes	= 1,
3937 		.dev_stripes	= 1,
3938 		.devs_max	= 0,
3939 		.devs_min	= 2,
3940 		.devs_increment	= 1,
3941 		.ncopies	= 2,
3942 	},
3943 	[BTRFS_RAID_RAID6] = {
3944 		.sub_stripes	= 1,
3945 		.dev_stripes	= 1,
3946 		.devs_max	= 0,
3947 		.devs_min	= 3,
3948 		.devs_increment	= 1,
3949 		.ncopies	= 3,
3950 	},
3951 };
3952 
3953 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3954 {
3955 	/* TODO allow them to set a preferred stripe size */
3956 	return 64 * 1024;
3957 }
3958 
3959 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3960 {
3961 	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3962 		return;
3963 
3964 	btrfs_set_fs_incompat(info, RAID56);
3965 }
3966 
3967 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3968 			       struct btrfs_root *extent_root, u64 start,
3969 			       u64 type)
3970 {
3971 	struct btrfs_fs_info *info = extent_root->fs_info;
3972 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3973 	struct list_head *cur;
3974 	struct map_lookup *map = NULL;
3975 	struct extent_map_tree *em_tree;
3976 	struct extent_map *em;
3977 	struct btrfs_device_info *devices_info = NULL;
3978 	u64 total_avail;
3979 	int num_stripes;	/* total number of stripes to allocate */
3980 	int data_stripes;	/* number of stripes that count for
3981 				   block group size */
3982 	int sub_stripes;	/* sub_stripes info for map */
3983 	int dev_stripes;	/* stripes per dev */
3984 	int devs_max;		/* max devs to use */
3985 	int devs_min;		/* min devs needed */
3986 	int devs_increment;	/* ndevs has to be a multiple of this */
3987 	int ncopies;		/* how many copies of the data there are */
3988 	int ret;
3989 	u64 max_stripe_size;
3990 	u64 max_chunk_size;
3991 	u64 stripe_size;
3992 	u64 num_bytes;
3993 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3994 	int ndevs;
3995 	int i;
3996 	int j;
3997 	int index;
3998 
3999 	BUG_ON(!alloc_profile_is_valid(type, 0));
4000 
4001 	if (list_empty(&fs_devices->alloc_list))
4002 		return -ENOSPC;
4003 
4004 	index = __get_raid_index(type);
4005 
4006 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4007 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4008 	devs_max = btrfs_raid_array[index].devs_max;
4009 	devs_min = btrfs_raid_array[index].devs_min;
4010 	devs_increment = btrfs_raid_array[index].devs_increment;
4011 	ncopies = btrfs_raid_array[index].ncopies;
4012 
4013 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4014 		max_stripe_size = 1024 * 1024 * 1024;
4015 		max_chunk_size = 10 * max_stripe_size;
4016 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4017 		/* for larger filesystems, use larger metadata chunks */
4018 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4019 			max_stripe_size = 1024 * 1024 * 1024;
4020 		else
4021 			max_stripe_size = 256 * 1024 * 1024;
4022 		max_chunk_size = max_stripe_size;
4023 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4024 		max_stripe_size = 32 * 1024 * 1024;
4025 		max_chunk_size = 2 * max_stripe_size;
4026 	} else {
4027 		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
4028 		       type);
4029 		BUG_ON(1);
4030 	}
4031 
4032 	/* we don't want a chunk larger than 10% of writeable space */
4033 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4034 			     max_chunk_size);
4035 
4036 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4037 			       GFP_NOFS);
4038 	if (!devices_info)
4039 		return -ENOMEM;
4040 
4041 	cur = fs_devices->alloc_list.next;
4042 
4043 	/*
4044 	 * in the first pass through the devices list, we gather information
4045 	 * about the available holes on each device.
4046 	 */
4047 	ndevs = 0;
4048 	while (cur != &fs_devices->alloc_list) {
4049 		struct btrfs_device *device;
4050 		u64 max_avail;
4051 		u64 dev_offset;
4052 
4053 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4054 
4055 		cur = cur->next;
4056 
4057 		if (!device->writeable) {
4058 			WARN(1, KERN_ERR
4059 			       "btrfs: read-only device in alloc_list\n");
4060 			continue;
4061 		}
4062 
4063 		if (!device->in_fs_metadata ||
4064 		    device->is_tgtdev_for_dev_replace)
4065 			continue;
4066 
4067 		if (device->total_bytes > device->bytes_used)
4068 			total_avail = device->total_bytes - device->bytes_used;
4069 		else
4070 			total_avail = 0;
4071 
4072 		/* If there is no space on this device, skip it. */
4073 		if (total_avail == 0)
4074 			continue;
4075 
4076 		ret = find_free_dev_extent(trans, device,
4077 					   max_stripe_size * dev_stripes,
4078 					   &dev_offset, &max_avail);
4079 		if (ret && ret != -ENOSPC)
4080 			goto error;
4081 
4082 		if (ret == 0)
4083 			max_avail = max_stripe_size * dev_stripes;
4084 
4085 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4086 			continue;
4087 
4088 		if (ndevs == fs_devices->rw_devices) {
4089 			WARN(1, "%s: found more than %llu devices\n",
4090 			     __func__, fs_devices->rw_devices);
4091 			break;
4092 		}
4093 		devices_info[ndevs].dev_offset = dev_offset;
4094 		devices_info[ndevs].max_avail = max_avail;
4095 		devices_info[ndevs].total_avail = total_avail;
4096 		devices_info[ndevs].dev = device;
4097 		++ndevs;
4098 	}
4099 
4100 	/*
4101 	 * now sort the devices by hole size / available space
4102 	 */
4103 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4104 	     btrfs_cmp_device_info, NULL);
4105 
4106 	/* round down to number of usable stripes */
4107 	ndevs -= ndevs % devs_increment;
4108 
4109 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4110 		ret = -ENOSPC;
4111 		goto error;
4112 	}
4113 
4114 	if (devs_max && ndevs > devs_max)
4115 		ndevs = devs_max;
4116 	/*
4117 	 * the primary goal is to maximize the number of stripes, so use as many
4118 	 * devices as possible, even if the stripes are not maximum sized.
4119 	 */
4120 	stripe_size = devices_info[ndevs-1].max_avail;
4121 	num_stripes = ndevs * dev_stripes;
4122 
4123 	/*
4124 	 * this will have to be fixed for RAID1 and RAID10 over
4125 	 * more drives
4126 	 */
4127 	data_stripes = num_stripes / ncopies;
4128 
4129 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4130 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4131 				 btrfs_super_stripesize(info->super_copy));
4132 		data_stripes = num_stripes - 1;
4133 	}
4134 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4135 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4136 				 btrfs_super_stripesize(info->super_copy));
4137 		data_stripes = num_stripes - 2;
4138 	}
4139 
4140 	/*
4141 	 * Use the number of data stripes to figure out how big this chunk
4142 	 * is really going to be in terms of logical address space,
4143 	 * and compare that answer with the max chunk size
4144 	 */
4145 	if (stripe_size * data_stripes > max_chunk_size) {
4146 		u64 mask = (1ULL << 24) - 1;
4147 		stripe_size = max_chunk_size;
4148 		do_div(stripe_size, data_stripes);
4149 
4150 		/* bump the answer up to a 16MB boundary */
4151 		stripe_size = (stripe_size + mask) & ~mask;
4152 
4153 		/* but don't go higher than the limits we found
4154 		 * while searching for free extents
4155 		 */
4156 		if (stripe_size > devices_info[ndevs-1].max_avail)
4157 			stripe_size = devices_info[ndevs-1].max_avail;
4158 	}
4159 
4160 	do_div(stripe_size, dev_stripes);
4161 
4162 	/* align to BTRFS_STRIPE_LEN */
4163 	do_div(stripe_size, raid_stripe_len);
4164 	stripe_size *= raid_stripe_len;
4165 
4166 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4167 	if (!map) {
4168 		ret = -ENOMEM;
4169 		goto error;
4170 	}
4171 	map->num_stripes = num_stripes;
4172 
4173 	for (i = 0; i < ndevs; ++i) {
4174 		for (j = 0; j < dev_stripes; ++j) {
4175 			int s = i * dev_stripes + j;
4176 			map->stripes[s].dev = devices_info[i].dev;
4177 			map->stripes[s].physical = devices_info[i].dev_offset +
4178 						   j * stripe_size;
4179 		}
4180 	}
4181 	map->sector_size = extent_root->sectorsize;
4182 	map->stripe_len = raid_stripe_len;
4183 	map->io_align = raid_stripe_len;
4184 	map->io_width = raid_stripe_len;
4185 	map->type = type;
4186 	map->sub_stripes = sub_stripes;
4187 
4188 	num_bytes = stripe_size * data_stripes;
4189 
4190 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4191 
4192 	em = alloc_extent_map();
4193 	if (!em) {
4194 		ret = -ENOMEM;
4195 		goto error;
4196 	}
4197 	em->bdev = (struct block_device *)map;
4198 	em->start = start;
4199 	em->len = num_bytes;
4200 	em->block_start = 0;
4201 	em->block_len = em->len;
4202 	em->orig_block_len = stripe_size;
4203 
4204 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4205 	write_lock(&em_tree->lock);
4206 	ret = add_extent_mapping(em_tree, em, 0);
4207 	if (!ret) {
4208 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4209 		atomic_inc(&em->refs);
4210 	}
4211 	write_unlock(&em_tree->lock);
4212 	if (ret) {
4213 		free_extent_map(em);
4214 		goto error;
4215 	}
4216 
4217 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4218 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4219 				     start, num_bytes);
4220 	if (ret)
4221 		goto error_del_extent;
4222 
4223 	free_extent_map(em);
4224 	check_raid56_incompat_flag(extent_root->fs_info, type);
4225 
4226 	kfree(devices_info);
4227 	return 0;
4228 
4229 error_del_extent:
4230 	write_lock(&em_tree->lock);
4231 	remove_extent_mapping(em_tree, em);
4232 	write_unlock(&em_tree->lock);
4233 
4234 	/* One for our allocation */
4235 	free_extent_map(em);
4236 	/* One for the tree reference */
4237 	free_extent_map(em);
4238 error:
4239 	kfree(map);
4240 	kfree(devices_info);
4241 	return ret;
4242 }
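
/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * condenses the stripe sizing arithmetic from __btrfs_alloc_chunk() above:
 * do_div() divides a u64 in place and returns the remainder, and
 * (x + mask) & ~mask rounds x up to a 16MB boundary.  The function name
 * and sample numbers are hypothetical.
 */
static u64 __maybe_unused example_stripe_size(u64 max_chunk_size,
					      int data_stripes,
					      u64 raid_stripe_len)
{
	u64 mask = (1ULL << 24) - 1;	/* 16MB - 1 */
	u64 stripe_size = max_chunk_size;

	do_div(stripe_size, data_stripes);
	/* bump up to the next 16MB boundary */
	stripe_size = (stripe_size + mask) & ~mask;
	/* then chop to a multiple of the RAID stripe length */
	do_div(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	/* e.g. 10GB over 3 data stripes: ~3413MB, rounded up to 3424MB */
	return stripe_size;
}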
4243 
4244 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4245 				struct btrfs_root *extent_root,
4246 				u64 chunk_offset, u64 chunk_size)
4247 {
4248 	struct btrfs_key key;
4249 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4250 	struct btrfs_device *device;
4251 	struct btrfs_chunk *chunk;
4252 	struct btrfs_stripe *stripe;
4253 	struct extent_map_tree *em_tree;
4254 	struct extent_map *em;
4255 	struct map_lookup *map;
4256 	size_t item_size;
4257 	u64 dev_offset;
4258 	u64 stripe_size;
4259 	int i = 0;
4260 	int ret;
4261 
4262 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4263 	read_lock(&em_tree->lock);
4264 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4265 	read_unlock(&em_tree->lock);
4266 
4267 	if (!em) {
4268 		btrfs_crit(extent_root->fs_info, "unable to find logical "
4269 			   "%Lu len %Lu", chunk_offset, chunk_size);
4270 		return -EINVAL;
4271 	}
4272 
4273 	if (em->start != chunk_offset || em->len != chunk_size) {
4274 		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4275 			  " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
4276 			  chunk_size, em->start, em->len);
4277 		free_extent_map(em);
4278 		return -EINVAL;
4279 	}
4280 
4281 	map = (struct map_lookup *)em->bdev;
4282 	item_size = btrfs_chunk_item_size(map->num_stripes);
4283 	stripe_size = em->orig_block_len;
4284 
4285 	chunk = kzalloc(item_size, GFP_NOFS);
4286 	if (!chunk) {
4287 		ret = -ENOMEM;
4288 		goto out;
4289 	}
4290 
4291 	for (i = 0; i < map->num_stripes; i++) {
4292 		device = map->stripes[i].dev;
4293 		dev_offset = map->stripes[i].physical;
4294 
4295 		device->bytes_used += stripe_size;
4296 		ret = btrfs_update_device(trans, device);
4297 		if (ret)
4298 			goto out;
4299 		ret = btrfs_alloc_dev_extent(trans, device,
4300 					     chunk_root->root_key.objectid,
4301 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4302 					     chunk_offset, dev_offset,
4303 					     stripe_size);
4304 		if (ret)
4305 			goto out;
4306 	}
4307 
4308 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4309 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4310 						   map->num_stripes);
4311 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4312 
4313 	stripe = &chunk->stripe;
4314 	for (i = 0; i < map->num_stripes; i++) {
4315 		device = map->stripes[i].dev;
4316 		dev_offset = map->stripes[i].physical;
4317 
4318 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4319 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4320 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4321 		stripe++;
4322 	}
4323 
4324 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4325 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4326 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4327 	btrfs_set_stack_chunk_type(chunk, map->type);
4328 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4329 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4330 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4331 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4332 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4333 
4334 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4335 	key.type = BTRFS_CHUNK_ITEM_KEY;
4336 	key.offset = chunk_offset;
4337 
4338 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4339 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4340 		/*
4341 		 * TODO: Cleanup of inserted chunk root in case of
4342 		 * failure.
4343 		 */
4344 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4345 					     item_size);
4346 	}
4347 
4348 out:
4349 	kfree(chunk);
4350 	free_extent_map(em);
4351 	return ret;
4352 }
4353 
4354 /*
4355  * Chunk allocation falls into two parts. The first part does the work
4356  * that makes the newly allocated chunk usable, but does not do any
4357  * operation that modifies the chunk tree. The second part does the work
4358  * that requires modifying the chunk tree. This division is important for the
4359  * bootstrap process of adding storage to a seed btrfs.
4360  */
4361 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4362 		      struct btrfs_root *extent_root, u64 type)
4363 {
4364 	u64 chunk_offset;
4365 
4366 	chunk_offset = find_next_chunk(extent_root->fs_info);
4367 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4368 }
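
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * two parts described above fit together like this (hypothetical caller;
 * in the real code part two runs later, when the transaction's pending
 * chunk list is processed, using the offset/size recorded for the chunk):
 */
static int __maybe_unused example_two_part_alloc(struct btrfs_trans_handle *trans,
						 struct btrfs_root *extent_root,
						 u64 type, u64 chunk_offset,
						 u64 chunk_size)
{
	int ret;

	/* part one: reserve the logical range and build the in-memory map */
	ret = btrfs_alloc_chunk(trans, extent_root, type);
	if (ret)
		return ret;

	/* part two: insert the chunk item and dev extents */
	return btrfs_finish_chunk_alloc(trans, extent_root, chunk_offset,
					chunk_size);
}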
4369 
4370 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4371 					 struct btrfs_root *root,
4372 					 struct btrfs_device *device)
4373 {
4374 	u64 chunk_offset;
4375 	u64 sys_chunk_offset;
4376 	u64 alloc_profile;
4377 	struct btrfs_fs_info *fs_info = root->fs_info;
4378 	struct btrfs_root *extent_root = fs_info->extent_root;
4379 	int ret;
4380 
4381 	chunk_offset = find_next_chunk(fs_info);
4382 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4383 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4384 				  alloc_profile);
4385 	if (ret)
4386 		return ret;
4387 
4388 	sys_chunk_offset = find_next_chunk(root->fs_info);
4389 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4390 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4391 				  alloc_profile);
4392 	if (ret) {
4393 		btrfs_abort_transaction(trans, root, ret);
4394 		goto out;
4395 	}
4396 
4397 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4398 	if (ret)
4399 		btrfs_abort_transaction(trans, root, ret);
4400 out:
4401 	return ret;
4402 }
4403 
4404 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4405 {
4406 	struct extent_map *em;
4407 	struct map_lookup *map;
4408 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4409 	int readonly = 0;
4410 	int i;
4411 
4412 	read_lock(&map_tree->map_tree.lock);
4413 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4414 	read_unlock(&map_tree->map_tree.lock);
4415 	if (!em)
4416 		return 1;
4417 
4418 	if (btrfs_test_opt(root, DEGRADED)) {
4419 		free_extent_map(em);
4420 		return 0;
4421 	}
4422 
4423 	map = (struct map_lookup *)em->bdev;
4424 	for (i = 0; i < map->num_stripes; i++) {
4425 		if (!map->stripes[i].dev->writeable) {
4426 			readonly = 1;
4427 			break;
4428 		}
4429 	}
4430 	free_extent_map(em);
4431 	return readonly;
4432 }
4433 
4434 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4435 {
4436 	extent_map_tree_init(&tree->map_tree);
4437 }
4438 
4439 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4440 {
4441 	struct extent_map *em;
4442 
4443 	while (1) {
4444 		write_lock(&tree->map_tree.lock);
4445 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4446 		if (em)
4447 			remove_extent_mapping(&tree->map_tree, em);
4448 		write_unlock(&tree->map_tree.lock);
4449 		if (!em)
4450 			break;
4451 		kfree(em->bdev);
4452 		/* once for us */
4453 		free_extent_map(em);
4454 		/* once for the tree */
4455 		free_extent_map(em);
4456 	}
4457 }
4458 
4459 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4460 {
4461 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4462 	struct extent_map *em;
4463 	struct map_lookup *map;
4464 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4465 	int ret;
4466 
4467 	read_lock(&em_tree->lock);
4468 	em = lookup_extent_mapping(em_tree, logical, len);
4469 	read_unlock(&em_tree->lock);
4470 
4471 	/*
4472 	 * We could return errors for these cases, but that could get ugly and
4473 	 * we'd probably end up doing the same thing anyway: nothing else but
4474 	 * exit. So return 1 so the callers don't try to use other copies.
4475 	 */
4476 	if (!em) {
4477 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu\n", logical,
4478 			    logical+len);
4479 		return 1;
4480 	}
4481 
4482 	if (em->start > logical || em->start + em->len < logical) {
4483 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4484 			    "%Lu-%Lu\n", logical, logical+len, em->start,
4485 			    em->start + em->len);
4486 		return 1;
4487 	}
4488 
4489 	map = (struct map_lookup *)em->bdev;
4490 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4491 		ret = map->num_stripes;
4492 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4493 		ret = map->sub_stripes;
4494 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4495 		ret = 2;
4496 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4497 		ret = 3;
4498 	else
4499 		ret = 1;
4500 	free_extent_map(em);
4501 
4502 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4503 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4504 		ret++;
4505 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4506 
4507 	return ret;
4508 }
4509 
4510 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4511 				    struct btrfs_mapping_tree *map_tree,
4512 				    u64 logical)
4513 {
4514 	struct extent_map *em;
4515 	struct map_lookup *map;
4516 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4517 	unsigned long len = root->sectorsize;
4518 
4519 	read_lock(&em_tree->lock);
4520 	em = lookup_extent_mapping(em_tree, logical, len);
4521 	read_unlock(&em_tree->lock);
4522 	BUG_ON(!em);
4523 
4524 	BUG_ON(em->start > logical || em->start + em->len < logical);
4525 	map = (struct map_lookup *)em->bdev;
4526 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4527 			 BTRFS_BLOCK_GROUP_RAID6)) {
4528 		len = map->stripe_len * nr_data_stripes(map);
4529 	}
4530 	free_extent_map(em);
4531 	return len;
4532 }
4533 
4534 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4535 			   u64 logical, u64 len, int mirror_num)
4536 {
4537 	struct extent_map *em;
4538 	struct map_lookup *map;
4539 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4540 	int ret = 0;
4541 
4542 	read_lock(&em_tree->lock);
4543 	em = lookup_extent_mapping(em_tree, logical, len);
4544 	read_unlock(&em_tree->lock);
4545 	BUG_ON(!em);
4546 
4547 	BUG_ON(em->start > logical || em->start + em->len < logical);
4548 	map = (struct map_lookup *)em->bdev;
4549 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4550 			 BTRFS_BLOCK_GROUP_RAID6))
4551 		ret = 1;
4552 	free_extent_map(em);
4553 	return ret;
4554 }
4555 
4556 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4557 			    struct map_lookup *map, int first, int num,
4558 			    int optimal, int dev_replace_is_ongoing)
4559 {
4560 	int i;
4561 	int tolerance;
4562 	struct btrfs_device *srcdev;
4563 
4564 	if (dev_replace_is_ongoing &&
4565 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4566 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4567 		srcdev = fs_info->dev_replace.srcdev;
4568 	else
4569 		srcdev = NULL;
4570 
4571 	/*
4572 	 * try to avoid the drive that is the source drive for a
4573 	 * dev-replace procedure, only choose it if no other non-missing
4574 	 * mirror is available
4575 	 */
4576 	for (tolerance = 0; tolerance < 2; tolerance++) {
4577 		if (map->stripes[optimal].dev->bdev &&
4578 		    (tolerance || map->stripes[optimal].dev != srcdev))
4579 			return optimal;
4580 		for (i = first; i < first + num; i++) {
4581 			if (map->stripes[i].dev->bdev &&
4582 			    (tolerance || map->stripes[i].dev != srcdev))
4583 				return i;
4584 		}
4585 	}
4586 
4587 	/* we couldn't find one that doesn't fail.  Just return something
4588 	 * and the io error handling code will clean up eventually
4589 	 */
4590 	return optimal;
4591 }
4592 
4593 static inline int parity_smaller(u64 a, u64 b)
4594 {
4595 	return a > b;
4596 }
4597 
4598 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4599 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4600 {
4601 	struct btrfs_bio_stripe s;
4602 	int i;
4603 	u64 l;
4604 	int again = 1;
4605 
4606 	while (again) {
4607 		again = 0;
4608 		for (i = 0; i < bbio->num_stripes - 1; i++) {
4609 			if (parity_smaller(raid_map[i], raid_map[i+1])) {
4610 				s = bbio->stripes[i];
4611 				l = raid_map[i];
4612 				bbio->stripes[i] = bbio->stripes[i+1];
4613 				raid_map[i] = raid_map[i+1];
4614 				bbio->stripes[i+1] = s;
4615 				raid_map[i+1] = l;
4616 				again = 1;
4617 			}
4618 		}
4619 	}
4620 }
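
/*
 * Editor's note: the parity slots always end up last because RAID5_P_STRIPE
 * and RAID6_Q_STRIPE are (u64)-2 and (u64)-1 -- larger than any real
 * logical address -- so parity_smaller() bubbles them past every data
 * stripe in the raid_map.
 */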
4621 
4622 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4623 			     u64 logical, u64 *length,
4624 			     struct btrfs_bio **bbio_ret,
4625 			     int mirror_num, u64 **raid_map_ret)
4626 {
4627 	struct extent_map *em;
4628 	struct map_lookup *map;
4629 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4630 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4631 	u64 offset;
4632 	u64 stripe_offset;
4633 	u64 stripe_end_offset;
4634 	u64 stripe_nr;
4635 	u64 stripe_nr_orig;
4636 	u64 stripe_nr_end;
4637 	u64 stripe_len;
4638 	u64 *raid_map = NULL;
4639 	int stripe_index;
4640 	int i;
4641 	int ret = 0;
4642 	int num_stripes;
4643 	int max_errors = 0;
4644 	struct btrfs_bio *bbio = NULL;
4645 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4646 	int dev_replace_is_ongoing = 0;
4647 	int num_alloc_stripes;
4648 	int patch_the_first_stripe_for_dev_replace = 0;
4649 	u64 physical_to_patch_in_first_stripe = 0;
4650 	u64 raid56_full_stripe_start = (u64)-1;
4651 
4652 	read_lock(&em_tree->lock);
4653 	em = lookup_extent_mapping(em_tree, logical, *length);
4654 	read_unlock(&em_tree->lock);
4655 
4656 	if (!em) {
4657 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4658 			logical, *length);
4659 		return -EINVAL;
4660 	}
4661 
4662 	if (em->start > logical || em->start + em->len < logical) {
4663 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4664 			   "found %Lu-%Lu\n", logical, em->start,
4665 			   em->start + em->len);
4666 		return -EINVAL;
4667 	}
4668 
4669 	map = (struct map_lookup *)em->bdev;
4670 	offset = logical - em->start;
4671 
4672 	stripe_len = map->stripe_len;
4673 	stripe_nr = offset;
4674 	/*
4675 	 * stripe_nr counts the total number of stripes we have to stride
4676 	 * to get to this block
4677 	 */
4678 	do_div(stripe_nr, stripe_len);
4679 
4680 	stripe_offset = stripe_nr * stripe_len;
4681 	BUG_ON(offset < stripe_offset);
4682 
4683 	/* stripe_offset is the offset of this block in its stripe */
4684 	stripe_offset = offset - stripe_offset;
4685 
4686 	/* if we're here for raid56, we need to know the stripe aligned start */
4687 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4688 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4689 		raid56_full_stripe_start = offset;
4690 
4691 		/* allow a write of a full stripe, but make sure we don't
4692 		 * allow straddling of stripes
4693 		 */
4694 		do_div(raid56_full_stripe_start, full_stripe_len);
4695 		raid56_full_stripe_start *= full_stripe_len;
4696 	}
4697 
4698 	if (rw & REQ_DISCARD) {
4699 		/* we don't discard raid56 yet */
4700 		if (map->type &
4701 		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4702 			ret = -EOPNOTSUPP;
4703 			goto out;
4704 		}
4705 		*length = min_t(u64, em->len - offset, *length);
4706 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4707 		u64 max_len;
4708 		/* For writes to RAID[56], allow a full stripeset across all disks.
4709 		   For other RAID types and for RAID[56] reads, just allow a single
4710 		   stripe (on a single disk). */
4711 		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4712 		    (rw & REQ_WRITE)) {
4713 			max_len = stripe_len * nr_data_stripes(map) -
4714 				(offset - raid56_full_stripe_start);
4715 		} else {
4716 			/* we limit the length of each bio to what fits in a stripe */
4717 			max_len = stripe_len - stripe_offset;
4718 		}
4719 		*length = min_t(u64, em->len - offset, max_len);
4720 	} else {
4721 		*length = em->len - offset;
4722 	}
4723 
4724 	/* This is for when we're called from btrfs_merge_bio_hook() and all
4725 	   it cares about is the length */
4726 	if (!bbio_ret)
4727 		goto out;
4728 
4729 	btrfs_dev_replace_lock(dev_replace);
4730 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4731 	if (!dev_replace_is_ongoing)
4732 		btrfs_dev_replace_unlock(dev_replace);
4733 
4734 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4735 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4736 	    dev_replace->tgtdev != NULL) {
4737 		/*
4738 		 * in the dev-replace case, for the repair case (that's the only
4739 		 * case where the mirror is selected explicitly when
4740 		 * calling btrfs_map_block), blocks left of the left cursor
4741 		 * can also be read from the target drive.
4742 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
4743 		 * the last one to the array of stripes. For READ, it also
4744 		 * needs to be supported using the same mirror number.
4745 		 * If the requested block is not left of the left cursor,
4746 		 * EIO is returned. This can happen because btrfs_num_copies()
4747 		 * returns one more in the dev-replace case.
4748 		 */
4749 		u64 tmp_length = *length;
4750 		struct btrfs_bio *tmp_bbio = NULL;
4751 		int tmp_num_stripes;
4752 		u64 srcdev_devid = dev_replace->srcdev->devid;
4753 		int index_srcdev = 0;
4754 		int found = 0;
4755 		u64 physical_of_found = 0;
4756 
4757 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4758 			     logical, &tmp_length, &tmp_bbio, 0, NULL);
4759 		if (ret) {
4760 			WARN_ON(tmp_bbio != NULL);
4761 			goto out;
4762 		}
4763 
4764 		tmp_num_stripes = tmp_bbio->num_stripes;
4765 		if (mirror_num > tmp_num_stripes) {
4766 			/*
4767 			 * REQ_GET_READ_MIRRORS does not contain this
4768 			 * mirror, which means that the requested area
4769 			 * is not left of the left cursor
4770 			 */
4771 			ret = -EIO;
4772 			kfree(tmp_bbio);
4773 			goto out;
4774 		}
4775 
4776 		/*
4777 		 * process the rest of the function using the mirror_num
4778 		 * of the source drive. Therefore look it up first.
4779 		 * At the end, patch the device pointer to that of the
4780 		 * target drive.
4781 		 */
4782 		for (i = 0; i < tmp_num_stripes; i++) {
4783 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4784 				/*
4785 				 * In case of DUP, in order to keep it
4786 				 * simple, only add the mirror with the
4787 				 * lowest physical address
4788 				 */
4789 				if (found &&
4790 				    physical_of_found <=
4791 				     tmp_bbio->stripes[i].physical)
4792 					continue;
4793 				index_srcdev = i;
4794 				found = 1;
4795 				physical_of_found =
4796 					tmp_bbio->stripes[i].physical;
4797 			}
4798 		}
4799 
4800 		if (found) {
4801 			mirror_num = index_srcdev + 1;
4802 			patch_the_first_stripe_for_dev_replace = 1;
4803 			physical_to_patch_in_first_stripe = physical_of_found;
4804 		} else {
4805 			WARN_ON(1);
4806 			ret = -EIO;
4807 			kfree(tmp_bbio);
4808 			goto out;
4809 		}
4810 
4811 		kfree(tmp_bbio);
4812 	} else if (mirror_num > map->num_stripes) {
4813 		mirror_num = 0;
4814 	}
4815 
4816 	num_stripes = 1;
4817 	stripe_index = 0;
4818 	stripe_nr_orig = stripe_nr;
4819 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4820 	do_div(stripe_nr_end, map->stripe_len);
4821 	stripe_end_offset = stripe_nr_end * map->stripe_len -
4822 			    (offset + *length);
4823 
4824 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4825 		if (rw & REQ_DISCARD)
4826 			num_stripes = min_t(u64, map->num_stripes,
4827 					    stripe_nr_end - stripe_nr_orig);
4828 		stripe_index = do_div(stripe_nr, map->num_stripes);
4829 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4830 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4831 			num_stripes = map->num_stripes;
4832 		else if (mirror_num)
4833 			stripe_index = mirror_num - 1;
4834 		else {
4835 			stripe_index = find_live_mirror(fs_info, map, 0,
4836 					    map->num_stripes,
4837 					    current->pid % map->num_stripes,
4838 					    dev_replace_is_ongoing);
4839 			mirror_num = stripe_index + 1;
4840 		}
4841 
4842 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4843 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4844 			num_stripes = map->num_stripes;
4845 		} else if (mirror_num) {
4846 			stripe_index = mirror_num - 1;
4847 		} else {
4848 			mirror_num = 1;
4849 		}
4850 
4851 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4852 		int factor = map->num_stripes / map->sub_stripes;
4853 
4854 		stripe_index = do_div(stripe_nr, factor);
4855 		stripe_index *= map->sub_stripes;
4856 
4857 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4858 			num_stripes = map->sub_stripes;
4859 		else if (rw & REQ_DISCARD)
4860 			num_stripes = min_t(u64, map->sub_stripes *
4861 					    (stripe_nr_end - stripe_nr_orig),
4862 					    map->num_stripes);
4863 		else if (mirror_num)
4864 			stripe_index += mirror_num - 1;
4865 		else {
4866 			int old_stripe_index = stripe_index;
4867 			stripe_index = find_live_mirror(fs_info, map,
4868 					      stripe_index,
4869 					      map->sub_stripes, stripe_index +
4870 					      current->pid % map->sub_stripes,
4871 					      dev_replace_is_ongoing);
4872 			mirror_num = stripe_index - old_stripe_index + 1;
4873 		}
4874 
4875 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4876 				BTRFS_BLOCK_GROUP_RAID6)) {
4877 		u64 tmp;
4878 
4879 		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4880 		    && raid_map_ret) {
4881 			int i, rot;
4882 
4883 			/* push stripe_nr back to the start of the full stripe */
4884 			stripe_nr = raid56_full_stripe_start;
4885 			do_div(stripe_nr, stripe_len);
4886 
4887 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4888 
4889 			/* RAID[56] write or recovery. Return all stripes */
4890 			num_stripes = map->num_stripes;
4891 			max_errors = nr_parity_stripes(map);
4892 
4893 			raid_map = kmalloc(sizeof(u64) * num_stripes,
4894 					   GFP_NOFS);
4895 			if (!raid_map) {
4896 				ret = -ENOMEM;
4897 				goto out;
4898 			}
4899 
4900 			/* Work out the disk rotation on this stripe-set */
4901 			tmp = stripe_nr;
4902 			rot = do_div(tmp, num_stripes);
4903 
4904 			/* Fill in the logical address of each stripe */
4905 			tmp = stripe_nr * nr_data_stripes(map);
4906 			for (i = 0; i < nr_data_stripes(map); i++)
4907 				raid_map[(i+rot) % num_stripes] =
4908 					em->start + (tmp + i) * map->stripe_len;
4909 
4910 			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4911 			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4912 				raid_map[(i+rot+1) % num_stripes] =
4913 					RAID6_Q_STRIPE;
4914 
4915 			*length = map->stripe_len;
4916 			stripe_index = 0;
4917 			stripe_offset = 0;
4918 		} else {
4919 			/*
4920 			 * Mirror #0 or #1 means the original data block.
4921 			 * Mirror #2 is RAID5 parity block.
4922 			 * Mirror #3 is RAID6 Q block.
4923 			 */
4924 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4925 			if (mirror_num > 1)
4926 				stripe_index = nr_data_stripes(map) +
4927 						mirror_num - 2;
4928 
4929 			/* We distribute the parity blocks across stripes */
4930 			tmp = stripe_nr + stripe_index;
4931 			stripe_index = do_div(tmp, map->num_stripes);
4932 		}
4933 	} else {
4934 		/*
4935 		 * after this do_div call, stripe_nr is the number of stripes
4936 		 * on this device we have to walk to find the data, and
4937 		 * stripe_index is the number of our device in the stripe array
4938 		 */
4939 		stripe_index = do_div(stripe_nr, map->num_stripes);
4940 		mirror_num = stripe_index + 1;
4941 	}
4942 	BUG_ON(stripe_index >= map->num_stripes);
4943 
4944 	num_alloc_stripes = num_stripes;
4945 	if (dev_replace_is_ongoing) {
4946 		if (rw & (REQ_WRITE | REQ_DISCARD))
4947 			num_alloc_stripes <<= 1;
4948 		if (rw & REQ_GET_READ_MIRRORS)
4949 			num_alloc_stripes++;
4950 	}
4951 	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4952 	if (!bbio) {
4953 		kfree(raid_map);
4954 		ret = -ENOMEM;
4955 		goto out;
4956 	}
4957 	atomic_set(&bbio->error, 0);
4958 
4959 	if (rw & REQ_DISCARD) {
4960 		int factor = 0;
4961 		int sub_stripes = 0;
4962 		u64 stripes_per_dev = 0;
4963 		u32 remaining_stripes = 0;
4964 		u32 last_stripe = 0;
4965 
4966 		if (map->type &
4967 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4968 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4969 				sub_stripes = 1;
4970 			else
4971 				sub_stripes = map->sub_stripes;
4972 
4973 			factor = map->num_stripes / sub_stripes;
4974 			stripes_per_dev = div_u64_rem(stripe_nr_end -
4975 						      stripe_nr_orig,
4976 						      factor,
4977 						      &remaining_stripes);
4978 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4979 			last_stripe *= sub_stripes;
4980 		}
4981 
4982 		for (i = 0; i < num_stripes; i++) {
4983 			bbio->stripes[i].physical =
4984 				map->stripes[stripe_index].physical +
4985 				stripe_offset + stripe_nr * map->stripe_len;
4986 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4987 
4988 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4989 					 BTRFS_BLOCK_GROUP_RAID10)) {
4990 				bbio->stripes[i].length = stripes_per_dev *
4991 							  map->stripe_len;
4992 
4993 				if (i / sub_stripes < remaining_stripes)
4994 					bbio->stripes[i].length +=
4995 						map->stripe_len;
4996 
4997 				/*
4998 				 * Special for the first stripe and
4999 				 * the last stripe:
5000 				 *
5001 				 * |-------|...|-------|
5002 				 *     |----------|
5003 				 *    off     end_off
5004 				 */
5005 				if (i < sub_stripes)
5006 					bbio->stripes[i].length -=
5007 						stripe_offset;
5008 
5009 				if (stripe_index >= last_stripe &&
5010 				    stripe_index <= (last_stripe +
5011 						     sub_stripes - 1))
5012 					bbio->stripes[i].length -=
5013 						stripe_end_offset;
5014 
5015 				if (i == sub_stripes - 1)
5016 					stripe_offset = 0;
5017 			} else
5018 				bbio->stripes[i].length = *length;
5019 
5020 			stripe_index++;
5021 			if (stripe_index == map->num_stripes) {
5022 				/* This could only happen for RAID0/10 */
5023 				stripe_index = 0;
5024 				stripe_nr++;
5025 			}
5026 		}
5027 	} else {
5028 		for (i = 0; i < num_stripes; i++) {
5029 			bbio->stripes[i].physical =
5030 				map->stripes[stripe_index].physical +
5031 				stripe_offset +
5032 				stripe_nr * map->stripe_len;
5033 			bbio->stripes[i].dev =
5034 				map->stripes[stripe_index].dev;
5035 			stripe_index++;
5036 		}
5037 	}
5038 
5039 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
5040 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5041 				 BTRFS_BLOCK_GROUP_RAID10 |
5042 				 BTRFS_BLOCK_GROUP_RAID5 |
5043 				 BTRFS_BLOCK_GROUP_DUP)) {
5044 			max_errors = 1;
5045 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5046 			max_errors = 2;
5047 		}
5048 	}
5049 
5050 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5051 	    dev_replace->tgtdev != NULL) {
5052 		int index_where_to_add;
5053 		u64 srcdev_devid = dev_replace->srcdev->devid;
5054 
5055 		/*
5056 		 * duplicate the write operations while the dev replace
5057 		 * procedure is running. Since the copying of the old disk
5058 		 * to the new disk takes place at run time while the
5059 		 * filesystem is mounted writable, the regular write
5060 		 * operations to the old disk have to be duplicated to go
5061 		 * to the new disk as well.
5062 		 * Note that device->missing is handled by the caller, and
5063 		 * that the write to the old disk is already set up in the
5064 		 * stripes array.
5065 		 */
5066 		index_where_to_add = num_stripes;
5067 		for (i = 0; i < num_stripes; i++) {
5068 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5069 				/* write to new disk, too */
5070 				struct btrfs_bio_stripe *new =
5071 					bbio->stripes + index_where_to_add;
5072 				struct btrfs_bio_stripe *old =
5073 					bbio->stripes + i;
5074 
5075 				new->physical = old->physical;
5076 				new->length = old->length;
5077 				new->dev = dev_replace->tgtdev;
5078 				index_where_to_add++;
5079 				max_errors++;
5080 			}
5081 		}
5082 		num_stripes = index_where_to_add;
5083 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5084 		   dev_replace->tgtdev != NULL) {
5085 		u64 srcdev_devid = dev_replace->srcdev->devid;
5086 		int index_srcdev = 0;
5087 		int found = 0;
5088 		u64 physical_of_found = 0;
5089 
5090 		/*
5091 		 * During the dev-replace procedure, the target drive can
5092 		 * also be used to read data in case it is needed to repair
5093 		 * a corrupt block elsewhere. This is possible if the
5094 		 * requested area is left of the left cursor. In this area,
5095 		 * the target drive is a full copy of the source drive.
5096 		 */
5097 		for (i = 0; i < num_stripes; i++) {
5098 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5099 				/*
5100 				 * In case of DUP, in order to keep it
5101 				 * simple, only add the mirror with the
5102 				 * lowest physical address
5103 				 */
5104 				if (found &&
5105 				    physical_of_found <=
5106 				     bbio->stripes[i].physical)
5107 					continue;
5108 				index_srcdev = i;
5109 				found = 1;
5110 				physical_of_found = bbio->stripes[i].physical;
5111 			}
5112 		}
5113 		if (found) {
5114 			u64 length = map->stripe_len;
5115 
5116 			if (physical_of_found + length <=
5117 			    dev_replace->cursor_left) {
5118 				struct btrfs_bio_stripe *tgtdev_stripe =
5119 					bbio->stripes + num_stripes;
5120 
5121 				tgtdev_stripe->physical = physical_of_found;
5122 				tgtdev_stripe->length =
5123 					bbio->stripes[index_srcdev].length;
5124 				tgtdev_stripe->dev = dev_replace->tgtdev;
5125 
5126 				num_stripes++;
5127 			}
5128 		}
5129 	}
5130 
5131 	*bbio_ret = bbio;
5132 	bbio->num_stripes = num_stripes;
5133 	bbio->max_errors = max_errors;
5134 	bbio->mirror_num = mirror_num;
5135 
5136 	/*
5137 	 * this handles the case where REQ_READ && dev_replace_is_ongoing &&
5138 	 * mirror_num == num_stripes + 1 && the dev_replace target drive is
5139 	 * available as a mirror
5140 	 */
5141 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5142 		WARN_ON(num_stripes > 1);
5143 		bbio->stripes[0].dev = dev_replace->tgtdev;
5144 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5145 		bbio->mirror_num = map->num_stripes + 1;
5146 	}
5147 	if (raid_map) {
5148 		sort_parity_stripes(bbio, raid_map);
5149 		*raid_map_ret = raid_map;
5150 	}
5151 out:
5152 	if (dev_replace_is_ongoing)
5153 		btrfs_dev_replace_unlock(dev_replace);
5154 	free_extent_map(em);
5155 	return ret;
5156 }
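
/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * isolates the core striping arithmetic used above for the simple striped
 * case: do_div() leaves the quotient in its first argument and returns the
 * remainder, so one call yields both "how far into the device" and "which
 * device".  Names and numbers are hypothetical.
 */
static void __maybe_unused example_stripe_math(u64 offset, u64 stripe_len,
					       int num_stripes)
{
	u64 stripe_nr = offset;
	u64 stripe_offset;
	int stripe_index;

	do_div(stripe_nr, stripe_len);		/* stripes from chunk start */
	stripe_offset = offset - stripe_nr * stripe_len;
	stripe_index = do_div(stripe_nr, num_stripes);	/* which device */

	/*
	 * e.g. offset=200K, stripe_len=64K, num_stripes=3: stripe_nr is
	 * first 3 (8K into the 4th stripe), then 1 with stripe_index 0.
	 */
	(void)stripe_offset;
	(void)stripe_index;
}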
5157 
5158 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5159 		      u64 logical, u64 *length,
5160 		      struct btrfs_bio **bbio_ret, int mirror_num)
5161 {
5162 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5163 				 mirror_num, NULL);
5164 }
5165 
5166 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5167 		     u64 chunk_start, u64 physical, u64 devid,
5168 		     u64 **logical, int *naddrs, int *stripe_len)
5169 {
5170 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5171 	struct extent_map *em;
5172 	struct map_lookup *map;
5173 	u64 *buf;
5174 	u64 bytenr;
5175 	u64 length;
5176 	u64 stripe_nr;
5177 	u64 rmap_len;
5178 	int i, j, nr = 0;
5179 
5180 	read_lock(&em_tree->lock);
5181 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5182 	read_unlock(&em_tree->lock);
5183 
5184 	if (!em) {
5185 		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
5186 		       chunk_start);
5187 		return -EIO;
5188 	}
5189 
5190 	if (em->start != chunk_start) {
5191 		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
5192 		       em->start, chunk_start);
5193 		free_extent_map(em);
5194 		return -EIO;
5195 	}
5196 	map = (struct map_lookup *)em->bdev;
5197 
5198 	length = em->len;
5199 	rmap_len = map->stripe_len;
5200 
5201 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5202 		do_div(length, map->num_stripes / map->sub_stripes);
5203 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5204 		do_div(length, map->num_stripes);
5205 	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5206 			      BTRFS_BLOCK_GROUP_RAID6)) {
5207 		do_div(length, nr_data_stripes(map));
5208 		rmap_len = map->stripe_len * nr_data_stripes(map);
5209 	}
5210 
5211 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5212 	BUG_ON(!buf); /* -ENOMEM */
5213 
5214 	for (i = 0; i < map->num_stripes; i++) {
5215 		if (devid && map->stripes[i].dev->devid != devid)
5216 			continue;
5217 		if (map->stripes[i].physical > physical ||
5218 		    map->stripes[i].physical + length <= physical)
5219 			continue;
5220 
5221 		stripe_nr = physical - map->stripes[i].physical;
5222 		do_div(stripe_nr, map->stripe_len);
5223 
5224 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5225 			stripe_nr = stripe_nr * map->num_stripes + i;
5226 			do_div(stripe_nr, map->sub_stripes);
5227 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5228 			stripe_nr = stripe_nr * map->num_stripes + i;
5229 		} /* else if RAID[56], multiply by nr_data_stripes().
5230 		   * Alternatively, just use rmap_len below instead of
5231 		   * map->stripe_len */
5232 
5233 		bytenr = chunk_start + stripe_nr * rmap_len;
5234 		WARN_ON(nr >= map->num_stripes);
5235 		for (j = 0; j < nr; j++) {
5236 			if (buf[j] == bytenr)
5237 				break;
5238 		}
5239 		if (j == nr) {
5240 			WARN_ON(nr >= map->num_stripes);
5241 			buf[nr++] = bytenr;
5242 		}
5243 	}
5244 
5245 	*logical = buf;
5246 	*naddrs = nr;
5247 	*stripe_len = rmap_len;
5248 
5249 	free_extent_map(em);
5250 	return 0;
5251 }
5252 
5253 static void btrfs_end_bio(struct bio *bio, int err)
5254 {
5255 	struct btrfs_bio *bbio = bio->bi_private;
5256 	int is_orig_bio = 0;
5257 
5258 	if (err) {
5259 		atomic_inc(&bbio->error);
5260 		if (err == -EIO || err == -EREMOTEIO) {
5261 			unsigned int stripe_index =
5262 				btrfs_io_bio(bio)->stripe_index;
5263 			struct btrfs_device *dev;
5264 
5265 			BUG_ON(stripe_index >= bbio->num_stripes);
5266 			dev = bbio->stripes[stripe_index].dev;
5267 			if (dev->bdev) {
5268 				if (bio->bi_rw & WRITE)
5269 					btrfs_dev_stat_inc(dev,
5270 						BTRFS_DEV_STAT_WRITE_ERRS);
5271 				else
5272 					btrfs_dev_stat_inc(dev,
5273 						BTRFS_DEV_STAT_READ_ERRS);
5274 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5275 					btrfs_dev_stat_inc(dev,
5276 						BTRFS_DEV_STAT_FLUSH_ERRS);
5277 				btrfs_dev_stat_print_on_error(dev);
5278 			}
5279 		}
5280 	}
5281 
5282 	if (bio == bbio->orig_bio)
5283 		is_orig_bio = 1;
5284 
5285 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5286 		if (!is_orig_bio) {
5287 			bio_put(bio);
5288 			bio = bbio->orig_bio;
5289 		}
5290 		bio->bi_private = bbio->private;
5291 		bio->bi_end_io = bbio->end_io;
5292 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5293 		/* only send an error to the higher layers if it is
5294 		 * beyond the tolerance of the btrfs bio
5295 		 */
5296 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5297 			err = -EIO;
5298 		} else {
5299 			/*
5300 			 * this bio is actually up to date, we didn't
5301 			 * go over the max number of errors
5302 			 */
5303 			set_bit(BIO_UPTODATE, &bio->bi_flags);
5304 			err = 0;
5305 		}
5306 		kfree(bbio);
5307 
5308 		bio_endio(bio, err);
5309 	} else if (!is_orig_bio) {
5310 		bio_put(bio);
5311 	}
5312 }
5313 
5314 struct async_sched {
5315 	struct bio *bio;
5316 	int rw;
5317 	struct btrfs_fs_info *info;
5318 	struct btrfs_work work;
5319 };
5320 
5321 /*
5322  * see run_scheduled_bios for a description of why bios are collected for
5323  * async submit.
5324  *
5325  * This will add one bio to the pending list for a device and make sure
5326  * the work struct is scheduled.
5327  */
5328 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5329 					struct btrfs_device *device,
5330 					int rw, struct bio *bio)
5331 {
5332 	int should_queue = 1;
5333 	struct btrfs_pending_bios *pending_bios;
5334 
5335 	if (device->missing || !device->bdev) {
5336 		bio_endio(bio, -EIO);
5337 		return;
5338 	}
5339 
5340 	/* don't bother with additional async steps for reads, right now */
5341 	if (!(rw & REQ_WRITE)) {
5342 		bio_get(bio);
5343 		btrfsic_submit_bio(rw, bio);
5344 		bio_put(bio);
5345 		return;
5346 	}
5347 
5348 	/*
5349 	 * nr_async_bios allows us to reliably return congestion to the
5350 	 * higher layers.  Otherwise, the async bio makes it appear we have
5351 	 * made progress against dirty pages when we've really just put it
5352 	 * on a queue for later
5353 	 */
5354 	atomic_inc(&root->fs_info->nr_async_bios);
5355 	WARN_ON(bio->bi_next);
5356 	bio->bi_next = NULL;
5357 	bio->bi_rw |= rw;
5358 
5359 	spin_lock(&device->io_lock);
5360 	if (bio->bi_rw & REQ_SYNC)
5361 		pending_bios = &device->pending_sync_bios;
5362 	else
5363 		pending_bios = &device->pending_bios;
5364 
5365 	if (pending_bios->tail)
5366 		pending_bios->tail->bi_next = bio;
5367 
5368 	pending_bios->tail = bio;
5369 	if (!pending_bios->head)
5370 		pending_bios->head = bio;
5371 	if (device->running_pending)
5372 		should_queue = 0;
5373 
5374 	spin_unlock(&device->io_lock);
5375 
5376 	if (should_queue)
5377 		btrfs_queue_worker(&root->fs_info->submit_workers,
5378 				   &device->work);
5379 }
5380 
5381 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5382 		       sector_t sector)
5383 {
5384 	struct bio_vec *prev;
5385 	struct request_queue *q = bdev_get_queue(bdev);
5386 	unsigned short max_sectors = queue_max_sectors(q);
5387 	struct bvec_merge_data bvm = {
5388 		.bi_bdev = bdev,
5389 		.bi_sector = sector,
5390 		.bi_rw = bio->bi_rw,
5391 	};
5392 
5393 	if (bio->bi_vcnt == 0) {
5394 		WARN_ON(1);
5395 		return 1;
5396 	}
5397 
5398 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5399 	if (bio_sectors(bio) > max_sectors)
5400 		return 0;
5401 
5402 	if (!q->merge_bvec_fn)
5403 		return 1;
5404 
5405 	bvm.bi_size = bio->bi_size - prev->bv_len;
5406 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5407 		return 0;
5408 	return 1;
5409 }
5410 
5411 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5412 			      struct bio *bio, u64 physical, int dev_nr,
5413 			      int rw, int async)
5414 {
5415 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5416 
5417 	bio->bi_private = bbio;
5418 	btrfs_io_bio(bio)->stripe_index = dev_nr;
5419 	bio->bi_end_io = btrfs_end_bio;
5420 	bio->bi_sector = physical >> 9;
5421 #ifdef DEBUG
5422 	{
5423 		struct rcu_string *name;
5424 
5425 		rcu_read_lock();
5426 		name = rcu_dereference(dev->name);
5427 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5428 			 "(%s id %llu), size=%u\n", rw,
5429 			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5430 			 name->str, dev->devid, bio->bi_size);
5431 		rcu_read_unlock();
5432 	}
5433 #endif
5434 	bio->bi_bdev = dev->bdev;
5435 	if (async)
5436 		btrfs_schedule_bio(root, dev, rw, bio);
5437 	else
5438 		btrfsic_submit_bio(rw, bio);
5439 }
5440 
5441 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5442 			      struct bio *first_bio, struct btrfs_device *dev,
5443 			      int dev_nr, int rw, int async)
5444 {
5445 	struct bio_vec *bvec = first_bio->bi_io_vec;
5446 	struct bio *bio;
5447 	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5448 	u64 physical = bbio->stripes[dev_nr].physical;
5449 
5450 again:
5451 	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5452 	if (!bio)
5453 		return -ENOMEM;
5454 
5455 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5456 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5457 				 bvec->bv_offset) < bvec->bv_len) {
5458 			u64 len = bio->bi_size;
5459 
5460 			atomic_inc(&bbio->stripes_pending);
5461 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5462 					  rw, async);
5463 			physical += len;
5464 			goto again;
5465 		}
5466 		bvec++;
5467 	}
5468 
5469 	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5470 	return 0;
5471 }
5472 
5473 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5474 {
5475 	atomic_inc(&bbio->error);
5476 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5477 		bio->bi_private = bbio->private;
5478 		bio->bi_end_io = bbio->end_io;
5479 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5480 		bio->bi_sector = logical >> 9;
5481 		kfree(bbio);
5482 		bio_endio(bio, -EIO);
5483 	}
5484 }
5485 
5486 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5487 		  int mirror_num, int async_submit)
5488 {
5489 	struct btrfs_device *dev;
5490 	struct bio *first_bio = bio;
5491 	u64 logical = (u64)bio->bi_sector << 9;
5492 	u64 length = 0;
5493 	u64 map_length;
5494 	u64 *raid_map = NULL;
5495 	int ret;
5496 	int dev_nr = 0;
5497 	int total_devs = 1;
5498 	struct btrfs_bio *bbio = NULL;
5499 
5500 	length = bio->bi_size;
5501 	map_length = length;
5502 
5503 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5504 			      mirror_num, &raid_map);
5505 	if (ret) /* -ENOMEM */
5506 		return ret;
5507 
5508 	total_devs = bbio->num_stripes;
5509 	bbio->orig_bio = first_bio;
5510 	bbio->private = first_bio->bi_private;
5511 	bbio->end_io = first_bio->bi_end_io;
5512 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5513 
5514 	if (raid_map) {
5515 		/* In this case, map_length has been set to the length of
5516 		   a single stripe, not the whole write */
5517 		if (rw & WRITE) {
5518 			return raid56_parity_write(root, bio, bbio,
5519 						   raid_map, map_length);
5520 		} else {
5521 			return raid56_parity_recover(root, bio, bbio,
5522 						     raid_map, map_length,
5523 						     mirror_num);
5524 		}
5525 	}
5526 
5527 	if (map_length < length) {
5528 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5529 			logical, length, map_length);
5530 		BUG();
5531 	}
5532 
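	/*
	 * Submit one bio per stripe.  All but the last device get a clone
	 * of first_bio, which must itself stay unsubmitted until the last
	 * iteration so that the remaining clones are built from an
	 * unmodified bio.
	 */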
5533 	while (dev_nr < total_devs) {
5534 		dev = bbio->stripes[dev_nr].dev;
5535 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5536 			bbio_error(bbio, first_bio, logical);
5537 			dev_nr++;
5538 			continue;
5539 		}
5540 
5541 		/*
		 * Check and see if we are OK with this bio based on its size
		 * and its offset within the given device.
5544 		 */
5545 		if (!bio_size_ok(dev->bdev, first_bio,
5546 				 bbio->stripes[dev_nr].physical >> 9)) {
5547 			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5548 						 dev_nr, rw, async_submit);
			BUG_ON(ret); /* -ENOMEM */
5550 			dev_nr++;
5551 			continue;
5552 		}
5553 
5554 		if (dev_nr < total_devs - 1) {
5555 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5556 			BUG_ON(!bio); /* -ENOMEM */
5557 		} else {
5558 			bio = first_bio;
5559 		}
5560 
5561 		submit_stripe_bio(root, bbio, bio,
5562 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5563 				  async_submit);
5564 		dev_nr++;
5565 	}
5566 	return 0;
5567 }
5568 
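/*
 * Look up a device by devid and (optionally) device UUID.  The search
 * starts at the filesystem's own device list and then walks the chain
 * of seed filesystems; if @fsid is given, only lists with a matching
 * fsid are searched.
 */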
5569 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5570 				       u8 *uuid, u8 *fsid)
5571 {
5572 	struct btrfs_device *device;
5573 	struct btrfs_fs_devices *cur_devices;
5574 
5575 	cur_devices = fs_info->fs_devices;
5576 	while (cur_devices) {
5577 		if (!fsid ||
5578 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5579 			device = __find_device(&cur_devices->devices,
5580 					       devid, uuid);
5581 			if (device)
5582 				return device;
5583 		}
5584 		cur_devices = cur_devices->seed;
5585 	}
5586 	return NULL;
5587 }
5588 
5589 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5590 					    u64 devid, u8 *dev_uuid)
5591 {
5592 	struct btrfs_device *device;
5593 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5594 
5595 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
5596 	if (IS_ERR(device))
5597 		return NULL;
5598 
5599 	list_add(&device->dev_list, &fs_devices->devices);
5600 	device->fs_devices = fs_devices;
5601 	fs_devices->num_devices++;
5602 
5603 	device->missing = 1;
5604 	fs_devices->missing_devices++;
5605 
5606 	return device;
5607 }
5608 
5609 /**
5610  * btrfs_alloc_device - allocate struct btrfs_device
5611  * @fs_info:	used only for generating a new devid, can be NULL if
5612  *		devid is provided (i.e. @devid != NULL).
5613  * @devid:	a pointer to devid for this device.  If NULL a new devid
5614  *		is generated.
5615  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
5616  *		is generated.
5617  *
5618  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
5619  * on error.  Returned struct is not linked onto any lists and can be
5620  * destroyed with kfree() right away.
5621  */
5622 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5623 					const u64 *devid,
5624 					const u8 *uuid)
5625 {
5626 	struct btrfs_device *dev;
5627 	u64 tmp;
5628 
5629 	if (!devid && !fs_info) {
5630 		WARN_ON(1);
5631 		return ERR_PTR(-EINVAL);
5632 	}
5633 
5634 	dev = __alloc_device();
5635 	if (IS_ERR(dev))
5636 		return dev;
5637 
5638 	if (devid)
5639 		tmp = *devid;
5640 	else {
5641 		int ret;
5642 
5643 		ret = find_next_devid(fs_info, &tmp);
5644 		if (ret) {
5645 			kfree(dev);
5646 			return ERR_PTR(ret);
5647 		}
5648 	}
5649 	dev->devid = tmp;
5650 
5651 	if (uuid)
5652 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
5653 	else
5654 		generate_random_uuid(dev->uuid);
5655 
5656 	dev->work.func = pending_bios_fn;
5657 
5658 	return dev;
5659 }
5660 
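/*
 * Illustrative use of btrfs_alloc_device() (a sketch only, not code
 * called anywhere in this file): allocate a device with a freshly
 * generated devid and UUID, then release it with kfree(), which is safe
 * because it has not been linked onto any list yet.
 *
 *	struct btrfs_device *dev;
 *
 *	dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	kfree(dev);
 */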
5661 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5662 			  struct extent_buffer *leaf,
5663 			  struct btrfs_chunk *chunk)
5664 {
5665 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5666 	struct map_lookup *map;
5667 	struct extent_map *em;
5668 	u64 logical;
5669 	u64 length;
5670 	u64 devid;
5671 	u8 uuid[BTRFS_UUID_SIZE];
5672 	int num_stripes;
5673 	int ret;
5674 	int i;
5675 
5676 	logical = key->offset;
5677 	length = btrfs_chunk_length(leaf, chunk);
5678 
5679 	read_lock(&map_tree->map_tree.lock);
5680 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5681 	read_unlock(&map_tree->map_tree.lock);
5682 
5683 	/* already mapped? */
5684 	if (em && em->start <= logical && em->start + em->len > logical) {
5685 		free_extent_map(em);
5686 		return 0;
5687 	} else if (em) {
5688 		free_extent_map(em);
5689 	}
5690 
5691 	em = alloc_extent_map();
5692 	if (!em)
5693 		return -ENOMEM;
5694 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5695 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5696 	if (!map) {
5697 		free_extent_map(em);
5698 		return -ENOMEM;
5699 	}
5700 
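	/*
	 * Chunk extent maps never point at a real block device; the bdev
	 * field is reused to carry the map_lookup so that the mapping
	 * code can cast it back when resolving logical addresses.
	 */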
5701 	em->bdev = (struct block_device *)map;
5702 	em->start = logical;
5703 	em->len = length;
5704 	em->orig_start = 0;
5705 	em->block_start = 0;
5706 	em->block_len = em->len;
5707 
5708 	map->num_stripes = num_stripes;
5709 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
5710 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
5711 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5712 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5713 	map->type = btrfs_chunk_type(leaf, chunk);
5714 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5715 	for (i = 0; i < num_stripes; i++) {
5716 		map->stripes[i].physical =
5717 			btrfs_stripe_offset_nr(leaf, chunk, i);
5718 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5719 		read_extent_buffer(leaf, uuid, (unsigned long)
5720 				   btrfs_stripe_dev_uuid_nr(chunk, i),
5721 				   BTRFS_UUID_SIZE);
5722 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5723 							uuid, NULL);
5724 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5725 			kfree(map);
5726 			free_extent_map(em);
5727 			return -EIO;
5728 		}
5729 		if (!map->stripes[i].dev) {
5730 			map->stripes[i].dev =
5731 				add_missing_dev(root, devid, uuid);
5732 			if (!map->stripes[i].dev) {
5733 				kfree(map);
5734 				free_extent_map(em);
5735 				return -EIO;
5736 			}
5737 		}
5738 		map->stripes[i].dev->in_fs_metadata = 1;
5739 	}
5740 
5741 	write_lock(&map_tree->map_tree.lock);
5742 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5743 	write_unlock(&map_tree->map_tree.lock);
5744 	BUG_ON(ret); /* Tree corruption */
5745 	free_extent_map(em);
5746 
5747 	return 0;
5748 }
5749 
5750 static void fill_device_from_item(struct extent_buffer *leaf,
5751 				 struct btrfs_dev_item *dev_item,
5752 				 struct btrfs_device *device)
5753 {
5754 	unsigned long ptr;
5755 
5756 	device->devid = btrfs_device_id(leaf, dev_item);
5757 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5758 	device->total_bytes = device->disk_total_bytes;
5759 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5760 	device->type = btrfs_device_type(leaf, dev_item);
5761 	device->io_align = btrfs_device_io_align(leaf, dev_item);
5762 	device->io_width = btrfs_device_io_width(leaf, dev_item);
5763 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5764 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5765 	device->is_tgtdev_for_dev_replace = 0;
5766 
5767 	ptr = btrfs_device_uuid(dev_item);
5768 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5769 }
5770 
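/*
 * A device item whose fsid differs from the mounted filesystem belongs
 * to a seed filesystem.  Make sure that seed's btrfs_fs_devices is open
 * and hooked into our seed chain: if it is already there we are done,
 * otherwise find it among the scanned filesystems, clone it, open it
 * read-only and prepend it to fs_info->fs_devices->seed.
 */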
5771 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5772 {
5773 	struct btrfs_fs_devices *fs_devices;
5774 	int ret;
5775 
5776 	BUG_ON(!mutex_is_locked(&uuid_mutex));
5777 
5778 	fs_devices = root->fs_info->fs_devices->seed;
5779 	while (fs_devices) {
5780 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5781 			ret = 0;
5782 			goto out;
5783 		}
5784 		fs_devices = fs_devices->seed;
5785 	}
5786 
5787 	fs_devices = find_fsid(fsid);
5788 	if (!fs_devices) {
5789 		ret = -ENOENT;
5790 		goto out;
5791 	}
5792 
5793 	fs_devices = clone_fs_devices(fs_devices);
5794 	if (IS_ERR(fs_devices)) {
5795 		ret = PTR_ERR(fs_devices);
5796 		goto out;
5797 	}
5798 
5799 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5800 				   root->fs_info->bdev_holder);
5801 	if (ret) {
5802 		free_fs_devices(fs_devices);
5803 		goto out;
5804 	}
5805 
5806 	if (!fs_devices->seeding) {
5807 		__btrfs_close_devices(fs_devices);
5808 		free_fs_devices(fs_devices);
5809 		ret = -EINVAL;
5810 		goto out;
5811 	}
5812 
5813 	fs_devices->seed = root->fs_info->fs_devices->seed;
5814 	root->fs_info->fs_devices->seed = fs_devices;
5815 out:
5816 	return ret;
5817 }
5818 
5819 static int read_one_dev(struct btrfs_root *root,
5820 			struct extent_buffer *leaf,
5821 			struct btrfs_dev_item *dev_item)
5822 {
5823 	struct btrfs_device *device;
5824 	u64 devid;
5825 	int ret;
5826 	u8 fs_uuid[BTRFS_UUID_SIZE];
5827 	u8 dev_uuid[BTRFS_UUID_SIZE];
5828 
5829 	devid = btrfs_device_id(leaf, dev_item);
5830 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
5831 			   BTRFS_UUID_SIZE);
5832 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
5833 			   BTRFS_UUID_SIZE);
5834 
5835 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5836 		ret = open_seed_devices(root, fs_uuid);
5837 		if (ret && !btrfs_test_opt(root, DEGRADED))
5838 			return ret;
5839 	}
5840 
5841 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5842 	if (!device || !device->bdev) {
5843 		if (!btrfs_test_opt(root, DEGRADED))
5844 			return -EIO;
5845 
5846 		if (!device) {
5847 			btrfs_warn(root->fs_info, "devid %llu missing", devid);
5848 			device = add_missing_dev(root, devid, dev_uuid);
5849 			if (!device)
5850 				return -ENOMEM;
5851 		} else if (!device->missing) {
			/*
			 * This happens when a device that was properly set
			 * up in the device info lists suddenly goes bad.
			 * device->bdev is NULL, so we have to set
			 * device->missing to 1 here.
			 */
5858 			root->fs_info->fs_devices->missing_devices++;
5859 			device->missing = 1;
5860 		}
5861 	}
5862 
5863 	if (device->fs_devices != root->fs_info->fs_devices) {
5864 		BUG_ON(device->writeable);
5865 		if (device->generation !=
5866 		    btrfs_device_generation(leaf, dev_item))
5867 			return -EINVAL;
5868 	}
5869 
5870 	fill_device_from_item(leaf, dev_item, device);
5871 	device->in_fs_metadata = 1;
5872 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5873 		device->fs_devices->total_rw_bytes += device->total_bytes;
5874 		spin_lock(&root->fs_info->free_chunk_lock);
5875 		root->fs_info->free_chunk_space += device->total_bytes -
5876 			device->bytes_used;
5877 		spin_unlock(&root->fs_info->free_chunk_lock);
5878 	}
5879 	ret = 0;
5880 	return ret;
5881 }
5882 
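/*
 * The superblock's sys_chunk_array packs (struct btrfs_disk_key,
 * struct btrfs_chunk) pairs back to back; only CHUNK_ITEM keys are
 * valid there.  Replay them through read_one_chunk() so that the system
 * chunks can be mapped before the chunk tree itself is readable.
 */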
5883 int btrfs_read_sys_array(struct btrfs_root *root)
5884 {
5885 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5886 	struct extent_buffer *sb;
5887 	struct btrfs_disk_key *disk_key;
5888 	struct btrfs_chunk *chunk;
5889 	u8 *ptr;
5890 	unsigned long sb_ptr;
5891 	int ret = 0;
5892 	u32 num_stripes;
5893 	u32 array_size;
5894 	u32 len = 0;
5895 	u32 cur;
5896 	struct btrfs_key key;
5897 
5898 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5899 					  BTRFS_SUPER_INFO_SIZE);
5900 	if (!sb)
5901 		return -ENOMEM;
5902 	btrfs_set_buffer_uptodate(sb);
5903 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and is used only to read the
	 * system array.  The btrfs_set_buffer_uptodate() call does not
	 * properly mark all of its pages up-to-date when the page size is
	 * larger than the super block: the extent does not cover the whole
	 * page, so check_page_uptodate() does not find all of the page's
	 * extents up-to-date (because of the hole beyond sb), and
	 * write_extent_buffer() then triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate() call to silence the
	 * warning, e.g. on PowerPC 64.
	 */
5916 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5917 		SetPageUptodate(sb->pages[0]);
5918 
5919 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5920 	array_size = btrfs_super_sys_array_size(super_copy);
5921 
5922 	ptr = super_copy->sys_chunk_array;
5923 	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5924 	cur = 0;
5925 
5926 	while (cur < array_size) {
5927 		disk_key = (struct btrfs_disk_key *)ptr;
5928 		btrfs_disk_key_to_cpu(&key, disk_key);
5929 
		len = sizeof(*disk_key);
		ptr += len;
5931 		sb_ptr += len;
5932 		cur += len;
5933 
5934 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5935 			chunk = (struct btrfs_chunk *)sb_ptr;
5936 			ret = read_one_chunk(root, &key, sb, chunk);
5937 			if (ret)
5938 				break;
5939 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5940 			len = btrfs_chunk_item_size(num_stripes);
5941 		} else {
5942 			ret = -EIO;
5943 			break;
5944 		}
5945 		ptr += len;
5946 		sb_ptr += len;
5947 		cur += len;
5948 	}
5949 	free_extent_buffer(sb);
5950 	return ret;
5951 }
5952 
5953 int btrfs_read_chunk_tree(struct btrfs_root *root)
5954 {
5955 	struct btrfs_path *path;
5956 	struct extent_buffer *leaf;
5957 	struct btrfs_key key;
5958 	struct btrfs_key found_key;
5959 	int ret;
5960 	int slot;
5961 
5962 	root = root->fs_info->chunk_root;
5963 
5964 	path = btrfs_alloc_path();
5965 	if (!path)
5966 		return -ENOMEM;
5967 
5968 	mutex_lock(&uuid_mutex);
5969 	lock_chunks(root);
5970 
5971 	/*
5972 	 * Read all device items, and then all the chunk items. All
5973 	 * device items are found before any chunk item (their object id
5974 	 * is smaller than the lowest possible object id for a chunk
5975 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
5976 	 */
5977 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5978 	key.offset = 0;
5979 	key.type = 0;
5980 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5981 	if (ret < 0)
5982 		goto error;
5983 	while (1) {
5984 		leaf = path->nodes[0];
5985 		slot = path->slots[0];
5986 		if (slot >= btrfs_header_nritems(leaf)) {
5987 			ret = btrfs_next_leaf(root, path);
5988 			if (ret == 0)
5989 				continue;
5990 			if (ret < 0)
5991 				goto error;
5992 			break;
5993 		}
5994 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5995 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5996 			struct btrfs_dev_item *dev_item;
5997 			dev_item = btrfs_item_ptr(leaf, slot,
5998 						  struct btrfs_dev_item);
5999 			ret = read_one_dev(root, leaf, dev_item);
6000 			if (ret)
6001 				goto error;
6002 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6003 			struct btrfs_chunk *chunk;
6004 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6005 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6006 			if (ret)
6007 				goto error;
6008 		}
6009 		path->slots[0]++;
6010 	}
6011 	ret = 0;
6012 error:
6013 	unlock_chunks(root);
6014 	mutex_unlock(&uuid_mutex);
6015 
6016 	btrfs_free_path(path);
6017 	return ret;
6018 }
6019 
6020 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6021 {
6022 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6023 	struct btrfs_device *device;
6024 
6025 	mutex_lock(&fs_devices->device_list_mutex);
6026 	list_for_each_entry(device, &fs_devices->devices, dev_list)
6027 		device->dev_root = fs_info->dev_root;
6028 	mutex_unlock(&fs_devices->device_list_mutex);
6029 }
6030 
6031 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6032 {
6033 	int i;
6034 
6035 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6036 		btrfs_dev_stat_reset(dev, i);
6037 }
6038 
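/*
 * Load persisted device statistics at mount time.  A device without a
 * dev_stats item on disk (e.g. one that was freshly added) simply
 * starts with all counters reset to zero.
 */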
6039 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6040 {
6041 	struct btrfs_key key;
6042 	struct btrfs_key found_key;
6043 	struct btrfs_root *dev_root = fs_info->dev_root;
6044 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6045 	struct extent_buffer *eb;
6046 	int slot;
6047 	int ret = 0;
6048 	struct btrfs_device *device;
6049 	struct btrfs_path *path = NULL;
6050 	int i;
6051 
6052 	path = btrfs_alloc_path();
6053 	if (!path) {
6054 		ret = -ENOMEM;
6055 		goto out;
6056 	}
6057 
6058 	mutex_lock(&fs_devices->device_list_mutex);
6059 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6060 		int item_size;
6061 		struct btrfs_dev_stats_item *ptr;
6062 
6063 		key.objectid = 0;
6064 		key.type = BTRFS_DEV_STATS_KEY;
6065 		key.offset = device->devid;
6066 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6067 		if (ret) {
6068 			__btrfs_reset_dev_stats(device);
6069 			device->dev_stats_valid = 1;
6070 			btrfs_release_path(path);
6071 			continue;
6072 		}
6073 		slot = path->slots[0];
6074 		eb = path->nodes[0];
6075 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6076 		item_size = btrfs_item_size_nr(eb, slot);
6077 
6078 		ptr = btrfs_item_ptr(eb, slot,
6079 				     struct btrfs_dev_stats_item);
6080 
6081 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6082 			if (item_size >= (1 + i) * sizeof(__le64))
6083 				btrfs_dev_stat_set(device, i,
6084 					btrfs_dev_stats_value(eb, ptr, i));
6085 			else
6086 				btrfs_dev_stat_reset(device, i);
6087 		}
6088 
6089 		device->dev_stats_valid = 1;
6090 		btrfs_dev_stat_print_on_load(device);
6091 		btrfs_release_path(path);
6092 	}
6093 	mutex_unlock(&fs_devices->device_list_mutex);
6094 
6095 out:
6096 	btrfs_free_path(path);
6097 	return ret < 0 ? ret : 0;
6098 }
6099 
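/*
 * Persist the in-memory counters of one device as a dev_stats item
 * keyed (0, BTRFS_DEV_STATS_KEY, devid).  An existing item that is too
 * small (written by an older kernel with fewer counters) is deleted and
 * re-inserted at the current size.
 */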
6100 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6101 				struct btrfs_root *dev_root,
6102 				struct btrfs_device *device)
6103 {
6104 	struct btrfs_path *path;
6105 	struct btrfs_key key;
6106 	struct extent_buffer *eb;
6107 	struct btrfs_dev_stats_item *ptr;
6108 	int ret;
6109 	int i;
6110 
6111 	key.objectid = 0;
6112 	key.type = BTRFS_DEV_STATS_KEY;
6113 	key.offset = device->devid;
6114 
6115 	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
6117 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6118 	if (ret < 0) {
6119 		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
6120 			      ret, rcu_str_deref(device->name));
6121 		goto out;
6122 	}
6123 
6124 	if (ret == 0 &&
6125 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6126 		/* need to delete old one and insert a new one */
6127 		ret = btrfs_del_item(trans, dev_root, path);
6128 		if (ret != 0) {
6129 			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
6130 				      rcu_str_deref(device->name), ret);
6131 			goto out;
6132 		}
6133 		ret = 1;
6134 	}
6135 
6136 	if (ret == 1) {
6137 		/* need to insert a new item */
6138 		btrfs_release_path(path);
6139 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6140 					      &key, sizeof(*ptr));
6141 		if (ret < 0) {
6142 			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
6143 				      rcu_str_deref(device->name), ret);
6144 			goto out;
6145 		}
6146 	}
6147 
6148 	eb = path->nodes[0];
6149 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6150 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6151 		btrfs_set_dev_stats_value(eb, ptr, i,
6152 					  btrfs_dev_stat_read(device, i));
6153 	btrfs_mark_buffer_dirty(eb);
6154 
6155 out:
6156 	btrfs_free_path(path);
6157 	return ret;
6158 }
6159 
6160 /*
 * Called from commit_transaction().  Writes all changed device stats to disk.
6162  */
6163 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6164 			struct btrfs_fs_info *fs_info)
6165 {
6166 	struct btrfs_root *dev_root = fs_info->dev_root;
6167 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6168 	struct btrfs_device *device;
6169 	int ret = 0;
6170 
6171 	mutex_lock(&fs_devices->device_list_mutex);
6172 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6173 		if (!device->dev_stats_valid || !device->dev_stats_dirty)
6174 			continue;
6175 
6176 		ret = update_dev_stat_item(trans, dev_root, device);
6177 		if (!ret)
6178 			device->dev_stats_dirty = 0;
6179 	}
6180 	mutex_unlock(&fs_devices->device_list_mutex);
6181 
6182 	return ret;
6183 }
6184 
6185 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6186 {
6187 	btrfs_dev_stat_inc(dev, index);
6188 	btrfs_dev_stat_print_on_error(dev);
6189 }
6190 
6191 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6192 {
6193 	if (!dev->dev_stats_valid)
6194 		return;
6195 	printk_ratelimited_in_rcu(KERN_ERR
6196 			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6197 			   rcu_str_deref(dev->name),
6198 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6199 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6200 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6201 			   btrfs_dev_stat_read(dev,
6202 					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
6203 			   btrfs_dev_stat_read(dev,
6204 					       BTRFS_DEV_STAT_GENERATION_ERRS));
6205 }
6206 
6207 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6208 {
6209 	int i;
6210 
6211 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6212 		if (btrfs_dev_stat_read(dev, i) != 0)
6213 			break;
6214 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6215 		return; /* all values == 0, suppress message */
6216 
6217 	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6218 	       rcu_str_deref(dev->name),
6219 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6220 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6221 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6222 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6223 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6224 }
6225 
6226 int btrfs_get_dev_stats(struct btrfs_root *root,
6227 			struct btrfs_ioctl_get_dev_stats *stats)
6228 {
6229 	struct btrfs_device *dev;
6230 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6231 	int i;
6232 
6233 	mutex_lock(&fs_devices->device_list_mutex);
6234 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6235 	mutex_unlock(&fs_devices->device_list_mutex);
6236 
6237 	if (!dev) {
6238 		printk(KERN_WARNING
6239 		       "btrfs: get dev_stats failed, device not found\n");
6240 		return -ENODEV;
6241 	} else if (!dev->dev_stats_valid) {
6242 		printk(KERN_WARNING
6243 		       "btrfs: get dev_stats failed, not yet valid\n");
6244 		return -ENODEV;
6245 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6246 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6247 			if (stats->nr_items > i)
6248 				stats->values[i] =
6249 					btrfs_dev_stat_read_and_reset(dev, i);
6250 			else
6251 				btrfs_dev_stat_reset(dev, i);
6252 		}
6253 	} else {
6254 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6255 			if (stats->nr_items > i)
6256 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6257 	}
6258 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6259 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6260 	return 0;
6261 }
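
/*
 * Hypothetical user-space sketch of driving the interface above through
 * the BTRFS_IOC_GET_DEV_STATS ioctl (mount path and error handling are
 * condensed; this is an illustration, not part of the kernel sources):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */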
6262 
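/*
 * Wipe the magic of the first superblock copy so the device is no
 * longer detected as btrfs, e.g. after it has been removed from or
 * replaced in a filesystem.
 */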
6263 int btrfs_scratch_superblock(struct btrfs_device *device)
6264 {
6265 	struct buffer_head *bh;
6266 	struct btrfs_super_block *disk_super;
6267 
6268 	bh = btrfs_read_dev_super(device->bdev);
6269 	if (!bh)
6270 		return -EINVAL;
6271 	disk_super = (struct btrfs_super_block *)bh->b_data;
6272 
6273 	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6274 	set_buffer_dirty(bh);
6275 	sync_dirty_buffer(bh);
6276 	brelse(bh);
6277 
6278 	return 0;
6279 }
6280