/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}
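
/*
 * Both allocators above follow the kernel ERR_PTR() convention instead
 * of returning NULL on failure.  A minimal usage sketch, mirroring what
 * clone_fs_devices() does below (illustrative only; passing a NULL fsid
 * makes alloc_fs_devices() generate a fresh one):
 *
 *	struct btrfs_fs_devices *fs_devs;
 *
 *	fs_devs = alloc_fs_devices(NULL);
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *
 * The result is not linked anywhere yet, so error unwinding only needs
 * kfree(fs_devs).
 */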

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_NOFS);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
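
/*
 * Sketch of how callers in this file use the helper above (compare
 * btrfs_rm_device() and btrfs_find_device_by_path(); illustrative only):
 *
 *	struct block_device *bdev;
 *	struct buffer_head *bh;
 *	int ret;
 *
 *	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
 *				    fs_info->bdev_holder, 0, &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...
 *	brelse(bh);
 *	blkdev_put(bdev, FMODE_READ);
 *
 * On failure both *bdev and *bh are reset to NULL, so callers never see
 * a half-opened device.
 */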

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
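
/*
 * The plugging in run_scheduled_bios() is the stock block layer batching
 * pattern, reduced to its essentials below (illustrative sketch only;
 * next_bio() is a made-up iterator standing in for the pending list walk):
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio()) != NULL)
 *		submit_bio(bio->bi_rw, bio);
 *	blk_finish_plug(&plug);
 *
 * The function above additionally restarts the plug whenever sync and
 * regular bios get mixed, so sync requests are not held back behind a
 * large async batch.
 */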

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;

	/* We hold the volume lock, so it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path; it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_transid ||
			     device->generation > latest_transid)) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
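
/*
 * Freeing a device is therefore a two-stage deferral: call_rcu() waits
 * for lockless readers of the device list to drain, and the RCU callback
 * (which must not sleep) punts the blocking blkdev_put() to a workqueue.
 * The call site pattern looks like this (see __btrfs_close_devices()
 * below; illustrative only):
 *
 *	list_replace_rcu(&device->dev_list, &new_device->dev_list);
 *	call_rcu(&device->rcu, free_device);
 */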

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;
		if (device->missing)
			fs_devices->missing_devices--;

		new_device = btrfs_alloc_device(NULL, &device->devid,
						device->uuid);
		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(!name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}

		list_replace_rcu(&device->dev_list, &new_device->dev_list);
		new_device->fs_devices = device->fs_devices;

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for the rcu kworkers spawned under __btrfs_close_devices
	 * to finish all of their blkdev_puts so the devices are really
	 * freed when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
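
/*
 * fs_devices->opened is a plain open count serialized by uuid_mutex, so
 * every successful btrfs_open_devices() must be paired with a
 * btrfs_close_devices().  A hedged usage sketch (flags and holder are
 * whatever the mount path passes in):
 *
 *	ret = btrfs_open_devices(fs_devices, FMODE_READ, holder);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_close_devices(fs_devices);
 */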

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path, and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
	}

	printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
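
/*
 * Mounting therefore proceeds in two steps: every device is first
 * registered via a scan, then the whole set is opened.  Roughly (an
 * illustrative sketch of the callers in super.c):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *
 *	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
 *	if (ret)
 *		return ret;
 *	ret = btrfs_open_devices(fs_devices, mode, holder);
 *
 * Because the scan only reads the primary superblock through the
 * pagecache, it is safe to run against devices that are already mounted.
 */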

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
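
/*
 * Worked example of the overlap arithmetic above (numbers invented for
 * illustration): querying [start = 100, end = 199] against dev extents
 * [80, 150) and [180, 260) adds extent_end - start = 150 - 100 = 50 for
 * the first and end - key.offset + 1 = 199 - 180 + 1 = 20 for the
 * second, so *length ends up at 70.  A single extent covering the whole
 * range short-circuits to end - start + 1 = 100.
 */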

static int contains_pending_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct extent_map *em;
	int ret = 0;

	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
		struct map_lookup *map;
		int i;

		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++) {
			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= *start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    *start)
				continue;
			*start = map->stripes[i].physical +
				em->orig_block_len;
			ret = 1;
		}
	}

	return ret;
}


/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		largest free space if we don't find anything suitable
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But
 * if we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(trans, device,
						    &search_start,
						    hole_size))
				hole_size = 0;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the largest free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
		btrfs_release_path(path);
		goto again;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
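
/*
 * Typical use by the chunk allocator (illustrative; see
 * __btrfs_alloc_chunk() for the real call site):
 *
 *	u64 dev_offset;
 *	u64 max_avail;
 *
 *	ret = find_free_dev_extent(trans, device, num_bytes,
 *				   &dev_offset, &max_avail);
 *	if (ret && ret != -ENOSPC)
 *		goto error;
 *
 * On success dev_offset is the start of a hole of at least num_bytes;
 * on -ENOSPC, max_avail still reports the largest hole that was found.
 */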

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space -= device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

	/* Notify udev that device has changed */
	if (bdev)
		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));

	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev) {
		fs_info->fs_devices->open_devices--;

		/* zero out the old super */
		btrfs_scratch_superblock(srcdev);
	}

	call_rcu(&srcdev->rcu, free_device);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard--;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}

static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			pr_err("btrfs: no missing device found\n");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}
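
/*
 * The magic path "missing" lets user space name a device that no longer
 * has a block device node, e.g. when replacing a disk that died
 * (illustrative sketch of a dev-replace style caller):
 *
 *	struct btrfs_device *src_device;
 *
 *	ret = btrfs_find_device_missing_or_by_path(root, "missing",
 *						   &src_device);
 *
 * which selects the first device that is in the FS metadata but has no
 * open bdev.
 */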

/*
 * Does all the dirty work required for changing the file system's UUID.
 */
1829 static int btrfs_prepare_sprout(struct btrfs_root *root)
1830 {
1831 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1832 	struct btrfs_fs_devices *old_devices;
1833 	struct btrfs_fs_devices *seed_devices;
1834 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1835 	struct btrfs_device *device;
1836 	u64 super_flags;
1837 
1838 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1839 	if (!fs_devices->seeding)
1840 		return -EINVAL;
1841 
1842 	seed_devices = __alloc_fs_devices();
1843 	if (IS_ERR(seed_devices))
1844 		return PTR_ERR(seed_devices);
1845 
1846 	old_devices = clone_fs_devices(fs_devices);
1847 	if (IS_ERR(old_devices)) {
1848 		kfree(seed_devices);
1849 		return PTR_ERR(old_devices);
1850 	}
1851 
1852 	list_add(&old_devices->list, &fs_uuids);
1853 
1854 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1855 	seed_devices->opened = 1;
1856 	INIT_LIST_HEAD(&seed_devices->devices);
1857 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1858 	mutex_init(&seed_devices->device_list_mutex);
1859 
1860 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1861 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1862 			      synchronize_rcu);
1863 
1864 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1865 	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1866 		device->fs_devices = seed_devices;
1867 	}
1868 
1869 	fs_devices->seeding = 0;
1870 	fs_devices->num_devices = 0;
1871 	fs_devices->open_devices = 0;
1872 	fs_devices->total_devices = 0;
1873 	fs_devices->seed = seed_devices;
1874 
1875 	generate_random_uuid(fs_devices->fsid);
1876 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1877 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1878 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1879 
1880 	super_flags = btrfs_super_flags(disk_super) &
1881 		      ~BTRFS_SUPER_FLAG_SEEDING;
1882 	btrfs_set_super_flags(disk_super, super_flags);
1883 
1884 	return 0;
1885 }
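/*
 * Note (derived from the code above): after btrfs_prepare_sprout()
 * returns, the original devices live on seed_devices, which keeps the
 * copied seeding semantics, while fs_devices has been emptied, given a
 * brand new fsid, and chained to the seed set through fs_devices->seed.
 * The superblock's SEEDING flag is cleared so the sprouted file system
 * becomes writable.
 */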
1886 
1887 /*
1888  * Store the expected generation for seed devices in device items.
1889  */
1890 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1891 			       struct btrfs_root *root)
1892 {
1893 	struct btrfs_path *path;
1894 	struct extent_buffer *leaf;
1895 	struct btrfs_dev_item *dev_item;
1896 	struct btrfs_device *device;
1897 	struct btrfs_key key;
1898 	u8 fs_uuid[BTRFS_UUID_SIZE];
1899 	u8 dev_uuid[BTRFS_UUID_SIZE];
1900 	u64 devid;
1901 	int ret;
1902 
1903 	path = btrfs_alloc_path();
1904 	if (!path)
1905 		return -ENOMEM;
1906 
1907 	root = root->fs_info->chunk_root;
1908 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1909 	key.offset = 0;
1910 	key.type = BTRFS_DEV_ITEM_KEY;
1911 
1912 	while (1) {
1913 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1914 		if (ret < 0)
1915 			goto error;
1916 
1917 		leaf = path->nodes[0];
1918 next_slot:
1919 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1920 			ret = btrfs_next_leaf(root, path);
1921 			if (ret > 0)
1922 				break;
1923 			if (ret < 0)
1924 				goto error;
1925 			leaf = path->nodes[0];
1926 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1927 			btrfs_release_path(path);
1928 			continue;
1929 		}
1930 
1931 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1932 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1933 		    key.type != BTRFS_DEV_ITEM_KEY)
1934 			break;
1935 
1936 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1937 					  struct btrfs_dev_item);
1938 		devid = btrfs_device_id(leaf, dev_item);
1939 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
1940 				   BTRFS_UUID_SIZE);
1941 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
1942 				   BTRFS_UUID_SIZE);
1943 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1944 					   fs_uuid);
1945 		BUG_ON(!device); /* Logic error */
1946 
1947 		if (device->fs_devices->seeding) {
1948 			btrfs_set_device_generation(leaf, dev_item,
1949 						    device->generation);
1950 			btrfs_mark_buffer_dirty(leaf);
1951 		}
1952 
1953 		path->slots[0]++;
1954 		goto next_slot;
1955 	}
1956 	ret = 0;
1957 error:
1958 	btrfs_free_path(path);
1959 	return ret;
1960 }
1961 
1962 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1963 {
1964 	struct request_queue *q;
1965 	struct btrfs_trans_handle *trans;
1966 	struct btrfs_device *device;
1967 	struct block_device *bdev;
1968 	struct list_head *devices;
1969 	struct super_block *sb = root->fs_info->sb;
1970 	struct rcu_string *name;
1971 	u64 total_bytes;
1972 	int seeding_dev = 0;
1973 	int ret = 0;
1974 
1975 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1976 		return -EROFS;
1977 
1978 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1979 				  root->fs_info->bdev_holder);
1980 	if (IS_ERR(bdev))
1981 		return PTR_ERR(bdev);
1982 
1983 	if (root->fs_info->fs_devices->seeding) {
1984 		seeding_dev = 1;
1985 		down_write(&sb->s_umount);
1986 		mutex_lock(&uuid_mutex);
1987 	}
1988 
1989 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1990 
1991 	devices = &root->fs_info->fs_devices->devices;
1992 
1993 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1994 	list_for_each_entry(device, devices, dev_list) {
1995 		if (device->bdev == bdev) {
1996 			ret = -EEXIST;
1997 			mutex_unlock(
1998 				&root->fs_info->fs_devices->device_list_mutex);
1999 			goto error;
2000 		}
2001 	}
2002 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2003 
2004 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2005 	if (IS_ERR(device)) {
2006 		/* we can safely leave the fs_devices entry around */
2007 		ret = PTR_ERR(device);
2008 		goto error;
2009 	}
2010 
2011 	name = rcu_string_strdup(device_path, GFP_NOFS);
2012 	if (!name) {
2013 		kfree(device);
2014 		ret = -ENOMEM;
2015 		goto error;
2016 	}
2017 	rcu_assign_pointer(device->name, name);
2018 
2019 	trans = btrfs_start_transaction(root, 0);
2020 	if (IS_ERR(trans)) {
2021 		rcu_string_free(device->name);
2022 		kfree(device);
2023 		ret = PTR_ERR(trans);
2024 		goto error;
2025 	}
2026 
2027 	lock_chunks(root);
2028 
2029 	q = bdev_get_queue(bdev);
2030 	if (blk_queue_discard(q))
2031 		device->can_discard = 1;
2032 	device->writeable = 1;
2033 	device->generation = trans->transid;
2034 	device->io_width = root->sectorsize;
2035 	device->io_align = root->sectorsize;
2036 	device->sector_size = root->sectorsize;
2037 	device->total_bytes = i_size_read(bdev->bd_inode);
2038 	device->disk_total_bytes = device->total_bytes;
2039 	device->dev_root = root->fs_info->dev_root;
2040 	device->bdev = bdev;
2041 	device->in_fs_metadata = 1;
2042 	device->is_tgtdev_for_dev_replace = 0;
2043 	device->mode = FMODE_EXCL;
2044 	set_blocksize(device->bdev, 4096);
2045 
2046 	if (seeding_dev) {
2047 		sb->s_flags &= ~MS_RDONLY;
2048 		ret = btrfs_prepare_sprout(root);
2049 		BUG_ON(ret); /* -ENOMEM */
2050 	}
2051 
2052 	device->fs_devices = root->fs_info->fs_devices;
2053 
2054 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2055 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2056 	list_add(&device->dev_alloc_list,
2057 		 &root->fs_info->fs_devices->alloc_list);
2058 	root->fs_info->fs_devices->num_devices++;
2059 	root->fs_info->fs_devices->open_devices++;
2060 	root->fs_info->fs_devices->rw_devices++;
2061 	root->fs_info->fs_devices->total_devices++;
2062 	if (device->can_discard)
2063 		root->fs_info->fs_devices->num_can_discard++;
2064 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2065 
2066 	spin_lock(&root->fs_info->free_chunk_lock);
2067 	root->fs_info->free_chunk_space += device->total_bytes;
2068 	spin_unlock(&root->fs_info->free_chunk_lock);
2069 
2070 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2071 		root->fs_info->fs_devices->rotating = 1;
2072 
2073 	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2074 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2075 				    total_bytes + device->total_bytes);
2076 
2077 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2078 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2079 				    total_bytes + 1);
2080 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2081 
2082 	if (seeding_dev) {
2083 		ret = init_first_rw_device(trans, root, device);
2084 		if (ret) {
2085 			btrfs_abort_transaction(trans, root, ret);
2086 			goto error_trans;
2087 		}
2088 		ret = btrfs_finish_sprout(trans, root);
2089 		if (ret) {
2090 			btrfs_abort_transaction(trans, root, ret);
2091 			goto error_trans;
2092 		}
2093 	} else {
2094 		ret = btrfs_add_device(trans, root, device);
2095 		if (ret) {
2096 			btrfs_abort_transaction(trans, root, ret);
2097 			goto error_trans;
2098 		}
2099 	}
2100 
2101 	/*
2102 	 * we've got more storage, clear any full flags on the space
2103 	 * infos
2104 	 */
2105 	btrfs_clear_space_info_full(root->fs_info);
2106 
2107 	unlock_chunks(root);
2108 	root->fs_info->num_tolerated_disk_barrier_failures =
2109 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2110 	ret = btrfs_commit_transaction(trans, root);
2111 
2112 	if (seeding_dev) {
2113 		mutex_unlock(&uuid_mutex);
2114 		up_write(&sb->s_umount);
2115 
2116 		if (ret) /* transaction commit */
2117 			return ret;
2118 
2119 		ret = btrfs_relocate_sys_chunks(root);
2120 		if (ret < 0)
2121 			btrfs_error(root->fs_info, ret,
2122 				    "Failed to relocate sys chunks after "
2123 				    "device initialization. This can be fixed "
2124 				    "using the \"btrfs balance\" command.");
2125 		trans = btrfs_attach_transaction(root);
2126 		if (IS_ERR(trans)) {
2127 			if (PTR_ERR(trans) == -ENOENT)
2128 				return 0;
2129 			return PTR_ERR(trans);
2130 		}
2131 		ret = btrfs_commit_transaction(trans, root);
2132 	}
2133 
2134 	return ret;
2135 
2136 error_trans:
2137 	unlock_chunks(root);
2138 	btrfs_end_transaction(trans, root);
2139 	rcu_string_free(device->name);
2140 	kfree(device);
2141 error:
2142 	blkdev_put(bdev, FMODE_EXCL);
2143 	if (seeding_dev) {
2144 		mutex_unlock(&uuid_mutex);
2145 		up_write(&sb->s_umount);
2146 	}
2147 	return ret;
2148 }
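/*
 * Ordering note (derived from the code above): when the mounted fs is a
 * seed, btrfs_init_new_device() first sprouts a new writable fs
 * (btrfs_prepare_sprout), allocates the first writable chunks on the
 * new device (init_first_rw_device), records the seed generations
 * (btrfs_finish_sprout), and only after committing the transaction
 * relocates the system chunks, since btrfs_relocate_sys_chunks()
 * starts transactions of its own.
 */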
2149 
2150 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2151 				  struct btrfs_device **device_out)
2152 {
2153 	struct request_queue *q;
2154 	struct btrfs_device *device;
2155 	struct block_device *bdev;
2156 	struct btrfs_fs_info *fs_info = root->fs_info;
2157 	struct list_head *devices;
2158 	struct rcu_string *name;
2159 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2160 	int ret = 0;
2161 
2162 	*device_out = NULL;
2163 	if (fs_info->fs_devices->seeding)
2164 		return -EINVAL;
2165 
2166 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2167 				  fs_info->bdev_holder);
2168 	if (IS_ERR(bdev))
2169 		return PTR_ERR(bdev);
2170 
2171 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2172 
2173 	devices = &fs_info->fs_devices->devices;
2174 	list_for_each_entry(device, devices, dev_list) {
2175 		if (device->bdev == bdev) {
2176 			ret = -EEXIST;
2177 			goto error;
2178 		}
2179 	}
2180 
2181 	device = btrfs_alloc_device(NULL, &devid, NULL);
2182 	if (IS_ERR(device)) {
2183 		ret = PTR_ERR(device);
2184 		goto error;
2185 	}
2186 
2187 	name = rcu_string_strdup(device_path, GFP_NOFS);
2188 	if (!name) {
2189 		kfree(device);
2190 		ret = -ENOMEM;
2191 		goto error;
2192 	}
2193 	rcu_assign_pointer(device->name, name);
2194 
2195 	q = bdev_get_queue(bdev);
2196 	if (blk_queue_discard(q))
2197 		device->can_discard = 1;
2198 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2199 	device->writeable = 1;
2200 	device->generation = 0;
2201 	device->io_width = root->sectorsize;
2202 	device->io_align = root->sectorsize;
2203 	device->sector_size = root->sectorsize;
2204 	device->total_bytes = i_size_read(bdev->bd_inode);
2205 	device->disk_total_bytes = device->total_bytes;
2206 	device->dev_root = fs_info->dev_root;
2207 	device->bdev = bdev;
2208 	device->in_fs_metadata = 1;
2209 	device->is_tgtdev_for_dev_replace = 1;
2210 	device->mode = FMODE_EXCL;
2211 	set_blocksize(device->bdev, 4096);
2212 	device->fs_devices = fs_info->fs_devices;
2213 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2214 	fs_info->fs_devices->num_devices++;
2215 	fs_info->fs_devices->open_devices++;
2216 	if (device->can_discard)
2217 		fs_info->fs_devices->num_can_discard++;
2218 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2219 
2220 	*device_out = device;
2221 	return ret;
2222 
2223 error:
2224 	blkdev_put(bdev, FMODE_EXCL);
2225 	return ret;
2226 }
2227 
2228 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2229 					      struct btrfs_device *tgtdev)
2230 {
2231 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2232 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2233 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2234 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2235 	tgtdev->dev_root = fs_info->dev_root;
2236 	tgtdev->in_fs_metadata = 1;
2237 }
2238 
2239 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2240 					struct btrfs_device *device)
2241 {
2242 	int ret;
2243 	struct btrfs_path *path;
2244 	struct btrfs_root *root;
2245 	struct btrfs_dev_item *dev_item;
2246 	struct extent_buffer *leaf;
2247 	struct btrfs_key key;
2248 
2249 	root = device->dev_root->fs_info->chunk_root;
2250 
2251 	path = btrfs_alloc_path();
2252 	if (!path)
2253 		return -ENOMEM;
2254 
2255 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2256 	key.type = BTRFS_DEV_ITEM_KEY;
2257 	key.offset = device->devid;
2258 
2259 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2260 	if (ret < 0)
2261 		goto out;
2262 
2263 	if (ret > 0) {
2264 		ret = -ENOENT;
2265 		goto out;
2266 	}
2267 
2268 	leaf = path->nodes[0];
2269 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2270 
2271 	btrfs_set_device_id(leaf, dev_item, device->devid);
2272 	btrfs_set_device_type(leaf, dev_item, device->type);
2273 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2274 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2275 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2276 	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2277 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2278 	btrfs_mark_buffer_dirty(leaf);
2279 
2280 out:
2281 	btrfs_free_path(path);
2282 	return ret;
2283 }
2284 
2285 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2286 		      struct btrfs_device *device, u64 new_size)
2287 {
2288 	struct btrfs_super_block *super_copy =
2289 		device->dev_root->fs_info->super_copy;
2290 	u64 old_total = btrfs_super_total_bytes(super_copy);
2291 	u64 diff = new_size - device->total_bytes;
2292 
2293 	if (!device->writeable)
2294 		return -EACCES;
2295 	if (new_size <= device->total_bytes ||
2296 	    device->is_tgtdev_for_dev_replace)
2297 		return -EINVAL;
2298 
2299 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2300 	device->fs_devices->total_rw_bytes += diff;
2301 
2302 	device->total_bytes = new_size;
2303 	device->disk_total_bytes = new_size;
2304 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2305 
2306 	return btrfs_update_device(trans, device);
2307 }
2308 
2309 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2310 		      struct btrfs_device *device, u64 new_size)
2311 {
2312 	int ret;
2313 	lock_chunks(device->dev_root);
2314 	ret = __btrfs_grow_device(trans, device, new_size);
2315 	unlock_chunks(device->dev_root);
2316 	return ret;
2317 }
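/*
 * Usage sketch (illustrative, error handling omitted): growing a
 * device is expected to happen inside a transaction, e.g. from a
 * resize path:
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	if (!IS_ERR(trans)) {
 *		ret = btrfs_grow_device(trans, device, new_size);
 *		btrfs_commit_transaction(trans, root);
 *	}
 *
 * new_size must be strictly greater than device->total_bytes or
 * __btrfs_grow_device() returns -EINVAL.
 */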
2318 
2319 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2320 			    struct btrfs_root *root,
2321 			    u64 chunk_tree, u64 chunk_objectid,
2322 			    u64 chunk_offset)
2323 {
2324 	int ret;
2325 	struct btrfs_path *path;
2326 	struct btrfs_key key;
2327 
2328 	root = root->fs_info->chunk_root;
2329 	path = btrfs_alloc_path();
2330 	if (!path)
2331 		return -ENOMEM;
2332 
2333 	key.objectid = chunk_objectid;
2334 	key.offset = chunk_offset;
2335 	key.type = BTRFS_CHUNK_ITEM_KEY;
2336 
2337 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2338 	if (ret < 0)
2339 		goto out;
2340 	else if (ret > 0) { /* Logic error or corruption */
2341 		btrfs_error(root->fs_info, -ENOENT,
2342 			    "Failed lookup while freeing chunk.");
2343 		ret = -ENOENT;
2344 		goto out;
2345 	}
2346 
2347 	ret = btrfs_del_item(trans, root, path);
2348 	if (ret < 0)
2349 		btrfs_error(root->fs_info, ret,
2350 			    "Failed to delete chunk item.");
2351 out:
2352 	btrfs_free_path(path);
2353 	return ret;
2354 }
2355 
2356 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2357 			       u64 chunk_offset)
2358 {
2359 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2360 	struct btrfs_disk_key *disk_key;
2361 	struct btrfs_chunk *chunk;
2362 	u8 *ptr;
2363 	int ret = 0;
2364 	u32 num_stripes;
2365 	u32 array_size;
2366 	u32 len = 0;
2367 	u32 cur;
2368 	struct btrfs_key key;
2369 
2370 	array_size = btrfs_super_sys_array_size(super_copy);
2371 
2372 	ptr = super_copy->sys_chunk_array;
2373 	cur = 0;
2374 
2375 	while (cur < array_size) {
2376 		disk_key = (struct btrfs_disk_key *)ptr;
2377 		btrfs_disk_key_to_cpu(&key, disk_key);
2378 
2379 		len = sizeof(*disk_key);
2380 
2381 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2382 			chunk = (struct btrfs_chunk *)(ptr + len);
2383 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2384 			len += btrfs_chunk_item_size(num_stripes);
2385 		} else {
2386 			ret = -EIO;
2387 			break;
2388 		}
2389 		if (key.objectid == chunk_objectid &&
2390 		    key.offset == chunk_offset) {
2391 			memmove(ptr, ptr + len, array_size - (cur + len));
2392 			array_size -= len;
2393 			btrfs_set_super_sys_array_size(super_copy, array_size);
2394 		} else {
2395 			ptr += len;
2396 			cur += len;
2397 		}
2398 	}
2399 	return ret;
2400 }
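/*
 * For reference, the layout assumed by the walk above: sys_chunk_array
 * is a packed sequence of
 *
 *	[btrfs_disk_key][btrfs_chunk (size depends on num_stripes)] ...
 *
 * so each iteration must re-read num_stripes to know how far to
 * advance, and removing an entry is a memmove() of everything that
 * follows it plus a matching shrink of sys_array_size.
 */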
2401 
2402 static int btrfs_relocate_chunk(struct btrfs_root *root,
2403 			 u64 chunk_tree, u64 chunk_objectid,
2404 			 u64 chunk_offset)
2405 {
2406 	struct extent_map_tree *em_tree;
2407 	struct btrfs_root *extent_root;
2408 	struct btrfs_trans_handle *trans;
2409 	struct extent_map *em;
2410 	struct map_lookup *map;
2411 	int ret;
2412 	int i;
2413 
2414 	root = root->fs_info->chunk_root;
2415 	extent_root = root->fs_info->extent_root;
2416 	em_tree = &root->fs_info->mapping_tree.map_tree;
2417 
2418 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2419 	if (ret)
2420 		return -ENOSPC;
2421 
2422 	/* step one, relocate all the extents inside this chunk */
2423 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2424 	if (ret)
2425 		return ret;
2426 
2427 	trans = btrfs_start_transaction(root, 0);
2428 	if (IS_ERR(trans)) {
2429 		ret = PTR_ERR(trans);
2430 		btrfs_std_error(root->fs_info, ret);
2431 		return ret;
2432 	}
2433 
2434 	lock_chunks(root);
2435 
2436 	/*
2437 	 * step two, delete the device extents and the
2438 	 * chunk tree entries
2439 	 */
2440 	read_lock(&em_tree->lock);
2441 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2442 	read_unlock(&em_tree->lock);
2443 
2444 	BUG_ON(!em || em->start > chunk_offset ||
2445 	       em->start + em->len < chunk_offset);
2446 	map = (struct map_lookup *)em->bdev;
2447 
2448 	for (i = 0; i < map->num_stripes; i++) {
2449 		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2450 					    map->stripes[i].physical);
2451 		BUG_ON(ret);
2452 
2453 		if (map->stripes[i].dev) {
2454 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2455 			BUG_ON(ret);
2456 		}
2457 	}
2458 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2459 			       chunk_offset);
2460 
2461 	BUG_ON(ret);
2462 
2463 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2464 
2465 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2466 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2467 		BUG_ON(ret);
2468 	}
2469 
2470 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2471 	BUG_ON(ret);
2472 
2473 	write_lock(&em_tree->lock);
2474 	remove_extent_mapping(em_tree, em);
2475 	write_unlock(&em_tree->lock);
2476 
2477 	kfree(map);
2478 	em->bdev = NULL;
2479 
2480 	/* once for the tree */
2481 	free_extent_map(em);
2482 	/* once for us */
2483 	free_extent_map(em);
2484 
2485 	unlock_chunks(root);
2486 	btrfs_end_transaction(trans, root);
2487 	return 0;
2488 }
2489 
2490 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2491 {
2492 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2493 	struct btrfs_path *path;
2494 	struct extent_buffer *leaf;
2495 	struct btrfs_chunk *chunk;
2496 	struct btrfs_key key;
2497 	struct btrfs_key found_key;
2498 	u64 chunk_tree = chunk_root->root_key.objectid;
2499 	u64 chunk_type;
2500 	bool retried = false;
2501 	int failed = 0;
2502 	int ret;
2503 
2504 	path = btrfs_alloc_path();
2505 	if (!path)
2506 		return -ENOMEM;
2507 
2508 again:
2509 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2510 	key.offset = (u64)-1;
2511 	key.type = BTRFS_CHUNK_ITEM_KEY;
2512 
2513 	while (1) {
2514 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2515 		if (ret < 0)
2516 			goto error;
2517 		BUG_ON(ret == 0); /* Corruption */
2518 
2519 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2520 					  key.type);
2521 		if (ret < 0)
2522 			goto error;
2523 		if (ret > 0)
2524 			break;
2525 
2526 		leaf = path->nodes[0];
2527 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2528 
2529 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2530 				       struct btrfs_chunk);
2531 		chunk_type = btrfs_chunk_type(leaf, chunk);
2532 		btrfs_release_path(path);
2533 
2534 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2535 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2536 						   found_key.objectid,
2537 						   found_key.offset);
2538 			if (ret == -ENOSPC)
2539 				failed++;
2540 			else if (ret)
2541 				BUG();
2542 		}
2543 
2544 		if (found_key.offset == 0)
2545 			break;
2546 		key.offset = found_key.offset - 1;
2547 	}
2548 	ret = 0;
2549 	if (failed && !retried) {
2550 		failed = 0;
2551 		retried = true;
2552 		goto again;
2553 	} else if (failed && retried) {
2554 		WARN_ON(1);
2555 		ret = -ENOSPC;
2556 	}
2557 error:
2558 	btrfs_free_path(path);
2559 	return ret;
2560 }
2561 
2562 static int insert_balance_item(struct btrfs_root *root,
2563 			       struct btrfs_balance_control *bctl)
2564 {
2565 	struct btrfs_trans_handle *trans;
2566 	struct btrfs_balance_item *item;
2567 	struct btrfs_disk_balance_args disk_bargs;
2568 	struct btrfs_path *path;
2569 	struct extent_buffer *leaf;
2570 	struct btrfs_key key;
2571 	int ret, err;
2572 
2573 	path = btrfs_alloc_path();
2574 	if (!path)
2575 		return -ENOMEM;
2576 
2577 	trans = btrfs_start_transaction(root, 0);
2578 	if (IS_ERR(trans)) {
2579 		btrfs_free_path(path);
2580 		return PTR_ERR(trans);
2581 	}
2582 
2583 	key.objectid = BTRFS_BALANCE_OBJECTID;
2584 	key.type = BTRFS_BALANCE_ITEM_KEY;
2585 	key.offset = 0;
2586 
2587 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2588 				      sizeof(*item));
2589 	if (ret)
2590 		goto out;
2591 
2592 	leaf = path->nodes[0];
2593 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2594 
2595 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2596 
2597 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2598 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2599 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2600 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2601 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2602 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2603 
2604 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2605 
2606 	btrfs_mark_buffer_dirty(leaf);
2607 out:
2608 	btrfs_free_path(path);
2609 	err = btrfs_commit_transaction(trans, root);
2610 	if (err && !ret)
2611 		ret = err;
2612 	return ret;
2613 }
2614 
2615 static int del_balance_item(struct btrfs_root *root)
2616 {
2617 	struct btrfs_trans_handle *trans;
2618 	struct btrfs_path *path;
2619 	struct btrfs_key key;
2620 	int ret, err;
2621 
2622 	path = btrfs_alloc_path();
2623 	if (!path)
2624 		return -ENOMEM;
2625 
2626 	trans = btrfs_start_transaction(root, 0);
2627 	if (IS_ERR(trans)) {
2628 		btrfs_free_path(path);
2629 		return PTR_ERR(trans);
2630 	}
2631 
2632 	key.objectid = BTRFS_BALANCE_OBJECTID;
2633 	key.type = BTRFS_BALANCE_ITEM_KEY;
2634 	key.offset = 0;
2635 
2636 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2637 	if (ret < 0)
2638 		goto out;
2639 	if (ret > 0) {
2640 		ret = -ENOENT;
2641 		goto out;
2642 	}
2643 
2644 	ret = btrfs_del_item(trans, root, path);
2645 out:
2646 	btrfs_free_path(path);
2647 	err = btrfs_commit_transaction(trans, root);
2648 	if (err && !ret)
2649 		ret = err;
2650 	return ret;
2651 }
2652 
2653 /*
2654  * This is a heuristic used to reduce the number of chunks balanced on
2655  * resume after balance was interrupted.
2656  */
2657 static void update_balance_args(struct btrfs_balance_control *bctl)
2658 {
2659 	/*
2660 	 * Turn on soft mode for chunk types that were being converted.
2661 	 */
2662 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2663 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2664 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2665 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2666 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2667 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2668 
2669 	/*
2670 	 * Turn on the usage filter if it is not already in use.  The idea is
2671 	 * that chunks that we have already balanced should be
2672 	 * reasonably full.  Don't do it for chunks that are being
2673 	 * converted - that will keep us from relocating unconverted
2674 	 * (albeit full) chunks.
2675 	 */
2676 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2677 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2678 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2679 		bctl->data.usage = 90;
2680 	}
2681 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2682 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2683 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2684 		bctl->sys.usage = 90;
2685 	}
2686 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2687 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2688 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2689 		bctl->meta.usage = 90;
2690 	}
2691 }
2692 
2693 /*
2694  * Should be called with both balance and volume mutexes held to
2695  * serialize other volume operations (add_dev/rm_dev/resize) with
2696  * restriper.  Same goes for unset_balance_control.
2697  */
2698 static void set_balance_control(struct btrfs_balance_control *bctl)
2699 {
2700 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2701 
2702 	BUG_ON(fs_info->balance_ctl);
2703 
2704 	spin_lock(&fs_info->balance_lock);
2705 	fs_info->balance_ctl = bctl;
2706 	spin_unlock(&fs_info->balance_lock);
2707 }
2708 
2709 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2710 {
2711 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2712 
2713 	BUG_ON(!fs_info->balance_ctl);
2714 
2715 	spin_lock(&fs_info->balance_lock);
2716 	fs_info->balance_ctl = NULL;
2717 	spin_unlock(&fs_info->balance_lock);
2718 
2719 	kfree(bctl);
2720 }
2721 
2722 /*
2723  * Balance filters.  Return 1 if chunk should be filtered out
2724  * (should not be balanced).
2725  */
2726 static int chunk_profiles_filter(u64 chunk_type,
2727 				 struct btrfs_balance_args *bargs)
2728 {
2729 	chunk_type = chunk_to_extended(chunk_type) &
2730 				BTRFS_EXTENDED_PROFILE_MASK;
2731 
2732 	if (bargs->profiles & chunk_type)
2733 		return 0;
2734 
2735 	return 1;
2736 }
2737 
2738 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2739 			      struct btrfs_balance_args *bargs)
2740 {
2741 	struct btrfs_block_group_cache *cache;
2742 	u64 chunk_used, user_thresh;
2743 	int ret = 1;
2744 
2745 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2746 	chunk_used = btrfs_block_group_used(&cache->item);
2747 
2748 	if (bargs->usage == 0)
2749 		user_thresh = 1;
2750 	else if (bargs->usage > 100)
2751 		user_thresh = cache->key.offset;
2752 	else
2753 		user_thresh = div_factor_fine(cache->key.offset,
2754 					      bargs->usage);
2755 
2756 	if (chunk_used < user_thresh)
2757 		ret = 0;
2758 
2759 	btrfs_put_block_group(cache);
2760 	return ret;
2761 }
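/*
 * Worked example (assuming div_factor_fine(x, n) == x * n / 100): with
 * bargs->usage == 90 and a 1GiB chunk, user_thresh is roughly 922MiB.
 * A chunk with 500MiB used is below the threshold and gets balanced
 * (return 0); one with 950MiB used is considered full enough and is
 * filtered out (return 1).
 */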
2762 
2763 static int chunk_devid_filter(struct extent_buffer *leaf,
2764 			      struct btrfs_chunk *chunk,
2765 			      struct btrfs_balance_args *bargs)
2766 {
2767 	struct btrfs_stripe *stripe;
2768 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2769 	int i;
2770 
2771 	for (i = 0; i < num_stripes; i++) {
2772 		stripe = btrfs_stripe_nr(chunk, i);
2773 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2774 			return 0;
2775 	}
2776 
2777 	return 1;
2778 }
2779 
2780 /* [pstart, pend) */
2781 static int chunk_drange_filter(struct extent_buffer *leaf,
2782 			       struct btrfs_chunk *chunk,
2783 			       u64 chunk_offset,
2784 			       struct btrfs_balance_args *bargs)
2785 {
2786 	struct btrfs_stripe *stripe;
2787 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2788 	u64 stripe_offset;
2789 	u64 stripe_length;
2790 	int factor;
2791 	int i;
2792 
2793 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2794 		return 0;
2795 
2796 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2797 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2798 		factor = num_stripes / 2;
2799 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2800 		factor = num_stripes - 1;
2801 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2802 		factor = num_stripes - 2;
2803 	} else {
2804 		factor = num_stripes;
2805 	}
2806 
2807 	for (i = 0; i < num_stripes; i++) {
2808 		stripe = btrfs_stripe_nr(chunk, i);
2809 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2810 			continue;
2811 
2812 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2813 		stripe_length = btrfs_chunk_length(leaf, chunk);
2814 		do_div(stripe_length, factor);
2815 
2816 		if (stripe_offset < bargs->pend &&
2817 		    stripe_offset + stripe_length > bargs->pstart)
2818 			return 0;
2819 	}
2820 
2821 	return 1;
2822 }
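/*
 * Worked example for the factor logic above: the chunk length is the
 * logical size, so the on-device stripe length is length / factor.
 * For a 2GiB RAID0 chunk over 4 stripes, factor is 4 and each device
 * holds a 512MiB extent; for a 2GiB RAID1 chunk (2 stripes), factor is
 * 2/2 == 1 and each device holds the full 2GiB.
 */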
2823 
2824 /* [vstart, vend) */
2825 static int chunk_vrange_filter(struct extent_buffer *leaf,
2826 			       struct btrfs_chunk *chunk,
2827 			       u64 chunk_offset,
2828 			       struct btrfs_balance_args *bargs)
2829 {
2830 	if (chunk_offset < bargs->vend &&
2831 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2832 		/* at least part of the chunk is inside this vrange */
2833 		return 0;
2834 
2835 	return 1;
2836 }
2837 
2838 static int chunk_soft_convert_filter(u64 chunk_type,
2839 				     struct btrfs_balance_args *bargs)
2840 {
2841 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2842 		return 0;
2843 
2844 	chunk_type = chunk_to_extended(chunk_type) &
2845 				BTRFS_EXTENDED_PROFILE_MASK;
2846 
2847 	if (bargs->target == chunk_type)
2848 		return 1;
2849 
2850 	return 0;
2851 }
2852 
2853 static int should_balance_chunk(struct btrfs_root *root,
2854 				struct extent_buffer *leaf,
2855 				struct btrfs_chunk *chunk, u64 chunk_offset)
2856 {
2857 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2858 	struct btrfs_balance_args *bargs = NULL;
2859 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2860 
2861 	/* type filter */
2862 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2863 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2864 		return 0;
2865 	}
2866 
2867 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2868 		bargs = &bctl->data;
2869 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2870 		bargs = &bctl->sys;
2871 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2872 		bargs = &bctl->meta;
2873 
2874 	/* profiles filter */
2875 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2876 	    chunk_profiles_filter(chunk_type, bargs)) {
2877 		return 0;
2878 	}
2879 
2880 	/* usage filter */
2881 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2882 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2883 		return 0;
2884 	}
2885 
2886 	/* devid filter */
2887 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2888 	    chunk_devid_filter(leaf, chunk, bargs)) {
2889 		return 0;
2890 	}
2891 
2892 	/* drange filter, makes sense only with devid filter */
2893 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2894 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2895 		return 0;
2896 	}
2897 
2898 	/* vrange filter */
2899 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2900 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2901 		return 0;
2902 	}
2903 
2904 	/* soft profile changing mode */
2905 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2906 	    chunk_soft_convert_filter(chunk_type, bargs)) {
2907 		return 0;
2908 	}
2909 
2910 	return 1;
2911 }
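/*
 * Note: the checks above are effectively AND-ed together - a chunk is
 * balanced only if its block group type is selected by bctl->flags and
 * it passes every filter enabled in the matching btrfs_balance_args.
 * Any single filter returning 1 skips the chunk.
 */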
2912 
2913 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2914 {
2915 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2916 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2917 	struct btrfs_root *dev_root = fs_info->dev_root;
2918 	struct list_head *devices;
2919 	struct btrfs_device *device;
2920 	u64 old_size;
2921 	u64 size_to_free;
2922 	struct btrfs_chunk *chunk;
2923 	struct btrfs_path *path;
2924 	struct btrfs_key key;
2925 	struct btrfs_key found_key;
2926 	struct btrfs_trans_handle *trans;
2927 	struct extent_buffer *leaf;
2928 	int slot;
2929 	int ret;
2930 	int enospc_errors = 0;
2931 	bool counting = true;
2932 
2933 	/* step one, make some room on all the devices */
2934 	devices = &fs_info->fs_devices->devices;
2935 	list_for_each_entry(device, devices, dev_list) {
2936 		old_size = device->total_bytes;
2937 		size_to_free = div_factor(old_size, 1);
2938 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2939 		if (!device->writeable ||
2940 		    device->total_bytes - device->bytes_used > size_to_free ||
2941 		    device->is_tgtdev_for_dev_replace)
2942 			continue;
2943 
2944 		ret = btrfs_shrink_device(device, old_size - size_to_free);
2945 		if (ret == -ENOSPC)
2946 			break;
2947 		BUG_ON(ret);
2948 
2949 		trans = btrfs_start_transaction(dev_root, 0);
2950 		BUG_ON(IS_ERR(trans));
2951 
2952 		ret = btrfs_grow_device(trans, device, old_size);
2953 		BUG_ON(ret);
2954 
2955 		btrfs_end_transaction(trans, dev_root);
2956 	}
2957 
2958 	/* step two, relocate all the chunks */
2959 	path = btrfs_alloc_path();
2960 	if (!path) {
2961 		ret = -ENOMEM;
2962 		goto error;
2963 	}
2964 
2965 	/* zero out stat counters */
2966 	spin_lock(&fs_info->balance_lock);
2967 	memset(&bctl->stat, 0, sizeof(bctl->stat));
2968 	spin_unlock(&fs_info->balance_lock);
2969 again:
2970 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2971 	key.offset = (u64)-1;
2972 	key.type = BTRFS_CHUNK_ITEM_KEY;
2973 
2974 	while (1) {
2975 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2976 		    atomic_read(&fs_info->balance_cancel_req)) {
2977 			ret = -ECANCELED;
2978 			goto error;
2979 		}
2980 
2981 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2982 		if (ret < 0)
2983 			goto error;
2984 
2985 		/*
2986 		 * this shouldn't happen, it means the last relocate
2987 		 * failed
2988 		 */
2989 		if (ret == 0)
2990 			BUG(); /* FIXME break ? */
2991 
2992 		ret = btrfs_previous_item(chunk_root, path, 0,
2993 					  BTRFS_CHUNK_ITEM_KEY);
2994 		if (ret) {
2995 			ret = 0;
2996 			break;
2997 		}
2998 
2999 		leaf = path->nodes[0];
3000 		slot = path->slots[0];
3001 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3002 
3003 		if (found_key.objectid != key.objectid)
3004 			break;
3005 
3006 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3007 
3008 		if (!counting) {
3009 			spin_lock(&fs_info->balance_lock);
3010 			bctl->stat.considered++;
3011 			spin_unlock(&fs_info->balance_lock);
3012 		}
3013 
3014 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3015 					   found_key.offset);
3016 		btrfs_release_path(path);
3017 		if (!ret)
3018 			goto loop;
3019 
3020 		if (counting) {
3021 			spin_lock(&fs_info->balance_lock);
3022 			bctl->stat.expected++;
3023 			spin_unlock(&fs_info->balance_lock);
3024 			goto loop;
3025 		}
3026 
3027 		ret = btrfs_relocate_chunk(chunk_root,
3028 					   chunk_root->root_key.objectid,
3029 					   found_key.objectid,
3030 					   found_key.offset);
3031 		if (ret && ret != -ENOSPC)
3032 			goto error;
3033 		if (ret == -ENOSPC) {
3034 			enospc_errors++;
3035 		} else {
3036 			spin_lock(&fs_info->balance_lock);
3037 			bctl->stat.completed++;
3038 			spin_unlock(&fs_info->balance_lock);
3039 		}
3040 loop:
3041 		if (found_key.offset == 0)
3042 			break;
3043 		key.offset = found_key.offset - 1;
3044 	}
3045 
3046 	if (counting) {
3047 		btrfs_release_path(path);
3048 		counting = false;
3049 		goto again;
3050 	}
3051 error:
3052 	btrfs_free_path(path);
3053 	if (enospc_errors) {
3054 		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3055 		       enospc_errors);
3056 		if (!ret)
3057 			ret = -ENOSPC;
3058 	}
3059 
3060 	return ret;
3061 }
3062 
3063 /**
3064  * alloc_profile_is_valid - see if a given profile is valid and reduced
3065  * @flags: profile to validate
3066  * @extended: if true @flags is treated as an extended profile
3067  */
3068 static int alloc_profile_is_valid(u64 flags, int extended)
3069 {
3070 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3071 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3072 
3073 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3074 
3075 	/* 1) check that all other bits are zeroed */
3076 	if (flags & ~mask)
3077 		return 0;
3078 
3079 	/* 2) see if profile is reduced */
3080 	if (flags == 0)
3081 		return !extended; /* "0" is valid for usual profiles */
3082 
3083 	/* true if exactly one bit set */
3084 	return (flags & (flags - 1)) == 0;
3085 }
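/*
 * Examples: RAID1 alone is valid in either mode (exactly one profile
 * bit set); RAID1|RAID10 is not reduced and is rejected.  A value of 0
 * is valid only for non-extended profiles, since the extended form
 * represents single with BTRFS_AVAIL_ALLOC_BIT_SINGLE instead.
 */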
3086 
3087 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3088 {
3089 	/* cancel requested || normal exit path */
3090 	return atomic_read(&fs_info->balance_cancel_req) ||
3091 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3092 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3093 }
3094 
3095 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3096 {
3097 	int ret;
3098 
3099 	unset_balance_control(fs_info);
3100 	ret = del_balance_item(fs_info->tree_root);
3101 	if (ret)
3102 		btrfs_std_error(fs_info, ret);
3103 
3104 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3105 }
3106 
3107 /*
3108  * Should be called with both balance and volume mutexes held
3109  */
3110 int btrfs_balance(struct btrfs_balance_control *bctl,
3111 		  struct btrfs_ioctl_balance_args *bargs)
3112 {
3113 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3114 	u64 allowed;
3115 	int mixed = 0;
3116 	int ret;
3117 	u64 num_devices;
3118 	unsigned seq;
3119 
3120 	if (btrfs_fs_closing(fs_info) ||
3121 	    atomic_read(&fs_info->balance_pause_req) ||
3122 	    atomic_read(&fs_info->balance_cancel_req)) {
3123 		ret = -EINVAL;
3124 		goto out;
3125 	}
3126 
3127 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3128 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3129 		mixed = 1;
3130 
3131 	/*
3132 	 * In case of mixed groups both data and meta should be picked,
3133 	 * and identical options should be given for both of them.
3134 	 */
3135 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3136 	if (mixed && (bctl->flags & allowed)) {
3137 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3138 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3139 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3140 			printk(KERN_ERR "btrfs: with mixed groups data and "
3141 			       "metadata balance options must be the same\n");
3142 			ret = -EINVAL;
3143 			goto out;
3144 		}
3145 	}
3146 
3147 	num_devices = fs_info->fs_devices->num_devices;
3148 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3149 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3150 		BUG_ON(num_devices < 1);
3151 		num_devices--;
3152 	}
3153 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3154 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3155 	if (num_devices == 1)
3156 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3157 	else if (num_devices > 1)
3158 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3159 	if (num_devices > 2)
3160 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3161 	if (num_devices > 3)
3162 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3163 			    BTRFS_BLOCK_GROUP_RAID6);
3164 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3165 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3166 	     (bctl->data.target & ~allowed))) {
3167 		printk(KERN_ERR "btrfs: unable to start balance with target "
3168 		       "data profile %llu\n",
3169 		       bctl->data.target);
3170 		ret = -EINVAL;
3171 		goto out;
3172 	}
3173 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3174 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3175 	     (bctl->meta.target & ~allowed))) {
3176 		printk(KERN_ERR "btrfs: unable to start balance with target "
3177 		       "metadata profile %llu\n",
3178 		       bctl->meta.target);
3179 		ret = -EINVAL;
3180 		goto out;
3181 	}
3182 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3183 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3184 	     (bctl->sys.target & ~allowed))) {
3185 		printk(KERN_ERR "btrfs: unable to start balance with target "
3186 		       "system profile %llu\n",
3187 		       bctl->sys.target);
3188 		ret = -EINVAL;
3189 		goto out;
3190 	}
3191 
3192 	/* allow dup'ed data chunks only in mixed mode */
3193 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3194 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3195 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3196 		ret = -EINVAL;
3197 		goto out;
3198 	}
3199 
3200 	/* allow reducing metadata or system integrity only if force is set */
3201 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3202 			BTRFS_BLOCK_GROUP_RAID10 |
3203 			BTRFS_BLOCK_GROUP_RAID5 |
3204 			BTRFS_BLOCK_GROUP_RAID6;
3205 	do {
3206 		seq = read_seqbegin(&fs_info->profiles_lock);
3207 
3208 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3209 		     (fs_info->avail_system_alloc_bits & allowed) &&
3210 		     !(bctl->sys.target & allowed)) ||
3211 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3212 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3213 		     !(bctl->meta.target & allowed))) {
3214 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3215 				printk(KERN_INFO "btrfs: force reducing metadata "
3216 				       "integrity\n");
3217 			} else {
3218 				printk(KERN_ERR "btrfs: balance will reduce metadata "
3219 				       "integrity, use force if you want this\n");
3220 				ret = -EINVAL;
3221 				goto out;
3222 			}
3223 		}
3224 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3225 
3226 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3227 		int num_tolerated_disk_barrier_failures;
3228 		u64 target = bctl->sys.target;
3229 
3230 		num_tolerated_disk_barrier_failures =
3231 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3232 		if (num_tolerated_disk_barrier_failures > 0 &&
3233 		    (target &
3234 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3235 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3236 			num_tolerated_disk_barrier_failures = 0;
3237 		else if (num_tolerated_disk_barrier_failures > 1 &&
3238 			 (target &
3239 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3240 			num_tolerated_disk_barrier_failures = 1;
3241 
3242 		fs_info->num_tolerated_disk_barrier_failures =
3243 			num_tolerated_disk_barrier_failures;
3244 	}
3245 
3246 	ret = insert_balance_item(fs_info->tree_root, bctl);
3247 	if (ret && ret != -EEXIST)
3248 		goto out;
3249 
3250 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3251 		BUG_ON(ret == -EEXIST);
3252 		set_balance_control(bctl);
3253 	} else {
3254 		BUG_ON(ret != -EEXIST);
3255 		spin_lock(&fs_info->balance_lock);
3256 		update_balance_args(bctl);
3257 		spin_unlock(&fs_info->balance_lock);
3258 	}
3259 
3260 	atomic_inc(&fs_info->balance_running);
3261 	mutex_unlock(&fs_info->balance_mutex);
3262 
3263 	ret = __btrfs_balance(fs_info);
3264 
3265 	mutex_lock(&fs_info->balance_mutex);
3266 	atomic_dec(&fs_info->balance_running);
3267 
3268 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3269 		fs_info->num_tolerated_disk_barrier_failures =
3270 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3271 	}
3272 
3273 	if (bargs) {
3274 		memset(bargs, 0, sizeof(*bargs));
3275 		update_ioctl_balance_args(fs_info, 0, bargs);
3276 	}
3277 
3278 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3279 	    balance_need_close(fs_info)) {
3280 		__cancel_balance(fs_info);
3281 	}
3282 
3283 	wake_up(&fs_info->balance_wait_q);
3284 
3285 	return ret;
3286 out:
3287 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3288 		__cancel_balance(fs_info);
3289 	else {
3290 		kfree(bctl);
3291 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3292 	}
3293 	return ret;
3294 }
3295 
3296 static int balance_kthread(void *data)
3297 {
3298 	struct btrfs_fs_info *fs_info = data;
3299 	int ret = 0;
3300 
3301 	mutex_lock(&fs_info->volume_mutex);
3302 	mutex_lock(&fs_info->balance_mutex);
3303 
3304 	if (fs_info->balance_ctl) {
3305 		printk(KERN_INFO "btrfs: continuing balance\n");
3306 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3307 	}
3308 
3309 	mutex_unlock(&fs_info->balance_mutex);
3310 	mutex_unlock(&fs_info->volume_mutex);
3311 
3312 	return ret;
3313 }
3314 
3315 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3316 {
3317 	struct task_struct *tsk;
3318 
3319 	spin_lock(&fs_info->balance_lock);
3320 	if (!fs_info->balance_ctl) {
3321 		spin_unlock(&fs_info->balance_lock);
3322 		return 0;
3323 	}
3324 	spin_unlock(&fs_info->balance_lock);
3325 
3326 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3327 		printk(KERN_INFO "btrfs: force skipping balance\n");
3328 		return 0;
3329 	}
3330 
3331 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3332 	return PTR_ERR_OR_ZERO(tsk);
3333 }
3334 
3335 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3336 {
3337 	struct btrfs_balance_control *bctl;
3338 	struct btrfs_balance_item *item;
3339 	struct btrfs_disk_balance_args disk_bargs;
3340 	struct btrfs_path *path;
3341 	struct extent_buffer *leaf;
3342 	struct btrfs_key key;
3343 	int ret;
3344 
3345 	path = btrfs_alloc_path();
3346 	if (!path)
3347 		return -ENOMEM;
3348 
3349 	key.objectid = BTRFS_BALANCE_OBJECTID;
3350 	key.type = BTRFS_BALANCE_ITEM_KEY;
3351 	key.offset = 0;
3352 
3353 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3354 	if (ret < 0)
3355 		goto out;
3356 	if (ret > 0) { /* ret = -ENOENT; */
3357 		ret = 0;
3358 		goto out;
3359 	}
3360 
3361 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3362 	if (!bctl) {
3363 		ret = -ENOMEM;
3364 		goto out;
3365 	}
3366 
3367 	leaf = path->nodes[0];
3368 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3369 
3370 	bctl->fs_info = fs_info;
3371 	bctl->flags = btrfs_balance_flags(leaf, item);
3372 	bctl->flags |= BTRFS_BALANCE_RESUME;
3373 
3374 	btrfs_balance_data(leaf, item, &disk_bargs);
3375 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3376 	btrfs_balance_meta(leaf, item, &disk_bargs);
3377 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3378 	btrfs_balance_sys(leaf, item, &disk_bargs);
3379 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3380 
3381 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3382 
3383 	mutex_lock(&fs_info->volume_mutex);
3384 	mutex_lock(&fs_info->balance_mutex);
3385 
3386 	set_balance_control(bctl);
3387 
3388 	mutex_unlock(&fs_info->balance_mutex);
3389 	mutex_unlock(&fs_info->volume_mutex);
3390 out:
3391 	btrfs_free_path(path);
3392 	return ret;
3393 }
3394 
3395 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3396 {
3397 	int ret = 0;
3398 
3399 	mutex_lock(&fs_info->balance_mutex);
3400 	if (!fs_info->balance_ctl) {
3401 		mutex_unlock(&fs_info->balance_mutex);
3402 		return -ENOTCONN;
3403 	}
3404 
3405 	if (atomic_read(&fs_info->balance_running)) {
3406 		atomic_inc(&fs_info->balance_pause_req);
3407 		mutex_unlock(&fs_info->balance_mutex);
3408 
3409 		wait_event(fs_info->balance_wait_q,
3410 			   atomic_read(&fs_info->balance_running) == 0);
3411 
3412 		mutex_lock(&fs_info->balance_mutex);
3413 		/* we are good with balance_ctl ripped off from under us */
3414 		BUG_ON(atomic_read(&fs_info->balance_running));
3415 		atomic_dec(&fs_info->balance_pause_req);
3416 	} else {
3417 		ret = -ENOTCONN;
3418 	}
3419 
3420 	mutex_unlock(&fs_info->balance_mutex);
3421 	return ret;
3422 }
3423 
3424 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3425 {
3426 	mutex_lock(&fs_info->balance_mutex);
3427 	if (!fs_info->balance_ctl) {
3428 		mutex_unlock(&fs_info->balance_mutex);
3429 		return -ENOTCONN;
3430 	}
3431 
3432 	atomic_inc(&fs_info->balance_cancel_req);
3433 	/*
3434 	 * if we are running, just wait and return; the balance item is
3435 	 * deleted in btrfs_balance() in this case
3436 	 */
3437 	if (atomic_read(&fs_info->balance_running)) {
3438 		mutex_unlock(&fs_info->balance_mutex);
3439 		wait_event(fs_info->balance_wait_q,
3440 			   atomic_read(&fs_info->balance_running) == 0);
3441 		mutex_lock(&fs_info->balance_mutex);
3442 	} else {
3443 		/* __cancel_balance needs volume_mutex */
3444 		mutex_unlock(&fs_info->balance_mutex);
3445 		mutex_lock(&fs_info->volume_mutex);
3446 		mutex_lock(&fs_info->balance_mutex);
3447 
3448 		if (fs_info->balance_ctl)
3449 			__cancel_balance(fs_info);
3450 
3451 		mutex_unlock(&fs_info->volume_mutex);
3452 	}
3453 
3454 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3455 	atomic_dec(&fs_info->balance_cancel_req);
3456 	mutex_unlock(&fs_info->balance_mutex);
3457 	return 0;
3458 }
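/*
 * Summary of the two paths above: pausing only waits for the running
 * balance to stop and keeps both balance_ctl and the on-disk balance
 * item, so the operation can be resumed later; cancelling additionally
 * tears the control structure down via __cancel_balance(), which also
 * deletes the balance item.
 */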
3459 
3460 static int btrfs_uuid_scan_kthread(void *data)
3461 {
3462 	struct btrfs_fs_info *fs_info = data;
3463 	struct btrfs_root *root = fs_info->tree_root;
3464 	struct btrfs_key key;
3465 	struct btrfs_key max_key;
3466 	struct btrfs_path *path = NULL;
3467 	int ret = 0;
3468 	struct extent_buffer *eb;
3469 	int slot;
3470 	struct btrfs_root_item root_item;
3471 	u32 item_size;
3472 	struct btrfs_trans_handle *trans = NULL;
3473 
3474 	path = btrfs_alloc_path();
3475 	if (!path) {
3476 		ret = -ENOMEM;
3477 		goto out;
3478 	}
3479 
3480 	key.objectid = 0;
3481 	key.type = BTRFS_ROOT_ITEM_KEY;
3482 	key.offset = 0;
3483 
3484 	max_key.objectid = (u64)-1;
3485 	max_key.type = BTRFS_ROOT_ITEM_KEY;
3486 	max_key.offset = (u64)-1;
3487 
3488 	path->keep_locks = 1;
3489 
3490 	while (1) {
3491 		ret = btrfs_search_forward(root, &key, &max_key, path, 0);
3492 		if (ret) {
3493 			if (ret > 0)
3494 				ret = 0;
3495 			break;
3496 		}
3497 
3498 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3499 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3500 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3501 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3502 			goto skip;
3503 
3504 		eb = path->nodes[0];
3505 		slot = path->slots[0];
3506 		item_size = btrfs_item_size_nr(eb, slot);
3507 		if (item_size < sizeof(root_item))
3508 			goto skip;
3509 
3510 		read_extent_buffer(eb, &root_item,
3511 				   btrfs_item_ptr_offset(eb, slot),
3512 				   (int)sizeof(root_item));
3513 		if (btrfs_root_refs(&root_item) == 0)
3514 			goto skip;
3515 
3516 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3517 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3518 			if (trans)
3519 				goto update_tree;
3520 
3521 			btrfs_release_path(path);
3522 			/*
3523 			 * 1 - subvol uuid item
3524 			 * 1 - received_subvol uuid item
3525 			 */
3526 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3527 			if (IS_ERR(trans)) {
3528 				ret = PTR_ERR(trans);
3529 				break;
3530 			}
3531 			continue;
3532 		} else {
3533 			goto skip;
3534 		}
3535 update_tree:
3536 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3537 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3538 						  root_item.uuid,
3539 						  BTRFS_UUID_KEY_SUBVOL,
3540 						  key.objectid);
3541 			if (ret < 0) {
3542 				pr_warn("btrfs: uuid_tree_add failed %d\n",
3543 					ret);
3544 				break;
3545 			}
3546 		}
3547 
3548 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3549 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3550 						  root_item.received_uuid,
3551 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3552 						  key.objectid);
3553 			if (ret < 0) {
3554 				pr_warn("btrfs: uuid_tree_add failed %d\n",
3555 					ret);
3556 				break;
3557 			}
3558 		}
3559 
3560 skip:
3561 		if (trans) {
3562 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3563 			trans = NULL;
3564 			if (ret)
3565 				break;
3566 		}
3567 
3568 		btrfs_release_path(path);
3569 		if (key.offset < (u64)-1) {
3570 			key.offset++;
3571 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3572 			key.offset = 0;
3573 			key.type = BTRFS_ROOT_ITEM_KEY;
3574 		} else if (key.objectid < (u64)-1) {
3575 			key.offset = 0;
3576 			key.type = BTRFS_ROOT_ITEM_KEY;
3577 			key.objectid++;
3578 		} else {
3579 			break;
3580 		}
3581 		cond_resched();
3582 	}
3583 
3584 out:
3585 	btrfs_free_path(path);
3586 	if (trans && !IS_ERR(trans))
3587 		btrfs_end_transaction(trans, fs_info->uuid_root);
3588 	if (ret)
3589 		pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
3590 	else
3591 		fs_info->update_uuid_tree_gen = 1;
3592 	up(&fs_info->uuid_tree_rescan_sem);
3593 	return 0;
3594 }
3595 
3596 /*
3597  * Callback for btrfs_uuid_tree_iterate().
3598  * returns:
3599  * 0	check succeeded, the entry is not outdated.
3600  * < 0	if an error occurred.
3601  * > 0	if the check failed, which means the caller shall remove the entry.
3602  */
3603 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3604 				       u8 *uuid, u8 type, u64 subid)
3605 {
3606 	struct btrfs_key key;
3607 	int ret = 0;
3608 	struct btrfs_root *subvol_root;
3609 
3610 	if (type != BTRFS_UUID_KEY_SUBVOL &&
3611 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3612 		goto out;
3613 
3614 	key.objectid = subid;
3615 	key.type = BTRFS_ROOT_ITEM_KEY;
3616 	key.offset = (u64)-1;
3617 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3618 	if (IS_ERR(subvol_root)) {
3619 		ret = PTR_ERR(subvol_root);
3620 		if (ret == -ENOENT)
3621 			ret = 1;
3622 		goto out;
3623 	}
3624 
3625 	switch (type) {
3626 	case BTRFS_UUID_KEY_SUBVOL:
3627 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3628 			ret = 1;
3629 		break;
3630 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3631 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3632 			   BTRFS_UUID_SIZE))
3633 			ret = 1;
3634 		break;
3635 	}
3636 
3637 out:
3638 	return ret;
3639 }
3640 
3641 static int btrfs_uuid_rescan_kthread(void *data)
3642 {
3643 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3644 	int ret;
3645 
3646 	/*
3647 	 * 1st step is to iterate through the existing UUID tree and
3648 	 * to delete all entries that contain outdated data.
3649 	 * 2nd step is to add all missing entries to the UUID tree.
3650 	 */
3651 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3652 	if (ret < 0) {
3653 		pr_warn("btrfs: iterating uuid_tree failed %d\n", ret);
3654 		up(&fs_info->uuid_tree_rescan_sem);
3655 		return ret;
3656 	}
3657 	return btrfs_uuid_scan_kthread(data);
3658 }
3659 
3660 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3661 {
3662 	struct btrfs_trans_handle *trans;
3663 	struct btrfs_root *tree_root = fs_info->tree_root;
3664 	struct btrfs_root *uuid_root;
3665 	struct task_struct *task;
3666 	int ret;
3667 
3668 	/*
3669 	 * 1 - root node
3670 	 * 1 - root item
3671 	 */
3672 	trans = btrfs_start_transaction(tree_root, 2);
3673 	if (IS_ERR(trans))
3674 		return PTR_ERR(trans);
3675 
3676 	uuid_root = btrfs_create_tree(trans, fs_info,
3677 				      BTRFS_UUID_TREE_OBJECTID);
3678 	if (IS_ERR(uuid_root)) {
3679 		btrfs_abort_transaction(trans, tree_root,
3680 					PTR_ERR(uuid_root));
3681 		return PTR_ERR(uuid_root);
3682 	}
3683 
3684 	fs_info->uuid_root = uuid_root;
3685 
3686 	ret = btrfs_commit_transaction(trans, tree_root);
3687 	if (ret)
3688 		return ret;
3689 
3690 	down(&fs_info->uuid_tree_rescan_sem);
3691 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3692 	if (IS_ERR(task)) {
3693 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3694 		pr_warn("btrfs: failed to start uuid_scan task\n");
3695 		up(&fs_info->uuid_tree_rescan_sem);
3696 		return PTR_ERR(task);
3697 	}
3698 
3699 	return 0;
3700 }
3701 
3702 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3703 {
3704 	struct task_struct *task;
3705 
3706 	down(&fs_info->uuid_tree_rescan_sem);
3707 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3708 	if (IS_ERR(task)) {
3709 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3710 		pr_warn("btrfs: failed to start uuid_rescan task\n");
3711 		up(&fs_info->uuid_tree_rescan_sem);
3712 		return PTR_ERR(task);
3713 	}
3714 
3715 	return 0;
3716 }
3717 
3718 /*
3719  * Shrinking a device means finding all of the device extents past
3720  * the new size, and then following the back refs to the chunks.
3721  * The chunk relocation code actually frees the device extents.
3722  */
3723 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3724 {
3725 	struct btrfs_trans_handle *trans;
3726 	struct btrfs_root *root = device->dev_root;
3727 	struct btrfs_dev_extent *dev_extent = NULL;
3728 	struct btrfs_path *path;
3729 	u64 length;
3730 	u64 chunk_tree;
3731 	u64 chunk_objectid;
3732 	u64 chunk_offset;
3733 	int ret;
3734 	int slot;
3735 	int failed = 0;
3736 	bool retried = false;
3737 	struct extent_buffer *l;
3738 	struct btrfs_key key;
3739 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3740 	u64 old_total = btrfs_super_total_bytes(super_copy);
3741 	u64 old_size = device->total_bytes;
3742 	u64 diff = device->total_bytes - new_size;
3743 
3744 	if (device->is_tgtdev_for_dev_replace)
3745 		return -EINVAL;
3746 
3747 	path = btrfs_alloc_path();
3748 	if (!path)
3749 		return -ENOMEM;
3750 
3751 	path->reada = 2;
3752 
3753 	lock_chunks(root);
3754 
3755 	device->total_bytes = new_size;
3756 	if (device->writeable) {
3757 		device->fs_devices->total_rw_bytes -= diff;
3758 		spin_lock(&root->fs_info->free_chunk_lock);
3759 		root->fs_info->free_chunk_space -= diff;
3760 		spin_unlock(&root->fs_info->free_chunk_lock);
3761 	}
3762 	unlock_chunks(root);
3763 
3764 again:
3765 	key.objectid = device->devid;
3766 	key.offset = (u64)-1;
3767 	key.type = BTRFS_DEV_EXTENT_KEY;
3768 
3769 	do {
3770 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3771 		if (ret < 0)
3772 			goto done;
3773 
3774 		ret = btrfs_previous_item(root, path, 0, key.type);
3775 		if (ret < 0)
3776 			goto done;
3777 		if (ret) {
3778 			ret = 0;
3779 			btrfs_release_path(path);
3780 			break;
3781 		}
3782 
3783 		l = path->nodes[0];
3784 		slot = path->slots[0];
3785 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3786 
3787 		if (key.objectid != device->devid) {
3788 			btrfs_release_path(path);
3789 			break;
3790 		}
3791 
3792 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3793 		length = btrfs_dev_extent_length(l, dev_extent);
3794 
3795 		if (key.offset + length <= new_size) {
3796 			btrfs_release_path(path);
3797 			break;
3798 		}
3799 
3800 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3801 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3802 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3803 		btrfs_release_path(path);
3804 
3805 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3806 					   chunk_offset);
3807 		if (ret && ret != -ENOSPC)
3808 			goto done;
3809 		if (ret == -ENOSPC)
3810 			failed++;
3811 	} while (key.offset-- > 0);
3812 
3813 	if (failed && !retried) {
3814 		failed = 0;
3815 		retried = true;
3816 		goto again;
3817 	} else if (failed && retried) {
3818 		ret = -ENOSPC;
3819 		lock_chunks(root);
3820 
3821 		device->total_bytes = old_size;
3822 		if (device->writeable)
3823 			device->fs_devices->total_rw_bytes += diff;
3824 		spin_lock(&root->fs_info->free_chunk_lock);
3825 		root->fs_info->free_chunk_space += diff;
3826 		spin_unlock(&root->fs_info->free_chunk_lock);
3827 		unlock_chunks(root);
3828 		goto done;
3829 	}
3830 
3831 	/* Shrinking succeeded, else we would be at "done". */
3832 	trans = btrfs_start_transaction(root, 0);
3833 	if (IS_ERR(trans)) {
3834 		ret = PTR_ERR(trans);
3835 		goto done;
3836 	}
3837 
3838 	lock_chunks(root);
3839 
3840 	device->disk_total_bytes = new_size;
3841 	/* Now btrfs_update_device() will change the on-disk size. */
3842 	ret = btrfs_update_device(trans, device);
3843 	if (ret) {
3844 		unlock_chunks(root);
3845 		btrfs_end_transaction(trans, root);
3846 		goto done;
3847 	}
3848 	WARN_ON(diff > old_total);
3849 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3850 	unlock_chunks(root);
3851 	btrfs_end_transaction(trans, root);
3852 done:
3853 	btrfs_free_path(path);
3854 	return ret;
3855 }
3856 
3857 static int btrfs_add_system_chunk(struct btrfs_root *root,
3858 			   struct btrfs_key *key,
3859 			   struct btrfs_chunk *chunk, int item_size)
3860 {
3861 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3862 	struct btrfs_disk_key disk_key;
3863 	u32 array_size;
3864 	u8 *ptr;
3865 
3866 	array_size = btrfs_super_sys_array_size(super_copy);
3867 	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3868 		return -EFBIG;
3869 
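	/*
	 * The sys_chunk_array holds back-to-back (struct btrfs_disk_key,
	 * chunk item) pairs, so append the new entry as a key followed by
	 * the chunk item itself.
	 */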
3870 	ptr = super_copy->sys_chunk_array + array_size;
3871 	btrfs_cpu_key_to_disk(&disk_key, key);
3872 	memcpy(ptr, &disk_key, sizeof(disk_key));
3873 	ptr += sizeof(disk_key);
3874 	memcpy(ptr, chunk, item_size);
3875 	item_size += sizeof(disk_key);
3876 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3877 	return 0;
3878 }
3879 
3880 /*
3881  * sort the devices in descending order by max_avail, total_avail
3882  */
3883 static int btrfs_cmp_device_info(const void *a, const void *b)
3884 {
3885 	const struct btrfs_device_info *di_a = a;
3886 	const struct btrfs_device_info *di_b = b;
3887 
3888 	if (di_a->max_avail > di_b->max_avail)
3889 		return -1;
3890 	if (di_a->max_avail < di_b->max_avail)
3891 		return 1;
3892 	if (di_a->total_avail > di_b->total_avail)
3893 		return -1;
3894 	if (di_a->total_avail < di_b->total_avail)
3895 		return 1;
3896 	return 0;
3897 }
3898 
3899 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3900 	[BTRFS_RAID_RAID10] = {
3901 		.sub_stripes	= 2,
3902 		.dev_stripes	= 1,
3903 		.devs_max	= 0,	/* 0 == as many as possible */
3904 		.devs_min	= 4,
3905 		.devs_increment	= 2,
3906 		.ncopies	= 2,
3907 	},
3908 	[BTRFS_RAID_RAID1] = {
3909 		.sub_stripes	= 1,
3910 		.dev_stripes	= 1,
3911 		.devs_max	= 2,
3912 		.devs_min	= 2,
3913 		.devs_increment	= 2,
3914 		.ncopies	= 2,
3915 	},
3916 	[BTRFS_RAID_DUP] = {
3917 		.sub_stripes	= 1,
3918 		.dev_stripes	= 2,
3919 		.devs_max	= 1,
3920 		.devs_min	= 1,
3921 		.devs_increment	= 1,
3922 		.ncopies	= 2,
3923 	},
3924 	[BTRFS_RAID_RAID0] = {
3925 		.sub_stripes	= 1,
3926 		.dev_stripes	= 1,
3927 		.devs_max	= 0,
3928 		.devs_min	= 2,
3929 		.devs_increment	= 1,
3930 		.ncopies	= 1,
3931 	},
3932 	[BTRFS_RAID_SINGLE] = {
3933 		.sub_stripes	= 1,
3934 		.dev_stripes	= 1,
3935 		.devs_max	= 1,
3936 		.devs_min	= 1,
3937 		.devs_increment	= 1,
3938 		.ncopies	= 1,
3939 	},
3940 	[BTRFS_RAID_RAID5] = {
3941 		.sub_stripes	= 1,
3942 		.dev_stripes	= 1,
3943 		.devs_max	= 0,
3944 		.devs_min	= 2,
3945 		.devs_increment	= 1,
3946 		.ncopies	= 2,
3947 	},
3948 	[BTRFS_RAID_RAID6] = {
3949 		.sub_stripes	= 1,
3950 		.dev_stripes	= 1,
3951 		.devs_max	= 0,
3952 		.devs_min	= 3,
3953 		.devs_increment	= 1,
3954 		.ncopies	= 3,
3955 	},
3956 };
3957 
3958 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3959 {
3960 	/* TODO allow them to set a preferred stripe size */
3961 	return 64 * 1024;
3962 }
3963 
3964 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3965 {
3966 	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3967 		return;
3968 
3969 	btrfs_set_fs_incompat(info, RAID56);
3970 }
3971 
3972 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3973 			       struct btrfs_root *extent_root, u64 start,
3974 			       u64 type)
3975 {
3976 	struct btrfs_fs_info *info = extent_root->fs_info;
3977 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3978 	struct list_head *cur;
3979 	struct map_lookup *map = NULL;
3980 	struct extent_map_tree *em_tree;
3981 	struct extent_map *em;
3982 	struct btrfs_device_info *devices_info = NULL;
3983 	u64 total_avail;
3984 	int num_stripes;	/* total number of stripes to allocate */
3985 	int data_stripes;	/* number of stripes that count for
3986 				   block group size */
3987 	int sub_stripes;	/* sub_stripes info for map */
3988 	int dev_stripes;	/* stripes per dev */
3989 	int devs_max;		/* max devs to use */
3990 	int devs_min;		/* min devs needed */
3991 	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies the data has */
3993 	int ret;
3994 	u64 max_stripe_size;
3995 	u64 max_chunk_size;
3996 	u64 stripe_size;
3997 	u64 num_bytes;
3998 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3999 	int ndevs;
4000 	int i;
4001 	int j;
4002 	int index;
4003 
4004 	BUG_ON(!alloc_profile_is_valid(type, 0));
4005 
4006 	if (list_empty(&fs_devices->alloc_list))
4007 		return -ENOSPC;
4008 
4009 	index = __get_raid_index(type);
4010 
4011 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4012 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4013 	devs_max = btrfs_raid_array[index].devs_max;
4014 	devs_min = btrfs_raid_array[index].devs_min;
4015 	devs_increment = btrfs_raid_array[index].devs_increment;
4016 	ncopies = btrfs_raid_array[index].ncopies;
4017 
4018 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4019 		max_stripe_size = 1024 * 1024 * 1024;
4020 		max_chunk_size = 10 * max_stripe_size;
4021 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4022 		/* for larger filesystems, use larger metadata chunks */
4023 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4024 			max_stripe_size = 1024 * 1024 * 1024;
4025 		else
4026 			max_stripe_size = 256 * 1024 * 1024;
4027 		max_chunk_size = max_stripe_size;
4028 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4029 		max_stripe_size = 32 * 1024 * 1024;
4030 		max_chunk_size = 2 * max_stripe_size;
4031 	} else {
4032 		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
4033 		       type);
4034 		BUG_ON(1);
4035 	}
4036 
4037 	/* we don't want a chunk larger than 10% of writeable space */
4038 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4039 			     max_chunk_size);
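	/*
	 * Illustrative example (not from the original source): div_factor(x, 1)
	 * returns x / 10, so on a filesystem with 1TB of writeable space a
	 * data chunk is capped at min(10GB, ~102GB) = 10GB.
	 */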
4040 
4041 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4042 			       GFP_NOFS);
4043 	if (!devices_info)
4044 		return -ENOMEM;
4045 
4046 	cur = fs_devices->alloc_list.next;
4047 
4048 	/*
4049 	 * in the first pass through the devices list, we gather information
4050 	 * about the available holes on each device.
4051 	 */
4052 	ndevs = 0;
4053 	while (cur != &fs_devices->alloc_list) {
4054 		struct btrfs_device *device;
4055 		u64 max_avail;
4056 		u64 dev_offset;
4057 
4058 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4059 
4060 		cur = cur->next;
4061 
4062 		if (!device->writeable) {
4063 			WARN(1, KERN_ERR
4064 			       "btrfs: read-only device in alloc_list\n");
4065 			continue;
4066 		}
4067 
4068 		if (!device->in_fs_metadata ||
4069 		    device->is_tgtdev_for_dev_replace)
4070 			continue;
4071 
4072 		if (device->total_bytes > device->bytes_used)
4073 			total_avail = device->total_bytes - device->bytes_used;
4074 		else
4075 			total_avail = 0;
4076 
4077 		/* If there is no space on this device, skip it. */
4078 		if (total_avail == 0)
4079 			continue;
4080 
4081 		ret = find_free_dev_extent(trans, device,
4082 					   max_stripe_size * dev_stripes,
4083 					   &dev_offset, &max_avail);
4084 		if (ret && ret != -ENOSPC)
4085 			goto error;
4086 
4087 		if (ret == 0)
4088 			max_avail = max_stripe_size * dev_stripes;
4089 
4090 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4091 			continue;
4092 
4093 		if (ndevs == fs_devices->rw_devices) {
4094 			WARN(1, "%s: found more than %llu devices\n",
4095 			     __func__, fs_devices->rw_devices);
4096 			break;
4097 		}
4098 		devices_info[ndevs].dev_offset = dev_offset;
4099 		devices_info[ndevs].max_avail = max_avail;
4100 		devices_info[ndevs].total_avail = total_avail;
4101 		devices_info[ndevs].dev = device;
4102 		++ndevs;
4103 	}
4104 
4105 	/*
4106 	 * now sort the devices by hole size / available space
4107 	 */
4108 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4109 	     btrfs_cmp_device_info, NULL);
4110 
4111 	/* round down to number of usable stripes */
4112 	ndevs -= ndevs % devs_increment;
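	/*
	 * Illustrative example: RAID10 has devs_increment == 2, so 5 usable
	 * devices are rounded down to 4 here.
	 */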
4113 
4114 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4115 		ret = -ENOSPC;
4116 		goto error;
4117 	}
4118 
4119 	if (devs_max && ndevs > devs_max)
4120 		ndevs = devs_max;
4121 	/*
4122 	 * the primary goal is to maximize the number of stripes, so use as many
4123 	 * devices as possible, even if the stripes are not maximum sized.
4124 	 */
4125 	stripe_size = devices_info[ndevs-1].max_avail;
4126 	num_stripes = ndevs * dev_stripes;
4127 
4128 	/*
4129 	 * this will have to be fixed for RAID1 and RAID10 over
4130 	 * more drives
4131 	 */
4132 	data_stripes = num_stripes / ncopies;
4133 
4134 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4135 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4136 				 btrfs_super_stripesize(info->super_copy));
4137 		data_stripes = num_stripes - 1;
4138 	}
4139 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4140 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4141 				 btrfs_super_stripesize(info->super_copy));
4142 		data_stripes = num_stripes - 2;
4143 	}
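	/*
	 * Illustration (not from the original source): RAID5 reserves one
	 * stripe per row for parity and RAID6 reserves two (P and Q), so a
	 * 4-device RAID6 chunk has num_stripes == 4 but only
	 * data_stripes == 2 counting toward the logical chunk size.
	 */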
4144 
4145 	/*
4146 	 * Use the number of data stripes to figure out how big this chunk
4147 	 * is really going to be in terms of logical address space,
4148 	 * and compare that answer with the max chunk size
4149 	 */
4150 	if (stripe_size * data_stripes > max_chunk_size) {
4151 		u64 mask = (1ULL << 24) - 1;
4152 		stripe_size = max_chunk_size;
4153 		do_div(stripe_size, data_stripes);
4154 
4155 		/* bump the answer up to a 16MB boundary */
4156 		stripe_size = (stripe_size + mask) & ~mask;
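		/*
		 * e.g. (illustrative numbers): with mask == 16MB - 1, a
		 * stripe_size of 100MB is bumped up to 112MB here.
		 */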
4157 
4158 		/* but don't go higher than the limits we found
4159 		 * while searching for free extents
4160 		 */
4161 		if (stripe_size > devices_info[ndevs-1].max_avail)
4162 			stripe_size = devices_info[ndevs-1].max_avail;
4163 	}
4164 
4165 	do_div(stripe_size, dev_stripes);
4166 
4167 	/* align to BTRFS_STRIPE_LEN */
4168 	do_div(stripe_size, raid_stripe_len);
4169 	stripe_size *= raid_stripe_len;
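	/*
	 * The divide-and-multiply pair above rounds stripe_size down to a
	 * multiple of raid_stripe_len, e.g. 100MB + 5KB becomes 100MB with
	 * the default 64KB stripe length (illustrative numbers).
	 */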
4170 
4171 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4172 	if (!map) {
4173 		ret = -ENOMEM;
4174 		goto error;
4175 	}
4176 	map->num_stripes = num_stripes;
4177 
4178 	for (i = 0; i < ndevs; ++i) {
4179 		for (j = 0; j < dev_stripes; ++j) {
4180 			int s = i * dev_stripes + j;
4181 			map->stripes[s].dev = devices_info[i].dev;
4182 			map->stripes[s].physical = devices_info[i].dev_offset +
4183 						   j * stripe_size;
4184 		}
4185 	}
4186 	map->sector_size = extent_root->sectorsize;
4187 	map->stripe_len = raid_stripe_len;
4188 	map->io_align = raid_stripe_len;
4189 	map->io_width = raid_stripe_len;
4190 	map->type = type;
4191 	map->sub_stripes = sub_stripes;
4192 
4193 	num_bytes = stripe_size * data_stripes;
4194 
4195 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4196 
4197 	em = alloc_extent_map();
4198 	if (!em) {
4199 		ret = -ENOMEM;
4200 		goto error;
4201 	}
4202 	em->bdev = (struct block_device *)map;
4203 	em->start = start;
4204 	em->len = num_bytes;
4205 	em->block_start = 0;
4206 	em->block_len = em->len;
4207 	em->orig_block_len = stripe_size;
4208 
4209 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4210 	write_lock(&em_tree->lock);
4211 	ret = add_extent_mapping(em_tree, em, 0);
4212 	if (!ret) {
4213 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4214 		atomic_inc(&em->refs);
4215 	}
4216 	write_unlock(&em_tree->lock);
4217 	if (ret) {
4218 		free_extent_map(em);
4219 		goto error;
4220 	}
4221 
4222 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4223 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4224 				     start, num_bytes);
4225 	if (ret)
4226 		goto error_del_extent;
4227 
4228 	free_extent_map(em);
4229 	check_raid56_incompat_flag(extent_root->fs_info, type);
4230 
4231 	kfree(devices_info);
4232 	return 0;
4233 
4234 error_del_extent:
4235 	write_lock(&em_tree->lock);
4236 	remove_extent_mapping(em_tree, em);
4237 	write_unlock(&em_tree->lock);
4238 
4239 	/* One for our allocation */
4240 	free_extent_map(em);
4241 	/* One for the tree reference */
4242 	free_extent_map(em);
4243 error:
4244 	kfree(map);
4245 	kfree(devices_info);
4246 	return ret;
4247 }
4248 
4249 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4250 				struct btrfs_root *extent_root,
4251 				u64 chunk_offset, u64 chunk_size)
4252 {
4253 	struct btrfs_key key;
4254 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4255 	struct btrfs_device *device;
4256 	struct btrfs_chunk *chunk;
4257 	struct btrfs_stripe *stripe;
4258 	struct extent_map_tree *em_tree;
4259 	struct extent_map *em;
4260 	struct map_lookup *map;
4261 	size_t item_size;
4262 	u64 dev_offset;
4263 	u64 stripe_size;
4264 	int i = 0;
4265 	int ret;
4266 
4267 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4268 	read_lock(&em_tree->lock);
4269 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4270 	read_unlock(&em_tree->lock);
4271 
4272 	if (!em) {
4273 		btrfs_crit(extent_root->fs_info, "unable to find logical "
4274 			   "%Lu len %Lu", chunk_offset, chunk_size);
4275 		return -EINVAL;
4276 	}
4277 
4278 	if (em->start != chunk_offset || em->len != chunk_size) {
4279 		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4280 			  " %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
4281 			  chunk_size, em->start, em->len);
4282 		free_extent_map(em);
4283 		return -EINVAL;
4284 	}
4285 
4286 	map = (struct map_lookup *)em->bdev;
4287 	item_size = btrfs_chunk_item_size(map->num_stripes);
4288 	stripe_size = em->orig_block_len;
4289 
4290 	chunk = kzalloc(item_size, GFP_NOFS);
4291 	if (!chunk) {
4292 		ret = -ENOMEM;
4293 		goto out;
4294 	}
4295 
4296 	for (i = 0; i < map->num_stripes; i++) {
4297 		device = map->stripes[i].dev;
4298 		dev_offset = map->stripes[i].physical;
4299 
4300 		device->bytes_used += stripe_size;
4301 		ret = btrfs_update_device(trans, device);
4302 		if (ret)
4303 			goto out;
4304 		ret = btrfs_alloc_dev_extent(trans, device,
4305 					     chunk_root->root_key.objectid,
4306 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4307 					     chunk_offset, dev_offset,
4308 					     stripe_size);
4309 		if (ret)
4310 			goto out;
4311 	}
4312 
4313 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4314 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4315 						   map->num_stripes);
4316 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4317 
4318 	stripe = &chunk->stripe;
4319 	for (i = 0; i < map->num_stripes; i++) {
4320 		device = map->stripes[i].dev;
4321 		dev_offset = map->stripes[i].physical;
4322 
4323 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4324 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4325 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4326 		stripe++;
4327 	}
4328 
4329 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4330 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4331 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4332 	btrfs_set_stack_chunk_type(chunk, map->type);
4333 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4334 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4335 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4336 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4337 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4338 
4339 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4340 	key.type = BTRFS_CHUNK_ITEM_KEY;
4341 	key.offset = chunk_offset;
4342 
4343 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4344 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4345 		/*
4346 		 * TODO: Cleanup of inserted chunk root in case of
4347 		 * failure.
4348 		 */
4349 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4350 					     item_size);
4351 	}
4352 
4353 out:
4354 	kfree(chunk);
4355 	free_extent_map(em);
4356 	return ret;
4357 }
4358 
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the
 * work that requires modifying the chunk tree. This division is
 * important for the bootstrap process of adding storage to a seed
 * btrfs.
 */
4366 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4367 		      struct btrfs_root *extent_root, u64 type)
4368 {
4369 	u64 chunk_offset;
4370 
4371 	chunk_offset = find_next_chunk(extent_root->fs_info);
4372 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4373 }
4374 
4375 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4376 					 struct btrfs_root *root,
4377 					 struct btrfs_device *device)
4378 {
4379 	u64 chunk_offset;
4380 	u64 sys_chunk_offset;
4381 	u64 alloc_profile;
4382 	struct btrfs_fs_info *fs_info = root->fs_info;
4383 	struct btrfs_root *extent_root = fs_info->extent_root;
4384 	int ret;
4385 
4386 	chunk_offset = find_next_chunk(fs_info);
4387 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4388 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4389 				  alloc_profile);
4390 	if (ret)
4391 		return ret;
4392 
4393 	sys_chunk_offset = find_next_chunk(root->fs_info);
4394 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4395 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4396 				  alloc_profile);
4397 	if (ret) {
4398 		btrfs_abort_transaction(trans, root, ret);
4399 		goto out;
4400 	}
4401 
4402 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4403 	if (ret)
4404 		btrfs_abort_transaction(trans, root, ret);
4405 out:
4406 	return ret;
4407 }
4408 
4409 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4410 {
4411 	struct extent_map *em;
4412 	struct map_lookup *map;
4413 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4414 	int readonly = 0;
4415 	int i;
4416 
4417 	read_lock(&map_tree->map_tree.lock);
4418 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4419 	read_unlock(&map_tree->map_tree.lock);
4420 	if (!em)
4421 		return 1;
4422 
4423 	if (btrfs_test_opt(root, DEGRADED)) {
4424 		free_extent_map(em);
4425 		return 0;
4426 	}
4427 
4428 	map = (struct map_lookup *)em->bdev;
4429 	for (i = 0; i < map->num_stripes; i++) {
4430 		if (!map->stripes[i].dev->writeable) {
4431 			readonly = 1;
4432 			break;
4433 		}
4434 	}
4435 	free_extent_map(em);
4436 	return readonly;
4437 }
4438 
4439 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4440 {
4441 	extent_map_tree_init(&tree->map_tree);
4442 }
4443 
4444 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4445 {
4446 	struct extent_map *em;
4447 
4448 	while (1) {
4449 		write_lock(&tree->map_tree.lock);
4450 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4451 		if (em)
4452 			remove_extent_mapping(&tree->map_tree, em);
4453 		write_unlock(&tree->map_tree.lock);
4454 		if (!em)
4455 			break;
4456 		kfree(em->bdev);
4457 		/* once for us */
4458 		free_extent_map(em);
4459 		/* once for the tree */
4460 		free_extent_map(em);
4461 	}
4462 }
4463 
4464 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4465 {
4466 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4467 	struct extent_map *em;
4468 	struct map_lookup *map;
4469 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4470 	int ret;
4471 
4472 	read_lock(&em_tree->lock);
4473 	em = lookup_extent_mapping(em_tree, logical, len);
4474 	read_unlock(&em_tree->lock);
4475 
	/*
	 * We could return errors for these cases, but that could get ugly and
	 * we'd probably do the same thing anyway, which is to do nothing else
	 * and exit, so return 1 so the callers don't try to use other copies.
	 */
4481 	if (!em) {
4482 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu\n", logical,
4483 			    logical+len);
4484 		return 1;
4485 	}
4486 
4487 	if (em->start > logical || em->start + em->len < logical) {
4488 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4489 			    "%Lu-%Lu\n", logical, logical+len, em->start,
4490 			    em->start + em->len);
4491 		return 1;
4492 	}
4493 
4494 	map = (struct map_lookup *)em->bdev;
4495 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4496 		ret = map->num_stripes;
4497 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4498 		ret = map->sub_stripes;
4499 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4500 		ret = 2;
4501 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4502 		ret = 3;
4503 	else
4504 		ret = 1;
4505 	free_extent_map(em);
4506 
4507 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4508 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4509 		ret++;
4510 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4511 
4512 	return ret;
4513 }
4514 
4515 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4516 				    struct btrfs_mapping_tree *map_tree,
4517 				    u64 logical)
4518 {
4519 	struct extent_map *em;
4520 	struct map_lookup *map;
4521 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4522 	unsigned long len = root->sectorsize;
4523 
4524 	read_lock(&em_tree->lock);
4525 	em = lookup_extent_mapping(em_tree, logical, len);
4526 	read_unlock(&em_tree->lock);
4527 	BUG_ON(!em);
4528 
4529 	BUG_ON(em->start > logical || em->start + em->len < logical);
4530 	map = (struct map_lookup *)em->bdev;
4531 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4532 			 BTRFS_BLOCK_GROUP_RAID6)) {
4533 		len = map->stripe_len * nr_data_stripes(map);
4534 	}
4535 	free_extent_map(em);
4536 	return len;
4537 }
4538 
4539 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4540 			   u64 logical, u64 len, int mirror_num)
4541 {
4542 	struct extent_map *em;
4543 	struct map_lookup *map;
4544 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4545 	int ret = 0;
4546 
4547 	read_lock(&em_tree->lock);
4548 	em = lookup_extent_mapping(em_tree, logical, len);
4549 	read_unlock(&em_tree->lock);
4550 	BUG_ON(!em);
4551 
4552 	BUG_ON(em->start > logical || em->start + em->len < logical);
4553 	map = (struct map_lookup *)em->bdev;
4554 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4555 			 BTRFS_BLOCK_GROUP_RAID6))
4556 		ret = 1;
4557 	free_extent_map(em);
4558 	return ret;
4559 }
4560 
4561 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4562 			    struct map_lookup *map, int first, int num,
4563 			    int optimal, int dev_replace_is_ongoing)
4564 {
4565 	int i;
4566 	int tolerance;
4567 	struct btrfs_device *srcdev;
4568 
4569 	if (dev_replace_is_ongoing &&
4570 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4571 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4572 		srcdev = fs_info->dev_replace.srcdev;
4573 	else
4574 		srcdev = NULL;
4575 
4576 	/*
4577 	 * try to avoid the drive that is the source drive for a
4578 	 * dev-replace procedure, only choose it if no other non-missing
4579 	 * mirror is available
4580 	 */
4581 	for (tolerance = 0; tolerance < 2; tolerance++) {
4582 		if (map->stripes[optimal].dev->bdev &&
4583 		    (tolerance || map->stripes[optimal].dev != srcdev))
4584 			return optimal;
4585 		for (i = first; i < first + num; i++) {
4586 			if (map->stripes[i].dev->bdev &&
4587 			    (tolerance || map->stripes[i].dev != srcdev))
4588 				return i;
4589 		}
4590 	}
4591 
	/* We couldn't find one that doesn't fail.  Just return something
	 * and the I/O error handling code will clean up eventually.
	 */
4595 	return optimal;
4596 }
4597 
4598 static inline int parity_smaller(u64 a, u64 b)
4599 {
4600 	return a > b;
4601 }
4602 
4603 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4604 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4605 {
4606 	struct btrfs_bio_stripe s;
4607 	int i;
4608 	u64 l;
4609 	int again = 1;
4610 
4611 	while (again) {
4612 		again = 0;
4613 		for (i = 0; i < bbio->num_stripes - 1; i++) {
4614 			if (parity_smaller(raid_map[i], raid_map[i+1])) {
4615 				s = bbio->stripes[i];
4616 				l = raid_map[i];
4617 				bbio->stripes[i] = bbio->stripes[i+1];
4618 				raid_map[i] = raid_map[i+1];
4619 				bbio->stripes[i+1] = s;
4620 				raid_map[i+1] = l;
4621 				again = 1;
4622 			}
4623 		}
4624 	}
4625 }
4626 
4627 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4628 			     u64 logical, u64 *length,
4629 			     struct btrfs_bio **bbio_ret,
4630 			     int mirror_num, u64 **raid_map_ret)
4631 {
4632 	struct extent_map *em;
4633 	struct map_lookup *map;
4634 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4635 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4636 	u64 offset;
4637 	u64 stripe_offset;
4638 	u64 stripe_end_offset;
4639 	u64 stripe_nr;
4640 	u64 stripe_nr_orig;
4641 	u64 stripe_nr_end;
4642 	u64 stripe_len;
4643 	u64 *raid_map = NULL;
4644 	int stripe_index;
4645 	int i;
4646 	int ret = 0;
4647 	int num_stripes;
4648 	int max_errors = 0;
4649 	struct btrfs_bio *bbio = NULL;
4650 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4651 	int dev_replace_is_ongoing = 0;
4652 	int num_alloc_stripes;
4653 	int patch_the_first_stripe_for_dev_replace = 0;
4654 	u64 physical_to_patch_in_first_stripe = 0;
4655 	u64 raid56_full_stripe_start = (u64)-1;
4656 
4657 	read_lock(&em_tree->lock);
4658 	em = lookup_extent_mapping(em_tree, logical, *length);
4659 	read_unlock(&em_tree->lock);
4660 
4661 	if (!em) {
4662 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4663 			logical, *length);
4664 		return -EINVAL;
4665 	}
4666 
4667 	if (em->start > logical || em->start + em->len < logical) {
4668 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4669 			   "found %Lu-%Lu\n", logical, em->start,
4670 			   em->start + em->len);
4671 		return -EINVAL;
4672 	}
4673 
4674 	map = (struct map_lookup *)em->bdev;
4675 	offset = logical - em->start;
4676 
4677 	stripe_len = map->stripe_len;
4678 	stripe_nr = offset;
4679 	/*
4680 	 * stripe_nr counts the total number of stripes we have to stride
4681 	 * to get to this block
4682 	 */
4683 	do_div(stripe_nr, stripe_len);
4684 
4685 	stripe_offset = stripe_nr * stripe_len;
4686 	BUG_ON(offset < stripe_offset);
4687 
	/* stripe_offset is the offset of this block in its stripe */
4689 	stripe_offset = offset - stripe_offset;
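	/*
	 * Illustrative example: with a 64KB stripe_len and offset == 200KB,
	 * stripe_nr is 3 and stripe_offset is 8KB (200KB - 3 * 64KB).
	 */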
4690 
4691 	/* if we're here for raid56, we need to know the stripe aligned start */
4692 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4693 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4694 		raid56_full_stripe_start = offset;
4695 
4696 		/* allow a write of a full stripe, but make sure we don't
4697 		 * allow straddling of stripes
4698 		 */
4699 		do_div(raid56_full_stripe_start, full_stripe_len);
4700 		raid56_full_stripe_start *= full_stripe_len;
4701 	}
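	/*
	 * Illustrative example: with 3 data stripes and a 64KB stripe_len,
	 * the full stripe is 192KB wide, so an offset of 200KB rounds down
	 * to a raid56_full_stripe_start of 192KB.
	 */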
4702 
4703 	if (rw & REQ_DISCARD) {
4704 		/* we don't discard raid56 yet */
4705 		if (map->type &
4706 		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4707 			ret = -EOPNOTSUPP;
4708 			goto out;
4709 		}
4710 		*length = min_t(u64, em->len - offset, *length);
4711 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4712 		u64 max_len;
		/*
		 * For writes to RAID[56], allow a full stripeset across all
		 * disks.  For other RAID types and for RAID[56] reads, just
		 * allow a single stripe (on a single disk).
		 */
4716 		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4717 		    (rw & REQ_WRITE)) {
4718 			max_len = stripe_len * nr_data_stripes(map) -
4719 				(offset - raid56_full_stripe_start);
4720 		} else {
4721 			/* we limit the length of each bio to what fits in a stripe */
4722 			max_len = stripe_len - stripe_offset;
4723 		}
4724 		*length = min_t(u64, em->len - offset, max_len);
4725 	} else {
4726 		*length = em->len - offset;
4727 	}
4728 
	/*
	 * This is for when we're called from btrfs_merge_bio_hook() and all
	 * it cares about is the length.
	 */
4731 	if (!bbio_ret)
4732 		goto out;
4733 
4734 	btrfs_dev_replace_lock(dev_replace);
4735 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4736 	if (!dev_replace_is_ongoing)
4737 		btrfs_dev_replace_unlock(dev_replace);
4738 
4739 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4740 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4741 	    dev_replace->tgtdev != NULL) {
4742 		/*
4743 		 * in dev-replace case, for repair case (that's the only
4744 		 * case where the mirror is selected explicitly when
4745 		 * calling btrfs_map_block), blocks left of the left cursor
4746 		 * can also be read from the target drive.
4747 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
4748 		 * the last one to the array of stripes. For READ, it also
4749 		 * needs to be supported using the same mirror number.
4750 		 * If the requested block is not left of the left cursor,
4751 		 * EIO is returned. This can happen because btrfs_num_copies()
4752 		 * returns one more in the dev-replace case.
4753 		 */
4754 		u64 tmp_length = *length;
4755 		struct btrfs_bio *tmp_bbio = NULL;
4756 		int tmp_num_stripes;
4757 		u64 srcdev_devid = dev_replace->srcdev->devid;
4758 		int index_srcdev = 0;
4759 		int found = 0;
4760 		u64 physical_of_found = 0;
4761 
4762 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4763 			     logical, &tmp_length, &tmp_bbio, 0, NULL);
4764 		if (ret) {
4765 			WARN_ON(tmp_bbio != NULL);
4766 			goto out;
4767 		}
4768 
4769 		tmp_num_stripes = tmp_bbio->num_stripes;
4770 		if (mirror_num > tmp_num_stripes) {
4771 			/*
4772 			 * REQ_GET_READ_MIRRORS does not contain this
4773 			 * mirror, that means that the requested area
4774 			 * is not left of the left cursor
4775 			 */
4776 			ret = -EIO;
4777 			kfree(tmp_bbio);
4778 			goto out;
4779 		}
4780 
4781 		/*
4782 		 * process the rest of the function using the mirror_num
4783 		 * of the source drive. Therefore look it up first.
4784 		 * At the end, patch the device pointer to the one of the
4785 		 * target drive.
4786 		 */
4787 		for (i = 0; i < tmp_num_stripes; i++) {
4788 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4789 				/*
4790 				 * In case of DUP, in order to keep it
4791 				 * simple, only add the mirror with the
4792 				 * lowest physical address
4793 				 */
4794 				if (found &&
4795 				    physical_of_found <=
4796 				     tmp_bbio->stripes[i].physical)
4797 					continue;
4798 				index_srcdev = i;
4799 				found = 1;
4800 				physical_of_found =
4801 					tmp_bbio->stripes[i].physical;
4802 			}
4803 		}
4804 
4805 		if (found) {
4806 			mirror_num = index_srcdev + 1;
4807 			patch_the_first_stripe_for_dev_replace = 1;
4808 			physical_to_patch_in_first_stripe = physical_of_found;
4809 		} else {
4810 			WARN_ON(1);
4811 			ret = -EIO;
4812 			kfree(tmp_bbio);
4813 			goto out;
4814 		}
4815 
4816 		kfree(tmp_bbio);
4817 	} else if (mirror_num > map->num_stripes) {
4818 		mirror_num = 0;
4819 	}
4820 
4821 	num_stripes = 1;
4822 	stripe_index = 0;
4823 	stripe_nr_orig = stripe_nr;
4824 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4825 	do_div(stripe_nr_end, map->stripe_len);
4826 	stripe_end_offset = stripe_nr_end * map->stripe_len -
4827 			    (offset + *length);
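	/*
	 * Illustrative example: with 64KB stripes and offset + *length ==
	 * 200KB, stripe_nr_end is 4 (ALIGN rounds 200KB up to 256KB) and
	 * stripe_end_offset is 56KB (4 * 64KB - 200KB).
	 */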
4828 
4829 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4830 		if (rw & REQ_DISCARD)
4831 			num_stripes = min_t(u64, map->num_stripes,
4832 					    stripe_nr_end - stripe_nr_orig);
4833 		stripe_index = do_div(stripe_nr, map->num_stripes);
4834 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4835 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4836 			num_stripes = map->num_stripes;
4837 		else if (mirror_num)
4838 			stripe_index = mirror_num - 1;
4839 		else {
4840 			stripe_index = find_live_mirror(fs_info, map, 0,
4841 					    map->num_stripes,
4842 					    current->pid % map->num_stripes,
4843 					    dev_replace_is_ongoing);
4844 			mirror_num = stripe_index + 1;
4845 		}
4846 
4847 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4848 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4849 			num_stripes = map->num_stripes;
4850 		} else if (mirror_num) {
4851 			stripe_index = mirror_num - 1;
4852 		} else {
4853 			mirror_num = 1;
4854 		}
4855 
4856 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4857 		int factor = map->num_stripes / map->sub_stripes;
4858 
4859 		stripe_index = do_div(stripe_nr, factor);
4860 		stripe_index *= map->sub_stripes;
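		/*
		 * Illustration: a 4-device RAID10 chunk has factor == 2
		 * mirror pairs; stripe_nr selects the pair and stripe_index
		 * then points at the first copy within it (0 or 2).
		 */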
4861 
4862 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4863 			num_stripes = map->sub_stripes;
4864 		else if (rw & REQ_DISCARD)
4865 			num_stripes = min_t(u64, map->sub_stripes *
4866 					    (stripe_nr_end - stripe_nr_orig),
4867 					    map->num_stripes);
4868 		else if (mirror_num)
4869 			stripe_index += mirror_num - 1;
4870 		else {
4871 			int old_stripe_index = stripe_index;
4872 			stripe_index = find_live_mirror(fs_info, map,
4873 					      stripe_index,
4874 					      map->sub_stripes, stripe_index +
4875 					      current->pid % map->sub_stripes,
4876 					      dev_replace_is_ongoing);
4877 			mirror_num = stripe_index - old_stripe_index + 1;
4878 		}
4879 
4880 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4881 				BTRFS_BLOCK_GROUP_RAID6)) {
4882 		u64 tmp;
4883 
4884 		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4885 		    && raid_map_ret) {
4886 			int i, rot;
4887 
4888 			/* push stripe_nr back to the start of the full stripe */
4889 			stripe_nr = raid56_full_stripe_start;
4890 			do_div(stripe_nr, stripe_len);
4891 
4892 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4893 
4894 			/* RAID[56] write or recovery. Return all stripes */
4895 			num_stripes = map->num_stripes;
4896 			max_errors = nr_parity_stripes(map);
4897 
4898 			raid_map = kmalloc(sizeof(u64) * num_stripes,
4899 					   GFP_NOFS);
4900 			if (!raid_map) {
4901 				ret = -ENOMEM;
4902 				goto out;
4903 			}
4904 
4905 			/* Work out the disk rotation on this stripe-set */
4906 			tmp = stripe_nr;
4907 			rot = do_div(tmp, num_stripes);
4908 
4909 			/* Fill in the logical address of each stripe */
4910 			tmp = stripe_nr * nr_data_stripes(map);
4911 			for (i = 0; i < nr_data_stripes(map); i++)
4912 				raid_map[(i+rot) % num_stripes] =
4913 					em->start + (tmp + i) * map->stripe_len;
4914 
			raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
4916 			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4917 				raid_map[(i+rot+1) % num_stripes] =
4918 					RAID6_Q_STRIPE;
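			/*
			 * Illustrative example: a 3-device RAID5 stripe-set
			 * with rot == 1 yields raid_map == [P, D0, D1]; the
			 * sort_parity_stripes() call below then sorts the
			 * parity stripe to the end.
			 */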
4919 
4920 			*length = map->stripe_len;
4921 			stripe_index = 0;
4922 			stripe_offset = 0;
4923 		} else {
4924 			/*
4925 			 * Mirror #0 or #1 means the original data block.
4926 			 * Mirror #2 is RAID5 parity block.
4927 			 * Mirror #3 is RAID6 Q block.
4928 			 */
4929 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4930 			if (mirror_num > 1)
4931 				stripe_index = nr_data_stripes(map) +
4932 						mirror_num - 2;
4933 
4934 			/* We distribute the parity blocks across stripes */
4935 			tmp = stripe_nr + stripe_index;
4936 			stripe_index = do_div(tmp, map->num_stripes);
4937 		}
4938 	} else {
4939 		/*
4940 		 * after this do_div call, stripe_nr is the number of stripes
4941 		 * on this device we have to walk to find the data, and
4942 		 * stripe_index is the number of our device in the stripe array
4943 		 */
4944 		stripe_index = do_div(stripe_nr, map->num_stripes);
4945 		mirror_num = stripe_index + 1;
4946 	}
4947 	BUG_ON(stripe_index >= map->num_stripes);
4948 
4949 	num_alloc_stripes = num_stripes;
4950 	if (dev_replace_is_ongoing) {
4951 		if (rw & (REQ_WRITE | REQ_DISCARD))
4952 			num_alloc_stripes <<= 1;
4953 		if (rw & REQ_GET_READ_MIRRORS)
4954 			num_alloc_stripes++;
4955 	}
4956 	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4957 	if (!bbio) {
4958 		kfree(raid_map);
4959 		ret = -ENOMEM;
4960 		goto out;
4961 	}
4962 	atomic_set(&bbio->error, 0);
4963 
4964 	if (rw & REQ_DISCARD) {
4965 		int factor = 0;
4966 		int sub_stripes = 0;
4967 		u64 stripes_per_dev = 0;
4968 		u32 remaining_stripes = 0;
4969 		u32 last_stripe = 0;
4970 
4971 		if (map->type &
4972 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4973 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4974 				sub_stripes = 1;
4975 			else
4976 				sub_stripes = map->sub_stripes;
4977 
4978 			factor = map->num_stripes / sub_stripes;
4979 			stripes_per_dev = div_u64_rem(stripe_nr_end -
4980 						      stripe_nr_orig,
4981 						      factor,
4982 						      &remaining_stripes);
4983 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4984 			last_stripe *= sub_stripes;
4985 		}
4986 
4987 		for (i = 0; i < num_stripes; i++) {
4988 			bbio->stripes[i].physical =
4989 				map->stripes[stripe_index].physical +
4990 				stripe_offset + stripe_nr * map->stripe_len;
4991 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4992 
4993 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4994 					 BTRFS_BLOCK_GROUP_RAID10)) {
4995 				bbio->stripes[i].length = stripes_per_dev *
4996 							  map->stripe_len;
4997 
4998 				if (i / sub_stripes < remaining_stripes)
4999 					bbio->stripes[i].length +=
5000 						map->stripe_len;
5001 
5002 				/*
5003 				 * Special for the first stripe and
5004 				 * the last stripe:
5005 				 *
5006 				 * |-------|...|-------|
5007 				 *     |----------|
5008 				 *    off     end_off
5009 				 */
5010 				if (i < sub_stripes)
5011 					bbio->stripes[i].length -=
5012 						stripe_offset;
5013 
5014 				if (stripe_index >= last_stripe &&
5015 				    stripe_index <= (last_stripe +
5016 						     sub_stripes - 1))
5017 					bbio->stripes[i].length -=
5018 						stripe_end_offset;
5019 
5020 				if (i == sub_stripes - 1)
5021 					stripe_offset = 0;
5022 			} else
5023 				bbio->stripes[i].length = *length;
5024 
5025 			stripe_index++;
5026 			if (stripe_index == map->num_stripes) {
5027 				/* This could only happen for RAID0/10 */
5028 				stripe_index = 0;
5029 				stripe_nr++;
5030 			}
5031 		}
5032 	} else {
5033 		for (i = 0; i < num_stripes; i++) {
5034 			bbio->stripes[i].physical =
5035 				map->stripes[stripe_index].physical +
5036 				stripe_offset +
5037 				stripe_nr * map->stripe_len;
5038 			bbio->stripes[i].dev =
5039 				map->stripes[stripe_index].dev;
5040 			stripe_index++;
5041 		}
5042 	}
5043 
5044 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
5045 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5046 				 BTRFS_BLOCK_GROUP_RAID10 |
5047 				 BTRFS_BLOCK_GROUP_RAID5 |
5048 				 BTRFS_BLOCK_GROUP_DUP)) {
5049 			max_errors = 1;
5050 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5051 			max_errors = 2;
5052 		}
5053 	}
5054 
5055 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5056 	    dev_replace->tgtdev != NULL) {
5057 		int index_where_to_add;
5058 		u64 srcdev_devid = dev_replace->srcdev->devid;
5059 
5060 		/*
5061 		 * duplicate the write operations while the dev replace
5062 		 * procedure is running. Since the copying of the old disk
5063 		 * to the new disk takes place at run time while the
5064 		 * filesystem is mounted writable, the regular write
5065 		 * operations to the old disk have to be duplicated to go
5066 		 * to the new disk as well.
5067 		 * Note that device->missing is handled by the caller, and
5068 		 * that the write to the old disk is already set up in the
5069 		 * stripes array.
5070 		 */
5071 		index_where_to_add = num_stripes;
5072 		for (i = 0; i < num_stripes; i++) {
5073 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5074 				/* write to new disk, too */
5075 				struct btrfs_bio_stripe *new =
5076 					bbio->stripes + index_where_to_add;
5077 				struct btrfs_bio_stripe *old =
5078 					bbio->stripes + i;
5079 
5080 				new->physical = old->physical;
5081 				new->length = old->length;
5082 				new->dev = dev_replace->tgtdev;
5083 				index_where_to_add++;
5084 				max_errors++;
5085 			}
5086 		}
5087 		num_stripes = index_where_to_add;
5088 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5089 		   dev_replace->tgtdev != NULL) {
5090 		u64 srcdev_devid = dev_replace->srcdev->devid;
5091 		int index_srcdev = 0;
5092 		int found = 0;
5093 		u64 physical_of_found = 0;
5094 
5095 		/*
5096 		 * During the dev-replace procedure, the target drive can
5097 		 * also be used to read data in case it is needed to repair
5098 		 * a corrupt block elsewhere. This is possible if the
5099 		 * requested area is left of the left cursor. In this area,
5100 		 * the target drive is a full copy of the source drive.
5101 		 */
5102 		for (i = 0; i < num_stripes; i++) {
5103 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5104 				/*
5105 				 * In case of DUP, in order to keep it
5106 				 * simple, only add the mirror with the
5107 				 * lowest physical address
5108 				 */
5109 				if (found &&
5110 				    physical_of_found <=
5111 				     bbio->stripes[i].physical)
5112 					continue;
5113 				index_srcdev = i;
5114 				found = 1;
5115 				physical_of_found = bbio->stripes[i].physical;
5116 			}
5117 		}
5118 		if (found) {
5119 			u64 length = map->stripe_len;
5120 
5121 			if (physical_of_found + length <=
5122 			    dev_replace->cursor_left) {
5123 				struct btrfs_bio_stripe *tgtdev_stripe =
5124 					bbio->stripes + num_stripes;
5125 
5126 				tgtdev_stripe->physical = physical_of_found;
5127 				tgtdev_stripe->length =
5128 					bbio->stripes[index_srcdev].length;
5129 				tgtdev_stripe->dev = dev_replace->tgtdev;
5130 
5131 				num_stripes++;
5132 			}
5133 		}
5134 	}
5135 
5136 	*bbio_ret = bbio;
5137 	bbio->num_stripes = num_stripes;
5138 	bbio->max_errors = max_errors;
5139 	bbio->mirror_num = mirror_num;
5140 
5141 	/*
5142 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5143 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5144 	 * available as a mirror
5145 	 */
5146 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5147 		WARN_ON(num_stripes > 1);
5148 		bbio->stripes[0].dev = dev_replace->tgtdev;
5149 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5150 		bbio->mirror_num = map->num_stripes + 1;
5151 	}
5152 	if (raid_map) {
5153 		sort_parity_stripes(bbio, raid_map);
5154 		*raid_map_ret = raid_map;
5155 	}
5156 out:
5157 	if (dev_replace_is_ongoing)
5158 		btrfs_dev_replace_unlock(dev_replace);
5159 	free_extent_map(em);
5160 	return ret;
5161 }
5162 
5163 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5164 		      u64 logical, u64 *length,
5165 		      struct btrfs_bio **bbio_ret, int mirror_num)
5166 {
5167 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5168 				 mirror_num, NULL);
5169 }
5170 
5171 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5172 		     u64 chunk_start, u64 physical, u64 devid,
5173 		     u64 **logical, int *naddrs, int *stripe_len)
5174 {
5175 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5176 	struct extent_map *em;
5177 	struct map_lookup *map;
5178 	u64 *buf;
5179 	u64 bytenr;
5180 	u64 length;
5181 	u64 stripe_nr;
5182 	u64 rmap_len;
5183 	int i, j, nr = 0;
5184 
5185 	read_lock(&em_tree->lock);
5186 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5187 	read_unlock(&em_tree->lock);
5188 
5189 	if (!em) {
5190 		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
5191 		       chunk_start);
5192 		return -EIO;
5193 	}
5194 
5195 	if (em->start != chunk_start) {
5196 		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
5197 		       em->start, chunk_start);
5198 		free_extent_map(em);
5199 		return -EIO;
5200 	}
5201 	map = (struct map_lookup *)em->bdev;
5202 
5203 	length = em->len;
5204 	rmap_len = map->stripe_len;
5205 
5206 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5207 		do_div(length, map->num_stripes / map->sub_stripes);
5208 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5209 		do_div(length, map->num_stripes);
5210 	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5211 			      BTRFS_BLOCK_GROUP_RAID6)) {
5212 		do_div(length, nr_data_stripes(map));
5213 		rmap_len = map->stripe_len * nr_data_stripes(map);
5214 	}
5215 
5216 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5217 	BUG_ON(!buf); /* -ENOMEM */
5218 
5219 	for (i = 0; i < map->num_stripes; i++) {
5220 		if (devid && map->stripes[i].dev->devid != devid)
5221 			continue;
5222 		if (map->stripes[i].physical > physical ||
5223 		    map->stripes[i].physical + length <= physical)
5224 			continue;
5225 
5226 		stripe_nr = physical - map->stripes[i].physical;
5227 		do_div(stripe_nr, map->stripe_len);
5228 
5229 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5230 			stripe_nr = stripe_nr * map->num_stripes + i;
5231 			do_div(stripe_nr, map->sub_stripes);
5232 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5233 			stripe_nr = stripe_nr * map->num_stripes + i;
5234 		} /* else if RAID[56], multiply by nr_data_stripes().
5235 		   * Alternatively, just use rmap_len below instead of
5236 		   * map->stripe_len */
5237 
5238 		bytenr = chunk_start + stripe_nr * rmap_len;
5239 		WARN_ON(nr >= map->num_stripes);
5240 		for (j = 0; j < nr; j++) {
5241 			if (buf[j] == bytenr)
5242 				break;
5243 		}
5244 		if (j == nr) {
5245 			WARN_ON(nr >= map->num_stripes);
5246 			buf[nr++] = bytenr;
5247 		}
5248 	}
5249 
5250 	*logical = buf;
5251 	*naddrs = nr;
5252 	*stripe_len = rmap_len;
5253 
5254 	free_extent_map(em);
5255 	return 0;
5256 }
5257 
5258 static void btrfs_end_bio(struct bio *bio, int err)
5259 {
5260 	struct btrfs_bio *bbio = bio->bi_private;
5261 	int is_orig_bio = 0;
5262 
5263 	if (err) {
5264 		atomic_inc(&bbio->error);
5265 		if (err == -EIO || err == -EREMOTEIO) {
5266 			unsigned int stripe_index =
5267 				btrfs_io_bio(bio)->stripe_index;
5268 			struct btrfs_device *dev;
5269 
5270 			BUG_ON(stripe_index >= bbio->num_stripes);
5271 			dev = bbio->stripes[stripe_index].dev;
5272 			if (dev->bdev) {
5273 				if (bio->bi_rw & WRITE)
5274 					btrfs_dev_stat_inc(dev,
5275 						BTRFS_DEV_STAT_WRITE_ERRS);
5276 				else
5277 					btrfs_dev_stat_inc(dev,
5278 						BTRFS_DEV_STAT_READ_ERRS);
5279 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5280 					btrfs_dev_stat_inc(dev,
5281 						BTRFS_DEV_STAT_FLUSH_ERRS);
5282 				btrfs_dev_stat_print_on_error(dev);
5283 			}
5284 		}
5285 	}
5286 
5287 	if (bio == bbio->orig_bio)
5288 		is_orig_bio = 1;
5289 
5290 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5291 		if (!is_orig_bio) {
5292 			bio_put(bio);
5293 			bio = bbio->orig_bio;
5294 		}
5295 		bio->bi_private = bbio->private;
5296 		bio->bi_end_io = bbio->end_io;
5297 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5298 		/* only send an error to the higher layers if it is
5299 		 * beyond the tolerance of the btrfs bio
5300 		 */
5301 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5302 			err = -EIO;
5303 		} else {
5304 			/*
5305 			 * this bio is actually up to date, we didn't
5306 			 * go over the max number of errors
5307 			 */
5308 			set_bit(BIO_UPTODATE, &bio->bi_flags);
5309 			err = 0;
5310 		}
5311 		kfree(bbio);
5312 
5313 		bio_endio(bio, err);
5314 	} else if (!is_orig_bio) {
5315 		bio_put(bio);
5316 	}
5317 }
5318 
5319 struct async_sched {
5320 	struct bio *bio;
5321 	int rw;
5322 	struct btrfs_fs_info *info;
5323 	struct btrfs_work work;
5324 };
5325 
5326 /*
5327  * see run_scheduled_bios for a description of why bios are collected for
5328  * async submit.
5329  *
5330  * This will add one bio to the pending list for a device and make sure
5331  * the work struct is scheduled.
5332  */
5333 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5334 					struct btrfs_device *device,
5335 					int rw, struct bio *bio)
5336 {
5337 	int should_queue = 1;
5338 	struct btrfs_pending_bios *pending_bios;
5339 
5340 	if (device->missing || !device->bdev) {
5341 		bio_endio(bio, -EIO);
5342 		return;
5343 	}
5344 
5345 	/* don't bother with additional async steps for reads, right now */
5346 	if (!(rw & REQ_WRITE)) {
5347 		bio_get(bio);
5348 		btrfsic_submit_bio(rw, bio);
5349 		bio_put(bio);
5350 		return;
5351 	}
5352 
5353 	/*
5354 	 * nr_async_bios allows us to reliably return congestion to the
5355 	 * higher layers.  Otherwise, the async bio makes it appear we have
5356 	 * made progress against dirty pages when we've really just put it
5357 	 * on a queue for later
5358 	 */
5359 	atomic_inc(&root->fs_info->nr_async_bios);
5360 	WARN_ON(bio->bi_next);
5361 	bio->bi_next = NULL;
5362 	bio->bi_rw |= rw;
5363 
5364 	spin_lock(&device->io_lock);
5365 	if (bio->bi_rw & REQ_SYNC)
5366 		pending_bios = &device->pending_sync_bios;
5367 	else
5368 		pending_bios = &device->pending_bios;
5369 
5370 	if (pending_bios->tail)
5371 		pending_bios->tail->bi_next = bio;
5372 
5373 	pending_bios->tail = bio;
5374 	if (!pending_bios->head)
5375 		pending_bios->head = bio;
5376 	if (device->running_pending)
5377 		should_queue = 0;
5378 
5379 	spin_unlock(&device->io_lock);
5380 
5381 	if (should_queue)
5382 		btrfs_queue_worker(&root->fs_info->submit_workers,
5383 				   &device->work);
5384 }
5385 
5386 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5387 		       sector_t sector)
5388 {
5389 	struct bio_vec *prev;
5390 	struct request_queue *q = bdev_get_queue(bdev);
5391 	unsigned short max_sectors = queue_max_sectors(q);
5392 	struct bvec_merge_data bvm = {
5393 		.bi_bdev = bdev,
5394 		.bi_sector = sector,
5395 		.bi_rw = bio->bi_rw,
5396 	};
5397 
5398 	if (bio->bi_vcnt == 0) {
5399 		WARN_ON(1);
5400 		return 1;
5401 	}
5402 
5403 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5404 	if (bio_sectors(bio) > max_sectors)
5405 		return 0;
5406 
5407 	if (!q->merge_bvec_fn)
5408 		return 1;
5409 
5410 	bvm.bi_size = bio->bi_size - prev->bv_len;
5411 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5412 		return 0;
5413 	return 1;
5414 }
5415 
5416 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5417 			      struct bio *bio, u64 physical, int dev_nr,
5418 			      int rw, int async)
5419 {
5420 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5421 
5422 	bio->bi_private = bbio;
5423 	btrfs_io_bio(bio)->stripe_index = dev_nr;
5424 	bio->bi_end_io = btrfs_end_bio;
5425 	bio->bi_sector = physical >> 9;
5426 #ifdef DEBUG
5427 	{
5428 		struct rcu_string *name;
5429 
5430 		rcu_read_lock();
5431 		name = rcu_dereference(dev->name);
5432 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5433 			 "(%s id %llu), size=%u\n", rw,
5434 			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5435 			 name->str, dev->devid, bio->bi_size);
5436 		rcu_read_unlock();
5437 	}
5438 #endif
5439 	bio->bi_bdev = dev->bdev;
5440 	if (async)
5441 		btrfs_schedule_bio(root, dev, rw, bio);
5442 	else
5443 		btrfsic_submit_bio(rw, bio);
5444 }
5445 
5446 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5447 			      struct bio *first_bio, struct btrfs_device *dev,
5448 			      int dev_nr, int rw, int async)
5449 {
5450 	struct bio_vec *bvec = first_bio->bi_io_vec;
5451 	struct bio *bio;
5452 	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5453 	u64 physical = bbio->stripes[dev_nr].physical;
5454 
5455 again:
5456 	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5457 	if (!bio)
5458 		return -ENOMEM;
5459 
5460 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5461 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5462 				 bvec->bv_offset) < bvec->bv_len) {
5463 			u64 len = bio->bi_size;
5464 
5465 			atomic_inc(&bbio->stripes_pending);
5466 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5467 					  rw, async);
5468 			physical += len;
5469 			goto again;
5470 		}
5471 		bvec++;
5472 	}
5473 
5474 	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5475 	return 0;
5476 }
5477 
5478 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5479 {
5480 	atomic_inc(&bbio->error);
5481 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5482 		bio->bi_private = bbio->private;
5483 		bio->bi_end_io = bbio->end_io;
5484 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5485 		bio->bi_sector = logical >> 9;
5486 		kfree(bbio);
5487 		bio_endio(bio, -EIO);
5488 	}
5489 }
5490 
5491 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5492 		  int mirror_num, int async_submit)
5493 {
5494 	struct btrfs_device *dev;
5495 	struct bio *first_bio = bio;
5496 	u64 logical = (u64)bio->bi_sector << 9;
5497 	u64 length = 0;
5498 	u64 map_length;
5499 	u64 *raid_map = NULL;
5500 	int ret;
5501 	int dev_nr = 0;
5502 	int total_devs = 1;
5503 	struct btrfs_bio *bbio = NULL;
5504 
5505 	length = bio->bi_size;
5506 	map_length = length;
5507 
5508 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5509 			      mirror_num, &raid_map);
5510 	if (ret) /* -ENOMEM */
5511 		return ret;
5512 
5513 	total_devs = bbio->num_stripes;
5514 	bbio->orig_bio = first_bio;
5515 	bbio->private = first_bio->bi_private;
5516 	bbio->end_io = first_bio->bi_end_io;
5517 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5518 
5519 	if (raid_map) {
		/*
		 * In this case, map_length has been set to the length of a
		 * single stripe, not the whole write.
		 */
5522 		if (rw & WRITE) {
5523 			return raid56_parity_write(root, bio, bbio,
5524 						   raid_map, map_length);
5525 		} else {
5526 			return raid56_parity_recover(root, bio, bbio,
5527 						     raid_map, map_length,
5528 						     mirror_num);
5529 		}
5530 	}
5531 
5532 	if (map_length < length) {
5533 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5534 			logical, length, map_length);
5535 		BUG();
5536 	}
5537 
5538 	while (dev_nr < total_devs) {
5539 		dev = bbio->stripes[dev_nr].dev;
5540 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5541 			bbio_error(bbio, first_bio, logical);
5542 			dev_nr++;
5543 			continue;
5544 		}
5545 
5546 		/*
5547 		 * Check and see if we're OK with this bio based on its size
5548 		 * and offset within the given device.
5549 		 */
5550 		if (!bio_size_ok(dev->bdev, first_bio,
5551 				 bbio->stripes[dev_nr].physical >> 9)) {
5552 			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5553 						 dev_nr, rw, async_submit);
5554 			BUG_ON(ret);
5555 			dev_nr++;
5556 			continue;
5557 		}
5558 
5559 		if (dev_nr < total_devs - 1) {
5560 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5561 			BUG_ON(!bio); /* -ENOMEM */
5562 		} else {
5563 			bio = first_bio;
5564 		}
5565 
5566 		submit_stripe_bio(root, bbio, bio,
5567 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5568 				  async_submit);
5569 		dev_nr++;
5570 	}
5571 	return 0;
5572 }
5573 
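/*
 * Look up a device by devid, and optionally by uuid and fsid, searching
 * the filesystem's own device list first and then any chained seed
 * device lists.
 */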
5574 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5575 				       u8 *uuid, u8 *fsid)
5576 {
5577 	struct btrfs_device *device;
5578 	struct btrfs_fs_devices *cur_devices;
5579 
5580 	cur_devices = fs_info->fs_devices;
5581 	while (cur_devices) {
5582 		if (!fsid ||
5583 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5584 			device = __find_device(&cur_devices->devices,
5585 					       devid, uuid);
5586 			if (device)
5587 				return device;
5588 		}
5589 		cur_devices = cur_devices->seed;
5590 	}
5591 	return NULL;
5592 }
5593 
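/*
 * Create a placeholder btrfs_device for a devid that the metadata
 * references but that is not currently present, so degraded mounts can
 * proceed.
 */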
5594 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5595 					    u64 devid, u8 *dev_uuid)
5596 {
5597 	struct btrfs_device *device;
5598 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5599 
5600 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
5601 	if (IS_ERR(device))
5602 		return NULL;
5603 
5604 	list_add(&device->dev_list, &fs_devices->devices);
5605 	device->fs_devices = fs_devices;
5606 	fs_devices->num_devices++;
5607 
5608 	device->missing = 1;
5609 	fs_devices->missing_devices++;
5610 
5611 	return device;
5612 }
5613 
5614 /**
5615  * btrfs_alloc_device - allocate struct btrfs_device
5616  * @fs_info:	used only for generating a new devid, can be NULL if
5617  *		devid is provided (i.e. @devid != NULL).
5618  * @devid:	a pointer to devid for this device.  If NULL a new devid
5619  *		is generated.
5620  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
5621  *		is generated.
5622  *
5623  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
5624  * on error.  Returned struct is not linked onto any lists and can be
5625  * destroyed with kfree() right away.
5626  */
5627 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
5628 					const u64 *devid,
5629 					const u8 *uuid)
5630 {
5631 	struct btrfs_device *dev;
5632 	u64 tmp;
5633 
5634 	if (!devid && !fs_info) {
5635 		WARN_ON(1);
5636 		return ERR_PTR(-EINVAL);
5637 	}
5638 
5639 	dev = __alloc_device();
5640 	if (IS_ERR(dev))
5641 		return dev;
5642 
5643 	if (devid)
5644 		tmp = *devid;
5645 	else {
5646 		int ret;
5647 
5648 		ret = find_next_devid(fs_info, &tmp);
5649 		if (ret) {
5650 			kfree(dev);
5651 			return ERR_PTR(ret);
5652 		}
5653 	}
5654 	dev->devid = tmp;
5655 
5656 	if (uuid)
5657 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
5658 	else
5659 		generate_random_uuid(dev->uuid);
5660 
5661 	dev->work.func = pending_bios_fn;
5662 
5663 	return dev;
5664 }
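
/*
 * Usage sketch (illustrative only, not part of the original source):
 * callers reading an existing device item pass the devid and uuid found
 * on disk; callers creating a brand new device pass NULL for both and
 * let them be generated:
 *
 *	struct btrfs_device *dev;
 *
 *	dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */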
5665 
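/*
 * Turn one on-disk chunk item into an extent_map in the logical->physical
 * mapping tree.  Each stripe's device is looked up by devid and uuid;
 * when mounted degraded, a missing device is replaced by a placeholder
 * from add_missing_dev().
 */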
5666 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5667 			  struct extent_buffer *leaf,
5668 			  struct btrfs_chunk *chunk)
5669 {
5670 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5671 	struct map_lookup *map;
5672 	struct extent_map *em;
5673 	u64 logical;
5674 	u64 length;
5675 	u64 devid;
5676 	u8 uuid[BTRFS_UUID_SIZE];
5677 	int num_stripes;
5678 	int ret;
5679 	int i;
5680 
5681 	logical = key->offset;
5682 	length = btrfs_chunk_length(leaf, chunk);
5683 
5684 	read_lock(&map_tree->map_tree.lock);
5685 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5686 	read_unlock(&map_tree->map_tree.lock);
5687 
5688 	/* already mapped? */
5689 	if (em && em->start <= logical && em->start + em->len > logical) {
5690 		free_extent_map(em);
5691 		return 0;
5692 	} else if (em) {
5693 		free_extent_map(em);
5694 	}
5695 
5696 	em = alloc_extent_map();
5697 	if (!em)
5698 		return -ENOMEM;
5699 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5700 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5701 	if (!map) {
5702 		free_extent_map(em);
5703 		return -ENOMEM;
5704 	}
5705 
5706 	em->bdev = (struct block_device *)map;
5707 	em->start = logical;
5708 	em->len = length;
5709 	em->orig_start = 0;
5710 	em->block_start = 0;
5711 	em->block_len = em->len;
5712 
5713 	map->num_stripes = num_stripes;
5714 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
5715 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
5716 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5717 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5718 	map->type = btrfs_chunk_type(leaf, chunk);
5719 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5720 	for (i = 0; i < num_stripes; i++) {
5721 		map->stripes[i].physical =
5722 			btrfs_stripe_offset_nr(leaf, chunk, i);
5723 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5724 		read_extent_buffer(leaf, uuid, (unsigned long)
5725 				   btrfs_stripe_dev_uuid_nr(chunk, i),
5726 				   BTRFS_UUID_SIZE);
5727 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5728 							uuid, NULL);
5729 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5730 			kfree(map);
5731 			free_extent_map(em);
5732 			return -EIO;
5733 		}
5734 		if (!map->stripes[i].dev) {
5735 			map->stripes[i].dev =
5736 				add_missing_dev(root, devid, uuid);
5737 			if (!map->stripes[i].dev) {
5738 				kfree(map);
5739 				free_extent_map(em);
5740 				return -EIO;
5741 			}
5742 		}
5743 		map->stripes[i].dev->in_fs_metadata = 1;
5744 	}
5745 
5746 	write_lock(&map_tree->map_tree.lock);
5747 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5748 	write_unlock(&map_tree->map_tree.lock);
5749 	BUG_ON(ret); /* Tree corruption */
5750 	free_extent_map(em);
5751 
5752 	return 0;
5753 }
5754 
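/*
 * Copy the persistent fields of an on-disk dev item into the in-memory
 * btrfs_device.
 */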
5755 static void fill_device_from_item(struct extent_buffer *leaf,
5756 				 struct btrfs_dev_item *dev_item,
5757 				 struct btrfs_device *device)
5758 {
5759 	unsigned long ptr;
5760 
5761 	device->devid = btrfs_device_id(leaf, dev_item);
5762 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5763 	device->total_bytes = device->disk_total_bytes;
5764 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5765 	device->type = btrfs_device_type(leaf, dev_item);
5766 	device->io_align = btrfs_device_io_align(leaf, dev_item);
5767 	device->io_width = btrfs_device_io_width(leaf, dev_item);
5768 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5769 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5770 	device->is_tgtdev_for_dev_replace = 0;
5771 
5772 	ptr = btrfs_device_uuid(dev_item);
5773 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5774 }
5775 
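/*
 * Make sure the seed filesystem with the given fsid is open.  If it is
 * not already chained onto fs_devices->seed, clone the scanned
 * fs_devices for that fsid, open it read-only, verify that it really is
 * a seed filesystem and link it into the seed chain.
 */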
5776 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5777 {
5778 	struct btrfs_fs_devices *fs_devices;
5779 	int ret;
5780 
5781 	BUG_ON(!mutex_is_locked(&uuid_mutex));
5782 
5783 	fs_devices = root->fs_info->fs_devices->seed;
5784 	while (fs_devices) {
5785 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5786 			ret = 0;
5787 			goto out;
5788 		}
5789 		fs_devices = fs_devices->seed;
5790 	}
5791 
5792 	fs_devices = find_fsid(fsid);
5793 	if (!fs_devices) {
5794 		ret = -ENOENT;
5795 		goto out;
5796 	}
5797 
5798 	fs_devices = clone_fs_devices(fs_devices);
5799 	if (IS_ERR(fs_devices)) {
5800 		ret = PTR_ERR(fs_devices);
5801 		goto out;
5802 	}
5803 
5804 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5805 				   root->fs_info->bdev_holder);
5806 	if (ret) {
5807 		free_fs_devices(fs_devices);
5808 		goto out;
5809 	}
5810 
5811 	if (!fs_devices->seeding) {
5812 		__btrfs_close_devices(fs_devices);
5813 		free_fs_devices(fs_devices);
5814 		ret = -EINVAL;
5815 		goto out;
5816 	}
5817 
5818 	fs_devices->seed = root->fs_info->fs_devices->seed;
5819 	root->fs_info->fs_devices->seed = fs_devices;
5820 out:
5821 	return ret;
5822 }
5823 
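/*
 * Match one on-disk dev item against the devices discovered at scan
 * time, opening seed filesystems and creating placeholders for missing
 * devices as needed, then refresh the in-memory device from the item.
 */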
5824 static int read_one_dev(struct btrfs_root *root,
5825 			struct extent_buffer *leaf,
5826 			struct btrfs_dev_item *dev_item)
5827 {
5828 	struct btrfs_device *device;
5829 	u64 devid;
5830 	int ret;
5831 	u8 fs_uuid[BTRFS_UUID_SIZE];
5832 	u8 dev_uuid[BTRFS_UUID_SIZE];
5833 
5834 	devid = btrfs_device_id(leaf, dev_item);
5835 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
5836 			   BTRFS_UUID_SIZE);
5837 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
5838 			   BTRFS_UUID_SIZE);
5839 
5840 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5841 		ret = open_seed_devices(root, fs_uuid);
5842 		if (ret && !btrfs_test_opt(root, DEGRADED))
5843 			return ret;
5844 	}
5845 
5846 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5847 	if (!device || !device->bdev) {
5848 		if (!btrfs_test_opt(root, DEGRADED))
5849 			return -EIO;
5850 
5851 		if (!device) {
5852 			btrfs_warn(root->fs_info, "devid %llu missing", devid);
5853 			device = add_missing_dev(root, devid, dev_uuid);
5854 			if (!device)
5855 				return -ENOMEM;
5856 		} else if (!device->missing) {
5857 			/*
5858 			 * This happens when a device that was properly set up
5859 			 * in the device info lists suddenly goes bad.
5860 			 * device->bdev is NULL, and so we have to set
5861 			 * device->missing to 1 here.
5862 			 */
5863 			root->fs_info->fs_devices->missing_devices++;
5864 			device->missing = 1;
5865 		}
5866 	}
5867 
5868 	if (device->fs_devices != root->fs_info->fs_devices) {
5869 		BUG_ON(device->writeable);
5870 		if (device->generation !=
5871 		    btrfs_device_generation(leaf, dev_item))
5872 			return -EINVAL;
5873 	}
5874 
5875 	fill_device_from_item(leaf, dev_item, device);
5876 	device->in_fs_metadata = 1;
5877 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5878 		device->fs_devices->total_rw_bytes += device->total_bytes;
5879 		spin_lock(&root->fs_info->free_chunk_lock);
5880 		root->fs_info->free_chunk_space += device->total_bytes -
5881 			device->bytes_used;
5882 		spin_unlock(&root->fs_info->free_chunk_lock);
5883 	}
5884 	ret = 0;
5885 	return ret;
5886 }
5887 
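/*
 * The superblock's sys_chunk_array packs (disk key, chunk item) pairs
 * back to back:
 *
 *	+----------+-----------------+----------+-----------------+--
 *	| disk_key | chunk + stripes | disk_key | chunk + stripes | ..
 *	+----------+-----------------+----------+-----------------+--
 *
 * Copy the superblock into a dummy extent buffer so that the regular
 * extent-buffer accessors can be used to parse the chunk items.
 */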
5888 int btrfs_read_sys_array(struct btrfs_root *root)
5889 {
5890 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5891 	struct extent_buffer *sb;
5892 	struct btrfs_disk_key *disk_key;
5893 	struct btrfs_chunk *chunk;
5894 	u8 *ptr;
5895 	unsigned long sb_ptr;
5896 	int ret = 0;
5897 	u32 num_stripes;
5898 	u32 array_size;
5899 	u32 len = 0;
5900 	u32 cur;
5901 	struct btrfs_key key;
5902 
5903 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5904 					  BTRFS_SUPER_INFO_SIZE);
5905 	if (!sb)
5906 		return -ENOMEM;
5907 	btrfs_set_buffer_uptodate(sb);
5908 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5909 	/*
5910 	 * The sb extent buffer is artificial and is only used to read the
5911 	 * system array.  The btrfs_set_buffer_uptodate() call does not
5912 	 * properly mark all of its pages up-to-date when the page is larger:
5913 	 * the extent does not cover the whole page, so check_page_uptodate
5914 	 * does not find all the page's extents up-to-date (the hole beyond
5915 	 * sb), and write_extent_buffer then triggers a WARN_ON.
5916 	 *
5917 	 * Regular short extents go through the mark_extent_buffer_dirty/
5918 	 * writeback cycle, but sb spans only this function.  Add an explicit
5919 	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
5920 	 */
5921 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5922 		SetPageUptodate(sb->pages[0]);
5923 
5924 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5925 	array_size = btrfs_super_sys_array_size(super_copy);
5926 
5927 	ptr = super_copy->sys_chunk_array;
5928 	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5929 	cur = 0;
5930 
5931 	while (cur < array_size) {
5932 		disk_key = (struct btrfs_disk_key *)ptr;
5933 		btrfs_disk_key_to_cpu(&key, disk_key);
5934 
5935 		len = sizeof(*disk_key); ptr += len;
5936 		sb_ptr += len;
5937 		cur += len;
5938 
5939 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5940 			chunk = (struct btrfs_chunk *)sb_ptr;
5941 			ret = read_one_chunk(root, &key, sb, chunk);
5942 			if (ret)
5943 				break;
5944 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5945 			len = btrfs_chunk_item_size(num_stripes);
5946 		} else {
5947 			ret = -EIO;
5948 			break;
5949 		}
5950 		ptr += len;
5951 		sb_ptr += len;
5952 		cur += len;
5953 	}
5954 	free_extent_buffer(sb);
5955 	return ret;
5956 }
5957 
5958 int btrfs_read_chunk_tree(struct btrfs_root *root)
5959 {
5960 	struct btrfs_path *path;
5961 	struct extent_buffer *leaf;
5962 	struct btrfs_key key;
5963 	struct btrfs_key found_key;
5964 	int ret;
5965 	int slot;
5966 
5967 	root = root->fs_info->chunk_root;
5968 
5969 	path = btrfs_alloc_path();
5970 	if (!path)
5971 		return -ENOMEM;
5972 
5973 	mutex_lock(&uuid_mutex);
5974 	lock_chunks(root);
5975 
5976 	/*
5977 	 * Read all device items, and then all the chunk items. All
5978 	 * device items are found before any chunk item (their object id
5979 	 * is smaller than the lowest possible object id for a chunk
5980 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
5981 	 */
5982 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5983 	key.offset = 0;
5984 	key.type = 0;
5985 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5986 	if (ret < 0)
5987 		goto error;
5988 	while (1) {
5989 		leaf = path->nodes[0];
5990 		slot = path->slots[0];
5991 		if (slot >= btrfs_header_nritems(leaf)) {
5992 			ret = btrfs_next_leaf(root, path);
5993 			if (ret == 0)
5994 				continue;
5995 			if (ret < 0)
5996 				goto error;
5997 			break;
5998 		}
5999 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6000 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6001 			struct btrfs_dev_item *dev_item;
6002 			dev_item = btrfs_item_ptr(leaf, slot,
6003 						  struct btrfs_dev_item);
6004 			ret = read_one_dev(root, leaf, dev_item);
6005 			if (ret)
6006 				goto error;
6007 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6008 			struct btrfs_chunk *chunk;
6009 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6010 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6011 			if (ret)
6012 				goto error;
6013 		}
6014 		path->slots[0]++;
6015 	}
6016 	ret = 0;
6017 error:
6018 	unlock_chunks(root);
6019 	mutex_unlock(&uuid_mutex);
6020 
6021 	btrfs_free_path(path);
6022 	return ret;
6023 }
6024 
6025 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6026 {
6027 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6028 	struct btrfs_device *device;
6029 
6030 	mutex_lock(&fs_devices->device_list_mutex);
6031 	list_for_each_entry(device, &fs_devices->devices, dev_list)
6032 		device->dev_root = fs_info->dev_root;
6033 	mutex_unlock(&fs_devices->device_list_mutex);
6034 }
6035 
6036 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6037 {
6038 	int i;
6039 
6040 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6041 		btrfs_dev_stat_reset(dev, i);
6042 }
6043 
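/*
 * Device statistics are stored in the device tree under the key
 * (0, BTRFS_DEV_STATS_KEY, devid) as an array of __le64 counters.  Load
 * the counters for every device at mount time; a device without an item
 * (e.g. one that was just added) simply starts with all counters at
 * zero.
 */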
6044 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6045 {
6046 	struct btrfs_key key;
6047 	struct btrfs_key found_key;
6048 	struct btrfs_root *dev_root = fs_info->dev_root;
6049 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6050 	struct extent_buffer *eb;
6051 	int slot;
6052 	int ret = 0;
6053 	struct btrfs_device *device;
6054 	struct btrfs_path *path = NULL;
6055 	int i;
6056 
6057 	path = btrfs_alloc_path();
6058 	if (!path) {
6059 		ret = -ENOMEM;
6060 		goto out;
6061 	}
6062 
6063 	mutex_lock(&fs_devices->device_list_mutex);
6064 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6065 		int item_size;
6066 		struct btrfs_dev_stats_item *ptr;
6067 
6068 		key.objectid = 0;
6069 		key.type = BTRFS_DEV_STATS_KEY;
6070 		key.offset = device->devid;
6071 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6072 		if (ret) {
6073 			__btrfs_reset_dev_stats(device);
6074 			device->dev_stats_valid = 1;
6075 			btrfs_release_path(path);
6076 			continue;
6077 		}
6078 		slot = path->slots[0];
6079 		eb = path->nodes[0];
6080 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6081 		item_size = btrfs_item_size_nr(eb, slot);
6082 
6083 		ptr = btrfs_item_ptr(eb, slot,
6084 				     struct btrfs_dev_stats_item);
6085 
6086 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6087 			if (item_size >= (1 + i) * sizeof(__le64))
6088 				btrfs_dev_stat_set(device, i,
6089 					btrfs_dev_stats_value(eb, ptr, i));
6090 			else
6091 				btrfs_dev_stat_reset(device, i);
6092 		}
6093 
6094 		device->dev_stats_valid = 1;
6095 		btrfs_dev_stat_print_on_load(device);
6096 		btrfs_release_path(path);
6097 	}
6098 	mutex_unlock(&fs_devices->device_list_mutex);
6099 
6100 out:
6101 	btrfs_free_path(path);
6102 	return ret < 0 ? ret : 0;
6103 }
6104 
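/*
 * Write one device's counters back to its dev_stats item.  An existing
 * item that is too small to hold all current counters is deleted and a
 * full-size item is inserted in its place.
 */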
6105 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6106 				struct btrfs_root *dev_root,
6107 				struct btrfs_device *device)
6108 {
6109 	struct btrfs_path *path;
6110 	struct btrfs_key key;
6111 	struct extent_buffer *eb;
6112 	struct btrfs_dev_stats_item *ptr;
6113 	int ret;
6114 	int i;
6115 
6116 	key.objectid = 0;
6117 	key.type = BTRFS_DEV_STATS_KEY;
6118 	key.offset = device->devid;
6119 
6120 	path = btrfs_alloc_path();
6121 	if (!path)
		return -ENOMEM;
6122 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6123 	if (ret < 0) {
6124 		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
6125 			      ret, rcu_str_deref(device->name));
6126 		goto out;
6127 	}
6128 
6129 	if (ret == 0 &&
6130 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6131 		/* need to delete old one and insert a new one */
6132 		ret = btrfs_del_item(trans, dev_root, path);
6133 		if (ret != 0) {
6134 			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
6135 				      rcu_str_deref(device->name), ret);
6136 			goto out;
6137 		}
6138 		ret = 1;
6139 	}
6140 
6141 	if (ret == 1) {
6142 		/* need to insert a new item */
6143 		btrfs_release_path(path);
6144 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6145 					      &key, sizeof(*ptr));
6146 		if (ret < 0) {
6147 			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
6148 				      rcu_str_deref(device->name), ret);
6149 			goto out;
6150 		}
6151 	}
6152 
6153 	eb = path->nodes[0];
6154 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6155 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6156 		btrfs_set_dev_stats_value(eb, ptr, i,
6157 					  btrfs_dev_stat_read(device, i));
6158 	btrfs_mark_buffer_dirty(eb);
6159 
6160 out:
6161 	btrfs_free_path(path);
6162 	return ret;
6163 }
6164 
6165 /*
6166  * Called from commit_transaction.  Writes all changed device stats to disk.
6167  */
6168 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6169 			struct btrfs_fs_info *fs_info)
6170 {
6171 	struct btrfs_root *dev_root = fs_info->dev_root;
6172 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6173 	struct btrfs_device *device;
6174 	int ret = 0;
6175 
6176 	mutex_lock(&fs_devices->device_list_mutex);
6177 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6178 		if (!device->dev_stats_valid || !device->dev_stats_dirty)
6179 			continue;
6180 
6181 		ret = update_dev_stat_item(trans, dev_root, device);
6182 		if (!ret)
6183 			device->dev_stats_dirty = 0;
6184 	}
6185 	mutex_unlock(&fs_devices->device_list_mutex);
6186 
6187 	return ret;
6188 }
6189 
6190 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6191 {
6192 	btrfs_dev_stat_inc(dev, index);
6193 	btrfs_dev_stat_print_on_error(dev);
6194 }
6195 
6196 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6197 {
6198 	if (!dev->dev_stats_valid)
6199 		return;
6200 	printk_ratelimited_in_rcu(KERN_ERR
6201 			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6202 			   rcu_str_deref(dev->name),
6203 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6204 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6205 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6206 			   btrfs_dev_stat_read(dev,
6207 					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
6208 			   btrfs_dev_stat_read(dev,
6209 					       BTRFS_DEV_STAT_GENERATION_ERRS));
6210 }
6211 
6212 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6213 {
6214 	int i;
6215 
6216 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6217 		if (btrfs_dev_stat_read(dev, i) != 0)
6218 			break;
6219 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6220 		return; /* all values == 0, suppress message */
6221 
6222 	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6223 	       rcu_str_deref(dev->name),
6224 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6225 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6226 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6227 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6228 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6229 }
6230 
6231 int btrfs_get_dev_stats(struct btrfs_root *root,
6232 			struct btrfs_ioctl_get_dev_stats *stats)
6233 {
6234 	struct btrfs_device *dev;
6235 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6236 	int i;
6237 
6238 	mutex_lock(&fs_devices->device_list_mutex);
6239 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6240 	mutex_unlock(&fs_devices->device_list_mutex);
6241 
6242 	if (!dev) {
6243 		printk(KERN_WARNING
6244 		       "btrfs: get dev_stats failed, device not found\n");
6245 		return -ENODEV;
6246 	} else if (!dev->dev_stats_valid) {
6247 		printk(KERN_WARNING
6248 		       "btrfs: get dev_stats failed, not yet valid\n");
6249 		return -ENODEV;
6250 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6251 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6252 			if (stats->nr_items > i)
6253 				stats->values[i] =
6254 					btrfs_dev_stat_read_and_reset(dev, i);
6255 			else
6256 				btrfs_dev_stat_reset(dev, i);
6257 		}
6258 	} else {
6259 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6260 			if (stats->nr_items > i)
6261 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6262 	}
6263 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6264 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6265 	return 0;
6266 }
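
/*
 * Userspace reaches this through the BTRFS_IOC_GET_DEV_STATS ioctl.  A
 * minimal sketch (illustrative; assumes an fd open on the filesystem
 * and the uapi definitions from linux/btrfs.h):
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = devid,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */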
6267 
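/*
 * Zero the superblock magic of a device that is leaving the filesystem
 * so that later scans no longer recognize it as btrfs.
 */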
6268 int btrfs_scratch_superblock(struct btrfs_device *device)
6269 {
6270 	struct buffer_head *bh;
6271 	struct btrfs_super_block *disk_super;
6272 
6273 	bh = btrfs_read_dev_super(device->bdev);
6274 	if (!bh)
6275 		return -EINVAL;
6276 	disk_super = (struct btrfs_super_block *)bh->b_data;
6277 
6278 	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6279 	set_buffer_dirty(bh);
6280 	sync_dirty_buffer(bh);
6281 	brelse(bh);
6282 
6283 	return 0;
6284 }
6285