xref: /openbmc/linux/fs/btrfs/volumes.c (revision 206e8c00752fbe9cc463184236ac64b2a532cda5)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "ctree.h"
32 #include "extent_map.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "print-tree.h"
36 #include "volumes.h"
37 #include "raid56.h"
38 #include "async-thread.h"
39 #include "check-integrity.h"
40 #include "rcu-string.h"
41 #include "math.h"
42 #include "dev-replace.h"
43 #include "sysfs.h"
44 
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46 				struct btrfs_root *root,
47 				struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
50 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
51 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
52 
53 DEFINE_MUTEX(uuid_mutex);
54 static LIST_HEAD(fs_uuids);
55 struct list_head *btrfs_get_fs_uuids(void)
56 {
57 	return &fs_uuids;
58 }
59 
60 static struct btrfs_fs_devices *__alloc_fs_devices(void)
61 {
62 	struct btrfs_fs_devices *fs_devs;
63 
64 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
65 	if (!fs_devs)
66 		return ERR_PTR(-ENOMEM);
67 
68 	mutex_init(&fs_devs->device_list_mutex);
69 
70 	INIT_LIST_HEAD(&fs_devs->devices);
71 	INIT_LIST_HEAD(&fs_devs->resized_devices);
72 	INIT_LIST_HEAD(&fs_devs->alloc_list);
73 	INIT_LIST_HEAD(&fs_devs->list);
74 
75 	return fs_devs;
76 }
77 
78 /**
79  * alloc_fs_devices - allocate struct btrfs_fs_devices
80  * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
81  *		generated.
82  *
83  * Return: a pointer to a new &struct btrfs_fs_devices on success;
84  * ERR_PTR() on error.  Returned struct is not linked onto any lists and
85  * can be destroyed with kfree() right away.
86  */
87 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
88 {
89 	struct btrfs_fs_devices *fs_devs;
90 
91 	fs_devs = __alloc_fs_devices();
92 	if (IS_ERR(fs_devs))
93 		return fs_devs;
94 
95 	if (fsid)
96 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
97 	else
98 		generate_random_uuid(fs_devs->fsid);
99 
100 	return fs_devs;
101 }
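
/*
 * Usage sketch (illustrative only; the helper below is hypothetical):
 * callers must test the result with IS_ERR(), not against NULL, since
 * allocation failure is reported as ERR_PTR(-ENOMEM).
 */
#if 0
static int example_register_fsid(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = alloc_fs_devices(fsid);	/* NULL fsid => random uuid */
	if (IS_ERR(fs_devs))
		return PTR_ERR(fs_devs);

	/* not yet linked anywhere, so kfree() is still a valid bail-out */
	list_add(&fs_devs->list, &fs_uuids);
	return 0;
}
#endif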
102 
103 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
104 {
105 	struct btrfs_device *device;
106 	WARN_ON(fs_devices->opened);
107 	while (!list_empty(&fs_devices->devices)) {
108 		device = list_entry(fs_devices->devices.next,
109 				    struct btrfs_device, dev_list);
110 		list_del(&device->dev_list);
111 		rcu_string_free(device->name);
112 		kfree(device);
113 	}
114 	kfree(fs_devices);
115 }
116 
117 static void btrfs_kobject_uevent(struct block_device *bdev,
118 				 enum kobject_action action)
119 {
120 	int ret;
121 
122 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
123 	if (ret)
124 		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
125 			action,
126 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
127 			&disk_to_dev(bdev->bd_disk)->kobj);
128 }
129 
130 void btrfs_cleanup_fs_uuids(void)
131 {
132 	struct btrfs_fs_devices *fs_devices;
133 
134 	while (!list_empty(&fs_uuids)) {
135 		fs_devices = list_entry(fs_uuids.next,
136 					struct btrfs_fs_devices, list);
137 		list_del(&fs_devices->list);
138 		free_fs_devices(fs_devices);
139 	}
140 }
141 
142 static struct btrfs_device *__alloc_device(void)
143 {
144 	struct btrfs_device *dev;
145 
146 	dev = kzalloc(sizeof(*dev), GFP_NOFS);
147 	if (!dev)
148 		return ERR_PTR(-ENOMEM);
149 
150 	INIT_LIST_HEAD(&dev->dev_list);
151 	INIT_LIST_HEAD(&dev->dev_alloc_list);
152 	INIT_LIST_HEAD(&dev->resized_list);
153 
154 	spin_lock_init(&dev->io_lock);
155 
156 	spin_lock_init(&dev->reada_lock);
157 	atomic_set(&dev->reada_in_flight, 0);
158 	atomic_set(&dev->dev_stats_ccnt, 0);
159 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
160 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
161 
162 	return dev;
163 }
164 
165 static noinline struct btrfs_device *__find_device(struct list_head *head,
166 						   u64 devid, u8 *uuid)
167 {
168 	struct btrfs_device *dev;
169 
170 	list_for_each_entry(dev, head, dev_list) {
171 		if (dev->devid == devid &&
172 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
173 			return dev;
174 		}
175 	}
176 	return NULL;
177 }
178 
179 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
180 {
181 	struct btrfs_fs_devices *fs_devices;
182 
183 	list_for_each_entry(fs_devices, &fs_uuids, list) {
184 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
185 			return fs_devices;
186 	}
187 	return NULL;
188 }
189 
190 static int
191 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
192 		      int flush, struct block_device **bdev,
193 		      struct buffer_head **bh)
194 {
195 	int ret;
196 
197 	*bdev = blkdev_get_by_path(device_path, flags, holder);
198 
199 	if (IS_ERR(*bdev)) {
200 		ret = PTR_ERR(*bdev);
201 		printk(KERN_INFO "BTRFS: open %s failed\n", device_path);
202 		goto error;
203 	}
204 
205 	if (flush)
206 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
207 	ret = set_blocksize(*bdev, 4096);
208 	if (ret) {
209 		blkdev_put(*bdev, flags);
210 		goto error;
211 	}
212 	invalidate_bdev(*bdev);
213 	*bh = btrfs_read_dev_super(*bdev);
214 	if (!*bh) {
215 		ret = -EINVAL;
216 		blkdev_put(*bdev, flags);
217 		goto error;
218 	}
219 
220 	return 0;
221 
222 error:
223 	*bdev = NULL;
224 	*bh = NULL;
225 	return ret;
226 }
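
/*
 * Usage sketch (illustrative; the caller and its arguments are
 * hypothetical): on success the caller owns both the block_device and
 * the superblock buffer_head, and on failure both out parameters are
 * reset to NULL, so no partial cleanup is needed.
 */
#if 0
static int example_peek_super(const char *path, void *holder)
{
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;

	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder,
				    1 /* flush */, &bdev, &bh);
	if (ret)
		return ret;

	/* ... inspect (struct btrfs_super_block *)bh->b_data ... */

	brelse(bh);
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}
#endif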
227 
228 static void requeue_list(struct btrfs_pending_bios *pending_bios,
229 			struct bio *head, struct bio *tail)
230 {
231 
232 	struct bio *old_head;
233 
234 	old_head = pending_bios->head;
235 	pending_bios->head = head;
236 	if (pending_bios->tail)
237 		tail->bi_next = old_head;
238 	else
239 		pending_bios->tail = tail;
240 }
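
/*
 * Semantics sketch (illustrative values): requeue_list() prepends the
 * segment [head..tail] to the pending list.  With an existing list
 * A->B and a requeued segment X->Y the result is X->Y->A->B; on an
 * empty list both head and tail end up pointing into the segment.
 */
#if 0
	/* before: pending_bios->head = A, pending_bios->tail = B       */
	requeue_list(pending_bios, X, Y);
	/* after:  pending_bios->head = X, Y->bi_next = A, tail still B */
#endif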
241 
242 /*
243  * we try to collect pending bios for a device so we don't get a large
244  * number of procs sending bios down to the same device.  This greatly
245  * improves the scheduler's ability to collect and merge the bios.
246  *
247  * But, it also turns into a long list of bios to process and that is sure
248  * to eventually make the worker thread block.  The solution here is to
249  * make some progress and then put this work struct back at the end of
250  * the list if the block device is congested.  This way, multiple devices
251  * can make progress from a single worker thread.
252  */
253 static noinline void run_scheduled_bios(struct btrfs_device *device)
254 {
255 	struct bio *pending;
256 	struct backing_dev_info *bdi;
257 	struct btrfs_fs_info *fs_info;
258 	struct btrfs_pending_bios *pending_bios;
259 	struct bio *tail;
260 	struct bio *cur;
261 	int again = 0;
262 	unsigned long num_run;
263 	unsigned long batch_run = 0;
264 	unsigned long limit;
265 	unsigned long last_waited = 0;
266 	int force_reg = 0;
267 	int sync_pending = 0;
268 	struct blk_plug plug;
269 
270 	/*
271 	 * this function runs all the bios we've collected for
272 	 * a particular device.  We don't want to wander off to
273 	 * another device without first sending all of these down.
274 	 * So, set up a plug here and finish it off before we return
275 	 */
276 	blk_start_plug(&plug);
277 
278 	bdi = blk_get_backing_dev_info(device->bdev);
279 	fs_info = device->dev_root->fs_info;
280 	limit = btrfs_async_submit_limit(fs_info);
281 	limit = limit * 2 / 3;
282 
283 loop:
284 	spin_lock(&device->io_lock);
285 
286 loop_lock:
287 	num_run = 0;
288 
289 	/* take all the bios off the list at once and process them
290 	 * later on (without the lock held).  But, remember the
291 	 * tail and other pointers so the bios can be properly reinserted
292 	 * into the list if we hit congestion
293 	 */
294 	if (!force_reg && device->pending_sync_bios.head) {
295 		pending_bios = &device->pending_sync_bios;
296 		force_reg = 1;
297 	} else {
298 		pending_bios = &device->pending_bios;
299 		force_reg = 0;
300 	}
301 
302 	pending = pending_bios->head;
303 	tail = pending_bios->tail;
304 	WARN_ON(pending && !tail);
305 
306 	/*
307 	 * if pending was null this time around, no bios need processing
308 	 * at all and we can stop.  Otherwise it'll loop back up again
309 	 * and do an additional check so no bios are missed.
310 	 *
311 	 * device->running_pending is used to synchronize with the
312 	 * schedule_bio code.
313 	 */
314 	if (device->pending_sync_bios.head == NULL &&
315 	    device->pending_bios.head == NULL) {
316 		again = 0;
317 		device->running_pending = 0;
318 	} else {
319 		again = 1;
320 		device->running_pending = 1;
321 	}
322 
323 	pending_bios->head = NULL;
324 	pending_bios->tail = NULL;
325 
326 	spin_unlock(&device->io_lock);
327 
328 	while (pending) {
329 
330 		rmb();
331 		/* we want to work on both lists, but do more bios on the
332 		 * sync list than the regular list
333 		 */
334 		if ((num_run > 32 &&
335 		    pending_bios != &device->pending_sync_bios &&
336 		    device->pending_sync_bios.head) ||
337 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
338 		    device->pending_bios.head)) {
339 			spin_lock(&device->io_lock);
340 			requeue_list(pending_bios, pending, tail);
341 			goto loop_lock;
342 		}
343 
344 		cur = pending;
345 		pending = pending->bi_next;
346 		cur->bi_next = NULL;
347 
348 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
349 		    waitqueue_active(&fs_info->async_submit_wait))
350 			wake_up(&fs_info->async_submit_wait);
351 
352 		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
353 
354 		/*
355 		 * if we're doing the sync list, record that our
356 		 * plug has some sync requests on it
357 		 *
358 		 * If we're doing the regular list and there are
359 		 * sync requests sitting around, unplug before
360 		 * we add more
361 		 */
362 		if (pending_bios == &device->pending_sync_bios) {
363 			sync_pending = 1;
364 		} else if (sync_pending) {
365 			blk_finish_plug(&plug);
366 			blk_start_plug(&plug);
367 			sync_pending = 0;
368 		}
369 
370 		btrfsic_submit_bio(cur->bi_rw, cur);
371 		num_run++;
372 		batch_run++;
373 
374 		cond_resched();
375 
376 		/*
377 		 * we made progress, there is more work to do and the bdi
378 		 * is now congested.  Back off and let other work structs
379 		 * run instead
380 		 */
381 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
382 		    fs_info->fs_devices->open_devices > 1) {
383 			struct io_context *ioc;
384 
385 			ioc = current->io_context;
386 
387 			/*
388 			 * the main goal here is that we don't want to
389 			 * block if we're going to be able to submit
390 			 * more requests without blocking.
391 			 *
392 			 * This code does two great things, it pokes into
393 			 * the elevator code from a filesystem _and_
394 			 * it makes assumptions about how batching works.
395 			 */
396 			if (ioc && ioc->nr_batch_requests > 0 &&
397 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
398 			    (last_waited == 0 ||
399 			     ioc->last_waited == last_waited)) {
400 				/*
401 				 * we want to go through our batch of
402 				 * requests and stop.  So, we copy out
403 				 * the ioc->last_waited time and test
404 				 * against it before looping
405 				 */
406 				last_waited = ioc->last_waited;
407 				cond_resched();
408 				continue;
409 			}
410 			spin_lock(&device->io_lock);
411 			requeue_list(pending_bios, pending, tail);
412 			device->running_pending = 1;
413 
414 			spin_unlock(&device->io_lock);
415 			btrfs_queue_work(fs_info->submit_workers,
416 					 &device->work);
417 			goto done;
418 		}
419 		/* unplug every 64 requests just for good measure */
420 		if (batch_run % 64 == 0) {
421 			blk_finish_plug(&plug);
422 			blk_start_plug(&plug);
423 			sync_pending = 0;
424 		}
425 	}
426 
427 	cond_resched();
428 	if (again)
429 		goto loop;
430 
431 	spin_lock(&device->io_lock);
432 	if (device->pending_bios.head || device->pending_sync_bios.head)
433 		goto loop_lock;
434 	spin_unlock(&device->io_lock);
435 
436 done:
437 	blk_finish_plug(&plug);
438 }
439 
440 static void pending_bios_fn(struct btrfs_work *work)
441 {
442 	struct btrfs_device *device;
443 
444 	device = container_of(work, struct btrfs_device, work);
445 	run_scheduled_bios(device);
446 }
447 
448 
449 void btrfs_free_stale_device(struct btrfs_device *cur_dev)
450 {
451 	struct btrfs_fs_devices *fs_devs;
452 	struct btrfs_device *dev;
453 
454 	if (!cur_dev->name)
455 		return;
456 
457 	list_for_each_entry(fs_devs, &fs_uuids, list) {
458 		int del = 1;
459 
460 		if (fs_devs->opened)
461 			continue;
462 		if (fs_devs->seeding)
463 			continue;
464 
465 		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
466 
467 			if (dev == cur_dev)
468 				continue;
469 			if (!dev->name)
470 				continue;
471 
472 			/*
473 			 * Todo: This won't be enough. What if the same device
474 			 * comes back (with a new uuid) under its mapper path?
475 			 * But for now this does help, as mostly an admin will
476 			 * use either the mapper or the non-mapper path throughout.
477 			 */
478 			rcu_read_lock();
479 			del = strcmp(rcu_str_deref(dev->name),
480 						rcu_str_deref(cur_dev->name));
481 			rcu_read_unlock();
482 			if (!del)
483 				break;
484 		}
485 
486 		if (!del) {
487 			/* delete the stale device */
488 			if (fs_devs->num_devices == 1) {
489 				btrfs_sysfs_remove_fsid(fs_devs);
490 				list_del(&fs_devs->list);
491 				free_fs_devices(fs_devs);
492 			} else {
493 				fs_devs->num_devices--;
494 				list_del(&dev->dev_list);
495 				rcu_string_free(dev->name);
496 				kfree(dev);
497 			}
498 			break;
499 		}
500 	}
501 }
502 
503 /*
504  * Add new device to list of registered devices
505  *
506  * Returns:
507  * 1   - first time device is seen
508  * 0   - device already known
509  * < 0 - error
510  */
511 static noinline int device_list_add(const char *path,
512 			   struct btrfs_super_block *disk_super,
513 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
514 {
515 	struct btrfs_device *device;
516 	struct btrfs_fs_devices *fs_devices;
517 	struct rcu_string *name;
518 	int ret = 0;
519 	u64 found_transid = btrfs_super_generation(disk_super);
520 
521 	fs_devices = find_fsid(disk_super->fsid);
522 	if (!fs_devices) {
523 		fs_devices = alloc_fs_devices(disk_super->fsid);
524 		if (IS_ERR(fs_devices))
525 			return PTR_ERR(fs_devices);
526 
527 		list_add(&fs_devices->list, &fs_uuids);
528 
529 		device = NULL;
530 	} else {
531 		device = __find_device(&fs_devices->devices, devid,
532 				       disk_super->dev_item.uuid);
533 	}
534 
535 	if (!device) {
536 		if (fs_devices->opened)
537 			return -EBUSY;
538 
539 		device = btrfs_alloc_device(NULL, &devid,
540 					    disk_super->dev_item.uuid);
541 		if (IS_ERR(device)) {
542 			/* we can safely leave the fs_devices entry around */
543 			return PTR_ERR(device);
544 		}
545 
546 		name = rcu_string_strdup(path, GFP_NOFS);
547 		if (!name) {
548 			kfree(device);
549 			return -ENOMEM;
550 		}
551 		rcu_assign_pointer(device->name, name);
552 
553 		mutex_lock(&fs_devices->device_list_mutex);
554 		list_add_rcu(&device->dev_list, &fs_devices->devices);
555 		fs_devices->num_devices++;
556 		mutex_unlock(&fs_devices->device_list_mutex);
557 
558 		ret = 1;
559 		device->fs_devices = fs_devices;
560 	} else if (!device->name || strcmp(device->name->str, path)) {
561 		/*
562 		 * When FS is already mounted.
563 		 * 1. If you are here and if the device->name is NULL that
564 		 *    means this device was missing at time of FS mount.
565 		 * 2. If you are here and if the device->name is different
566 		 *    from 'path' that means either
567 		 *      a. The same device disappeared and reappeared with
568 		 *         different name. or
569 		 *      b. The missing-disk-which-was-replaced, has
570 		 *         reappeared now.
571 		 *
572 		 * We must allow 1 and 2a above, but 2b would be spurious
573 		 * and unintentional.
574 		 *
575 		 * Further, in cases 1 and 2a above, the disk at 'path'
576 		 * would have missed some transactions while it was away, and
577 		 * in case 2a the stale bdev has to be updated as well.
578 		 * 2b must not be allowed at any time.
579 		 */
580 
581 		/*
582 		 * For now, we do allow update to btrfs_fs_device through the
583 		 * btrfs dev scan cli after FS has been mounted.  We're still
584 		 * tracking a problem where systems fail mount by subvolume id
585 		 * when we reject replacement on a mounted FS.
586 		 */
587 		if (!fs_devices->opened && found_transid < device->generation) {
588 			/*
589 			 * That is, if the FS is _not_ mounted and if you
590 			 * are here, there is more than one disk with the
591 			 * same uuid and devid. We keep the one with the
592 			 * larger generation number, or the last-in if the
593 			 * generations are equal.
594 			 */
595 			return -EEXIST;
596 		}
597 
598 		name = rcu_string_strdup(path, GFP_NOFS);
599 		if (!name)
600 			return -ENOMEM;
601 		rcu_string_free(device->name);
602 		rcu_assign_pointer(device->name, name);
603 		if (device->missing) {
604 			fs_devices->missing_devices--;
605 			device->missing = 0;
606 		}
607 	}
608 
609 	/*
610 	 * Unmount does not free the btrfs_device struct but would zero
611 	 * generation along with most of the other members. So just update
612 	 * it back here. We need it to pick the disk with the largest
613 	 * generation (as above).
614 	 */
615 	if (!fs_devices->opened)
616 		device->generation = found_transid;
617 
618 	/*
619 	 * if there is new btrfs on an already registered device,
620 	 * then remove the stale device entry.
621 	 */
622 	btrfs_free_stale_device(device);
623 
624 	*fs_devices_ret = fs_devices;
625 
626 	return ret;
627 }
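
/*
 * Return-value sketch (illustrative): btrfs_scan_one_device() below
 * relies on this tristate to print the "device fsid" line only when a
 * device is seen for the first time.
 */
#if 0
	ret = device_list_add(path, disk_super, devid, &fs_devices);
	if (ret < 0)
		goto out;	/* -ENOMEM, -EBUSY or -EEXIST */
	if (ret > 0)		/* first time this device was seen */
		pr_info("BTRFS: new device registered\n");
	ret = 0;		/* 0 means the device was already known */
#endif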
628 
629 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
630 {
631 	struct btrfs_fs_devices *fs_devices;
632 	struct btrfs_device *device;
633 	struct btrfs_device *orig_dev;
634 
635 	fs_devices = alloc_fs_devices(orig->fsid);
636 	if (IS_ERR(fs_devices))
637 		return fs_devices;
638 
639 	mutex_lock(&orig->device_list_mutex);
640 	fs_devices->total_devices = orig->total_devices;
641 
642 	/* We have held the volume lock, it is safe to get the devices. */
643 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
644 		struct rcu_string *name;
645 
646 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
647 					    orig_dev->uuid);
648 		if (IS_ERR(device))
649 			goto error;
650 
651 		/*
652 		 * This is ok to do without rcu read locked because we hold the
653 		 * uuid mutex so nothing we touch in here is going to disappear.
654 		 */
655 		if (orig_dev->name) {
656 			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
657 			if (!name) {
658 				kfree(device);
659 				goto error;
660 			}
661 			rcu_assign_pointer(device->name, name);
662 		}
663 
664 		list_add(&device->dev_list, &fs_devices->devices);
665 		device->fs_devices = fs_devices;
666 		fs_devices->num_devices++;
667 	}
668 	mutex_unlock(&orig->device_list_mutex);
669 	return fs_devices;
670 error:
671 	mutex_unlock(&orig->device_list_mutex);
672 	free_fs_devices(fs_devices);
673 	return ERR_PTR(-ENOMEM);
674 }
675 
676 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
677 {
678 	struct btrfs_device *device, *next;
679 	struct btrfs_device *latest_dev = NULL;
680 
681 	mutex_lock(&uuid_mutex);
682 again:
683 	/* This is the initialized path, it is safe to release the devices. */
684 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
685 		if (device->in_fs_metadata) {
686 			if (!device->is_tgtdev_for_dev_replace &&
687 			    (!latest_dev ||
688 			     device->generation > latest_dev->generation)) {
689 				latest_dev = device;
690 			}
691 			continue;
692 		}
693 
694 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
695 			/*
696 			 * In the first step, keep the device which has
697 			 * the correct fsid and the devid that is used
698 			 * for the dev_replace procedure.
699 			 * In the second step, the dev_replace state is
700 			 * read from the device tree and it is known
701 			 * whether the procedure is really active or
702 			 * not, which means whether this device is
703 			 * used or whether it should be removed.
704 			 */
705 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
706 				continue;
707 			}
708 		}
709 		if (device->bdev) {
710 			blkdev_put(device->bdev, device->mode);
711 			device->bdev = NULL;
712 			fs_devices->open_devices--;
713 		}
714 		if (device->writeable) {
715 			list_del_init(&device->dev_alloc_list);
716 			device->writeable = 0;
717 			if (!device->is_tgtdev_for_dev_replace)
718 				fs_devices->rw_devices--;
719 		}
720 		list_del_init(&device->dev_list);
721 		fs_devices->num_devices--;
722 		rcu_string_free(device->name);
723 		kfree(device);
724 	}
725 
726 	if (fs_devices->seed) {
727 		fs_devices = fs_devices->seed;
728 		goto again;
729 	}
730 
731 	fs_devices->latest_bdev = latest_dev->bdev;
732 
733 	mutex_unlock(&uuid_mutex);
734 }
735 
736 static void __free_device(struct work_struct *work)
737 {
738 	struct btrfs_device *device;
739 
740 	device = container_of(work, struct btrfs_device, rcu_work);
741 
742 	if (device->bdev)
743 		blkdev_put(device->bdev, device->mode);
744 
745 	rcu_string_free(device->name);
746 	kfree(device);
747 }
748 
749 static void free_device(struct rcu_head *head)
750 {
751 	struct btrfs_device *device;
752 
753 	device = container_of(head, struct btrfs_device, rcu);
754 
755 	INIT_WORK(&device->rcu_work, __free_device);
756 	schedule_work(&device->rcu_work);
757 }
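
/*
 * The two-stage teardown above is a common kernel pattern: the RCU
 * callback may run in softirq context where blkdev_put() (which can
 * sleep) is not allowed, so free_device() only bounces the real work
 * to a workqueue.  Minimal sketch of the pattern with a hypothetical
 * type:
 */
#if 0
struct example_obj {
	struct rcu_head rcu;
	struct work_struct work;
};

static void example_free_work(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(work, struct example_obj, work);

	/* sleeping cleanup is safe in process context */
	kfree(obj);
}

static void example_free_rcu(struct rcu_head *head)
{
	struct example_obj *obj =
		container_of(head, struct example_obj, rcu);

	INIT_WORK(&obj->work, example_free_work);
	schedule_work(&obj->work);	/* defer out of softirq context */
}
#endif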
758 
759 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
760 {
761 	struct btrfs_device *device, *tmp;
762 
763 	if (--fs_devices->opened > 0)
764 		return 0;
765 
766 	mutex_lock(&fs_devices->device_list_mutex);
767 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
768 		struct btrfs_device *new_device;
769 		struct rcu_string *name;
770 
771 		if (device->bdev)
772 			fs_devices->open_devices--;
773 
774 		if (device->writeable &&
775 		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
776 			list_del_init(&device->dev_alloc_list);
777 			fs_devices->rw_devices--;
778 		}
779 
780 		if (device->missing)
781 			fs_devices->missing_devices--;
782 
783 		new_device = btrfs_alloc_device(NULL, &device->devid,
784 						device->uuid);
785 		BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
786 
787 		/* Safe because we are under uuid_mutex */
788 		if (device->name) {
789 			name = rcu_string_strdup(device->name->str, GFP_NOFS);
790 			BUG_ON(!name); /* -ENOMEM */
791 			rcu_assign_pointer(new_device->name, name);
792 		}
793 
794 		list_replace_rcu(&device->dev_list, &new_device->dev_list);
795 		new_device->fs_devices = device->fs_devices;
796 
797 		call_rcu(&device->rcu, free_device);
798 	}
799 	mutex_unlock(&fs_devices->device_list_mutex);
800 
801 	WARN_ON(fs_devices->open_devices);
802 	WARN_ON(fs_devices->rw_devices);
803 	fs_devices->opened = 0;
804 	fs_devices->seeding = 0;
805 
806 	return 0;
807 }
808 
809 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
810 {
811 	struct btrfs_fs_devices *seed_devices = NULL;
812 	int ret;
813 
814 	mutex_lock(&uuid_mutex);
815 	ret = __btrfs_close_devices(fs_devices);
816 	if (!fs_devices->opened) {
817 		seed_devices = fs_devices->seed;
818 		fs_devices->seed = NULL;
819 	}
820 	mutex_unlock(&uuid_mutex);
821 
822 	while (seed_devices) {
823 		fs_devices = seed_devices;
824 		seed_devices = fs_devices->seed;
825 		__btrfs_close_devices(fs_devices);
826 		free_fs_devices(fs_devices);
827 	}
828 	/*
829 	 * Wait for rcu kworkers under __btrfs_close_devices
830 	 * to finish all blkdev_puts so device is really
831 	 * free when umount is done.
832 	 */
833 	rcu_barrier();
834 	return ret;
835 }
836 
837 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
838 				fmode_t flags, void *holder)
839 {
840 	struct request_queue *q;
841 	struct block_device *bdev;
842 	struct list_head *head = &fs_devices->devices;
843 	struct btrfs_device *device;
844 	struct btrfs_device *latest_dev = NULL;
845 	struct buffer_head *bh;
846 	struct btrfs_super_block *disk_super;
847 	u64 devid;
848 	int seeding = 1;
849 	int ret = 0;
850 
851 	flags |= FMODE_EXCL;
852 
853 	list_for_each_entry(device, head, dev_list) {
854 		if (device->bdev)
855 			continue;
856 		if (!device->name)
857 			continue;
858 
859 		/* Just open everything we can; ignore failures here */
860 		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
861 					    &bdev, &bh))
862 			continue;
863 
864 		disk_super = (struct btrfs_super_block *)bh->b_data;
865 		devid = btrfs_stack_device_id(&disk_super->dev_item);
866 		if (devid != device->devid)
867 			goto error_brelse;
868 
869 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
870 			   BTRFS_UUID_SIZE))
871 			goto error_brelse;
872 
873 		device->generation = btrfs_super_generation(disk_super);
874 		if (!latest_dev ||
875 		    device->generation > latest_dev->generation)
876 			latest_dev = device;
877 
878 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
879 			device->writeable = 0;
880 		} else {
881 			device->writeable = !bdev_read_only(bdev);
882 			seeding = 0;
883 		}
884 
885 		q = bdev_get_queue(bdev);
886 		if (blk_queue_discard(q))
887 			device->can_discard = 1;
888 
889 		device->bdev = bdev;
890 		device->in_fs_metadata = 0;
891 		device->mode = flags;
892 
893 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
894 			fs_devices->rotating = 1;
895 
896 		fs_devices->open_devices++;
897 		if (device->writeable &&
898 		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
899 			fs_devices->rw_devices++;
900 			list_add(&device->dev_alloc_list,
901 				 &fs_devices->alloc_list);
902 		}
903 		brelse(bh);
904 		continue;
905 
906 error_brelse:
907 		brelse(bh);
908 		blkdev_put(bdev, flags);
909 		continue;
910 	}
911 	if (fs_devices->open_devices == 0) {
912 		ret = -EINVAL;
913 		goto out;
914 	}
915 	fs_devices->seeding = seeding;
916 	fs_devices->opened = 1;
917 	fs_devices->latest_bdev = latest_dev->bdev;
918 	fs_devices->total_rw_bytes = 0;
919 out:
920 	return ret;
921 }
922 
923 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
924 		       fmode_t flags, void *holder)
925 {
926 	int ret;
927 
928 	mutex_lock(&uuid_mutex);
929 	if (fs_devices->opened) {
930 		fs_devices->opened++;
931 		ret = 0;
932 	} else {
933 		ret = __btrfs_open_devices(fs_devices, flags, holder);
934 	}
935 	mutex_unlock(&uuid_mutex);
936 	return ret;
937 }
938 
939 /*
940  * Look for a btrfs signature on a device. This may be called out of the mount path
941  * and we are not allowed to call set_blocksize during the scan. The superblock
942  * is read via the pagecache.
943  */
944 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
945 			  struct btrfs_fs_devices **fs_devices_ret)
946 {
947 	struct btrfs_super_block *disk_super;
948 	struct block_device *bdev;
949 	struct page *page;
950 	void *p;
951 	int ret = -EINVAL;
952 	u64 devid;
953 	u64 transid;
954 	u64 total_devices;
955 	u64 bytenr;
956 	pgoff_t index;
957 
958 	/*
959 	 * we would like to check all the supers, but that would make
960 	 * a btrfs mount succeed after a mkfs from a different FS.
961  * So, we would need to add a special mount option to scan for
962  * the later supers, using BTRFS_SUPER_MIRROR_MAX instead
963 	 */
964 	bytenr = btrfs_sb_offset(0);
965 	flags |= FMODE_EXCL;
966 	mutex_lock(&uuid_mutex);
967 
968 	bdev = blkdev_get_by_path(path, flags, holder);
969 
970 	if (IS_ERR(bdev)) {
971 		ret = PTR_ERR(bdev);
972 		goto error;
973 	}
974 
975 	/* make sure our super fits in the device */
976 	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
977 		goto error_bdev_put;
978 
979 	/* make sure our super fits in the page */
980 	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
981 		goto error_bdev_put;
982 
983 	/* make sure our super doesn't straddle pages on disk */
984 	index = bytenr >> PAGE_CACHE_SHIFT;
985 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
986 		goto error_bdev_put;
987 
988 	/* pull in the page with our super */
989 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
990 				   index, GFP_NOFS);
991 
992 	if (IS_ERR_OR_NULL(page))
993 		goto error_bdev_put;
994 
995 	p = kmap(page);
996 
997 	/* align our pointer to the offset of the super block */
998 	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
999 
1000 	if (btrfs_super_bytenr(disk_super) != bytenr ||
1001 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
1002 		goto error_unmap;
1003 
1004 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1005 	transid = btrfs_super_generation(disk_super);
1006 	total_devices = btrfs_super_num_devices(disk_super);
1007 
1008 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1009 	if (ret > 0) {
1010 		if (disk_super->label[0]) {
1011 			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
1012 				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
1013 			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1014 		} else {
1015 			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1016 		}
1017 
1018 		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1019 		ret = 0;
1020 	}
1021 	if (!ret && fs_devices_ret)
1022 		(*fs_devices_ret)->total_devices = total_devices;
1023 
1024 error_unmap:
1025 	kunmap(page);
1026 	page_cache_release(page);
1027 
1028 error_bdev_put:
1029 	blkdev_put(bdev, flags);
1030 error:
1031 	mutex_unlock(&uuid_mutex);
1032 	return ret;
1033 }
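
/*
 * Usage sketch (illustrative; the device path and holder are
 * hypothetical): this is roughly how the mount and device-scan ioctl
 * paths drive the probe.
 */
#if 0
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
				    &fs_devices);
	if (ret)
		return ret;	/* not a btrfs device, or an I/O error */
	/* fs_devices is now registered on the global fs_uuids list */
#endif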
1034 
1035 /* helper to account the used device space in the range */
1036 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
1037 				   u64 end, u64 *length)
1038 {
1039 	struct btrfs_key key;
1040 	struct btrfs_root *root = device->dev_root;
1041 	struct btrfs_dev_extent *dev_extent;
1042 	struct btrfs_path *path;
1043 	u64 extent_end;
1044 	int ret;
1045 	int slot;
1046 	struct extent_buffer *l;
1047 
1048 	*length = 0;
1049 
1050 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1051 		return 0;
1052 
1053 	path = btrfs_alloc_path();
1054 	if (!path)
1055 		return -ENOMEM;
1056 	path->reada = 2;
1057 
1058 	key.objectid = device->devid;
1059 	key.offset = start;
1060 	key.type = BTRFS_DEV_EXTENT_KEY;
1061 
1062 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1063 	if (ret < 0)
1064 		goto out;
1065 	if (ret > 0) {
1066 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1067 		if (ret < 0)
1068 			goto out;
1069 	}
1070 
1071 	while (1) {
1072 		l = path->nodes[0];
1073 		slot = path->slots[0];
1074 		if (slot >= btrfs_header_nritems(l)) {
1075 			ret = btrfs_next_leaf(root, path);
1076 			if (ret == 0)
1077 				continue;
1078 			if (ret < 0)
1079 				goto out;
1080 
1081 			break;
1082 		}
1083 		btrfs_item_key_to_cpu(l, &key, slot);
1084 
1085 		if (key.objectid < device->devid)
1086 			goto next;
1087 
1088 		if (key.objectid > device->devid)
1089 			break;
1090 
1091 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1092 			goto next;
1093 
1094 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1095 		extent_end = key.offset + btrfs_dev_extent_length(l,
1096 								  dev_extent);
1097 		if (key.offset <= start && extent_end > end) {
1098 			*length = end - start + 1;
1099 			break;
1100 		} else if (key.offset <= start && extent_end > start)
1101 			*length += extent_end - start;
1102 		else if (key.offset > start && extent_end <= end)
1103 			*length += extent_end - key.offset;
1104 		else if (key.offset > start && key.offset <= end) {
1105 			*length += end - key.offset + 1;
1106 			break;
1107 		} else if (key.offset > end)
1108 			break;
1109 
1110 next:
1111 		path->slots[0]++;
1112 	}
1113 	ret = 0;
1114 out:
1115 	btrfs_free_path(path);
1116 	return ret;
1117 }
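
/*
 * Worked example (illustrative numbers): querying [start=100, end=199]
 * against dev extents [80,120), [150,170) and [190,240) accounts
 * 20 + 20 + 10 = 50 bytes used.  The first extent overlaps the front
 * of the range, the second lies fully inside it, and the third
 * overlaps the tail; the "+ 1" terms exist because @end is inclusive.
 */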
1118 
1119 static int contains_pending_extent(struct btrfs_transaction *transaction,
1120 				   struct btrfs_device *device,
1121 				   u64 *start, u64 len)
1122 {
1123 	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1124 	struct extent_map *em;
1125 	struct list_head *search_list = &fs_info->pinned_chunks;
1126 	int ret = 0;
1127 	u64 physical_start = *start;
1128 
1129 	if (transaction)
1130 		search_list = &transaction->pending_chunks;
1131 again:
1132 	list_for_each_entry(em, search_list, list) {
1133 		struct map_lookup *map;
1134 		int i;
1135 
1136 		map = (struct map_lookup *)em->bdev;
1137 		for (i = 0; i < map->num_stripes; i++) {
1138 			u64 end;
1139 
1140 			if (map->stripes[i].dev != device)
1141 				continue;
1142 			if (map->stripes[i].physical >= physical_start + len ||
1143 			    map->stripes[i].physical + em->orig_block_len <=
1144 			    physical_start)
1145 				continue;
1146 			/*
1147 			 * Make sure that while processing the pinned list we do
1148 			 * not override our *start with a lower value, because
1149 			 * we can have pinned chunks that fall within this
1150 			 * device hole and that have lower physical addresses
1151 			 * than the pending chunks we processed before. If we
1152 			 * do not take this special care we can end up getting
1153 			 * 2 pending chunks that start at the same physical
1154 			 * device offsets because the end offset of a pinned
1155 			 * chunk can be equal to the start offset of some
1156 			 * pending chunk.
1157 			 */
1158 			end = map->stripes[i].physical + em->orig_block_len;
1159 			if (end > *start) {
1160 				*start = end;
1161 				ret = 1;
1162 			}
1163 		}
1164 	}
1165 	if (search_list != &fs_info->pinned_chunks) {
1166 		search_list = &fs_info->pinned_chunks;
1167 		goto again;
1168 	}
1169 
1170 	return ret;
1171 }
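
/*
 * Worked example (illustrative numbers): with *start = 1000 and
 * len = 4096, the candidate hole is [1000, 5096).  A pending stripe on
 * this device covering [2048, 8192) overlaps it, so *start is bumped
 * to 8192 and 1 is returned, telling the caller to retry the search
 * from the end of the colliding chunk.
 */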
1172 
1173 
1174 /*
1175  * find_free_dev_extent_start - find free space in the specified device
1176  * @device:	  the device which we search the free space in
1177  * @num_bytes:	  the size of the free space that we need
1178  * @search_start: the position from which to begin the search
1179  * @start:	  store the start of the free space.
1180  * @len:	  the size of the free space that we find, or the size
1181  *		  of the max free space if we don't find suitable free space
1182  *
1183  * this uses a pretty simple search, the expectation is that it is
1184  * called very infrequently and that a given device has a small number
1185  * of extents
1186  *
1187  * @start is used to store the start of the free space, if found. But if we
1188  * don't find suitable free space, it will be used to store the start position
1189  * of the max free space.
1190  *
1191  * @len is used to store the size of the free space that we find.
1192  * But if we don't find suitable free space, it is used to store the size of
1193  * the max free space.
1194  */
1195 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1196 			       struct btrfs_device *device, u64 num_bytes,
1197 			       u64 search_start, u64 *start, u64 *len)
1198 {
1199 	struct btrfs_key key;
1200 	struct btrfs_root *root = device->dev_root;
1201 	struct btrfs_dev_extent *dev_extent;
1202 	struct btrfs_path *path;
1203 	u64 hole_size;
1204 	u64 max_hole_start;
1205 	u64 max_hole_size;
1206 	u64 extent_end;
1207 	u64 search_end = device->total_bytes;
1208 	int ret;
1209 	int slot;
1210 	struct extent_buffer *l;
1211 
1212 	path = btrfs_alloc_path();
1213 	if (!path)
1214 		return -ENOMEM;
1215 
1216 	max_hole_start = search_start;
1217 	max_hole_size = 0;
1218 
1219 again:
1220 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1221 		ret = -ENOSPC;
1222 		goto out;
1223 	}
1224 
1225 	path->reada = 2;
1226 	path->search_commit_root = 1;
1227 	path->skip_locking = 1;
1228 
1229 	key.objectid = device->devid;
1230 	key.offset = search_start;
1231 	key.type = BTRFS_DEV_EXTENT_KEY;
1232 
1233 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1234 	if (ret < 0)
1235 		goto out;
1236 	if (ret > 0) {
1237 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1238 		if (ret < 0)
1239 			goto out;
1240 	}
1241 
1242 	while (1) {
1243 		l = path->nodes[0];
1244 		slot = path->slots[0];
1245 		if (slot >= btrfs_header_nritems(l)) {
1246 			ret = btrfs_next_leaf(root, path);
1247 			if (ret == 0)
1248 				continue;
1249 			if (ret < 0)
1250 				goto out;
1251 
1252 			break;
1253 		}
1254 		btrfs_item_key_to_cpu(l, &key, slot);
1255 
1256 		if (key.objectid < device->devid)
1257 			goto next;
1258 
1259 		if (key.objectid > device->devid)
1260 			break;
1261 
1262 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1263 			goto next;
1264 
1265 		if (key.offset > search_start) {
1266 			hole_size = key.offset - search_start;
1267 
1268 			/*
1269 			 * Have to check before we set max_hole_start, otherwise
1270 			 * we could end up sending back this offset anyway.
1271 			 */
1272 			if (contains_pending_extent(transaction, device,
1273 						    &search_start,
1274 						    hole_size)) {
1275 				if (key.offset >= search_start) {
1276 					hole_size = key.offset - search_start;
1277 				} else {
1278 					WARN_ON_ONCE(1);
1279 					hole_size = 0;
1280 				}
1281 			}
1282 
1283 			if (hole_size > max_hole_size) {
1284 				max_hole_start = search_start;
1285 				max_hole_size = hole_size;
1286 			}
1287 
1288 			/*
1289 			 * If this free space is greater than what we need,
1290 			 * it must be the max free space that we have found
1291 			 * until now, so max_hole_start must point to the start
1292 			 * of this free space and the length of this free space
1293 			 * is stored in max_hole_size. Thus, we return
1294 			 * max_hole_start and max_hole_size and go back to the
1295 			 * caller.
1296 			 */
1297 			if (hole_size >= num_bytes) {
1298 				ret = 0;
1299 				goto out;
1300 			}
1301 		}
1302 
1303 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1304 		extent_end = key.offset + btrfs_dev_extent_length(l,
1305 								  dev_extent);
1306 		if (extent_end > search_start)
1307 			search_start = extent_end;
1308 next:
1309 		path->slots[0]++;
1310 		cond_resched();
1311 	}
1312 
1313 	/*
1314 	 * At this point, search_start should be the end of
1315 	 * allocated dev extents, and when shrinking the device,
1316 	 * search_end may be smaller than search_start.
1317 	 */
1318 	if (search_end > search_start) {
1319 		hole_size = search_end - search_start;
1320 
1321 		if (contains_pending_extent(transaction, device, &search_start,
1322 					    hole_size)) {
1323 			btrfs_release_path(path);
1324 			goto again;
1325 		}
1326 
1327 		if (hole_size > max_hole_size) {
1328 			max_hole_start = search_start;
1329 			max_hole_size = hole_size;
1330 		}
1331 	}
1332 
1333 	/* See above. */
1334 	if (max_hole_size < num_bytes)
1335 		ret = -ENOSPC;
1336 	else
1337 		ret = 0;
1338 
1339 out:
1340 	btrfs_free_path(path);
1341 	*start = max_hole_start;
1342 	if (len)
1343 		*len = max_hole_size;
1344 	return ret;
1345 }
1346 
1347 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1348 			 struct btrfs_device *device, u64 num_bytes,
1349 			 u64 *start, u64 *len)
1350 {
1351 	struct btrfs_root *root = device->dev_root;
1352 	u64 search_start;
1353 
1354 	/* FIXME use last free of some kind */
1355 
1356 	/*
1357 	 * we don't want to overwrite the superblock on the drive,
1358 	 * so we make sure to start at an offset of at least 1MB
1359 	 */
1360 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1361 	return find_free_dev_extent_start(trans->transaction, device,
1362 					  num_bytes, search_start, start, len);
1363 }
1364 
1365 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1366 			  struct btrfs_device *device,
1367 			  u64 start, u64 *dev_extent_len)
1368 {
1369 	int ret;
1370 	struct btrfs_path *path;
1371 	struct btrfs_root *root = device->dev_root;
1372 	struct btrfs_key key;
1373 	struct btrfs_key found_key;
1374 	struct extent_buffer *leaf = NULL;
1375 	struct btrfs_dev_extent *extent = NULL;
1376 
1377 	path = btrfs_alloc_path();
1378 	if (!path)
1379 		return -ENOMEM;
1380 
1381 	key.objectid = device->devid;
1382 	key.offset = start;
1383 	key.type = BTRFS_DEV_EXTENT_KEY;
1384 again:
1385 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386 	if (ret > 0) {
1387 		ret = btrfs_previous_item(root, path, key.objectid,
1388 					  BTRFS_DEV_EXTENT_KEY);
1389 		if (ret)
1390 			goto out;
1391 		leaf = path->nodes[0];
1392 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1393 		extent = btrfs_item_ptr(leaf, path->slots[0],
1394 					struct btrfs_dev_extent);
1395 		BUG_ON(found_key.offset > start || found_key.offset +
1396 		       btrfs_dev_extent_length(leaf, extent) < start);
1397 		key = found_key;
1398 		btrfs_release_path(path);
1399 		goto again;
1400 	} else if (ret == 0) {
1401 		leaf = path->nodes[0];
1402 		extent = btrfs_item_ptr(leaf, path->slots[0],
1403 					struct btrfs_dev_extent);
1404 	} else {
1405 		btrfs_error(root->fs_info, ret, "Slot search failed");
1406 		goto out;
1407 	}
1408 
1409 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1410 
1411 	ret = btrfs_del_item(trans, root, path);
1412 	if (ret) {
1413 		btrfs_error(root->fs_info, ret,
1414 			    "Failed to remove dev extent item");
1415 	} else {
1416 		trans->transaction->have_free_bgs = 1;
1417 	}
1418 out:
1419 	btrfs_free_path(path);
1420 	return ret;
1421 }
1422 
1423 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1424 				  struct btrfs_device *device,
1425 				  u64 chunk_tree, u64 chunk_objectid,
1426 				  u64 chunk_offset, u64 start, u64 num_bytes)
1427 {
1428 	int ret;
1429 	struct btrfs_path *path;
1430 	struct btrfs_root *root = device->dev_root;
1431 	struct btrfs_dev_extent *extent;
1432 	struct extent_buffer *leaf;
1433 	struct btrfs_key key;
1434 
1435 	WARN_ON(!device->in_fs_metadata);
1436 	WARN_ON(device->is_tgtdev_for_dev_replace);
1437 	path = btrfs_alloc_path();
1438 	if (!path)
1439 		return -ENOMEM;
1440 
1441 	key.objectid = device->devid;
1442 	key.offset = start;
1443 	key.type = BTRFS_DEV_EXTENT_KEY;
1444 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1445 				      sizeof(*extent));
1446 	if (ret)
1447 		goto out;
1448 
1449 	leaf = path->nodes[0];
1450 	extent = btrfs_item_ptr(leaf, path->slots[0],
1451 				struct btrfs_dev_extent);
1452 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1453 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1454 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1455 
1456 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1457 		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1458 
1459 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1460 	btrfs_mark_buffer_dirty(leaf);
1461 out:
1462 	btrfs_free_path(path);
1463 	return ret;
1464 }
1465 
1466 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1467 {
1468 	struct extent_map_tree *em_tree;
1469 	struct extent_map *em;
1470 	struct rb_node *n;
1471 	u64 ret = 0;
1472 
1473 	em_tree = &fs_info->mapping_tree.map_tree;
1474 	read_lock(&em_tree->lock);
1475 	n = rb_last(&em_tree->map);
1476 	if (n) {
1477 		em = rb_entry(n, struct extent_map, rb_node);
1478 		ret = em->start + em->len;
1479 	}
1480 	read_unlock(&em_tree->lock);
1481 
1482 	return ret;
1483 }
1484 
1485 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1486 				    u64 *devid_ret)
1487 {
1488 	int ret;
1489 	struct btrfs_key key;
1490 	struct btrfs_key found_key;
1491 	struct btrfs_path *path;
1492 
1493 	path = btrfs_alloc_path();
1494 	if (!path)
1495 		return -ENOMEM;
1496 
1497 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1498 	key.type = BTRFS_DEV_ITEM_KEY;
1499 	key.offset = (u64)-1;
1500 
1501 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1502 	if (ret < 0)
1503 		goto error;
1504 
1505 	BUG_ON(ret == 0); /* Corruption */
1506 
1507 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1508 				  BTRFS_DEV_ITEMS_OBJECTID,
1509 				  BTRFS_DEV_ITEM_KEY);
1510 	if (ret) {
1511 		*devid_ret = 1;
1512 	} else {
1513 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1514 				      path->slots[0]);
1515 		*devid_ret = found_key.offset + 1;
1516 	}
1517 	ret = 0;
1518 error:
1519 	btrfs_free_path(path);
1520 	return ret;
1521 }
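
/*
 * Worked example (illustrative): with dev items for devids 1 and 2 in
 * the chunk tree, the (u64)-1 search lands past the last item,
 * btrfs_previous_item() backs up to the (DEV_ITEMS, DEV_ITEM, 2) key
 * and *devid_ret becomes 3.  On a tree with no dev items the
 * previous-item lookup fails and *devid_ret falls back to 1.
 */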
1522 
1523 /*
1524  * the device information is stored in the chunk root;
1525  * the btrfs_device struct should be fully filled in
1526  */
1527 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1528 			    struct btrfs_root *root,
1529 			    struct btrfs_device *device)
1530 {
1531 	int ret;
1532 	struct btrfs_path *path;
1533 	struct btrfs_dev_item *dev_item;
1534 	struct extent_buffer *leaf;
1535 	struct btrfs_key key;
1536 	unsigned long ptr;
1537 
1538 	root = root->fs_info->chunk_root;
1539 
1540 	path = btrfs_alloc_path();
1541 	if (!path)
1542 		return -ENOMEM;
1543 
1544 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1545 	key.type = BTRFS_DEV_ITEM_KEY;
1546 	key.offset = device->devid;
1547 
1548 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1549 				      sizeof(*dev_item));
1550 	if (ret)
1551 		goto out;
1552 
1553 	leaf = path->nodes[0];
1554 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1555 
1556 	btrfs_set_device_id(leaf, dev_item, device->devid);
1557 	btrfs_set_device_generation(leaf, dev_item, 0);
1558 	btrfs_set_device_type(leaf, dev_item, device->type);
1559 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1560 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1561 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1562 	btrfs_set_device_total_bytes(leaf, dev_item,
1563 				     btrfs_device_get_disk_total_bytes(device));
1564 	btrfs_set_device_bytes_used(leaf, dev_item,
1565 				    btrfs_device_get_bytes_used(device));
1566 	btrfs_set_device_group(leaf, dev_item, 0);
1567 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1568 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1569 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1570 
1571 	ptr = btrfs_device_uuid(dev_item);
1572 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1573 	ptr = btrfs_device_fsid(dev_item);
1574 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1575 	btrfs_mark_buffer_dirty(leaf);
1576 
1577 	ret = 0;
1578 out:
1579 	btrfs_free_path(path);
1580 	return ret;
1581 }
1582 
1583 /*
1584  * Function to update ctime/mtime for a given device path.
1585  * Mainly used for ctime/mtime based probe like libblkid.
1586  */
1587 static void update_dev_time(char *path_name)
1588 {
1589 	struct file *filp;
1590 
1591 	filp = filp_open(path_name, O_RDWR, 0);
1592 	if (IS_ERR(filp))
1593 		return;
1594 	file_update_time(filp);
1595 	filp_close(filp, NULL);
1596 	return;
1597 }
1598 
1599 static int btrfs_rm_dev_item(struct btrfs_root *root,
1600 			     struct btrfs_device *device)
1601 {
1602 	int ret;
1603 	struct btrfs_path *path;
1604 	struct btrfs_key key;
1605 	struct btrfs_trans_handle *trans;
1606 
1607 	root = root->fs_info->chunk_root;
1608 
1609 	path = btrfs_alloc_path();
1610 	if (!path)
1611 		return -ENOMEM;
1612 
1613 	trans = btrfs_start_transaction(root, 0);
1614 	if (IS_ERR(trans)) {
1615 		btrfs_free_path(path);
1616 		return PTR_ERR(trans);
1617 	}
1618 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1619 	key.type = BTRFS_DEV_ITEM_KEY;
1620 	key.offset = device->devid;
1621 
1622 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1623 	if (ret < 0)
1624 		goto out;
1625 
1626 	if (ret > 0) {
1627 		ret = -ENOENT;
1628 		goto out;
1629 	}
1630 
1631 	ret = btrfs_del_item(trans, root, path);
1632 	if (ret)
1633 		goto out;
1634 out:
1635 	btrfs_free_path(path);
1636 	btrfs_commit_transaction(trans, root);
1637 	return ret;
1638 }
1639 
1640 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1641 {
1642 	struct btrfs_device *device;
1643 	struct btrfs_device *next_device;
1644 	struct block_device *bdev;
1645 	struct buffer_head *bh = NULL;
1646 	struct btrfs_super_block *disk_super;
1647 	struct btrfs_fs_devices *cur_devices;
1648 	u64 all_avail;
1649 	u64 devid;
1650 	u64 num_devices;
1651 	u8 *dev_uuid;
1652 	unsigned seq;
1653 	int ret = 0;
1654 	bool clear_super = false;
1655 
1656 	mutex_lock(&uuid_mutex);
1657 
1658 	do {
1659 		seq = read_seqbegin(&root->fs_info->profiles_lock);
1660 
1661 		all_avail = root->fs_info->avail_data_alloc_bits |
1662 			    root->fs_info->avail_system_alloc_bits |
1663 			    root->fs_info->avail_metadata_alloc_bits;
1664 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1665 
1666 	num_devices = root->fs_info->fs_devices->num_devices;
1667 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1668 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1669 		WARN_ON(num_devices < 1);
1670 		num_devices--;
1671 	}
1672 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1673 
1674 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1675 		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1676 		goto out;
1677 	}
1678 
1679 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1680 		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1681 		goto out;
1682 	}
1683 
1684 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1685 	    root->fs_info->fs_devices->rw_devices <= 2) {
1686 		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1687 		goto out;
1688 	}
1689 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1690 	    root->fs_info->fs_devices->rw_devices <= 3) {
1691 		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1692 		goto out;
1693 	}
1694 
1695 	if (strcmp(device_path, "missing") == 0) {
1696 		struct list_head *devices;
1697 		struct btrfs_device *tmp;
1698 
1699 		device = NULL;
1700 		devices = &root->fs_info->fs_devices->devices;
1701 		/*
1702 		 * It is safe to read the devices since the volume_mutex
1703 		 * is held.
1704 		 */
1705 		list_for_each_entry(tmp, devices, dev_list) {
1706 			if (tmp->in_fs_metadata &&
1707 			    !tmp->is_tgtdev_for_dev_replace &&
1708 			    !tmp->bdev) {
1709 				device = tmp;
1710 				break;
1711 			}
1712 		}
1713 		bdev = NULL;
1714 		bh = NULL;
1715 		disk_super = NULL;
1716 		if (!device) {
1717 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1718 			goto out;
1719 		}
1720 	} else {
1721 		ret = btrfs_get_bdev_and_sb(device_path,
1722 					    FMODE_WRITE | FMODE_EXCL,
1723 					    root->fs_info->bdev_holder, 0,
1724 					    &bdev, &bh);
1725 		if (ret)
1726 			goto out;
1727 		disk_super = (struct btrfs_super_block *)bh->b_data;
1728 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1729 		dev_uuid = disk_super->dev_item.uuid;
1730 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1731 					   disk_super->fsid);
1732 		if (!device) {
1733 			ret = -ENOENT;
1734 			goto error_brelse;
1735 		}
1736 	}
1737 
1738 	if (device->is_tgtdev_for_dev_replace) {
1739 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1740 		goto error_brelse;
1741 	}
1742 
1743 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1744 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1745 		goto error_brelse;
1746 	}
1747 
1748 	if (device->writeable) {
1749 		lock_chunks(root);
1750 		list_del_init(&device->dev_alloc_list);
1751 		device->fs_devices->rw_devices--;
1752 		unlock_chunks(root);
1753 		clear_super = true;
1754 	}
1755 
1756 	mutex_unlock(&uuid_mutex);
1757 	ret = btrfs_shrink_device(device, 0);
1758 	mutex_lock(&uuid_mutex);
1759 	if (ret)
1760 		goto error_undo;
1761 
1762 	/*
1763 	 * TODO: the superblock still includes this device in its num_devices
1764 	 * counter although write_all_supers() is not locked out. This
1765 	 * could give a filesystem state which requires a degraded mount.
1766 	 */
1767 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1768 	if (ret)
1769 		goto error_undo;
1770 
1771 	device->in_fs_metadata = 0;
1772 	btrfs_scrub_cancel_dev(root->fs_info, device);
1773 
1774 	/*
1775 	 * the device list mutex makes sure that we don't change
1776 	 * the device list while someone else is writing out all
1777 	 * the device supers. Whoever is writing all supers, should
1778 	 * lock the device list mutex before getting the number of
1779 	 * devices in the super block (super_copy). Conversely,
1780 	 * whoever updates the number of devices in the super block
1781 	 * (super_copy) should hold the device list mutex.
1782 	 */
1783 
1784 	cur_devices = device->fs_devices;
1785 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1786 	list_del_rcu(&device->dev_list);
1787 
1788 	device->fs_devices->num_devices--;
1789 	device->fs_devices->total_devices--;
1790 
1791 	if (device->missing)
1792 		device->fs_devices->missing_devices--;
1793 
1794 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1795 				 struct btrfs_device, dev_list);
1796 	if (device->bdev == root->fs_info->sb->s_bdev)
1797 		root->fs_info->sb->s_bdev = next_device->bdev;
1798 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1799 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1800 
1801 	if (device->bdev) {
1802 		device->fs_devices->open_devices--;
1803 		/* remove sysfs entry */
1804 		btrfs_kobj_rm_device(root->fs_info->fs_devices, device);
1805 	}
1806 
1807 	call_rcu(&device->rcu, free_device);
1808 
1809 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1810 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1811 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1812 
1813 	if (cur_devices->open_devices == 0) {
1814 		struct btrfs_fs_devices *fs_devices;
1815 		fs_devices = root->fs_info->fs_devices;
1816 		while (fs_devices) {
1817 			if (fs_devices->seed == cur_devices) {
1818 				fs_devices->seed = cur_devices->seed;
1819 				break;
1820 			}
1821 			fs_devices = fs_devices->seed;
1822 		}
1823 		cur_devices->seed = NULL;
1824 		__btrfs_close_devices(cur_devices);
1825 		free_fs_devices(cur_devices);
1826 	}
1827 
1828 	root->fs_info->num_tolerated_disk_barrier_failures =
1829 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1830 
1831 	/*
1832 	 * at this point, the device is zero sized.  We want to
1833 	 * remove it from the devices list and zero out the old super
1834 	 */
1835 	if (clear_super && disk_super) {
1836 		u64 bytenr;
1837 		int i;
1838 
1839 		/* make sure this device isn't detected as part of
1840 		 * the FS anymore
1841 		 */
1842 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1843 		set_buffer_dirty(bh);
1844 		sync_dirty_buffer(bh);
1845 
1846 		/* clear the mirror copies of the super block on the disk
1847 		 * being removed; the 0th copy has been taken care of above
1848 		 * and the loop below takes care of the rest
1849 		 */
1850 		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1851 			bytenr = btrfs_sb_offset(i);
1852 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1853 					i_size_read(bdev->bd_inode))
1854 				break;
1855 
1856 			brelse(bh);
1857 			bh = __bread(bdev, bytenr / 4096,
1858 					BTRFS_SUPER_INFO_SIZE);
1859 			if (!bh)
1860 				continue;
1861 
1862 			disk_super = (struct btrfs_super_block *)bh->b_data;
1863 
1864 			if (btrfs_super_bytenr(disk_super) != bytenr ||
1865 				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1866 				continue;
1867 			}
1868 			memset(&disk_super->magic, 0,
1869 						sizeof(disk_super->magic));
1870 			set_buffer_dirty(bh);
1871 			sync_dirty_buffer(bh);
1872 		}
1873 	}
1874 
1875 	ret = 0;
1876 
1877 	if (bdev) {
1878 		/* Notify udev that device has changed */
1879 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1880 
1881 		/* Update ctime/mtime for device path for libblkid */
1882 		update_dev_time(device_path);
1883 	}
1884 
1885 error_brelse:
1886 	brelse(bh);
1887 	if (bdev)
1888 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1889 out:
1890 	mutex_unlock(&uuid_mutex);
1891 	return ret;
1892 error_undo:
1893 	if (device->writeable) {
1894 		lock_chunks(root);
1895 		list_add(&device->dev_alloc_list,
1896 			 &root->fs_info->fs_devices->alloc_list);
1897 		device->fs_devices->rw_devices++;
1898 		unlock_chunks(root);
1899 	}
1900 	goto error_brelse;
1901 }
1902 
1903 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1904 					struct btrfs_device *srcdev)
1905 {
1906 	struct btrfs_fs_devices *fs_devices;
1907 
1908 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1909 
1910 	/*
1911 	 * For an fs with no seed device, srcdev->fs_devices will point
1912 	 * to the fs_devices of fs_info. However, when the dev being replaced
1913 	 * is a seed dev it will point to the seed's own fs_devices. In
1914 	 * short, srcdev will have the correct fs_devices in both cases.
1915 	 */
1916 	fs_devices = srcdev->fs_devices;
1917 
1918 	list_del_rcu(&srcdev->dev_list);
1919 	list_del_rcu(&srcdev->dev_alloc_list);
1920 	fs_devices->num_devices--;
1921 	if (srcdev->missing)
1922 		fs_devices->missing_devices--;
1923 
1924 	if (srcdev->writeable) {
1925 		fs_devices->rw_devices--;
1926 		/* zero out the old super if it is writable */
1927 		btrfs_scratch_superblock(srcdev);
1928 	}
1929 
1930 	if (srcdev->bdev)
1931 		fs_devices->open_devices--;
1932 }
1933 
1934 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1935 				      struct btrfs_device *srcdev)
1936 {
1937 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1938 
1939 	call_rcu(&srcdev->rcu, free_device);
1940 
1941 	/*
1942 	 * unless fs_devices is a seed fs, num_devices shouldn't go
1943 	 * to zero
1944 	 */
1945 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1946 
1947 	/* if there are no devs left we'd rather delete the fs_devices */
1948 	if (!fs_devices->num_devices) {
1949 		struct btrfs_fs_devices *tmp_fs_devices;
1950 
1951 		tmp_fs_devices = fs_info->fs_devices;
1952 		while (tmp_fs_devices) {
1953 			if (tmp_fs_devices->seed == fs_devices) {
1954 				tmp_fs_devices->seed = fs_devices->seed;
1955 				break;
1956 			}
1957 			tmp_fs_devices = tmp_fs_devices->seed;
1958 		}
1959 		fs_devices->seed = NULL;
1960 		__btrfs_close_devices(fs_devices);
1961 		free_fs_devices(fs_devices);
1962 	}
1963 }
1964 
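/*
 * Remove a dev-replace target device that is no longer needed: scratch
 * its superblock, drop it from the device list and hand it to RCU for
 * freeing.
 */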
1965 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1966 				      struct btrfs_device *tgtdev)
1967 {
1968 	struct btrfs_device *next_device;
1969 
1970 	mutex_lock(&uuid_mutex);
1971 	WARN_ON(!tgtdev);
1972 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1973 
1974 	btrfs_kobj_rm_device(fs_info->fs_devices, tgtdev);
1975 
1976 	if (tgtdev->bdev) {
1977 		btrfs_scratch_superblock(tgtdev);
1978 		fs_info->fs_devices->open_devices--;
1979 	}
1980 	fs_info->fs_devices->num_devices--;
1981 
1982 	next_device = list_entry(fs_info->fs_devices->devices.next,
1983 				 struct btrfs_device, dev_list);
1984 	if (tgtdev->bdev == fs_info->sb->s_bdev)
1985 		fs_info->sb->s_bdev = next_device->bdev;
1986 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1987 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1988 	list_del_rcu(&tgtdev->dev_list);
1989 
1990 	call_rcu(&tgtdev->rcu, free_device);
1991 
1992 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1993 	mutex_unlock(&uuid_mutex);
1994 }
1995 
1996 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1997 				     struct btrfs_device **device)
1998 {
1999 	int ret = 0;
2000 	struct btrfs_super_block *disk_super;
2001 	u64 devid;
2002 	u8 *dev_uuid;
2003 	struct block_device *bdev;
2004 	struct buffer_head *bh;
2005 
2006 	*device = NULL;
2007 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2008 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
2009 	if (ret)
2010 		return ret;
2011 	disk_super = (struct btrfs_super_block *)bh->b_data;
2012 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2013 	dev_uuid = disk_super->dev_item.uuid;
2014 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2015 				    disk_super->fsid);
2016 	brelse(bh);
2017 	if (!*device)
2018 		ret = -ENOENT;
2019 	blkdev_put(bdev, FMODE_READ);
2020 	return ret;
2021 }
2022 
2023 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2024 					 char *device_path,
2025 					 struct btrfs_device **device)
2026 {
2027 	*device = NULL;
2028 	if (strcmp(device_path, "missing") == 0) {
2029 		struct list_head *devices;
2030 		struct btrfs_device *tmp;
2031 
2032 		devices = &root->fs_info->fs_devices->devices;
2033 		/*
2034 		 * It is safe to read the devices since the volume_mutex
2035 		 * is held by the caller.
2036 		 */
2037 		list_for_each_entry(tmp, devices, dev_list) {
2038 			if (tmp->in_fs_metadata && !tmp->bdev) {
2039 				*device = tmp;
2040 				break;
2041 			}
2042 		}
2043 
2044 		if (!*device) {
2045 			btrfs_err(root->fs_info, "no missing device found");
2046 			return -ENOENT;
2047 		}
2048 
2049 		return 0;
2050 	} else {
2051 		return btrfs_find_device_by_path(root, device_path, device);
2052 	}
2053 }
2054 
2055 /*
2056  * Does all the dirty work required for changing the file system's UUID.
2057  */
2058 static int btrfs_prepare_sprout(struct btrfs_root *root)
2059 {
2060 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2061 	struct btrfs_fs_devices *old_devices;
2062 	struct btrfs_fs_devices *seed_devices;
2063 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2064 	struct btrfs_device *device;
2065 	u64 super_flags;
2066 
2067 	BUG_ON(!mutex_is_locked(&uuid_mutex));
2068 	if (!fs_devices->seeding)
2069 		return -EINVAL;
2070 
2071 	seed_devices = __alloc_fs_devices();
2072 	if (IS_ERR(seed_devices))
2073 		return PTR_ERR(seed_devices);
2074 
2075 	old_devices = clone_fs_devices(fs_devices);
2076 	if (IS_ERR(old_devices)) {
2077 		kfree(seed_devices);
2078 		return PTR_ERR(old_devices);
2079 	}
2080 
2081 	list_add(&old_devices->list, &fs_uuids);
2082 
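	/*
	 * seed_devices starts out as a byte-for-byte copy of fs_devices;
	 * its list heads and mutex are then re-initialized so the existing
	 * devices can be spliced over to it, leaving fs_devices free to
	 * describe the new writable sprout.
	 */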
2083 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2084 	seed_devices->opened = 1;
2085 	INIT_LIST_HEAD(&seed_devices->devices);
2086 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2087 	mutex_init(&seed_devices->device_list_mutex);
2088 
2089 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2090 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2091 			      synchronize_rcu);
2092 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2093 		device->fs_devices = seed_devices;
2094 
2095 	lock_chunks(root);
2096 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2097 	unlock_chunks(root);
2098 
2099 	fs_devices->seeding = 0;
2100 	fs_devices->num_devices = 0;
2101 	fs_devices->open_devices = 0;
2102 	fs_devices->missing_devices = 0;
2103 	fs_devices->rotating = 0;
2104 	fs_devices->seed = seed_devices;
2105 
2106 	generate_random_uuid(fs_devices->fsid);
2107 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2108 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2109 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2110 
2111 	super_flags = btrfs_super_flags(disk_super) &
2112 		      ~BTRFS_SUPER_FLAG_SEEDING;
2113 	btrfs_set_super_flags(disk_super, super_flags);
2114 
2115 	return 0;
2116 }
2117 
2118 /*
2119  * Store the expected generation for seed devices in device items.
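 *
 * Devices that still belong to a seeding fs_devices get their in-memory
 * generation written into the corresponding dev items.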
2120  */
2121 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2122 			       struct btrfs_root *root)
2123 {
2124 	struct btrfs_path *path;
2125 	struct extent_buffer *leaf;
2126 	struct btrfs_dev_item *dev_item;
2127 	struct btrfs_device *device;
2128 	struct btrfs_key key;
2129 	u8 fs_uuid[BTRFS_UUID_SIZE];
2130 	u8 dev_uuid[BTRFS_UUID_SIZE];
2131 	u64 devid;
2132 	int ret;
2133 
2134 	path = btrfs_alloc_path();
2135 	if (!path)
2136 		return -ENOMEM;
2137 
2138 	root = root->fs_info->chunk_root;
2139 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2140 	key.offset = 0;
2141 	key.type = BTRFS_DEV_ITEM_KEY;
2142 
2143 	while (1) {
2144 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2145 		if (ret < 0)
2146 			goto error;
2147 
2148 		leaf = path->nodes[0];
2149 next_slot:
2150 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2151 			ret = btrfs_next_leaf(root, path);
2152 			if (ret > 0)
2153 				break;
2154 			if (ret < 0)
2155 				goto error;
2156 			leaf = path->nodes[0];
2157 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2158 			btrfs_release_path(path);
2159 			continue;
2160 		}
2161 
2162 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2163 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2164 		    key.type != BTRFS_DEV_ITEM_KEY)
2165 			break;
2166 
2167 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2168 					  struct btrfs_dev_item);
2169 		devid = btrfs_device_id(leaf, dev_item);
2170 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2171 				   BTRFS_UUID_SIZE);
2172 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2173 				   BTRFS_UUID_SIZE);
2174 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2175 					   fs_uuid);
2176 		BUG_ON(!device); /* Logic error */
2177 
2178 		if (device->fs_devices->seeding) {
2179 			btrfs_set_device_generation(leaf, dev_item,
2180 						    device->generation);
2181 			btrfs_mark_buffer_dirty(leaf);
2182 		}
2183 
2184 		path->slots[0]++;
2185 		goto next_slot;
2186 	}
2187 	ret = 0;
2188 error:
2189 	btrfs_free_path(path);
2190 	return ret;
2191 }
2192 
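/*
 * Add a new device to a mounted filesystem.  This is typically reached
 * via the BTRFS_IOC_ADD_DEV ioctl (e.g. "btrfs device add /dev/sdX <mnt>").
 * If the mounted fs is a read-only seed, a new writable fs is sprouted
 * on top of it.
 */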
2193 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2194 {
2195 	struct request_queue *q;
2196 	struct btrfs_trans_handle *trans;
2197 	struct btrfs_device *device;
2198 	struct block_device *bdev;
2199 	struct list_head *devices;
2200 	struct super_block *sb = root->fs_info->sb;
2201 	struct rcu_string *name;
2202 	u64 tmp;
2203 	int seeding_dev = 0;
2204 	int ret = 0;
2205 
2206 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2207 		return -EROFS;
2208 
2209 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2210 				  root->fs_info->bdev_holder);
2211 	if (IS_ERR(bdev))
2212 		return PTR_ERR(bdev);
2213 
2214 	if (root->fs_info->fs_devices->seeding) {
2215 		seeding_dev = 1;
2216 		down_write(&sb->s_umount);
2217 		mutex_lock(&uuid_mutex);
2218 	}
2219 
2220 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2221 
2222 	devices = &root->fs_info->fs_devices->devices;
2223 
2224 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2225 	list_for_each_entry(device, devices, dev_list) {
2226 		if (device->bdev == bdev) {
2227 			ret = -EEXIST;
2228 			mutex_unlock(
2229 				&root->fs_info->fs_devices->device_list_mutex);
2230 			goto error;
2231 		}
2232 	}
2233 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2234 
2235 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2236 	if (IS_ERR(device)) {
2237 		/* we can safely leave the fs_devices entry around */
2238 		ret = PTR_ERR(device);
2239 		goto error;
2240 	}
2241 
2242 	name = rcu_string_strdup(device_path, GFP_NOFS);
2243 	if (!name) {
2244 		kfree(device);
2245 		ret = -ENOMEM;
2246 		goto error;
2247 	}
2248 	rcu_assign_pointer(device->name, name);
2249 
2250 	trans = btrfs_start_transaction(root, 0);
2251 	if (IS_ERR(trans)) {
2252 		rcu_string_free(device->name);
2253 		kfree(device);
2254 		ret = PTR_ERR(trans);
2255 		goto error;
2256 	}
2257 
2258 	q = bdev_get_queue(bdev);
2259 	if (blk_queue_discard(q))
2260 		device->can_discard = 1;
2261 	device->writeable = 1;
2262 	device->generation = trans->transid;
2263 	device->io_width = root->sectorsize;
2264 	device->io_align = root->sectorsize;
2265 	device->sector_size = root->sectorsize;
2266 	device->total_bytes = i_size_read(bdev->bd_inode);
2267 	device->disk_total_bytes = device->total_bytes;
2268 	device->commit_total_bytes = device->total_bytes;
2269 	device->dev_root = root->fs_info->dev_root;
2270 	device->bdev = bdev;
2271 	device->in_fs_metadata = 1;
2272 	device->is_tgtdev_for_dev_replace = 0;
2273 	device->mode = FMODE_EXCL;
2274 	device->dev_stats_valid = 1;
2275 	set_blocksize(device->bdev, 4096);
2276 
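	/*
	 * Sprouting: flip the superblock writable and detach the seed
	 * devices into their own fs_devices before the new device is
	 * added below.
	 */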
2277 	if (seeding_dev) {
2278 		sb->s_flags &= ~MS_RDONLY;
2279 		ret = btrfs_prepare_sprout(root);
2280 		BUG_ON(ret); /* -ENOMEM */
2281 	}
2282 
2283 	device->fs_devices = root->fs_info->fs_devices;
2284 
2285 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2286 	lock_chunks(root);
2287 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2288 	list_add(&device->dev_alloc_list,
2289 		 &root->fs_info->fs_devices->alloc_list);
2290 	root->fs_info->fs_devices->num_devices++;
2291 	root->fs_info->fs_devices->open_devices++;
2292 	root->fs_info->fs_devices->rw_devices++;
2293 	root->fs_info->fs_devices->total_devices++;
2294 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2295 
2296 	spin_lock(&root->fs_info->free_chunk_lock);
2297 	root->fs_info->free_chunk_space += device->total_bytes;
2298 	spin_unlock(&root->fs_info->free_chunk_lock);
2299 
2300 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2301 		root->fs_info->fs_devices->rotating = 1;
2302 
2303 	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2304 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2305 				    tmp + device->total_bytes);
2306 
2307 	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2308 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2309 				    tmp + 1);
2310 
2311 	/* add sysfs device entry */
2312 	btrfs_kobj_add_device(root->fs_info->fs_devices, device);
2313 
2314 	/*
2315 	 * we've got more storage, clear any full flags on the space
2316 	 * infos
2317 	 */
2318 	btrfs_clear_space_info_full(root->fs_info);
2319 
2320 	unlock_chunks(root);
2321 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2322 
2323 	if (seeding_dev) {
2324 		lock_chunks(root);
2325 		ret = init_first_rw_device(trans, root, device);
2326 		unlock_chunks(root);
2327 		if (ret) {
2328 			btrfs_abort_transaction(trans, root, ret);
2329 			goto error_trans;
2330 		}
2331 	}
2332 
2333 	ret = btrfs_add_device(trans, root, device);
2334 	if (ret) {
2335 		btrfs_abort_transaction(trans, root, ret);
2336 		goto error_trans;
2337 	}
2338 
2339 	if (seeding_dev) {
2340 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2341 
2342 		ret = btrfs_finish_sprout(trans, root);
2343 		if (ret) {
2344 			btrfs_abort_transaction(trans, root, ret);
2345 			goto error_trans;
2346 		}
2347 
2348 		/* Sprouting would change the fsid of the mounted root,
2349 		 * so rename the fsid in sysfs
2350 		 */
2351 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2352 						root->fs_info->fsid);
2353 		if (kobject_rename(&root->fs_info->fs_devices->super_kobj,
2354 								fsid_buf))
2355 			pr_warn("BTRFS: sysfs: failed to create fsid for sprout\n");
2356 	}
2357 
2358 	root->fs_info->num_tolerated_disk_barrier_failures =
2359 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2360 	ret = btrfs_commit_transaction(trans, root);
2361 
2362 	if (seeding_dev) {
2363 		mutex_unlock(&uuid_mutex);
2364 		up_write(&sb->s_umount);
2365 
2366 		if (ret) /* transaction commit */
2367 			return ret;
2368 
2369 		ret = btrfs_relocate_sys_chunks(root);
2370 		if (ret < 0)
2371 			btrfs_error(root->fs_info, ret,
2372 				    "Failed to relocate sys chunks after "
2373 				    "device initialization. This can be fixed "
2374 				    "using the \"btrfs balance\" command.");
2375 		trans = btrfs_attach_transaction(root);
2376 		if (IS_ERR(trans)) {
2377 			if (PTR_ERR(trans) == -ENOENT)
2378 				return 0;
2379 			return PTR_ERR(trans);
2380 		}
2381 		ret = btrfs_commit_transaction(trans, root);
2382 	}
2383 
2384 	/* Update ctime/mtime for libblkid */
2385 	update_dev_time(device_path);
2386 	return ret;
2387 
2388 error_trans:
2389 	btrfs_end_transaction(trans, root);
2390 	rcu_string_free(device->name);
2391 	btrfs_kobj_rm_device(root->fs_info->fs_devices, device);
2392 	kfree(device);
2393 error:
2394 	blkdev_put(bdev, FMODE_EXCL);
2395 	if (seeding_dev) {
2396 		mutex_unlock(&uuid_mutex);
2397 		up_write(&sb->s_umount);
2398 	}
2399 	return ret;
2400 }
2401 
2402 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2403 				  struct btrfs_device *srcdev,
2404 				  struct btrfs_device **device_out)
2405 {
2406 	struct request_queue *q;
2407 	struct btrfs_device *device;
2408 	struct block_device *bdev;
2409 	struct btrfs_fs_info *fs_info = root->fs_info;
2410 	struct list_head *devices;
2411 	struct rcu_string *name;
2412 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2413 	int ret = 0;
2414 
2415 	*device_out = NULL;
2416 	if (fs_info->fs_devices->seeding) {
2417 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2418 		return -EINVAL;
2419 	}
2420 
2421 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2422 				  fs_info->bdev_holder);
2423 	if (IS_ERR(bdev)) {
2424 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2425 		return PTR_ERR(bdev);
2426 	}
2427 
2428 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2429 
2430 	devices = &fs_info->fs_devices->devices;
2431 	list_for_each_entry(device, devices, dev_list) {
2432 		if (device->bdev == bdev) {
2433 			btrfs_err(fs_info, "target device is in the filesystem!");
2434 			ret = -EEXIST;
2435 			goto error;
2436 		}
2437 	}
2438 
2439 
2440 	if (i_size_read(bdev->bd_inode) <
2441 	    btrfs_device_get_total_bytes(srcdev)) {
2442 		btrfs_err(fs_info, "target device is smaller than source device!");
2443 		ret = -EINVAL;
2444 		goto error;
2445 	}
2446 
2447 
2448 	device = btrfs_alloc_device(NULL, &devid, NULL);
2449 	if (IS_ERR(device)) {
2450 		ret = PTR_ERR(device);
2451 		goto error;
2452 	}
2453 
2454 	name = rcu_string_strdup(device_path, GFP_NOFS);
2455 	if (!name) {
2456 		kfree(device);
2457 		ret = -ENOMEM;
2458 		goto error;
2459 	}
2460 	rcu_assign_pointer(device->name, name);
2461 
2462 	q = bdev_get_queue(bdev);
2463 	if (blk_queue_discard(q))
2464 		device->can_discard = 1;
2465 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2466 	device->writeable = 1;
2467 	device->generation = 0;
2468 	device->io_width = root->sectorsize;
2469 	device->io_align = root->sectorsize;
2470 	device->sector_size = root->sectorsize;
2471 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2472 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2473 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2474 	ASSERT(list_empty(&srcdev->resized_list));
2475 	device->commit_total_bytes = srcdev->commit_total_bytes;
2476 	device->commit_bytes_used = device->bytes_used;
2477 	device->dev_root = fs_info->dev_root;
2478 	device->bdev = bdev;
2479 	device->in_fs_metadata = 1;
2480 	device->is_tgtdev_for_dev_replace = 1;
2481 	device->mode = FMODE_EXCL;
2482 	device->dev_stats_valid = 1;
2483 	set_blocksize(device->bdev, 4096);
2484 	device->fs_devices = fs_info->fs_devices;
2485 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2486 	fs_info->fs_devices->num_devices++;
2487 	fs_info->fs_devices->open_devices++;
2488 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2489 
2490 	*device_out = device;
2491 	return ret;
2492 
2493 error:
2494 	blkdev_put(bdev, FMODE_EXCL);
2495 	return ret;
2496 }
2497 
2498 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2499 					      struct btrfs_device *tgtdev)
2500 {
2501 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2502 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2503 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2504 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2505 	tgtdev->dev_root = fs_info->dev_root;
2506 	tgtdev->in_fs_metadata = 1;
2507 }
2508 
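/*
 * Write the in-memory state of @device back into its dev item in the
 * chunk tree.
 */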
2509 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2510 					struct btrfs_device *device)
2511 {
2512 	int ret;
2513 	struct btrfs_path *path;
2514 	struct btrfs_root *root;
2515 	struct btrfs_dev_item *dev_item;
2516 	struct extent_buffer *leaf;
2517 	struct btrfs_key key;
2518 
2519 	root = device->dev_root->fs_info->chunk_root;
2520 
2521 	path = btrfs_alloc_path();
2522 	if (!path)
2523 		return -ENOMEM;
2524 
2525 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2526 	key.type = BTRFS_DEV_ITEM_KEY;
2527 	key.offset = device->devid;
2528 
2529 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2530 	if (ret < 0)
2531 		goto out;
2532 
2533 	if (ret > 0) {
2534 		ret = -ENOENT;
2535 		goto out;
2536 	}
2537 
2538 	leaf = path->nodes[0];
2539 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2540 
2541 	btrfs_set_device_id(leaf, dev_item, device->devid);
2542 	btrfs_set_device_type(leaf, dev_item, device->type);
2543 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2544 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2545 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2546 	btrfs_set_device_total_bytes(leaf, dev_item,
2547 				     btrfs_device_get_disk_total_bytes(device));
2548 	btrfs_set_device_bytes_used(leaf, dev_item,
2549 				    btrfs_device_get_bytes_used(device));
2550 	btrfs_mark_buffer_dirty(leaf);
2551 
2552 out:
2553 	btrfs_free_path(path);
2554 	return ret;
2555 }
2556 
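/*
 * Grow @device to @new_size bytes, adjusting the superblock totals,
 * updating the dev item and queueing the device on the per-fs resized
 * list.  This is the grow half of the resize ioctl (e.g.
 * "btrfs filesystem resize max <mnt>").
 */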
2557 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2558 		      struct btrfs_device *device, u64 new_size)
2559 {
2560 	struct btrfs_super_block *super_copy =
2561 		device->dev_root->fs_info->super_copy;
2562 	struct btrfs_fs_devices *fs_devices;
2563 	u64 old_total;
2564 	u64 diff;
2565 
2566 	if (!device->writeable)
2567 		return -EACCES;
2568 
2569 	lock_chunks(device->dev_root);
2570 	old_total = btrfs_super_total_bytes(super_copy);
2571 	diff = new_size - device->total_bytes;
2572 
2573 	if (new_size <= device->total_bytes ||
2574 	    device->is_tgtdev_for_dev_replace) {
2575 		unlock_chunks(device->dev_root);
2576 		return -EINVAL;
2577 	}
2578 
2579 	fs_devices = device->dev_root->fs_info->fs_devices;
2580 
2581 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2582 	device->fs_devices->total_rw_bytes += diff;
2583 
2584 	btrfs_device_set_total_bytes(device, new_size);
2585 	btrfs_device_set_disk_total_bytes(device, new_size);
2586 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2587 	if (list_empty(&device->resized_list))
2588 		list_add_tail(&device->resized_list,
2589 			      &fs_devices->resized_devices);
2590 	unlock_chunks(device->dev_root);
2591 
2592 	return btrfs_update_device(trans, device);
2593 }
2594 
2595 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2596 			    struct btrfs_root *root, u64 chunk_objectid,
2597 			    u64 chunk_offset)
2598 {
2599 	int ret;
2600 	struct btrfs_path *path;
2601 	struct btrfs_key key;
2602 
2603 	root = root->fs_info->chunk_root;
2604 	path = btrfs_alloc_path();
2605 	if (!path)
2606 		return -ENOMEM;
2607 
2608 	key.objectid = chunk_objectid;
2609 	key.offset = chunk_offset;
2610 	key.type = BTRFS_CHUNK_ITEM_KEY;
2611 
2612 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2613 	if (ret < 0)
2614 		goto out;
2615 	else if (ret > 0) { /* Logic error or corruption */
2616 		btrfs_error(root->fs_info, -ENOENT,
2617 			    "Failed lookup while freeing chunk.");
2618 		ret = -ENOENT;
2619 		goto out;
2620 	}
2621 
2622 	ret = btrfs_del_item(trans, root, path);
2623 	if (ret < 0)
2624 		btrfs_error(root->fs_info, ret,
2625 			    "Failed to delete chunk item.");
2626 out:
2627 	btrfs_free_path(path);
2628 	return ret;
2629 }
2630 
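/*
 * Remove the copy of a system chunk from the sys_chunk_array held in the
 * superblock.
 */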
2631 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2632 			chunk_offset)
2633 {
2634 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2635 	struct btrfs_disk_key *disk_key;
2636 	struct btrfs_chunk *chunk;
2637 	u8 *ptr;
2638 	int ret = 0;
2639 	u32 num_stripes;
2640 	u32 array_size;
2641 	u32 len = 0;
2642 	u32 cur;
2643 	struct btrfs_key key;
2644 
2645 	lock_chunks(root);
2646 	array_size = btrfs_super_sys_array_size(super_copy);
2647 
2648 	ptr = super_copy->sys_chunk_array;
2649 	cur = 0;
2650 
2651 	while (cur < array_size) {
2652 		disk_key = (struct btrfs_disk_key *)ptr;
2653 		btrfs_disk_key_to_cpu(&key, disk_key);
2654 
2655 		len = sizeof(*disk_key);
2656 
2657 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2658 			chunk = (struct btrfs_chunk *)(ptr + len);
2659 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2660 			len += btrfs_chunk_item_size(num_stripes);
2661 		} else {
2662 			ret = -EIO;
2663 			break;
2664 		}
2665 		if (key.objectid == chunk_objectid &&
2666 		    key.offset == chunk_offset) {
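			/* squeeze the matching entry out of the array */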
2667 			memmove(ptr, ptr + len, array_size - (cur + len));
2668 			array_size -= len;
2669 			btrfs_set_super_sys_array_size(super_copy, array_size);
2670 		} else {
2671 			ptr += len;
2672 			cur += len;
2673 		}
2674 	}
2675 	unlock_chunks(root);
2676 	return ret;
2677 }
2678 
2679 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2680 		       struct btrfs_root *root, u64 chunk_offset)
2681 {
2682 	struct extent_map_tree *em_tree;
2683 	struct extent_map *em;
2684 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2685 	struct map_lookup *map;
2686 	u64 dev_extent_len = 0;
2687 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2688 	int i, ret = 0;
2689 
2690 	/* Just in case */
2691 	root = root->fs_info->chunk_root;
2692 	em_tree = &root->fs_info->mapping_tree.map_tree;
2693 
2694 	read_lock(&em_tree->lock);
2695 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2696 	read_unlock(&em_tree->lock);
2697 
2698 	if (!em || em->start > chunk_offset ||
2699 	    em->start + em->len < chunk_offset) {
2700 		/*
2701 		 * This is a logic error, but we don't want to just rely on the
2702 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2703 		 * do anything we still error out.
2704 		 */
2705 		ASSERT(0);
2706 		if (em)
2707 			free_extent_map(em);
2708 		return -EINVAL;
2709 	}
2710 	map = (struct map_lookup *)em->bdev;
2711 	lock_chunks(root->fs_info->chunk_root);
2712 	check_system_chunk(trans, extent_root, map->type);
2713 	unlock_chunks(root->fs_info->chunk_root);
2714 
2715 	for (i = 0; i < map->num_stripes; i++) {
2716 		struct btrfs_device *device = map->stripes[i].dev;
2717 		ret = btrfs_free_dev_extent(trans, device,
2718 					    map->stripes[i].physical,
2719 					    &dev_extent_len);
2720 		if (ret) {
2721 			btrfs_abort_transaction(trans, root, ret);
2722 			goto out;
2723 		}
2724 
2725 		if (device->bytes_used > 0) {
2726 			lock_chunks(root);
2727 			btrfs_device_set_bytes_used(device,
2728 					device->bytes_used - dev_extent_len);
2729 			spin_lock(&root->fs_info->free_chunk_lock);
2730 			root->fs_info->free_chunk_space += dev_extent_len;
2731 			spin_unlock(&root->fs_info->free_chunk_lock);
2732 			btrfs_clear_space_info_full(root->fs_info);
2733 			unlock_chunks(root);
2734 		}
2735 
2736 		if (map->stripes[i].dev) {
2737 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2738 			if (ret) {
2739 				btrfs_abort_transaction(trans, root, ret);
2740 				goto out;
2741 			}
2742 		}
2743 	}
2744 	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2745 	if (ret) {
2746 		btrfs_abort_transaction(trans, root, ret);
2747 		goto out;
2748 	}
2749 
2750 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2751 
2752 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2753 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2754 		if (ret) {
2755 			btrfs_abort_transaction(trans, root, ret);
2756 			goto out;
2757 		}
2758 	}
2759 
2760 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2761 	if (ret) {
2762 		btrfs_abort_transaction(trans, extent_root, ret);
2763 		goto out;
2764 	}
2765 
2766 out:
2767 	/* once for us */
2768 	free_extent_map(em);
2769 	return ret;
2770 }
2771 
2772 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2773 {
2774 	struct btrfs_root *extent_root;
2775 	struct btrfs_trans_handle *trans;
2776 	int ret;
2777 
2778 	root = root->fs_info->chunk_root;
2779 	extent_root = root->fs_info->extent_root;
2780 
2781 	/*
2782 	 * Prevent races with automatic removal of unused block groups.
2783 	 * After we relocate and before we remove the chunk with offset
2784 	 * chunk_offset, automatic removal of the block group can kick in,
2785 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2786 	 *
2787 	 * Make sure to acquire this mutex before doing a tree search (dev
2788 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2789 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2790 	 * we release the path used to search the chunk/dev tree and before
2791 	 * the current task acquires this mutex and calls us.
2792 	 */
2793 	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2794 
2795 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2796 	if (ret)
2797 		return -ENOSPC;
2798 
2799 	/* step one, relocate all the extents inside this chunk */
2800 	btrfs_scrub_pause(root);
2801 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2802 	btrfs_scrub_continue(root);
2803 	if (ret)
2804 		return ret;
2805 
2806 	trans = btrfs_start_transaction(root, 0);
2807 	if (IS_ERR(trans)) {
2808 		ret = PTR_ERR(trans);
2809 		btrfs_std_error(root->fs_info, ret);
2810 		return ret;
2811 	}
2812 
2813 	/*
2814 	 * step two, delete the device extents and the
2815 	 * chunk tree entries
2816 	 */
2817 	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2818 	btrfs_end_transaction(trans, root);
2819 	return ret;
2820 }
2821 
2822 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2823 {
2824 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2825 	struct btrfs_path *path;
2826 	struct extent_buffer *leaf;
2827 	struct btrfs_chunk *chunk;
2828 	struct btrfs_key key;
2829 	struct btrfs_key found_key;
2830 	u64 chunk_type;
2831 	bool retried = false;
2832 	int failed = 0;
2833 	int ret;
2834 
2835 	path = btrfs_alloc_path();
2836 	if (!path)
2837 		return -ENOMEM;
2838 
2839 again:
2840 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2841 	key.offset = (u64)-1;
2842 	key.type = BTRFS_CHUNK_ITEM_KEY;
2843 
2844 	while (1) {
2845 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2846 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2847 		if (ret < 0) {
2848 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2849 			goto error;
2850 		}
2851 		BUG_ON(ret == 0); /* Corruption */
2852 
2853 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2854 					  key.type);
2855 		if (ret)
2856 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2857 		if (ret < 0)
2858 			goto error;
2859 		if (ret > 0)
2860 			break;
2861 
2862 		leaf = path->nodes[0];
2863 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2864 
2865 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2866 				       struct btrfs_chunk);
2867 		chunk_type = btrfs_chunk_type(leaf, chunk);
2868 		btrfs_release_path(path);
2869 
2870 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2871 			ret = btrfs_relocate_chunk(chunk_root,
2872 						   found_key.offset);
2873 			if (ret == -ENOSPC)
2874 				failed++;
2875 			else
2876 				BUG_ON(ret);
2877 		}
2878 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2879 
2880 		if (found_key.offset == 0)
2881 			break;
2882 		key.offset = found_key.offset - 1;
2883 	}
2884 	ret = 0;
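	/*
	 * Chunks that failed with -ENOSPC are retried exactly once:
	 * relocating the other system chunks may have freed up room.
	 */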
2885 	if (failed && !retried) {
2886 		failed = 0;
2887 		retried = true;
2888 		goto again;
2889 	} else if (WARN_ON(failed && retried)) {
2890 		ret = -ENOSPC;
2891 	}
2892 error:
2893 	btrfs_free_path(path);
2894 	return ret;
2895 }
2896 
2897 static int insert_balance_item(struct btrfs_root *root,
2898 			       struct btrfs_balance_control *bctl)
2899 {
2900 	struct btrfs_trans_handle *trans;
2901 	struct btrfs_balance_item *item;
2902 	struct btrfs_disk_balance_args disk_bargs;
2903 	struct btrfs_path *path;
2904 	struct extent_buffer *leaf;
2905 	struct btrfs_key key;
2906 	int ret, err;
2907 
2908 	path = btrfs_alloc_path();
2909 	if (!path)
2910 		return -ENOMEM;
2911 
2912 	trans = btrfs_start_transaction(root, 0);
2913 	if (IS_ERR(trans)) {
2914 		btrfs_free_path(path);
2915 		return PTR_ERR(trans);
2916 	}
2917 
2918 	key.objectid = BTRFS_BALANCE_OBJECTID;
2919 	key.type = BTRFS_BALANCE_ITEM_KEY;
2920 	key.offset = 0;
2921 
2922 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2923 				      sizeof(*item));
2924 	if (ret)
2925 		goto out;
2926 
2927 	leaf = path->nodes[0];
2928 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2929 
2930 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2931 
2932 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2933 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2934 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2935 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2936 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2937 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2938 
2939 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2940 
2941 	btrfs_mark_buffer_dirty(leaf);
2942 out:
2943 	btrfs_free_path(path);
2944 	err = btrfs_commit_transaction(trans, root);
2945 	if (err && !ret)
2946 		ret = err;
2947 	return ret;
2948 }
2949 
2950 static int del_balance_item(struct btrfs_root *root)
2951 {
2952 	struct btrfs_trans_handle *trans;
2953 	struct btrfs_path *path;
2954 	struct btrfs_key key;
2955 	int ret, err;
2956 
2957 	path = btrfs_alloc_path();
2958 	if (!path)
2959 		return -ENOMEM;
2960 
2961 	trans = btrfs_start_transaction(root, 0);
2962 	if (IS_ERR(trans)) {
2963 		btrfs_free_path(path);
2964 		return PTR_ERR(trans);
2965 	}
2966 
2967 	key.objectid = BTRFS_BALANCE_OBJECTID;
2968 	key.type = BTRFS_BALANCE_ITEM_KEY;
2969 	key.offset = 0;
2970 
2971 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2972 	if (ret < 0)
2973 		goto out;
2974 	if (ret > 0) {
2975 		ret = -ENOENT;
2976 		goto out;
2977 	}
2978 
2979 	ret = btrfs_del_item(trans, root, path);
2980 out:
2981 	btrfs_free_path(path);
2982 	err = btrfs_commit_transaction(trans, root);
2983 	if (err && !ret)
2984 		ret = err;
2985 	return ret;
2986 }
2987 
2988 /*
2989  * This is a heuristic used to reduce the number of chunks balanced on
2990  * resume after balance was interrupted.
2991  */
2992 static void update_balance_args(struct btrfs_balance_control *bctl)
2993 {
2994 	/*
2995 	 * Turn on soft mode for chunk types that were being converted.
2996 	 */
2997 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2998 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2999 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3000 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3001 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3002 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3003 
3004 	/*
3005 	 * Turn on the usage filter if it is not already in use.  The idea is
3006 	 * that chunks that we have already balanced should be
3007 	 * reasonably full.  Don't do it for chunks that are being
3008 	 * converted - that will keep us from relocating unconverted
3009 	 * (albeit full) chunks.
3010 	 */
3011 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3012 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3013 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3014 		bctl->data.usage = 90;
3015 	}
3016 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3017 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3018 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3019 		bctl->sys.usage = 90;
3020 	}
3021 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3022 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3023 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3024 		bctl->meta.usage = 90;
3025 	}
3026 }
3027 
3028 /*
3029  * Should be called with both balance and volume mutexes held to
3030  * serialize other volume operations (add_dev/rm_dev/resize) with
3031  * restriper.  Same goes for unset_balance_control.
3032  */
3033 static void set_balance_control(struct btrfs_balance_control *bctl)
3034 {
3035 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3036 
3037 	BUG_ON(fs_info->balance_ctl);
3038 
3039 	spin_lock(&fs_info->balance_lock);
3040 	fs_info->balance_ctl = bctl;
3041 	spin_unlock(&fs_info->balance_lock);
3042 }
3043 
3044 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3045 {
3046 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3047 
3048 	BUG_ON(!fs_info->balance_ctl);
3049 
3050 	spin_lock(&fs_info->balance_lock);
3051 	fs_info->balance_ctl = NULL;
3052 	spin_unlock(&fs_info->balance_lock);
3053 
3054 	kfree(bctl);
3055 }
3056 
3057 /*
3058  * Balance filters.  Return 1 if chunk should be filtered out
3059  * (should not be balanced).
3060  */
3061 static int chunk_profiles_filter(u64 chunk_type,
3062 				 struct btrfs_balance_args *bargs)
3063 {
3064 	chunk_type = chunk_to_extended(chunk_type) &
3065 				BTRFS_EXTENDED_PROFILE_MASK;
3066 
3067 	if (bargs->profiles & chunk_type)
3068 		return 0;
3069 
3070 	return 1;
3071 }
3072 
3073 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3074 			      struct btrfs_balance_args *bargs)
3075 {
3076 	struct btrfs_block_group_cache *cache;
3077 	u64 chunk_used, user_thresh;
3078 	int ret = 1;
3079 
3080 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3081 	chunk_used = btrfs_block_group_used(&cache->item);
3082 
3083 	if (bargs->usage == 0)
3084 		user_thresh = 1;
3085 	else if (bargs->usage > 100)
3086 		user_thresh = cache->key.offset;
3087 	else
3088 		user_thresh = div_factor_fine(cache->key.offset,
3089 					      bargs->usage);
3090 
3091 	if (chunk_used < user_thresh)
3092 		ret = 0;
3093 
3094 	btrfs_put_block_group(cache);
3095 	return ret;
3096 }
3097 
3098 static int chunk_devid_filter(struct extent_buffer *leaf,
3099 			      struct btrfs_chunk *chunk,
3100 			      struct btrfs_balance_args *bargs)
3101 {
3102 	struct btrfs_stripe *stripe;
3103 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3104 	int i;
3105 
3106 	for (i = 0; i < num_stripes; i++) {
3107 		stripe = btrfs_stripe_nr(chunk, i);
3108 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3109 			return 0;
3110 	}
3111 
3112 	return 1;
3113 }
3114 
3115 /* [pstart, pend) */
3116 static int chunk_drange_filter(struct extent_buffer *leaf,
3117 			       struct btrfs_chunk *chunk,
3118 			       u64 chunk_offset,
3119 			       struct btrfs_balance_args *bargs)
3120 {
3121 	struct btrfs_stripe *stripe;
3122 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3123 	u64 stripe_offset;
3124 	u64 stripe_length;
3125 	int factor;
3126 	int i;
3127 
3128 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3129 		return 0;
3130 
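	/*
	 * factor is the number of independent stripes the chunk length is
	 * spread over: mirrored profiles halve num_stripes, parity profiles
	 * drop the parity stripes, so chunk_length / factor below gives the
	 * length of one stripe on a device.
	 */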
3131 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3132 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3133 		factor = num_stripes / 2;
3134 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3135 		factor = num_stripes - 1;
3136 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3137 		factor = num_stripes - 2;
3138 	} else {
3139 		factor = num_stripes;
3140 	}
3141 
3142 	for (i = 0; i < num_stripes; i++) {
3143 		stripe = btrfs_stripe_nr(chunk, i);
3144 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3145 			continue;
3146 
3147 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3148 		stripe_length = btrfs_chunk_length(leaf, chunk);
3149 		stripe_length = div_u64(stripe_length, factor);
3150 
3151 		if (stripe_offset < bargs->pend &&
3152 		    stripe_offset + stripe_length > bargs->pstart)
3153 			return 0;
3154 	}
3155 
3156 	return 1;
3157 }
3158 
3159 /* [vstart, vend) */
3160 static int chunk_vrange_filter(struct extent_buffer *leaf,
3161 			       struct btrfs_chunk *chunk,
3162 			       u64 chunk_offset,
3163 			       struct btrfs_balance_args *bargs)
3164 {
3165 	if (chunk_offset < bargs->vend &&
3166 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3167 		/* at least part of the chunk is inside this vrange */
3168 		return 0;
3169 
3170 	return 1;
3171 }
3172 
3173 static int chunk_soft_convert_filter(u64 chunk_type,
3174 				     struct btrfs_balance_args *bargs)
3175 {
3176 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3177 		return 0;
3178 
3179 	chunk_type = chunk_to_extended(chunk_type) &
3180 				BTRFS_EXTENDED_PROFILE_MASK;
3181 
3182 	if (bargs->target == chunk_type)
3183 		return 1;
3184 
3185 	return 0;
3186 }
3187 
3188 static int should_balance_chunk(struct btrfs_root *root,
3189 				struct extent_buffer *leaf,
3190 				struct btrfs_chunk *chunk, u64 chunk_offset)
3191 {
3192 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3193 	struct btrfs_balance_args *bargs = NULL;
3194 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3195 
3196 	/* type filter */
3197 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3198 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3199 		return 0;
3200 	}
3201 
3202 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3203 		bargs = &bctl->data;
3204 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3205 		bargs = &bctl->sys;
3206 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3207 		bargs = &bctl->meta;
3208 
3209 	/* profiles filter */
3210 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3211 	    chunk_profiles_filter(chunk_type, bargs)) {
3212 		return 0;
3213 	}
3214 
3215 	/* usage filter */
3216 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3217 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3218 		return 0;
3219 	}
3220 
3221 	/* devid filter */
3222 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3223 	    chunk_devid_filter(leaf, chunk, bargs)) {
3224 		return 0;
3225 	}
3226 
3227 	/* drange filter, makes sense only with devid filter */
3228 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3229 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3230 		return 0;
3231 	}
3232 
3233 	/* vrange filter */
3234 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3235 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3236 		return 0;
3237 	}
3238 
3239 	/* soft profile changing mode */
3240 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3241 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3242 		return 0;
3243 	}
3244 
3245 	/*
3246 	 * limited by count, must be the last filter
3247 	 */
3248 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3249 		if (bargs->limit == 0)
3250 			return 0;
3251 		else
3252 			bargs->limit--;
3253 	}
3254 
3255 	return 1;
3256 }
3257 
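/*
 * The main balance loop.  The chunk tree is walked twice: a first,
 * counting-only pass fills in bctl->stat.expected, then a second pass
 * relocates every chunk that passes the filters in should_balance_chunk().
 */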
3258 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3259 {
3260 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3261 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3262 	struct btrfs_root *dev_root = fs_info->dev_root;
3263 	struct list_head *devices;
3264 	struct btrfs_device *device;
3265 	u64 old_size;
3266 	u64 size_to_free;
3267 	struct btrfs_chunk *chunk;
3268 	struct btrfs_path *path;
3269 	struct btrfs_key key;
3270 	struct btrfs_key found_key;
3271 	struct btrfs_trans_handle *trans;
3272 	struct extent_buffer *leaf;
3273 	int slot;
3274 	int ret;
3275 	int enospc_errors = 0;
3276 	bool counting = true;
3277 	u64 limit_data = bctl->data.limit;
3278 	u64 limit_meta = bctl->meta.limit;
3279 	u64 limit_sys = bctl->sys.limit;
3280 
3281 	/* step one, make some room on all the devices */
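	/*
	 * Writable devices with less than ~1MB of slack are shrunk by that
	 * amount and immediately grown back, pushing any chunks at their
	 * tail end elsewhere.
	 */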
3282 	devices = &fs_info->fs_devices->devices;
3283 	list_for_each_entry(device, devices, dev_list) {
3284 		old_size = btrfs_device_get_total_bytes(device);
3285 		size_to_free = div_factor(old_size, 1);
3286 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
3287 		if (!device->writeable ||
3288 		    btrfs_device_get_total_bytes(device) -
3289 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3290 		    device->is_tgtdev_for_dev_replace)
3291 			continue;
3292 
3293 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3294 		if (ret == -ENOSPC)
3295 			break;
3296 		BUG_ON(ret);
3297 
3298 		trans = btrfs_start_transaction(dev_root, 0);
3299 		BUG_ON(IS_ERR(trans));
3300 
3301 		ret = btrfs_grow_device(trans, device, old_size);
3302 		BUG_ON(ret);
3303 
3304 		btrfs_end_transaction(trans, dev_root);
3305 	}
3306 
3307 	/* step two, relocate all the chunks */
3308 	path = btrfs_alloc_path();
3309 	if (!path) {
3310 		ret = -ENOMEM;
3311 		goto error;
3312 	}
3313 
3314 	/* zero out stat counters */
3315 	spin_lock(&fs_info->balance_lock);
3316 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3317 	spin_unlock(&fs_info->balance_lock);
3318 again:
3319 	if (!counting) {
3320 		bctl->data.limit = limit_data;
3321 		bctl->meta.limit = limit_meta;
3322 		bctl->sys.limit = limit_sys;
3323 	}
3324 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3325 	key.offset = (u64)-1;
3326 	key.type = BTRFS_CHUNK_ITEM_KEY;
3327 
3328 	while (1) {
3329 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3330 		    atomic_read(&fs_info->balance_cancel_req)) {
3331 			ret = -ECANCELED;
3332 			goto error;
3333 		}
3334 
3335 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3336 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3337 		if (ret < 0) {
3338 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3339 			goto error;
3340 		}
3341 
3342 		/*
3343 		 * this shouldn't happen, it means the last relocate
3344 		 * failed
3345 		 */
3346 		if (ret == 0)
3347 			BUG(); /* FIXME break ? */
3348 
3349 		ret = btrfs_previous_item(chunk_root, path, 0,
3350 					  BTRFS_CHUNK_ITEM_KEY);
3351 		if (ret) {
3352 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3353 			ret = 0;
3354 			break;
3355 		}
3356 
3357 		leaf = path->nodes[0];
3358 		slot = path->slots[0];
3359 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3360 
3361 		if (found_key.objectid != key.objectid) {
3362 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3363 			break;
3364 		}
3365 
3366 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3367 
3368 		if (!counting) {
3369 			spin_lock(&fs_info->balance_lock);
3370 			bctl->stat.considered++;
3371 			spin_unlock(&fs_info->balance_lock);
3372 		}
3373 
3374 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3375 					   found_key.offset);
3376 		btrfs_release_path(path);
3377 		if (!ret) {
3378 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3379 			goto loop;
3380 		}
3381 
3382 		if (counting) {
3383 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3384 			spin_lock(&fs_info->balance_lock);
3385 			bctl->stat.expected++;
3386 			spin_unlock(&fs_info->balance_lock);
3387 			goto loop;
3388 		}
3389 
3390 		ret = btrfs_relocate_chunk(chunk_root,
3391 					   found_key.offset);
3392 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3393 		if (ret && ret != -ENOSPC)
3394 			goto error;
3395 		if (ret == -ENOSPC) {
3396 			enospc_errors++;
3397 		} else {
3398 			spin_lock(&fs_info->balance_lock);
3399 			bctl->stat.completed++;
3400 			spin_unlock(&fs_info->balance_lock);
3401 		}
3402 loop:
3403 		if (found_key.offset == 0)
3404 			break;
3405 		key.offset = found_key.offset - 1;
3406 	}
3407 
3408 	if (counting) {
3409 		btrfs_release_path(path);
3410 		counting = false;
3411 		goto again;
3412 	}
3413 error:
3414 	btrfs_free_path(path);
3415 	if (enospc_errors) {
3416 		btrfs_info(fs_info, "%d enospc errors during balance",
3417 		       enospc_errors);
3418 		if (!ret)
3419 			ret = -ENOSPC;
3420 	}
3421 
3422 	return ret;
3423 }
3424 
3425 /**
3426  * alloc_profile_is_valid - see if a given profile is valid and reduced
3427  * @flags: profile to validate
3428  * @extended: if true @flags is treated as an extended profile
3429  */
3430 static int alloc_profile_is_valid(u64 flags, int extended)
3431 {
3432 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3433 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3434 
3435 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3436 
3437 	/* 1) check that all other bits are zeroed */
3438 	if (flags & ~mask)
3439 		return 0;
3440 
3441 	/* 2) see if profile is reduced */
3442 	if (flags == 0)
3443 		return !extended; /* "0" is valid for usual profiles */
3444 
3445 	/* true if exactly one bit set */
3446 	return (flags & (flags - 1)) == 0;
3447 }
3448 
3449 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3450 {
3451 	/* cancel requested || normal exit path */
3452 	return atomic_read(&fs_info->balance_cancel_req) ||
3453 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3454 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3455 }
3456 
3457 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3458 {
3459 	int ret;
3460 
3461 	unset_balance_control(fs_info);
3462 	ret = del_balance_item(fs_info->tree_root);
3463 	if (ret)
3464 		btrfs_std_error(fs_info, ret);
3465 
3466 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3467 }
3468 
3469 /*
3470  * Should be called with both balance and volume mutexes held
3471  */
3472 int btrfs_balance(struct btrfs_balance_control *bctl,
3473 		  struct btrfs_ioctl_balance_args *bargs)
3474 {
3475 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3476 	u64 allowed;
3477 	int mixed = 0;
3478 	int ret;
3479 	u64 num_devices;
3480 	unsigned seq;
3481 
3482 	if (btrfs_fs_closing(fs_info) ||
3483 	    atomic_read(&fs_info->balance_pause_req) ||
3484 	    atomic_read(&fs_info->balance_cancel_req)) {
3485 		ret = -EINVAL;
3486 		goto out;
3487 	}
3488 
3489 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3490 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3491 		mixed = 1;
3492 
3493 	/*
3494 	 * In case of mixed groups both data and meta should be picked,
3495 	 * and identical options should be given for both of them.
3496 	 */
3497 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3498 	if (mixed && (bctl->flags & allowed)) {
3499 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3500 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3501 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3502 			btrfs_err(fs_info, "with mixed groups data and "
3503 				   "metadata balance options must be the same");
3504 			ret = -EINVAL;
3505 			goto out;
3506 		}
3507 	}
3508 
3509 	num_devices = fs_info->fs_devices->num_devices;
3510 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3511 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3512 		BUG_ON(num_devices < 1);
3513 		num_devices--;
3514 	}
3515 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
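	/*
	 * Build the set of target profiles that the current device count can
	 * actually satisfy; a running dev-replace was discounted above since
	 * its target device is not an independent member yet.
	 */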
3516 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3517 	if (num_devices == 1)
3518 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3519 	else if (num_devices > 1)
3520 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3521 	if (num_devices > 2)
3522 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3523 	if (num_devices > 3)
3524 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3525 			    BTRFS_BLOCK_GROUP_RAID6);
3526 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3527 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3528 	     (bctl->data.target & ~allowed))) {
3529 		btrfs_err(fs_info, "unable to start balance with target "
3530 			   "data profile %llu",
3531 		       bctl->data.target);
3532 		ret = -EINVAL;
3533 		goto out;
3534 	}
3535 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3536 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3537 	     (bctl->meta.target & ~allowed))) {
3538 		btrfs_err(fs_info,
3539 			   "unable to start balance with target metadata profile %llu",
3540 		       bctl->meta.target);
3541 		ret = -EINVAL;
3542 		goto out;
3543 	}
3544 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3545 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3546 	     (bctl->sys.target & ~allowed))) {
3547 		btrfs_err(fs_info,
3548 			   "unable to start balance with target system profile %llu",
3549 		       bctl->sys.target);
3550 		ret = -EINVAL;
3551 		goto out;
3552 	}
3553 
3554 	/* allow dup'ed data chunks only in mixed mode */
3555 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3556 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3557 		btrfs_err(fs_info, "dup for data is not allowed");
3558 		ret = -EINVAL;
3559 		goto out;
3560 	}
3561 
3562 	/* allow reducing meta or sys integrity only if force is set */
3563 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3564 			BTRFS_BLOCK_GROUP_RAID10 |
3565 			BTRFS_BLOCK_GROUP_RAID5 |
3566 			BTRFS_BLOCK_GROUP_RAID6;
3567 	do {
3568 		seq = read_seqbegin(&fs_info->profiles_lock);
3569 
3570 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3571 		     (fs_info->avail_system_alloc_bits & allowed) &&
3572 		     !(bctl->sys.target & allowed)) ||
3573 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3574 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3575 		     !(bctl->meta.target & allowed))) {
3576 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3577 				btrfs_info(fs_info, "force reducing metadata integrity");
3578 			} else {
3579 				btrfs_err(fs_info, "balance will reduce metadata "
3580 					   "integrity, use force if you want this");
3581 				ret = -EINVAL;
3582 				goto out;
3583 			}
3584 		}
3585 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3586 
3587 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3588 		int num_tolerated_disk_barrier_failures;
3589 		u64 target = bctl->sys.target;
3590 
3591 		num_tolerated_disk_barrier_failures =
3592 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3593 		if (num_tolerated_disk_barrier_failures > 0 &&
3594 		    (target &
3595 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3596 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3597 			num_tolerated_disk_barrier_failures = 0;
3598 		else if (num_tolerated_disk_barrier_failures > 1 &&
3599 			 (target &
3600 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3601 			num_tolerated_disk_barrier_failures = 1;
3602 
3603 		fs_info->num_tolerated_disk_barrier_failures =
3604 			num_tolerated_disk_barrier_failures;
3605 	}
3606 
3607 	ret = insert_balance_item(fs_info->tree_root, bctl);
3608 	if (ret && ret != -EEXIST)
3609 		goto out;
3610 
3611 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3612 		BUG_ON(ret == -EEXIST);
3613 		set_balance_control(bctl);
3614 	} else {
3615 		BUG_ON(ret != -EEXIST);
3616 		spin_lock(&fs_info->balance_lock);
3617 		update_balance_args(bctl);
3618 		spin_unlock(&fs_info->balance_lock);
3619 	}
3620 
3621 	atomic_inc(&fs_info->balance_running);
3622 	mutex_unlock(&fs_info->balance_mutex);
3623 
3624 	ret = __btrfs_balance(fs_info);
3625 
3626 	mutex_lock(&fs_info->balance_mutex);
3627 	atomic_dec(&fs_info->balance_running);
3628 
3629 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3630 		fs_info->num_tolerated_disk_barrier_failures =
3631 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3632 	}
3633 
3634 	if (bargs) {
3635 		memset(bargs, 0, sizeof(*bargs));
3636 		update_ioctl_balance_args(fs_info, 0, bargs);
3637 	}
3638 
3639 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3640 	    balance_need_close(fs_info)) {
3641 		__cancel_balance(fs_info);
3642 	}
3643 
3644 	wake_up(&fs_info->balance_wait_q);
3645 
3646 	return ret;
3647 out:
3648 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3649 		__cancel_balance(fs_info);
3650 	else {
3651 		kfree(bctl);
3652 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3653 	}
3654 	return ret;
3655 }
3656 
3657 static int balance_kthread(void *data)
3658 {
3659 	struct btrfs_fs_info *fs_info = data;
3660 	int ret = 0;
3661 
3662 	mutex_lock(&fs_info->volume_mutex);
3663 	mutex_lock(&fs_info->balance_mutex);
3664 
3665 	if (fs_info->balance_ctl) {
3666 		btrfs_info(fs_info, "continuing balance");
3667 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3668 	}
3669 
3670 	mutex_unlock(&fs_info->balance_mutex);
3671 	mutex_unlock(&fs_info->volume_mutex);
3672 
3673 	return ret;
3674 }
3675 
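/*
 * Kick off a kthread that resumes a previously recovered balance, unless
 * mounting with -o skip_balance asked us not to.
 */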
3676 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3677 {
3678 	struct task_struct *tsk;
3679 
3680 	spin_lock(&fs_info->balance_lock);
3681 	if (!fs_info->balance_ctl) {
3682 		spin_unlock(&fs_info->balance_lock);
3683 		return 0;
3684 	}
3685 	spin_unlock(&fs_info->balance_lock);
3686 
3687 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3688 		btrfs_info(fs_info, "force skipping balance");
3689 		return 0;
3690 	}
3691 
3692 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3693 	return PTR_ERR_OR_ZERO(tsk);
3694 }
3695 
3696 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3697 {
3698 	struct btrfs_balance_control *bctl;
3699 	struct btrfs_balance_item *item;
3700 	struct btrfs_disk_balance_args disk_bargs;
3701 	struct btrfs_path *path;
3702 	struct extent_buffer *leaf;
3703 	struct btrfs_key key;
3704 	int ret;
3705 
3706 	path = btrfs_alloc_path();
3707 	if (!path)
3708 		return -ENOMEM;
3709 
3710 	key.objectid = BTRFS_BALANCE_OBJECTID;
3711 	key.type = BTRFS_BALANCE_ITEM_KEY;
3712 	key.offset = 0;
3713 
3714 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3715 	if (ret < 0)
3716 		goto out;
3717 	if (ret > 0) { /* ret = -ENOENT; */
3718 		ret = 0;
3719 		goto out;
3720 	}
3721 
3722 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3723 	if (!bctl) {
3724 		ret = -ENOMEM;
3725 		goto out;
3726 	}
3727 
3728 	leaf = path->nodes[0];
3729 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3730 
3731 	bctl->fs_info = fs_info;
3732 	bctl->flags = btrfs_balance_flags(leaf, item);
3733 	bctl->flags |= BTRFS_BALANCE_RESUME;
3734 
3735 	btrfs_balance_data(leaf, item, &disk_bargs);
3736 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3737 	btrfs_balance_meta(leaf, item, &disk_bargs);
3738 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3739 	btrfs_balance_sys(leaf, item, &disk_bargs);
3740 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3741 
3742 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3743 
3744 	mutex_lock(&fs_info->volume_mutex);
3745 	mutex_lock(&fs_info->balance_mutex);
3746 
3747 	set_balance_control(bctl);
3748 
3749 	mutex_unlock(&fs_info->balance_mutex);
3750 	mutex_unlock(&fs_info->volume_mutex);
3751 out:
3752 	btrfs_free_path(path);
3753 	return ret;
3754 }
3755 
3756 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3757 {
3758 	int ret = 0;
3759 
3760 	mutex_lock(&fs_info->balance_mutex);
3761 	if (!fs_info->balance_ctl) {
3762 		mutex_unlock(&fs_info->balance_mutex);
3763 		return -ENOTCONN;
3764 	}
3765 
3766 	if (atomic_read(&fs_info->balance_running)) {
3767 		atomic_inc(&fs_info->balance_pause_req);
3768 		mutex_unlock(&fs_info->balance_mutex);
3769 
3770 		wait_event(fs_info->balance_wait_q,
3771 			   atomic_read(&fs_info->balance_running) == 0);
3772 
3773 		mutex_lock(&fs_info->balance_mutex);
3774 		/* we are good with balance_ctl ripped off from under us */
3775 		BUG_ON(atomic_read(&fs_info->balance_running));
3776 		atomic_dec(&fs_info->balance_pause_req);
3777 	} else {
3778 		ret = -ENOTCONN;
3779 	}
3780 
3781 	mutex_unlock(&fs_info->balance_mutex);
3782 	return ret;
3783 }
3784 
3785 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3786 {
3787 	if (fs_info->sb->s_flags & MS_RDONLY)
3788 		return -EROFS;
3789 
3790 	mutex_lock(&fs_info->balance_mutex);
3791 	if (!fs_info->balance_ctl) {
3792 		mutex_unlock(&fs_info->balance_mutex);
3793 		return -ENOTCONN;
3794 	}
3795 
3796 	atomic_inc(&fs_info->balance_cancel_req);
3797 	/*
3798 	 * if we are running, just wait and return; the balance item is
3799 	 * deleted in btrfs_balance() in this case
3800 	 */
3801 	if (atomic_read(&fs_info->balance_running)) {
3802 		mutex_unlock(&fs_info->balance_mutex);
3803 		wait_event(fs_info->balance_wait_q,
3804 			   atomic_read(&fs_info->balance_running) == 0);
3805 		mutex_lock(&fs_info->balance_mutex);
3806 	} else {
3807 		/* __cancel_balance needs volume_mutex */
3808 		mutex_unlock(&fs_info->balance_mutex);
3809 		mutex_lock(&fs_info->volume_mutex);
3810 		mutex_lock(&fs_info->balance_mutex);
3811 
3812 		if (fs_info->balance_ctl)
3813 			__cancel_balance(fs_info);
3814 
3815 		mutex_unlock(&fs_info->volume_mutex);
3816 	}
3817 
3818 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3819 	atomic_dec(&fs_info->balance_cancel_req);
3820 	mutex_unlock(&fs_info->balance_mutex);
3821 	return 0;
3822 }
3823 
3824 static int btrfs_uuid_scan_kthread(void *data)
3825 {
3826 	struct btrfs_fs_info *fs_info = data;
3827 	struct btrfs_root *root = fs_info->tree_root;
3828 	struct btrfs_key key;
3829 	struct btrfs_key max_key;
3830 	struct btrfs_path *path = NULL;
3831 	int ret = 0;
3832 	struct extent_buffer *eb;
3833 	int slot;
3834 	struct btrfs_root_item root_item;
3835 	u32 item_size;
3836 	struct btrfs_trans_handle *trans = NULL;
3837 
3838 	path = btrfs_alloc_path();
3839 	if (!path) {
3840 		ret = -ENOMEM;
3841 		goto out;
3842 	}
3843 
3844 	key.objectid = 0;
3845 	key.type = BTRFS_ROOT_ITEM_KEY;
3846 	key.offset = 0;
3847 
3848 	max_key.objectid = (u64)-1;
3849 	max_key.type = BTRFS_ROOT_ITEM_KEY;
3850 	max_key.offset = (u64)-1;
3851 
3852 	while (1) {
3853 		ret = btrfs_search_forward(root, &key, path, 0);
3854 		if (ret) {
3855 			if (ret > 0)
3856 				ret = 0;
3857 			break;
3858 		}
3859 
3860 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
3861 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3862 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3863 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
3864 			goto skip;
3865 
3866 		eb = path->nodes[0];
3867 		slot = path->slots[0];
3868 		item_size = btrfs_item_size_nr(eb, slot);
3869 		if (item_size < sizeof(root_item))
3870 			goto skip;
3871 
3872 		read_extent_buffer(eb, &root_item,
3873 				   btrfs_item_ptr_offset(eb, slot),
3874 				   (int)sizeof(root_item));
3875 		if (btrfs_root_refs(&root_item) == 0)
3876 			goto skip;
3877 
3878 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
3879 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
3880 			if (trans)
3881 				goto update_tree;
3882 
3883 			btrfs_release_path(path);
3884 			/*
3885 			 * 1 - subvol uuid item
3886 			 * 1 - received_subvol uuid item
3887 			 */
3888 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3889 			if (IS_ERR(trans)) {
3890 				ret = PTR_ERR(trans);
3891 				break;
3892 			}
3893 			continue;
3894 		} else {
3895 			goto skip;
3896 		}
3897 update_tree:
3898 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
3899 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3900 						  root_item.uuid,
3901 						  BTRFS_UUID_KEY_SUBVOL,
3902 						  key.objectid);
3903 			if (ret < 0) {
3904 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3905 					ret);
3906 				break;
3907 			}
3908 		}
3909 
3910 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3911 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3912 						  root_item.received_uuid,
3913 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3914 						  key.objectid);
3915 			if (ret < 0) {
3916 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
3917 					ret);
3918 				break;
3919 			}
3920 		}
3921 
3922 skip:
3923 		if (trans) {
3924 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3925 			trans = NULL;
3926 			if (ret)
3927 				break;
3928 		}
3929 
3930 		btrfs_release_path(path);
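		/*
		 * Advance the search key like a multi-digit counter: bump
		 * offset first, then roll over to the next type and finally
		 * to the next objectid; stop once everything is exhausted.
		 */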
3931 		if (key.offset < (u64)-1) {
3932 			key.offset++;
3933 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3934 			key.offset = 0;
3935 			key.type = BTRFS_ROOT_ITEM_KEY;
3936 		} else if (key.objectid < (u64)-1) {
3937 			key.offset = 0;
3938 			key.type = BTRFS_ROOT_ITEM_KEY;
3939 			key.objectid++;
3940 		} else {
3941 			break;
3942 		}
3943 		cond_resched();
3944 	}
3945 
3946 out:
3947 	btrfs_free_path(path);
3948 	if (trans && !IS_ERR(trans))
3949 		btrfs_end_transaction(trans, fs_info->uuid_root);
3950 	if (ret)
3951 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
3952 	else
3953 		fs_info->update_uuid_tree_gen = 1;
3954 	up(&fs_info->uuid_tree_rescan_sem);
3955 	return 0;
3956 }
3957 
3958 /*
3959  * Callback for btrfs_uuid_tree_iterate().
3960  * returns:
3961  * 0	check succeeded, the entry is not outdated.
3962  * < 0	if an error occurred.
3963  * > 0	if the check failed, which means the caller shall remove the entry.
3964  */
3965 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3966 				       u8 *uuid, u8 type, u64 subid)
3967 {
3968 	struct btrfs_key key;
3969 	int ret = 0;
3970 	struct btrfs_root *subvol_root;
3971 
3972 	if (type != BTRFS_UUID_KEY_SUBVOL &&
3973 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3974 		goto out;
3975 
3976 	key.objectid = subid;
3977 	key.type = BTRFS_ROOT_ITEM_KEY;
3978 	key.offset = (u64)-1;
3979 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3980 	if (IS_ERR(subvol_root)) {
3981 		ret = PTR_ERR(subvol_root);
3982 		if (ret == -ENOENT)
3983 			ret = 1;
3984 		goto out;
3985 	}
3986 
3987 	switch (type) {
3988 	case BTRFS_UUID_KEY_SUBVOL:
3989 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3990 			ret = 1;
3991 		break;
3992 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3993 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
3994 			   BTRFS_UUID_SIZE))
3995 			ret = 1;
3996 		break;
3997 	}
3998 
3999 out:
4000 	return ret;
4001 }
4002 
4003 static int btrfs_uuid_rescan_kthread(void *data)
4004 {
4005 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4006 	int ret;
4007 
4008 	/*
4009 	 * 1st step is to iterate through the existing UUID tree and
4010 	 * to delete all entries that contain outdated data.
4011 	 * 2nd step is to add all missing entries to the UUID tree.
4012 	 */
4013 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4014 	if (ret < 0) {
4015 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4016 		up(&fs_info->uuid_tree_rescan_sem);
4017 		return ret;
4018 	}
4019 	return btrfs_uuid_scan_kthread(data);
4020 }
4021 
4022 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4023 {
4024 	struct btrfs_trans_handle *trans;
4025 	struct btrfs_root *tree_root = fs_info->tree_root;
4026 	struct btrfs_root *uuid_root;
4027 	struct task_struct *task;
4028 	int ret;
4029 
4030 	/*
4031 	 * 1 - root node
4032 	 * 1 - root item
4033 	 */
4034 	trans = btrfs_start_transaction(tree_root, 2);
4035 	if (IS_ERR(trans))
4036 		return PTR_ERR(trans);
4037 
4038 	uuid_root = btrfs_create_tree(trans, fs_info,
4039 				      BTRFS_UUID_TREE_OBJECTID);
4040 	if (IS_ERR(uuid_root)) {
4041 		ret = PTR_ERR(uuid_root);
4042 		btrfs_abort_transaction(trans, tree_root, ret);
4043 		return ret;
4044 	}
4045 
4046 	fs_info->uuid_root = uuid_root;
4047 
4048 	ret = btrfs_commit_transaction(trans, tree_root);
4049 	if (ret)
4050 		return ret;
4051 
4052 	down(&fs_info->uuid_tree_rescan_sem);
4053 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4054 	if (IS_ERR(task)) {
4055 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4056 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4057 		up(&fs_info->uuid_tree_rescan_sem);
4058 		return PTR_ERR(task);
4059 	}
4060 
4061 	return 0;
4062 }
4063 
4064 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4065 {
4066 	struct task_struct *task;
4067 
4068 	down(&fs_info->uuid_tree_rescan_sem);
4069 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4070 	if (IS_ERR(task)) {
4071 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4072 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4073 		up(&fs_info->uuid_tree_rescan_sem);
4074 		return PTR_ERR(task);
4075 	}
4076 
4077 	return 0;
4078 }
4079 
4080 /*
4081  * shrinking a device means finding all of the device extents past
4082  * the new size, and then following the back refs to the chunks.
4083  * The chunk relocation code actually frees the device extent
4084  */
4085 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4086 {
4087 	struct btrfs_trans_handle *trans;
4088 	struct btrfs_root *root = device->dev_root;
4089 	struct btrfs_dev_extent *dev_extent = NULL;
4090 	struct btrfs_path *path;
4091 	u64 length;
4092 	u64 chunk_offset;
4093 	int ret;
4094 	int slot;
4095 	int failed = 0;
4096 	bool retried = false;
4097 	bool checked_pending_chunks = false;
4098 	struct extent_buffer *l;
4099 	struct btrfs_key key;
4100 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4101 	u64 old_total = btrfs_super_total_bytes(super_copy);
4102 	u64 old_size = btrfs_device_get_total_bytes(device);
4103 	u64 diff = old_size - new_size;
4104 
4105 	if (device->is_tgtdev_for_dev_replace)
4106 		return -EINVAL;
4107 
4108 	path = btrfs_alloc_path();
4109 	if (!path)
4110 		return -ENOMEM;
4111 
4112 	path->reada = 2;
4113 
4114 	lock_chunks(root);
4115 
4116 	btrfs_device_set_total_bytes(device, new_size);
4117 	if (device->writeable) {
4118 		device->fs_devices->total_rw_bytes -= diff;
4119 		spin_lock(&root->fs_info->free_chunk_lock);
4120 		root->fs_info->free_chunk_space -= diff;
4121 		spin_unlock(&root->fs_info->free_chunk_lock);
4122 	}
4123 	unlock_chunks(root);
4124 
4125 again:
4126 	key.objectid = device->devid;
4127 	key.offset = (u64)-1;
4128 	key.type = BTRFS_DEV_EXTENT_KEY;
4129 
4130 	do {
4131 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4132 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4133 		if (ret < 0) {
4134 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4135 			goto done;
4136 		}
4137 
4138 		ret = btrfs_previous_item(root, path, 0, key.type);
4139 		if (ret)
4140 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4141 		if (ret < 0)
4142 			goto done;
4143 		if (ret) {
4144 			ret = 0;
4145 			btrfs_release_path(path);
4146 			break;
4147 		}
4148 
4149 		l = path->nodes[0];
4150 		slot = path->slots[0];
4151 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4152 
4153 		if (key.objectid != device->devid) {
4154 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4155 			btrfs_release_path(path);
4156 			break;
4157 		}
4158 
4159 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4160 		length = btrfs_dev_extent_length(l, dev_extent);
4161 
4162 		if (key.offset + length <= new_size) {
4163 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4164 			btrfs_release_path(path);
4165 			break;
4166 		}
4167 
4168 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4169 		btrfs_release_path(path);
4170 
4171 		ret = btrfs_relocate_chunk(root, chunk_offset);
4172 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4173 		if (ret && ret != -ENOSPC)
4174 			goto done;
4175 		if (ret == -ENOSPC)
4176 			failed++;
4177 	} while (key.offset-- > 0);
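	/*
	 * Note the backwards walk above: the search starts at the highest
	 * possible offset and btrfs_previous_item() steps toward lower ones,
	 * so the loop ends once a dev extent fits entirely below new_size.
	 */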
4178 
4179 	if (failed && !retried) {
4180 		failed = 0;
4181 		retried = true;
4182 		goto again;
4183 	} else if (failed && retried) {
4184 		ret = -ENOSPC;
4185 		goto done;
4186 	}
4187 
4188 	/* Shrinking succeeded, else we would be at "done". */
4189 	trans = btrfs_start_transaction(root, 0);
4190 	if (IS_ERR(trans)) {
4191 		ret = PTR_ERR(trans);
4192 		goto done;
4193 	}
4194 
4195 	lock_chunks(root);
4196 
4197 	/*
4198 	 * We checked in the above loop all device extents that were already in
4199 	 * the device tree. However, before we updated the device's
4200 	 * total_bytes to the new size, we might have had chunk allocations that
4201 	 * had not completed yet (new block groups attached to transaction
4202 	 * handles), and therefore their device extents were not yet in the
4203 	 * device tree and we missed them in the loop above. So if we have any
4204 	 * pending chunk using a device extent that overlaps the device range
4205 	 * that we cannot use anymore, commit the current transaction and
4206 	 * repeat the search on the device tree - this way we guarantee we will
4207 	 * not have chunks using device extents that end beyond 'new_size'.
4208 	 */
4209 	if (!checked_pending_chunks) {
4210 		u64 start = new_size;
4211 		u64 len = old_size - new_size;
4212 
4213 		if (contains_pending_extent(trans->transaction, device,
4214 					    &start, len)) {
4215 			unlock_chunks(root);
4216 			checked_pending_chunks = true;
4217 			failed = 0;
4218 			retried = false;
4219 			ret = btrfs_commit_transaction(trans, root);
4220 			if (ret)
4221 				goto done;
4222 			goto again;
4223 		}
4224 	}
4225 
4226 	btrfs_device_set_disk_total_bytes(device, new_size);
4227 	if (list_empty(&device->resized_list))
4228 		list_add_tail(&device->resized_list,
4229 			      &root->fs_info->fs_devices->resized_devices);
4230 
4231 	WARN_ON(diff > old_total);
4232 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4233 	unlock_chunks(root);
4234 
4235 	/* Now btrfs_update_device() will change the on-disk size. */
4236 	ret = btrfs_update_device(trans, device);
4237 	btrfs_end_transaction(trans, root);
4238 done:
4239 	btrfs_free_path(path);
4240 	if (ret) {
4241 		lock_chunks(root);
4242 		btrfs_device_set_total_bytes(device, old_size);
4243 		if (device->writeable)
4244 			device->fs_devices->total_rw_bytes += diff;
4245 		spin_lock(&root->fs_info->free_chunk_lock);
4246 		root->fs_info->free_chunk_space += diff;
4247 		spin_unlock(&root->fs_info->free_chunk_lock);
4248 		unlock_chunks(root);
4249 	}
4250 	return ret;
4251 }
4252 
4253 static int btrfs_add_system_chunk(struct btrfs_root *root,
4254 			   struct btrfs_key *key,
4255 			   struct btrfs_chunk *chunk, int item_size)
4256 {
4257 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4258 	struct btrfs_disk_key disk_key;
4259 	u32 array_size;
4260 	u8 *ptr;
4261 
4262 	lock_chunks(root);
4263 	array_size = btrfs_super_sys_array_size(super_copy);
4264 	if (array_size + item_size + sizeof(disk_key)
4265 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4266 		unlock_chunks(root);
4267 		return -EFBIG;
4268 	}
4269 
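	/*
	 * sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
	 * struct btrfs_chunk + stripes) pairs; the new pair is appended at
	 * the current end of the array.
	 */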
4270 	ptr = super_copy->sys_chunk_array + array_size;
4271 	btrfs_cpu_key_to_disk(&disk_key, key);
4272 	memcpy(ptr, &disk_key, sizeof(disk_key));
4273 	ptr += sizeof(disk_key);
4274 	memcpy(ptr, chunk, item_size);
4275 	item_size += sizeof(disk_key);
4276 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4277 	unlock_chunks(root);
4278 
4279 	return 0;
4280 }
4281 
4282 /*
4283  * sort the devices in descending order by max_avail, total_avail
4284  */
4285 static int btrfs_cmp_device_info(const void *a, const void *b)
4286 {
4287 	const struct btrfs_device_info *di_a = a;
4288 	const struct btrfs_device_info *di_b = b;
4289 
4290 	if (di_a->max_avail > di_b->max_avail)
4291 		return -1;
4292 	if (di_a->max_avail < di_b->max_avail)
4293 		return 1;
4294 	if (di_a->total_avail > di_b->total_avail)
4295 		return -1;
4296 	if (di_a->total_avail < di_b->total_avail)
4297 		return 1;
4298 	return 0;
4299 }
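/*
 * Note that returning -1 for the bigger value gives a descending sort, so
 * after the sort() call in __btrfs_alloc_chunk() below, devices_info[0]
 * holds the device with the largest hole.
 */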
4300 
4301 static const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
4302 	[BTRFS_RAID_RAID10] = {
4303 		.sub_stripes	= 2,
4304 		.dev_stripes	= 1,
4305 		.devs_max	= 0,	/* 0 == as many as possible */
4306 		.devs_min	= 4,
4307 		.devs_increment	= 2,
4308 		.ncopies	= 2,
4309 	},
4310 	[BTRFS_RAID_RAID1] = {
4311 		.sub_stripes	= 1,
4312 		.dev_stripes	= 1,
4313 		.devs_max	= 2,
4314 		.devs_min	= 2,
4315 		.devs_increment	= 2,
4316 		.ncopies	= 2,
4317 	},
4318 	[BTRFS_RAID_DUP] = {
4319 		.sub_stripes	= 1,
4320 		.dev_stripes	= 2,
4321 		.devs_max	= 1,
4322 		.devs_min	= 1,
4323 		.devs_increment	= 1,
4324 		.ncopies	= 2,
4325 	},
4326 	[BTRFS_RAID_RAID0] = {
4327 		.sub_stripes	= 1,
4328 		.dev_stripes	= 1,
4329 		.devs_max	= 0,
4330 		.devs_min	= 2,
4331 		.devs_increment	= 1,
4332 		.ncopies	= 1,
4333 	},
4334 	[BTRFS_RAID_SINGLE] = {
4335 		.sub_stripes	= 1,
4336 		.dev_stripes	= 1,
4337 		.devs_max	= 1,
4338 		.devs_min	= 1,
4339 		.devs_increment	= 1,
4340 		.ncopies	= 1,
4341 	},
4342 	[BTRFS_RAID_RAID5] = {
4343 		.sub_stripes	= 1,
4344 		.dev_stripes	= 1,
4345 		.devs_max	= 0,
4346 		.devs_min	= 2,
4347 		.devs_increment	= 1,
4348 		.ncopies	= 2,
4349 	},
4350 	[BTRFS_RAID_RAID6] = {
4351 		.sub_stripes	= 1,
4352 		.dev_stripes	= 1,
4353 		.devs_max	= 0,
4354 		.devs_min	= 3,
4355 		.devs_increment	= 1,
4356 		.ncopies	= 3,
4357 	},
4358 };
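/*
 * Reading the table: e.g. for RAID10, ncopies=2 and sub_stripes=2 mean each
 * stripe element lives on a pair of devices, devs_increment=2 forces devices
 * to be used in pairs, and devs_max=0 means "as many devices as available".
 */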
4359 
4360 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4361 {
4362 	/* TODO allow them to set a preferred stripe size */
4363 	return 64 * 1024;
4364 }
4365 
4366 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4367 {
4368 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4369 		return;
4370 
4371 	btrfs_set_fs_incompat(info, RAID56);
4372 }
4373 
4374 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4375 			- sizeof(struct btrfs_item)		\
4376 			- sizeof(struct btrfs_chunk))		\
4377 			/ sizeof(struct btrfs_stripe) + 1)
4378 
4379 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4380 				- 2 * sizeof(struct btrfs_disk_key)	\
4381 				- 2 * sizeof(struct btrfs_chunk))	\
4382 				/ sizeof(struct btrfs_stripe) + 1)
4383 
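/*
 * Both limits compute "how many stripes fit": take the byte budget (a leaf's
 * data area for a chunk item, or the superblock's sys_chunk_array), subtract
 * the fixed headers, and divide by sizeof(struct btrfs_stripe). The "+ 1"
 * accounts for the stripe already embedded in struct btrfs_chunk; the factor
 * of two in the system-chunk variant leaves room for a second (key, chunk)
 * pair in the array.
 */
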
4384 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4385 			       struct btrfs_root *extent_root, u64 start,
4386 			       u64 type)
4387 {
4388 	struct btrfs_fs_info *info = extent_root->fs_info;
4389 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4390 	struct list_head *cur;
4391 	struct map_lookup *map = NULL;
4392 	struct extent_map_tree *em_tree;
4393 	struct extent_map *em;
4394 	struct btrfs_device_info *devices_info = NULL;
4395 	u64 total_avail;
4396 	int num_stripes;	/* total number of stripes to allocate */
4397 	int data_stripes;	/* number of stripes that count for
4398 				   block group size */
4399 	int sub_stripes;	/* sub_stripes info for map */
4400 	int dev_stripes;	/* stripes per dev */
4401 	int devs_max;		/* max devs to use */
4402 	int devs_min;		/* min devs needed */
4403 	int devs_increment;	/* ndevs has to be a multiple of this */
4404 	int ncopies;		/* how many copies the data has */
4405 	int ret;
4406 	u64 max_stripe_size;
4407 	u64 max_chunk_size;
4408 	u64 stripe_size;
4409 	u64 num_bytes;
4410 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4411 	int ndevs;
4412 	int i;
4413 	int j;
4414 	int index;
4415 
4416 	BUG_ON(!alloc_profile_is_valid(type, 0));
4417 
4418 	if (list_empty(&fs_devices->alloc_list))
4419 		return -ENOSPC;
4420 
4421 	index = __get_raid_index(type);
4422 
4423 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4424 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4425 	devs_max = btrfs_raid_array[index].devs_max;
4426 	devs_min = btrfs_raid_array[index].devs_min;
4427 	devs_increment = btrfs_raid_array[index].devs_increment;
4428 	ncopies = btrfs_raid_array[index].ncopies;
4429 
4430 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4431 		max_stripe_size = 1024 * 1024 * 1024;
4432 		max_chunk_size = 10 * max_stripe_size;
4433 		if (!devs_max)
4434 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4435 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4436 		/* for larger filesystems, use larger metadata chunks */
4437 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4438 			max_stripe_size = 1024 * 1024 * 1024;
4439 		else
4440 			max_stripe_size = 256 * 1024 * 1024;
4441 		max_chunk_size = max_stripe_size;
4442 		if (!devs_max)
4443 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4444 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4445 		max_stripe_size = 32 * 1024 * 1024;
4446 		max_chunk_size = 2 * max_stripe_size;
4447 		if (!devs_max)
4448 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4449 	} else {
4450 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4451 		       type);
4452 		BUG_ON(1);
4453 	}
4454 
4455 	/* we don't want a chunk larger than 10% of writeable space */
4456 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4457 			     max_chunk_size);
4458 
4459 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4460 			       GFP_NOFS);
4461 	if (!devices_info)
4462 		return -ENOMEM;
4463 
4464 	cur = fs_devices->alloc_list.next;
4465 
4466 	/*
4467 	 * in the first pass through the devices list, we gather information
4468 	 * about the available holes on each device.
4469 	 */
4470 	ndevs = 0;
4471 	while (cur != &fs_devices->alloc_list) {
4472 		struct btrfs_device *device;
4473 		u64 max_avail;
4474 		u64 dev_offset;
4475 
4476 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4477 
4478 		cur = cur->next;
4479 
4480 		if (!device->writeable) {
4481 			WARN(1, KERN_ERR
4482 			       "BTRFS: read-only device in alloc_list\n");
4483 			continue;
4484 		}
4485 
4486 		if (!device->in_fs_metadata ||
4487 		    device->is_tgtdev_for_dev_replace)
4488 			continue;
4489 
4490 		if (device->total_bytes > device->bytes_used)
4491 			total_avail = device->total_bytes - device->bytes_used;
4492 		else
4493 			total_avail = 0;
4494 
4495 		/* If there is no space on this device, skip it. */
4496 		if (total_avail == 0)
4497 			continue;
4498 
4499 		ret = find_free_dev_extent(trans, device,
4500 					   max_stripe_size * dev_stripes,
4501 					   &dev_offset, &max_avail);
4502 		if (ret && ret != -ENOSPC)
4503 			goto error;
4504 
4505 		if (ret == 0)
4506 			max_avail = max_stripe_size * dev_stripes;
4507 
4508 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4509 			continue;
4510 
4511 		if (ndevs == fs_devices->rw_devices) {
4512 			WARN(1, "%s: found more than %llu devices\n",
4513 			     __func__, fs_devices->rw_devices);
4514 			break;
4515 		}
4516 		devices_info[ndevs].dev_offset = dev_offset;
4517 		devices_info[ndevs].max_avail = max_avail;
4518 		devices_info[ndevs].total_avail = total_avail;
4519 		devices_info[ndevs].dev = device;
4520 		++ndevs;
4521 	}
4522 
4523 	/*
4524 	 * now sort the devices by hole size / available space
4525 	 */
4526 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4527 	     btrfs_cmp_device_info, NULL);
4528 
4529 	/* round down to number of usable stripes */
4530 	ndevs -= ndevs % devs_increment;
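	/* e.g. with RAID10 (devs_increment == 2), 5 usable devices become 4 */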
4531 
4532 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4533 		ret = -ENOSPC;
4534 		goto error;
4535 	}
4536 
4537 	if (devs_max && ndevs > devs_max)
4538 		ndevs = devs_max;
4539 	/*
4540 	 * the primary goal is to maximize the number of stripes, so use as many
4541 	 * devices as possible, even if the stripes are not maximum sized.
4542 	 */
4543 	stripe_size = devices_info[ndevs-1].max_avail;
4544 	num_stripes = ndevs * dev_stripes;
4545 
4546 	/*
4547 	 * this will have to be fixed for RAID1 and RAID10 over
4548 	 * more drives
4549 	 */
4550 	data_stripes = num_stripes / ncopies;
4551 
4552 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4553 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4554 				 btrfs_super_stripesize(info->super_copy));
4555 		data_stripes = num_stripes - 1;
4556 	}
4557 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4558 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4559 				 btrfs_super_stripesize(info->super_copy));
4560 		data_stripes = num_stripes - 2;
4561 	}
4562 
4563 	/*
4564 	 * Use the number of data stripes to figure out how big this chunk
4565 	 * is really going to be in terms of logical address space,
4566 	 * and compare that answer with the max chunk size
4567 	 */
4568 	if (stripe_size * data_stripes > max_chunk_size) {
4569 		u64 mask = (1ULL << 24) - 1;
4570 
4571 		stripe_size = div_u64(max_chunk_size, data_stripes);
4572 
4573 		/* bump the answer up to a 16MB boundary */
4574 		stripe_size = (stripe_size + mask) & ~mask;
4575 
4576 		/* but don't go higher than the limits we found
4577 		 * while searching for free extents
4578 		 */
4579 		if (stripe_size > devices_info[ndevs-1].max_avail)
4580 			stripe_size = devices_info[ndevs-1].max_avail;
4581 	}
4582 
4583 	stripe_size = div_u64(stripe_size, dev_stripes);
4584 
4585 	/* align to BTRFS_STRIPE_LEN */
4586 	stripe_size = div_u64(stripe_size, raid_stripe_len);
4587 	stripe_size *= raid_stripe_len;
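	/*
	 * Worked example (hypothetical numbers): a RAID0 data chunk over
	 * ndevs=12 devices, each offering a 1G hole, starts out at
	 * stripe_size=1G and data_stripes=12, i.e. 12G of logical space.
	 * That exceeds the 10G max_chunk_size, so stripe_size is recomputed
	 * as 10G/12 and bumped to the next 16M boundary: 864M per device,
	 * giving a chunk of about 10.1G.
	 */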
4588 
4589 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4590 	if (!map) {
4591 		ret = -ENOMEM;
4592 		goto error;
4593 	}
4594 	map->num_stripes = num_stripes;
4595 
4596 	for (i = 0; i < ndevs; ++i) {
4597 		for (j = 0; j < dev_stripes; ++j) {
4598 			int s = i * dev_stripes + j;
4599 			map->stripes[s].dev = devices_info[i].dev;
4600 			map->stripes[s].physical = devices_info[i].dev_offset +
4601 						   j * stripe_size;
4602 		}
4603 	}
4604 	map->sector_size = extent_root->sectorsize;
4605 	map->stripe_len = raid_stripe_len;
4606 	map->io_align = raid_stripe_len;
4607 	map->io_width = raid_stripe_len;
4608 	map->type = type;
4609 	map->sub_stripes = sub_stripes;
4610 
4611 	num_bytes = stripe_size * data_stripes;
4612 
4613 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4614 
4615 	em = alloc_extent_map();
4616 	if (!em) {
4617 		kfree(map);
4618 		ret = -ENOMEM;
4619 		goto error;
4620 	}
4621 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4622 	em->bdev = (struct block_device *)map;
4623 	em->start = start;
4624 	em->len = num_bytes;
4625 	em->block_start = 0;
4626 	em->block_len = em->len;
4627 	em->orig_block_len = stripe_size;
4628 
4629 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4630 	write_lock(&em_tree->lock);
4631 	ret = add_extent_mapping(em_tree, em, 0);
4632 	if (!ret) {
4633 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4634 		atomic_inc(&em->refs);
4635 	}
4636 	write_unlock(&em_tree->lock);
4637 	if (ret) {
4638 		free_extent_map(em);
4639 		goto error;
4640 	}
4641 
4642 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4643 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4644 				     start, num_bytes);
4645 	if (ret)
4646 		goto error_del_extent;
4647 
4648 	for (i = 0; i < map->num_stripes; i++) {
4649 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4650 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4651 	}
4652 
4653 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4654 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4655 						   map->num_stripes);
4656 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4657 
4658 	free_extent_map(em);
4659 	check_raid56_incompat_flag(extent_root->fs_info, type);
4660 
4661 	kfree(devices_info);
4662 	return 0;
4663 
4664 error_del_extent:
4665 	write_lock(&em_tree->lock);
4666 	remove_extent_mapping(em_tree, em);
4667 	write_unlock(&em_tree->lock);
4668 
4669 	/* One for our allocation */
4670 	free_extent_map(em);
4671 	/* One for the tree reference */
4672 	free_extent_map(em);
4673 	/* One for the pending_chunks list reference */
4674 	free_extent_map(em);
4675 error:
4676 	kfree(devices_info);
4677 	return ret;
4678 }
4679 
4680 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4681 				struct btrfs_root *extent_root,
4682 				u64 chunk_offset, u64 chunk_size)
4683 {
4684 	struct btrfs_key key;
4685 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4686 	struct btrfs_device *device;
4687 	struct btrfs_chunk *chunk;
4688 	struct btrfs_stripe *stripe;
4689 	struct extent_map_tree *em_tree;
4690 	struct extent_map *em;
4691 	struct map_lookup *map;
4692 	size_t item_size;
4693 	u64 dev_offset;
4694 	u64 stripe_size;
4695 	int i = 0;
4696 	int ret;
4697 
4698 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4699 	read_lock(&em_tree->lock);
4700 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4701 	read_unlock(&em_tree->lock);
4702 
4703 	if (!em) {
4704 		btrfs_crit(extent_root->fs_info, "unable to find logical "
4705 			   "%Lu len %Lu", chunk_offset, chunk_size);
4706 		return -EINVAL;
4707 	}
4708 
4709 	if (em->start != chunk_offset || em->len != chunk_size) {
4710 		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4711 			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4712 			  chunk_size, em->start, em->len);
4713 		free_extent_map(em);
4714 		return -EINVAL;
4715 	}
4716 
4717 	map = (struct map_lookup *)em->bdev;
4718 	item_size = btrfs_chunk_item_size(map->num_stripes);
4719 	stripe_size = em->orig_block_len;
4720 
4721 	chunk = kzalloc(item_size, GFP_NOFS);
4722 	if (!chunk) {
4723 		ret = -ENOMEM;
4724 		goto out;
4725 	}
4726 
4727 	for (i = 0; i < map->num_stripes; i++) {
4728 		device = map->stripes[i].dev;
4729 		dev_offset = map->stripes[i].physical;
4730 
4731 		ret = btrfs_update_device(trans, device);
4732 		if (ret)
4733 			goto out;
4734 		ret = btrfs_alloc_dev_extent(trans, device,
4735 					     chunk_root->root_key.objectid,
4736 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4737 					     chunk_offset, dev_offset,
4738 					     stripe_size);
4739 		if (ret)
4740 			goto out;
4741 	}
4742 
4743 	stripe = &chunk->stripe;
4744 	for (i = 0; i < map->num_stripes; i++) {
4745 		device = map->stripes[i].dev;
4746 		dev_offset = map->stripes[i].physical;
4747 
4748 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4749 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4750 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4751 		stripe++;
4752 	}
4753 
4754 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4755 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4756 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4757 	btrfs_set_stack_chunk_type(chunk, map->type);
4758 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4759 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4760 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4761 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4762 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4763 
4764 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4765 	key.type = BTRFS_CHUNK_ITEM_KEY;
4766 	key.offset = chunk_offset;
4767 
4768 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4769 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4770 		/*
4771 		 * TODO: Cleanup of inserted chunk root in case of
4772 		 * failure.
4773 		 */
4774 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4775 					     item_size);
4776 	}
4777 
4778 out:
4779 	kfree(chunk);
4780 	free_extent_map(em);
4781 	return ret;
4782 }
4783 
4784 /*
4785  * Chunk allocation falls into two parts. The first part does the work
4786  * that makes the newly allocated chunk usable, but does not do any
4787  * operation that modifies the chunk tree. The second part does the work
4788  * that requires modifying the chunk tree. This division is important for
4789  * the bootstrap process of adding storage to a seed btrfs.
4790  */
4791 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4792 		      struct btrfs_root *extent_root, u64 type)
4793 {
4794 	u64 chunk_offset;
4795 
4796 	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4797 	chunk_offset = find_next_chunk(extent_root->fs_info);
4798 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4799 }
4800 
4801 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4802 					 struct btrfs_root *root,
4803 					 struct btrfs_device *device)
4804 {
4805 	u64 chunk_offset;
4806 	u64 sys_chunk_offset;
4807 	u64 alloc_profile;
4808 	struct btrfs_fs_info *fs_info = root->fs_info;
4809 	struct btrfs_root *extent_root = fs_info->extent_root;
4810 	int ret;
4811 
4812 	chunk_offset = find_next_chunk(fs_info);
4813 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4814 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4815 				  alloc_profile);
4816 	if (ret)
4817 		return ret;
4818 
4819 	sys_chunk_offset = find_next_chunk(root->fs_info);
4820 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4821 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4822 				  alloc_profile);
4823 	return ret;
4824 }
4825 
4826 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4827 {
4828 	int max_errors;
4829 
4830 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4831 			 BTRFS_BLOCK_GROUP_RAID10 |
4832 			 BTRFS_BLOCK_GROUP_RAID5 |
4833 			 BTRFS_BLOCK_GROUP_DUP)) {
4834 		max_errors = 1;
4835 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4836 		max_errors = 2;
4837 	} else {
4838 		max_errors = 0;
4839 	}
4840 
4841 	return max_errors;
4842 }
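/*
 * In other words: the number of stripes of a chunk that may fail (be on a
 * missing device or hit a write error) while the chunk stays usable is one
 * for RAID1/RAID10/RAID5/DUP, two for RAID6 and zero for RAID0/single.
 */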
4843 
4844 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4845 {
4846 	struct extent_map *em;
4847 	struct map_lookup *map;
4848 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4849 	int readonly = 0;
4850 	int miss_ndevs = 0;
4851 	int i;
4852 
4853 	read_lock(&map_tree->map_tree.lock);
4854 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4855 	read_unlock(&map_tree->map_tree.lock);
4856 	if (!em)
4857 		return 1;
4858 
4859 	map = (struct map_lookup *)em->bdev;
4860 	for (i = 0; i < map->num_stripes; i++) {
4861 		if (map->stripes[i].dev->missing) {
4862 			miss_ndevs++;
4863 			continue;
4864 		}
4865 
4866 		if (!map->stripes[i].dev->writeable) {
4867 			readonly = 1;
4868 			goto end;
4869 		}
4870 	}
4871 
4872 	/*
4873 	 * If the number of missing devices is larger than max errors,
4874 	 * we cannot write the data into that chunk successfully, so
4875 	 * set it readonly.
4876 	 */
4877 	if (miss_ndevs > btrfs_chunk_max_errors(map))
4878 		readonly = 1;
4879 end:
4880 	free_extent_map(em);
4881 	return readonly;
4882 }
4883 
4884 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4885 {
4886 	extent_map_tree_init(&tree->map_tree);
4887 }
4888 
4889 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4890 {
4891 	struct extent_map *em;
4892 
4893 	while (1) {
4894 		write_lock(&tree->map_tree.lock);
4895 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4896 		if (em)
4897 			remove_extent_mapping(&tree->map_tree, em);
4898 		write_unlock(&tree->map_tree.lock);
4899 		if (!em)
4900 			break;
4901 		/* once for us */
4902 		free_extent_map(em);
4903 		/* once for the tree */
4904 		free_extent_map(em);
4905 	}
4906 }
4907 
4908 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4909 {
4910 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4911 	struct extent_map *em;
4912 	struct map_lookup *map;
4913 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4914 	int ret;
4915 
4916 	read_lock(&em_tree->lock);
4917 	em = lookup_extent_mapping(em_tree, logical, len);
4918 	read_unlock(&em_tree->lock);
4919 
4920 	/*
4921 	 * We could return errors for these cases, but that could get ugly and
4922 	 * we'd probably do the same thing anyway, which is to do nothing else
4923 	 * and exit, so return 1 so the callers don't try to use other copies.
4924 	 */
4925 	if (!em) {
4926 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4927 			    logical+len);
4928 		return 1;
4929 	}
4930 
4931 	if (em->start > logical || em->start + em->len < logical) {
4932 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4933 			    "%Lu-%Lu", logical, logical+len, em->start,
4934 			    em->start + em->len);
4935 		free_extent_map(em);
4936 		return 1;
4937 	}
4938 
4939 	map = (struct map_lookup *)em->bdev;
4940 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4941 		ret = map->num_stripes;
4942 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4943 		ret = map->sub_stripes;
4944 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4945 		ret = 2;
4946 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4947 		ret = 3;
4948 	else
4949 		ret = 1;
4950 	free_extent_map(em);
4951 
4952 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4953 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4954 		ret++;
4955 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4956 
4957 	return ret;
4958 }
4959 
4960 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4961 				    struct btrfs_mapping_tree *map_tree,
4962 				    u64 logical)
4963 {
4964 	struct extent_map *em;
4965 	struct map_lookup *map;
4966 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4967 	unsigned long len = root->sectorsize;
4968 
4969 	read_lock(&em_tree->lock);
4970 	em = lookup_extent_mapping(em_tree, logical, len);
4971 	read_unlock(&em_tree->lock);
4972 	BUG_ON(!em);
4973 
4974 	BUG_ON(em->start > logical || em->start + em->len < logical);
4975 	map = (struct map_lookup *)em->bdev;
4976 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
4977 		len = map->stripe_len * nr_data_stripes(map);
4978 	free_extent_map(em);
4979 	return len;
4980 }
4981 
4982 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4983 			   u64 logical, u64 len, int mirror_num)
4984 {
4985 	struct extent_map *em;
4986 	struct map_lookup *map;
4987 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4988 	int ret = 0;
4989 
4990 	read_lock(&em_tree->lock);
4991 	em = lookup_extent_mapping(em_tree, logical, len);
4992 	read_unlock(&em_tree->lock);
4993 	BUG_ON(!em);
4994 
4995 	BUG_ON(em->start > logical || em->start + em->len < logical);
4996 	map = (struct map_lookup *)em->bdev;
4997 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
4998 		ret = 1;
4999 	free_extent_map(em);
5000 	return ret;
5001 }
5002 
5003 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5004 			    struct map_lookup *map, int first, int num,
5005 			    int optimal, int dev_replace_is_ongoing)
5006 {
5007 	int i;
5008 	int tolerance;
5009 	struct btrfs_device *srcdev;
5010 
5011 	if (dev_replace_is_ongoing &&
5012 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5013 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5014 		srcdev = fs_info->dev_replace.srcdev;
5015 	else
5016 		srcdev = NULL;
5017 
5018 	/*
5019 	 * try to avoid the drive that is the source drive for a
5020 	 * dev-replace procedure; only choose it if no other non-missing
5021 	 * mirror is available
5022 	 */
5023 	for (tolerance = 0; tolerance < 2; tolerance++) {
5024 		if (map->stripes[optimal].dev->bdev &&
5025 		    (tolerance || map->stripes[optimal].dev != srcdev))
5026 			return optimal;
5027 		for (i = first; i < first + num; i++) {
5028 			if (map->stripes[i].dev->bdev &&
5029 			    (tolerance || map->stripes[i].dev != srcdev))
5030 				return i;
5031 		}
5032 	}
5033 
5034 	/* we couldn't find one that doesn't fail.  Just return something
5035 	 * and the io error handling code will clean up eventually
5036 	 */
5037 	return optimal;
5038 }
5039 
5040 static inline int parity_smaller(u64 a, u64 b)
5041 {
5042 	return a > b;
5043 }
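/*
 * raid_map tags parity stripes with RAID5_P_STRIPE and RAID6_Q_STRIPE,
 * which are defined as the two largest u64 values, so the "a > b" test
 * above is enough to float P and then Q to the end of the stripe array.
 */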
5044 
5045 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5046 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5047 {
5048 	struct btrfs_bio_stripe s;
5049 	int i;
5050 	u64 l;
5051 	int again = 1;
5052 
5053 	while (again) {
5054 		again = 0;
5055 		for (i = 0; i < num_stripes - 1; i++) {
5056 			if (parity_smaller(bbio->raid_map[i],
5057 					   bbio->raid_map[i+1])) {
5058 				s = bbio->stripes[i];
5059 				l = bbio->raid_map[i];
5060 				bbio->stripes[i] = bbio->stripes[i+1];
5061 				bbio->raid_map[i] = bbio->raid_map[i+1];
5062 				bbio->stripes[i+1] = s;
5063 				bbio->raid_map[i+1] = l;
5064 
5065 				again = 1;
5066 			}
5067 		}
5068 	}
5069 }
5070 
5071 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5072 {
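	/*
	 * Everything lives in one allocation: the btrfs_bio header is
	 * followed in memory by the stripes array, then the tgtdev_map
	 * ints, then the raid_map u64s. __btrfs_map_block() later carves
	 * those tails out with pointer arithmetic from bbio->stripes.
	 */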
5073 	struct btrfs_bio *bbio = kzalloc(
5074 		 /* the size of the btrfs_bio */
5075 		sizeof(struct btrfs_bio) +
5076 		/* plus the variable array for the stripes */
5077 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5078 		/* plus the variable array for the tgt dev */
5079 		sizeof(int) * (real_stripes) +
5080 		/*
5081 		 * plus the raid_map, which includes both the tgt dev
5082 		 * and the stripes
5083 		 */
5084 		sizeof(u64) * (total_stripes),
5085 		GFP_NOFS|__GFP_NOFAIL);
5086 
5087 	atomic_set(&bbio->error, 0);
5088 	atomic_set(&bbio->refs, 1);
5089 
5090 	return bbio;
5091 }
5092 
5093 void btrfs_get_bbio(struct btrfs_bio *bbio)
5094 {
5095 	WARN_ON(!atomic_read(&bbio->refs));
5096 	atomic_inc(&bbio->refs);
5097 }
5098 
5099 void btrfs_put_bbio(struct btrfs_bio *bbio)
5100 {
5101 	if (!bbio)
5102 		return;
5103 	if (atomic_dec_and_test(&bbio->refs))
5104 		kfree(bbio);
5105 }
5106 
5107 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5108 			     u64 logical, u64 *length,
5109 			     struct btrfs_bio **bbio_ret,
5110 			     int mirror_num, int need_raid_map)
5111 {
5112 	struct extent_map *em;
5113 	struct map_lookup *map;
5114 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5115 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5116 	u64 offset;
5117 	u64 stripe_offset;
5118 	u64 stripe_end_offset;
5119 	u64 stripe_nr;
5120 	u64 stripe_nr_orig;
5121 	u64 stripe_nr_end;
5122 	u64 stripe_len;
5123 	u32 stripe_index;
5124 	int i;
5125 	int ret = 0;
5126 	int num_stripes;
5127 	int max_errors = 0;
5128 	int tgtdev_indexes = 0;
5129 	struct btrfs_bio *bbio = NULL;
5130 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5131 	int dev_replace_is_ongoing = 0;
5132 	int num_alloc_stripes;
5133 	int patch_the_first_stripe_for_dev_replace = 0;
5134 	u64 physical_to_patch_in_first_stripe = 0;
5135 	u64 raid56_full_stripe_start = (u64)-1;
5136 
5137 	read_lock(&em_tree->lock);
5138 	em = lookup_extent_mapping(em_tree, logical, *length);
5139 	read_unlock(&em_tree->lock);
5140 
5141 	if (!em) {
5142 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5143 			logical, *length);
5144 		return -EINVAL;
5145 	}
5146 
5147 	if (em->start > logical || em->start + em->len < logical) {
5148 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
5149 			   "found %Lu-%Lu", logical, em->start,
5150 			   em->start + em->len);
5151 		free_extent_map(em);
5152 		return -EINVAL;
5153 	}
5154 
5155 	map = (struct map_lookup *)em->bdev;
5156 	offset = logical - em->start;
5157 
5158 	stripe_len = map->stripe_len;
5159 	stripe_nr = offset;
5160 	/*
5161 	 * stripe_nr counts the total number of stripes we have to stride
5162 	 * to get to this block
5163 	 */
5164 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5165 
5166 	stripe_offset = stripe_nr * stripe_len;
5167 	BUG_ON(offset < stripe_offset);
5168 
5169 	/* stripe_offset is the offset of this block in its stripe */
5170 	stripe_offset = offset - stripe_offset;
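	/*
	 * e.g. with a (hypothetical) 64K stripe_len, offset 150K yields
	 * stripe_nr = 2 and stripe_offset = 22K into that stripe.
	 */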
5171 
5172 	/* if we're here for raid56, we need to know the stripe aligned start */
5173 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5174 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5175 		raid56_full_stripe_start = offset;
5176 
5177 		/* allow a write of a full stripe, but make sure we don't
5178 		 * allow straddling of stripes
5179 		 */
5180 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5181 				full_stripe_len);
5182 		raid56_full_stripe_start *= full_stripe_len;
5183 	}
5184 
5185 	if (rw & REQ_DISCARD) {
5186 		/* we don't discard raid56 yet */
5187 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5188 			ret = -EOPNOTSUPP;
5189 			goto out;
5190 		}
5191 		*length = min_t(u64, em->len - offset, *length);
5192 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5193 		u64 max_len;
5194 		/* For writes to RAID[56], allow a full stripeset across all disks.
5195 		   For other RAID types and for RAID[56] reads, just allow a single
5196 		   stripe (on a single disk). */
5197 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5198 		    (rw & REQ_WRITE)) {
5199 			max_len = stripe_len * nr_data_stripes(map) -
5200 				(offset - raid56_full_stripe_start);
5201 		} else {
5202 			/* we limit the length of each bio to what fits in a stripe */
5203 			max_len = stripe_len - stripe_offset;
5204 		}
5205 		*length = min_t(u64, em->len - offset, max_len);
5206 	} else {
5207 		*length = em->len - offset;
5208 	}
5209 
5210 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5211 	   it cares about is the length */
5212 	if (!bbio_ret)
5213 		goto out;
5214 
5215 	btrfs_dev_replace_lock(dev_replace);
5216 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5217 	if (!dev_replace_is_ongoing)
5218 		btrfs_dev_replace_unlock(dev_replace);
5219 
5220 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5221 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5222 	    dev_replace->tgtdev != NULL) {
5223 		/*
5224 		 * in dev-replace case, for repair case (that's the only
5225 		 * case where the mirror is selected explicitly when
5226 		 * calling btrfs_map_block), blocks left of the left cursor
5227 		 * can also be read from the target drive.
5228 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5229 		 * the last one to the array of stripes. For READ, it also
5230 		 * needs to be supported using the same mirror number.
5231 		 * If the requested block is not left of the left cursor,
5232 		 * EIO is returned. This can happen because btrfs_num_copies()
5233 		 * returns one more in the dev-replace case.
5234 		 */
5235 		u64 tmp_length = *length;
5236 		struct btrfs_bio *tmp_bbio = NULL;
5237 		int tmp_num_stripes;
5238 		u64 srcdev_devid = dev_replace->srcdev->devid;
5239 		int index_srcdev = 0;
5240 		int found = 0;
5241 		u64 physical_of_found = 0;
5242 
5243 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5244 			     logical, &tmp_length, &tmp_bbio, 0, 0);
5245 		if (ret) {
5246 			WARN_ON(tmp_bbio != NULL);
5247 			goto out;
5248 		}
5249 
5250 		tmp_num_stripes = tmp_bbio->num_stripes;
5251 		if (mirror_num > tmp_num_stripes) {
5252 			/*
5253 			 * REQ_GET_READ_MIRRORS does not contain this
5254 			 * mirror, which means that the requested area
5255 			 * is not left of the left cursor
5256 			 */
5257 			ret = -EIO;
5258 			btrfs_put_bbio(tmp_bbio);
5259 			goto out;
5260 		}
5261 
5262 		/*
5263 		 * process the rest of the function using the mirror_num
5264 		 * of the source drive. Therefore look it up first.
5265 		 * At the end, patch the device pointer to the one of the
5266 		 * target drive.
5267 		 */
5268 		for (i = 0; i < tmp_num_stripes; i++) {
5269 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
5270 				/*
5271 				 * In case of DUP, in order to keep it
5272 				 * simple, only add the mirror with the
5273 				 * lowest physical address
5274 				 */
5275 				if (found &&
5276 				    physical_of_found <=
5277 				     tmp_bbio->stripes[i].physical)
5278 					continue;
5279 				index_srcdev = i;
5280 				found = 1;
5281 				physical_of_found =
5282 					tmp_bbio->stripes[i].physical;
5283 			}
5284 		}
5285 
5286 		if (found) {
5287 			mirror_num = index_srcdev + 1;
5288 			patch_the_first_stripe_for_dev_replace = 1;
5289 			physical_to_patch_in_first_stripe = physical_of_found;
5290 		} else {
5291 			WARN_ON(1);
5292 			ret = -EIO;
5293 			btrfs_put_bbio(tmp_bbio);
5294 			goto out;
5295 		}
5296 
5297 		btrfs_put_bbio(tmp_bbio);
5298 	} else if (mirror_num > map->num_stripes) {
5299 		mirror_num = 0;
5300 	}
5301 
5302 	num_stripes = 1;
5303 	stripe_index = 0;
5304 	stripe_nr_orig = stripe_nr;
5305 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5306 	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5307 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5308 			    (offset + *length);
5309 
5310 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5311 		if (rw & REQ_DISCARD)
5312 			num_stripes = min_t(u64, map->num_stripes,
5313 					    stripe_nr_end - stripe_nr_orig);
5314 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5315 				&stripe_index);
5316 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5317 			mirror_num = 1;
5318 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5319 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5320 			num_stripes = map->num_stripes;
5321 		else if (mirror_num)
5322 			stripe_index = mirror_num - 1;
5323 		else {
5324 			stripe_index = find_live_mirror(fs_info, map, 0,
5325 					    map->num_stripes,
5326 					    current->pid % map->num_stripes,
5327 					    dev_replace_is_ongoing);
5328 			mirror_num = stripe_index + 1;
5329 		}
5330 
5331 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5332 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5333 			num_stripes = map->num_stripes;
5334 		} else if (mirror_num) {
5335 			stripe_index = mirror_num - 1;
5336 		} else {
5337 			mirror_num = 1;
5338 		}
5339 
5340 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5341 		u32 factor = map->num_stripes / map->sub_stripes;
5342 
5343 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5344 		stripe_index *= map->sub_stripes;
5345 
5346 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5347 			num_stripes = map->sub_stripes;
5348 		else if (rw & REQ_DISCARD)
5349 			num_stripes = min_t(u64, map->sub_stripes *
5350 					    (stripe_nr_end - stripe_nr_orig),
5351 					    map->num_stripes);
5352 		else if (mirror_num)
5353 			stripe_index += mirror_num - 1;
5354 		else {
5355 			int old_stripe_index = stripe_index;
5356 			stripe_index = find_live_mirror(fs_info, map,
5357 					      stripe_index,
5358 					      map->sub_stripes, stripe_index +
5359 					      current->pid % map->sub_stripes,
5360 					      dev_replace_is_ongoing);
5361 			mirror_num = stripe_index - old_stripe_index + 1;
5362 		}
5363 
5364 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5365 		if (need_raid_map &&
5366 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5367 		     mirror_num > 1)) {
5368 			/* push stripe_nr back to the start of the full stripe */
5369 			stripe_nr = div_u64(raid56_full_stripe_start,
5370 					stripe_len * nr_data_stripes(map));
5371 
5372 			/* RAID[56] write or recovery. Return all stripes */
5373 			num_stripes = map->num_stripes;
5374 			max_errors = nr_parity_stripes(map);
5375 
5376 			*length = map->stripe_len;
5377 			stripe_index = 0;
5378 			stripe_offset = 0;
5379 		} else {
5380 			/*
5381 			 * Mirror #0 or #1 means the original data block.
5382 			 * Mirror #2 is RAID5 parity block.
5383 			 * Mirror #3 is RAID6 Q block.
5384 			 */
5385 			stripe_nr = div_u64_rem(stripe_nr,
5386 					nr_data_stripes(map), &stripe_index);
5387 			if (mirror_num > 1)
5388 				stripe_index = nr_data_stripes(map) +
5389 						mirror_num - 2;
5390 
5391 			/* We distribute the parity blocks across stripes */
5392 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5393 					&stripe_index);
5394 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5395 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5396 				mirror_num = 1;
5397 		}
5398 	} else {
5399 		/*
5400 		 * after this, stripe_nr is the number of stripes on this
5401 		 * device we have to walk to find the data, and stripe_index is
5402 		 * the number of our device in the stripe array
5403 		 */
5404 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5405 				&stripe_index);
5406 		mirror_num = stripe_index + 1;
5407 	}
5408 	BUG_ON(stripe_index >= map->num_stripes);
5409 
5410 	num_alloc_stripes = num_stripes;
5411 	if (dev_replace_is_ongoing) {
5412 		if (rw & (REQ_WRITE | REQ_DISCARD))
5413 			num_alloc_stripes <<= 1;
5414 		if (rw & REQ_GET_READ_MIRRORS)
5415 			num_alloc_stripes++;
5416 		tgtdev_indexes = num_stripes;
5417 	}
5418 
5419 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5420 	if (!bbio) {
5421 		ret = -ENOMEM;
5422 		goto out;
5423 	}
5424 	if (dev_replace_is_ongoing)
5425 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5426 
5427 	/* build raid_map */
5428 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5429 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5430 	    mirror_num > 1)) {
5431 		u64 tmp;
5432 		unsigned rot;
5433 
5434 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5435 				 sizeof(struct btrfs_bio_stripe) *
5436 				 num_alloc_stripes +
5437 				 sizeof(int) * tgtdev_indexes);
5438 
5439 		/* Work out the disk rotation on this stripe-set */
5440 		div_u64_rem(stripe_nr, num_stripes, &rot);
5441 
5442 		/* Fill in the logical address of each stripe */
5443 		tmp = stripe_nr * nr_data_stripes(map);
5444 		for (i = 0; i < nr_data_stripes(map); i++)
5445 			bbio->raid_map[(i+rot) % num_stripes] =
5446 				em->start + (tmp + i) * map->stripe_len;
5447 
5448 		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5449 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5450 			bbio->raid_map[(i+rot+1) % num_stripes] =
5451 				RAID6_Q_STRIPE;
5452 	}
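	/*
	 * Rotation example (hypothetical): a 3-device RAID5 full-stripe
	 * write with stripe_nr=1 gives rot=1, so raid_map ends up as
	 * { P, data0, data1 } relative to the stripes array; the parity
	 * slot shifts by one on each consecutive stripe-set, and
	 * sort_parity_stripes() below reorders the stripes accordingly.
	 */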
5453 
5454 	if (rw & REQ_DISCARD) {
5455 		u32 factor = 0;
5456 		u32 sub_stripes = 0;
5457 		u64 stripes_per_dev = 0;
5458 		u32 remaining_stripes = 0;
5459 		u32 last_stripe = 0;
5460 
5461 		if (map->type &
5462 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5463 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5464 				sub_stripes = 1;
5465 			else
5466 				sub_stripes = map->sub_stripes;
5467 
5468 			factor = map->num_stripes / sub_stripes;
5469 			stripes_per_dev = div_u64_rem(stripe_nr_end -
5470 						      stripe_nr_orig,
5471 						      factor,
5472 						      &remaining_stripes);
5473 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5474 			last_stripe *= sub_stripes;
5475 		}
5476 
5477 		for (i = 0; i < num_stripes; i++) {
5478 			bbio->stripes[i].physical =
5479 				map->stripes[stripe_index].physical +
5480 				stripe_offset + stripe_nr * map->stripe_len;
5481 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5482 
5483 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5484 					 BTRFS_BLOCK_GROUP_RAID10)) {
5485 				bbio->stripes[i].length = stripes_per_dev *
5486 							  map->stripe_len;
5487 
5488 				if (i / sub_stripes < remaining_stripes)
5489 					bbio->stripes[i].length +=
5490 						map->stripe_len;
5491 
5492 				/*
5493 				 * Special for the first stripe and
5494 				 * the last stripe:
5495 				 *
5496 				 * |-------|...|-------|
5497 				 *     |----------|
5498 				 *    off     end_off
5499 				 */
5500 				if (i < sub_stripes)
5501 					bbio->stripes[i].length -=
5502 						stripe_offset;
5503 
5504 				if (stripe_index >= last_stripe &&
5505 				    stripe_index <= (last_stripe +
5506 						     sub_stripes - 1))
5507 					bbio->stripes[i].length -=
5508 						stripe_end_offset;
5509 
5510 				if (i == sub_stripes - 1)
5511 					stripe_offset = 0;
5512 			} else
5513 				bbio->stripes[i].length = *length;
5514 
5515 			stripe_index++;
5516 			if (stripe_index == map->num_stripes) {
5517 				/* This could only happen for RAID0/10 */
5518 				stripe_index = 0;
5519 				stripe_nr++;
5520 			}
5521 		}
5522 	} else {
5523 		for (i = 0; i < num_stripes; i++) {
5524 			bbio->stripes[i].physical =
5525 				map->stripes[stripe_index].physical +
5526 				stripe_offset +
5527 				stripe_nr * map->stripe_len;
5528 			bbio->stripes[i].dev =
5529 				map->stripes[stripe_index].dev;
5530 			stripe_index++;
5531 		}
5532 	}
5533 
5534 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5535 		max_errors = btrfs_chunk_max_errors(map);
5536 
5537 	if (bbio->raid_map)
5538 		sort_parity_stripes(bbio, num_stripes);
5539 
5540 	tgtdev_indexes = 0;
5541 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5542 	    dev_replace->tgtdev != NULL) {
5543 		int index_where_to_add;
5544 		u64 srcdev_devid = dev_replace->srcdev->devid;
5545 
5546 		/*
5547 		 * duplicate the write operations while the dev replace
5548 		 * procedure is running. Since the copying of the old disk
5549 		 * to the new disk takes place at run time while the
5550 		 * filesystem is mounted writable, the regular write
5551 		 * operations to the old disk have to be duplicated to go
5552 		 * to the new disk as well.
5553 		 * Note that device->missing is handled by the caller, and
5554 		 * that the write to the old disk is already set up in the
5555 		 * stripes array.
5556 		 */
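		/*
		 * Example (sketch): a RAID1 write whose stripe 0 sits on
		 * the source device enters with num_stripes == 2; the loop
		 * below appends stripe 2 as a copy of stripe 0 pointed at
		 * dev_replace->tgtdev, records tgtdev_map[0] = 2 and bumps
		 * max_errors so the extra write may fail on its own.
		 */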
5557 		index_where_to_add = num_stripes;
5558 		for (i = 0; i < num_stripes; i++) {
5559 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5560 				/* write to new disk, too */
5561 				struct btrfs_bio_stripe *new =
5562 					bbio->stripes + index_where_to_add;
5563 				struct btrfs_bio_stripe *old =
5564 					bbio->stripes + i;
5565 
5566 				new->physical = old->physical;
5567 				new->length = old->length;
5568 				new->dev = dev_replace->tgtdev;
5569 				bbio->tgtdev_map[i] = index_where_to_add;
5570 				index_where_to_add++;
5571 				max_errors++;
5572 				tgtdev_indexes++;
5573 			}
5574 		}
5575 		num_stripes = index_where_to_add;
5576 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5577 		   dev_replace->tgtdev != NULL) {
5578 		u64 srcdev_devid = dev_replace->srcdev->devid;
5579 		int index_srcdev = 0;
5580 		int found = 0;
5581 		u64 physical_of_found = 0;
5582 
5583 		/*
5584 		 * During the dev-replace procedure, the target drive can
5585 		 * also be used to read data in case it is needed to repair
5586 		 * a corrupt block elsewhere. This is possible if the
5587 		 * requested area is left of the left cursor. In this area,
5588 		 * the target drive is a full copy of the source drive.
5589 		 */
5590 		for (i = 0; i < num_stripes; i++) {
5591 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5592 				/*
5593 				 * In case of DUP, in order to keep it
5594 				 * simple, only add the mirror with the
5595 				 * lowest physical address
5596 				 */
5597 				if (found &&
5598 				    physical_of_found <=
5599 				     bbio->stripes[i].physical)
5600 					continue;
5601 				index_srcdev = i;
5602 				found = 1;
5603 				physical_of_found = bbio->stripes[i].physical;
5604 			}
5605 		}
5606 		if (found) {
5607 			if (physical_of_found + map->stripe_len <=
5608 			    dev_replace->cursor_left) {
5609 				struct btrfs_bio_stripe *tgtdev_stripe =
5610 					bbio->stripes + num_stripes;
5611 
5612 				tgtdev_stripe->physical = physical_of_found;
5613 				tgtdev_stripe->length =
5614 					bbio->stripes[index_srcdev].length;
5615 				tgtdev_stripe->dev = dev_replace->tgtdev;
5616 				bbio->tgtdev_map[index_srcdev] = num_stripes;
5617 
5618 				tgtdev_indexes++;
5619 				num_stripes++;
5620 			}
5621 		}
5622 	}
5623 
5624 	*bbio_ret = bbio;
5625 	bbio->map_type = map->type;
5626 	bbio->num_stripes = num_stripes;
5627 	bbio->max_errors = max_errors;
5628 	bbio->mirror_num = mirror_num;
5629 	bbio->num_tgtdevs = tgtdev_indexes;
5630 
5631 	/*
5632 	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
5633 	 * mirror_num == num_stripes + 1 and the dev_replace target drive is
5634 	 * available as a mirror.
5635 	 */
5636 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5637 		WARN_ON(num_stripes > 1);
5638 		bbio->stripes[0].dev = dev_replace->tgtdev;
5639 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5640 		bbio->mirror_num = map->num_stripes + 1;
5641 	}
5642 out:
5643 	if (dev_replace_is_ongoing)
5644 		btrfs_dev_replace_unlock(dev_replace);
5645 	free_extent_map(em);
5646 	return ret;
5647 }
5648 
5649 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5650 		      u64 logical, u64 *length,
5651 		      struct btrfs_bio **bbio_ret, int mirror_num)
5652 {
5653 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5654 				 mirror_num, 0);
5655 }
5656 
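/*
 * Typical use of btrfs_map_block() above (a sketch of the calling
 * convention, not a new API): *length is an in/out parameter that comes
 * back clamped to what one mapping covers, and the returned btrfs_bio
 * must be dropped with btrfs_put_bbio():
 *
 *	u64 map_length = length;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &map_length, &bbio, 0);
 *	if (!ret) {
 *		... submit to bbio->stripes[0 .. bbio->num_stripes - 1] ...
 *		btrfs_put_bbio(bbio);
 *	}
 */
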
5657 /* For Scrub/replace */
5658 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5659 		     u64 logical, u64 *length,
5660 		     struct btrfs_bio **bbio_ret, int mirror_num,
5661 		     int need_raid_map)
5662 {
5663 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5664 				 mirror_num, need_raid_map);
5665 }
5666 
5667 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5668 		     u64 chunk_start, u64 physical, u64 devid,
5669 		     u64 **logical, int *naddrs, int *stripe_len)
5670 {
5671 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5672 	struct extent_map *em;
5673 	struct map_lookup *map;
5674 	u64 *buf;
5675 	u64 bytenr;
5676 	u64 length;
5677 	u64 stripe_nr;
5678 	u64 rmap_len;
5679 	int i, j, nr = 0;
5680 
5681 	read_lock(&em_tree->lock);
5682 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5683 	read_unlock(&em_tree->lock);
5684 
5685 	if (!em) {
5686 		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5687 		       chunk_start);
5688 		return -EIO;
5689 	}
5690 
5691 	if (em->start != chunk_start) {
5692 		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5693 		       em->start, chunk_start);
5694 		free_extent_map(em);
5695 		return -EIO;
5696 	}
5697 	map = (struct map_lookup *)em->bdev;
5698 
5699 	length = em->len;
5700 	rmap_len = map->stripe_len;
5701 
5702 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5703 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5704 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5705 		length = div_u64(length, map->num_stripes);
5706 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5707 		length = div_u64(length, nr_data_stripes(map));
5708 		rmap_len = map->stripe_len * nr_data_stripes(map);
5709 	}
5710 
5711 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5712 	BUG_ON(!buf); /* -ENOMEM */
5713 
5714 	for (i = 0; i < map->num_stripes; i++) {
5715 		if (devid && map->stripes[i].dev->devid != devid)
5716 			continue;
5717 		if (map->stripes[i].physical > physical ||
5718 		    map->stripes[i].physical + length <= physical)
5719 			continue;
5720 
5721 		stripe_nr = physical - map->stripes[i].physical;
5722 		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5723 
5724 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5725 			stripe_nr = stripe_nr * map->num_stripes + i;
5726 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5727 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5728 			stripe_nr = stripe_nr * map->num_stripes + i;
5729 		} /* else if RAID[56], multiply by nr_data_stripes().
5730 		   * Alternatively, just use rmap_len below instead of
5731 		   * map->stripe_len */
5732 
5733 		bytenr = chunk_start + stripe_nr * rmap_len;
5734 		WARN_ON(nr >= map->num_stripes);
5735 		for (j = 0; j < nr; j++) {
5736 			if (buf[j] == bytenr)
5737 				break;
5738 		}
5739 		if (j == nr) {
5740 			WARN_ON(nr >= map->num_stripes);
5741 			buf[nr++] = bytenr;
5742 		}
5743 	}
5744 
5745 	*logical = buf;
5746 	*naddrs = nr;
5747 	*stripe_len = rmap_len;
5748 
5749 	free_extent_map(em);
5750 	return 0;
5751 }
5752 
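/*
 * Example for btrfs_rmap_block() above (sketch): for a RAID0 chunk with
 * two stripes, a physical byte inside stripe i maps back to
 *
 *	stripe_nr = (physical - stripes[i].physical) / stripe_len
 *	bytenr    = chunk_start + (stripe_nr * 2 + i) * stripe_len
 *
 * Mirrored profiles (RAID1/DUP) can produce the same bytenr from
 * different stripes, which the duplicate scan over buf[] filters out.
 */
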
5753 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5754 {
5755 	bio->bi_private = bbio->private;
5756 	bio->bi_end_io = bbio->end_io;
5757 	bio_endio(bio);
5758 
5759 	btrfs_put_bbio(bbio);
5760 }
5761 
5762 static void btrfs_end_bio(struct bio *bio)
5763 {
5764 	struct btrfs_bio *bbio = bio->bi_private;
5765 	int is_orig_bio = 0;
5766 
5767 	if (bio->bi_error) {
5768 		atomic_inc(&bbio->error);
5769 		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5770 			unsigned int stripe_index =
5771 				btrfs_io_bio(bio)->stripe_index;
5772 			struct btrfs_device *dev;
5773 
5774 			BUG_ON(stripe_index >= bbio->num_stripes);
5775 			dev = bbio->stripes[stripe_index].dev;
5776 			if (dev->bdev) {
5777 				if (bio->bi_rw & WRITE)
5778 					btrfs_dev_stat_inc(dev,
5779 						BTRFS_DEV_STAT_WRITE_ERRS);
5780 				else
5781 					btrfs_dev_stat_inc(dev,
5782 						BTRFS_DEV_STAT_READ_ERRS);
5783 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5784 					btrfs_dev_stat_inc(dev,
5785 						BTRFS_DEV_STAT_FLUSH_ERRS);
5786 				btrfs_dev_stat_print_on_error(dev);
5787 			}
5788 		}
5789 	}
5790 
5791 	if (bio == bbio->orig_bio)
5792 		is_orig_bio = 1;
5793 
5794 	btrfs_bio_counter_dec(bbio->fs_info);
5795 
5796 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5797 		if (!is_orig_bio) {
5798 			bio_put(bio);
5799 			bio = bbio->orig_bio;
5800 		}
5801 
5802 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5803 		/* only send an error to the higher layers if it is
5804 		 * beyond the tolerance of the btrfs bio
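		 * (e.g. a two-mirror RAID1 write has max_errors == 1, so a
		 * single failed mirror still completes the bio successfully)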
5805 		 */
5806 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5807 			bio->bi_error = -EIO;
5808 		} else {
5809 			/*
5810 			 * this bio is actually up to date, we didn't
5811 			 * go over the max number of errors
5812 			 */
5813 			bio->bi_error = 0;
5814 		}
5815 
5816 		btrfs_end_bbio(bbio, bio);
5817 	} else if (!is_orig_bio) {
5818 		bio_put(bio);
5819 	}
5820 }
5821 
5822 /*
5823  * see run_scheduled_bios for a description of why bios are collected for
5824  * async submit.
5825  *
5826  * This will add one bio to the pending list for a device and make sure
5827  * the work struct is scheduled.
5828  */
5829 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5830 					struct btrfs_device *device,
5831 					int rw, struct bio *bio)
5832 {
5833 	int should_queue = 1;
5834 	struct btrfs_pending_bios *pending_bios;
5835 
5836 	if (device->missing || !device->bdev) {
5837 		bio_io_error(bio);
5838 		return;
5839 	}
5840 
5841 	/* don't bother with additional async steps for reads, right now */
5842 	if (!(rw & REQ_WRITE)) {
5843 		bio_get(bio);
5844 		btrfsic_submit_bio(rw, bio);
5845 		bio_put(bio);
5846 		return;
5847 	}
5848 
5849 	/*
5850 	 * nr_async_bios allows us to reliably return congestion to the
5851 	 * higher layers.  Otherwise, the async bio makes it appear we have
5852 	 * made progress against dirty pages when we've really just put it
5853 	 * on a queue for later
5854 	 */
5855 	atomic_inc(&root->fs_info->nr_async_bios);
5856 	WARN_ON(bio->bi_next);
5857 	bio->bi_next = NULL;
5858 	bio->bi_rw |= rw;
5859 
5860 	spin_lock(&device->io_lock);
5861 	if (bio->bi_rw & REQ_SYNC)
5862 		pending_bios = &device->pending_sync_bios;
5863 	else
5864 		pending_bios = &device->pending_bios;
5865 
5866 	if (pending_bios->tail)
5867 		pending_bios->tail->bi_next = bio;
5868 
5869 	pending_bios->tail = bio;
5870 	if (!pending_bios->head)
5871 		pending_bios->head = bio;
5872 	if (device->running_pending)
5873 		should_queue = 0;
5874 
5875 	spin_unlock(&device->io_lock);
5876 
5877 	if (should_queue)
5878 		btrfs_queue_work(root->fs_info->submit_workers,
5879 				 &device->work);
5880 }
5881 
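/*
 * Note on btrfs_schedule_bio() above: the pending lists are singly
 * linked through bio->bi_next with head/tail pointers, and the worker
 * is only queued when the device is not already running_pending, so a
 * burst of async writes is drained in one pass of run_scheduled_bios().
 */
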
5882 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5883 			      struct bio *bio, u64 physical, int dev_nr,
5884 			      int rw, int async)
5885 {
5886 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5887 
5888 	bio->bi_private = bbio;
5889 	btrfs_io_bio(bio)->stripe_index = dev_nr;
5890 	bio->bi_end_io = btrfs_end_bio;
5891 	bio->bi_iter.bi_sector = physical >> 9;
5892 #ifdef DEBUG
5893 	{
5894 		struct rcu_string *name;
5895 
5896 		rcu_read_lock();
5897 		name = rcu_dereference(dev->name);
5898 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5899 			 "(%s id %llu), size=%u\n", rw,
5900 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
5901 			 name->str, dev->devid, bio->bi_iter.bi_size);
5902 		rcu_read_unlock();
5903 	}
5904 #endif
5905 	bio->bi_bdev = dev->bdev;
5906 
5907 	btrfs_bio_counter_inc_noblocked(root->fs_info);
5908 
5909 	if (async)
5910 		btrfs_schedule_bio(root, dev, rw, bio);
5911 	else
5912 		btrfsic_submit_bio(rw, bio);
5913 }
5914 
5915 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5916 {
5917 	atomic_inc(&bbio->error);
5918 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5919 		/* Should be the original bio. */
5920 		WARN_ON(bio != bbio->orig_bio);
5921 
5922 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5923 		bio->bi_iter.bi_sector = logical >> 9;
5924 		bio->bi_error = -EIO;
5925 		btrfs_end_bbio(bbio, bio);
5926 	}
5927 }
5928 
5929 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5930 		  int mirror_num, int async_submit)
5931 {
5932 	struct btrfs_device *dev;
5933 	struct bio *first_bio = bio;
5934 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5935 	u64 length = 0;
5936 	u64 map_length;
5937 	int ret;
5938 	int dev_nr;
5939 	int total_devs;
5940 	struct btrfs_bio *bbio = NULL;
5941 
5942 	length = bio->bi_iter.bi_size;
5943 	map_length = length;
5944 
5945 	btrfs_bio_counter_inc_blocked(root->fs_info);
5946 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5947 			      mirror_num, 1);
5948 	if (ret) {
5949 		btrfs_bio_counter_dec(root->fs_info);
5950 		return ret;
5951 	}
5952 
5953 	total_devs = bbio->num_stripes;
5954 	bbio->orig_bio = first_bio;
5955 	bbio->private = first_bio->bi_private;
5956 	bbio->end_io = first_bio->bi_end_io;
5957 	bbio->fs_info = root->fs_info;
5958 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5959 
5960 	if (bbio->raid_map) {
5961 		/* In this case, map_length has been set to the length of
5962 		   a single stripe, not the whole write */
5963 		if (rw & WRITE) {
5964 			ret = raid56_parity_write(root, bio, bbio, map_length);
5965 		} else {
5966 			ret = raid56_parity_recover(root, bio, bbio, map_length,
5967 						    mirror_num, 1);
5968 		}
5969 
5970 		btrfs_bio_counter_dec(root->fs_info);
5971 		return ret;
5972 	}
5973 
5974 	if (map_length < length) {
5975 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5976 			logical, length, map_length);
5977 		BUG();
5978 	}
5979 
5980 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
5981 		dev = bbio->stripes[dev_nr].dev;
5982 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5983 			bbio_error(bbio, first_bio, logical);
5984 			continue;
5985 		}
5986 
5987 		if (dev_nr < total_devs - 1) {
5988 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5989 			BUG_ON(!bio); /* -ENOMEM */
5990 		} else
5991 			bio = first_bio;
5992 
5993 		submit_stripe_bio(root, bbio, bio,
5994 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5995 				  async_submit);
5996 	}
5997 	btrfs_bio_counter_dec(root->fs_info);
5998 	return 0;
5999 }
6000 
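/*
 * Fan-out example for btrfs_map_bio() above (sketch): a RAID1 write
 * maps to total_devs == 2, so dev_nr 0 submits a clone of first_bio and
 * dev_nr 1 submits first_bio itself; the original bio only completes
 * once stripes_pending drops to zero in btrfs_end_bio().
 */
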
6001 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6002 				       u8 *uuid, u8 *fsid)
6003 {
6004 	struct btrfs_device *device;
6005 	struct btrfs_fs_devices *cur_devices;
6006 
6007 	cur_devices = fs_info->fs_devices;
6008 	while (cur_devices) {
6009 		if (!fsid ||
6010 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6011 			device = __find_device(&cur_devices->devices,
6012 					       devid, uuid);
6013 			if (device)
6014 				return device;
6015 		}
6016 		cur_devices = cur_devices->seed;
6017 	}
6018 	return NULL;
6019 }
6020 
6021 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6022 					    struct btrfs_fs_devices *fs_devices,
6023 					    u64 devid, u8 *dev_uuid)
6024 {
6025 	struct btrfs_device *device;
6026 
6027 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6028 	if (IS_ERR(device))
6029 		return NULL;
6030 
6031 	list_add(&device->dev_list, &fs_devices->devices);
6032 	device->fs_devices = fs_devices;
6033 	fs_devices->num_devices++;
6034 
6035 	device->missing = 1;
6036 	fs_devices->missing_devices++;
6037 
6038 	return device;
6039 }
6040 
6041 /**
6042  * btrfs_alloc_device - allocate struct btrfs_device
6043  * @fs_info:	used only for generating a new devid, can be NULL if
6044  *		devid is provided (i.e. @devid != NULL).
6045  * @devid:	a pointer to devid for this device.  If NULL a new devid
6046  *		is generated.
6047  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6048  *		is generated.
6049  *
6050  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6051  * on error.  Returned struct is not linked onto any lists and can be
6052  * destroyed with kfree() right away.
6053  */
6054 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6055 					const u64 *devid,
6056 					const u8 *uuid)
6057 {
6058 	struct btrfs_device *dev;
6059 	u64 tmp;
6060 
6061 	if (WARN_ON(!devid && !fs_info))
6062 		return ERR_PTR(-EINVAL);
6063 
6064 	dev = __alloc_device();
6065 	if (IS_ERR(dev))
6066 		return dev;
6067 
6068 	if (devid)
6069 		tmp = *devid;
6070 	else {
6071 		int ret;
6072 
6073 		ret = find_next_devid(fs_info, &tmp);
6074 		if (ret) {
6075 			kfree(dev);
6076 			return ERR_PTR(ret);
6077 		}
6078 	}
6079 	dev->devid = tmp;
6080 
6081 	if (uuid)
6082 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6083 	else
6084 		generate_random_uuid(dev->uuid);
6085 
6086 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6087 			pending_bios_fn, NULL, NULL);
6088 
6089 	return dev;
6090 }
6091 
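/*
 * Usage sketch for btrfs_alloc_device() above, mirroring what
 * add_missing_dev() does:
 *
 *	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
 *	if (IS_ERR(device))
 *		return NULL;
 *	list_add(&device->dev_list, &fs_devices->devices);
 *	device->fs_devices = fs_devices;
 */
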
6092 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6093 			  struct extent_buffer *leaf,
6094 			  struct btrfs_chunk *chunk)
6095 {
6096 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6097 	struct map_lookup *map;
6098 	struct extent_map *em;
6099 	u64 logical;
6100 	u64 length;
6101 	u64 devid;
6102 	u8 uuid[BTRFS_UUID_SIZE];
6103 	int num_stripes;
6104 	int ret;
6105 	int i;
6106 
6107 	logical = key->offset;
6108 	length = btrfs_chunk_length(leaf, chunk);
6109 
6110 	read_lock(&map_tree->map_tree.lock);
6111 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6112 	read_unlock(&map_tree->map_tree.lock);
6113 
6114 	/* already mapped? */
6115 	if (em && em->start <= logical && em->start + em->len > logical) {
6116 		free_extent_map(em);
6117 		return 0;
6118 	} else if (em) {
6119 		free_extent_map(em);
6120 	}
6121 
6122 	em = alloc_extent_map();
6123 	if (!em)
6124 		return -ENOMEM;
6125 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6126 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6127 	if (!map) {
6128 		free_extent_map(em);
6129 		return -ENOMEM;
6130 	}
6131 
6132 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
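	/*
	 * For chunk mappings, em->bdev is (ab)used to carry the
	 * struct map_lookup pointer; see the matching casts in
	 * btrfs_rmap_block() and btrfs_update_commit_device_bytes_used().
	 */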
6133 	em->bdev = (struct block_device *)map;
6134 	em->start = logical;
6135 	em->len = length;
6136 	em->orig_start = 0;
6137 	em->block_start = 0;
6138 	em->block_len = em->len;
6139 
6140 	map->num_stripes = num_stripes;
6141 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6142 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6143 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6144 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6145 	map->type = btrfs_chunk_type(leaf, chunk);
6146 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6147 	for (i = 0; i < num_stripes; i++) {
6148 		map->stripes[i].physical =
6149 			btrfs_stripe_offset_nr(leaf, chunk, i);
6150 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6151 		read_extent_buffer(leaf, uuid, (unsigned long)
6152 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6153 				   BTRFS_UUID_SIZE);
6154 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6155 							uuid, NULL);
6156 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6157 			free_extent_map(em);
6158 			return -EIO;
6159 		}
6160 		if (!map->stripes[i].dev) {
6161 			map->stripes[i].dev =
6162 				add_missing_dev(root, root->fs_info->fs_devices,
6163 						devid, uuid);
6164 			if (!map->stripes[i].dev) {
6165 				free_extent_map(em);
6166 				return -EIO;
6167 			}
6168 			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6169 						devid, uuid);
6170 		}
6171 		map->stripes[i].dev->in_fs_metadata = 1;
6172 	}
6173 
6174 	write_lock(&map_tree->map_tree.lock);
6175 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6176 	write_unlock(&map_tree->map_tree.lock);
6177 	BUG_ON(ret); /* Tree corruption */
6178 	free_extent_map(em);
6179 
6180 	return 0;
6181 }
6182 
6183 static void fill_device_from_item(struct extent_buffer *leaf,
6184 				 struct btrfs_dev_item *dev_item,
6185 				 struct btrfs_device *device)
6186 {
6187 	unsigned long ptr;
6188 
6189 	device->devid = btrfs_device_id(leaf, dev_item);
6190 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6191 	device->total_bytes = device->disk_total_bytes;
6192 	device->commit_total_bytes = device->disk_total_bytes;
6193 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6194 	device->commit_bytes_used = device->bytes_used;
6195 	device->type = btrfs_device_type(leaf, dev_item);
6196 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6197 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6198 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6199 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6200 	device->is_tgtdev_for_dev_replace = 0;
6201 
6202 	ptr = btrfs_device_uuid(dev_item);
6203 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6204 }
6205 
6206 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6207 						  u8 *fsid)
6208 {
6209 	struct btrfs_fs_devices *fs_devices;
6210 	int ret;
6211 
6212 	BUG_ON(!mutex_is_locked(&uuid_mutex));
6213 
6214 	fs_devices = root->fs_info->fs_devices->seed;
6215 	while (fs_devices) {
6216 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6217 			return fs_devices;
6218 
6219 		fs_devices = fs_devices->seed;
6220 	}
6221 
6222 	fs_devices = find_fsid(fsid);
6223 	if (!fs_devices) {
6224 		if (!btrfs_test_opt(root, DEGRADED))
6225 			return ERR_PTR(-ENOENT);
6226 
6227 		fs_devices = alloc_fs_devices(fsid);
6228 		if (IS_ERR(fs_devices))
6229 			return fs_devices;
6230 
6231 		fs_devices->seeding = 1;
6232 		fs_devices->opened = 1;
6233 		return fs_devices;
6234 	}
6235 
6236 	fs_devices = clone_fs_devices(fs_devices);
6237 	if (IS_ERR(fs_devices))
6238 		return fs_devices;
6239 
6240 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6241 				   root->fs_info->bdev_holder);
6242 	if (ret) {
6243 		free_fs_devices(fs_devices);
6244 		fs_devices = ERR_PTR(ret);
6245 		goto out;
6246 	}
6247 
6248 	if (!fs_devices->seeding) {
6249 		__btrfs_close_devices(fs_devices);
6250 		free_fs_devices(fs_devices);
6251 		fs_devices = ERR_PTR(-EINVAL);
6252 		goto out;
6253 	}
6254 
6255 	fs_devices->seed = root->fs_info->fs_devices->seed;
6256 	root->fs_info->fs_devices->seed = fs_devices;
6257 out:
6258 	return fs_devices;
6259 }
6260 
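/*
 * Seed filesystems form a singly linked chain hanging off the mounted
 * fs_devices:
 *
 *	fs_info->fs_devices -> seed -> seed -> ... -> NULL
 *
 * btrfs_find_device(), open_seed_devices() above and the fs_info
 * pointer helpers at the end of this file all walk ->seed to the end.
 */
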
6261 static int read_one_dev(struct btrfs_root *root,
6262 			struct extent_buffer *leaf,
6263 			struct btrfs_dev_item *dev_item)
6264 {
6265 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6266 	struct btrfs_device *device;
6267 	u64 devid;
6268 	int ret;
6269 	u8 fs_uuid[BTRFS_UUID_SIZE];
6270 	u8 dev_uuid[BTRFS_UUID_SIZE];
6271 
6272 	devid = btrfs_device_id(leaf, dev_item);
6273 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6274 			   BTRFS_UUID_SIZE);
6275 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6276 			   BTRFS_UUID_SIZE);
6277 
6278 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6279 		fs_devices = open_seed_devices(root, fs_uuid);
6280 		if (IS_ERR(fs_devices))
6281 			return PTR_ERR(fs_devices);
6282 	}
6283 
6284 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6285 	if (!device) {
6286 		if (!btrfs_test_opt(root, DEGRADED))
6287 			return -EIO;
6288 
6289 		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6290 		if (!device)
6291 			return -ENOMEM;
6292 		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6293 				devid, dev_uuid);
6294 	} else {
6295 		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6296 			return -EIO;
6297 
6298 		if (!device->bdev && !device->missing) {
6299 			/*
6300 			 * this happens when a device that was properly set up
6301 			 * in the device info lists suddenly goes bad.
6302 			 * device->bdev is NULL, and so we have to set
6303 			 * device->missing to 1 here
6304 			 */
6305 			device->fs_devices->missing_devices++;
6306 			device->missing = 1;
6307 		}
6308 
6309 		/* Move the device to its own fs_devices */
6310 		if (device->fs_devices != fs_devices) {
6311 			ASSERT(device->missing);
6312 
6313 			list_move(&device->dev_list, &fs_devices->devices);
6314 			device->fs_devices->num_devices--;
6315 			fs_devices->num_devices++;
6316 
6317 			device->fs_devices->missing_devices--;
6318 			fs_devices->missing_devices++;
6319 
6320 			device->fs_devices = fs_devices;
6321 		}
6322 	}
6323 
6324 	if (device->fs_devices != root->fs_info->fs_devices) {
6325 		BUG_ON(device->writeable);
6326 		if (device->generation !=
6327 		    btrfs_device_generation(leaf, dev_item))
6328 			return -EINVAL;
6329 	}
6330 
6331 	fill_device_from_item(leaf, dev_item, device);
6332 	device->in_fs_metadata = 1;
6333 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6334 		device->fs_devices->total_rw_bytes += device->total_bytes;
6335 		spin_lock(&root->fs_info->free_chunk_lock);
6336 		root->fs_info->free_chunk_space += device->total_bytes -
6337 			device->bytes_used;
6338 		spin_unlock(&root->fs_info->free_chunk_lock);
6339 	}
6340 	ret = 0;
6341 	return ret;
6342 }
6343 
6344 int btrfs_read_sys_array(struct btrfs_root *root)
6345 {
6346 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6347 	struct extent_buffer *sb;
6348 	struct btrfs_disk_key *disk_key;
6349 	struct btrfs_chunk *chunk;
6350 	u8 *array_ptr;
6351 	unsigned long sb_array_offset;
6352 	int ret = 0;
6353 	u32 num_stripes;
6354 	u32 array_size;
6355 	u32 len = 0;
6356 	u32 cur_offset;
6357 	struct btrfs_key key;
6358 
6359 	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6360 	/*
6361 	 * This will create an extent buffer of nodesize; the superblock size
6362 	 * is fixed at BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6363 	 * overallocate, but we can keep it as-is: only the first page is used.
6364 	 */
6365 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6366 	if (!sb)
6367 		return -ENOMEM;
6368 	btrfs_set_buffer_uptodate(sb);
6369 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6370 	/*
6371 	 * The sb extent buffer is artificial and just used to read the system array.
6372 	 * The btrfs_set_buffer_uptodate() call does not properly mark all its
6373 	 * pages up-to-date when the page is larger: extent does not cover the
6374 	 * whole page and consequently check_page_uptodate does not find all
6375 	 * the page's extents up-to-date (the hole beyond sb),
6376 	 * write_extent_buffer then triggers a WARN_ON.
6377 	 *
6378 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6379 	 * but sb spans only this function. Add an explicit SetPageUptodate call
6380 	 * to silence the warning eg. on PowerPC 64.
6381 	 */
6382 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
6383 		SetPageUptodate(sb->pages[0]);
6384 
6385 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6386 	array_size = btrfs_super_sys_array_size(super_copy);
6387 
6388 	array_ptr = super_copy->sys_chunk_array;
6389 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6390 	cur_offset = 0;
6391 
6392 	while (cur_offset < array_size) {
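	/*
	 * sys_chunk_array is a packed sequence of
	 *
	 *	(struct btrfs_disk_key)(struct btrfs_chunk incl. its stripes)...
	 *
	 * so each iteration below consumes one key and then one variably
	 * sized chunk item, bounds-checking every step against array_size.
	 */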
6393 		disk_key = (struct btrfs_disk_key *)array_ptr;
6394 		len = sizeof(*disk_key);
6395 		if (cur_offset + len > array_size)
6396 			goto out_short_read;
6397 
6398 		btrfs_disk_key_to_cpu(&key, disk_key);
6399 
6400 		array_ptr += len;
6401 		sb_array_offset += len;
6402 		cur_offset += len;
6403 
6404 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6405 			chunk = (struct btrfs_chunk *)sb_array_offset;
6406 			/*
6407 			 * At least one btrfs_chunk with one stripe must be
6408 			 * present; the exact stripe count check comes afterwards
6409 			 */
6410 			len = btrfs_chunk_item_size(1);
6411 			if (cur_offset + len > array_size)
6412 				goto out_short_read;
6413 
6414 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6415 			len = btrfs_chunk_item_size(num_stripes);
6416 			if (cur_offset + len > array_size)
6417 				goto out_short_read;
6418 
6419 			ret = read_one_chunk(root, &key, sb, chunk);
6420 			if (ret)
6421 				break;
6422 		} else {
6423 			ret = -EIO;
6424 			break;
6425 		}
6426 		array_ptr += len;
6427 		sb_array_offset += len;
6428 		cur_offset += len;
6429 	}
6430 	free_extent_buffer(sb);
6431 	return ret;
6432 
6433 out_short_read:
6434 	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6435 			len, cur_offset);
6436 	free_extent_buffer(sb);
6437 	return -EIO;
6438 }
6439 
6440 int btrfs_read_chunk_tree(struct btrfs_root *root)
6441 {
6442 	struct btrfs_path *path;
6443 	struct extent_buffer *leaf;
6444 	struct btrfs_key key;
6445 	struct btrfs_key found_key;
6446 	int ret;
6447 	int slot;
6448 
6449 	root = root->fs_info->chunk_root;
6450 
6451 	path = btrfs_alloc_path();
6452 	if (!path)
6453 		return -ENOMEM;
6454 
6455 	mutex_lock(&uuid_mutex);
6456 	lock_chunks(root);
6457 
6458 	/*
6459 	 * Read all device items, and then all the chunk items. All
6460 	 * device items are found before any chunk item (their object id
6461 	 * is smaller than the lowest possible object id for a chunk
6462 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6463 	 */
6464 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6465 	key.offset = 0;
6466 	key.type = 0;
6467 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6468 	if (ret < 0)
6469 		goto error;
6470 	while (1) {
6471 		leaf = path->nodes[0];
6472 		slot = path->slots[0];
6473 		if (slot >= btrfs_header_nritems(leaf)) {
6474 			ret = btrfs_next_leaf(root, path);
6475 			if (ret == 0)
6476 				continue;
6477 			if (ret < 0)
6478 				goto error;
6479 			break;
6480 		}
6481 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6482 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6483 			struct btrfs_dev_item *dev_item;
6484 			dev_item = btrfs_item_ptr(leaf, slot,
6485 						  struct btrfs_dev_item);
6486 			ret = read_one_dev(root, leaf, dev_item);
6487 			if (ret)
6488 				goto error;
6489 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6490 			struct btrfs_chunk *chunk;
6491 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6492 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6493 			if (ret)
6494 				goto error;
6495 		}
6496 		path->slots[0]++;
6497 	}
6498 	ret = 0;
6499 error:
6500 	unlock_chunks(root);
6501 	mutex_unlock(&uuid_mutex);
6502 
6503 	btrfs_free_path(path);
6504 	return ret;
6505 }
6506 
6507 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6508 {
6509 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6510 	struct btrfs_device *device;
6511 
6512 	while (fs_devices) {
6513 		mutex_lock(&fs_devices->device_list_mutex);
6514 		list_for_each_entry(device, &fs_devices->devices, dev_list)
6515 			device->dev_root = fs_info->dev_root;
6516 		mutex_unlock(&fs_devices->device_list_mutex);
6517 
6518 		fs_devices = fs_devices->seed;
6519 	}
6520 }
6521 
6522 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6523 {
6524 	int i;
6525 
6526 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6527 		btrfs_dev_stat_reset(dev, i);
6528 }
6529 
6530 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6531 {
6532 	struct btrfs_key key;
6533 	struct btrfs_key found_key;
6534 	struct btrfs_root *dev_root = fs_info->dev_root;
6535 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6536 	struct extent_buffer *eb;
6537 	int slot;
6538 	int ret = 0;
6539 	struct btrfs_device *device;
6540 	struct btrfs_path *path = NULL;
6541 	int i;
6542 
6543 	path = btrfs_alloc_path();
6544 	if (!path) {
6545 		ret = -ENOMEM;
6546 		goto out;
6547 	}
6548 
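	/*
	 * On-disk compatibility note: a dev_stats item written by an older
	 * kernel may carry fewer than BTRFS_DEV_STAT_VALUES_MAX counters;
	 * the item_size checks below read what is present and reset the
	 * rest, and a missing item resets all counters for that device.
	 */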
6549 	mutex_lock(&fs_devices->device_list_mutex);
6550 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6551 		int item_size;
6552 		struct btrfs_dev_stats_item *ptr;
6553 
6554 		key.objectid = 0;
6555 		key.type = BTRFS_DEV_STATS_KEY;
6556 		key.offset = device->devid;
6557 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6558 		if (ret) {
6559 			__btrfs_reset_dev_stats(device);
6560 			device->dev_stats_valid = 1;
6561 			btrfs_release_path(path);
6562 			continue;
6563 		}
6564 		slot = path->slots[0];
6565 		eb = path->nodes[0];
6566 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6567 		item_size = btrfs_item_size_nr(eb, slot);
6568 
6569 		ptr = btrfs_item_ptr(eb, slot,
6570 				     struct btrfs_dev_stats_item);
6571 
6572 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6573 			if (item_size >= (1 + i) * sizeof(__le64))
6574 				btrfs_dev_stat_set(device, i,
6575 					btrfs_dev_stats_value(eb, ptr, i));
6576 			else
6577 				btrfs_dev_stat_reset(device, i);
6578 		}
6579 
6580 		device->dev_stats_valid = 1;
6581 		btrfs_dev_stat_print_on_load(device);
6582 		btrfs_release_path(path);
6583 	}
6584 	mutex_unlock(&fs_devices->device_list_mutex);
6585 
6586 out:
6587 	btrfs_free_path(path);
6588 	return ret < 0 ? ret : 0;
6589 }
6590 
6591 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6592 				struct btrfs_root *dev_root,
6593 				struct btrfs_device *device)
6594 {
6595 	struct btrfs_path *path;
6596 	struct btrfs_key key;
6597 	struct extent_buffer *eb;
6598 	struct btrfs_dev_stats_item *ptr;
6599 	int ret;
6600 	int i;
6601 
6602 	key.objectid = 0;
6603 	key.type = BTRFS_DEV_STATS_KEY;
6604 	key.offset = device->devid;
6605 
6606 	path = btrfs_alloc_path();
6607 	BUG_ON(!path);
6608 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6609 	if (ret < 0) {
6610 		printk_in_rcu(KERN_WARNING "BTRFS: "
6611 			"error %d while searching for dev_stats item for device %s!\n",
6612 			      ret, rcu_str_deref(device->name));
6613 		goto out;
6614 	}
6615 
6616 	if (ret == 0 &&
6617 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6618 		/* need to delete old one and insert a new one */
6619 		ret = btrfs_del_item(trans, dev_root, path);
6620 		if (ret != 0) {
6621 			printk_in_rcu(KERN_WARNING "BTRFS: "
6622 				"delete too small dev_stats item for device %s failed %d!\n",
6623 				      rcu_str_deref(device->name), ret);
6624 			goto out;
6625 		}
6626 		ret = 1;
6627 	}
6628 
6629 	if (ret == 1) {
6630 		/* need to insert a new item */
6631 		btrfs_release_path(path);
6632 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6633 					      &key, sizeof(*ptr));
6634 		if (ret < 0) {
6635 			printk_in_rcu(KERN_WARNING "BTRFS: "
6636 					  "insert dev_stats item for device %s failed %d!\n",
6637 				      rcu_str_deref(device->name), ret);
6638 			goto out;
6639 		}
6640 	}
6641 
6642 	eb = path->nodes[0];
6643 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6644 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6645 		btrfs_set_dev_stats_value(eb, ptr, i,
6646 					  btrfs_dev_stat_read(device, i));
6647 	btrfs_mark_buffer_dirty(eb);
6648 
6649 out:
6650 	btrfs_free_path(path);
6651 	return ret;
6652 }
6653 
6654 /*
6655  * called from commit_transaction. Writes all changed device stats to disk.
6656  */
6657 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6658 			struct btrfs_fs_info *fs_info)
6659 {
6660 	struct btrfs_root *dev_root = fs_info->dev_root;
6661 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6662 	struct btrfs_device *device;
6663 	int stats_cnt;
6664 	int ret = 0;
6665 
6666 	mutex_lock(&fs_devices->device_list_mutex);
6667 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6668 		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6669 			continue;
6670 
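		/*
		 * Snapshot the dirty count first and subtract only that
		 * amount on success, so increments racing with the item
		 * update stay dirty for the next commit.
		 */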
6671 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6672 		ret = update_dev_stat_item(trans, dev_root, device);
6673 		if (!ret)
6674 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6675 	}
6676 	mutex_unlock(&fs_devices->device_list_mutex);
6677 
6678 	return ret;
6679 }
6680 
6681 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6682 {
6683 	btrfs_dev_stat_inc(dev, index);
6684 	btrfs_dev_stat_print_on_error(dev);
6685 }
6686 
6687 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6688 {
6689 	if (!dev->dev_stats_valid)
6690 		return;
6691 	printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
6692 			   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6693 			   rcu_str_deref(dev->name),
6694 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6695 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6696 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6697 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6698 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6699 }
6700 
6701 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6702 {
6703 	int i;
6704 
6705 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6706 		if (btrfs_dev_stat_read(dev, i) != 0)
6707 			break;
6708 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6709 		return; /* all values == 0, suppress message */
6710 
6711 	printk_in_rcu(KERN_INFO "BTRFS: "
6712 		   "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
6713 	       rcu_str_deref(dev->name),
6714 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6715 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6716 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6717 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6718 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6719 }
6720 
6721 int btrfs_get_dev_stats(struct btrfs_root *root,
6722 			struct btrfs_ioctl_get_dev_stats *stats)
6723 {
6724 	struct btrfs_device *dev;
6725 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6726 	int i;
6727 
6728 	mutex_lock(&fs_devices->device_list_mutex);
6729 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6730 	mutex_unlock(&fs_devices->device_list_mutex);
6731 
6732 	if (!dev) {
6733 		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6734 		return -ENODEV;
6735 	} else if (!dev->dev_stats_valid) {
6736 		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6737 		return -ENODEV;
6738 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6739 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6740 			if (stats->nr_items > i)
6741 				stats->values[i] =
6742 					btrfs_dev_stat_read_and_reset(dev, i);
6743 			else
6744 				btrfs_dev_stat_reset(dev, i);
6745 		}
6746 	} else {
6747 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6748 			if (stats->nr_items > i)
6749 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6750 	}
6751 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6752 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6753 	return 0;
6754 }
6755 
6756 int btrfs_scratch_superblock(struct btrfs_device *device)
6757 {
6758 	struct buffer_head *bh;
6759 	struct btrfs_super_block *disk_super;
6760 
6761 	bh = btrfs_read_dev_super(device->bdev);
6762 	if (!bh)
6763 		return -EINVAL;
6764 	disk_super = (struct btrfs_super_block *)bh->b_data;
6765 
6766 	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6767 	set_buffer_dirty(bh);
6768 	sync_dirty_buffer(bh);
6769 	brelse(bh);
6770 
6771 	return 0;
6772 }
6773 
6774 /*
6775  * Update the size of all devices, which is used for writing out the
6776  * super blocks.
6777  */
6778 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6779 {
6780 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6781 	struct btrfs_device *curr, *next;
6782 
6783 	if (list_empty(&fs_devices->resized_devices))
6784 		return;
6785 
6786 	mutex_lock(&fs_devices->device_list_mutex);
6787 	lock_chunks(fs_info->dev_root);
6788 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6789 				 resized_list) {
6790 		list_del_init(&curr->resized_list);
6791 		curr->commit_total_bytes = curr->disk_total_bytes;
6792 	}
6793 	unlock_chunks(fs_info->dev_root);
6794 	mutex_unlock(&fs_devices->device_list_mutex);
6795 }
6796 
6797 /* Must be invoked during the transaction commit */
6798 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6799 					struct btrfs_transaction *transaction)
6800 {
6801 	struct extent_map *em;
6802 	struct map_lookup *map;
6803 	struct btrfs_device *dev;
6804 	int i;
6805 
6806 	if (list_empty(&transaction->pending_chunks))
6807 		return;
6808 
6809 	/* In order to kick the device replace finish process */
6810 	lock_chunks(root);
6811 	list_for_each_entry(em, &transaction->pending_chunks, list) {
6812 		map = (struct map_lookup *)em->bdev;
6813 
6814 		for (i = 0; i < map->num_stripes; i++) {
6815 			dev = map->stripes[i].dev;
6816 			dev->commit_bytes_used = dev->bytes_used;
6817 		}
6818 	}
6819 	unlock_chunks(root);
6820 }
6821 
6822 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
6823 {
6824 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6825 	while (fs_devices) {
6826 		fs_devices->fs_info = fs_info;
6827 		fs_devices = fs_devices->seed;
6828 	}
6829 }
6830 
6831 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
6832 {
6833 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6834 	while (fs_devices) {
6835 		fs_devices->fs_info = NULL;
6836 		fs_devices = fs_devices->seed;
6837 	}
6838 }
6839