/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

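/*
 * Per-profile allocation attributes.  For each RAID type this records how
 * stripes are laid out (sub_stripes, dev_stripes), the device count limits
 * (devs_max of 0 means "as many as possible", devs_min the required
 * minimum, devs_increment the step devices are added in), how many copies
 * of each block exist (ncopies) and how many device failures the profile
 * survives (tolerated_failures).  E.g. RAID10 needs at least 4 devices,
 * grows 2 at a time, keeps 2 copies and tolerates 1 failed device.
 */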
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 3,
	},
};

const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
	[BTRFS_RAID_SINGLE] = 0,
	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static void btrfs_close_one_device(struct btrfs_device *device);

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->list);

	return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
 *		generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = __alloc_fs_devices();
	if (IS_ERR(fs_devs))
		return fs_devs;

	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
	else
		generate_random_uuid(fs_devs->fsid);

	return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	spin_lock_init(&dev->reada_lock);
	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

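/*
 * Splice the not-yet-processed bios back onto the front of the device's
 * pending list, keeping their original order, so they are retried first
 * on the next pass of run_scheduled_bios().
 */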
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		/*
		 * atomic_dec_return implies a barrier for waitqueue_active
		 */
		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

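/*
 * Drop a stale registration: walk all unopened, non-seeding fs_devices
 * and remove any device entry (other than @cur_dev itself) whose path
 * string matches @cur_dev's, freeing the whole btrfs_fs_devices if that
 * was its last device.
 */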
void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
	struct btrfs_fs_devices *fs_devs;
	struct btrfs_device *dev;

	if (!cur_dev->name)
		return;

	list_for_each_entry(fs_devs, &fs_uuids, list) {
		int del = 1;

		if (fs_devs->opened)
			continue;
		if (fs_devs->seeding)
			continue;

		list_for_each_entry(dev, &fs_devs->devices, dev_list) {

			if (dev == cur_dev)
				continue;
			if (!dev->name)
				continue;

			/*
			 * TODO: This won't be enough. What if the same device
			 * comes back (with a new uuid) under its mapper path?
			 * But for now this does help, as mostly an admin will
			 * use either the mapper or the non-mapper path
			 * throughout.
			 */
			rcu_read_lock();
			del = strcmp(rcu_str_deref(dev->name),
						rcu_str_deref(cur_dev->name));
			rcu_read_unlock();
			if (!del)
				break;
		}

		if (!del) {
			/* delete the stale device */
			if (fs_devs->num_devices == 1) {
				btrfs_sysfs_remove_fsid(fs_devs);
				list_del(&fs_devs->list);
				free_fs_devices(fs_devs);
			} else {
				fs_devs->num_devices--;
				list_del(&dev->dev_list);
				rcu_string_free(dev->name);
				kfree(dev);
			}
			break;
		}
	}
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * 1   - first time device is seen
 * 0   - device already known
 * < 0 - error
 */
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	int ret = 0;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = alloc_fs_devices(disk_super->fsid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);

		list_add(&fs_devices->list, &fs_uuids);

		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}

	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			/* we can safely leave the fs_devices entry around */
			return PTR_ERR(device);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;
		mutex_unlock(&fs_devices->device_list_mutex);

		ret = 1;
		device->fs_devices = fs_devices;
	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at the time of the FS
		 *    mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in cases 1 and 2a above, the disk at 'path'
		 * would have missed some transactions while it was away,
		 * and in case 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow updates to the btrfs_fs_device
		 * through the btrfs dev scan cli after the FS has been
		 * mounted.  We're still tracking a problem where systems
		 * fail mount by subvolume id when we reject replacement on
		 * a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you are
			 * here, that means there is more than one disk with
			 * the same uuid and devid. We keep the one with the
			 * larger generation number, or the last-in if the
			 * generations are equal.
			 */
			return -EEXIST;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened)
		device->generation = found_transid;

	/*
	 * if there is new btrfs on an already registered device,
	 * then remove the stale device entry.
	 */
	btrfs_free_stale_device(device);

	*fs_devices_ret = fs_devices;

	return ret;
}

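/*
 * Build a private copy of @orig: a new btrfs_fs_devices with the same
 * fsid and a duplicate entry (devid, uuid, name) for each device.  The
 * clone is not opened and holds no block device references.
 */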
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without the rcu read lock held because we
		 * hold the uuid mutex so nothing we touch in here is going
		 * to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				kfree(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so the device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q))
			device->can_discard = 1;

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable &&
		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

992  * Look for a btrfs signature on a device. This may be called out of the mount path
993  * and we are not allowed to call set_blocksize during the scan. The superblock
994  * is read via pagecache
995  */
996 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
997 			  struct btrfs_fs_devices **fs_devices_ret)
998 {
999 	struct btrfs_super_block *disk_super;
1000 	struct block_device *bdev;
1001 	struct page *page;
1002 	void *p;
1003 	int ret = -EINVAL;
1004 	u64 devid;
1005 	u64 transid;
1006 	u64 total_devices;
1007 	u64 bytenr;
1008 	pgoff_t index;
1009 
1010 	/*
1011 	 * we would like to check all the supers, but that would make
1012 	 * a btrfs mount succeed after a mkfs from a different FS.
1013 	 * So, we need to add a special mount option to scan for
1014 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1015 	 */
1016 	bytenr = btrfs_sb_offset(0);
1017 	flags |= FMODE_EXCL;
1018 	mutex_lock(&uuid_mutex);
1019 
1020 	bdev = blkdev_get_by_path(path, flags, holder);
1021 
1022 	if (IS_ERR(bdev)) {
1023 		ret = PTR_ERR(bdev);
1024 		goto error;
1025 	}
1026 
1027 	/* make sure our super fits in the device */
1028 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1029 		goto error_bdev_put;
1030 
1031 	/* make sure our super fits in the page */
1032 	if (sizeof(*disk_super) > PAGE_SIZE)
1033 		goto error_bdev_put;
1034 
1035 	/* make sure our super doesn't straddle pages on disk */
1036 	index = bytenr >> PAGE_SHIFT;
1037 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1038 		goto error_bdev_put;
1039 
1040 	/* pull in the page with our super */
1041 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1042 				   index, GFP_NOFS);
1043 
1044 	if (IS_ERR_OR_NULL(page))
1045 		goto error_bdev_put;
1046 
1047 	p = kmap(page);
1048 
1049 	/* align our pointer to the offset of the super block */
1050 	disk_super = p + (bytenr & ~PAGE_MASK);
1051 
1052 	if (btrfs_super_bytenr(disk_super) != bytenr ||
1053 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
1054 		goto error_unmap;
1055 
1056 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1057 	transid = btrfs_super_generation(disk_super);
1058 	total_devices = btrfs_super_num_devices(disk_super);
1059 
1060 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1061 	if (ret > 0) {
1062 		if (disk_super->label[0]) {
1063 			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
1064 				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
1065 			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1066 		} else {
1067 			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1068 		}
1069 
1070 		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1071 		ret = 0;
1072 	}
1073 	if (!ret && fs_devices_ret)
1074 		(*fs_devices_ret)->total_devices = total_devices;
1075 
1076 error_unmap:
1077 	kunmap(page);
1078 	put_page(page);
1079 
1080 error_bdev_put:
1081 	blkdev_put(bdev, flags);
1082 error:
1083 	mutex_unlock(&uuid_mutex);
1084 	return ret;
1085 }
1086 
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
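		/*
		 * Four overlap cases with [start, end]: the extent covers
		 * the whole range, overlaps only the front, lies entirely
		 * inside, or overlaps only the tail; anything past @end
		 * ends the walk.
		 */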
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

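/*
 * Check whether any chunk pending in @transaction (or pinned in the
 * fs_info) overlaps [*start, *start + len) on @device.  If so, advance
 * *start past the conflicting stripes and return 1 so the caller can
 * retry its search from there.
 */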
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}


/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But
 * if we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;
	u64 min_search_start;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
	search_start = max(search_start, min_search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_std_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_std_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

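/*
 * Return the logical address just past the highest mapped chunk, i.e.
 * the offset at which the next chunk can be placed.
 */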
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root and
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probes like libblkid.
 */
static void update_dev_time(char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

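/*
 * Delete the DEV_ITEM for @device from the chunk tree, in its own
 * transaction.
 */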
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);

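	/*
	 * Refuse the removal if it would drop an active profile below its
	 * minimum device count (cf. devs_min in btrfs_raid_array above).
	 */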
1726 		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1727 		goto out;
1728 	}
1729 
1730 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1731 		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1732 		goto out;
1733 	}
1734 
1735 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1736 	    root->fs_info->fs_devices->rw_devices <= 2) {
1737 		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1738 		goto out;
1739 	}
1740 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1741 	    root->fs_info->fs_devices->rw_devices <= 3) {
1742 		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1743 		goto out;
1744 	}
1745 
1746 	if (strcmp(device_path, "missing") == 0) {
1747 		struct list_head *devices;
1748 		struct btrfs_device *tmp;
1749 
1750 		device = NULL;
1751 		devices = &root->fs_info->fs_devices->devices;
1752 		/*
1753 		 * It is safe to read the devices since the volume_mutex
1754 		 * is held.
1755 		 */
1756 		list_for_each_entry(tmp, devices, dev_list) {
1757 			if (tmp->in_fs_metadata &&
1758 			    !tmp->is_tgtdev_for_dev_replace &&
1759 			    !tmp->bdev) {
1760 				device = tmp;
1761 				break;
1762 			}
1763 		}
1764 		bdev = NULL;
1765 		bh = NULL;
1766 		disk_super = NULL;
1767 		if (!device) {
1768 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1769 			goto out;
1770 		}
1771 	} else {
1772 		ret = btrfs_get_bdev_and_sb(device_path,
1773 					    FMODE_WRITE | FMODE_EXCL,
1774 					    root->fs_info->bdev_holder, 0,
1775 					    &bdev, &bh);
1776 		if (ret)
1777 			goto out;
1778 		disk_super = (struct btrfs_super_block *)bh->b_data;
1779 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1780 		dev_uuid = disk_super->dev_item.uuid;
1781 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1782 					   disk_super->fsid);
1783 		if (!device) {
1784 			ret = -ENOENT;
1785 			goto error_brelse;
1786 		}
1787 	}
1788 
1789 	if (device->is_tgtdev_for_dev_replace) {
1790 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1791 		goto error_brelse;
1792 	}
1793 
1794 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1795 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1796 		goto error_brelse;
1797 	}
1798 
1799 	if (device->writeable) {
1800 		lock_chunks(root);
1801 		list_del_init(&device->dev_alloc_list);
1802 		device->fs_devices->rw_devices--;
1803 		unlock_chunks(root);
1804 		clear_super = true;
1805 	}
1806 
1807 	mutex_unlock(&uuid_mutex);
1808 	ret = btrfs_shrink_device(device, 0);
1809 	mutex_lock(&uuid_mutex);
1810 	if (ret)
1811 		goto error_undo;
1812 
1813 	/*
1814 	 * TODO: the superblock still includes this device in its num_devices
1815 	 * counter although write_all_supers() is not locked out. This
1816 	 * could give a filesystem state which requires a degraded mount.
1817 	 */
1818 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1819 	if (ret)
1820 		goto error_undo;
1821 
1822 	device->in_fs_metadata = 0;
1823 	btrfs_scrub_cancel_dev(root->fs_info, device);
1824 
1825 	/*
1826 	 * the device list mutex makes sure that we don't change
1827 	 * the device list while someone else is writing out all
1828 	 * the device supers. Whoever is writing all supers, should
1829 	 * lock the device list mutex before getting the number of
1830 	 * devices in the super block (super_copy). Conversely,
1831 	 * whoever updates the number of devices in the super block
1832 	 * (super_copy) should hold the device list mutex.
1833 	 */
1834 
1835 	cur_devices = device->fs_devices;
1836 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1837 	list_del_rcu(&device->dev_list);
1838 
1839 	device->fs_devices->num_devices--;
1840 	device->fs_devices->total_devices--;
1841 
1842 	if (device->missing)
1843 		device->fs_devices->missing_devices--;
1844 
1845 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1846 				 struct btrfs_device, dev_list);
1847 	if (device->bdev == root->fs_info->sb->s_bdev)
1848 		root->fs_info->sb->s_bdev = next_device->bdev;
1849 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1850 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1851 
1852 	if (device->bdev) {
1853 		device->fs_devices->open_devices--;
1854 		/* remove sysfs entry */
1855 		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1856 	}
1857 
1858 	call_rcu(&device->rcu, free_device);
1859 
1860 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1861 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1862 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1863 
1864 	if (cur_devices->open_devices == 0) {
1865 		struct btrfs_fs_devices *fs_devices;
1866 		fs_devices = root->fs_info->fs_devices;
1867 		while (fs_devices) {
1868 			if (fs_devices->seed == cur_devices) {
1869 				fs_devices->seed = cur_devices->seed;
1870 				break;
1871 			}
1872 			fs_devices = fs_devices->seed;
1873 		}
1874 		cur_devices->seed = NULL;
1875 		__btrfs_close_devices(cur_devices);
1876 		free_fs_devices(cur_devices);
1877 	}
1878 
1879 	root->fs_info->num_tolerated_disk_barrier_failures =
1880 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1881 
1882 	/*
1883 	 * at this point, the device is zero sized.  We want to
1884 	 * remove it from the devices list and zero out the old super
1885 	 */
1886 	if (clear_super && disk_super) {
1887 		u64 bytenr;
1888 		int i;
1889 
1890 		/* make sure this device isn't detected as part of
1891 		 * the FS anymore
1892 		 */
1893 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1894 		set_buffer_dirty(bh);
1895 		sync_dirty_buffer(bh);
1896 
1897 		/* clear the mirror copies of the super block on the disk
1898 		 * being removed; the 0th copy was taken care of above and
1899 		 * the loop below takes care of the rest
1900 		 */
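		/*
		 * For reference (an editor's addition, not in the original
		 * source): the copies live at fixed offsets returned by
		 * btrfs_sb_offset(), 64KiB for the primary and then 64MiB
		 * and 256GiB for the two mirrors, so the i_size check below
		 * skips copies that fall beyond the end of a small device.
		 */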
1901 		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1902 			bytenr = btrfs_sb_offset(i);
1903 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1904 					i_size_read(bdev->bd_inode))
1905 				break;
1906 
1907 			brelse(bh);
1908 			bh = __bread(bdev, bytenr / 4096,
1909 					BTRFS_SUPER_INFO_SIZE);
1910 			if (!bh)
1911 				continue;
1912 
1913 			disk_super = (struct btrfs_super_block *)bh->b_data;
1914 
1915 			if (btrfs_super_bytenr(disk_super) != bytenr ||
1916 				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1917 				continue;
1918 			}
1919 			memset(&disk_super->magic, 0,
1920 						sizeof(disk_super->magic));
1921 			set_buffer_dirty(bh);
1922 			sync_dirty_buffer(bh);
1923 		}
1924 	}
1925 
1926 	ret = 0;
1927 
1928 	if (bdev) {
1929 		/* Notify udev that device has changed */
1930 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1931 
1932 		/* Update ctime/mtime for device path for libblkid */
1933 		update_dev_time(device_path);
1934 	}
1935 
1936 error_brelse:
1937 	brelse(bh);
1938 	if (bdev)
1939 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1940 out:
1941 	mutex_unlock(&uuid_mutex);
1942 	return ret;
1943 error_undo:
1944 	if (device->writeable) {
1945 		lock_chunks(root);
1946 		list_add(&device->dev_alloc_list,
1947 			 &root->fs_info->fs_devices->alloc_list);
1948 		device->fs_devices->rw_devices++;
1949 		unlock_chunks(root);
1950 	}
1951 	goto error_brelse;
1952 }
1953 
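/*
 * Illustrative sketch, not part of the original file: the num_devices
 * counter in super_copy must be read and written with device_list_mutex
 * held, as the comment in btrfs_rm_device() explains. A minimal,
 * hypothetical helper following that protocol could look like this:
 */
static __maybe_unused void example_dec_super_num_devices(
					struct btrfs_fs_info *fs_info)
{
	u64 num_devices;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	/* read the counter under the mutex ... */
	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	/* ... and write it back before dropping the mutex */
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}
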
1954 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1955 					struct btrfs_device *srcdev)
1956 {
1957 	struct btrfs_fs_devices *fs_devices;
1958 
1959 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1960 
1961 	/*
1962 	 * in the case of an fs with no seed, srcdev->fs_devices points
1963 	 * to the fs_devices of fs_info. However, when the dev being
1964 	 * replaced is a seed dev it points to the seed's local fs_devices.
1965 	 * In short, srcdev has the correct fs_devices in both cases.
1966 	 */
1967 	fs_devices = srcdev->fs_devices;
1968 
1969 	list_del_rcu(&srcdev->dev_list);
1970 	list_del_rcu(&srcdev->dev_alloc_list);
1971 	fs_devices->num_devices--;
1972 	if (srcdev->missing)
1973 		fs_devices->missing_devices--;
1974 
1975 	if (srcdev->writeable) {
1976 		fs_devices->rw_devices--;
1977 		/* zero out the old super if it is writable */
1978 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1979 	}
1980 
1981 	if (srcdev->bdev)
1982 		fs_devices->open_devices--;
1983 }
1984 
1985 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1986 				      struct btrfs_device *srcdev)
1987 {
1988 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1989 
1990 	call_rcu(&srcdev->rcu, free_device);
1991 
1992 	/*
1993 	 * unless fs_devices is a seed filesystem, num_devices shouldn't
1994 	 * go to zero
1995 	 */
1996 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1997 
1998 	/* if there are no devices left we'd rather delete the fs_devices */
1999 	if (!fs_devices->num_devices) {
2000 		struct btrfs_fs_devices *tmp_fs_devices;
2001 
2002 		tmp_fs_devices = fs_info->fs_devices;
2003 		while (tmp_fs_devices) {
2004 			if (tmp_fs_devices->seed == fs_devices) {
2005 				tmp_fs_devices->seed = fs_devices->seed;
2006 				break;
2007 			}
2008 			tmp_fs_devices = tmp_fs_devices->seed;
2009 		}
2010 		fs_devices->seed = NULL;
2011 		__btrfs_close_devices(fs_devices);
2012 		free_fs_devices(fs_devices);
2013 	}
2014 }
2015 
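/*
 * Illustrative sketch, not part of the original file: btrfs_rm_device()
 * and btrfs_rm_dev_replace_free_srcdev() above both walk the singly
 * linked ->seed chain to splice out an fs_devices before closing and
 * freeing it. A hypothetical helper factoring out that walk could look
 * like this:
 */
static __maybe_unused void example_unlink_seed_devices(
					struct btrfs_fs_info *fs_info,
					struct btrfs_fs_devices *victim)
{
	struct btrfs_fs_devices *cur = fs_info->fs_devices;

	/* find the predecessor of @victim in the seed chain */
	while (cur) {
		if (cur->seed == victim) {
			/* splice @victim out of the chain */
			cur->seed = victim->seed;
			break;
		}
		cur = cur->seed;
	}
	victim->seed = NULL;
}
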
2016 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2017 				      struct btrfs_device *tgtdev)
2018 {
2019 	struct btrfs_device *next_device;
2020 
2021 	mutex_lock(&uuid_mutex);
2022 	WARN_ON(!tgtdev);
2023 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2024 
2025 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2026 
2027 	if (tgtdev->bdev) {
2028 		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2029 		fs_info->fs_devices->open_devices--;
2030 	}
2031 	fs_info->fs_devices->num_devices--;
2032 
2033 	next_device = list_entry(fs_info->fs_devices->devices.next,
2034 				 struct btrfs_device, dev_list);
2035 	if (tgtdev->bdev == fs_info->sb->s_bdev)
2036 		fs_info->sb->s_bdev = next_device->bdev;
2037 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
2038 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2039 	list_del_rcu(&tgtdev->dev_list);
2040 
2041 	call_rcu(&tgtdev->rcu, free_device);
2042 
2043 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2044 	mutex_unlock(&uuid_mutex);
2045 }
2046 
2047 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2048 				     struct btrfs_device **device)
2049 {
2050 	int ret = 0;
2051 	struct btrfs_super_block *disk_super;
2052 	u64 devid;
2053 	u8 *dev_uuid;
2054 	struct block_device *bdev;
2055 	struct buffer_head *bh;
2056 
2057 	*device = NULL;
2058 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2059 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
2060 	if (ret)
2061 		return ret;
2062 	disk_super = (struct btrfs_super_block *)bh->b_data;
2063 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2064 	dev_uuid = disk_super->dev_item.uuid;
2065 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2066 				    disk_super->fsid);
2067 	brelse(bh);
2068 	if (!*device)
2069 		ret = -ENOENT;
2070 	blkdev_put(bdev, FMODE_READ);
2071 	return ret;
2072 }
2073 
2074 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2075 					 char *device_path,
2076 					 struct btrfs_device **device)
2077 {
2078 	*device = NULL;
2079 	if (strcmp(device_path, "missing") == 0) {
2080 		struct list_head *devices;
2081 		struct btrfs_device *tmp;
2082 
2083 		devices = &root->fs_info->fs_devices->devices;
2084 		/*
2085 		 * It is safe to read the devices since the volume_mutex
2086 		 * is held by the caller.
2087 		 */
2088 		list_for_each_entry(tmp, devices, dev_list) {
2089 			if (tmp->in_fs_metadata && !tmp->bdev) {
2090 				*device = tmp;
2091 				break;
2092 			}
2093 		}
2094 
2095 		if (!*device)
2096 			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2097 
2098 		return 0;
2099 	} else {
2100 		return btrfs_find_device_by_path(root, device_path, device);
2101 	}
2102 }
2103 
2104 /*
2105  * Does all the dirty work required for changing the file system's UUID.
2106  */
2107 static int btrfs_prepare_sprout(struct btrfs_root *root)
2108 {
2109 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2110 	struct btrfs_fs_devices *old_devices;
2111 	struct btrfs_fs_devices *seed_devices;
2112 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2113 	struct btrfs_device *device;
2114 	u64 super_flags;
2115 
2116 	BUG_ON(!mutex_is_locked(&uuid_mutex));
2117 	if (!fs_devices->seeding)
2118 		return -EINVAL;
2119 
2120 	seed_devices = __alloc_fs_devices();
2121 	if (IS_ERR(seed_devices))
2122 		return PTR_ERR(seed_devices);
2123 
2124 	old_devices = clone_fs_devices(fs_devices);
2125 	if (IS_ERR(old_devices)) {
2126 		kfree(seed_devices);
2127 		return PTR_ERR(old_devices);
2128 	}
2129 
2130 	list_add(&old_devices->list, &fs_uuids);
2131 
2132 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2133 	seed_devices->opened = 1;
2134 	INIT_LIST_HEAD(&seed_devices->devices);
2135 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2136 	mutex_init(&seed_devices->device_list_mutex);
2137 
2138 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2139 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2140 			      synchronize_rcu);
2141 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2142 		device->fs_devices = seed_devices;
2143 
2144 	lock_chunks(root);
2145 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2146 	unlock_chunks(root);
2147 
2148 	fs_devices->seeding = 0;
2149 	fs_devices->num_devices = 0;
2150 	fs_devices->open_devices = 0;
2151 	fs_devices->missing_devices = 0;
2152 	fs_devices->rotating = 0;
2153 	fs_devices->seed = seed_devices;
2154 
2155 	generate_random_uuid(fs_devices->fsid);
2156 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2157 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2158 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2159 
2160 	super_flags = btrfs_super_flags(disk_super) &
2161 		      ~BTRFS_SUPER_FLAG_SEEDING;
2162 	btrfs_set_super_flags(disk_super, super_flags);
2163 
2164 	return 0;
2165 }
2166 
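/*
 * Note (an editor's addition, not in the original source): after
 * btrfs_prepare_sprout() returns, the original devices live on under the
 * newly allocated seed_devices with the old fsid, while fs_devices is
 * reset, given a freshly generated fsid and chained to the seed via
 * ->seed; the SEEDING flag is also cleared from the superblock so the
 * sprouted filesystem can be written to.
 */
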
2167 /*
2168  * Store the expected generation for seed devices in device items.
2169  */
2170 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2171 			       struct btrfs_root *root)
2172 {
2173 	struct btrfs_path *path;
2174 	struct extent_buffer *leaf;
2175 	struct btrfs_dev_item *dev_item;
2176 	struct btrfs_device *device;
2177 	struct btrfs_key key;
2178 	u8 fs_uuid[BTRFS_UUID_SIZE];
2179 	u8 dev_uuid[BTRFS_UUID_SIZE];
2180 	u64 devid;
2181 	int ret;
2182 
2183 	path = btrfs_alloc_path();
2184 	if (!path)
2185 		return -ENOMEM;
2186 
2187 	root = root->fs_info->chunk_root;
2188 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2189 	key.offset = 0;
2190 	key.type = BTRFS_DEV_ITEM_KEY;
2191 
2192 	while (1) {
2193 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2194 		if (ret < 0)
2195 			goto error;
2196 
2197 		leaf = path->nodes[0];
2198 next_slot:
2199 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2200 			ret = btrfs_next_leaf(root, path);
2201 			if (ret > 0)
2202 				break;
2203 			if (ret < 0)
2204 				goto error;
2205 			leaf = path->nodes[0];
2206 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2207 			btrfs_release_path(path);
2208 			continue;
2209 		}
2210 
2211 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2212 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2213 		    key.type != BTRFS_DEV_ITEM_KEY)
2214 			break;
2215 
2216 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2217 					  struct btrfs_dev_item);
2218 		devid = btrfs_device_id(leaf, dev_item);
2219 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2220 				   BTRFS_UUID_SIZE);
2221 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2222 				   BTRFS_UUID_SIZE);
2223 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2224 					   fs_uuid);
2225 		BUG_ON(!device); /* Logic error */
2226 
2227 		if (device->fs_devices->seeding) {
2228 			btrfs_set_device_generation(leaf, dev_item,
2229 						    device->generation);
2230 			btrfs_mark_buffer_dirty(leaf);
2231 		}
2232 
2233 		path->slots[0]++;
2234 		goto next_slot;
2235 	}
2236 	ret = 0;
2237 error:
2238 	btrfs_free_path(path);
2239 	return ret;
2240 }
2241 
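/*
 * Illustrative sketch, not part of the original file: btrfs_finish_sprout()
 * uses the common "search, then walk forward" pattern for visiting every
 * item of one key type. Reduced to a read-only walk with no transaction,
 * and assuming the caller releases @path afterwards, the pattern is
 * roughly:
 */
static __maybe_unused int example_walk_dev_items(struct btrfs_root *chunk_root,
						 struct btrfs_path *path)
{
	struct btrfs_key key;
	int ret;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(chunk_root, path);
			if (ret)	/* > 0 means no more leaves */
				break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;
		/* ... inspect the dev item at path->slots[0] here ... */
		path->slots[0]++;
	}
	return ret < 0 ? ret : 0;
}
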
2242 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2243 {
2244 	struct request_queue *q;
2245 	struct btrfs_trans_handle *trans;
2246 	struct btrfs_device *device;
2247 	struct block_device *bdev;
2248 	struct list_head *devices;
2249 	struct super_block *sb = root->fs_info->sb;
2250 	struct rcu_string *name;
2251 	u64 tmp;
2252 	int seeding_dev = 0;
2253 	int ret = 0;
2254 
2255 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2256 		return -EROFS;
2257 
2258 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2259 				  root->fs_info->bdev_holder);
2260 	if (IS_ERR(bdev))
2261 		return PTR_ERR(bdev);
2262 
2263 	if (root->fs_info->fs_devices->seeding) {
2264 		seeding_dev = 1;
2265 		down_write(&sb->s_umount);
2266 		mutex_lock(&uuid_mutex);
2267 	}
2268 
2269 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2270 
2271 	devices = &root->fs_info->fs_devices->devices;
2272 
2273 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2274 	list_for_each_entry(device, devices, dev_list) {
2275 		if (device->bdev == bdev) {
2276 			ret = -EEXIST;
2277 			mutex_unlock(
2278 				&root->fs_info->fs_devices->device_list_mutex);
2279 			goto error;
2280 		}
2281 	}
2282 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2283 
2284 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2285 	if (IS_ERR(device)) {
2286 		/* we can safely leave the fs_devices entry around */
2287 		ret = PTR_ERR(device);
2288 		goto error;
2289 	}
2290 
2291 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2292 	if (!name) {
2293 		kfree(device);
2294 		ret = -ENOMEM;
2295 		goto error;
2296 	}
2297 	rcu_assign_pointer(device->name, name);
2298 
2299 	trans = btrfs_start_transaction(root, 0);
2300 	if (IS_ERR(trans)) {
2301 		rcu_string_free(device->name);
2302 		kfree(device);
2303 		ret = PTR_ERR(trans);
2304 		goto error;
2305 	}
2306 
2307 	q = bdev_get_queue(bdev);
2308 	if (blk_queue_discard(q))
2309 		device->can_discard = 1;
2310 	device->writeable = 1;
2311 	device->generation = trans->transid;
2312 	device->io_width = root->sectorsize;
2313 	device->io_align = root->sectorsize;
2314 	device->sector_size = root->sectorsize;
2315 	device->total_bytes = i_size_read(bdev->bd_inode);
2316 	device->disk_total_bytes = device->total_bytes;
2317 	device->commit_total_bytes = device->total_bytes;
2318 	device->dev_root = root->fs_info->dev_root;
2319 	device->bdev = bdev;
2320 	device->in_fs_metadata = 1;
2321 	device->is_tgtdev_for_dev_replace = 0;
2322 	device->mode = FMODE_EXCL;
2323 	device->dev_stats_valid = 1;
2324 	set_blocksize(device->bdev, 4096);
2325 
2326 	if (seeding_dev) {
2327 		sb->s_flags &= ~MS_RDONLY;
2328 		ret = btrfs_prepare_sprout(root);
2329 		BUG_ON(ret); /* -ENOMEM */
2330 	}
2331 
2332 	device->fs_devices = root->fs_info->fs_devices;
2333 
2334 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2335 	lock_chunks(root);
2336 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2337 	list_add(&device->dev_alloc_list,
2338 		 &root->fs_info->fs_devices->alloc_list);
2339 	root->fs_info->fs_devices->num_devices++;
2340 	root->fs_info->fs_devices->open_devices++;
2341 	root->fs_info->fs_devices->rw_devices++;
2342 	root->fs_info->fs_devices->total_devices++;
2343 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2344 
2345 	spin_lock(&root->fs_info->free_chunk_lock);
2346 	root->fs_info->free_chunk_space += device->total_bytes;
2347 	spin_unlock(&root->fs_info->free_chunk_lock);
2348 
2349 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2350 		root->fs_info->fs_devices->rotating = 1;
2351 
2352 	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2353 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2354 				    tmp + device->total_bytes);
2355 
2356 	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2357 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2358 				    tmp + 1);
2359 
2360 	/* add sysfs device entry */
2361 	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2362 
2363 	/*
2364 	 * we've got more storage, clear any full flags on the space
2365 	 * infos
2366 	 */
2367 	btrfs_clear_space_info_full(root->fs_info);
2368 
2369 	unlock_chunks(root);
2370 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2371 
2372 	if (seeding_dev) {
2373 		lock_chunks(root);
2374 		ret = init_first_rw_device(trans, root, device);
2375 		unlock_chunks(root);
2376 		if (ret) {
2377 			btrfs_abort_transaction(trans, root, ret);
2378 			goto error_trans;
2379 		}
2380 	}
2381 
2382 	ret = btrfs_add_device(trans, root, device);
2383 	if (ret) {
2384 		btrfs_abort_transaction(trans, root, ret);
2385 		goto error_trans;
2386 	}
2387 
2388 	if (seeding_dev) {
2389 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2390 
2391 		ret = btrfs_finish_sprout(trans, root);
2392 		if (ret) {
2393 			btrfs_abort_transaction(trans, root, ret);
2394 			goto error_trans;
2395 		}
2396 
2397 		/* Sprouting changes the fsid of the mounted root,
2398 		 * so rename the fsid directory in sysfs as well
2399 		 */
2400 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2401 						root->fs_info->fsid);
2402 		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2403 								fsid_buf))
2404 			btrfs_warn(root->fs_info,
2405 				"sysfs: failed to create fsid for sprout");
2406 	}
2407 
2408 	root->fs_info->num_tolerated_disk_barrier_failures =
2409 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2410 	ret = btrfs_commit_transaction(trans, root);
2411 
2412 	if (seeding_dev) {
2413 		mutex_unlock(&uuid_mutex);
2414 		up_write(&sb->s_umount);
2415 
2416 		if (ret) /* transaction commit */
2417 			return ret;
2418 
2419 		ret = btrfs_relocate_sys_chunks(root);
2420 		if (ret < 0)
2421 			btrfs_std_error(root->fs_info, ret,
2422 				    "Failed to relocate sys chunks after "
2423 				    "device initialization. This can be fixed "
2424 				    "using the \"btrfs balance\" command.");
2425 		trans = btrfs_attach_transaction(root);
2426 		if (IS_ERR(trans)) {
2427 			if (PTR_ERR(trans) == -ENOENT)
2428 				return 0;
2429 			return PTR_ERR(trans);
2430 		}
2431 		ret = btrfs_commit_transaction(trans, root);
2432 	}
2433 
2434 	/* Update ctime/mtime for libblkid */
2435 	update_dev_time(device_path);
2436 	return ret;
2437 
2438 error_trans:
2439 	btrfs_end_transaction(trans, root);
2440 	rcu_string_free(device->name);
2441 	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2442 	kfree(device);
2443 error:
2444 	blkdev_put(bdev, FMODE_EXCL);
2445 	if (seeding_dev) {
2446 		mutex_unlock(&uuid_mutex);
2447 		up_write(&sb->s_umount);
2448 	}
2449 	return ret;
2450 }
2451 
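/*
 * Note (an editor's addition, not in the original source):
 * btrfs_init_new_device() is the backend of "btrfs device add". On a
 * seed filesystem the add turns into a sprout: the filesystem is flipped
 * read-write, a new fsid is generated by btrfs_prepare_sprout(), the
 * sysfs fsid directory is renamed to match, and after the transaction
 * commits the system chunks are relocated off the read-only seed.
 */
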
2452 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2453 				  struct btrfs_device *srcdev,
2454 				  struct btrfs_device **device_out)
2455 {
2456 	struct request_queue *q;
2457 	struct btrfs_device *device;
2458 	struct block_device *bdev;
2459 	struct btrfs_fs_info *fs_info = root->fs_info;
2460 	struct list_head *devices;
2461 	struct rcu_string *name;
2462 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2463 	int ret = 0;
2464 
2465 	*device_out = NULL;
2466 	if (fs_info->fs_devices->seeding) {
2467 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2468 		return -EINVAL;
2469 	}
2470 
2471 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2472 				  fs_info->bdev_holder);
2473 	if (IS_ERR(bdev)) {
2474 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2475 		return PTR_ERR(bdev);
2476 	}
2477 
2478 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2479 
2480 	devices = &fs_info->fs_devices->devices;
2481 	list_for_each_entry(device, devices, dev_list) {
2482 		if (device->bdev == bdev) {
2483 			btrfs_err(fs_info, "target device is in the filesystem!");
2484 			ret = -EEXIST;
2485 			goto error;
2486 		}
2487 	}
2488 
2490 	if (i_size_read(bdev->bd_inode) <
2491 	    btrfs_device_get_total_bytes(srcdev)) {
2492 		btrfs_err(fs_info, "target device is smaller than source device!");
2493 		ret = -EINVAL;
2494 		goto error;
2495 	}
2496 
2498 	device = btrfs_alloc_device(NULL, &devid, NULL);
2499 	if (IS_ERR(device)) {
2500 		ret = PTR_ERR(device);
2501 		goto error;
2502 	}
2503 
2504 	name = rcu_string_strdup(device_path, GFP_NOFS);
2505 	if (!name) {
2506 		kfree(device);
2507 		ret = -ENOMEM;
2508 		goto error;
2509 	}
2510 	rcu_assign_pointer(device->name, name);
2511 
2512 	q = bdev_get_queue(bdev);
2513 	if (blk_queue_discard(q))
2514 		device->can_discard = 1;
2515 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2516 	device->writeable = 1;
2517 	device->generation = 0;
2518 	device->io_width = root->sectorsize;
2519 	device->io_align = root->sectorsize;
2520 	device->sector_size = root->sectorsize;
2521 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2522 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2523 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2524 	ASSERT(list_empty(&srcdev->resized_list));
2525 	device->commit_total_bytes = srcdev->commit_total_bytes;
2526 	device->commit_bytes_used = device->bytes_used;
2527 	device->dev_root = fs_info->dev_root;
2528 	device->bdev = bdev;
2529 	device->in_fs_metadata = 1;
2530 	device->is_tgtdev_for_dev_replace = 1;
2531 	device->mode = FMODE_EXCL;
2532 	device->dev_stats_valid = 1;
2533 	set_blocksize(device->bdev, 4096);
2534 	device->fs_devices = fs_info->fs_devices;
2535 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2536 	fs_info->fs_devices->num_devices++;
2537 	fs_info->fs_devices->open_devices++;
2538 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2539 
2540 	*device_out = device;
2541 	return ret;
2542 
2543 error:
2544 	blkdev_put(bdev, FMODE_EXCL);
2545 	return ret;
2546 }
2547 
2548 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2549 					      struct btrfs_device *tgtdev)
2550 {
2551 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2552 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2553 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2554 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2555 	tgtdev->dev_root = fs_info->dev_root;
2556 	tgtdev->in_fs_metadata = 1;
2557 }
2558 
2559 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2560 					struct btrfs_device *device)
2561 {
2562 	int ret;
2563 	struct btrfs_path *path;
2564 	struct btrfs_root *root;
2565 	struct btrfs_dev_item *dev_item;
2566 	struct extent_buffer *leaf;
2567 	struct btrfs_key key;
2568 
2569 	root = device->dev_root->fs_info->chunk_root;
2570 
2571 	path = btrfs_alloc_path();
2572 	if (!path)
2573 		return -ENOMEM;
2574 
2575 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2576 	key.type = BTRFS_DEV_ITEM_KEY;
2577 	key.offset = device->devid;
2578 
2579 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2580 	if (ret < 0)
2581 		goto out;
2582 
2583 	if (ret > 0) {
2584 		ret = -ENOENT;
2585 		goto out;
2586 	}
2587 
2588 	leaf = path->nodes[0];
2589 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2590 
2591 	btrfs_set_device_id(leaf, dev_item, device->devid);
2592 	btrfs_set_device_type(leaf, dev_item, device->type);
2593 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2594 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2595 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2596 	btrfs_set_device_total_bytes(leaf, dev_item,
2597 				     btrfs_device_get_disk_total_bytes(device));
2598 	btrfs_set_device_bytes_used(leaf, dev_item,
2599 				    btrfs_device_get_bytes_used(device));
2600 	btrfs_mark_buffer_dirty(leaf);
2601 
2602 out:
2603 	btrfs_free_path(path);
2604 	return ret;
2605 }
2606 
2607 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2608 		      struct btrfs_device *device, u64 new_size)
2609 {
2610 	struct btrfs_super_block *super_copy =
2611 		device->dev_root->fs_info->super_copy;
2612 	struct btrfs_fs_devices *fs_devices;
2613 	u64 old_total;
2614 	u64 diff;
2615 
2616 	if (!device->writeable)
2617 		return -EACCES;
2618 
2619 	lock_chunks(device->dev_root);
2620 	old_total = btrfs_super_total_bytes(super_copy);
2621 	diff = new_size - device->total_bytes;
2622 
2623 	if (new_size <= device->total_bytes ||
2624 	    device->is_tgtdev_for_dev_replace) {
2625 		unlock_chunks(device->dev_root);
2626 		return -EINVAL;
2627 	}
2628 
2629 	fs_devices = device->dev_root->fs_info->fs_devices;
2630 
2631 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2632 	device->fs_devices->total_rw_bytes += diff;
2633 
2634 	btrfs_device_set_total_bytes(device, new_size);
2635 	btrfs_device_set_disk_total_bytes(device, new_size);
2636 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2637 	if (list_empty(&device->resized_list))
2638 		list_add_tail(&device->resized_list,
2639 			      &fs_devices->resized_devices);
2640 	unlock_chunks(device->dev_root);
2641 
2642 	return btrfs_update_device(trans, device);
2643 }
2644 
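/*
 * Worked example (an editor's addition, not in the original source):
 * growing a device from 100GiB to 150GiB gives diff = 50GiB. The
 * superblock total and total_rw_bytes both grow by diff, the in-memory
 * device sizes are set to the new 150GiB, and btrfs_update_device()
 * then persists the new size in the device's chunk tree item.
 */
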
2645 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2646 			    struct btrfs_root *root, u64 chunk_objectid,
2647 			    u64 chunk_offset)
2648 {
2649 	int ret;
2650 	struct btrfs_path *path;
2651 	struct btrfs_key key;
2652 
2653 	root = root->fs_info->chunk_root;
2654 	path = btrfs_alloc_path();
2655 	if (!path)
2656 		return -ENOMEM;
2657 
2658 	key.objectid = chunk_objectid;
2659 	key.offset = chunk_offset;
2660 	key.type = BTRFS_CHUNK_ITEM_KEY;
2661 
2662 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2663 	if (ret < 0)
2664 		goto out;
2665 	else if (ret > 0) { /* Logic error or corruption */
2666 		btrfs_std_error(root->fs_info, -ENOENT,
2667 			    "Failed lookup while freeing chunk.");
2668 		ret = -ENOENT;
2669 		goto out;
2670 	}
2671 
2672 	ret = btrfs_del_item(trans, root, path);
2673 	if (ret < 0)
2674 		btrfs_std_error(root->fs_info, ret,
2675 			    "Failed to delete chunk item.");
2676 out:
2677 	btrfs_free_path(path);
2678 	return ret;
2679 }
2680 
2681 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2682 			chunk_offset)
2683 {
2684 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2685 	struct btrfs_disk_key *disk_key;
2686 	struct btrfs_chunk *chunk;
2687 	u8 *ptr;
2688 	int ret = 0;
2689 	u32 num_stripes;
2690 	u32 array_size;
2691 	u32 len = 0;
2692 	u32 cur;
2693 	struct btrfs_key key;
2694 
2695 	lock_chunks(root);
2696 	array_size = btrfs_super_sys_array_size(super_copy);
2697 
2698 	ptr = super_copy->sys_chunk_array;
2699 	cur = 0;
2700 
2701 	while (cur < array_size) {
2702 		disk_key = (struct btrfs_disk_key *)ptr;
2703 		btrfs_disk_key_to_cpu(&key, disk_key);
2704 
2705 		len = sizeof(*disk_key);
2706 
2707 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2708 			chunk = (struct btrfs_chunk *)(ptr + len);
2709 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2710 			len += btrfs_chunk_item_size(num_stripes);
2711 		} else {
2712 			ret = -EIO;
2713 			break;
2714 		}
2715 		if (key.objectid == chunk_objectid &&
2716 		    key.offset == chunk_offset) {
2717 			memmove(ptr, ptr + len, array_size - (cur + len));
2718 			array_size -= len;
2719 			btrfs_set_super_sys_array_size(super_copy, array_size);
2720 		} else {
2721 			ptr += len;
2722 			cur += len;
2723 		}
2724 	}
2725 	unlock_chunks(root);
2726 	return ret;
2727 }
2728 
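/*
 * Illustrative sketch, not part of the original file: the superblock's
 * sys_chunk_array, edited in place by btrfs_del_sys_chunk() above, is a
 * packed sequence of (btrfs_disk_key, btrfs_chunk) pairs where each chunk
 * item's length depends on its stripe count. A hypothetical read-only
 * walk over the array looks like this:
 */
static __maybe_unused void example_walk_sys_chunk_array(
				struct btrfs_super_block *super_copy)
{
	u32 array_size = btrfs_super_sys_array_size(super_copy);
	u8 *ptr = super_copy->sys_chunk_array;
	u32 cur = 0;

	while (cur < array_size) {
		struct btrfs_disk_key *disk_key = (struct btrfs_disk_key *)ptr;
		struct btrfs_chunk *chunk;
		struct btrfs_key key;
		u32 num_stripes;
		u32 len = sizeof(*disk_key);

		btrfs_disk_key_to_cpu(&key, disk_key);
		if (key.type != BTRFS_CHUNK_ITEM_KEY)
			break;	/* the array may only hold chunk items */

		/* the chunk item immediately follows its key */
		chunk = (struct btrfs_chunk *)(ptr + len);
		num_stripes = btrfs_stack_chunk_num_stripes(chunk);
		len += btrfs_chunk_item_size(num_stripes);

		/* ... inspect key/chunk here ... */
		ptr += len;
		cur += len;
	}
}
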
2729 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2730 		       struct btrfs_root *root, u64 chunk_offset)
2731 {
2732 	struct extent_map_tree *em_tree;
2733 	struct extent_map *em;
2734 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2735 	struct map_lookup *map;
2736 	u64 dev_extent_len = 0;
2737 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2738 	int i, ret = 0;
2739 
2740 	/* Just in case */
2741 	root = root->fs_info->chunk_root;
2742 	em_tree = &root->fs_info->mapping_tree.map_tree;
2743 
2744 	read_lock(&em_tree->lock);
2745 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2746 	read_unlock(&em_tree->lock);
2747 
2748 	if (!em || em->start > chunk_offset ||
2749 	    em->start + em->len < chunk_offset) {
2750 		/*
2751 		 * This is a logic error, but we don't want to just rely on the
2752 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2753 		 * do anything we still error out.
2754 		 */
2755 		ASSERT(0);
2756 		if (em)
2757 			free_extent_map(em);
2758 		return -EINVAL;
2759 	}
2760 	map = em->map_lookup;
2761 	lock_chunks(root->fs_info->chunk_root);
2762 	check_system_chunk(trans, extent_root, map->type);
2763 	unlock_chunks(root->fs_info->chunk_root);
2764 
2765 	for (i = 0; i < map->num_stripes; i++) {
2766 		struct btrfs_device *device = map->stripes[i].dev;
2767 		ret = btrfs_free_dev_extent(trans, device,
2768 					    map->stripes[i].physical,
2769 					    &dev_extent_len);
2770 		if (ret) {
2771 			btrfs_abort_transaction(trans, root, ret);
2772 			goto out;
2773 		}
2774 
2775 		if (device->bytes_used > 0) {
2776 			lock_chunks(root);
2777 			btrfs_device_set_bytes_used(device,
2778 					device->bytes_used - dev_extent_len);
2779 			spin_lock(&root->fs_info->free_chunk_lock);
2780 			root->fs_info->free_chunk_space += dev_extent_len;
2781 			spin_unlock(&root->fs_info->free_chunk_lock);
2782 			btrfs_clear_space_info_full(root->fs_info);
2783 			unlock_chunks(root);
2784 		}
2785 
2786 		if (map->stripes[i].dev) {
2787 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2788 			if (ret) {
2789 				btrfs_abort_transaction(trans, root, ret);
2790 				goto out;
2791 			}
2792 		}
2793 	}
2794 	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2795 	if (ret) {
2796 		btrfs_abort_transaction(trans, root, ret);
2797 		goto out;
2798 	}
2799 
2800 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2801 
2802 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2803 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2804 		if (ret) {
2805 			btrfs_abort_transaction(trans, root, ret);
2806 			goto out;
2807 		}
2808 	}
2809 
2810 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2811 	if (ret) {
2812 		btrfs_abort_transaction(trans, extent_root, ret);
2813 		goto out;
2814 	}
2815 
2816 out:
2817 	/* once for us */
2818 	free_extent_map(em);
2819 	return ret;
2820 }
2821 
2822 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2823 {
2824 	struct btrfs_root *extent_root;
2825 	struct btrfs_trans_handle *trans;
2826 	int ret;
2827 
2828 	root = root->fs_info->chunk_root;
2829 	extent_root = root->fs_info->extent_root;
2830 
2831 	/*
2832 	 * Prevent races with automatic removal of unused block groups.
2833 	 * After we relocate and before we remove the chunk with offset
2834 	 * chunk_offset, automatic removal of the block group can kick in,
2835 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2836 	 *
2837 	 * Make sure to acquire this mutex before doing a tree search (dev
2838 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2839 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2840 	 * we release the path used to search the chunk/dev tree and before
2841 	 * the current task acquires this mutex and calls us.
2842 	 */
2843 	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2844 
2845 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2846 	if (ret)
2847 		return -ENOSPC;
2848 
2849 	/* step one, relocate all the extents inside this chunk */
2850 	btrfs_scrub_pause(root);
2851 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2852 	btrfs_scrub_continue(root);
2853 	if (ret)
2854 		return ret;
2855 
2856 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2857 						     chunk_offset);
2858 	if (IS_ERR(trans)) {
2859 		ret = PTR_ERR(trans);
2860 		btrfs_std_error(root->fs_info, ret, NULL);
2861 		return ret;
2862 	}
2863 
2864 	/*
2865 	 * step two, delete the device extents and the
2866 	 * chunk tree entries
2867 	 */
2868 	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2869 	btrfs_end_transaction(trans, root);
2870 	return ret;
2871 }
2872 
2873 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2874 {
2875 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2876 	struct btrfs_path *path;
2877 	struct extent_buffer *leaf;
2878 	struct btrfs_chunk *chunk;
2879 	struct btrfs_key key;
2880 	struct btrfs_key found_key;
2881 	u64 chunk_type;
2882 	bool retried = false;
2883 	int failed = 0;
2884 	int ret;
2885 
2886 	path = btrfs_alloc_path();
2887 	if (!path)
2888 		return -ENOMEM;
2889 
2890 again:
2891 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2892 	key.offset = (u64)-1;
2893 	key.type = BTRFS_CHUNK_ITEM_KEY;
2894 
2895 	while (1) {
2896 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2897 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2898 		if (ret < 0) {
2899 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2900 			goto error;
2901 		}
2902 		BUG_ON(ret == 0); /* Corruption */
2903 
2904 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2905 					  key.type);
2906 		if (ret)
2907 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2908 		if (ret < 0)
2909 			goto error;
2910 		if (ret > 0)
2911 			break;
2912 
2913 		leaf = path->nodes[0];
2914 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2915 
2916 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2917 				       struct btrfs_chunk);
2918 		chunk_type = btrfs_chunk_type(leaf, chunk);
2919 		btrfs_release_path(path);
2920 
2921 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2922 			ret = btrfs_relocate_chunk(chunk_root,
2923 						   found_key.offset);
2924 			if (ret == -ENOSPC)
2925 				failed++;
2926 			else
2927 				BUG_ON(ret);
2928 		}
2929 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2930 
2931 		if (found_key.offset == 0)
2932 			break;
2933 		key.offset = found_key.offset - 1;
2934 	}
2935 	ret = 0;
2936 	if (failed && !retried) {
2937 		failed = 0;
2938 		retried = true;
2939 		goto again;
2940 	} else if (WARN_ON(failed && retried)) {
2941 		ret = -ENOSPC;
2942 	}
2943 error:
2944 	btrfs_free_path(path);
2945 	return ret;
2946 }
2947 
2948 static int insert_balance_item(struct btrfs_root *root,
2949 			       struct btrfs_balance_control *bctl)
2950 {
2951 	struct btrfs_trans_handle *trans;
2952 	struct btrfs_balance_item *item;
2953 	struct btrfs_disk_balance_args disk_bargs;
2954 	struct btrfs_path *path;
2955 	struct extent_buffer *leaf;
2956 	struct btrfs_key key;
2957 	int ret, err;
2958 
2959 	path = btrfs_alloc_path();
2960 	if (!path)
2961 		return -ENOMEM;
2962 
2963 	trans = btrfs_start_transaction(root, 0);
2964 	if (IS_ERR(trans)) {
2965 		btrfs_free_path(path);
2966 		return PTR_ERR(trans);
2967 	}
2968 
2969 	key.objectid = BTRFS_BALANCE_OBJECTID;
2970 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
2971 	key.offset = 0;
2972 
2973 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2974 				      sizeof(*item));
2975 	if (ret)
2976 		goto out;
2977 
2978 	leaf = path->nodes[0];
2979 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2980 
2981 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2982 
2983 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2984 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2985 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2986 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2987 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2988 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2989 
2990 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2991 
2992 	btrfs_mark_buffer_dirty(leaf);
2993 out:
2994 	btrfs_free_path(path);
2995 	err = btrfs_commit_transaction(trans, root);
2996 	if (err && !ret)
2997 		ret = err;
2998 	return ret;
2999 }
3000 
3001 static int del_balance_item(struct btrfs_root *root)
3002 {
3003 	struct btrfs_trans_handle *trans;
3004 	struct btrfs_path *path;
3005 	struct btrfs_key key;
3006 	int ret, err;
3007 
3008 	path = btrfs_alloc_path();
3009 	if (!path)
3010 		return -ENOMEM;
3011 
3012 	trans = btrfs_start_transaction(root, 0);
3013 	if (IS_ERR(trans)) {
3014 		btrfs_free_path(path);
3015 		return PTR_ERR(trans);
3016 	}
3017 
3018 	key.objectid = BTRFS_BALANCE_OBJECTID;
3019 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3020 	key.offset = 0;
3021 
3022 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3023 	if (ret < 0)
3024 		goto out;
3025 	if (ret > 0) {
3026 		ret = -ENOENT;
3027 		goto out;
3028 	}
3029 
3030 	ret = btrfs_del_item(trans, root, path);
3031 out:
3032 	btrfs_free_path(path);
3033 	err = btrfs_commit_transaction(trans, root);
3034 	if (err && !ret)
3035 		ret = err;
3036 	return ret;
3037 }
3038 
3039 /*
3040  * This is a heuristic used to reduce the number of chunks balanced on
3041  * resume after balance was interrupted.
3042  */
3043 static void update_balance_args(struct btrfs_balance_control *bctl)
3044 {
3045 	/*
3046 	 * Turn on soft mode for chunk types that were being converted.
3047 	 */
3048 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3049 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3050 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3051 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3052 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3053 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3054 
3055 	/*
3056 	 * Turn on the usage filter if it is not already used.  The idea is
3057 	 * that chunks that we have already balanced should be
3058 	 * reasonably full.  Don't do it for chunks that are being
3059 	 * converted - that will keep us from relocating unconverted
3060 	 * (albeit full) chunks.
3061 	 */
3062 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3063 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3064 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3065 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3066 		bctl->data.usage = 90;
3067 	}
3068 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3069 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3070 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3071 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3072 		bctl->sys.usage = 90;
3073 	}
3074 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3075 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3076 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3077 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3078 		bctl->meta.usage = 90;
3079 	}
3080 }
3081 
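/*
 * Worked example (an editor's addition, not in the original source): a
 * balance started as "convert data to raid1" that is interrupted and
 * resumed gets BTRFS_BALANCE_ARGS_SOFT turned on here, so chunks already
 * converted to raid1 are skipped. Chunk types that were not being
 * converted gain a 90% usage filter, so chunks at least 90% full,
 * presumably the ones balanced before the interruption, are not
 * relocated a second time.
 */
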
3082 /*
3083  * Should be called with both balance and volume mutexes held to
3084  * serialize other volume operations (add_dev/rm_dev/resize) with
3085  * restriper.  Same goes for unset_balance_control.
3086  */
3087 static void set_balance_control(struct btrfs_balance_control *bctl)
3088 {
3089 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3090 
3091 	BUG_ON(fs_info->balance_ctl);
3092 
3093 	spin_lock(&fs_info->balance_lock);
3094 	fs_info->balance_ctl = bctl;
3095 	spin_unlock(&fs_info->balance_lock);
3096 }
3097 
3098 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3099 {
3100 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3101 
3102 	BUG_ON(!fs_info->balance_ctl);
3103 
3104 	spin_lock(&fs_info->balance_lock);
3105 	fs_info->balance_ctl = NULL;
3106 	spin_unlock(&fs_info->balance_lock);
3107 
3108 	kfree(bctl);
3109 }
3110 
3111 /*
3112  * Balance filters.  Return 1 if chunk should be filtered out
3113  * (should not be balanced).
3114  */
3115 static int chunk_profiles_filter(u64 chunk_type,
3116 				 struct btrfs_balance_args *bargs)
3117 {
3118 	chunk_type = chunk_to_extended(chunk_type) &
3119 				BTRFS_EXTENDED_PROFILE_MASK;
3120 
3121 	if (bargs->profiles & chunk_type)
3122 		return 0;
3123 
3124 	return 1;
3125 }
3126 
3127 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3128 			      struct btrfs_balance_args *bargs)
3129 {
3130 	struct btrfs_block_group_cache *cache;
3131 	u64 chunk_used;
3132 	u64 user_thresh_min;
3133 	u64 user_thresh_max;
3134 	int ret = 1;
3135 
3136 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3137 	chunk_used = btrfs_block_group_used(&cache->item);
3138 
3139 	if (bargs->usage_min == 0)
3140 		user_thresh_min = 0;
3141 	else
3142 		user_thresh_min = div_factor_fine(cache->key.offset,
3143 					bargs->usage_min);
3144 
3145 	if (bargs->usage_max == 0)
3146 		user_thresh_max = 1;
3147 	else if (bargs->usage_max > 100)
3148 		user_thresh_max = cache->key.offset;
3149 	else
3150 		user_thresh_max = div_factor_fine(cache->key.offset,
3151 					bargs->usage_max);
3152 
3153 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3154 		ret = 0;
3155 
3156 	btrfs_put_block_group(cache);
3157 	return ret;
3158 }
3159 
3160 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3161 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3162 {
3163 	struct btrfs_block_group_cache *cache;
3164 	u64 chunk_used, user_thresh;
3165 	int ret = 1;
3166 
3167 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3168 	chunk_used = btrfs_block_group_used(&cache->item);
3169 
3170 	if (bargs->usage_min == 0)
3171 		user_thresh = 1;
3172 	else if (bargs->usage > 100)
3173 		user_thresh = cache->key.offset;
3174 	else
3175 		user_thresh = div_factor_fine(cache->key.offset,
3176 					      bargs->usage);
3177 
3178 	if (chunk_used < user_thresh)
3179 		ret = 0;
3180 
3181 	btrfs_put_block_group(cache);
3182 	return ret;
3183 }
3184 
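/*
 * Worked example (an editor's addition, not in the original source):
 * with usage=30 on a 1GiB chunk, user_thresh is div_factor_fine(1GiB, 30),
 * about 307MiB. A chunk using fewer bytes than that is balanced (the
 * filter returns 0); a fuller chunk is filtered out (returns 1).
 */
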
3185 static int chunk_devid_filter(struct extent_buffer *leaf,
3186 			      struct btrfs_chunk *chunk,
3187 			      struct btrfs_balance_args *bargs)
3188 {
3189 	struct btrfs_stripe *stripe;
3190 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3191 	int i;
3192 
3193 	for (i = 0; i < num_stripes; i++) {
3194 		stripe = btrfs_stripe_nr(chunk, i);
3195 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3196 			return 0;
3197 	}
3198 
3199 	return 1;
3200 }
3201 
3202 /* [pstart, pend) */
3203 static int chunk_drange_filter(struct extent_buffer *leaf,
3204 			       struct btrfs_chunk *chunk,
3205 			       u64 chunk_offset,
3206 			       struct btrfs_balance_args *bargs)
3207 {
3208 	struct btrfs_stripe *stripe;
3209 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3210 	u64 stripe_offset;
3211 	u64 stripe_length;
3212 	int factor;
3213 	int i;
3214 
3215 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3216 		return 0;
3217 
3218 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3219 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3220 		factor = num_stripes / 2;
3221 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3222 		factor = num_stripes - 1;
3223 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3224 		factor = num_stripes - 2;
3225 	} else {
3226 		factor = num_stripes;
3227 	}
3228 
3229 	for (i = 0; i < num_stripes; i++) {
3230 		stripe = btrfs_stripe_nr(chunk, i);
3231 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3232 			continue;
3233 
3234 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3235 		stripe_length = btrfs_chunk_length(leaf, chunk);
3236 		stripe_length = div_u64(stripe_length, factor);
3237 
3238 		if (stripe_offset < bargs->pend &&
3239 		    stripe_offset + stripe_length > bargs->pstart)
3240 			return 0;
3241 	}
3242 
3243 	return 1;
3244 }
3245 
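/*
 * Worked example (an editor's addition, not in the original source): for
 * a RAID6 chunk with 6 stripes, factor = num_stripes - 2 = 4, so a 4GiB
 * chunk stores 1GiB per stripe. The [pstart, pend) overlap test above is
 * then applied to each 1GiB stripe that sits on bargs->devid.
 */
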
3246 /* [vstart, vend) */
3247 static int chunk_vrange_filter(struct extent_buffer *leaf,
3248 			       struct btrfs_chunk *chunk,
3249 			       u64 chunk_offset,
3250 			       struct btrfs_balance_args *bargs)
3251 {
3252 	if (chunk_offset < bargs->vend &&
3253 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3254 		/* at least part of the chunk is inside this vrange */
3255 		return 0;
3256 
3257 	return 1;
3258 }
3259 
3260 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3261 			       struct btrfs_chunk *chunk,
3262 			       struct btrfs_balance_args *bargs)
3263 {
3264 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3265 
3266 	if (bargs->stripes_min <= num_stripes
3267 			&& num_stripes <= bargs->stripes_max)
3268 		return 0;
3269 
3270 	return 1;
3271 }
3272 
3273 static int chunk_soft_convert_filter(u64 chunk_type,
3274 				     struct btrfs_balance_args *bargs)
3275 {
3276 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3277 		return 0;
3278 
3279 	chunk_type = chunk_to_extended(chunk_type) &
3280 				BTRFS_EXTENDED_PROFILE_MASK;
3281 
3282 	if (bargs->target == chunk_type)
3283 		return 1;
3284 
3285 	return 0;
3286 }
3287 
3288 static int should_balance_chunk(struct btrfs_root *root,
3289 				struct extent_buffer *leaf,
3290 				struct btrfs_chunk *chunk, u64 chunk_offset)
3291 {
3292 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3293 	struct btrfs_balance_args *bargs = NULL;
3294 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3295 
3296 	/* type filter */
3297 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3298 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3299 		return 0;
3300 	}
3301 
3302 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3303 		bargs = &bctl->data;
3304 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3305 		bargs = &bctl->sys;
3306 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3307 		bargs = &bctl->meta;
3308 
3309 	/* profiles filter */
3310 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3311 	    chunk_profiles_filter(chunk_type, bargs)) {
3312 		return 0;
3313 	}
3314 
3315 	/* usage filter */
3316 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3317 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3318 		return 0;
3319 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3320 	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3321 		return 0;
3322 	}
3323 
3324 	/* devid filter */
3325 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3326 	    chunk_devid_filter(leaf, chunk, bargs)) {
3327 		return 0;
3328 	}
3329 
3330 	/* drange filter, makes sense only with devid filter */
3331 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3332 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3333 		return 0;
3334 	}
3335 
3336 	/* vrange filter */
3337 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3338 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3339 		return 0;
3340 	}
3341 
3342 	/* stripes filter */
3343 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3344 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3345 		return 0;
3346 	}
3347 
3348 	/* soft profile changing mode */
3349 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3350 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3351 		return 0;
3352 	}
3353 
3354 	/*
3355 	 * limit by count; this must be the last filter since it modifies bargs
3356 	 */
3357 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3358 		if (bargs->limit == 0)
3359 			return 0;
3360 		else
3361 			bargs->limit--;
3362 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3363 		/*
3364 		 * Same logic as the 'limit' filter; the minimum cannot be
3365 		 * determined here because we do not have the global information
3366 		 * about the count of all chunks that satisfy the filters.
3367 		 */
3368 		if (bargs->limit_max == 0)
3369 			return 0;
3370 		else
3371 			bargs->limit_max--;
3372 	}
3373 
3374 	return 1;
3375 }
3376 
3377 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3378 {
3379 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3380 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3381 	struct btrfs_root *dev_root = fs_info->dev_root;
3382 	struct list_head *devices;
3383 	struct btrfs_device *device;
3384 	u64 old_size;
3385 	u64 size_to_free;
3386 	u64 chunk_type;
3387 	struct btrfs_chunk *chunk;
3388 	struct btrfs_path *path;
3389 	struct btrfs_key key;
3390 	struct btrfs_key found_key;
3391 	struct btrfs_trans_handle *trans;
3392 	struct extent_buffer *leaf;
3393 	int slot;
3394 	int ret;
3395 	int enospc_errors = 0;
3396 	bool counting = true;
3397 	/* The single value limit and min/max limits use the same bytes in the balance args (a union) */
3398 	u64 limit_data = bctl->data.limit;
3399 	u64 limit_meta = bctl->meta.limit;
3400 	u64 limit_sys = bctl->sys.limit;
3401 	u32 count_data = 0;
3402 	u32 count_meta = 0;
3403 	u32 count_sys = 0;
3404 	int chunk_reserved = 0;
3405 
3406 	/* step one, make some room on all the devices */
3407 	devices = &fs_info->fs_devices->devices;
3408 	list_for_each_entry(device, devices, dev_list) {
3409 		old_size = btrfs_device_get_total_bytes(device);
3410 		size_to_free = div_factor(old_size, 1);
3411 		size_to_free = min_t(u64, size_to_free, SZ_1M);
3412 		if (!device->writeable ||
3413 		    btrfs_device_get_total_bytes(device) -
3414 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3415 		    device->is_tgtdev_for_dev_replace)
3416 			continue;
3417 
3418 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3419 		if (ret == -ENOSPC)
3420 			break;
3421 		BUG_ON(ret);
3422 
3423 		trans = btrfs_start_transaction(dev_root, 0);
3424 		BUG_ON(IS_ERR(trans));
3425 
3426 		ret = btrfs_grow_device(trans, device, old_size);
3427 		BUG_ON(ret);
3428 
3429 		btrfs_end_transaction(trans, dev_root);
3430 	}
3431 
3432 	/* step two, relocate all the chunks */
3433 	path = btrfs_alloc_path();
3434 	if (!path) {
3435 		ret = -ENOMEM;
3436 		goto error;
3437 	}
3438 
3439 	/* zero out stat counters */
3440 	spin_lock(&fs_info->balance_lock);
3441 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3442 	spin_unlock(&fs_info->balance_lock);
3443 again:
3444 	if (!counting) {
3445 		/*
3446 		 * The single value limit and min/max limits use the same bytes
3447 		 * in the balance args union, so restore the saved u64 values
3448 		 */
3449 		bctl->data.limit = limit_data;
3450 		bctl->meta.limit = limit_meta;
3451 		bctl->sys.limit = limit_sys;
3452 	}
3453 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3454 	key.offset = (u64)-1;
3455 	key.type = BTRFS_CHUNK_ITEM_KEY;
3456 
3457 	while (1) {
3458 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3459 		    atomic_read(&fs_info->balance_cancel_req)) {
3460 			ret = -ECANCELED;
3461 			goto error;
3462 		}
3463 
3464 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3465 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3466 		if (ret < 0) {
3467 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3468 			goto error;
3469 		}
3470 
3471 		/*
3472 		 * this shouldn't happen, it means the last relocate
3473 		 * failed
3474 		 */
3475 		if (ret == 0)
3476 			BUG(); /* FIXME break ? */
3477 
3478 		ret = btrfs_previous_item(chunk_root, path, 0,
3479 					  BTRFS_CHUNK_ITEM_KEY);
3480 		if (ret) {
3481 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3482 			ret = 0;
3483 			break;
3484 		}
3485 
3486 		leaf = path->nodes[0];
3487 		slot = path->slots[0];
3488 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3489 
3490 		if (found_key.objectid != key.objectid) {
3491 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3492 			break;
3493 		}
3494 
3495 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3496 		chunk_type = btrfs_chunk_type(leaf, chunk);
3497 
3498 		if (!counting) {
3499 			spin_lock(&fs_info->balance_lock);
3500 			bctl->stat.considered++;
3501 			spin_unlock(&fs_info->balance_lock);
3502 		}
3503 
3504 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3505 					   found_key.offset);
3506 
3507 		btrfs_release_path(path);
3508 		if (!ret) {
3509 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3510 			goto loop;
3511 		}
3512 
3513 		if (counting) {
3514 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3515 			spin_lock(&fs_info->balance_lock);
3516 			bctl->stat.expected++;
3517 			spin_unlock(&fs_info->balance_lock);
3518 
3519 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3520 				count_data++;
3521 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3522 				count_sys++;
3523 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3524 				count_meta++;
3525 
3526 			goto loop;
3527 		}
3528 
3529 		/*
3530 		 * Apply limit_min filter, no need to check if the LIMITS
3531 		 * filter is used, limit_min is 0 by default
3532 		 */
3533 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3534 					count_data < bctl->data.limit_min)
3535 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3536 					count_meta < bctl->meta.limit_min)
3537 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3538 					count_sys < bctl->sys.limit_min)) {
3539 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3540 			goto loop;
3541 		}
3542 
3543 		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
3544 			trans = btrfs_start_transaction(chunk_root, 0);
3545 			if (IS_ERR(trans)) {
3546 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3547 				ret = PTR_ERR(trans);
3548 				goto error;
3549 			}
3550 
3551 			ret = btrfs_force_chunk_alloc(trans, chunk_root,
3552 						      BTRFS_BLOCK_GROUP_DATA);
3553 			btrfs_end_transaction(trans, chunk_root);
3554 			if (ret < 0) {
3555 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3556 				goto error;
3557 			}
3558 			chunk_reserved = 1;
3559 		}
3560 
3561 		ret = btrfs_relocate_chunk(chunk_root,
3562 					   found_key.offset);
3563 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3564 		if (ret && ret != -ENOSPC)
3565 			goto error;
3566 		if (ret == -ENOSPC) {
3567 			enospc_errors++;
3568 		} else {
3569 			spin_lock(&fs_info->balance_lock);
3570 			bctl->stat.completed++;
3571 			spin_unlock(&fs_info->balance_lock);
3572 		}
3573 loop:
3574 		if (found_key.offset == 0)
3575 			break;
3576 		key.offset = found_key.offset - 1;
3577 	}
3578 
3579 	if (counting) {
3580 		btrfs_release_path(path);
3581 		counting = false;
3582 		goto again;
3583 	}
3584 error:
3585 	btrfs_free_path(path);
3586 	if (enospc_errors) {
3587 		btrfs_info(fs_info, "%d enospc errors during balance",
3588 		       enospc_errors);
3589 		if (!ret)
3590 			ret = -ENOSPC;
3591 	}
3592 
3593 	return ret;
3594 }
3595 
3596 /**
3597  * alloc_profile_is_valid - see if a given profile is valid and reduced
3598  * @flags: profile to validate
3599  * @extended: if true @flags is treated as an extended profile
3600  */
3601 static int alloc_profile_is_valid(u64 flags, int extended)
3602 {
3603 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3604 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3605 
3606 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3607 
3608 	/* 1) check that all other bits are zeroed */
3609 	if (flags & ~mask)
3610 		return 0;
3611 
3612 	/* 2) see if profile is reduced */
3613 	if (flags == 0)
3614 		return !extended; /* "0" is valid for usual profiles */
3615 
3616 	/* true if exactly one bit set */
3617 	return (flags & (flags - 1)) == 0;
3618 }
3619 
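/*
 * Worked example (an editor's addition, not in the original source): the
 * final test in alloc_profile_is_valid() relies on flags & (flags - 1)
 * clearing the lowest set bit. For a reduced profile such as
 * BTRFS_BLOCK_GROUP_RAID1 (a single bit) the result is 0 and the profile
 * is accepted; for an unreduced combination such as RAID1 | RAID10 the
 * result is nonzero and the profile is rejected.
 */
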
3620 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3621 {
3622 	/* cancel requested || normal exit path */
3623 	return atomic_read(&fs_info->balance_cancel_req) ||
3624 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3625 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3626 }
3627 
3628 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3629 {
3630 	int ret;
3631 
3632 	unset_balance_control(fs_info);
3633 	ret = del_balance_item(fs_info->tree_root);
3634 	if (ret)
3635 		btrfs_std_error(fs_info, ret, NULL);
3636 
3637 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3638 }
3639 
3640 /* Non-zero return value signifies invalidity */
3641 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3642 		u64 allowed)
3643 {
3644 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3645 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3646 		 (bctl_arg->target & ~allowed)));
3647 }
3648 
3649 /*
3650  * Should be called with both balance and volume mutexes held
3651  */
3652 int btrfs_balance(struct btrfs_balance_control *bctl,
3653 		  struct btrfs_ioctl_balance_args *bargs)
3654 {
3655 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3656 	u64 allowed;
3657 	int mixed = 0;
3658 	int ret;
3659 	u64 num_devices;
3660 	unsigned seq;
3661 
3662 	if (btrfs_fs_closing(fs_info) ||
3663 	    atomic_read(&fs_info->balance_pause_req) ||
3664 	    atomic_read(&fs_info->balance_cancel_req)) {
3665 		ret = -EINVAL;
3666 		goto out;
3667 	}
3668 
3669 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3670 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3671 		mixed = 1;
3672 
3673 	/*
3674 	 * In case of mixed groups both data and meta should be picked,
3675 	 * and identical options should be given for both of them.
3676 	 */
3677 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3678 	if (mixed && (bctl->flags & allowed)) {
3679 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3680 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3681 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3682 			btrfs_err(fs_info, "with mixed groups data and "
3683 				   "metadata balance options must be the same");
3684 			ret = -EINVAL;
3685 			goto out;
3686 		}
3687 	}
3688 
3689 	num_devices = fs_info->fs_devices->num_devices;
3690 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3691 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3692 		BUG_ON(num_devices < 1);
3693 		num_devices--;
3694 	}
3695 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3696 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3697 	if (num_devices == 1)
3698 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3699 	else if (num_devices > 1)
3700 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3701 	if (num_devices > 2)
3702 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3703 	if (num_devices > 3)
3704 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3705 			    BTRFS_BLOCK_GROUP_RAID6);
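
	/*
	 * Worked example (illustrative): on a 3-device filesystem the
	 * mask built above is SINGLE | RAID0 | RAID1 | RAID5, so a
	 * convert to RAID10 or RAID6 (which need a 4th device here)
	 * fails the three checks below with -EINVAL.
	 */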
3706 	if (validate_convert_profile(&bctl->data, allowed)) {
3707 		btrfs_err(fs_info, "unable to start balance with target "
3708 			   "data profile %llu",
3709 		       bctl->data.target);
3710 		ret = -EINVAL;
3711 		goto out;
3712 	}
3713 	if (validate_convert_profile(&bctl->meta, allowed)) {
3714 		btrfs_err(fs_info,
3715 			   "unable to start balance with target metadata profile %llu",
3716 		       bctl->meta.target);
3717 		ret = -EINVAL;
3718 		goto out;
3719 	}
3720 	if (validate_convert_profile(&bctl->sys, allowed)) {
3721 		btrfs_err(fs_info,
3722 			   "unable to start balance with target system profile %llu",
3723 		       bctl->sys.target);
3724 		ret = -EINVAL;
3725 		goto out;
3726 	}
3727 
3728 	/* allow reducing meta or sys integrity only if force is set */
3729 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3730 			BTRFS_BLOCK_GROUP_RAID10 |
3731 			BTRFS_BLOCK_GROUP_RAID5 |
3732 			BTRFS_BLOCK_GROUP_RAID6;
3733 	do {
3734 		seq = read_seqbegin(&fs_info->profiles_lock);
3735 
3736 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3737 		     (fs_info->avail_system_alloc_bits & allowed) &&
3738 		     !(bctl->sys.target & allowed)) ||
3739 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3740 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3741 		     !(bctl->meta.target & allowed))) {
3742 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3743 				btrfs_info(fs_info, "force reducing metadata integrity");
3744 			} else {
3745 				btrfs_err(fs_info, "balance will reduce metadata "
3746 					   "integrity, use force if you want this");
3747 				ret = -EINVAL;
3748 				goto out;
3749 			}
3750 		}
3751 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3752 
3753 	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3754 		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3755 		btrfs_warn(fs_info,
3756 	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3757 			bctl->meta.target, bctl->data.target);
3758 	}
3759 
3760 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3761 		fs_info->num_tolerated_disk_barrier_failures = min(
3762 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3763 			btrfs_get_num_tolerated_disk_barrier_failures(
3764 				bctl->sys.target));
3765 	}
3766 
3767 	ret = insert_balance_item(fs_info->tree_root, bctl);
3768 	if (ret && ret != -EEXIST)
3769 		goto out;
3770 
3771 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3772 		BUG_ON(ret == -EEXIST);
3773 		set_balance_control(bctl);
3774 	} else {
3775 		BUG_ON(ret != -EEXIST);
3776 		spin_lock(&fs_info->balance_lock);
3777 		update_balance_args(bctl);
3778 		spin_unlock(&fs_info->balance_lock);
3779 	}
3780 
3781 	atomic_inc(&fs_info->balance_running);
3782 	mutex_unlock(&fs_info->balance_mutex);
3783 
3784 	ret = __btrfs_balance(fs_info);
3785 
3786 	mutex_lock(&fs_info->balance_mutex);
3787 	atomic_dec(&fs_info->balance_running);
3788 
3789 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3790 		fs_info->num_tolerated_disk_barrier_failures =
3791 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3792 	}
3793 
3794 	if (bargs) {
3795 		memset(bargs, 0, sizeof(*bargs));
3796 		update_ioctl_balance_args(fs_info, 0, bargs);
3797 	}
3798 
3799 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3800 	    balance_need_close(fs_info)) {
3801 		__cancel_balance(fs_info);
3802 	}
3803 
3804 	wake_up(&fs_info->balance_wait_q);
3805 
3806 	return ret;
3807 out:
3808 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3809 		__cancel_balance(fs_info);
3810 	else {
3811 		kfree(bctl);
3812 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3813 	}
3814 	return ret;
3815 }
3816 
3817 static int balance_kthread(void *data)
3818 {
3819 	struct btrfs_fs_info *fs_info = data;
3820 	int ret = 0;
3821 
3822 	mutex_lock(&fs_info->volume_mutex);
3823 	mutex_lock(&fs_info->balance_mutex);
3824 
3825 	if (fs_info->balance_ctl) {
3826 		btrfs_info(fs_info, "continuing balance");
3827 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3828 	}
3829 
3830 	mutex_unlock(&fs_info->balance_mutex);
3831 	mutex_unlock(&fs_info->volume_mutex);
3832 
3833 	return ret;
3834 }
3835 
3836 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3837 {
3838 	struct task_struct *tsk;
3839 
3840 	spin_lock(&fs_info->balance_lock);
3841 	if (!fs_info->balance_ctl) {
3842 		spin_unlock(&fs_info->balance_lock);
3843 		return 0;
3844 	}
3845 	spin_unlock(&fs_info->balance_lock);
3846 
3847 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3848 		btrfs_info(fs_info, "force skipping balance");
3849 		return 0;
3850 	}
3851 
3852 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3853 	return PTR_ERR_OR_ZERO(tsk);
3854 }
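
/*
 * Illustrative note: PTR_ERR_OR_ZERO() above collapses the two
 * possible outcomes of kthread_run(): if the "btrfs-balance" thread
 * could not be created, tsk is e.g. ERR_PTR(-ENOMEM) and -ENOMEM is
 * returned; once the thread is running the result is 0.
 */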
3855 
3856 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3857 {
3858 	struct btrfs_balance_control *bctl;
3859 	struct btrfs_balance_item *item;
3860 	struct btrfs_disk_balance_args disk_bargs;
3861 	struct btrfs_path *path;
3862 	struct extent_buffer *leaf;
3863 	struct btrfs_key key;
3864 	int ret;
3865 
3866 	path = btrfs_alloc_path();
3867 	if (!path)
3868 		return -ENOMEM;
3869 
3870 	key.objectid = BTRFS_BALANCE_OBJECTID;
3871 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3872 	key.offset = 0;
3873 
3874 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3875 	if (ret < 0)
3876 		goto out;
3877 	if (ret > 0) { /* ret = -ENOENT; */
3878 		ret = 0;
3879 		goto out;
3880 	}
3881 
3882 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3883 	if (!bctl) {
3884 		ret = -ENOMEM;
3885 		goto out;
3886 	}
3887 
3888 	leaf = path->nodes[0];
3889 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3890 
3891 	bctl->fs_info = fs_info;
3892 	bctl->flags = btrfs_balance_flags(leaf, item);
3893 	bctl->flags |= BTRFS_BALANCE_RESUME;
3894 
3895 	btrfs_balance_data(leaf, item, &disk_bargs);
3896 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3897 	btrfs_balance_meta(leaf, item, &disk_bargs);
3898 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3899 	btrfs_balance_sys(leaf, item, &disk_bargs);
3900 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3901 
3902 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3903 
3904 	mutex_lock(&fs_info->volume_mutex);
3905 	mutex_lock(&fs_info->balance_mutex);
3906 
3907 	set_balance_control(bctl);
3908 
3909 	mutex_unlock(&fs_info->balance_mutex);
3910 	mutex_unlock(&fs_info->volume_mutex);
3911 out:
3912 	btrfs_free_path(path);
3913 	return ret;
3914 }
3915 
3916 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3917 {
3918 	int ret = 0;
3919 
3920 	mutex_lock(&fs_info->balance_mutex);
3921 	if (!fs_info->balance_ctl) {
3922 		mutex_unlock(&fs_info->balance_mutex);
3923 		return -ENOTCONN;
3924 	}
3925 
3926 	if (atomic_read(&fs_info->balance_running)) {
3927 		atomic_inc(&fs_info->balance_pause_req);
3928 		mutex_unlock(&fs_info->balance_mutex);
3929 
3930 		wait_event(fs_info->balance_wait_q,
3931 			   atomic_read(&fs_info->balance_running) == 0);
3932 
3933 		mutex_lock(&fs_info->balance_mutex);
3934 		/* we are good with balance_ctl ripped out from under us */
3935 		BUG_ON(atomic_read(&fs_info->balance_running));
3936 		atomic_dec(&fs_info->balance_pause_req);
3937 	} else {
3938 		ret = -ENOTCONN;
3939 	}
3940 
3941 	mutex_unlock(&fs_info->balance_mutex);
3942 	return ret;
3943 }
3944 
3945 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3946 {
3947 	if (fs_info->sb->s_flags & MS_RDONLY)
3948 		return -EROFS;
3949 
3950 	mutex_lock(&fs_info->balance_mutex);
3951 	if (!fs_info->balance_ctl) {
3952 		mutex_unlock(&fs_info->balance_mutex);
3953 		return -ENOTCONN;
3954 	}
3955 
3956 	atomic_inc(&fs_info->balance_cancel_req);
3957 	/*
3958 	 * If we are running, just wait and return; the balance item is
3959 	 * deleted in btrfs_balance() in this case.
3960 	 */
3961 	if (atomic_read(&fs_info->balance_running)) {
3962 		mutex_unlock(&fs_info->balance_mutex);
3963 		wait_event(fs_info->balance_wait_q,
3964 			   atomic_read(&fs_info->balance_running) == 0);
3965 		mutex_lock(&fs_info->balance_mutex);
3966 	} else {
3967 		/* __cancel_balance needs volume_mutex */
3968 		mutex_unlock(&fs_info->balance_mutex);
3969 		mutex_lock(&fs_info->volume_mutex);
3970 		mutex_lock(&fs_info->balance_mutex);
3971 
3972 		if (fs_info->balance_ctl)
3973 			__cancel_balance(fs_info);
3974 
3975 		mutex_unlock(&fs_info->volume_mutex);
3976 	}
3977 
3978 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3979 	atomic_dec(&fs_info->balance_cancel_req);
3980 	mutex_unlock(&fs_info->balance_mutex);
3981 	return 0;
3982 }
3983 
3984 static int btrfs_uuid_scan_kthread(void *data)
3985 {
3986 	struct btrfs_fs_info *fs_info = data;
3987 	struct btrfs_root *root = fs_info->tree_root;
3988 	struct btrfs_key key;
3989 	struct btrfs_key max_key;
3990 	struct btrfs_path *path = NULL;
3991 	int ret = 0;
3992 	struct extent_buffer *eb;
3993 	int slot;
3994 	struct btrfs_root_item root_item;
3995 	u32 item_size;
3996 	struct btrfs_trans_handle *trans = NULL;
3997 
3998 	path = btrfs_alloc_path();
3999 	if (!path) {
4000 		ret = -ENOMEM;
4001 		goto out;
4002 	}
4003 
4004 	key.objectid = 0;
4005 	key.type = BTRFS_ROOT_ITEM_KEY;
4006 	key.offset = 0;
4007 
4008 	max_key.objectid = (u64)-1;
4009 	max_key.type = BTRFS_ROOT_ITEM_KEY;
4010 	max_key.offset = (u64)-1;
4011 
4012 	while (1) {
4013 		ret = btrfs_search_forward(root, &key, path, 0);
4014 		if (ret) {
4015 			if (ret > 0)
4016 				ret = 0;
4017 			break;
4018 		}
4019 
4020 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4021 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4022 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4023 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4024 			goto skip;
4025 
4026 		eb = path->nodes[0];
4027 		slot = path->slots[0];
4028 		item_size = btrfs_item_size_nr(eb, slot);
4029 		if (item_size < sizeof(root_item))
4030 			goto skip;
4031 
4032 		read_extent_buffer(eb, &root_item,
4033 				   btrfs_item_ptr_offset(eb, slot),
4034 				   (int)sizeof(root_item));
4035 		if (btrfs_root_refs(&root_item) == 0)
4036 			goto skip;
4037 
4038 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4039 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4040 			if (trans)
4041 				goto update_tree;
4042 
4043 			btrfs_release_path(path);
4044 			/*
4045 			 * 1 - subvol uuid item
4046 			 * 1 - received_subvol uuid item
4047 			 */
4048 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4049 			if (IS_ERR(trans)) {
4050 				ret = PTR_ERR(trans);
4051 				break;
4052 			}
4053 			continue;
4054 		} else {
4055 			goto skip;
4056 		}
4057 update_tree:
4058 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4059 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4060 						  root_item.uuid,
4061 						  BTRFS_UUID_KEY_SUBVOL,
4062 						  key.objectid);
4063 			if (ret < 0) {
4064 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4065 					ret);
4066 				break;
4067 			}
4068 		}
4069 
4070 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4071 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4072 						  root_item.received_uuid,
4073 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4074 						  key.objectid);
4075 			if (ret < 0) {
4076 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4077 					ret);
4078 				break;
4079 			}
4080 		}
4081 
4082 skip:
4083 		if (trans) {
4084 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4085 			trans = NULL;
4086 			if (ret)
4087 				break;
4088 		}
4089 
4090 		btrfs_release_path(path);
4091 		if (key.offset < (u64)-1) {
4092 			key.offset++;
4093 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4094 			key.offset = 0;
4095 			key.type = BTRFS_ROOT_ITEM_KEY;
4096 		} else if (key.objectid < (u64)-1) {
4097 			key.offset = 0;
4098 			key.type = BTRFS_ROOT_ITEM_KEY;
4099 			key.objectid++;
4100 		} else {
4101 			break;
4102 		}
4103 		cond_resched();
4104 	}
4105 
4106 out:
4107 	btrfs_free_path(path);
4108 	if (trans && !IS_ERR(trans))
4109 		btrfs_end_transaction(trans, fs_info->uuid_root);
4110 	if (ret)
4111 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4112 	else
4113 		fs_info->update_uuid_tree_gen = 1;
4114 	up(&fs_info->uuid_tree_rescan_sem);
4115 	return 0;
4116 }
4117 
4118 /*
4119  * Callback for btrfs_uuid_tree_iterate().
4120  * returns:
4121  * 0	check succeeded, the entry is not outdated.
4122  * < 0	if an error occurred.
4123  * > 0	if the check failed, which means the caller shall remove the entry.
4124  */
4125 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4126 				       u8 *uuid, u8 type, u64 subid)
4127 {
4128 	struct btrfs_key key;
4129 	int ret = 0;
4130 	struct btrfs_root *subvol_root;
4131 
4132 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4133 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4134 		goto out;
4135 
4136 	key.objectid = subid;
4137 	key.type = BTRFS_ROOT_ITEM_KEY;
4138 	key.offset = (u64)-1;
4139 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4140 	if (IS_ERR(subvol_root)) {
4141 		ret = PTR_ERR(subvol_root);
4142 		if (ret == -ENOENT)
4143 			ret = 1;
4144 		goto out;
4145 	}
4146 
4147 	switch (type) {
4148 	case BTRFS_UUID_KEY_SUBVOL:
4149 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4150 			ret = 1;
4151 		break;
4152 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4153 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4154 			   BTRFS_UUID_SIZE))
4155 			ret = 1;
4156 		break;
4157 	}
4158 
4159 out:
4160 	return ret;
4161 }
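
/*
 * Illustrative note: when the subvolume behind an entry no longer
 * exists, btrfs_read_fs_root_no_name() returns ERR_PTR(-ENOENT),
 * which is translated to 1 above so that btrfs_uuid_tree_iterate()
 * removes the stale entry instead of aborting the whole scan.
 */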
4162 
4163 static int btrfs_uuid_rescan_kthread(void *data)
4164 {
4165 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4166 	int ret;
4167 
4168 	/*
4169 	 * 1st step is to iterate through the existing UUID tree and
4170 	 * to delete all entries that contain outdated data.
4171 	 * 2nd step is to add all missing entries to the UUID tree.
4172 	 */
4173 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4174 	if (ret < 0) {
4175 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4176 		up(&fs_info->uuid_tree_rescan_sem);
4177 		return ret;
4178 	}
4179 	return btrfs_uuid_scan_kthread(data);
4180 }
4181 
4182 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4183 {
4184 	struct btrfs_trans_handle *trans;
4185 	struct btrfs_root *tree_root = fs_info->tree_root;
4186 	struct btrfs_root *uuid_root;
4187 	struct task_struct *task;
4188 	int ret;
4189 
4190 	/*
4191 	 * 1 - root node
4192 	 * 1 - root item
4193 	 */
4194 	trans = btrfs_start_transaction(tree_root, 2);
4195 	if (IS_ERR(trans))
4196 		return PTR_ERR(trans);
4197 
4198 	uuid_root = btrfs_create_tree(trans, fs_info,
4199 				      BTRFS_UUID_TREE_OBJECTID);
4200 	if (IS_ERR(uuid_root)) {
4201 		ret = PTR_ERR(uuid_root);
4202 		btrfs_abort_transaction(trans, tree_root, ret);
4203 		return ret;
4204 	}
4205 
4206 	fs_info->uuid_root = uuid_root;
4207 
4208 	ret = btrfs_commit_transaction(trans, tree_root);
4209 	if (ret)
4210 		return ret;
4211 
4212 	down(&fs_info->uuid_tree_rescan_sem);
4213 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4214 	if (IS_ERR(task)) {
4215 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4216 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4217 		up(&fs_info->uuid_tree_rescan_sem);
4218 		return PTR_ERR(task);
4219 	}
4220 
4221 	return 0;
4222 }
4223 
4224 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4225 {
4226 	struct task_struct *task;
4227 
4228 	down(&fs_info->uuid_tree_rescan_sem);
4229 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4230 	if (IS_ERR(task)) {
4231 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4232 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4233 		up(&fs_info->uuid_tree_rescan_sem);
4234 		return PTR_ERR(task);
4235 	}
4236 
4237 	return 0;
4238 }
4239 
4240 /*
4241  * Shrinking a device means finding all of the device extents past
4242  * the new size, and then following the back refs to the chunks.
4243  * The chunk relocation code actually frees the device extents.
4244  */
4245 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4246 {
4247 	struct btrfs_trans_handle *trans;
4248 	struct btrfs_root *root = device->dev_root;
4249 	struct btrfs_dev_extent *dev_extent = NULL;
4250 	struct btrfs_path *path;
4251 	u64 length;
4252 	u64 chunk_offset;
4253 	int ret;
4254 	int slot;
4255 	int failed = 0;
4256 	bool retried = false;
4257 	bool checked_pending_chunks = false;
4258 	struct extent_buffer *l;
4259 	struct btrfs_key key;
4260 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4261 	u64 old_total = btrfs_super_total_bytes(super_copy);
4262 	u64 old_size = btrfs_device_get_total_bytes(device);
4263 	u64 diff = old_size - new_size;
4264 
4265 	if (device->is_tgtdev_for_dev_replace)
4266 		return -EINVAL;
4267 
4268 	path = btrfs_alloc_path();
4269 	if (!path)
4270 		return -ENOMEM;
4271 
4272 	path->reada = READA_FORWARD;
4273 
4274 	lock_chunks(root);
4275 
4276 	btrfs_device_set_total_bytes(device, new_size);
4277 	if (device->writeable) {
4278 		device->fs_devices->total_rw_bytes -= diff;
4279 		spin_lock(&root->fs_info->free_chunk_lock);
4280 		root->fs_info->free_chunk_space -= diff;
4281 		spin_unlock(&root->fs_info->free_chunk_lock);
4282 	}
4283 	unlock_chunks(root);
4284 
4285 again:
4286 	key.objectid = device->devid;
4287 	key.offset = (u64)-1;
4288 	key.type = BTRFS_DEV_EXTENT_KEY;
4289 
4290 	do {
4291 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4292 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4293 		if (ret < 0) {
4294 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4295 			goto done;
4296 		}
4297 
4298 		ret = btrfs_previous_item(root, path, 0, key.type);
4299 		if (ret)
4300 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4301 		if (ret < 0)
4302 			goto done;
4303 		if (ret) {
4304 			ret = 0;
4305 			btrfs_release_path(path);
4306 			break;
4307 		}
4308 
4309 		l = path->nodes[0];
4310 		slot = path->slots[0];
4311 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4312 
4313 		if (key.objectid != device->devid) {
4314 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4315 			btrfs_release_path(path);
4316 			break;
4317 		}
4318 
4319 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4320 		length = btrfs_dev_extent_length(l, dev_extent);
4321 
4322 		if (key.offset + length <= new_size) {
4323 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4324 			btrfs_release_path(path);
4325 			break;
4326 		}
4327 
4328 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4329 		btrfs_release_path(path);
4330 
4331 		ret = btrfs_relocate_chunk(root, chunk_offset);
4332 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4333 		if (ret && ret != -ENOSPC)
4334 			goto done;
4335 		if (ret == -ENOSPC)
4336 			failed++;
4337 	} while (key.offset-- > 0);
4338 
4339 	if (failed && !retried) {
4340 		failed = 0;
4341 		retried = true;
4342 		goto again;
4343 	} else if (failed && retried) {
4344 		ret = -ENOSPC;
4345 		goto done;
4346 	}
4347 
4348 	/* Shrinking succeeded, else we would be at "done". */
4349 	trans = btrfs_start_transaction(root, 0);
4350 	if (IS_ERR(trans)) {
4351 		ret = PTR_ERR(trans);
4352 		goto done;
4353 	}
4354 
4355 	lock_chunks(root);
4356 
4357 	/*
4358 	 * We checked in the above loop all device extents that were already in
4359 	 * the device tree. However before we have updated the device's
4360 	 * total_bytes to the new size, we might have had chunk allocations that
4361 	 * have not completed yet (new block groups attached to transaction
4362 	 * handles), and therefore their device extents were not yet in the
4363 	 * device tree and we missed them in the loop above. So if we have any
4364 	 * pending chunk using a device extent that overlaps the device range
4365 	 * that we can not use anymore, commit the current transaction and
4366 	 * repeat the search on the device tree - this way we guarantee we will
4367 	 * not have chunks using device extents that end beyond 'new_size'.
4368 	 */
4369 	if (!checked_pending_chunks) {
4370 		u64 start = new_size;
4371 		u64 len = old_size - new_size;
4372 
4373 		if (contains_pending_extent(trans->transaction, device,
4374 					    &start, len)) {
4375 			unlock_chunks(root);
4376 			checked_pending_chunks = true;
4377 			failed = 0;
4378 			retried = false;
4379 			ret = btrfs_commit_transaction(trans, root);
4380 			if (ret)
4381 				goto done;
4382 			goto again;
4383 		}
4384 	}
4385 
4386 	btrfs_device_set_disk_total_bytes(device, new_size);
4387 	if (list_empty(&device->resized_list))
4388 		list_add_tail(&device->resized_list,
4389 			      &root->fs_info->fs_devices->resized_devices);
4390 
4391 	WARN_ON(diff > old_total);
4392 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4393 	unlock_chunks(root);
4394 
4395 	/* Now btrfs_update_device() will change the on-disk size. */
4396 	ret = btrfs_update_device(trans, device);
4397 	btrfs_end_transaction(trans, root);
4398 done:
4399 	btrfs_free_path(path);
4400 	if (ret) {
4401 		lock_chunks(root);
4402 		btrfs_device_set_total_bytes(device, old_size);
4403 		if (device->writeable)
4404 			device->fs_devices->total_rw_bytes += diff;
4405 		spin_lock(&root->fs_info->free_chunk_lock);
4406 		root->fs_info->free_chunk_space += diff;
4407 		spin_unlock(&root->fs_info->free_chunk_lock);
4408 		unlock_chunks(root);
4409 	}
4410 	return ret;
4411 }
4412 
4413 static int btrfs_add_system_chunk(struct btrfs_root *root,
4414 			   struct btrfs_key *key,
4415 			   struct btrfs_chunk *chunk, int item_size)
4416 {
4417 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4418 	struct btrfs_disk_key disk_key;
4419 	u32 array_size;
4420 	u8 *ptr;
4421 
4422 	lock_chunks(root);
4423 	array_size = btrfs_super_sys_array_size(super_copy);
4424 	if (array_size + item_size + sizeof(disk_key)
4425 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4426 		unlock_chunks(root);
4427 		return -EFBIG;
4428 	}
4429 
4430 	ptr = super_copy->sys_chunk_array + array_size;
4431 	btrfs_cpu_key_to_disk(&disk_key, key);
4432 	memcpy(ptr, &disk_key, sizeof(disk_key));
4433 	ptr += sizeof(disk_key);
4434 	memcpy(ptr, chunk, item_size);
4435 	item_size += sizeof(disk_key);
4436 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4437 	unlock_chunks(root);
4438 
4439 	return 0;
4440 }
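
/*
 * Layout sketch (illustrative): sys_chunk_array is a packed sequence
 * of (disk_key, chunk) pairs, so every insertion above advances
 * array_size by sizeof(disk_key) + item_size:
 *
 *	| disk_key | chunk + stripes | disk_key | chunk + stripes | ...
 */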
4441 
4442 /*
4443  * sort the devices in descending order by max_avail, total_avail
4444  */
4445 static int btrfs_cmp_device_info(const void *a, const void *b)
4446 {
4447 	const struct btrfs_device_info *di_a = a;
4448 	const struct btrfs_device_info *di_b = b;
4449 
4450 	if (di_a->max_avail > di_b->max_avail)
4451 		return -1;
4452 	if (di_a->max_avail < di_b->max_avail)
4453 		return 1;
4454 	if (di_a->total_avail > di_b->total_avail)
4455 		return -1;
4456 	if (di_a->total_avail < di_b->total_avail)
4457 		return 1;
4458 	return 0;
4459 }
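
/*
 * Illustrative usage (this is how __btrfs_alloc_chunk() calls it
 * further down):
 *
 *	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
 *	     btrfs_cmp_device_info, NULL);
 *
 * afterwards devices_info[0] has the largest hole and
 * devices_info[ndevs - 1] bounds the common stripe size.
 */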
4460 
4461 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4462 {
4463 	/* TODO allow them to set a preferred stripe size */
4464 	return SZ_64K;
4465 }
4466 
4467 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4468 {
4469 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4470 		return;
4471 
4472 	btrfs_set_fs_incompat(info, RAID56);
4473 }
4474 
4475 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4476 			- sizeof(struct btrfs_item)		\
4477 			- sizeof(struct btrfs_chunk))		\
4478 			/ sizeof(struct btrfs_stripe) + 1)
4479 
4480 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4481 				- 2 * sizeof(struct btrfs_disk_key)	\
4482 				- 2 * sizeof(struct btrfs_chunk))	\
4483 				/ sizeof(struct btrfs_stripe) + 1)
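
/*
 * Worked example (illustrative, assuming the on-disk sizes of this
 * era: a 2048-byte sys_chunk_array, 17-byte btrfs_disk_key, 80-byte
 * btrfs_chunk with one embedded stripe, and 32-byte btrfs_stripe):
 *
 *	BTRFS_MAX_DEVS_SYS_CHUNK = (2048 - 2*17 - 2*80) / 32 + 1
 *				 = 1854 / 32 + 1 = 57 + 1 = 58
 */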
4484 
4485 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4486 			       struct btrfs_root *extent_root, u64 start,
4487 			       u64 type)
4488 {
4489 	struct btrfs_fs_info *info = extent_root->fs_info;
4490 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4491 	struct list_head *cur;
4492 	struct map_lookup *map = NULL;
4493 	struct extent_map_tree *em_tree;
4494 	struct extent_map *em;
4495 	struct btrfs_device_info *devices_info = NULL;
4496 	u64 total_avail;
4497 	int num_stripes;	/* total number of stripes to allocate */
4498 	int data_stripes;	/* number of stripes that count for
4499 				   block group size */
4500 	int sub_stripes;	/* sub_stripes info for map */
4501 	int dev_stripes;	/* stripes per dev */
4502 	int devs_max;		/* max devs to use */
4503 	int devs_min;		/* min devs needed */
4504 	int devs_increment;	/* ndevs has to be a multiple of this */
4505 	int ncopies;		/* how many copies of the data we have */
4506 	int ret;
4507 	u64 max_stripe_size;
4508 	u64 max_chunk_size;
4509 	u64 stripe_size;
4510 	u64 num_bytes;
4511 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4512 	int ndevs;
4513 	int i;
4514 	int j;
4515 	int index;
4516 
4517 	BUG_ON(!alloc_profile_is_valid(type, 0));
4518 
4519 	if (list_empty(&fs_devices->alloc_list))
4520 		return -ENOSPC;
4521 
4522 	index = __get_raid_index(type);
4523 
4524 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4525 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4526 	devs_max = btrfs_raid_array[index].devs_max;
4527 	devs_min = btrfs_raid_array[index].devs_min;
4528 	devs_increment = btrfs_raid_array[index].devs_increment;
4529 	ncopies = btrfs_raid_array[index].ncopies;
4530 
4531 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4532 		max_stripe_size = SZ_1G;
4533 		max_chunk_size = 10 * max_stripe_size;
4534 		if (!devs_max)
4535 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4536 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4537 		/* for larger filesystems, use larger metadata chunks */
4538 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4539 			max_stripe_size = SZ_1G;
4540 		else
4541 			max_stripe_size = SZ_256M;
4542 		max_chunk_size = max_stripe_size;
4543 		if (!devs_max)
4544 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4545 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4546 		max_stripe_size = SZ_32M;
4547 		max_chunk_size = 2 * max_stripe_size;
4548 		if (!devs_max)
4549 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4550 	} else {
4551 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4552 		       type);
4553 		BUG_ON(1);
4554 	}
4555 
4556 	/* we don't want a chunk larger than 10% of writeable space */
4557 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4558 			     max_chunk_size);
4559 
4560 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4561 			       GFP_NOFS);
4562 	if (!devices_info)
4563 		return -ENOMEM;
4564 
4565 	cur = fs_devices->alloc_list.next;
4566 
4567 	/*
4568 	 * in the first pass through the devices list, we gather information
4569 	 * about the available holes on each device.
4570 	 */
4571 	ndevs = 0;
4572 	while (cur != &fs_devices->alloc_list) {
4573 		struct btrfs_device *device;
4574 		u64 max_avail;
4575 		u64 dev_offset;
4576 
4577 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4578 
4579 		cur = cur->next;
4580 
4581 		if (!device->writeable) {
4582 			WARN(1, KERN_ERR
4583 			       "BTRFS: read-only device in alloc_list\n");
4584 			continue;
4585 		}
4586 
4587 		if (!device->in_fs_metadata ||
4588 		    device->is_tgtdev_for_dev_replace)
4589 			continue;
4590 
4591 		if (device->total_bytes > device->bytes_used)
4592 			total_avail = device->total_bytes - device->bytes_used;
4593 		else
4594 			total_avail = 0;
4595 
4596 		/* If there is no space on this device, skip it. */
4597 		if (total_avail == 0)
4598 			continue;
4599 
4600 		ret = find_free_dev_extent(trans, device,
4601 					   max_stripe_size * dev_stripes,
4602 					   &dev_offset, &max_avail);
4603 		if (ret && ret != -ENOSPC)
4604 			goto error;
4605 
4606 		if (ret == 0)
4607 			max_avail = max_stripe_size * dev_stripes;
4608 
4609 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4610 			continue;
4611 
4612 		if (ndevs == fs_devices->rw_devices) {
4613 			WARN(1, "%s: found more than %llu devices\n",
4614 			     __func__, fs_devices->rw_devices);
4615 			break;
4616 		}
4617 		devices_info[ndevs].dev_offset = dev_offset;
4618 		devices_info[ndevs].max_avail = max_avail;
4619 		devices_info[ndevs].total_avail = total_avail;
4620 		devices_info[ndevs].dev = device;
4621 		++ndevs;
4622 	}
4623 
4624 	/*
4625 	 * now sort the devices by hole size / available space
4626 	 */
4627 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4628 	     btrfs_cmp_device_info, NULL);
4629 
4630 	/* round down to number of usable stripes */
4631 	ndevs -= ndevs % devs_increment;
4632 
4633 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4634 		ret = -ENOSPC;
4635 		goto error;
4636 	}
4637 
4638 	if (devs_max && ndevs > devs_max)
4639 		ndevs = devs_max;
4640 	/*
4641 	 * the primary goal is to maximize the number of stripes, so use as many
4642 	 * devices as possible, even if the stripes are not maximum sized.
4643 	 */
4644 	stripe_size = devices_info[ndevs-1].max_avail;
4645 	num_stripes = ndevs * dev_stripes;
4646 
4647 	/*
4648 	 * this will have to be fixed for RAID1 and RAID10 over
4649 	 * more drives
4650 	 */
4651 	data_stripes = num_stripes / ncopies;
4652 
4653 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4654 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4655 				 btrfs_super_stripesize(info->super_copy));
4656 		data_stripes = num_stripes - 1;
4657 	}
4658 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4659 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4660 				 btrfs_super_stripesize(info->super_copy));
4661 		data_stripes = num_stripes - 2;
4662 	}
4663 
4664 	/*
4665 	 * Use the number of data stripes to figure out how big this chunk
4666 	 * is really going to be in terms of logical address space,
4667 	 * and compare that answer with the max chunk size
4668 	 */
4669 	if (stripe_size * data_stripes > max_chunk_size) {
4670 		u64 mask = (1ULL << 24) - 1;
4671 
4672 		stripe_size = div_u64(max_chunk_size, data_stripes);
4673 
4674 		/* bump the answer up to a 16MB boundary */
4675 		stripe_size = (stripe_size + mask) & ~mask;
4676 
4677 		/* but don't go higher than the limits we found
4678 		 * while searching for free extents
4679 		 */
4680 		if (stripe_size > devices_info[ndevs-1].max_avail)
4681 			stripe_size = devices_info[ndevs-1].max_avail;
4682 	}
4683 
4684 	stripe_size = div_u64(stripe_size, dev_stripes);
4685 
4686 	/* align to BTRFS_STRIPE_LEN */
4687 	stripe_size = div_u64(stripe_size, raid_stripe_len);
4688 	stripe_size *= raid_stripe_len;
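
	/*
	 * Worked example (illustrative): a data chunk as RAID0 over
	 * ndevs = 4 with dev_stripes = 1, ncopies = 1 and a 1 GiB hole
	 * on each device gives num_stripes = 4, data_stripes = 4 and
	 * stripe_size = 1 GiB (already 64 KiB aligned), i.e. a 4 GiB
	 * logical chunk, well under the 10 GiB data max_chunk_size.
	 */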
4689 
4690 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4691 	if (!map) {
4692 		ret = -ENOMEM;
4693 		goto error;
4694 	}
4695 	map->num_stripes = num_stripes;
4696 
4697 	for (i = 0; i < ndevs; ++i) {
4698 		for (j = 0; j < dev_stripes; ++j) {
4699 			int s = i * dev_stripes + j;
4700 			map->stripes[s].dev = devices_info[i].dev;
4701 			map->stripes[s].physical = devices_info[i].dev_offset +
4702 						   j * stripe_size;
4703 		}
4704 	}
4705 	map->sector_size = extent_root->sectorsize;
4706 	map->stripe_len = raid_stripe_len;
4707 	map->io_align = raid_stripe_len;
4708 	map->io_width = raid_stripe_len;
4709 	map->type = type;
4710 	map->sub_stripes = sub_stripes;
4711 
4712 	num_bytes = stripe_size * data_stripes;
4713 
4714 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4715 
4716 	em = alloc_extent_map();
4717 	if (!em) {
4718 		kfree(map);
4719 		ret = -ENOMEM;
4720 		goto error;
4721 	}
4722 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4723 	em->map_lookup = map;
4724 	em->start = start;
4725 	em->len = num_bytes;
4726 	em->block_start = 0;
4727 	em->block_len = em->len;
4728 	em->orig_block_len = stripe_size;
4729 
4730 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4731 	write_lock(&em_tree->lock);
4732 	ret = add_extent_mapping(em_tree, em, 0);
4733 	if (!ret) {
4734 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4735 		atomic_inc(&em->refs);
4736 	}
4737 	write_unlock(&em_tree->lock);
4738 	if (ret) {
4739 		free_extent_map(em);
4740 		goto error;
4741 	}
4742 
4743 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4744 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4745 				     start, num_bytes);
4746 	if (ret)
4747 		goto error_del_extent;
4748 
4749 	for (i = 0; i < map->num_stripes; i++) {
4750 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4751 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4752 	}
4753 
4754 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4755 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4756 						   map->num_stripes);
4757 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4758 
4759 	free_extent_map(em);
4760 	check_raid56_incompat_flag(extent_root->fs_info, type);
4761 
4762 	kfree(devices_info);
4763 	return 0;
4764 
4765 error_del_extent:
4766 	write_lock(&em_tree->lock);
4767 	remove_extent_mapping(em_tree, em);
4768 	write_unlock(&em_tree->lock);
4769 
4770 	/* One for our allocation */
4771 	free_extent_map(em);
4772 	/* One for the tree reference */
4773 	free_extent_map(em);
4774 	/* One for the pending_chunks list reference */
4775 	free_extent_map(em);
4776 error:
4777 	kfree(devices_info);
4778 	return ret;
4779 }
4780 
4781 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4782 				struct btrfs_root *extent_root,
4783 				u64 chunk_offset, u64 chunk_size)
4784 {
4785 	struct btrfs_key key;
4786 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4787 	struct btrfs_device *device;
4788 	struct btrfs_chunk *chunk;
4789 	struct btrfs_stripe *stripe;
4790 	struct extent_map_tree *em_tree;
4791 	struct extent_map *em;
4792 	struct map_lookup *map;
4793 	size_t item_size;
4794 	u64 dev_offset;
4795 	u64 stripe_size;
4796 	int i = 0;
4797 	int ret = 0;
4798 
4799 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4800 	read_lock(&em_tree->lock);
4801 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4802 	read_unlock(&em_tree->lock);
4803 
4804 	if (!em) {
4805 		btrfs_crit(extent_root->fs_info, "unable to find logical "
4806 			   "%Lu len %Lu", chunk_offset, chunk_size);
4807 		return -EINVAL;
4808 	}
4809 
4810 	if (em->start != chunk_offset || em->len != chunk_size) {
4811 		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4812 			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4813 			  chunk_size, em->start, em->len);
4814 		free_extent_map(em);
4815 		return -EINVAL;
4816 	}
4817 
4818 	map = em->map_lookup;
4819 	item_size = btrfs_chunk_item_size(map->num_stripes);
4820 	stripe_size = em->orig_block_len;
4821 
4822 	chunk = kzalloc(item_size, GFP_NOFS);
4823 	if (!chunk) {
4824 		ret = -ENOMEM;
4825 		goto out;
4826 	}
4827 
4828 	/*
4829 	 * Take the device list mutex to prevent races with the final phase of
4830 	 * a device replace operation that replaces the device object associated
4831 	 * with the map's stripes, because the device object's id can change
4832 	 * at any time during that final phase of the device replace operation
4833 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
4834 	 */
4835 	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4836 	for (i = 0; i < map->num_stripes; i++) {
4837 		device = map->stripes[i].dev;
4838 		dev_offset = map->stripes[i].physical;
4839 
4840 		ret = btrfs_update_device(trans, device);
4841 		if (ret)
4842 			break;
4843 		ret = btrfs_alloc_dev_extent(trans, device,
4844 					     chunk_root->root_key.objectid,
4845 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4846 					     chunk_offset, dev_offset,
4847 					     stripe_size);
4848 		if (ret)
4849 			break;
4850 	}
4851 	if (ret) {
4852 		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4853 		goto out;
4854 	}
4855 
4856 	stripe = &chunk->stripe;
4857 	for (i = 0; i < map->num_stripes; i++) {
4858 		device = map->stripes[i].dev;
4859 		dev_offset = map->stripes[i].physical;
4860 
4861 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4862 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4863 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4864 		stripe++;
4865 	}
4866 	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4867 
4868 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4869 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4870 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4871 	btrfs_set_stack_chunk_type(chunk, map->type);
4872 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4873 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4874 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4875 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4876 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4877 
4878 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4879 	key.type = BTRFS_CHUNK_ITEM_KEY;
4880 	key.offset = chunk_offset;
4881 
4882 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4883 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4884 		/*
4885 		 * TODO: Cleanup of inserted chunk root in case of
4886 		 * failure.
4887 		 */
4888 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4889 					     item_size);
4890 	}
4891 
4892 out:
4893 	kfree(chunk);
4894 	free_extent_map(em);
4895 	return ret;
4896 }
4897 
4898 /*
4899  * Chunk allocation falls into two parts. The first part does the work
4900  * that makes the newly allocated chunk usable, but does not do any
4901  * operation that modifies the chunk tree. The second part does the work
4902  * that requires modifying the chunk tree. This division is important for the
4903  * bootstrap process of adding storage to a seed btrfs.
4904  */
4905 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4906 		      struct btrfs_root *extent_root, u64 type)
4907 {
4908 	u64 chunk_offset;
4909 
4910 	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4911 	chunk_offset = find_next_chunk(extent_root->fs_info);
4912 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4913 }
4914 
4915 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4916 					 struct btrfs_root *root,
4917 					 struct btrfs_device *device)
4918 {
4919 	u64 chunk_offset;
4920 	u64 sys_chunk_offset;
4921 	u64 alloc_profile;
4922 	struct btrfs_fs_info *fs_info = root->fs_info;
4923 	struct btrfs_root *extent_root = fs_info->extent_root;
4924 	int ret;
4925 
4926 	chunk_offset = find_next_chunk(fs_info);
4927 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4928 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4929 				  alloc_profile);
4930 	if (ret)
4931 		return ret;
4932 
4933 	sys_chunk_offset = find_next_chunk(root->fs_info);
4934 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4935 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4936 				  alloc_profile);
4937 	return ret;
4938 }
4939 
4940 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4941 {
4942 	int max_errors;
4943 
4944 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4945 			 BTRFS_BLOCK_GROUP_RAID10 |
4946 			 BTRFS_BLOCK_GROUP_RAID5 |
4947 			 BTRFS_BLOCK_GROUP_DUP)) {
4948 		max_errors = 1;
4949 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4950 		max_errors = 2;
4951 	} else {
4952 		max_errors = 0;
4953 	}
4954 
4955 	return max_errors;
4956 }
4957 
4958 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4959 {
4960 	struct extent_map *em;
4961 	struct map_lookup *map;
4962 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4963 	int readonly = 0;
4964 	int miss_ndevs = 0;
4965 	int i;
4966 
4967 	read_lock(&map_tree->map_tree.lock);
4968 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4969 	read_unlock(&map_tree->map_tree.lock);
4970 	if (!em)
4971 		return 1;
4972 
4973 	map = em->map_lookup;
4974 	for (i = 0; i < map->num_stripes; i++) {
4975 		if (map->stripes[i].dev->missing) {
4976 			miss_ndevs++;
4977 			continue;
4978 		}
4979 
4980 		if (!map->stripes[i].dev->writeable) {
4981 			readonly = 1;
4982 			goto end;
4983 		}
4984 	}
4985 
4986 	/*
4987 	 * If the number of missing devices is larger than max errors,
4988 	 * we cannot write the data into that chunk successfully, so
4989 	 * set it readonly.
4990 	 */
4991 	if (miss_ndevs > btrfs_chunk_max_errors(map))
4992 		readonly = 1;
4993 end:
4994 	free_extent_map(em);
4995 	return readonly;
4996 }
4997 
4998 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4999 {
5000 	extent_map_tree_init(&tree->map_tree);
5001 }
5002 
5003 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5004 {
5005 	struct extent_map *em;
5006 
5007 	while (1) {
5008 		write_lock(&tree->map_tree.lock);
5009 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5010 		if (em)
5011 			remove_extent_mapping(&tree->map_tree, em);
5012 		write_unlock(&tree->map_tree.lock);
5013 		if (!em)
5014 			break;
5015 		/* once for us */
5016 		free_extent_map(em);
5017 		/* once for the tree */
5018 		free_extent_map(em);
5019 	}
5020 }
5021 
5022 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5023 {
5024 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5025 	struct extent_map *em;
5026 	struct map_lookup *map;
5027 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5028 	int ret;
5029 
5030 	read_lock(&em_tree->lock);
5031 	em = lookup_extent_mapping(em_tree, logical, len);
5032 	read_unlock(&em_tree->lock);
5033 
5034 	/*
5035 	 * We could return errors for these cases, but that could get ugly and
5036 	 * we'd probably do the same thing, which is just to not do anything else
5037 	 * and exit, so return 1 so the callers don't try to use other copies.
5038 	 */
5039 	if (!em) {
5040 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5041 			    logical+len);
5042 		return 1;
5043 	}
5044 
5045 	if (em->start > logical || em->start + em->len < logical) {
5046 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
5047 			    "%Lu-%Lu", logical, logical+len, em->start,
5048 			    em->start + em->len);
5049 		free_extent_map(em);
5050 		return 1;
5051 	}
5052 
5053 	map = em->map_lookup;
5054 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5055 		ret = map->num_stripes;
5056 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5057 		ret = map->sub_stripes;
5058 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5059 		ret = 2;
5060 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5061 		ret = 3;
5062 	else
5063 		ret = 1;
5064 	free_extent_map(em);
5065 
5066 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
5067 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5068 		ret++;
5069 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
5070 
5071 	return ret;
5072 }
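
/*
 * Worked example (illustrative): a RAID10 chunk with num_stripes = 4
 * and sub_stripes = 2 reports 2 copies; while a device replace is
 * ongoing the result is bumped to 3, matching the extra target-device
 * stripe that __btrfs_map_block() may return.
 */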
5073 
5074 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5075 				    struct btrfs_mapping_tree *map_tree,
5076 				    u64 logical)
5077 {
5078 	struct extent_map *em;
5079 	struct map_lookup *map;
5080 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5081 	unsigned long len = root->sectorsize;
5082 
5083 	read_lock(&em_tree->lock);
5084 	em = lookup_extent_mapping(em_tree, logical, len);
5085 	read_unlock(&em_tree->lock);
5086 	BUG_ON(!em);
5087 
5088 	BUG_ON(em->start > logical || em->start + em->len < logical);
5089 	map = em->map_lookup;
5090 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5091 		len = map->stripe_len * nr_data_stripes(map);
5092 	free_extent_map(em);
5093 	return len;
5094 }
5095 
5096 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5097 			   u64 logical, u64 len, int mirror_num)
5098 {
5099 	struct extent_map *em;
5100 	struct map_lookup *map;
5101 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5102 	int ret = 0;
5103 
5104 	read_lock(&em_tree->lock);
5105 	em = lookup_extent_mapping(em_tree, logical, len);
5106 	read_unlock(&em_tree->lock);
5107 	BUG_ON(!em);
5108 
5109 	BUG_ON(em->start > logical || em->start + em->len < logical);
5110 	map = em->map_lookup;
5111 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5112 		ret = 1;
5113 	free_extent_map(em);
5114 	return ret;
5115 }
5116 
5117 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5118 			    struct map_lookup *map, int first, int num,
5119 			    int optimal, int dev_replace_is_ongoing)
5120 {
5121 	int i;
5122 	int tolerance;
5123 	struct btrfs_device *srcdev;
5124 
5125 	if (dev_replace_is_ongoing &&
5126 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5127 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5128 		srcdev = fs_info->dev_replace.srcdev;
5129 	else
5130 		srcdev = NULL;
5131 
5132 	/*
5133 	 * try to avoid the drive that is the source drive for a
5134 	 * dev-replace procedure; only choose it if no other non-missing
5135 	 * mirror is available
5136 	 */
5137 	for (tolerance = 0; tolerance < 2; tolerance++) {
5138 		if (map->stripes[optimal].dev->bdev &&
5139 		    (tolerance || map->stripes[optimal].dev != srcdev))
5140 			return optimal;
5141 		for (i = first; i < first + num; i++) {
5142 			if (map->stripes[i].dev->bdev &&
5143 			    (tolerance || map->stripes[i].dev != srcdev))
5144 				return i;
5145 		}
5146 	}
5147 
5148 	/* we couldn't find one that doesn't fail.  Just return something
5149 	 * and the io error handling code will clean up eventually
5150 	 */
5151 	return optimal;
5152 }
5153 
5154 static inline int parity_smaller(u64 a, u64 b)
5155 {
5156 	return a > b;
5157 }
5158 
5159 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5160 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5161 {
5162 	struct btrfs_bio_stripe s;
5163 	int i;
5164 	u64 l;
5165 	int again = 1;
5166 
5167 	while (again) {
5168 		again = 0;
5169 		for (i = 0; i < num_stripes - 1; i++) {
5170 			if (parity_smaller(bbio->raid_map[i],
5171 					   bbio->raid_map[i+1])) {
5172 				s = bbio->stripes[i];
5173 				l = bbio->raid_map[i];
5174 				bbio->stripes[i] = bbio->stripes[i+1];
5175 				bbio->raid_map[i] = bbio->raid_map[i+1];
5176 				bbio->stripes[i+1] = s;
5177 				bbio->raid_map[i+1] = l;
5178 
5179 				again = 1;
5180 			}
5181 		}
5182 	}
5183 }
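
/*
 * Illustrative note: the parity markers assigned in __btrfs_map_block()
 * (RAID5_P_STRIPE and RAID6_Q_STRIPE, defined elsewhere as the two
 * largest u64 values) compare greater than any real logical address,
 * so the sort above always moves them to the tail:
 *
 *	raid_map { P, 4096, 8192 }  becomes  { 4096, 8192, P }
 */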
5184 
5185 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5186 {
5187 	struct btrfs_bio *bbio = kzalloc(
5188 		 /* the size of the btrfs_bio */
5189 		sizeof(struct btrfs_bio) +
5190 		/* plus the variable array for the stripes */
5191 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5192 		/* plus the variable array for the tgt dev */
5193 		sizeof(int) * (real_stripes) +
5194 		/*
5195 		 * plus the raid_map, which includes both the tgt dev
5196 		 * and the stripes
5197 		 */
5198 		sizeof(u64) * (total_stripes),
5199 		GFP_NOFS|__GFP_NOFAIL);
5200 
5201 	atomic_set(&bbio->error, 0);
5202 	atomic_set(&bbio->refs, 1);
5203 
5204 	return bbio;
5205 }
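
/*
 * Layout sketch (illustrative) of the single allocation above:
 *
 *	| struct btrfs_bio | stripes[total] | tgtdev_map[real] | raid_map[total] |
 *
 * __btrfs_map_block() later points bbio->tgtdev_map and bbio->raid_map
 * into this buffer when a device replace or a RAID56 raid_map is needed.
 */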
5206 
5207 void btrfs_get_bbio(struct btrfs_bio *bbio)
5208 {
5209 	WARN_ON(!atomic_read(&bbio->refs));
5210 	atomic_inc(&bbio->refs);
5211 }
5212 
5213 void btrfs_put_bbio(struct btrfs_bio *bbio)
5214 {
5215 	if (!bbio)
5216 		return;
5217 	if (atomic_dec_and_test(&bbio->refs))
5218 		kfree(bbio);
5219 }
5220 
5221 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5222 			     u64 logical, u64 *length,
5223 			     struct btrfs_bio **bbio_ret,
5224 			     int mirror_num, int need_raid_map)
5225 {
5226 	struct extent_map *em;
5227 	struct map_lookup *map;
5228 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5229 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5230 	u64 offset;
5231 	u64 stripe_offset;
5232 	u64 stripe_end_offset;
5233 	u64 stripe_nr;
5234 	u64 stripe_nr_orig;
5235 	u64 stripe_nr_end;
5236 	u64 stripe_len;
5237 	u32 stripe_index;
5238 	int i;
5239 	int ret = 0;
5240 	int num_stripes;
5241 	int max_errors = 0;
5242 	int tgtdev_indexes = 0;
5243 	struct btrfs_bio *bbio = NULL;
5244 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5245 	int dev_replace_is_ongoing = 0;
5246 	int num_alloc_stripes;
5247 	int patch_the_first_stripe_for_dev_replace = 0;
5248 	u64 physical_to_patch_in_first_stripe = 0;
5249 	u64 raid56_full_stripe_start = (u64)-1;
5250 
5251 	read_lock(&em_tree->lock);
5252 	em = lookup_extent_mapping(em_tree, logical, *length);
5253 	read_unlock(&em_tree->lock);
5254 
5255 	if (!em) {
5256 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5257 			logical, *length);
5258 		return -EINVAL;
5259 	}
5260 
5261 	if (em->start > logical || em->start + em->len < logical) {
5262 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
5263 			   "found %Lu-%Lu", logical, em->start,
5264 			   em->start + em->len);
5265 		free_extent_map(em);
5266 		return -EINVAL;
5267 	}
5268 
5269 	map = em->map_lookup;
5270 	offset = logical - em->start;
5271 
5272 	stripe_len = map->stripe_len;
5273 	stripe_nr = offset;
5274 	/*
5275 	 * stripe_nr counts the total number of stripes we have to stride
5276 	 * to get to this block
5277 	 */
5278 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5279 
5280 	stripe_offset = stripe_nr * stripe_len;
5281 	BUG_ON(offset < stripe_offset);
5282 
5283 	/* stripe_offset is the offset of this block in its stripe */
5284 	stripe_offset = offset - stripe_offset;
5285 
5286 	/* if we're here for raid56, we need to know the stripe aligned start */
5287 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5288 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5289 		raid56_full_stripe_start = offset;
5290 
5291 		/* allow a write of a full stripe, but make sure we don't
5292 		 * allow straddling of stripes
5293 		 */
5294 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5295 				full_stripe_len);
5296 		raid56_full_stripe_start *= full_stripe_len;
5297 	}
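
	/*
	 * Worked example (illustrative): stripe_len = 64 KiB over 3
	 * data stripes gives full_stripe_len = 192 KiB, so an offset
	 * of 200 KiB rounds down to raid56_full_stripe_start = 192 KiB.
	 */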
5298 
5299 	if (rw & REQ_DISCARD) {
5300 		/* we don't discard raid56 yet */
5301 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5302 			ret = -EOPNOTSUPP;
5303 			goto out;
5304 		}
5305 		*length = min_t(u64, em->len - offset, *length);
5306 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5307 		u64 max_len;
5308 		/* For writes to RAID[56], allow a full stripeset across all disks.
5309 		   For other RAID types and for RAID[56] reads, just allow a single
5310 		   stripe (on a single disk). */
5311 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5312 		    (rw & REQ_WRITE)) {
5313 			max_len = stripe_len * nr_data_stripes(map) -
5314 				(offset - raid56_full_stripe_start);
5315 		} else {
5316 			/* we limit the length of each bio to what fits in a stripe */
5317 			max_len = stripe_len - stripe_offset;
5318 		}
5319 		*length = min_t(u64, em->len - offset, max_len);
5320 	} else {
5321 		*length = em->len - offset;
5322 	}
5323 
5324 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5325 	   it cares about is the length */
5326 	if (!bbio_ret)
5327 		goto out;
5328 
5329 	btrfs_dev_replace_lock(dev_replace, 0);
5330 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5331 	if (!dev_replace_is_ongoing)
5332 		btrfs_dev_replace_unlock(dev_replace, 0);
5333 	else
5334 		btrfs_dev_replace_set_lock_blocking(dev_replace);
5335 
5336 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5337 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5338 	    dev_replace->tgtdev != NULL) {
5339 		/*
5340 		 * in dev-replace case, for repair case (that's the only
5341 		 * case where the mirror is selected explicitly when
5342 		 * calling btrfs_map_block), blocks left of the left cursor
5343 		 * can also be read from the target drive.
5344 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5345 		 * the last one to the array of stripes. For READ, it also
5346 		 * needs to be supported using the same mirror number.
5347 		 * If the requested block is not left of the left cursor,
5348 		 * EIO is returned. This can happen because btrfs_num_copies()
5349 		 * returns one more in the dev-replace case.
5350 		 */
5351 		u64 tmp_length = *length;
5352 		struct btrfs_bio *tmp_bbio = NULL;
5353 		int tmp_num_stripes;
5354 		u64 srcdev_devid = dev_replace->srcdev->devid;
5355 		int index_srcdev = 0;
5356 		int found = 0;
5357 		u64 physical_of_found = 0;
5358 
5359 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5360 			     logical, &tmp_length, &tmp_bbio, 0, 0);
5361 		if (ret) {
5362 			WARN_ON(tmp_bbio != NULL);
5363 			goto out;
5364 		}
5365 
5366 		tmp_num_stripes = tmp_bbio->num_stripes;
5367 		if (mirror_num > tmp_num_stripes) {
5368 			/*
5369 			 * REQ_GET_READ_MIRRORS does not contain this
5370 			 * mirror, which means that the requested area
5371 			 * is not left of the left cursor
5372 			 */
5373 			ret = -EIO;
5374 			btrfs_put_bbio(tmp_bbio);
5375 			goto out;
5376 		}
5377 
5378 		/*
5379 		 * process the rest of the function using the mirror_num
5380 		 * of the source drive. Therefore look it up first.
5381 		 * At the end, patch the device pointer to that of the
5382 		 * target drive.
5383 		 */
5384 		for (i = 0; i < tmp_num_stripes; i++) {
5385 			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5386 				continue;
5387 
5388 			/*
5389 			 * In case of DUP, in order to keep it simple, only add
5390 			 * the mirror with the lowest physical address
5391 			 */
5392 			if (found &&
5393 			    physical_of_found <= tmp_bbio->stripes[i].physical)
5394 				continue;
5395 
5396 			index_srcdev = i;
5397 			found = 1;
5398 			physical_of_found = tmp_bbio->stripes[i].physical;
5399 		}
5400 
5401 		btrfs_put_bbio(tmp_bbio);
5402 
5403 		if (!found) {
5404 			WARN_ON(1);
5405 			ret = -EIO;
5406 			goto out;
5407 		}
5408 
5409 		mirror_num = index_srcdev + 1;
5410 		patch_the_first_stripe_for_dev_replace = 1;
5411 		physical_to_patch_in_first_stripe = physical_of_found;
5412 	} else if (mirror_num > map->num_stripes) {
5413 		mirror_num = 0;
5414 	}
5415 
5416 	num_stripes = 1;
5417 	stripe_index = 0;
5418 	stripe_nr_orig = stripe_nr;
5419 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5420 	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5421 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5422 			    (offset + *length);
5423 
5424 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5425 		if (rw & REQ_DISCARD)
5426 			num_stripes = min_t(u64, map->num_stripes,
5427 					    stripe_nr_end - stripe_nr_orig);
5428 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5429 				&stripe_index);
5430 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5431 			mirror_num = 1;
5432 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5433 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5434 			num_stripes = map->num_stripes;
5435 		else if (mirror_num)
5436 			stripe_index = mirror_num - 1;
5437 		else {
5438 			stripe_index = find_live_mirror(fs_info, map, 0,
5439 					    map->num_stripes,
5440 					    current->pid % map->num_stripes,
5441 					    dev_replace_is_ongoing);
5442 			mirror_num = stripe_index + 1;
5443 		}
5444 
5445 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5446 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5447 			num_stripes = map->num_stripes;
5448 		} else if (mirror_num) {
5449 			stripe_index = mirror_num - 1;
5450 		} else {
5451 			mirror_num = 1;
5452 		}
5453 
5454 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5455 		u32 factor = map->num_stripes / map->sub_stripes;
5456 
5457 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5458 		stripe_index *= map->sub_stripes;
5459 
5460 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5461 			num_stripes = map->sub_stripes;
5462 		else if (rw & REQ_DISCARD)
5463 			num_stripes = min_t(u64, map->sub_stripes *
5464 					    (stripe_nr_end - stripe_nr_orig),
5465 					    map->num_stripes);
5466 		else if (mirror_num)
5467 			stripe_index += mirror_num - 1;
5468 		else {
5469 			int old_stripe_index = stripe_index;
5470 			stripe_index = find_live_mirror(fs_info, map,
5471 					      stripe_index,
5472 					      map->sub_stripes, stripe_index +
5473 					      current->pid % map->sub_stripes,
5474 					      dev_replace_is_ongoing);
5475 			mirror_num = stripe_index - old_stripe_index + 1;
5476 		}
5477 
5478 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5479 		if (need_raid_map &&
5480 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5481 		     mirror_num > 1)) {
5482 			/* push stripe_nr back to the start of the full stripe */
5483 			stripe_nr = div_u64(raid56_full_stripe_start,
5484 					stripe_len * nr_data_stripes(map));
5485 
5486 			/* RAID[56] write or recovery. Return all stripes */
5487 			num_stripes = map->num_stripes;
5488 			max_errors = nr_parity_stripes(map);
5489 
5490 			*length = map->stripe_len;
5491 			stripe_index = 0;
5492 			stripe_offset = 0;
5493 		} else {
5494 			/*
5495 			 * Mirror #0 or #1 means the original data block.
5496 			 * Mirror #2 is RAID5 parity block.
5497 			 * Mirror #3 is RAID6 Q block.
5498 			 */
5499 			stripe_nr = div_u64_rem(stripe_nr,
5500 					nr_data_stripes(map), &stripe_index);
5501 			if (mirror_num > 1)
5502 				stripe_index = nr_data_stripes(map) +
5503 						mirror_num - 2;
5504 
5505 			/* We distribute the parity blocks across stripes */
5506 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5507 					&stripe_index);
5508 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5509 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5510 				mirror_num = 1;
5511 		}
5512 	} else {
5513 		/*
5514 		 * after this, stripe_nr is the number of stripes on this
5515 		 * device we have to walk to find the data, and stripe_index is
5516 		 * the number of our device in the stripe array
5517 		 */
5518 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5519 				&stripe_index);
5520 		mirror_num = stripe_index + 1;
5521 	}
5522 	BUG_ON(stripe_index >= map->num_stripes);
5523 
5524 	num_alloc_stripes = num_stripes;
5525 	if (dev_replace_is_ongoing) {
5526 		if (rw & (REQ_WRITE | REQ_DISCARD))
5527 			num_alloc_stripes <<= 1;
5528 		if (rw & REQ_GET_READ_MIRRORS)
5529 			num_alloc_stripes++;
5530 		tgtdev_indexes = num_stripes;
5531 	}
5532 
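	/*
	 * alloc_btrfs_bio() places the stripe array, the optional
	 * tgtdev_map and the optional raid_map in a single contiguous
	 * allocation; that is why tgtdev_map and raid_map below are
	 * derived from bbio->stripes via pointer arithmetic.
	 */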
5533 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5534 	if (!bbio) {
5535 		ret = -ENOMEM;
5536 		goto out;
5537 	}
5538 	if (dev_replace_is_ongoing)
5539 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5540 
5541 	/* build raid_map */
5542 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5543 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5544 	    mirror_num > 1)) {
5545 		u64 tmp;
5546 		unsigned rot;
5547 
5548 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5549 				 sizeof(struct btrfs_bio_stripe) *
5550 				 num_alloc_stripes +
5551 				 sizeof(int) * tgtdev_indexes);
5552 
5553 		/* Work out the disk rotation on this stripe-set */
5554 		div_u64_rem(stripe_nr, num_stripes, &rot);
5555 
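		/*
		 * Illustrative example: for RAID5 with num_stripes = 3
		 * (two data + one parity), full stripe 0 gets rot = 0 and
		 * the parity lands in slot 2; full stripe 1 gets rot = 1
		 * and the parity rotates to slot 0, and so on.
		 */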
5556 		/* Fill in the logical address of each stripe */
5557 		tmp = stripe_nr * nr_data_stripes(map);
5558 		for (i = 0; i < nr_data_stripes(map); i++)
5559 			bbio->raid_map[(i+rot) % num_stripes] =
5560 				em->start + (tmp + i) * map->stripe_len;
5561 
5562 		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
5563 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5564 			bbio->raid_map[(i+rot+1) % num_stripes] =
5565 				RAID6_Q_STRIPE;
5566 	}
5567 
5568 	if (rw & REQ_DISCARD) {
5569 		u32 factor = 0;
5570 		u32 sub_stripes = 0;
5571 		u64 stripes_per_dev = 0;
5572 		u32 remaining_stripes = 0;
5573 		u32 last_stripe = 0;
5574 
5575 		if (map->type &
5576 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5577 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5578 				sub_stripes = 1;
5579 			else
5580 				sub_stripes = map->sub_stripes;
5581 
5582 			factor = map->num_stripes / sub_stripes;
5583 			stripes_per_dev = div_u64_rem(stripe_nr_end -
5584 						      stripe_nr_orig,
5585 						      factor,
5586 						      &remaining_stripes);
5587 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5588 			last_stripe *= sub_stripes;
5589 		}
5590 
5591 		for (i = 0; i < num_stripes; i++) {
5592 			bbio->stripes[i].physical =
5593 				map->stripes[stripe_index].physical +
5594 				stripe_offset + stripe_nr * map->stripe_len;
5595 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5596 
5597 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5598 					 BTRFS_BLOCK_GROUP_RAID10)) {
5599 				bbio->stripes[i].length = stripes_per_dev *
5600 							  map->stripe_len;
5601 
5602 				if (i / sub_stripes < remaining_stripes)
5603 					bbio->stripes[i].length +=
5604 						map->stripe_len;
5605 
5606 				/*
5607 				 * Special for the first stripe and
5608 				 * the last stripe:
5609 				 *
5610 				 * |-------|...|-------|
5611 				 *     |----------|
5612 				 *    off     end_off
5613 				 */
5614 				if (i < sub_stripes)
5615 					bbio->stripes[i].length -=
5616 						stripe_offset;
5617 
5618 				if (stripe_index >= last_stripe &&
5619 				    stripe_index <= (last_stripe +
5620 						     sub_stripes - 1))
5621 					bbio->stripes[i].length -=
5622 						stripe_end_offset;
5623 
5624 				if (i == sub_stripes - 1)
5625 					stripe_offset = 0;
5626 			} else
5627 				bbio->stripes[i].length = *length;
5628 
5629 			stripe_index++;
5630 			if (stripe_index == map->num_stripes) {
5631 				/* This could only happen for RAID0/10 */
5632 				stripe_index = 0;
5633 				stripe_nr++;
5634 			}
5635 		}
5636 	} else {
5637 		for (i = 0; i < num_stripes; i++) {
5638 			bbio->stripes[i].physical =
5639 				map->stripes[stripe_index].physical +
5640 				stripe_offset +
5641 				stripe_nr * map->stripe_len;
5642 			bbio->stripes[i].dev =
5643 				map->stripes[stripe_index].dev;
5644 			stripe_index++;
5645 		}
5646 	}
5647 
5648 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5649 		max_errors = btrfs_chunk_max_errors(map);
5650 
5651 	if (bbio->raid_map)
5652 		sort_parity_stripes(bbio, num_stripes);
5653 
5654 	tgtdev_indexes = 0;
5655 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5656 	    dev_replace->tgtdev != NULL) {
5657 		int index_where_to_add;
5658 		u64 srcdev_devid = dev_replace->srcdev->devid;
5659 
5660 		/*
5661 		 * duplicate the write operations while the dev replace
5662 		 * procedure is running. Since the copying of the old disk
5663 		 * to the new disk takes place at run time while the
5664 		 * filesystem is mounted writable, the regular write
5665 		 * operations to the old disk have to be duplicated to go
5666 		 * to the new disk as well.
5667 		 * Note that device->missing is handled by the caller, and
5668 		 * that the write to the old disk is already set up in the
5669 		 * stripes array.
5670 		 */
5671 		index_where_to_add = num_stripes;
5672 		for (i = 0; i < num_stripes; i++) {
5673 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5674 				/* write to new disk, too */
5675 				struct btrfs_bio_stripe *new =
5676 					bbio->stripes + index_where_to_add;
5677 				struct btrfs_bio_stripe *old =
5678 					bbio->stripes + i;
5679 
5680 				new->physical = old->physical;
5681 				new->length = old->length;
5682 				new->dev = dev_replace->tgtdev;
5683 				bbio->tgtdev_map[i] = index_where_to_add;
5684 				index_where_to_add++;
5685 				max_errors++;
5686 				tgtdev_indexes++;
5687 			}
5688 		}
5689 		num_stripes = index_where_to_add;
5690 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5691 		   dev_replace->tgtdev != NULL) {
5692 		u64 srcdev_devid = dev_replace->srcdev->devid;
5693 		int index_srcdev = 0;
5694 		int found = 0;
5695 		u64 physical_of_found = 0;
5696 
5697 		/*
5698 		 * During the dev-replace procedure, the target drive can
5699 		 * also be used to read data in case it is needed to repair
5700 		 * a corrupt block elsewhere. This is possible if the
5701 		 * requested area is left of the left cursor. In this area,
5702 		 * the target drive is a full copy of the source drive.
5703 		 */
5704 		for (i = 0; i < num_stripes; i++) {
5705 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5706 				/*
5707 				 * In case of DUP, in order to keep it
5708 				 * simple, only add the mirror with the
5709 				 * lowest physical address
5710 				 */
5711 				if (found &&
5712 				    physical_of_found <=
5713 				     bbio->stripes[i].physical)
5714 					continue;
5715 				index_srcdev = i;
5716 				found = 1;
5717 				physical_of_found = bbio->stripes[i].physical;
5718 			}
5719 		}
5720 		if (found) {
5721 			if (physical_of_found + map->stripe_len <=
5722 			    dev_replace->cursor_left) {
5723 				struct btrfs_bio_stripe *tgtdev_stripe =
5724 					bbio->stripes + num_stripes;
5725 
5726 				tgtdev_stripe->physical = physical_of_found;
5727 				tgtdev_stripe->length =
5728 					bbio->stripes[index_srcdev].length;
5729 				tgtdev_stripe->dev = dev_replace->tgtdev;
5730 				bbio->tgtdev_map[index_srcdev] = num_stripes;
5731 
5732 				tgtdev_indexes++;
5733 				num_stripes++;
5734 			}
5735 		}
5736 	}
5737 
5738 	*bbio_ret = bbio;
5739 	bbio->map_type = map->type;
5740 	bbio->num_stripes = num_stripes;
5741 	bbio->max_errors = max_errors;
5742 	bbio->mirror_num = mirror_num;
5743 	bbio->num_tgtdevs = tgtdev_indexes;
5744 
5745 	/*
5746 	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
5747 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5748 	 * available as a mirror
5749 	 */
5750 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5751 		WARN_ON(num_stripes > 1);
5752 		bbio->stripes[0].dev = dev_replace->tgtdev;
5753 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5754 		bbio->mirror_num = map->num_stripes + 1;
5755 	}
5756 out:
5757 	if (dev_replace_is_ongoing) {
5758 		btrfs_dev_replace_clear_lock_blocking(dev_replace);
5759 		btrfs_dev_replace_unlock(dev_replace, 0);
5760 	}
5761 	free_extent_map(em);
5762 	return ret;
5763 }
5764 
5765 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5766 		      u64 logical, u64 *length,
5767 		      struct btrfs_bio **bbio_ret, int mirror_num)
5768 {
5769 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5770 				 mirror_num, 0);
5771 }
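/*
 * Minimal usage sketch for btrfs_map_block() (hypothetical caller,
 * error handling elided; submit_to() is a placeholder):
 *
 *	u64 len = bio_len;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &len, &bbio, 0);
 *	if (ret == 0) {
 *		for (i = 0; i < bbio->num_stripes; i++)
 *			submit_to(bbio->stripes[i].dev,
 *				  bbio->stripes[i].physical);
 *		btrfs_put_bbio(bbio);
 *	}
 */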
5772 
5773 /* For Scrub/replace */
5774 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5775 		     u64 logical, u64 *length,
5776 		     struct btrfs_bio **bbio_ret, int mirror_num,
5777 		     int need_raid_map)
5778 {
5779 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5780 				 mirror_num, need_raid_map);
5781 }
5782 
5783 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5784 		     u64 chunk_start, u64 physical, u64 devid,
5785 		     u64 **logical, int *naddrs, int *stripe_len)
5786 {
5787 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5788 	struct extent_map *em;
5789 	struct map_lookup *map;
5790 	u64 *buf;
5791 	u64 bytenr;
5792 	u64 length;
5793 	u64 stripe_nr;
5794 	u64 rmap_len;
5795 	int i, j, nr = 0;
5796 
5797 	read_lock(&em_tree->lock);
5798 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5799 	read_unlock(&em_tree->lock);
5800 
5801 	if (!em) {
5802 		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5803 		       chunk_start);
5804 		return -EIO;
5805 	}
5806 
5807 	if (em->start != chunk_start) {
5808 		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5809 		       em->start, chunk_start);
5810 		free_extent_map(em);
5811 		return -EIO;
5812 	}
5813 	map = em->map_lookup;
5814 
5815 	length = em->len;
5816 	rmap_len = map->stripe_len;
5817 
5818 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5819 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5820 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5821 		length = div_u64(length, map->num_stripes);
5822 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5823 		length = div_u64(length, nr_data_stripes(map));
5824 		rmap_len = map->stripe_len * nr_data_stripes(map);
5825 	}
5826 
5827 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5828 	BUG_ON(!buf); /* -ENOMEM */
5829 
5830 	for (i = 0; i < map->num_stripes; i++) {
5831 		if (devid && map->stripes[i].dev->devid != devid)
5832 			continue;
5833 		if (map->stripes[i].physical > physical ||
5834 		    map->stripes[i].physical + length <= physical)
5835 			continue;
5836 
5837 		stripe_nr = physical - map->stripes[i].physical;
5838 		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5839 
5840 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5841 			stripe_nr = stripe_nr * map->num_stripes + i;
5842 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5843 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5844 			stripe_nr = stripe_nr * map->num_stripes + i;
5845 		} /* else if RAID[56], multiply by nr_data_stripes().
5846 		   * Alternatively, just use rmap_len below instead of
5847 		   * map->stripe_len */
5848 
5849 		bytenr = chunk_start + stripe_nr * rmap_len;
5850 		WARN_ON(nr >= map->num_stripes);
5851 		for (j = 0; j < nr; j++) {
5852 			if (buf[j] == bytenr)
5853 				break;
5854 		}
5855 		if (j == nr) {
5856 			WARN_ON(nr >= map->num_stripes);
5857 			buf[nr++] = bytenr;
5858 		}
5859 	}
5860 
5861 	*logical = buf;
5862 	*naddrs = nr;
5863 	*stripe_len = rmap_len;
5864 
5865 	free_extent_map(em);
5866 	return 0;
5867 }
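/*
 * Illustrative example for btrfs_rmap_block() (not from the original
 * source): for a RAID0 chunk with num_stripes = 2 and stripe_len = 64K,
 * a physical address that falls into stripe 3 of device index 1
 * reverse-maps to bytenr = chunk_start + (3 * 2 + 1) * 64K.
 */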
5868 
5869 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5870 {
5871 	bio->bi_private = bbio->private;
5872 	bio->bi_end_io = bbio->end_io;
5873 	bio_endio(bio);
5874 
5875 	btrfs_put_bbio(bbio);
5876 }
5877 
5878 static void btrfs_end_bio(struct bio *bio)
5879 {
5880 	struct btrfs_bio *bbio = bio->bi_private;
5881 	int is_orig_bio = 0;
5882 
5883 	if (bio->bi_error) {
5884 		atomic_inc(&bbio->error);
5885 		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5886 			unsigned int stripe_index =
5887 				btrfs_io_bio(bio)->stripe_index;
5888 			struct btrfs_device *dev;
5889 
5890 			BUG_ON(stripe_index >= bbio->num_stripes);
5891 			dev = bbio->stripes[stripe_index].dev;
5892 			if (dev->bdev) {
5893 				if (bio->bi_rw & WRITE)
5894 					btrfs_dev_stat_inc(dev,
5895 						BTRFS_DEV_STAT_WRITE_ERRS);
5896 				else
5897 					btrfs_dev_stat_inc(dev,
5898 						BTRFS_DEV_STAT_READ_ERRS);
5899 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5900 					btrfs_dev_stat_inc(dev,
5901 						BTRFS_DEV_STAT_FLUSH_ERRS);
5902 				btrfs_dev_stat_print_on_error(dev);
5903 			}
5904 		}
5905 	}
5906 
5907 	if (bio == bbio->orig_bio)
5908 		is_orig_bio = 1;
5909 
5910 	btrfs_bio_counter_dec(bbio->fs_info);
5911 
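	/*
	 * Only the bio that drops stripes_pending to zero completes the
	 * original bio; every cloned stripe bio is simply dropped.  The
	 * accumulated error count then decides whether the whole
	 * btrfs_bio is reported as failed or as up to date.
	 */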
5912 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5913 		if (!is_orig_bio) {
5914 			bio_put(bio);
5915 			bio = bbio->orig_bio;
5916 		}
5917 
5918 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5919 		/* only send an error to the higher layers if it is
5920 		 * beyond the tolerance of the btrfs bio
5921 		 */
5922 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5923 			bio->bi_error = -EIO;
5924 		} else {
5925 			/*
5926 			 * this bio is actually up to date, we didn't
5927 			 * go over the max number of errors
5928 			 */
5929 			bio->bi_error = 0;
5930 		}
5931 
5932 		btrfs_end_bbio(bbio, bio);
5933 	} else if (!is_orig_bio) {
5934 		bio_put(bio);
5935 	}
5936 }
5937 
5938 /*
5939  * see run_scheduled_bios for a description of why bios are collected for
5940  * async submit.
5941  *
5942  * This will add one bio to the pending list for a device and make sure
5943  * the work struct is scheduled.
5944  */
5945 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5946 					struct btrfs_device *device,
5947 					int rw, struct bio *bio)
5948 {
5949 	int should_queue = 1;
5950 	struct btrfs_pending_bios *pending_bios;
5951 
5952 	if (device->missing || !device->bdev) {
5953 		bio_io_error(bio);
5954 		return;
5955 	}
5956 
5957 	/* don't bother with additional async steps for reads, right now */
5958 	if (!(rw & REQ_WRITE)) {
5959 		bio_get(bio);
5960 		btrfsic_submit_bio(rw, bio);
5961 		bio_put(bio);
5962 		return;
5963 	}
5964 
5965 	/*
5966 	 * nr_async_bios allows us to reliably return congestion to the
5967 	 * higher layers.  Otherwise, the async bio makes it appear we have
5968 	 * made progress against dirty pages when we've really just put it
5969 	 * on a queue for later.
5970 	 */
5971 	atomic_inc(&root->fs_info->nr_async_bios);
5972 	WARN_ON(bio->bi_next);
5973 	bio->bi_next = NULL;
5974 	bio->bi_rw |= rw;
5975 
5976 	spin_lock(&device->io_lock);
5977 	if (bio->bi_rw & REQ_SYNC)
5978 		pending_bios = &device->pending_sync_bios;
5979 	else
5980 		pending_bios = &device->pending_bios;
5981 
5982 	if (pending_bios->tail)
5983 		pending_bios->tail->bi_next = bio;
5984 
5985 	pending_bios->tail = bio;
5986 	if (!pending_bios->head)
5987 		pending_bios->head = bio;
5988 	if (device->running_pending)
5989 		should_queue = 0;
5990 
5991 	spin_unlock(&device->io_lock);
5992 
5993 	if (should_queue)
5994 		btrfs_queue_work(root->fs_info->submit_workers,
5995 				 &device->work);
5996 }
5997 
5998 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5999 			      struct bio *bio, u64 physical, int dev_nr,
6000 			      int rw, int async)
6001 {
6002 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6003 
6004 	bio->bi_private = bbio;
6005 	btrfs_io_bio(bio)->stripe_index = dev_nr;
6006 	bio->bi_end_io = btrfs_end_bio;
6007 	bio->bi_iter.bi_sector = physical >> 9;
6008 #ifdef DEBUG
6009 	{
6010 		struct rcu_string *name;
6011 
6012 		rcu_read_lock();
6013 		name = rcu_dereference(dev->name);
6014 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
6015 			 "(%s id %llu), size=%u\n", rw,
6016 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6017 			 name->str, dev->devid, bio->bi_iter.bi_size);
6018 		rcu_read_unlock();
6019 	}
6020 #endif
6021 	bio->bi_bdev = dev->bdev;
6022 
6023 	btrfs_bio_counter_inc_noblocked(root->fs_info);
6024 
6025 	if (async)
6026 		btrfs_schedule_bio(root, dev, rw, bio);
6027 	else
6028 		btrfsic_submit_bio(rw, bio);
6029 }
6030 
6031 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6032 {
6033 	atomic_inc(&bbio->error);
6034 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6035 		/* Should be the original bio. */
6036 		WARN_ON(bio != bbio->orig_bio);
6037 
6038 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6039 		bio->bi_iter.bi_sector = logical >> 9;
6040 		bio->bi_error = -EIO;
6041 		btrfs_end_bbio(bbio, bio);
6042 	}
6043 }
6044 
6045 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6046 		  int mirror_num, int async_submit)
6047 {
6048 	struct btrfs_device *dev;
6049 	struct bio *first_bio = bio;
6050 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6051 	u64 length = 0;
6052 	u64 map_length;
6053 	int ret;
6054 	int dev_nr;
6055 	int total_devs;
6056 	struct btrfs_bio *bbio = NULL;
6057 
6058 	length = bio->bi_iter.bi_size;
6059 	map_length = length;
6060 
6061 	btrfs_bio_counter_inc_blocked(root->fs_info);
6062 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6063 			      mirror_num, 1);
6064 	if (ret) {
6065 		btrfs_bio_counter_dec(root->fs_info);
6066 		return ret;
6067 	}
6068 
6069 	total_devs = bbio->num_stripes;
6070 	bbio->orig_bio = first_bio;
6071 	bbio->private = first_bio->bi_private;
6072 	bbio->end_io = first_bio->bi_end_io;
6073 	bbio->fs_info = root->fs_info;
6074 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6075 
6076 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6077 	    ((rw & WRITE) || (mirror_num > 1))) {
6078 		/* In this case, map_length has been set to the length of
6079 		   a single stripe, not the whole write */
6080 		if (rw & WRITE) {
6081 			ret = raid56_parity_write(root, bio, bbio, map_length);
6082 		} else {
6083 			ret = raid56_parity_recover(root, bio, bbio, map_length,
6084 						    mirror_num, 1);
6085 		}
6086 
6087 		btrfs_bio_counter_dec(root->fs_info);
6088 		return ret;
6089 	}
6090 
6091 	if (map_length < length) {
6092 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6093 			logical, length, map_length);
6094 		BUG();
6095 	}
6096 
6097 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6098 		dev = bbio->stripes[dev_nr].dev;
6099 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6100 			bbio_error(bbio, first_bio, logical);
6101 			continue;
6102 		}
6103 
6104 		if (dev_nr < total_devs - 1) {
6105 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6106 			BUG_ON(!bio); /* -ENOMEM */
6107 		} else
6108 			bio = first_bio;
6109 
6110 		submit_stripe_bio(root, bbio, bio,
6111 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
6112 				  async_submit);
6113 	}
6114 	btrfs_bio_counter_dec(root->fs_info);
6115 	return 0;
6116 }
6117 
6118 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6119 				       u8 *uuid, u8 *fsid)
6120 {
6121 	struct btrfs_device *device;
6122 	struct btrfs_fs_devices *cur_devices;
6123 
6124 	cur_devices = fs_info->fs_devices;
6125 	while (cur_devices) {
6126 		if (!fsid ||
6127 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6128 			device = __find_device(&cur_devices->devices,
6129 					       devid, uuid);
6130 			if (device)
6131 				return device;
6132 		}
6133 		cur_devices = cur_devices->seed;
6134 	}
6135 	return NULL;
6136 }
6137 
6138 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6139 					    struct btrfs_fs_devices *fs_devices,
6140 					    u64 devid, u8 *dev_uuid)
6141 {
6142 	struct btrfs_device *device;
6143 
6144 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6145 	if (IS_ERR(device))
6146 		return NULL;
6147 
6148 	list_add(&device->dev_list, &fs_devices->devices);
6149 	device->fs_devices = fs_devices;
6150 	fs_devices->num_devices++;
6151 
6152 	device->missing = 1;
6153 	fs_devices->missing_devices++;
6154 
6155 	return device;
6156 }
6157 
6158 /**
6159  * btrfs_alloc_device - allocate struct btrfs_device
6160  * @fs_info:	used only for generating a new devid, can be NULL if
6161  *		devid is provided (i.e. @devid != NULL).
6162  * @devid:	a pointer to devid for this device.  If NULL a new devid
6163  *		is generated.
6164  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6165  *		is generated.
6166  *
6167  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6168  * on error.  Returned struct is not linked onto any lists and can be
6169  * destroyed with kfree() right away.
6170  */
6171 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6172 					const u64 *devid,
6173 					const u8 *uuid)
6174 {
6175 	struct btrfs_device *dev;
6176 	u64 tmp;
6177 
6178 	if (WARN_ON(!devid && !fs_info))
6179 		return ERR_PTR(-EINVAL);
6180 
6181 	dev = __alloc_device();
6182 	if (IS_ERR(dev))
6183 		return dev;
6184 
6185 	if (devid)
6186 		tmp = *devid;
6187 	else {
6188 		int ret;
6189 
6190 		ret = find_next_devid(fs_info, &tmp);
6191 		if (ret) {
6192 			kfree(dev);
6193 			return ERR_PTR(ret);
6194 		}
6195 	}
6196 	dev->devid = tmp;
6197 
6198 	if (uuid)
6199 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6200 	else
6201 		generate_random_uuid(dev->uuid);
6202 
6203 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6204 			pending_bios_fn, NULL, NULL);
6205 
6206 	return dev;
6207 }
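/*
 * Illustrative example (not part of the original source): allocate a
 * device with a caller-chosen devid and a freshly generated UUID:
 *
 *	u64 devid = 1;
 *	struct btrfs_device *dev = btrfs_alloc_device(NULL, &devid, NULL);
 *
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */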
6208 
6209 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6210 			  struct extent_buffer *leaf,
6211 			  struct btrfs_chunk *chunk)
6212 {
6213 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6214 	struct map_lookup *map;
6215 	struct extent_map *em;
6216 	u64 logical;
6217 	u64 length;
6218 	u64 stripe_len;
6219 	u64 devid;
6220 	u8 uuid[BTRFS_UUID_SIZE];
6221 	int num_stripes;
6222 	int ret;
6223 	int i;
6224 
6225 	logical = key->offset;
6226 	length = btrfs_chunk_length(leaf, chunk);
6227 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6228 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6229 	/* Validation check */
6230 	if (!num_stripes) {
6231 		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6232 			  num_stripes);
6233 		return -EIO;
6234 	}
6235 	if (!IS_ALIGNED(logical, root->sectorsize)) {
6236 		btrfs_err(root->fs_info,
6237 			  "invalid chunk logical %llu", logical);
6238 		return -EIO;
6239 	}
6240 	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6241 		btrfs_err(root->fs_info,
6242 			"invalid chunk length %llu", length);
6243 		return -EIO;
6244 	}
6245 	if (!is_power_of_2(stripe_len)) {
6246 		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6247 			  stripe_len);
6248 		return -EIO;
6249 	}
6250 	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6251 	    btrfs_chunk_type(leaf, chunk)) {
6252 		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6253 			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6254 			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6255 			  btrfs_chunk_type(leaf, chunk));
6256 		return -EIO;
6257 	}
6258 
6259 	read_lock(&map_tree->map_tree.lock);
6260 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6261 	read_unlock(&map_tree->map_tree.lock);
6262 
6263 	/* already mapped? */
6264 	if (em && em->start <= logical && em->start + em->len > logical) {
6265 		free_extent_map(em);
6266 		return 0;
6267 	} else if (em) {
6268 		free_extent_map(em);
6269 	}
6270 
6271 	em = alloc_extent_map();
6272 	if (!em)
6273 		return -ENOMEM;
6274 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6275 	if (!map) {
6276 		free_extent_map(em);
6277 		return -ENOMEM;
6278 	}
6279 
6280 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6281 	em->map_lookup = map;
6282 	em->start = logical;
6283 	em->len = length;
6284 	em->orig_start = 0;
6285 	em->block_start = 0;
6286 	em->block_len = em->len;
6287 
6288 	map->num_stripes = num_stripes;
6289 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6290 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6291 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6292 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6293 	map->type = btrfs_chunk_type(leaf, chunk);
6294 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6295 	for (i = 0; i < num_stripes; i++) {
6296 		map->stripes[i].physical =
6297 			btrfs_stripe_offset_nr(leaf, chunk, i);
6298 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6299 		read_extent_buffer(leaf, uuid, (unsigned long)
6300 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6301 				   BTRFS_UUID_SIZE);
6302 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6303 							uuid, NULL);
6304 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6305 			free_extent_map(em);
6306 			return -EIO;
6307 		}
6308 		if (!map->stripes[i].dev) {
6309 			map->stripes[i].dev =
6310 				add_missing_dev(root, root->fs_info->fs_devices,
6311 						devid, uuid);
6312 			if (!map->stripes[i].dev) {
6313 				free_extent_map(em);
6314 				return -EIO;
6315 			}
6316 			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6317 						devid, uuid);
6318 		}
6319 		map->stripes[i].dev->in_fs_metadata = 1;
6320 	}
6321 
6322 	write_lock(&map_tree->map_tree.lock);
6323 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6324 	write_unlock(&map_tree->map_tree.lock);
6325 	BUG_ON(ret); /* Tree corruption */
6326 	free_extent_map(em);
6327 
6328 	return 0;
6329 }
6330 
6331 static void fill_device_from_item(struct extent_buffer *leaf,
6332 				 struct btrfs_dev_item *dev_item,
6333 				 struct btrfs_device *device)
6334 {
6335 	unsigned long ptr;
6336 
6337 	device->devid = btrfs_device_id(leaf, dev_item);
6338 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6339 	device->total_bytes = device->disk_total_bytes;
6340 	device->commit_total_bytes = device->disk_total_bytes;
6341 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6342 	device->commit_bytes_used = device->bytes_used;
6343 	device->type = btrfs_device_type(leaf, dev_item);
6344 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6345 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6346 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6347 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6348 	device->is_tgtdev_for_dev_replace = 0;
6349 
6350 	ptr = btrfs_device_uuid(dev_item);
6351 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6352 }
6353 
6354 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6355 						  u8 *fsid)
6356 {
6357 	struct btrfs_fs_devices *fs_devices;
6358 	int ret;
6359 
6360 	BUG_ON(!mutex_is_locked(&uuid_mutex));
6361 
6362 	fs_devices = root->fs_info->fs_devices->seed;
6363 	while (fs_devices) {
6364 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6365 			return fs_devices;
6366 
6367 		fs_devices = fs_devices->seed;
6368 	}
6369 
6370 	fs_devices = find_fsid(fsid);
6371 	if (!fs_devices) {
6372 		if (!btrfs_test_opt(root, DEGRADED))
6373 			return ERR_PTR(-ENOENT);
6374 
6375 		fs_devices = alloc_fs_devices(fsid);
6376 		if (IS_ERR(fs_devices))
6377 			return fs_devices;
6378 
6379 		fs_devices->seeding = 1;
6380 		fs_devices->opened = 1;
6381 		return fs_devices;
6382 	}
6383 
6384 	fs_devices = clone_fs_devices(fs_devices);
6385 	if (IS_ERR(fs_devices))
6386 		return fs_devices;
6387 
6388 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6389 				   root->fs_info->bdev_holder);
6390 	if (ret) {
6391 		free_fs_devices(fs_devices);
6392 		fs_devices = ERR_PTR(ret);
6393 		goto out;
6394 	}
6395 
6396 	if (!fs_devices->seeding) {
6397 		__btrfs_close_devices(fs_devices);
6398 		free_fs_devices(fs_devices);
6399 		fs_devices = ERR_PTR(-EINVAL);
6400 		goto out;
6401 	}
6402 
6403 	fs_devices->seed = root->fs_info->fs_devices->seed;
6404 	root->fs_info->fs_devices->seed = fs_devices;
6405 out:
6406 	return fs_devices;
6407 }
6408 
6409 static int read_one_dev(struct btrfs_root *root,
6410 			struct extent_buffer *leaf,
6411 			struct btrfs_dev_item *dev_item)
6412 {
6413 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6414 	struct btrfs_device *device;
6415 	u64 devid;
6416 	int ret;
6417 	u8 fs_uuid[BTRFS_UUID_SIZE];
6418 	u8 dev_uuid[BTRFS_UUID_SIZE];
6419 
6420 	devid = btrfs_device_id(leaf, dev_item);
6421 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6422 			   BTRFS_UUID_SIZE);
6423 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6424 			   BTRFS_UUID_SIZE);
6425 
6426 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6427 		fs_devices = open_seed_devices(root, fs_uuid);
6428 		if (IS_ERR(fs_devices))
6429 			return PTR_ERR(fs_devices);
6430 	}
6431 
6432 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6433 	if (!device) {
6434 		if (!btrfs_test_opt(root, DEGRADED))
6435 			return -EIO;
6436 
6437 		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6438 		if (!device)
6439 			return -ENOMEM;
6440 		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6441 				devid, dev_uuid);
6442 	} else {
6443 		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6444 			return -EIO;
6445 
6446 		if (!device->bdev && !device->missing) {
6447 			/*
6448 			 * This happens when a device that was properly set up
6449 			 * in the device info lists suddenly goes bad.
6450 			 * device->bdev is NULL, and so we have to set
6451 			 * device->missing to one here.
6452 			 */
6453 			device->fs_devices->missing_devices++;
6454 			device->missing = 1;
6455 		}
6456 
6457 		/* Move the device to its own fs_devices */
6458 		if (device->fs_devices != fs_devices) {
6459 			ASSERT(device->missing);
6460 
6461 			list_move(&device->dev_list, &fs_devices->devices);
6462 			device->fs_devices->num_devices--;
6463 			fs_devices->num_devices++;
6464 
6465 			device->fs_devices->missing_devices--;
6466 			fs_devices->missing_devices++;
6467 
6468 			device->fs_devices = fs_devices;
6469 		}
6470 	}
6471 
6472 	if (device->fs_devices != root->fs_info->fs_devices) {
6473 		BUG_ON(device->writeable);
6474 		if (device->generation !=
6475 		    btrfs_device_generation(leaf, dev_item))
6476 			return -EINVAL;
6477 	}
6478 
6479 	fill_device_from_item(leaf, dev_item, device);
6480 	device->in_fs_metadata = 1;
6481 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6482 		device->fs_devices->total_rw_bytes += device->total_bytes;
6483 		spin_lock(&root->fs_info->free_chunk_lock);
6484 		root->fs_info->free_chunk_space += device->total_bytes -
6485 			device->bytes_used;
6486 		spin_unlock(&root->fs_info->free_chunk_lock);
6487 	}
6488 	ret = 0;
6489 	return ret;
6490 }
6491 
6492 int btrfs_read_sys_array(struct btrfs_root *root)
6493 {
6494 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6495 	struct extent_buffer *sb;
6496 	struct btrfs_disk_key *disk_key;
6497 	struct btrfs_chunk *chunk;
6498 	u8 *array_ptr;
6499 	unsigned long sb_array_offset;
6500 	int ret = 0;
6501 	u32 num_stripes;
6502 	u32 array_size;
6503 	u32 len = 0;
6504 	u32 cur_offset;
6505 	struct btrfs_key key;
6506 
6507 	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6508 	/*
6509 	 * This will create an extent buffer of nodesize; the superblock size
6510 	 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6511 	 * overallocate, but we can keep it as-is: only the first page is used.
6512 	 */
6513 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6514 	if (!sb)
6515 		return -ENOMEM;
6516 	set_extent_buffer_uptodate(sb);
6517 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6518 	/*
6519 	 * The sb extent buffer is artificial and just used to read the system array.
6520 	 * The set_extent_buffer_uptodate() call does not properly mark all its
6521 	 * pages up-to-date when the page is larger: the extent does not cover the
6522 	 * whole page and consequently check_page_uptodate does not find all
6523 	 * the page's extents up-to-date (the hole beyond sb);
6524 	 * write_extent_buffer then triggers a WARN_ON.
6525 	 *
6526 	 * Regular short extents go through the mark_extent_buffer_dirty/writeback
6527 	 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
6528 	 * call to silence the warning, e.g. on PowerPC 64.
6529 	 */
6530 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6531 		SetPageUptodate(sb->pages[0]);
6532 
6533 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6534 	array_size = btrfs_super_sys_array_size(super_copy);
6535 
6536 	array_ptr = super_copy->sys_chunk_array;
6537 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6538 	cur_offset = 0;
6539 
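	/*
	 * The sys_chunk_array is a packed sequence of (struct
	 * btrfs_disk_key, struct btrfs_chunk incl. its stripes) pairs;
	 * walk it entry by entry and verify that every read stays
	 * within array_size.
	 */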
6540 	while (cur_offset < array_size) {
6541 		disk_key = (struct btrfs_disk_key *)array_ptr;
6542 		len = sizeof(*disk_key);
6543 		if (cur_offset + len > array_size)
6544 			goto out_short_read;
6545 
6546 		btrfs_disk_key_to_cpu(&key, disk_key);
6547 
6548 		array_ptr += len;
6549 		sb_array_offset += len;
6550 		cur_offset += len;
6551 
6552 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6553 			chunk = (struct btrfs_chunk *)sb_array_offset;
6554 			/*
6555 			 * At least one btrfs_chunk with one stripe must be
6556 			 * present; the exact stripe count check comes afterwards.
6557 			 */
6558 			len = btrfs_chunk_item_size(1);
6559 			if (cur_offset + len > array_size)
6560 				goto out_short_read;
6561 
6562 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6563 			if (!num_stripes) {
6564 				printk(KERN_ERR
6565 	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6566 					num_stripes, cur_offset);
6567 				ret = -EIO;
6568 				break;
6569 			}
6570 
6571 			len = btrfs_chunk_item_size(num_stripes);
6572 			if (cur_offset + len > array_size)
6573 				goto out_short_read;
6574 
6575 			ret = read_one_chunk(root, &key, sb, chunk);
6576 			if (ret)
6577 				break;
6578 		} else {
6579 			printk(KERN_ERR
6580 		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
6581 				(u32)key.type, cur_offset);
6582 			ret = -EIO;
6583 			break;
6584 		}
6585 		array_ptr += len;
6586 		sb_array_offset += len;
6587 		cur_offset += len;
6588 	}
6589 	free_extent_buffer(sb);
6590 	return ret;
6591 
6592 out_short_read:
6593 	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6594 			len, cur_offset);
6595 	free_extent_buffer(sb);
6596 	return -EIO;
6597 }
6598 
6599 int btrfs_read_chunk_tree(struct btrfs_root *root)
6600 {
6601 	struct btrfs_path *path;
6602 	struct extent_buffer *leaf;
6603 	struct btrfs_key key;
6604 	struct btrfs_key found_key;
6605 	int ret;
6606 	int slot;
6607 
6608 	root = root->fs_info->chunk_root;
6609 
6610 	path = btrfs_alloc_path();
6611 	if (!path)
6612 		return -ENOMEM;
6613 
6614 	mutex_lock(&uuid_mutex);
6615 	lock_chunks(root);
6616 
6617 	/*
6618 	 * Read all device items, and then all the chunk items. All
6619 	 * device items are found before any chunk item (their object id
6620 	 * is smaller than the lowest possible object id for a chunk
6621 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6622 	 */
6623 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6624 	key.offset = 0;
6625 	key.type = 0;
6626 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6627 	if (ret < 0)
6628 		goto error;
6629 	while (1) {
6630 		leaf = path->nodes[0];
6631 		slot = path->slots[0];
6632 		if (slot >= btrfs_header_nritems(leaf)) {
6633 			ret = btrfs_next_leaf(root, path);
6634 			if (ret == 0)
6635 				continue;
6636 			if (ret < 0)
6637 				goto error;
6638 			break;
6639 		}
6640 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6641 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6642 			struct btrfs_dev_item *dev_item;
6643 			dev_item = btrfs_item_ptr(leaf, slot,
6644 						  struct btrfs_dev_item);
6645 			ret = read_one_dev(root, leaf, dev_item);
6646 			if (ret)
6647 				goto error;
6648 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6649 			struct btrfs_chunk *chunk;
6650 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6651 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6652 			if (ret)
6653 				goto error;
6654 		}
6655 		path->slots[0]++;
6656 	}
6657 	ret = 0;
6658 error:
6659 	unlock_chunks(root);
6660 	mutex_unlock(&uuid_mutex);
6661 
6662 	btrfs_free_path(path);
6663 	return ret;
6664 }
6665 
6666 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6667 {
6668 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6669 	struct btrfs_device *device;
6670 
6671 	while (fs_devices) {
6672 		mutex_lock(&fs_devices->device_list_mutex);
6673 		list_for_each_entry(device, &fs_devices->devices, dev_list)
6674 			device->dev_root = fs_info->dev_root;
6675 		mutex_unlock(&fs_devices->device_list_mutex);
6676 
6677 		fs_devices = fs_devices->seed;
6678 	}
6679 }
6680 
6681 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6682 {
6683 	int i;
6684 
6685 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6686 		btrfs_dev_stat_reset(dev, i);
6687 }
6688 
6689 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6690 {
6691 	struct btrfs_key key;
6692 	struct btrfs_key found_key;
6693 	struct btrfs_root *dev_root = fs_info->dev_root;
6694 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6695 	struct extent_buffer *eb;
6696 	int slot;
6697 	int ret = 0;
6698 	struct btrfs_device *device;
6699 	struct btrfs_path *path = NULL;
6700 	int i;
6701 
6702 	path = btrfs_alloc_path();
6703 	if (!path) {
6704 		ret = -ENOMEM;
6705 		goto out;
6706 	}
6707 
6708 	mutex_lock(&fs_devices->device_list_mutex);
6709 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6710 		int item_size;
6711 		struct btrfs_dev_stats_item *ptr;
6712 
6713 		key.objectid = BTRFS_DEV_STATS_OBJECTID;
6714 		key.type = BTRFS_PERSISTENT_ITEM_KEY;
6715 		key.offset = device->devid;
6716 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6717 		if (ret) {
6718 			__btrfs_reset_dev_stats(device);
6719 			device->dev_stats_valid = 1;
6720 			btrfs_release_path(path);
6721 			continue;
6722 		}
6723 		slot = path->slots[0];
6724 		eb = path->nodes[0];
6725 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6726 		item_size = btrfs_item_size_nr(eb, slot);
6727 
6728 		ptr = btrfs_item_ptr(eb, slot,
6729 				     struct btrfs_dev_stats_item);
6730 
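		/*
		 * The on-disk item may come from an older filesystem with
		 * fewer counters; any counter beyond item_size is reset to
		 * zero instead of being read.
		 */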
6731 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6732 			if (item_size >= (1 + i) * sizeof(__le64))
6733 				btrfs_dev_stat_set(device, i,
6734 					btrfs_dev_stats_value(eb, ptr, i));
6735 			else
6736 				btrfs_dev_stat_reset(device, i);
6737 		}
6738 
6739 		device->dev_stats_valid = 1;
6740 		btrfs_dev_stat_print_on_load(device);
6741 		btrfs_release_path(path);
6742 	}
6743 	mutex_unlock(&fs_devices->device_list_mutex);
6744 
6745 out:
6746 	btrfs_free_path(path);
6747 	return ret < 0 ? ret : 0;
6748 }
6749 
6750 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6751 				struct btrfs_root *dev_root,
6752 				struct btrfs_device *device)
6753 {
6754 	struct btrfs_path *path;
6755 	struct btrfs_key key;
6756 	struct extent_buffer *eb;
6757 	struct btrfs_dev_stats_item *ptr;
6758 	int ret;
6759 	int i;
6760 
6761 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
6762 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
6763 	key.offset = device->devid;
6764 
6765 	path = btrfs_alloc_path();
6766 	BUG_ON(!path);
6767 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6768 	if (ret < 0) {
6769 		btrfs_warn_in_rcu(dev_root->fs_info,
6770 			"error %d while searching for dev_stats item for device %s",
6771 			      ret, rcu_str_deref(device->name));
6772 		goto out;
6773 	}
6774 
6775 	if (ret == 0 &&
6776 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6777 		/* need to delete old one and insert a new one */
6778 		ret = btrfs_del_item(trans, dev_root, path);
6779 		if (ret != 0) {
6780 			btrfs_warn_in_rcu(dev_root->fs_info,
6781 				"delete too small dev_stats item for device %s failed %d",
6782 				      rcu_str_deref(device->name), ret);
6783 			goto out;
6784 		}
6785 		ret = 1;
6786 	}
6787 
6788 	if (ret == 1) {
6789 		/* need to insert a new item */
6790 		btrfs_release_path(path);
6791 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6792 					      &key, sizeof(*ptr));
6793 		if (ret < 0) {
6794 			btrfs_warn_in_rcu(dev_root->fs_info,
6795 				"insert dev_stats item for device %s failed %d",
6796 				rcu_str_deref(device->name), ret);
6797 			goto out;
6798 		}
6799 	}
6800 
6801 	eb = path->nodes[0];
6802 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6803 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6804 		btrfs_set_dev_stats_value(eb, ptr, i,
6805 					  btrfs_dev_stat_read(device, i));
6806 	btrfs_mark_buffer_dirty(eb);
6807 
6808 out:
6809 	btrfs_free_path(path);
6810 	return ret;
6811 }
6812 
6813 /*
6814  * called from commit_transaction. Writes all changed device stats to disk.
6815  */
6816 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6817 			struct btrfs_fs_info *fs_info)
6818 {
6819 	struct btrfs_root *dev_root = fs_info->dev_root;
6820 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6821 	struct btrfs_device *device;
6822 	int stats_cnt;
6823 	int ret = 0;
6824 
6825 	mutex_lock(&fs_devices->device_list_mutex);
6826 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6827 		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6828 			continue;
6829 
6830 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6831 		ret = update_dev_stat_item(trans, dev_root, device);
6832 		if (!ret)
6833 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6834 	}
6835 	mutex_unlock(&fs_devices->device_list_mutex);
6836 
6837 	return ret;
6838 }
6839 
6840 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6841 {
6842 	btrfs_dev_stat_inc(dev, index);
6843 	btrfs_dev_stat_print_on_error(dev);
6844 }
6845 
6846 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6847 {
6848 	if (!dev->dev_stats_valid)
6849 		return;
6850 	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
6851 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6852 			   rcu_str_deref(dev->name),
6853 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6854 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6855 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6856 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6857 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6858 }
6859 
6860 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6861 {
6862 	int i;
6863 
6864 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6865 		if (btrfs_dev_stat_read(dev, i) != 0)
6866 			break;
6867 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6868 		return; /* all values == 0, suppress message */
6869 
6870 	btrfs_info_in_rcu(dev->dev_root->fs_info,
6871 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6872 	       rcu_str_deref(dev->name),
6873 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6874 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6875 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6876 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6877 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6878 }
6879 
6880 int btrfs_get_dev_stats(struct btrfs_root *root,
6881 			struct btrfs_ioctl_get_dev_stats *stats)
6882 {
6883 	struct btrfs_device *dev;
6884 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6885 	int i;
6886 
6887 	mutex_lock(&fs_devices->device_list_mutex);
6888 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6889 	mutex_unlock(&fs_devices->device_list_mutex);
6890 
6891 	if (!dev) {
6892 		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6893 		return -ENODEV;
6894 	} else if (!dev->dev_stats_valid) {
6895 		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6896 		return -ENODEV;
6897 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6898 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6899 			if (stats->nr_items > i)
6900 				stats->values[i] =
6901 					btrfs_dev_stat_read_and_reset(dev, i);
6902 			else
6903 				btrfs_dev_stat_reset(dev, i);
6904 		}
6905 	} else {
6906 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6907 			if (stats->nr_items > i)
6908 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6909 	}
6910 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6911 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6912 	return 0;
6913 }
6914 
6915 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
6916 {
6917 	struct buffer_head *bh;
6918 	struct btrfs_super_block *disk_super;
6919 	int copy_num;
6920 
6921 	if (!bdev)
6922 		return;
6923 
6924 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
6925 		copy_num++) {
6926 
6927 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
6928 			continue;
6929 
6930 		disk_super = (struct btrfs_super_block *)bh->b_data;
6931 
6932 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6933 		set_buffer_dirty(bh);
6934 		sync_dirty_buffer(bh);
6935 		brelse(bh);
6936 	}
6937 
6938 	/* Notify udev that device has changed */
6939 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
6940 
6941 	/* Update ctime/mtime for device path for libblkid */
6942 	update_dev_time(device_path);
6943 }
6944 
6945 /*
6946  * Update the size of all devices, which is used for writing out the
6947  * super blocks.
6948  */
6949 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6950 {
6951 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6952 	struct btrfs_device *curr, *next;
6953 
6954 	if (list_empty(&fs_devices->resized_devices))
6955 		return;
6956 
6957 	mutex_lock(&fs_devices->device_list_mutex);
6958 	lock_chunks(fs_info->dev_root);
6959 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6960 				 resized_list) {
6961 		list_del_init(&curr->resized_list);
6962 		curr->commit_total_bytes = curr->disk_total_bytes;
6963 	}
6964 	unlock_chunks(fs_info->dev_root);
6965 	mutex_unlock(&fs_devices->device_list_mutex);
6966 }
6967 
6968 /* Must be invoked during the transaction commit */
6969 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6970 					struct btrfs_transaction *transaction)
6971 {
6972 	struct extent_map *em;
6973 	struct map_lookup *map;
6974 	struct btrfs_device *dev;
6975 	int i;
6976 
6977 	if (list_empty(&transaction->pending_chunks))
6978 		return;
6979 
6980 	/* In order to kick the device replace finish process */
6981 	lock_chunks(root);
6982 	list_for_each_entry(em, &transaction->pending_chunks, list) {
6983 		map = em->map_lookup;
6984 
6985 		for (i = 0; i < map->num_stripes; i++) {
6986 			dev = map->stripes[i].dev;
6987 			dev->commit_bytes_used = dev->bytes_used;
6988 		}
6989 	}
6990 	unlock_chunks(root);
6991 }
6992 
6993 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
6994 {
6995 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6996 	while (fs_devices) {
6997 		fs_devices->fs_info = fs_info;
6998 		fs_devices = fs_devices->seed;
6999 	}
7000 }
7001 
7002 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7003 {
7004 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7005 	while (fs_devices) {
7006 		fs_devices->fs_info = NULL;
7007 		fs_devices = fs_devices->seed;
7008 	}
7009 }
7010 
7011 static void btrfs_close_one_device(struct btrfs_device *device)
7012 {
7013 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
7014 	struct btrfs_device *new_device;
7015 	struct rcu_string *name;
7016 
7017 	if (device->bdev)
7018 		fs_devices->open_devices--;
7019 
7020 	if (device->writeable &&
7021 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
7022 		list_del_init(&device->dev_alloc_list);
7023 		fs_devices->rw_devices--;
7024 	}
7025 
7026 	if (device->missing)
7027 		fs_devices->missing_devices--;
7028 
7029 	new_device = btrfs_alloc_device(NULL, &device->devid,
7030 					device->uuid);
7031 	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
7032 
7033 	/* Safe because we are under uuid_mutex */
7034 	if (device->name) {
7035 		name = rcu_string_strdup(device->name->str, GFP_NOFS);
7036 		BUG_ON(!name); /* -ENOMEM */
7037 		rcu_assign_pointer(new_device->name, name);
7038 	}
7039 
7040 	list_replace_rcu(&device->dev_list, &new_device->dev_list);
7041 	new_device->fs_devices = device->fs_devices;
7042 
7043 	call_rcu(&device->rcu, free_device);
7044 }
7045