// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
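
/*
 * Example of how the table above is consumed (illustrative only, not a
 * helper that exists in this file): a raid6 chunk with six stripes has
 * num_stripes - nparity = 6 - 2 = 4 data stripes, and raid10 keeps
 * ncopies = 2 copies of each block:
 *
 *	const struct btrfs_raid_attr *attr = &btrfs_raid_array[BTRFS_RAID_RAID6];
 *	int nr_data = num_stripes - attr->nparity;
 */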

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with a textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a sufficiently
	 * large buffer.
	 */
out_overflow:;
}
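
/*
 * Usage sketch for the helper above (hypothetical caller, for illustration):
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * buf now contains "data|raid1"; the trailing '|' has been trimmed.
 */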

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   volume_mutex
 *     device_list_mutex
 *       chunk_mutex
 *     balance_mutex
 *
 *
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
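
/*
 * The nesting rules above, sketched in code (illustrative; assumes a caller
 * that legitimately needs all three locks):
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 *
 * Acquiring them in any other order risks an ABBA deadlock.
 */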

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
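
/*
 * Example lifetime of the return value (illustrative sketch): until the
 * struct is linked into fs_uuids it can simply be freed again:
 *
 *	fs_devs = alloc_fs_devices(fsid, NULL);
 *	if (IS_ERR(fs_devs))
 *		return PTR_ERR(fs_devs);
 *	...
 *	kfree(fs_devs);		// not on any list yet, no locking needed
 */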

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
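
/*
 * Typical call pattern (sketch modelled on btrfs_open_one_device() below;
 * the flags value is an assumption for illustration):
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder, 1,
 *				    &bdev, &bh);
 *	if (ret)
 *		return ret;
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...
 *	brelse(bh);
 *	blkdev_put(bdev, flags);
 */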

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}
/*
 *  Search for and remove all stale devices (devices which are not mounted).
 *  When both inputs are NULL, it will search for and release all stale devices.
 *  path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
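
/*
 * Return value examples (illustrative; the paths are hypothetical):
 *
 *	btrfs_free_stale_devices("/dev/never-scanned", NULL);	// -ENOENT
 *	btrfs_free_stale_devices("/dev/mounted-dev", NULL);	// -EBUSY
 *	btrfs_free_stale_devices(NULL, NULL);	// 0, drops all stale devices
 */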

static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it, meaning our fsid will be different from theirs.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0) {
			return fs_devices;
		}
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid) {
			/*
			 * When we have an image which has CHANGING_FSID_V2 set
			 * it might belong to either a filesystem which has
			 * disks with completed fsid change or it might belong
			 * to fs with no UUID changes in effect, handle both.
			 */
			fs_devices = find_fsid_inprogress(disk_super);
			if (!fs_devices)
				fs_devices = find_fsid(disk_super->fsid, NULL);
		} else {
			fs_devices = find_fsid_changed(disk_super);
		}
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid(disk_super->fsid,
				       disk_super->metadata_uuid);
	} else {
		fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When the FS is already mounted:
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at the time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with a
		 *         different name, or
		 *      b. The missing-disk-which-was-replaced has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further, in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transactions when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */

		/*
		 * For now, we do allow updates to btrfs_fs_device through the
		 * btrfs dev scan cli after the FS has been mounted.  We're
		 * still tracking a problem where systems fail to mount by
		 * subvolume id when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is, if the FS is _not_ mounted and if you are
			 * here, that means there is more than one disk with
			 * the same uuid and devid. We keep the one with the
			 * larger generation number or the last-in if the
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
			     &device->dev_state) &&
			     (!latest_dev ||
			      device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	synchronize_rcu();
	btrfs_free_device(device);
}

static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
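
/*
 * Worked example of the checks above: with PAGE_SIZE == 4096 and the primary
 * super block at bytenr == 65536, index == 16 and offset_in_page(bytenr) == 0,
 * so the super block sits at the start of a single page and cannot straddle
 * a page boundary.
 */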

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
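
/*
 * Example caller (sketch mirroring the device scan ioctl; error handling
 * trimmed):
 *
 *	mutex_lock(&uuid_mutex);
 *	device = btrfs_scan_one_device(path, FMODE_READ, fs_type);
 *	mutex_unlock(&uuid_mutex);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */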

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start.
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
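
/*
 * Worked example (illustrative): if the chunk allocator owns the physical
 * range [16M, 32M) and the caller probes *start == 20M, len == 1M, then
 * *start falls inside the allocated range, so *start is bumped to 32M and
 * true is returned, telling the caller to retry its search from there.
 */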

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				u64 num_bytes, u64 search_start, u64 *start,
				u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(device, &search_start,
						    hole_size)) {
				if (key.offset >= search_start)
					hole_size = key.offset - search_start;
				else
					hole_size = 0;
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(device, &search_start, hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
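
/*
 * Usage sketch (hypothetical caller):
 *
 *	u64 start, len;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &start, &len);
 *
 * On success @start is the beginning of a hole of at least 1GiB; on -ENOSPC,
 * @start/@len describe the largest hole that was found instead.
 */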

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
1816 
1817 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1818 {
1819 	struct extent_map_tree *em_tree;
1820 	struct extent_map *em;
1821 	struct rb_node *n;
1822 	u64 ret = 0;
1823 
1824 	em_tree = &fs_info->mapping_tree;
1825 	read_lock(&em_tree->lock);
1826 	n = rb_last(&em_tree->map.rb_root);
1827 	if (n) {
1828 		em = rb_entry(n, struct extent_map, rb_node);
1829 		ret = em->start + em->len;
1830 	}
1831 	read_unlock(&em_tree->lock);
1832 
1833 	return ret;
1834 }
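/*
 * Example for find_next_chunk() (illustrative only): with chunk mappings
 * [0, 8M) and [8M, 24M) in the mapping tree, rb_last() returns the
 * [8M, 24M) extent_map, so the next chunk is placed at logical 24M. On an
 * empty tree the first chunk starts at logical 0.
 */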
1835 
1836 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1837 				    u64 *devid_ret)
1838 {
1839 	int ret;
1840 	struct btrfs_key key;
1841 	struct btrfs_key found_key;
1842 	struct btrfs_path *path;
1843 
1844 	path = btrfs_alloc_path();
1845 	if (!path)
1846 		return -ENOMEM;
1847 
1848 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1849 	key.type = BTRFS_DEV_ITEM_KEY;
1850 	key.offset = (u64)-1;
1851 
1852 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1853 	if (ret < 0)
1854 		goto error;
1855 
1856 	if (ret == 0) {
1857 		/* Corruption */
1858 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1859 		ret = -EUCLEAN;
1860 		goto error;
1861 	}
1862 
1863 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1864 				  BTRFS_DEV_ITEMS_OBJECTID,
1865 				  BTRFS_DEV_ITEM_KEY);
1866 	if (ret) {
1867 		*devid_ret = 1;
1868 	} else {
1869 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1870 				      path->slots[0]);
1871 		*devid_ret = found_key.offset + 1;
1872 	}
1873 	ret = 0;
1874 error:
1875 	btrfs_free_path(path);
1876 	return ret;
1877 }
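/*
 * Example for find_next_devid() (illustrative only): with DEV_ITEM keys
 * for devids 1, 2 and 5 in the chunk tree, the search for offset (u64)-1
 * lands past the last slot, btrfs_previous_item() steps back to the
 * devid 5 item, and *devid_ret becomes 6. Devids freed by device removal
 * are not reused.
 */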
1878 
1879 /*
1880  * The device information is stored in the chunk root.
1881  * The btrfs_device struct should be fully filled in.
1882  */
1883 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1884 			    struct btrfs_device *device)
1885 {
1886 	int ret;
1887 	struct btrfs_path *path;
1888 	struct btrfs_dev_item *dev_item;
1889 	struct extent_buffer *leaf;
1890 	struct btrfs_key key;
1891 	unsigned long ptr;
1892 
1893 	path = btrfs_alloc_path();
1894 	if (!path)
1895 		return -ENOMEM;
1896 
1897 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1898 	key.type = BTRFS_DEV_ITEM_KEY;
1899 	key.offset = device->devid;
1900 
1901 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1902 				      &key, sizeof(*dev_item));
1903 	if (ret)
1904 		goto out;
1905 
1906 	leaf = path->nodes[0];
1907 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1908 
1909 	btrfs_set_device_id(leaf, dev_item, device->devid);
1910 	btrfs_set_device_generation(leaf, dev_item, 0);
1911 	btrfs_set_device_type(leaf, dev_item, device->type);
1912 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1913 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1914 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1915 	btrfs_set_device_total_bytes(leaf, dev_item,
1916 				     btrfs_device_get_disk_total_bytes(device));
1917 	btrfs_set_device_bytes_used(leaf, dev_item,
1918 				    btrfs_device_get_bytes_used(device));
1919 	btrfs_set_device_group(leaf, dev_item, 0);
1920 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1921 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1922 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1923 
1924 	ptr = btrfs_device_uuid(dev_item);
1925 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1926 	ptr = btrfs_device_fsid(dev_item);
1927 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1928 			    ptr, BTRFS_FSID_SIZE);
1929 	btrfs_mark_buffer_dirty(leaf);
1930 
1931 	ret = 0;
1932 out:
1933 	btrfs_free_path(path);
1934 	return ret;
1935 }
1936 
1937 /*
1938  * Function to update ctime/mtime for a given device path.
1939  * Mainly used for ctime/mtime based probes like libblkid.
1940  */
1941 static void update_dev_time(const char *path_name)
1942 {
1943 	struct file *filp;
1944 
1945 	filp = filp_open(path_name, O_RDWR, 0);
1946 	if (IS_ERR(filp))
1947 		return;
1948 	file_update_time(filp);
1949 	filp_close(filp, NULL);
1950 }
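/*
 * Usage sketch for update_dev_time() (illustrative only): callers touch
 * the device node after an add or remove so that timestamp-based probes
 * re-read the device, e.g.:
 *
 *	update_dev_time("/dev/sdb");
 *
 * The path above is hypothetical; real callers pass the user supplied
 * device path.
 */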
1951 
1952 static int btrfs_rm_dev_item(struct btrfs_device *device)
1953 {
1954 	struct btrfs_root *root = device->fs_info->chunk_root;
1955 	int ret;
1956 	struct btrfs_path *path;
1957 	struct btrfs_key key;
1958 	struct btrfs_trans_handle *trans;
1959 
1960 	path = btrfs_alloc_path();
1961 	if (!path)
1962 		return -ENOMEM;
1963 
1964 	trans = btrfs_start_transaction(root, 0);
1965 	if (IS_ERR(trans)) {
1966 		btrfs_free_path(path);
1967 		return PTR_ERR(trans);
1968 	}
1969 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1970 	key.type = BTRFS_DEV_ITEM_KEY;
1971 	key.offset = device->devid;
1972 
1973 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1974 	if (ret) {
1975 		if (ret > 0)
1976 			ret = -ENOENT;
1977 		btrfs_abort_transaction(trans, ret);
1978 		btrfs_end_transaction(trans);
1979 		goto out;
1980 	}
1981 
1982 	ret = btrfs_del_item(trans, root, path);
1983 	if (ret) {
1984 		btrfs_abort_transaction(trans, ret);
1985 		btrfs_end_transaction(trans);
1986 	}
1987 
1988 out:
1989 	btrfs_free_path(path);
1990 	if (!ret)
1991 		ret = btrfs_commit_transaction(trans);
1992 	return ret;
1993 }
1994 
1995 /*
1996  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1997  * filesystem. It's up to the caller to adjust that number for e.g. device
1998  * replace.
1999  */
2000 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
2001 		u64 num_devices)
2002 {
2003 	u64 all_avail;
2004 	unsigned seq;
2005 	int i;
2006 
2007 	do {
2008 		seq = read_seqbegin(&fs_info->profiles_lock);
2009 
2010 		all_avail = fs_info->avail_data_alloc_bits |
2011 			    fs_info->avail_system_alloc_bits |
2012 			    fs_info->avail_metadata_alloc_bits;
2013 	} while (read_seqretry(&fs_info->profiles_lock, seq));
2014 
2015 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2016 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
2017 			continue;
2018 
2019 		if (num_devices < btrfs_raid_array[i].devs_min) {
2020 			int ret = btrfs_raid_array[i].mindev_error;
2021 
2022 			if (ret)
2023 				return ret;
2024 		}
2025 	}
2026 
2027 	return 0;
2028 }
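/*
 * Example for btrfs_check_raid_min_devices() (illustrative only): on a
 * four device RAID10 filesystem (devs_min == 4), a removal attempt calls
 * this with num_devices == 3, which returns
 * BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET. Profiles whose mindev_error is 0
 * (single, dup, raid0) never fail this check.
 */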
2029 
2030 static struct btrfs_device * btrfs_find_next_active_device(
2031 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2032 {
2033 	struct btrfs_device *next_device;
2034 
2035 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2036 		if (next_device != device &&
2037 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2038 		    && next_device->bdev)
2039 			return next_device;
2040 	}
2041 
2042 	return NULL;
2043 }
2044 
2045 /*
2046  * Helper function to check if the given device is part of s_bdev / latest_bdev
2047  * and replace it with the provided or the next active device. In the context
2048  * where this function is called, there should always be another device (or
2049  * this_dev) which is active.
2050  */
2051 void btrfs_assign_next_active_device(struct btrfs_device *device,
2052 				     struct btrfs_device *this_dev)
2053 {
2054 	struct btrfs_fs_info *fs_info = device->fs_info;
2055 	struct btrfs_device *next_device;
2056 
2057 	if (this_dev)
2058 		next_device = this_dev;
2059 	else
2060 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2061 								device);
2062 	ASSERT(next_device);
2063 
2064 	if (fs_info->sb->s_bdev &&
2065 			(fs_info->sb->s_bdev == device->bdev))
2066 		fs_info->sb->s_bdev = next_device->bdev;
2067 
2068 	if (fs_info->fs_devices->latest_bdev == device->bdev)
2069 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2070 }
2071 
2072 /*
2073  * Return btrfs_fs_devices::num_devices excluding the device that's being
2074  * currently replaced.
2075  */
2076 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2077 {
2078 	u64 num_devices = fs_info->fs_devices->num_devices;
2079 
2080 	down_read(&fs_info->dev_replace.rwsem);
2081 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2082 		ASSERT(num_devices > 1);
2083 		num_devices--;
2084 	}
2085 	up_read(&fs_info->dev_replace.rwsem);
2086 
2087 	return num_devices;
2088 }
2089 
2090 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2091 		u64 devid)
2092 {
2093 	struct btrfs_device *device;
2094 	struct btrfs_fs_devices *cur_devices;
2095 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2096 	u64 num_devices;
2097 	int ret = 0;
2098 
2099 	mutex_lock(&uuid_mutex);
2100 
2101 	num_devices = btrfs_num_devices(fs_info);
2102 
2103 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2104 	if (ret)
2105 		goto out;
2106 
2107 	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2108 
2109 	if (IS_ERR(device)) {
2110 		if (PTR_ERR(device) == -ENOENT &&
2111 		    strcmp(device_path, "missing") == 0)
2112 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2113 		else
2114 			ret = PTR_ERR(device);
2115 		goto out;
2116 	}
2117 
2118 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2119 		btrfs_warn_in_rcu(fs_info,
2120 		  "cannot remove device %s (devid %llu) due to active swapfile",
2121 				  rcu_str_deref(device->name), device->devid);
2122 		ret = -ETXTBSY;
2123 		goto out;
2124 	}
2125 
2126 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2127 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2128 		goto out;
2129 	}
2130 
2131 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2132 	    fs_info->fs_devices->rw_devices == 1) {
2133 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2134 		goto out;
2135 	}
2136 
2137 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2138 		mutex_lock(&fs_info->chunk_mutex);
2139 		list_del_init(&device->dev_alloc_list);
2140 		device->fs_devices->rw_devices--;
2141 		mutex_unlock(&fs_info->chunk_mutex);
2142 	}
2143 
2144 	mutex_unlock(&uuid_mutex);
2145 	ret = btrfs_shrink_device(device, 0);
2146 	mutex_lock(&uuid_mutex);
2147 	if (ret)
2148 		goto error_undo;
2149 
2150 	/*
2151 	 * TODO: the superblock still includes this device in its num_devices
2152 	 * counter although write_all_supers() is not locked out. This
2153 	 * could give a filesystem state which requires a degraded mount.
2154 	 */
2155 	ret = btrfs_rm_dev_item(device);
2156 	if (ret)
2157 		goto error_undo;
2158 
2159 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2160 	btrfs_scrub_cancel_dev(device);
2161 
2162 	/*
2163 	 * The device list mutex makes sure that we don't change
2164 	 * the device list while someone else is writing out all
2165 	 * the device supers. Whoever is writing all supers should
2166 	 * lock the device list mutex before getting the number of
2167 	 * devices in the super block (super_copy). Conversely,
2168 	 * whoever updates the number of devices in the super block
2169 	 * (super_copy) should hold the device list mutex.
2170 	 */
2171 
2172 	/*
2173 	 * In normal cases cur_devices == fs_devices. But when deleting
2174 	 * a seed device, cur_devices points to the seed's own fs_devices,
2175 	 * listed under fs_devices->seed.
2176 	 */
2177 	cur_devices = device->fs_devices;
2178 	mutex_lock(&fs_devices->device_list_mutex);
2179 	list_del_rcu(&device->dev_list);
2180 
2181 	cur_devices->num_devices--;
2182 	cur_devices->total_devices--;
2183 	/* Update total_devices of the parent fs_devices if it's seed */
2184 	if (cur_devices != fs_devices)
2185 		fs_devices->total_devices--;
2186 
2187 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2188 		cur_devices->missing_devices--;
2189 
2190 	btrfs_assign_next_active_device(device, NULL);
2191 
2192 	if (device->bdev) {
2193 		cur_devices->open_devices--;
2194 		/* remove sysfs entry */
2195 		btrfs_sysfs_rm_device_link(fs_devices, device);
2196 	}
2197 
2198 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2199 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2200 	mutex_unlock(&fs_devices->device_list_mutex);
2201 
2202 	/*
2203 	 * at this point, the device is zero sized and detached from
2204 	 * the devices list.  All that's left is to zero out the old
2205 	 * supers and free the device.
2206 	 */
2207 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2208 		btrfs_scratch_superblocks(device->bdev, device->name->str);
2209 
2210 	btrfs_close_bdev(device);
2211 	synchronize_rcu();
2212 	btrfs_free_device(device);
2213 
2214 	if (cur_devices->open_devices == 0) {
2215 		while (fs_devices) {
2216 			if (fs_devices->seed == cur_devices) {
2217 				fs_devices->seed = cur_devices->seed;
2218 				break;
2219 			}
2220 			fs_devices = fs_devices->seed;
2221 		}
2222 		cur_devices->seed = NULL;
2223 		close_fs_devices(cur_devices);
2224 		free_fs_devices(cur_devices);
2225 	}
2226 
2227 out:
2228 	mutex_unlock(&uuid_mutex);
2229 	return ret;
2230 
2231 error_undo:
2232 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2233 		mutex_lock(&fs_info->chunk_mutex);
2234 		list_add(&device->dev_alloc_list,
2235 			 &fs_devices->alloc_list);
2236 		device->fs_devices->rw_devices++;
2237 		mutex_unlock(&fs_info->chunk_mutex);
2238 	}
2239 	goto out;
2240 }
2241 
2242 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2243 {
2244 	struct btrfs_fs_devices *fs_devices;
2245 
2246 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2247 
2248 	/*
2249 	 * For a filesystem with no seed, srcdev->fs_devices will point
2250 	 * to the fs_devices of fs_info. However, when the device being
2251 	 * replaced is a seed device, it will point to the seed's local
2252 	 * fs_devices. In short, srcdev has its correct fs_devices in
2253 	 * both cases.
2253 	 */
2254 	fs_devices = srcdev->fs_devices;
2255 
2256 	list_del_rcu(&srcdev->dev_list);
2257 	list_del(&srcdev->dev_alloc_list);
2258 	fs_devices->num_devices--;
2259 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2260 		fs_devices->missing_devices--;
2261 
2262 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2263 		fs_devices->rw_devices--;
2264 
2265 	if (srcdev->bdev)
2266 		fs_devices->open_devices--;
2267 }
2268 
2269 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2270 {
2271 	struct btrfs_fs_info *fs_info = srcdev->fs_info;
2272 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2273 
2274 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2275 		/* zero out the old super if it is writable */
2276 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2277 	}
2278 
2279 	btrfs_close_bdev(srcdev);
2280 	synchronize_rcu();
2281 	btrfs_free_device(srcdev);
2282 
2283 	/* If there are no devices left, delete this fs_devices entirely */
2284 	if (!fs_devices->num_devices) {
2285 		struct btrfs_fs_devices *tmp_fs_devices;
2286 
2287 		/*
2288 		 * On a mounted FS, num_devices can't be zero unless it's a
2289 		 * seed. When a seed device is being replaced, the replace
2290 		 * target is added to the sprout FS, so there will be no
2291 		 * device left under the seed FS.
2292 		 */
2293 		ASSERT(fs_devices->seeding);
2294 
2295 		tmp_fs_devices = fs_info->fs_devices;
2296 		while (tmp_fs_devices) {
2297 			if (tmp_fs_devices->seed == fs_devices) {
2298 				tmp_fs_devices->seed = fs_devices->seed;
2299 				break;
2300 			}
2301 			tmp_fs_devices = tmp_fs_devices->seed;
2302 		}
2303 		fs_devices->seed = NULL;
2304 		close_fs_devices(fs_devices);
2305 		free_fs_devices(fs_devices);
2306 	}
2307 }
2308 
2309 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2310 {
2311 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2312 
2313 	WARN_ON(!tgtdev);
2314 	mutex_lock(&fs_devices->device_list_mutex);
2315 
2316 	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2317 
2318 	if (tgtdev->bdev)
2319 		fs_devices->open_devices--;
2320 
2321 	fs_devices->num_devices--;
2322 
2323 	btrfs_assign_next_active_device(tgtdev, NULL);
2324 
2325 	list_del_rcu(&tgtdev->dev_list);
2326 
2327 	mutex_unlock(&fs_devices->device_list_mutex);
2328 
2329 	/*
2330 	 * The update_dev_time() within btrfs_scratch_superblocks()
2331 	 * may lead to a call to btrfs_show_devname() which will try
2332 	 * to hold device_list_mutex. At this point this device
2333 	 * is already out of the device list, so we don't have to hold
2334 	 * the device_list_mutex lock.
2335 	 */
2336 	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2337 
2338 	btrfs_close_bdev(tgtdev);
2339 	synchronize_rcu();
2340 	btrfs_free_device(tgtdev);
2341 }
2342 
2343 static struct btrfs_device *btrfs_find_device_by_path(
2344 		struct btrfs_fs_info *fs_info, const char *device_path)
2345 {
2346 	int ret = 0;
2347 	struct btrfs_super_block *disk_super;
2348 	u64 devid;
2349 	u8 *dev_uuid;
2350 	struct block_device *bdev;
2351 	struct buffer_head *bh;
2352 	struct btrfs_device *device;
2353 
2354 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2355 				    fs_info->bdev_holder, 0, &bdev, &bh);
2356 	if (ret)
2357 		return ERR_PTR(ret);
2358 	disk_super = (struct btrfs_super_block *)bh->b_data;
2359 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2360 	dev_uuid = disk_super->dev_item.uuid;
2361 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2362 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2363 					   disk_super->metadata_uuid, true);
2364 	else
2365 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2366 					   disk_super->fsid, true);
2367 
2368 	brelse(bh);
2369 	if (!device)
2370 		device = ERR_PTR(-ENOENT);
2371 	blkdev_put(bdev, FMODE_READ);
2372 	return device;
2373 }
2374 
2375 /*
2376  * Lookup a device given by device id, or the path if the id is 0.
2377  */
2378 struct btrfs_device *btrfs_find_device_by_devspec(
2379 		struct btrfs_fs_info *fs_info, u64 devid,
2380 		const char *device_path)
2381 {
2382 	struct btrfs_device *device;
2383 
2384 	if (devid) {
2385 		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2386 					   NULL, true);
2387 		if (!device)
2388 			return ERR_PTR(-ENOENT);
2389 		return device;
2390 	}
2391 
2392 	if (!device_path || !device_path[0])
2393 		return ERR_PTR(-EINVAL);
2394 
2395 	if (strcmp(device_path, "missing") == 0) {
2396 		/* Find first missing device */
2397 		list_for_each_entry(device, &fs_info->fs_devices->devices,
2398 				    dev_list) {
2399 			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2400 				     &device->dev_state) && !device->bdev)
2401 				return device;
2402 		}
2403 		return ERR_PTR(-ENOENT);
2404 	}
2405 
2406 	return btrfs_find_device_by_path(fs_info, device_path);
2407 }
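/*
 * Usage sketch for btrfs_find_device_by_devspec() (illustrative only): a
 * non-zero devid takes precedence and the path is ignored; the special
 * path "missing" selects the first device that is recorded in the
 * metadata but has no backing bdev:
 *
 *	device = btrfs_find_device_by_devspec(fs_info, 0, "missing");
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */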
2408 
2409 /*
2410  * Does all the dirty work required for changing the filesystem's UUID.
2411  */
2412 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2413 {
2414 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2415 	struct btrfs_fs_devices *old_devices;
2416 	struct btrfs_fs_devices *seed_devices;
2417 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2418 	struct btrfs_device *device;
2419 	u64 super_flags;
2420 
2421 	lockdep_assert_held(&uuid_mutex);
2422 	if (!fs_devices->seeding)
2423 		return -EINVAL;
2424 
2425 	seed_devices = alloc_fs_devices(NULL, NULL);
2426 	if (IS_ERR(seed_devices))
2427 		return PTR_ERR(seed_devices);
2428 
2429 	old_devices = clone_fs_devices(fs_devices);
2430 	if (IS_ERR(old_devices)) {
2431 		kfree(seed_devices);
2432 		return PTR_ERR(old_devices);
2433 	}
2434 
2435 	list_add(&old_devices->fs_list, &fs_uuids);
2436 
2437 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2438 	seed_devices->opened = 1;
2439 	INIT_LIST_HEAD(&seed_devices->devices);
2440 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2441 	mutex_init(&seed_devices->device_list_mutex);
2442 
2443 	mutex_lock(&fs_devices->device_list_mutex);
2444 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2445 			      synchronize_rcu);
2446 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2447 		device->fs_devices = seed_devices;
2448 
2449 	mutex_lock(&fs_info->chunk_mutex);
2450 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2451 	mutex_unlock(&fs_info->chunk_mutex);
2452 
2453 	fs_devices->seeding = 0;
2454 	fs_devices->num_devices = 0;
2455 	fs_devices->open_devices = 0;
2456 	fs_devices->missing_devices = 0;
2457 	fs_devices->rotating = 0;
2458 	fs_devices->seed = seed_devices;
2459 
2460 	generate_random_uuid(fs_devices->fsid);
2461 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2462 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2463 	mutex_unlock(&fs_devices->device_list_mutex);
2464 
2465 	super_flags = btrfs_super_flags(disk_super) &
2466 		      ~BTRFS_SUPER_FLAG_SEEDING;
2467 	btrfs_set_super_flags(disk_super, super_flags);
2468 
2469 	return 0;
2470 }
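/*
 * Sketch of the sprout transition performed by btrfs_prepare_sprout()
 * (illustrative only):
 *
 *	before: fs_devices { fsid A, seeding, devs: sda }
 *	after:  fs_devices { fsid B, devs: none } -> seed { fsid A, devs: sda }
 *
 * All existing devices move to the newly allocated seed fs_devices, the
 * mounted fs_devices gets a fresh fsid, and the device being added by
 * btrfs_init_new_device() becomes the sprout's first writable member.
 */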
2471 
2472 /*
2473  * Store the expected generation for seed devices in device items.
2474  */
2475 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2476 {
2477 	struct btrfs_fs_info *fs_info = trans->fs_info;
2478 	struct btrfs_root *root = fs_info->chunk_root;
2479 	struct btrfs_path *path;
2480 	struct extent_buffer *leaf;
2481 	struct btrfs_dev_item *dev_item;
2482 	struct btrfs_device *device;
2483 	struct btrfs_key key;
2484 	u8 fs_uuid[BTRFS_FSID_SIZE];
2485 	u8 dev_uuid[BTRFS_UUID_SIZE];
2486 	u64 devid;
2487 	int ret;
2488 
2489 	path = btrfs_alloc_path();
2490 	if (!path)
2491 		return -ENOMEM;
2492 
2493 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2494 	key.offset = 0;
2495 	key.type = BTRFS_DEV_ITEM_KEY;
2496 
2497 	while (1) {
2498 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2499 		if (ret < 0)
2500 			goto error;
2501 
2502 		leaf = path->nodes[0];
2503 next_slot:
2504 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2505 			ret = btrfs_next_leaf(root, path);
2506 			if (ret > 0)
2507 				break;
2508 			if (ret < 0)
2509 				goto error;
2510 			leaf = path->nodes[0];
2511 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2512 			btrfs_release_path(path);
2513 			continue;
2514 		}
2515 
2516 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2517 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2518 		    key.type != BTRFS_DEV_ITEM_KEY)
2519 			break;
2520 
2521 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2522 					  struct btrfs_dev_item);
2523 		devid = btrfs_device_id(leaf, dev_item);
2524 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2525 				   BTRFS_UUID_SIZE);
2526 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2527 				   BTRFS_FSID_SIZE);
2528 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2529 					   fs_uuid, true);
2530 		BUG_ON(!device); /* Logic error */
2531 
2532 		if (device->fs_devices->seeding) {
2533 			btrfs_set_device_generation(leaf, dev_item,
2534 						    device->generation);
2535 			btrfs_mark_buffer_dirty(leaf);
2536 		}
2537 
2538 		path->slots[0]++;
2539 		goto next_slot;
2540 	}
2541 	ret = 0;
2542 error:
2543 	btrfs_free_path(path);
2544 	return ret;
2545 }
2546 
2547 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2548 {
2549 	struct btrfs_root *root = fs_info->dev_root;
2550 	struct request_queue *q;
2551 	struct btrfs_trans_handle *trans;
2552 	struct btrfs_device *device;
2553 	struct block_device *bdev;
2554 	struct super_block *sb = fs_info->sb;
2555 	struct rcu_string *name;
2556 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2557 	u64 orig_super_total_bytes;
2558 	u64 orig_super_num_devices;
2559 	int seeding_dev = 0;
2560 	int ret = 0;
2561 	bool unlocked = false;
2562 
2563 	if (sb_rdonly(sb) && !fs_devices->seeding)
2564 		return -EROFS;
2565 
2566 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2567 				  fs_info->bdev_holder);
2568 	if (IS_ERR(bdev))
2569 		return PTR_ERR(bdev);
2570 
2571 	if (fs_devices->seeding) {
2572 		seeding_dev = 1;
2573 		down_write(&sb->s_umount);
2574 		mutex_lock(&uuid_mutex);
2575 	}
2576 
2577 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2578 
2579 	mutex_lock(&fs_devices->device_list_mutex);
2580 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
2581 		if (device->bdev == bdev) {
2582 			ret = -EEXIST;
2583 			mutex_unlock(
2584 				&fs_devices->device_list_mutex);
2585 			goto error;
2586 		}
2587 	}
2588 	mutex_unlock(&fs_devices->device_list_mutex);
2589 
2590 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2591 	if (IS_ERR(device)) {
2592 		/* we can safely leave the fs_devices entry around */
2593 		ret = PTR_ERR(device);
2594 		goto error;
2595 	}
2596 
2597 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2598 	if (!name) {
2599 		ret = -ENOMEM;
2600 		goto error_free_device;
2601 	}
2602 	rcu_assign_pointer(device->name, name);
2603 
2604 	trans = btrfs_start_transaction(root, 0);
2605 	if (IS_ERR(trans)) {
2606 		ret = PTR_ERR(trans);
2607 		goto error_free_device;
2608 	}
2609 
2610 	q = bdev_get_queue(bdev);
2611 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2612 	device->generation = trans->transid;
2613 	device->io_width = fs_info->sectorsize;
2614 	device->io_align = fs_info->sectorsize;
2615 	device->sector_size = fs_info->sectorsize;
2616 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2617 					 fs_info->sectorsize);
2618 	device->disk_total_bytes = device->total_bytes;
2619 	device->commit_total_bytes = device->total_bytes;
2620 	device->fs_info = fs_info;
2621 	device->bdev = bdev;
2622 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2623 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2624 	device->mode = FMODE_EXCL;
2625 	device->dev_stats_valid = 1;
2626 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2627 
2628 	if (seeding_dev) {
2629 		sb->s_flags &= ~SB_RDONLY;
2630 		ret = btrfs_prepare_sprout(fs_info);
2631 		if (ret) {
2632 			btrfs_abort_transaction(trans, ret);
2633 			goto error_trans;
2634 		}
2635 	}
2636 
2637 	device->fs_devices = fs_devices;
2638 
2639 	mutex_lock(&fs_devices->device_list_mutex);
2640 	mutex_lock(&fs_info->chunk_mutex);
2641 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2642 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2643 	fs_devices->num_devices++;
2644 	fs_devices->open_devices++;
2645 	fs_devices->rw_devices++;
2646 	fs_devices->total_devices++;
2647 	fs_devices->total_rw_bytes += device->total_bytes;
2648 
2649 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2650 
2651 	if (!blk_queue_nonrot(q))
2652 		fs_devices->rotating = 1;
2653 
2654 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2655 	btrfs_set_super_total_bytes(fs_info->super_copy,
2656 		round_down(orig_super_total_bytes + device->total_bytes,
2657 			   fs_info->sectorsize));
2658 
2659 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2660 	btrfs_set_super_num_devices(fs_info->super_copy,
2661 				    orig_super_num_devices + 1);
2662 
2663 	/* add sysfs device entry */
2664 	btrfs_sysfs_add_device_link(fs_devices, device);
2665 
2666 	/*
2667 	 * we've got more storage, clear any full flags on the space
2668 	 * infos
2669 	 */
2670 	btrfs_clear_space_info_full(fs_info);
2671 
2672 	mutex_unlock(&fs_info->chunk_mutex);
2673 	mutex_unlock(&fs_devices->device_list_mutex);
2674 
2675 	if (seeding_dev) {
2676 		mutex_lock(&fs_info->chunk_mutex);
2677 		ret = init_first_rw_device(trans);
2678 		mutex_unlock(&fs_info->chunk_mutex);
2679 		if (ret) {
2680 			btrfs_abort_transaction(trans, ret);
2681 			goto error_sysfs;
2682 		}
2683 	}
2684 
2685 	ret = btrfs_add_dev_item(trans, device);
2686 	if (ret) {
2687 		btrfs_abort_transaction(trans, ret);
2688 		goto error_sysfs;
2689 	}
2690 
2691 	if (seeding_dev) {
2692 		ret = btrfs_finish_sprout(trans);
2693 		if (ret) {
2694 			btrfs_abort_transaction(trans, ret);
2695 			goto error_sysfs;
2696 		}
2697 
2698 		btrfs_sysfs_update_sprout_fsid(fs_devices,
2699 				fs_info->fs_devices->fsid);
2700 	}
2701 
2702 	ret = btrfs_commit_transaction(trans);
2703 
2704 	if (seeding_dev) {
2705 		mutex_unlock(&uuid_mutex);
2706 		up_write(&sb->s_umount);
2707 		unlocked = true;
2708 
2709 		if (ret) /* transaction commit */
2710 			return ret;
2711 
2712 		ret = btrfs_relocate_sys_chunks(fs_info);
2713 		if (ret < 0)
2714 			btrfs_handle_fs_error(fs_info, ret,
2715 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2716 		trans = btrfs_attach_transaction(root);
2717 		if (IS_ERR(trans)) {
2718 			if (PTR_ERR(trans) == -ENOENT)
2719 				return 0;
2720 			ret = PTR_ERR(trans);
2721 			trans = NULL;
2722 			goto error_sysfs;
2723 		}
2724 		ret = btrfs_commit_transaction(trans);
2725 	}
2726 
2727 	/* Update ctime/mtime for libblkid */
2728 	update_dev_time(device_path);
2729 	return ret;
2730 
2731 error_sysfs:
2732 	btrfs_sysfs_rm_device_link(fs_devices, device);
2733 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2734 	mutex_lock(&fs_info->chunk_mutex);
2735 	list_del_rcu(&device->dev_list);
2736 	list_del(&device->dev_alloc_list);
2737 	fs_info->fs_devices->num_devices--;
2738 	fs_info->fs_devices->open_devices--;
2739 	fs_info->fs_devices->rw_devices--;
2740 	fs_info->fs_devices->total_devices--;
2741 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2742 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2743 	btrfs_set_super_total_bytes(fs_info->super_copy,
2744 				    orig_super_total_bytes);
2745 	btrfs_set_super_num_devices(fs_info->super_copy,
2746 				    orig_super_num_devices);
2747 	mutex_unlock(&fs_info->chunk_mutex);
2748 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2749 error_trans:
2750 	if (seeding_dev)
2751 		sb->s_flags |= SB_RDONLY;
2752 	if (trans)
2753 		btrfs_end_transaction(trans);
2754 error_free_device:
2755 	btrfs_free_device(device);
2756 error:
2757 	blkdev_put(bdev, FMODE_EXCL);
2758 	if (seeding_dev && !unlocked) {
2759 		mutex_unlock(&uuid_mutex);
2760 		up_write(&sb->s_umount);
2761 	}
2762 	return ret;
2763 }
2764 
2765 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2766 					struct btrfs_device *device)
2767 {
2768 	int ret;
2769 	struct btrfs_path *path;
2770 	struct btrfs_root *root = device->fs_info->chunk_root;
2771 	struct btrfs_dev_item *dev_item;
2772 	struct extent_buffer *leaf;
2773 	struct btrfs_key key;
2774 
2775 	path = btrfs_alloc_path();
2776 	if (!path)
2777 		return -ENOMEM;
2778 
2779 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2780 	key.type = BTRFS_DEV_ITEM_KEY;
2781 	key.offset = device->devid;
2782 
2783 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2784 	if (ret < 0)
2785 		goto out;
2786 
2787 	if (ret > 0) {
2788 		ret = -ENOENT;
2789 		goto out;
2790 	}
2791 
2792 	leaf = path->nodes[0];
2793 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2794 
2795 	btrfs_set_device_id(leaf, dev_item, device->devid);
2796 	btrfs_set_device_type(leaf, dev_item, device->type);
2797 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2798 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2799 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2800 	btrfs_set_device_total_bytes(leaf, dev_item,
2801 				     btrfs_device_get_disk_total_bytes(device));
2802 	btrfs_set_device_bytes_used(leaf, dev_item,
2803 				    btrfs_device_get_bytes_used(device));
2804 	btrfs_mark_buffer_dirty(leaf);
2805 
2806 out:
2807 	btrfs_free_path(path);
2808 	return ret;
2809 }
2810 
2811 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2812 		      struct btrfs_device *device, u64 new_size)
2813 {
2814 	struct btrfs_fs_info *fs_info = device->fs_info;
2815 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2816 	u64 old_total;
2817 	u64 diff;
2818 
2819 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2820 		return -EACCES;
2821 
2822 	new_size = round_down(new_size, fs_info->sectorsize);
2823 
2824 	mutex_lock(&fs_info->chunk_mutex);
2825 	old_total = btrfs_super_total_bytes(super_copy);
2826 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2827 
2828 	if (new_size <= device->total_bytes ||
2829 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2830 		mutex_unlock(&fs_info->chunk_mutex);
2831 		return -EINVAL;
2832 	}
2833 
2834 	btrfs_set_super_total_bytes(super_copy,
2835 			round_down(old_total + diff, fs_info->sectorsize));
2836 	device->fs_devices->total_rw_bytes += diff;
2837 
2838 	btrfs_device_set_total_bytes(device, new_size);
2839 	btrfs_device_set_disk_total_bytes(device, new_size);
2840 	btrfs_clear_space_info_full(device->fs_info);
2841 	if (list_empty(&device->post_commit_list))
2842 		list_add_tail(&device->post_commit_list,
2843 			      &trans->transaction->dev_update_list);
2844 	mutex_unlock(&fs_info->chunk_mutex);
2845 
2846 	return btrfs_update_device(trans, device);
2847 }
2848 
2849 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2850 {
2851 	struct btrfs_fs_info *fs_info = trans->fs_info;
2852 	struct btrfs_root *root = fs_info->chunk_root;
2853 	int ret;
2854 	struct btrfs_path *path;
2855 	struct btrfs_key key;
2856 
2857 	path = btrfs_alloc_path();
2858 	if (!path)
2859 		return -ENOMEM;
2860 
2861 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2862 	key.offset = chunk_offset;
2863 	key.type = BTRFS_CHUNK_ITEM_KEY;
2864 
2865 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2866 	if (ret < 0)
2867 		goto out;
2868 	else if (ret > 0) { /* Logic error or corruption */
2869 		btrfs_handle_fs_error(fs_info, -ENOENT,
2870 				      "Failed lookup while freeing chunk.");
2871 		ret = -ENOENT;
2872 		goto out;
2873 	}
2874 
2875 	ret = btrfs_del_item(trans, root, path);
2876 	if (ret < 0)
2877 		btrfs_handle_fs_error(fs_info, ret,
2878 				      "Failed to delete chunk item.");
2879 out:
2880 	btrfs_free_path(path);
2881 	return ret;
2882 }
2883 
2884 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2885 {
2886 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2887 	struct btrfs_disk_key *disk_key;
2888 	struct btrfs_chunk *chunk;
2889 	u8 *ptr;
2890 	int ret = 0;
2891 	u32 num_stripes;
2892 	u32 array_size;
2893 	u32 len = 0;
2894 	u32 cur;
2895 	struct btrfs_key key;
2896 
2897 	mutex_lock(&fs_info->chunk_mutex);
2898 	array_size = btrfs_super_sys_array_size(super_copy);
2899 
2900 	ptr = super_copy->sys_chunk_array;
2901 	cur = 0;
2902 
2903 	while (cur < array_size) {
2904 		disk_key = (struct btrfs_disk_key *)ptr;
2905 		btrfs_disk_key_to_cpu(&key, disk_key);
2906 
2907 		len = sizeof(*disk_key);
2908 
2909 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2910 			chunk = (struct btrfs_chunk *)(ptr + len);
2911 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2912 			len += btrfs_chunk_item_size(num_stripes);
2913 		} else {
2914 			ret = -EIO;
2915 			break;
2916 		}
2917 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2918 		    key.offset == chunk_offset) {
2919 			memmove(ptr, ptr + len, array_size - (cur + len));
2920 			array_size -= len;
2921 			btrfs_set_super_sys_array_size(super_copy, array_size);
2922 		} else {
2923 			ptr += len;
2924 			cur += len;
2925 		}
2926 	}
2927 	mutex_unlock(&fs_info->chunk_mutex);
2928 	return ret;
2929 }
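/*
 * Layout of super_copy->sys_chunk_array as walked by btrfs_del_sys_chunk()
 * (illustrative):
 *
 *	[disk_key 0][chunk 0 + stripes][disk_key 1][chunk 1 + stripes]...
 *
 * Each entry occupies sizeof(struct btrfs_disk_key) plus
 * btrfs_chunk_item_size(num_stripes) bytes; deleting an entry memmove()s
 * the tail down over it and shrinks sys_array_size by the entry length.
 */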
2930 
2931 /*
2932  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2933  * @logical: Logical block offset in bytes.
2934  * @length: Length of extent in bytes.
2935  *
2936  * Return: Chunk mapping or ERR_PTR.
2937  */
2938 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2939 				       u64 logical, u64 length)
2940 {
2941 	struct extent_map_tree *em_tree;
2942 	struct extent_map *em;
2943 
2944 	em_tree = &fs_info->mapping_tree;
2945 	read_lock(&em_tree->lock);
2946 	em = lookup_extent_mapping(em_tree, logical, length);
2947 	read_unlock(&em_tree->lock);
2948 
2949 	if (!em) {
2950 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2951 			   logical, length);
2952 		return ERR_PTR(-EINVAL);
2953 	}
2954 
2955 	if (em->start > logical || em->start + em->len < logical) {
2956 		btrfs_crit(fs_info,
2957 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2958 			   logical, length, em->start, em->start + em->len);
2959 		free_extent_map(em);
2960 		return ERR_PTR(-EINVAL);
2961 	}
2962 
2963 	/* callers are responsible for dropping em's ref. */
2964 	return em;
2965 }
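/*
 * Usage sketch for btrfs_get_chunk_map() (illustrative only), mirroring
 * btrfs_remove_chunk() below. The lookup returns the extent_map with an
 * elevated refcount, so every caller must drop it with free_extent_map():
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	...
 *	free_extent_map(em);
 */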
2966 
2967 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2968 {
2969 	struct btrfs_fs_info *fs_info = trans->fs_info;
2970 	struct extent_map *em;
2971 	struct map_lookup *map;
2972 	u64 dev_extent_len = 0;
2973 	int i, ret = 0;
2974 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2975 
2976 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2977 	if (IS_ERR(em)) {
2978 		/*
2979 		 * This is a logic error, but we don't want to just rely on the
2980 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2981 		 * do anything we still error out.
2982 		 */
2983 		ASSERT(0);
2984 		return PTR_ERR(em);
2985 	}
2986 	map = em->map_lookup;
2987 	mutex_lock(&fs_info->chunk_mutex);
2988 	check_system_chunk(trans, map->type);
2989 	mutex_unlock(&fs_info->chunk_mutex);
2990 
2991 	/*
2992 	 * Take the device list mutex to prevent races with the final phase of
2993 	 * a device replace operation that replaces the device object associated
2994 	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2995 	 */
2996 	mutex_lock(&fs_devices->device_list_mutex);
2997 	for (i = 0; i < map->num_stripes; i++) {
2998 		struct btrfs_device *device = map->stripes[i].dev;
2999 		ret = btrfs_free_dev_extent(trans, device,
3000 					    map->stripes[i].physical,
3001 					    &dev_extent_len);
3002 		if (ret) {
3003 			mutex_unlock(&fs_devices->device_list_mutex);
3004 			btrfs_abort_transaction(trans, ret);
3005 			goto out;
3006 		}
3007 
3008 		if (device->bytes_used > 0) {
3009 			mutex_lock(&fs_info->chunk_mutex);
3010 			btrfs_device_set_bytes_used(device,
3011 					device->bytes_used - dev_extent_len);
3012 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3013 			btrfs_clear_space_info_full(fs_info);
3014 			mutex_unlock(&fs_info->chunk_mutex);
3015 		}
3016 
3017 		ret = btrfs_update_device(trans, device);
3018 		if (ret) {
3019 			mutex_unlock(&fs_devices->device_list_mutex);
3020 			btrfs_abort_transaction(trans, ret);
3021 			goto out;
3022 		}
3023 	}
3024 	mutex_unlock(&fs_devices->device_list_mutex);
3025 
3026 	ret = btrfs_free_chunk(trans, chunk_offset);
3027 	if (ret) {
3028 		btrfs_abort_transaction(trans, ret);
3029 		goto out;
3030 	}
3031 
3032 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3033 
3034 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3035 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3036 		if (ret) {
3037 			btrfs_abort_transaction(trans, ret);
3038 			goto out;
3039 		}
3040 	}
3041 
3042 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3043 	if (ret) {
3044 		btrfs_abort_transaction(trans, ret);
3045 		goto out;
3046 	}
3047 
3048 out:
3049 	/* once for us */
3050 	free_extent_map(em);
3051 	return ret;
3052 }
3053 
3054 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3055 {
3056 	struct btrfs_root *root = fs_info->chunk_root;
3057 	struct btrfs_trans_handle *trans;
3058 	int ret;
3059 
3060 	/*
3061 	 * Prevent races with automatic removal of unused block groups.
3062 	 * After we relocate and before we remove the chunk with offset
3063 	 * chunk_offset, automatic removal of the block group can kick in,
3064 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3065 	 *
3066 	 * Make sure to acquire this mutex before doing a tree search (dev
3067 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3068 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3069 	 * we release the path used to search the chunk/dev tree and before
3070 	 * the current task acquires this mutex and calls us.
3071 	 */
3072 	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3073 
3074 	/* step one, relocate all the extents inside this chunk */
3075 	btrfs_scrub_pause(fs_info);
3076 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3077 	btrfs_scrub_continue(fs_info);
3078 	if (ret)
3079 		return ret;
3080 
3081 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3082 						     chunk_offset);
3083 	if (IS_ERR(trans)) {
3084 		ret = PTR_ERR(trans);
3085 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3086 		return ret;
3087 	}
3088 
3089 	/*
3090 	 * step two, delete the device extents and the
3091 	 * chunk tree entries
3092 	 */
3093 	ret = btrfs_remove_chunk(trans, chunk_offset);
3094 	btrfs_end_transaction(trans);
3095 	return ret;
3096 }
3097 
3098 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3099 {
3100 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3101 	struct btrfs_path *path;
3102 	struct extent_buffer *leaf;
3103 	struct btrfs_chunk *chunk;
3104 	struct btrfs_key key;
3105 	struct btrfs_key found_key;
3106 	u64 chunk_type;
3107 	bool retried = false;
3108 	int failed = 0;
3109 	int ret;
3110 
3111 	path = btrfs_alloc_path();
3112 	if (!path)
3113 		return -ENOMEM;
3114 
3115 again:
3116 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3117 	key.offset = (u64)-1;
3118 	key.type = BTRFS_CHUNK_ITEM_KEY;
3119 
3120 	while (1) {
3121 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3122 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3123 		if (ret < 0) {
3124 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3125 			goto error;
3126 		}
3127 		BUG_ON(ret == 0); /* Corruption */
3128 
3129 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3130 					  key.type);
3131 		if (ret)
3132 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3133 		if (ret < 0)
3134 			goto error;
3135 		if (ret > 0)
3136 			break;
3137 
3138 		leaf = path->nodes[0];
3139 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3140 
3141 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3142 				       struct btrfs_chunk);
3143 		chunk_type = btrfs_chunk_type(leaf, chunk);
3144 		btrfs_release_path(path);
3145 
3146 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3147 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3148 			if (ret == -ENOSPC)
3149 				failed++;
3150 			else
3151 				BUG_ON(ret);
3152 		}
3153 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3154 
3155 		if (found_key.offset == 0)
3156 			break;
3157 		key.offset = found_key.offset - 1;
3158 	}
3159 	ret = 0;
3160 	if (failed && !retried) {
3161 		failed = 0;
3162 		retried = true;
3163 		goto again;
3164 	} else if (WARN_ON(failed && retried)) {
3165 		ret = -ENOSPC;
3166 	}
3167 error:
3168 	btrfs_free_path(path);
3169 	return ret;
3170 }
3171 
3172 /*
3173  * Return 1 if a data chunk was allocated successfully,
3174  * return <0 on errors while allocating a data chunk,
3175  * return 0 if there was no need to allocate a data chunk.
3176  */
3177 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3178 				      u64 chunk_offset)
3179 {
3180 	struct btrfs_block_group_cache *cache;
3181 	u64 bytes_used;
3182 	u64 chunk_type;
3183 
3184 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3185 	ASSERT(cache);
3186 	chunk_type = cache->flags;
3187 	btrfs_put_block_group(cache);
3188 
3189 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
3190 		spin_lock(&fs_info->data_sinfo->lock);
3191 		bytes_used = fs_info->data_sinfo->bytes_used;
3192 		spin_unlock(&fs_info->data_sinfo->lock);
3193 
3194 		if (!bytes_used) {
3195 			struct btrfs_trans_handle *trans;
3196 			int ret;
3197 
3198 			trans =	btrfs_join_transaction(fs_info->tree_root);
3199 			if (IS_ERR(trans))
3200 				return PTR_ERR(trans);
3201 
3202 			ret = btrfs_force_chunk_alloc(trans,
3203 						      BTRFS_BLOCK_GROUP_DATA);
3204 			btrfs_end_transaction(trans);
3205 			if (ret < 0)
3206 				return ret;
3207 			return 1;
3208 		}
3209 	}
3210 	return 0;
3211 }
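/*
 * Example for btrfs_may_alloc_data_chunk() (illustrative only): when no
 * data bytes are in use, relocating away the only data chunk would leave
 * nowhere for new data extents to land, so the helper force-allocates a
 * fresh data chunk first and returns 1 to tell the caller one now exists.
 */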
3212 
3213 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3214 			       struct btrfs_balance_control *bctl)
3215 {
3216 	struct btrfs_root *root = fs_info->tree_root;
3217 	struct btrfs_trans_handle *trans;
3218 	struct btrfs_balance_item *item;
3219 	struct btrfs_disk_balance_args disk_bargs;
3220 	struct btrfs_path *path;
3221 	struct extent_buffer *leaf;
3222 	struct btrfs_key key;
3223 	int ret, err;
3224 
3225 	path = btrfs_alloc_path();
3226 	if (!path)
3227 		return -ENOMEM;
3228 
3229 	trans = btrfs_start_transaction(root, 0);
3230 	if (IS_ERR(trans)) {
3231 		btrfs_free_path(path);
3232 		return PTR_ERR(trans);
3233 	}
3234 
3235 	key.objectid = BTRFS_BALANCE_OBJECTID;
3236 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3237 	key.offset = 0;
3238 
3239 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3240 				      sizeof(*item));
3241 	if (ret)
3242 		goto out;
3243 
3244 	leaf = path->nodes[0];
3245 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3246 
3247 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3248 
3249 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3250 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3251 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3252 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3253 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3254 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3255 
3256 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3257 
3258 	btrfs_mark_buffer_dirty(leaf);
3259 out:
3260 	btrfs_free_path(path);
3261 	err = btrfs_commit_transaction(trans);
3262 	if (err && !ret)
3263 		ret = err;
3264 	return ret;
3265 }
3266 
3267 static int del_balance_item(struct btrfs_fs_info *fs_info)
3268 {
3269 	struct btrfs_root *root = fs_info->tree_root;
3270 	struct btrfs_trans_handle *trans;
3271 	struct btrfs_path *path;
3272 	struct btrfs_key key;
3273 	int ret, err;
3274 
3275 	path = btrfs_alloc_path();
3276 	if (!path)
3277 		return -ENOMEM;
3278 
3279 	trans = btrfs_start_transaction(root, 0);
3280 	if (IS_ERR(trans)) {
3281 		btrfs_free_path(path);
3282 		return PTR_ERR(trans);
3283 	}
3284 
3285 	key.objectid = BTRFS_BALANCE_OBJECTID;
3286 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3287 	key.offset = 0;
3288 
3289 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3290 	if (ret < 0)
3291 		goto out;
3292 	if (ret > 0) {
3293 		ret = -ENOENT;
3294 		goto out;
3295 	}
3296 
3297 	ret = btrfs_del_item(trans, root, path);
3298 out:
3299 	btrfs_free_path(path);
3300 	err = btrfs_commit_transaction(trans);
3301 	if (err && !ret)
3302 		ret = err;
3303 	return ret;
3304 }
3305 
3306 /*
3307  * This is a heuristic used to reduce the number of chunks balanced on
3308  * resume after balance was interrupted.
3309  */
3310 static void update_balance_args(struct btrfs_balance_control *bctl)
3311 {
3312 	/*
3313 	 * Turn on soft mode for chunk types that were being converted.
3314 	 */
3315 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3316 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3317 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3318 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3319 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3320 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3321 
3322 	/*
3323 	 * Turn on usage filter if it is not already used.  The idea is
3324 	 * that chunks that we have already balanced should be
3325 	 * reasonably full.  Don't do it for chunks that are being
3326 	 * converted - that will keep us from relocating unconverted
3327 	 * (albeit full) chunks.
3328 	 */
3329 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3330 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3331 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3332 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3333 		bctl->data.usage = 90;
3334 	}
3335 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3336 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3337 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3338 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3339 		bctl->sys.usage = 90;
3340 	}
3341 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3342 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3343 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3344 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3345 		bctl->meta.usage = 90;
3346 	}
3347 }
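/*
 * Example for update_balance_args() (illustrative only): a plain
 * "btrfs balance start" that is interrupted resumes as if
 * "-dusage=90 -musage=90 -susage=90" had been given, so chunks the
 * previous run already compacted are skipped; a convert balance resumes
 * in soft mode instead.
 */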
3348 
3349 /*
3350  * Clear the balance status in fs_info and delete the balance item from disk.
3351  */
3352 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3353 {
3354 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3355 	int ret;
3356 
3357 	BUG_ON(!fs_info->balance_ctl);
3358 
3359 	spin_lock(&fs_info->balance_lock);
3360 	fs_info->balance_ctl = NULL;
3361 	spin_unlock(&fs_info->balance_lock);
3362 
3363 	kfree(bctl);
3364 	ret = del_balance_item(fs_info);
3365 	if (ret)
3366 		btrfs_handle_fs_error(fs_info, ret, NULL);
3367 }
3368 
3369 /*
3370  * Balance filters.  Return 1 if chunk should be filtered out
3371  * (should not be balanced).
3372  */
3373 static int chunk_profiles_filter(u64 chunk_type,
3374 				 struct btrfs_balance_args *bargs)
3375 {
3376 	chunk_type = chunk_to_extended(chunk_type) &
3377 				BTRFS_EXTENDED_PROFILE_MASK;
3378 
3379 	if (bargs->profiles & chunk_type)
3380 		return 0;
3381 
3382 	return 1;
3383 }
3384 
3385 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3386 			      struct btrfs_balance_args *bargs)
3387 {
3388 	struct btrfs_block_group_cache *cache;
3389 	u64 chunk_used;
3390 	u64 user_thresh_min;
3391 	u64 user_thresh_max;
3392 	int ret = 1;
3393 
3394 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3395 	chunk_used = btrfs_block_group_used(&cache->item);
3396 
3397 	if (bargs->usage_min == 0)
3398 		user_thresh_min = 0;
3399 	else
3400 		user_thresh_min = div_factor_fine(cache->key.offset,
3401 					bargs->usage_min);
3402 
3403 	if (bargs->usage_max == 0)
3404 		user_thresh_max = 1;
3405 	else if (bargs->usage_max > 100)
3406 		user_thresh_max = cache->key.offset;
3407 	else
3408 		user_thresh_max = div_factor_fine(cache->key.offset,
3409 					bargs->usage_max);
3410 
3411 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3412 		ret = 0;
3413 
3414 	btrfs_put_block_group(cache);
3415 	return ret;
3416 }
3417 
3418 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3419 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3420 {
3421 	struct btrfs_block_group_cache *cache;
3422 	u64 chunk_used, user_thresh;
3423 	int ret = 1;
3424 
3425 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3426 	chunk_used = btrfs_block_group_used(&cache->item);
3427 
3428 	if (bargs->usage_min == 0)
3429 		user_thresh = 1;
3430 	else if (bargs->usage > 100)
3431 		user_thresh = cache->key.offset;
3432 	else
3433 		user_thresh = div_factor_fine(cache->key.offset,
3434 					      bargs->usage);
3435 
3436 	if (chunk_used < user_thresh)
3437 		ret = 0;
3438 
3439 	btrfs_put_block_group(cache);
3440 	return ret;
3441 }
3442 
3443 static int chunk_devid_filter(struct extent_buffer *leaf,
3444 			      struct btrfs_chunk *chunk,
3445 			      struct btrfs_balance_args *bargs)
3446 {
3447 	struct btrfs_stripe *stripe;
3448 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3449 	int i;
3450 
3451 	for (i = 0; i < num_stripes; i++) {
3452 		stripe = btrfs_stripe_nr(chunk, i);
3453 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3454 			return 0;
3455 	}
3456 
3457 	return 1;
3458 }
3459 
3460 static u64 calc_data_stripes(u64 type, int num_stripes)
3461 {
3462 	const int index = btrfs_bg_flags_to_raid_index(type);
3463 	const int ncopies = btrfs_raid_array[index].ncopies;
3464 	const int nparity = btrfs_raid_array[index].nparity;
3465 
3466 	if (nparity)
3467 		return num_stripes - nparity;
3468 	else
3469 		return num_stripes / ncopies;
3470 }
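/*
 * Examples for calc_data_stripes() (illustrative only): raid5 with 4
 * stripes has nparity == 1 and thus 3 data stripes; raid6 with 6 stripes
 * has nparity == 2 and thus 4; raid1 with 2 stripes has ncopies == 2 and
 * thus 1; raid0 and single keep every stripe as data.
 */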
3471 
3472 /* [pstart, pend) */
3473 static int chunk_drange_filter(struct extent_buffer *leaf,
3474 			       struct btrfs_chunk *chunk,
3475 			       struct btrfs_balance_args *bargs)
3476 {
3477 	struct btrfs_stripe *stripe;
3478 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3479 	u64 stripe_offset;
3480 	u64 stripe_length;
3481 	u64 type;
3482 	int factor;
3483 	int i;
3484 
3485 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3486 		return 0;
3487 
3488 	type = btrfs_chunk_type(leaf, chunk);
3489 	factor = calc_data_stripes(type, num_stripes);
3490 
3491 	for (i = 0; i < num_stripes; i++) {
3492 		stripe = btrfs_stripe_nr(chunk, i);
3493 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3494 			continue;
3495 
3496 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3497 		stripe_length = btrfs_chunk_length(leaf, chunk);
3498 		stripe_length = div_u64(stripe_length, factor);
3499 
3500 		if (stripe_offset < bargs->pend &&
3501 		    stripe_offset + stripe_length > bargs->pstart)
3502 			return 0;
3503 	}
3504 
3505 	return 1;
3506 }
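/*
 * Example for chunk_drange_filter() (illustrative only): a 1G raid5 chunk
 * striped over 4 devices has 3 data stripes, so each device holds 1G / 3
 * of it; that per-device extent
 * [stripe_offset, stripe_offset + stripe_length) is what gets tested
 * against the caller's [pstart, pend) range.
 */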
3507 
3508 /* [vstart, vend) */
3509 static int chunk_vrange_filter(struct extent_buffer *leaf,
3510 			       struct btrfs_chunk *chunk,
3511 			       u64 chunk_offset,
3512 			       struct btrfs_balance_args *bargs)
3513 {
3514 	if (chunk_offset < bargs->vend &&
3515 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3516 		/* at least part of the chunk is inside this vrange */
3517 		return 0;
3518 
3519 	return 1;
3520 }
3521 
3522 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3523 			       struct btrfs_chunk *chunk,
3524 			       struct btrfs_balance_args *bargs)
3525 {
3526 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3527 
3528 	if (bargs->stripes_min <= num_stripes
3529 			&& num_stripes <= bargs->stripes_max)
3530 		return 0;
3531 
3532 	return 1;
3533 }
3534 
3535 static int chunk_soft_convert_filter(u64 chunk_type,
3536 				     struct btrfs_balance_args *bargs)
3537 {
3538 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3539 		return 0;
3540 
3541 	chunk_type = chunk_to_extended(chunk_type) &
3542 				BTRFS_EXTENDED_PROFILE_MASK;
3543 
3544 	if (bargs->target == chunk_type)
3545 		return 1;
3546 
3547 	return 0;
3548 }
3549 
3550 static int should_balance_chunk(struct extent_buffer *leaf,
3551 				struct btrfs_chunk *chunk, u64 chunk_offset)
3552 {
3553 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3554 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3555 	struct btrfs_balance_args *bargs = NULL;
3556 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3557 
3558 	/* type filter */
3559 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3560 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3561 		return 0;
3562 	}
3563 
3564 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3565 		bargs = &bctl->data;
3566 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3567 		bargs = &bctl->sys;
3568 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3569 		bargs = &bctl->meta;
3570 
3571 	/* profiles filter */
3572 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3573 	    chunk_profiles_filter(chunk_type, bargs)) {
3574 		return 0;
3575 	}
3576 
3577 	/* usage filter */
3578 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3579 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3580 		return 0;
3581 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3582 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3583 		return 0;
3584 	}
3585 
3586 	/* devid filter */
3587 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3588 	    chunk_devid_filter(leaf, chunk, bargs)) {
3589 		return 0;
3590 	}
3591 
3592 	/* drange filter, makes sense only with devid filter */
3593 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3594 	    chunk_drange_filter(leaf, chunk, bargs)) {
3595 		return 0;
3596 	}
3597 
3598 	/* vrange filter */
3599 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3600 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3601 		return 0;
3602 	}
3603 
3604 	/* stripes filter */
3605 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3606 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3607 		return 0;
3608 	}
3609 
3610 	/* soft profile changing mode */
3611 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3612 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3613 		return 0;
3614 	}
3615 
3616 	/*
3617 	 * limited by count, must be the last filter
3618 	 */
3619 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3620 		if (bargs->limit == 0)
3621 			return 0;
3622 		else
3623 			bargs->limit--;
3624 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3625 		/*
3626 		 * Same logic as the 'limit' filter; the minimum cannot be
3627 		 * determined here because we do not have the global information
3628 		 * about the count of all chunks that satisfy the filters.
3629 		 */
3630 		if (bargs->limit_max == 0)
3631 			return 0;
3632 		else
3633 			bargs->limit_max--;
3634 	}
3635 
3636 	return 1;
3637 }
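
/*
 * Illustrative mapping from a balance request to the filter chain above,
 * using btrfs-progs syntax with made-up values:
 *
 *	btrfs balance start -dusage=50,devid=2 /mnt
 *
 * arrives here with bctl->data.flags set to BTRFS_BALANCE_ARGS_USAGE |
 * BTRFS_BALANCE_ARGS_DEVID, usage = 50 and devid = 2.  A data chunk is
 * relocated only if every enabled filter returns 0, i.e. it is at most
 * 50% used and has a stripe on device 2.
 */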
3638 
3639 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3640 {
3641 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3642 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3643 	u64 chunk_type;
3644 	struct btrfs_chunk *chunk;
3645 	struct btrfs_path *path = NULL;
3646 	struct btrfs_key key;
3647 	struct btrfs_key found_key;
3648 	struct extent_buffer *leaf;
3649 	int slot;
3650 	int ret;
3651 	int enospc_errors = 0;
3652 	bool counting = true;
3653 	/* The single value limit and min/max limits share bytes in btrfs_balance_args, so save them here */
3654 	u64 limit_data = bctl->data.limit;
3655 	u64 limit_meta = bctl->meta.limit;
3656 	u64 limit_sys = bctl->sys.limit;
3657 	u32 count_data = 0;
3658 	u32 count_meta = 0;
3659 	u32 count_sys = 0;
3660 	int chunk_reserved = 0;
3661 
3662 	path = btrfs_alloc_path();
3663 	if (!path) {
3664 		ret = -ENOMEM;
3665 		goto error;
3666 	}
3667 
3668 	/* zero out stat counters */
3669 	spin_lock(&fs_info->balance_lock);
3670 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3671 	spin_unlock(&fs_info->balance_lock);
3672 again:
3673 	if (!counting) {
3674 		/*
3675 		 * The single value limit and min/max limits use the same bytes
3676 		 * in struct btrfs_balance_args, so restore the single values now.
3677 		 */
3678 		bctl->data.limit = limit_data;
3679 		bctl->meta.limit = limit_meta;
3680 		bctl->sys.limit = limit_sys;
3681 	}
3682 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3683 	key.offset = (u64)-1;
3684 	key.type = BTRFS_CHUNK_ITEM_KEY;
3685 
3686 	while (1) {
3687 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3688 		    atomic_read(&fs_info->balance_cancel_req)) {
3689 			ret = -ECANCELED;
3690 			goto error;
3691 		}
3692 
3693 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3694 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3695 		if (ret < 0) {
3696 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3697 			goto error;
3698 		}
3699 
3700 		/*
3701 		 * This shouldn't happen; it means the last chunk relocation
3702 		 * failed.
3703 		 */
3704 		if (ret == 0)
3705 			BUG(); /* FIXME break ? */
3706 
3707 		ret = btrfs_previous_item(chunk_root, path, 0,
3708 					  BTRFS_CHUNK_ITEM_KEY);
3709 		if (ret) {
3710 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3711 			ret = 0;
3712 			break;
3713 		}
3714 
3715 		leaf = path->nodes[0];
3716 		slot = path->slots[0];
3717 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3718 
3719 		if (found_key.objectid != key.objectid) {
3720 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3721 			break;
3722 		}
3723 
3724 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3725 		chunk_type = btrfs_chunk_type(leaf, chunk);
3726 
3727 		if (!counting) {
3728 			spin_lock(&fs_info->balance_lock);
3729 			bctl->stat.considered++;
3730 			spin_unlock(&fs_info->balance_lock);
3731 		}
3732 
3733 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3734 
3735 		btrfs_release_path(path);
3736 		if (!ret) {
3737 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3738 			goto loop;
3739 		}
3740 
3741 		if (counting) {
3742 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3743 			spin_lock(&fs_info->balance_lock);
3744 			bctl->stat.expected++;
3745 			spin_unlock(&fs_info->balance_lock);
3746 
3747 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3748 				count_data++;
3749 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3750 				count_sys++;
3751 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3752 				count_meta++;
3753 
3754 			goto loop;
3755 		}
3756 
3757 		/*
3758 		 * Apply the limit_min filter; no need to check whether the
3759 		 * limit filters are enabled, as limit_min is 0 by default.
3760 		 */
3761 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3762 					count_data < bctl->data.limit_min)
3763 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3764 					count_meta < bctl->meta.limit_min)
3765 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3766 					count_sys < bctl->sys.limit_min)) {
3767 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3768 			goto loop;
3769 		}
3770 
3771 		if (!chunk_reserved) {
3772 			/*
3773 			 * We may be relocating the only data chunk we have,
3774 			 * which could potentially end up losing the data's
3775 			 * raid profile, so let's allocate an empty one in
3776 			 * advance.
3777 			 */
3778 			ret = btrfs_may_alloc_data_chunk(fs_info,
3779 							 found_key.offset);
3780 			if (ret < 0) {
3781 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3782 				goto error;
3783 			} else if (ret == 1) {
3784 				chunk_reserved = 1;
3785 			}
3786 		}
3787 
3788 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3789 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3790 		if (ret == -ENOSPC) {
3791 			enospc_errors++;
3792 		} else if (ret == -ETXTBSY) {
3793 			btrfs_info(fs_info,
3794 	   "skipping relocation of block group %llu due to active swapfile",
3795 				   found_key.offset);
3796 			ret = 0;
3797 		} else if (ret) {
3798 			goto error;
3799 		} else {
3800 			spin_lock(&fs_info->balance_lock);
3801 			bctl->stat.completed++;
3802 			spin_unlock(&fs_info->balance_lock);
3803 		}
3804 loop:
3805 		if (found_key.offset == 0)
3806 			break;
3807 		key.offset = found_key.offset - 1;
3808 	}
3809 
3810 	if (counting) {
3811 		btrfs_release_path(path);
3812 		counting = false;
3813 		goto again;
3814 	}
3815 error:
3816 	btrfs_free_path(path);
3817 	if (enospc_errors) {
3818 		btrfs_info(fs_info, "%d enospc errors during balance",
3819 			   enospc_errors);
3820 		if (!ret)
3821 			ret = -ENOSPC;
3822 	}
3823 
3824 	return ret;
3825 }
3826 
3827 /**
3828  * alloc_profile_is_valid - see if a given profile is valid and reduced
3829  * @flags: profile to validate
3830  * @extended: if true @flags is treated as an extended profile
3831  */
3832 static int alloc_profile_is_valid(u64 flags, int extended)
3833 {
3834 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3835 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3836 
3837 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3838 
3839 	/* 1) check that all other bits are zeroed */
3840 	if (flags & ~mask)
3841 		return 0;
3842 
3843 	/* 2) see if profile is reduced */
3844 	if (flags == 0)
3845 		return !extended; /* "0" is valid for usual profiles */
3846 
3847 	/* true if exactly one bit set */
3848 	return is_power_of_2(flags);
3849 }
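
/*
 * A few worked examples for alloc_profile_is_valid(), with the
 * BTRFS_BLOCK_GROUP_ prefixes dropped for brevity:
 *
 *	DATA | RAID1 with extended == 0: one profile bit left -> valid
 *	RAID1 | RAID10 with extended == 0: two profile bits set -> invalid
 *	0 with extended == 0: implicit single -> valid
 *	0 with extended == 1: extended single must be spelled with
 *	BTRFS_AVAIL_ALLOC_BIT_SINGLE -> invalid
 */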
3850 
3851 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3852 {
3853 	/* cancel requested || normal exit path */
3854 	return atomic_read(&fs_info->balance_cancel_req) ||
3855 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3856 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3857 }
3858 
3859 /* Non-zero return value signifies invalidity */
3860 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3861 		u64 allowed)
3862 {
3863 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3864 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3865 		 (bctl_arg->target & ~allowed)));
3866 }
3867 
3868 /*
3869  * Fill @buf with textual description of balance filter flags @bargs, up to
3870  * @size_buf including the terminating null. The output may be trimmed if it
3871  * does not fit into the provided buffer.
3872  */
3873 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3874 				 u32 size_buf)
3875 {
3876 	int ret;
3877 	u32 size_bp = size_buf;
3878 	char *bp = buf;
3879 	u64 flags = bargs->flags;
3880 	char tmp_buf[128] = {'\0'};
3881 
3882 	if (!flags)
3883 		return;
3884 
3885 #define CHECK_APPEND_NOARG(a)						\
3886 	do {								\
3887 		ret = snprintf(bp, size_bp, (a));			\
3888 		if (ret < 0 || ret >= size_bp)				\
3889 			goto out_overflow;				\
3890 		size_bp -= ret;						\
3891 		bp += ret;						\
3892 	} while (0)
3893 
3894 #define CHECK_APPEND_1ARG(a, v1)					\
3895 	do {								\
3896 		ret = snprintf(bp, size_bp, (a), (v1));			\
3897 		if (ret < 0 || ret >= size_bp)				\
3898 			goto out_overflow;				\
3899 		size_bp -= ret;						\
3900 		bp += ret;						\
3901 	} while (0)
3902 
3903 #define CHECK_APPEND_2ARG(a, v1, v2)					\
3904 	do {								\
3905 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
3906 		if (ret < 0 || ret >= size_bp)				\
3907 			goto out_overflow;				\
3908 		size_bp -= ret;						\
3909 		bp += ret;						\
3910 	} while (0)
3911 
3912 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3913 		CHECK_APPEND_1ARG("convert=%s,",
3914 				  btrfs_bg_type_to_raid_name(bargs->target));
3915 
3916 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
3917 		CHECK_APPEND_NOARG("soft,");
3918 
3919 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3920 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3921 					    sizeof(tmp_buf));
3922 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3923 	}
3924 
3925 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
3926 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3927 
3928 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3929 		CHECK_APPEND_2ARG("usage=%u..%u,",
3930 				  bargs->usage_min, bargs->usage_max);
3931 
3932 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
3933 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3934 
3935 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3936 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
3937 				  bargs->pstart, bargs->pend);
3938 
3939 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3940 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3941 				  bargs->vstart, bargs->vend);
3942 
3943 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3944 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3945 
3946 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3947 		CHECK_APPEND_2ARG("limit=%u..%u,",
3948 				bargs->limit_min, bargs->limit_max);
3949 
3950 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3951 		CHECK_APPEND_2ARG("stripes=%u..%u,",
3952 				  bargs->stripes_min, bargs->stripes_max);
3953 
3954 #undef CHECK_APPEND_2ARG
3955 #undef CHECK_APPEND_1ARG
3956 #undef CHECK_APPEND_NOARG
3957 
3958 out_overflow:
3959 
3960 	if (size_bp < size_buf)
3961 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3962 	else
3963 		buf[0] = '\0';
3964 }
3965 
3966 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3967 {
3968 	u32 size_buf = 1024;
3969 	char tmp_buf[192] = {'\0'};
3970 	char *buf;
3971 	char *bp;
3972 	u32 size_bp = size_buf;
3973 	int ret;
3974 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3975 
3976 	buf = kzalloc(size_buf, GFP_KERNEL);
3977 	if (!buf)
3978 		return;
3979 
3980 	bp = buf;
3981 
3982 #define CHECK_APPEND_1ARG(a, v1)					\
3983 	do {								\
3984 		ret = snprintf(bp, size_bp, (a), (v1));			\
3985 		if (ret < 0 || ret >= size_bp)				\
3986 			goto out_overflow;				\
3987 		size_bp -= ret;						\
3988 		bp += ret;						\
3989 	} while (0)
3990 
3991 	if (bctl->flags & BTRFS_BALANCE_FORCE)
3992 		CHECK_APPEND_1ARG("%s", "-f ");
3993 
3994 	if (bctl->flags & BTRFS_BALANCE_DATA) {
3995 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
3996 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
3997 	}
3998 
3999 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4000 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4001 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4002 	}
4003 
4004 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4005 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4006 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4007 	}
4008 
4009 #undef CHECK_APPEND_1ARG
4010 
4011 out_overflow:
4012 
4013 	if (size_bp < size_buf)
4014 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4015 	btrfs_info(fs_info, "balance: %s %s",
4016 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4017 		   "resume" : "start", buf);
4018 
4019 	kfree(buf);
4020 }
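
/*
 * Example of the resulting log line for a forced convert of all three
 * block group types to raid1 with the soft filter on data (device name
 * illustrative):
 *
 *	BTRFS info (device sda): balance: start -f -dconvert=raid1,soft
 *	-mconvert=raid1 -sconvert=raid1
 */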
4021 
4022 /*
4023  * Should be called with the balance mutex held.
4024  */
4025 int btrfs_balance(struct btrfs_fs_info *fs_info,
4026 		  struct btrfs_balance_control *bctl,
4027 		  struct btrfs_ioctl_balance_args *bargs)
4028 {
4029 	u64 meta_target, data_target;
4030 	u64 allowed;
4031 	int mixed = 0;
4032 	int ret;
4033 	u64 num_devices;
4034 	unsigned seq;
4035 	bool reducing_integrity;
4036 	int i;
4037 
4038 	if (btrfs_fs_closing(fs_info) ||
4039 	    atomic_read(&fs_info->balance_pause_req) ||
4040 	    atomic_read(&fs_info->balance_cancel_req)) {
4041 		ret = -EINVAL;
4042 		goto out;
4043 	}
4044 
4045 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4046 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4047 		mixed = 1;
4048 
4049 	/*
4050 	 * In case of mixed groups both data and meta should be picked,
4051 	 * and identical options should be given for both of them.
4052 	 */
4053 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4054 	if (mixed && (bctl->flags & allowed)) {
4055 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4056 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4057 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4058 			btrfs_err(fs_info,
4059 	  "balance: mixed groups data and metadata options must be the same");
4060 			ret = -EINVAL;
4061 			goto out;
4062 		}
4063 	}
4064 
4065 	num_devices = btrfs_num_devices(fs_info);
4066 
4067 	/*
4068 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4069 	 * special bit for it, to make it easier to distinguish.  Thus we need
4070 	 * to set it manually, or balance would refuse the profile.
4071 	 */
4072 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4073 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4074 		if (num_devices >= btrfs_raid_array[i].devs_min)
4075 			allowed |= btrfs_raid_array[i].bg_flag;
4076 
4077 	if (validate_convert_profile(&bctl->data, allowed)) {
4078 		btrfs_err(fs_info,
4079 			  "balance: invalid convert data profile %s",
4080 			  btrfs_bg_type_to_raid_name(bctl->data.target));
4081 		ret = -EINVAL;
4082 		goto out;
4083 	}
4084 	if (validate_convert_profile(&bctl->meta, allowed)) {
4085 		btrfs_err(fs_info,
4086 			  "balance: invalid convert metadata profile %s",
4087 			  btrfs_bg_type_to_raid_name(bctl->meta.target));
4088 		ret = -EINVAL;
4089 		goto out;
4090 	}
4091 	if (validate_convert_profile(&bctl->sys, allowed)) {
4092 		btrfs_err(fs_info,
4093 			  "balance: invalid convert system profile %s",
4094 			  btrfs_bg_type_to_raid_name(bctl->sys.target));
4095 		ret = -EINVAL;
4096 		goto out;
4097 	}
4098 
4099 	/*
4100 	 * Allow reducing metadata or system integrity only if force is set,
4101 	 * and only for profiles with redundancy (copies, parity).
4102 	 */
4103 	allowed = 0;
4104 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4105 		if (btrfs_raid_array[i].ncopies >= 2 ||
4106 		    btrfs_raid_array[i].tolerated_failures >= 1)
4107 			allowed |= btrfs_raid_array[i].bg_flag;
4108 	}
4109 	do {
4110 		seq = read_seqbegin(&fs_info->profiles_lock);
4111 
4112 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4113 		     (fs_info->avail_system_alloc_bits & allowed) &&
4114 		     !(bctl->sys.target & allowed)) ||
4115 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4116 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4117 		     !(bctl->meta.target & allowed)))
4118 			reducing_integrity = true;
4119 		else
4120 			reducing_integrity = false;
4121 
4122 		/* if we're not converting, the target field is uninitialized */
4123 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4124 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4125 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4126 			bctl->data.target : fs_info->avail_data_alloc_bits;
4127 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4128 
4129 	if (reducing_integrity) {
4130 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4131 			btrfs_info(fs_info,
4132 				   "balance: force reducing metadata integrity");
4133 		} else {
4134 			btrfs_err(fs_info,
4135 	  "balance: reduces metadata integrity, use --force if you want this");
4136 			ret = -EINVAL;
4137 			goto out;
4138 		}
4139 	}
4140 
4141 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4142 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4143 		btrfs_warn(fs_info,
4144 	"balance: metadata profile %s has lower redundancy than data profile %s",
4145 				btrfs_bg_type_to_raid_name(meta_target),
4146 				btrfs_bg_type_to_raid_name(data_target));
4147 	}
4148 
4149 	if (fs_info->send_in_progress) {
4150 		btrfs_warn_rl(fs_info,
4151 "cannot run balance while send operations are in progress (%d in progress)",
4152 			      fs_info->send_in_progress);
4153 		ret = -EAGAIN;
4154 		goto out;
4155 	}
4156 
4157 	ret = insert_balance_item(fs_info, bctl);
4158 	if (ret && ret != -EEXIST)
4159 		goto out;
4160 
4161 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4162 		BUG_ON(ret == -EEXIST);
4163 		BUG_ON(fs_info->balance_ctl);
4164 		spin_lock(&fs_info->balance_lock);
4165 		fs_info->balance_ctl = bctl;
4166 		spin_unlock(&fs_info->balance_lock);
4167 	} else {
4168 		BUG_ON(ret != -EEXIST);
4169 		spin_lock(&fs_info->balance_lock);
4170 		update_balance_args(bctl);
4171 		spin_unlock(&fs_info->balance_lock);
4172 	}
4173 
4174 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4175 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4176 	describe_balance_start_or_resume(fs_info);
4177 	mutex_unlock(&fs_info->balance_mutex);
4178 
4179 	ret = __btrfs_balance(fs_info);
4180 
4181 	mutex_lock(&fs_info->balance_mutex);
4182 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4183 		btrfs_info(fs_info, "balance: paused");
4184 	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
4185 		btrfs_info(fs_info, "balance: canceled");
4186 	else
4187 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4188 
4189 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4190 
4191 	if (bargs) {
4192 		memset(bargs, 0, sizeof(*bargs));
4193 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4194 	}
4195 
4196 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4197 	    balance_need_close(fs_info)) {
4198 		reset_balance_state(fs_info);
4199 		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4200 	}
4201 
4202 	wake_up(&fs_info->balance_wait_q);
4203 
4204 	return ret;
4205 out:
4206 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4207 		reset_balance_state(fs_info);
4208 	else
4209 		kfree(bctl);
4210 	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4211 
4212 	return ret;
4213 }
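
/*
 * Sketch of the userspace side, for orientation only: btrfs_balance() is
 * normally reached through the BTRFS_IOC_BALANCE_V2 ioctl.  Names are from
 * <linux/btrfs.h>; error handling and the open() of the fs root directory
 * are omitted:
 *
 *	struct btrfs_ioctl_balance_args args = { 0 };
 *
 *	args.flags = BTRFS_BALANCE_DATA;
 *	args.data.flags = BTRFS_BALANCE_ARGS_USAGE;
 *	args.data.usage = 50;
 *	ret = ioctl(fd, BTRFS_IOC_BALANCE_V2, &args);
 *
 * On return the kernel has filled in the state and progress stats via
 * btrfs_update_ioctl_balance_args(), mirroring the bargs handling above.
 */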
4214 
4215 static int balance_kthread(void *data)
4216 {
4217 	struct btrfs_fs_info *fs_info = data;
4218 	int ret = 0;
4219 
4220 	mutex_lock(&fs_info->balance_mutex);
4221 	if (fs_info->balance_ctl)
4222 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4223 	mutex_unlock(&fs_info->balance_mutex);
4224 
4225 	return ret;
4226 }
4227 
4228 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4229 {
4230 	struct task_struct *tsk;
4231 
4232 	mutex_lock(&fs_info->balance_mutex);
4233 	if (!fs_info->balance_ctl) {
4234 		mutex_unlock(&fs_info->balance_mutex);
4235 		return 0;
4236 	}
4237 	mutex_unlock(&fs_info->balance_mutex);
4238 
4239 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4240 		btrfs_info(fs_info, "balance: resume skipped");
4241 		return 0;
4242 	}
4243 
4244 	/*
4245 	 * A ro->rw remount sequence should continue with the paused balance
4246 	 * regardless of who paused it, the system or the user as of now, so set
4247 	 * the resume flag.
4248 	 */
4249 	spin_lock(&fs_info->balance_lock);
4250 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4251 	spin_unlock(&fs_info->balance_lock);
4252 
4253 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4254 	return PTR_ERR_OR_ZERO(tsk);
4255 }
4256 
4257 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4258 {
4259 	struct btrfs_balance_control *bctl;
4260 	struct btrfs_balance_item *item;
4261 	struct btrfs_disk_balance_args disk_bargs;
4262 	struct btrfs_path *path;
4263 	struct extent_buffer *leaf;
4264 	struct btrfs_key key;
4265 	int ret;
4266 
4267 	path = btrfs_alloc_path();
4268 	if (!path)
4269 		return -ENOMEM;
4270 
4271 	key.objectid = BTRFS_BALANCE_OBJECTID;
4272 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4273 	key.offset = 0;
4274 
4275 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4276 	if (ret < 0)
4277 		goto out;
4278 	if (ret > 0) { /* ret = -ENOENT; */
4279 		ret = 0;
4280 		goto out;
4281 	}
4282 
4283 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4284 	if (!bctl) {
4285 		ret = -ENOMEM;
4286 		goto out;
4287 	}
4288 
4289 	leaf = path->nodes[0];
4290 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4291 
4292 	bctl->flags = btrfs_balance_flags(leaf, item);
4293 	bctl->flags |= BTRFS_BALANCE_RESUME;
4294 
4295 	btrfs_balance_data(leaf, item, &disk_bargs);
4296 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4297 	btrfs_balance_meta(leaf, item, &disk_bargs);
4298 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4299 	btrfs_balance_sys(leaf, item, &disk_bargs);
4300 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4301 
4302 	/*
4303 	 * This should never happen, as the paused balance state is recovered
4304 	 * during mount without any chance of other exclusive ops colliding.
4305 	 *
4306 	 * This gives the exclusive op status to balance and keeps it in paused
4307 	 * state until user intervention (cancel or umount). If the ownership
4308 	 * cannot be assigned, show a message but do not fail. The balance
4309 	 * is in a paused state and must have fs_info::balance_ctl properly
4310 	 * set up.
4311 	 */
4312 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4313 		btrfs_warn(fs_info,
4314 	"balance: cannot set exclusive op status, resume manually");
4315 
4316 	mutex_lock(&fs_info->balance_mutex);
4317 	BUG_ON(fs_info->balance_ctl);
4318 	spin_lock(&fs_info->balance_lock);
4319 	fs_info->balance_ctl = bctl;
4320 	spin_unlock(&fs_info->balance_lock);
4321 	mutex_unlock(&fs_info->balance_mutex);
4322 out:
4323 	btrfs_free_path(path);
4324 	return ret;
4325 }
4326 
4327 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4328 {
4329 	int ret = 0;
4330 
4331 	mutex_lock(&fs_info->balance_mutex);
4332 	if (!fs_info->balance_ctl) {
4333 		mutex_unlock(&fs_info->balance_mutex);
4334 		return -ENOTCONN;
4335 	}
4336 
4337 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4338 		atomic_inc(&fs_info->balance_pause_req);
4339 		mutex_unlock(&fs_info->balance_mutex);
4340 
4341 		wait_event(fs_info->balance_wait_q,
4342 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4343 
4344 		mutex_lock(&fs_info->balance_mutex);
4345 		/* we are good with balance_ctl ripped off from under us */
4346 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4347 		atomic_dec(&fs_info->balance_pause_req);
4348 	} else {
4349 		ret = -ENOTCONN;
4350 	}
4351 
4352 	mutex_unlock(&fs_info->balance_mutex);
4353 	return ret;
4354 }
4355 
4356 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4357 {
4358 	mutex_lock(&fs_info->balance_mutex);
4359 	if (!fs_info->balance_ctl) {
4360 		mutex_unlock(&fs_info->balance_mutex);
4361 		return -ENOTCONN;
4362 	}
4363 
4364 	/*
4365 	 * A paused balance with the item stored on disk can be resumed at
4366 	 * mount time if the mount is read-write. Otherwise it's still paused
4367 	 * and we must not allow cancelling as it deletes the item.
4368 	 */
4369 	if (sb_rdonly(fs_info->sb)) {
4370 		mutex_unlock(&fs_info->balance_mutex);
4371 		return -EROFS;
4372 	}
4373 
4374 	atomic_inc(&fs_info->balance_cancel_req);
4375 	/*
4376 	 * If balance is running, just wait for it to finish; the balance
4377 	 * item is deleted in btrfs_balance() in that case.
4378 	 */
4379 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4380 		mutex_unlock(&fs_info->balance_mutex);
4381 		wait_event(fs_info->balance_wait_q,
4382 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4383 		mutex_lock(&fs_info->balance_mutex);
4384 	} else {
4385 		mutex_unlock(&fs_info->balance_mutex);
4386 		/*
4387 		 * The lock was released to allow other waiters to continue;
4388 		 * reexamine the status once we hold it again.
4389 		 */
4390 		mutex_lock(&fs_info->balance_mutex);
4391 
4392 		if (fs_info->balance_ctl) {
4393 			reset_balance_state(fs_info);
4394 			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4395 			btrfs_info(fs_info, "balance: canceled");
4396 		}
4397 	}
4398 
4399 	BUG_ON(fs_info->balance_ctl ||
4400 		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4401 	atomic_dec(&fs_info->balance_cancel_req);
4402 	mutex_unlock(&fs_info->balance_mutex);
4403 	return 0;
4404 }
4405 
4406 static int btrfs_uuid_scan_kthread(void *data)
4407 {
4408 	struct btrfs_fs_info *fs_info = data;
4409 	struct btrfs_root *root = fs_info->tree_root;
4410 	struct btrfs_key key;
4411 	struct btrfs_path *path = NULL;
4412 	int ret = 0;
4413 	struct extent_buffer *eb;
4414 	int slot;
4415 	struct btrfs_root_item root_item;
4416 	u32 item_size;
4417 	struct btrfs_trans_handle *trans = NULL;
4418 
4419 	path = btrfs_alloc_path();
4420 	if (!path) {
4421 		ret = -ENOMEM;
4422 		goto out;
4423 	}
4424 
4425 	key.objectid = 0;
4426 	key.type = BTRFS_ROOT_ITEM_KEY;
4427 	key.offset = 0;
4428 
4429 	while (1) {
4430 		ret = btrfs_search_forward(root, &key, path,
4431 				BTRFS_OLDEST_GENERATION);
4432 		if (ret) {
4433 			if (ret > 0)
4434 				ret = 0;
4435 			break;
4436 		}
4437 
4438 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4439 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4440 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4441 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4442 			goto skip;
4443 
4444 		eb = path->nodes[0];
4445 		slot = path->slots[0];
4446 		item_size = btrfs_item_size_nr(eb, slot);
4447 		if (item_size < sizeof(root_item))
4448 			goto skip;
4449 
4450 		read_extent_buffer(eb, &root_item,
4451 				   btrfs_item_ptr_offset(eb, slot),
4452 				   (int)sizeof(root_item));
4453 		if (btrfs_root_refs(&root_item) == 0)
4454 			goto skip;
4455 
4456 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4457 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4458 			if (trans)
4459 				goto update_tree;
4460 
4461 			btrfs_release_path(path);
4462 			/*
4463 			 * 1 - subvol uuid item
4464 			 * 1 - received_subvol uuid item
4465 			 */
4466 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4467 			if (IS_ERR(trans)) {
4468 				ret = PTR_ERR(trans);
4469 				break;
4470 			}
4471 			continue;
4472 		} else {
4473 			goto skip;
4474 		}
4475 update_tree:
4476 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4477 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4478 						  BTRFS_UUID_KEY_SUBVOL,
4479 						  key.objectid);
4480 			if (ret < 0) {
4481 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4482 					ret);
4483 				break;
4484 			}
4485 		}
4486 
4487 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4488 			ret = btrfs_uuid_tree_add(trans,
4489 						  root_item.received_uuid,
4490 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4491 						  key.objectid);
4492 			if (ret < 0) {
4493 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4494 					ret);
4495 				break;
4496 			}
4497 		}
4498 
4499 skip:
4500 		if (trans) {
4501 			ret = btrfs_end_transaction(trans);
4502 			trans = NULL;
4503 			if (ret)
4504 				break;
4505 		}
4506 
4507 		btrfs_release_path(path);
4508 		if (key.offset < (u64)-1) {
4509 			key.offset++;
4510 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4511 			key.offset = 0;
4512 			key.type = BTRFS_ROOT_ITEM_KEY;
4513 		} else if (key.objectid < (u64)-1) {
4514 			key.offset = 0;
4515 			key.type = BTRFS_ROOT_ITEM_KEY;
4516 			key.objectid++;
4517 		} else {
4518 			break;
4519 		}
4520 		cond_resched();
4521 	}
4522 
4523 out:
4524 	btrfs_free_path(path);
4525 	if (trans && !IS_ERR(trans))
4526 		btrfs_end_transaction(trans);
4527 	if (ret)
4528 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4529 	else
4530 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4531 	up(&fs_info->uuid_tree_rescan_sem);
4532 	return 0;
4533 }
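
/*
 * The key advancement at the bottom of the loop above walks the whole key
 * space in (objectid, type, offset) order.  For example, after processing
 * root item (256 ROOT_ITEM 0) the next search starts at (256 ROOT_ITEM 1);
 * once offset and type are exhausted the objectid is bumped and type and
 * offset reset, until the objectid itself reaches (u64)-1.
 */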
4534 
4535 /*
4536  * Callback for btrfs_uuid_tree_iterate().
4537  * returns:
4538  * 0	check succeeded, the entry is not outdated.
4539  * < 0	if an error occurred.
4540  * > 0	if the check failed, which means the caller shall remove the entry.
4541  */
4542 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4543 				       u8 *uuid, u8 type, u64 subid)
4544 {
4545 	struct btrfs_key key;
4546 	int ret = 0;
4547 	struct btrfs_root *subvol_root;
4548 
4549 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4550 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4551 		goto out;
4552 
4553 	key.objectid = subid;
4554 	key.type = BTRFS_ROOT_ITEM_KEY;
4555 	key.offset = (u64)-1;
4556 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4557 	if (IS_ERR(subvol_root)) {
4558 		ret = PTR_ERR(subvol_root);
4559 		if (ret == -ENOENT)
4560 			ret = 1;
4561 		goto out;
4562 	}
4563 
4564 	switch (type) {
4565 	case BTRFS_UUID_KEY_SUBVOL:
4566 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4567 			ret = 1;
4568 		break;
4569 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4570 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4571 			   BTRFS_UUID_SIZE))
4572 			ret = 1;
4573 		break;
4574 	}
4575 
4576 out:
4577 	return ret;
4578 }
4579 
4580 static int btrfs_uuid_rescan_kthread(void *data)
4581 {
4582 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4583 	int ret;
4584 
4585 	/*
4586 	 * 1st step is to iterate through the existing UUID tree and
4587 	 * to delete all entries that contain outdated data.
4588 	 * 2nd step is to add all missing entries to the UUID tree.
4589 	 */
4590 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4591 	if (ret < 0) {
4592 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4593 		up(&fs_info->uuid_tree_rescan_sem);
4594 		return ret;
4595 	}
4596 	return btrfs_uuid_scan_kthread(data);
4597 }
4598 
4599 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4600 {
4601 	struct btrfs_trans_handle *trans;
4602 	struct btrfs_root *tree_root = fs_info->tree_root;
4603 	struct btrfs_root *uuid_root;
4604 	struct task_struct *task;
4605 	int ret;
4606 
4607 	/*
4608 	 * 1 - root node
4609 	 * 1 - root item
4610 	 */
4611 	trans = btrfs_start_transaction(tree_root, 2);
4612 	if (IS_ERR(trans))
4613 		return PTR_ERR(trans);
4614 
4615 	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4616 	if (IS_ERR(uuid_root)) {
4617 		ret = PTR_ERR(uuid_root);
4618 		btrfs_abort_transaction(trans, ret);
4619 		btrfs_end_transaction(trans);
4620 		return ret;
4621 	}
4622 
4623 	fs_info->uuid_root = uuid_root;
4624 
4625 	ret = btrfs_commit_transaction(trans);
4626 	if (ret)
4627 		return ret;
4628 
4629 	down(&fs_info->uuid_tree_rescan_sem);
4630 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4631 	if (IS_ERR(task)) {
4632 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4633 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4634 		up(&fs_info->uuid_tree_rescan_sem);
4635 		return PTR_ERR(task);
4636 	}
4637 
4638 	return 0;
4639 }
4640 
4641 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4642 {
4643 	struct task_struct *task;
4644 
4645 	down(&fs_info->uuid_tree_rescan_sem);
4646 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4647 	if (IS_ERR(task)) {
4648 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4649 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4650 		up(&fs_info->uuid_tree_rescan_sem);
4651 		return PTR_ERR(task);
4652 	}
4653 
4654 	return 0;
4655 }
4656 
4657 /*
4658  * Shrinking a device means finding all of the device extents past
4659  * the new size, and then following the back refs to the chunks.
4660  * The chunk relocation code actually frees the device extents.
4661  */
4662 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4663 {
4664 	struct btrfs_fs_info *fs_info = device->fs_info;
4665 	struct btrfs_root *root = fs_info->dev_root;
4666 	struct btrfs_trans_handle *trans;
4667 	struct btrfs_dev_extent *dev_extent = NULL;
4668 	struct btrfs_path *path;
4669 	u64 length;
4670 	u64 chunk_offset;
4671 	int ret;
4672 	int slot;
4673 	int failed = 0;
4674 	bool retried = false;
4675 	struct extent_buffer *l;
4676 	struct btrfs_key key;
4677 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4678 	u64 old_total = btrfs_super_total_bytes(super_copy);
4679 	u64 old_size = btrfs_device_get_total_bytes(device);
4680 	u64 diff;
4681 	u64 start;
4682 
4683 	new_size = round_down(new_size, fs_info->sectorsize);
4684 	start = new_size;
4685 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4686 
4687 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4688 		return -EINVAL;
4689 
4690 	path = btrfs_alloc_path();
4691 	if (!path)
4692 		return -ENOMEM;
4693 
4694 	path->reada = READA_BACK;
4695 
4696 	trans = btrfs_start_transaction(root, 0);
4697 	if (IS_ERR(trans)) {
4698 		btrfs_free_path(path);
4699 		return PTR_ERR(trans);
4700 	}
4701 
4702 	mutex_lock(&fs_info->chunk_mutex);
4703 
4704 	btrfs_device_set_total_bytes(device, new_size);
4705 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4706 		device->fs_devices->total_rw_bytes -= diff;
4707 		atomic64_sub(diff, &fs_info->free_chunk_space);
4708 	}
4709 
4710 	/*
4711 	 * Once the device's size has been set to the new size, ensure all
4712 	 * in-memory chunks are synced to disk so that the loop below sees them
4713 	 * and relocates them accordingly.
4714 	 */
4715 	if (contains_pending_extent(device, &start, diff)) {
4716 		mutex_unlock(&fs_info->chunk_mutex);
4717 		ret = btrfs_commit_transaction(trans);
4718 		if (ret)
4719 			goto done;
4720 	} else {
4721 		mutex_unlock(&fs_info->chunk_mutex);
4722 		btrfs_end_transaction(trans);
4723 	}
4724 
4725 again:
4726 	key.objectid = device->devid;
4727 	key.offset = (u64)-1;
4728 	key.type = BTRFS_DEV_EXTENT_KEY;
4729 
4730 	do {
4731 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
4732 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4733 		if (ret < 0) {
4734 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4735 			goto done;
4736 		}
4737 
4738 		ret = btrfs_previous_item(root, path, 0, key.type);
4739 		if (ret)
4740 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4741 		if (ret < 0)
4742 			goto done;
4743 		if (ret) {
4744 			ret = 0;
4745 			btrfs_release_path(path);
4746 			break;
4747 		}
4748 
4749 		l = path->nodes[0];
4750 		slot = path->slots[0];
4751 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4752 
4753 		if (key.objectid != device->devid) {
4754 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4755 			btrfs_release_path(path);
4756 			break;
4757 		}
4758 
4759 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4760 		length = btrfs_dev_extent_length(l, dev_extent);
4761 
4762 		if (key.offset + length <= new_size) {
4763 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4764 			btrfs_release_path(path);
4765 			break;
4766 		}
4767 
4768 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4769 		btrfs_release_path(path);
4770 
4771 		/*
4772 		 * We may be relocating the only data chunk we have,
4773 		 * which could potentially end up losing the data's
4774 		 * raid profile, so let's allocate an empty one in
4775 		 * advance.
4776 		 */
4777 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4778 		if (ret < 0) {
4779 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4780 			goto done;
4781 		}
4782 
4783 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4784 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4785 		if (ret == -ENOSPC) {
4786 			failed++;
4787 		} else if (ret) {
4788 			if (ret == -ETXTBSY) {
4789 				btrfs_warn(fs_info,
4790 		   "could not shrink block group %llu due to active swapfile",
4791 					   chunk_offset);
4792 			}
4793 			goto done;
4794 		}
4795 	} while (key.offset-- > 0);
4796 
4797 	if (failed && !retried) {
4798 		failed = 0;
4799 		retried = true;
4800 		goto again;
4801 	} else if (failed && retried) {
4802 		ret = -ENOSPC;
4803 		goto done;
4804 	}
4805 
4806 	/* Shrinking succeeded, else we would be at "done". */
4807 	trans = btrfs_start_transaction(root, 0);
4808 	if (IS_ERR(trans)) {
4809 		ret = PTR_ERR(trans);
4810 		goto done;
4811 	}
4812 
4813 	mutex_lock(&fs_info->chunk_mutex);
4814 	btrfs_device_set_disk_total_bytes(device, new_size);
4815 	if (list_empty(&device->post_commit_list))
4816 		list_add_tail(&device->post_commit_list,
4817 			      &trans->transaction->dev_update_list);
4818 
4819 	WARN_ON(diff > old_total);
4820 	btrfs_set_super_total_bytes(super_copy,
4821 			round_down(old_total - diff, fs_info->sectorsize));
4822 	mutex_unlock(&fs_info->chunk_mutex);
4823 
4824 	/* Now btrfs_update_device() will change the on-disk size. */
4825 	ret = btrfs_update_device(trans, device);
4826 	if (ret < 0) {
4827 		btrfs_abort_transaction(trans, ret);
4828 		btrfs_end_transaction(trans);
4829 	} else {
4830 		ret = btrfs_commit_transaction(trans);
4831 	}
4832 done:
4833 	btrfs_free_path(path);
4834 	if (ret) {
4835 		mutex_lock(&fs_info->chunk_mutex);
4836 		btrfs_device_set_total_bytes(device, old_size);
4837 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4838 			device->fs_devices->total_rw_bytes += diff;
4839 		atomic64_add(diff, &fs_info->free_chunk_space);
4840 		mutex_unlock(&fs_info->chunk_mutex);
4841 	}
4842 	return ret;
4843 }
4844 
4845 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4846 			   struct btrfs_key *key,
4847 			   struct btrfs_chunk *chunk, int item_size)
4848 {
4849 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4850 	struct btrfs_disk_key disk_key;
4851 	u32 array_size;
4852 	u8 *ptr;
4853 
4854 	mutex_lock(&fs_info->chunk_mutex);
4855 	array_size = btrfs_super_sys_array_size(super_copy);
4856 	if (array_size + item_size + sizeof(disk_key)
4857 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4858 		mutex_unlock(&fs_info->chunk_mutex);
4859 		return -EFBIG;
4860 	}
4861 
4862 	ptr = super_copy->sys_chunk_array + array_size;
4863 	btrfs_cpu_key_to_disk(&disk_key, key);
4864 	memcpy(ptr, &disk_key, sizeof(disk_key));
4865 	ptr += sizeof(disk_key);
4866 	memcpy(ptr, chunk, item_size);
4867 	item_size += sizeof(disk_key);
4868 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4869 	mutex_unlock(&fs_info->chunk_mutex);
4870 
4871 	return 0;
4872 }
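
/*
 * Layout of super_copy->sys_chunk_array that the helper above appends to
 * (widths illustrative):
 *
 *	+----------------+-------------------------+----------------+----
 *	| btrfs_disk_key | btrfs_chunk + N stripes | btrfs_disk_key | ...
 *	+----------------+-------------------------+----------------+----
 *
 * Pairs are packed back to back with no padding, and the total must stay
 * within BTRFS_SYSTEM_CHUNK_ARRAY_SIZE, hence the -EFBIG check.
 */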
4873 
4874 /*
4875  * sort the devices in descending order by max_avail, total_avail
4876  */
4877 static int btrfs_cmp_device_info(const void *a, const void *b)
4878 {
4879 	const struct btrfs_device_info *di_a = a;
4880 	const struct btrfs_device_info *di_b = b;
4881 
4882 	if (di_a->max_avail > di_b->max_avail)
4883 		return -1;
4884 	if (di_a->max_avail < di_b->max_avail)
4885 		return 1;
4886 	if (di_a->total_avail > di_b->total_avail)
4887 		return -1;
4888 	if (di_a->total_avail < di_b->total_avail)
4889 		return 1;
4890 	return 0;
4891 }
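
/*
 * Example of the resulting order (made-up sizes): devices with
 * (max_avail, total_avail) of (5G, 8G), (5G, 12G) and (7G, 7G) sort to
 * (7G, 7G), (5G, 12G), (5G, 8G): descending max_avail first, ties broken
 * by descending total_avail.
 */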
4892 
4893 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4894 {
4895 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4896 		return;
4897 
4898 	btrfs_set_fs_incompat(info, RAID56);
4899 }
4900 
4901 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4902 			       u64 start, u64 type)
4903 {
4904 	struct btrfs_fs_info *info = trans->fs_info;
4905 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4906 	struct btrfs_device *device;
4907 	struct map_lookup *map = NULL;
4908 	struct extent_map_tree *em_tree;
4909 	struct extent_map *em;
4910 	struct btrfs_device_info *devices_info = NULL;
4911 	u64 total_avail;
4912 	int num_stripes;	/* total number of stripes to allocate */
4913 	int data_stripes;	/* number of stripes that count for
4914 				   block group size */
4915 	int sub_stripes;	/* sub_stripes info for map */
4916 	int dev_stripes;	/* stripes per dev */
4917 	int devs_max;		/* max devs to use */
4918 	int devs_min;		/* min devs needed */
4919 	int devs_increment;	/* ndevs has to be a multiple of this */
4920 	int ncopies;		/* how many copies the data has */
4921 	int nparity;		/* number of stripes worth of bytes to
4922 				   store parity information */
4923 	int ret;
4924 	u64 max_stripe_size;
4925 	u64 max_chunk_size;
4926 	u64 stripe_size;
4927 	u64 chunk_size;
4928 	int ndevs;
4929 	int i;
4930 	int j;
4931 	int index;
4932 
4933 	BUG_ON(!alloc_profile_is_valid(type, 0));
4934 
4935 	if (list_empty(&fs_devices->alloc_list)) {
4936 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
4937 			btrfs_debug(info, "%s: no writable device", __func__);
4938 		return -ENOSPC;
4939 	}
4940 
4941 	index = btrfs_bg_flags_to_raid_index(type);
4942 
4943 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4944 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4945 	devs_max = btrfs_raid_array[index].devs_max;
4946 	if (!devs_max)
4947 		devs_max = BTRFS_MAX_DEVS(info);
4948 	devs_min = btrfs_raid_array[index].devs_min;
4949 	devs_increment = btrfs_raid_array[index].devs_increment;
4950 	ncopies = btrfs_raid_array[index].ncopies;
4951 	nparity = btrfs_raid_array[index].nparity;
4952 
4953 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4954 		max_stripe_size = SZ_1G;
4955 		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4956 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4957 		/* for larger filesystems, use larger metadata chunks */
4958 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4959 			max_stripe_size = SZ_1G;
4960 		else
4961 			max_stripe_size = SZ_256M;
4962 		max_chunk_size = max_stripe_size;
4963 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4964 		max_stripe_size = SZ_32M;
4965 		max_chunk_size = 2 * max_stripe_size;
4966 	} else {
4967 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4968 		       type);
4969 		BUG();
4970 	}
4971 
4972 	/* We don't want a chunk larger than 10% of writable space */
4973 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4974 			     max_chunk_size);
4975 
4976 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4977 			       GFP_NOFS);
4978 	if (!devices_info)
4979 		return -ENOMEM;
4980 
4981 	/*
4982 	 * in the first pass through the devices list, we gather information
4983 	 * about the available holes on each device.
4984 	 */
4985 	ndevs = 0;
4986 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4987 		u64 max_avail;
4988 		u64 dev_offset;
4989 
4990 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4991 			WARN(1, KERN_ERR
4992 			       "BTRFS: read-only device in alloc_list\n");
4993 			continue;
4994 		}
4995 
4996 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4997 					&device->dev_state) ||
4998 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4999 			continue;
5000 
5001 		if (device->total_bytes > device->bytes_used)
5002 			total_avail = device->total_bytes - device->bytes_used;
5003 		else
5004 			total_avail = 0;
5005 
5006 		/* If there is no space on this device, skip it. */
5007 		if (total_avail == 0)
5008 			continue;
5009 
5010 		ret = find_free_dev_extent(device,
5011 					   max_stripe_size * dev_stripes,
5012 					   &dev_offset, &max_avail);
5013 		if (ret && ret != -ENOSPC)
5014 			goto error;
5015 
5016 		if (ret == 0)
5017 			max_avail = max_stripe_size * dev_stripes;
5018 
5019 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
5020 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5021 				btrfs_debug(info,
5022 			"%s: devid %llu has no free space, have=%llu want=%u",
5023 					    __func__, device->devid, max_avail,
5024 					    BTRFS_STRIPE_LEN * dev_stripes);
5025 			continue;
5026 		}
5027 
5028 		if (ndevs == fs_devices->rw_devices) {
5029 			WARN(1, "%s: found more than %llu devices\n",
5030 			     __func__, fs_devices->rw_devices);
5031 			break;
5032 		}
5033 		devices_info[ndevs].dev_offset = dev_offset;
5034 		devices_info[ndevs].max_avail = max_avail;
5035 		devices_info[ndevs].total_avail = total_avail;
5036 		devices_info[ndevs].dev = device;
5037 		++ndevs;
5038 	}
5039 
5040 	/*
5041 	 * now sort the devices by hole size / available space
5042 	 */
5043 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5044 	     btrfs_cmp_device_info, NULL);
5045 
5046 	/* round down to number of usable stripes */
5047 	ndevs = round_down(ndevs, devs_increment);
5048 
5049 	if (ndevs < devs_min) {
5050 		ret = -ENOSPC;
5051 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5052 			btrfs_debug(info,
5053 	"%s: not enough devices with free space: have=%d minimum required=%d",
5054 				    __func__, ndevs, devs_min);
5055 		}
5056 		goto error;
5057 	}
5058 
5059 	ndevs = min(ndevs, devs_max);
5060 
5061 	/*
5062 	 * The primary goal is to maximize the number of stripes, so use as
5063 	 * many devices as possible, even if the stripes are not maximum sized.
5064 	 *
5065 	 * The DUP profile stores more than one stripe per device, the
5066 	 * max_avail is the total size so we have to adjust.
5067 	 */
5068 	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
5069 	num_stripes = ndevs * dev_stripes;
5070 
5071 	/*
5072 	 * this will have to be fixed for RAID1 and RAID10 over
5073 	 * more drives
5074 	 */
5075 	data_stripes = (num_stripes - nparity) / ncopies;
5076 
5077 	/*
5078 	 * Use the number of data stripes to figure out how big this chunk
5079 	 * is really going to be in terms of logical address space,
5080 	 * and compare that answer with the max chunk size. If it's higher,
5081 	 * we try to reduce stripe_size.
5082 	 */
5083 	if (stripe_size * data_stripes > max_chunk_size) {
5084 		/*
5085 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5086 		 * then use it, unless it ends up being even bigger than the
5087 		 * previous value we had already.
5088 		 */
5089 		stripe_size = min(round_up(div_u64(max_chunk_size,
5090 						   data_stripes), SZ_16M),
5091 				  stripe_size);
5092 	}
5093 
5094 	/* align to BTRFS_STRIPE_LEN */
5095 	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
5096 
5097 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5098 	if (!map) {
5099 		ret = -ENOMEM;
5100 		goto error;
5101 	}
5102 	map->num_stripes = num_stripes;
5103 
5104 	for (i = 0; i < ndevs; ++i) {
5105 		for (j = 0; j < dev_stripes; ++j) {
5106 			int s = i * dev_stripes + j;
5107 			map->stripes[s].dev = devices_info[i].dev;
5108 			map->stripes[s].physical = devices_info[i].dev_offset +
5109 						   j * stripe_size;
5110 		}
5111 	}
5112 	map->stripe_len = BTRFS_STRIPE_LEN;
5113 	map->io_align = BTRFS_STRIPE_LEN;
5114 	map->io_width = BTRFS_STRIPE_LEN;
5115 	map->type = type;
5116 	map->sub_stripes = sub_stripes;
5117 
5118 	chunk_size = stripe_size * data_stripes;
5119 
5120 	trace_btrfs_chunk_alloc(info, map, start, chunk_size);
5121 
5122 	em = alloc_extent_map();
5123 	if (!em) {
5124 		kfree(map);
5125 		ret = -ENOMEM;
5126 		goto error;
5127 	}
5128 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5129 	em->map_lookup = map;
5130 	em->start = start;
5131 	em->len = chunk_size;
5132 	em->block_start = 0;
5133 	em->block_len = em->len;
5134 	em->orig_block_len = stripe_size;
5135 
5136 	em_tree = &info->mapping_tree;
5137 	write_lock(&em_tree->lock);
5138 	ret = add_extent_mapping(em_tree, em, 0);
5139 	if (ret) {
5140 		write_unlock(&em_tree->lock);
5141 		free_extent_map(em);
5142 		goto error;
5143 	}
5144 	write_unlock(&em_tree->lock);
5145 
5146 	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
5147 	if (ret)
5148 		goto error_del_extent;
5149 
5150 	for (i = 0; i < map->num_stripes; i++) {
5151 		struct btrfs_device *dev = map->stripes[i].dev;
5152 
5153 		btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
5154 		if (list_empty(&dev->post_commit_list))
5155 			list_add_tail(&dev->post_commit_list,
5156 				      &trans->transaction->dev_update_list);
5157 	}
5158 
5159 	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
5160 
5161 	free_extent_map(em);
5162 	check_raid56_incompat_flag(info, type);
5163 
5164 	kfree(devices_info);
5165 	return 0;
5166 
5167 error_del_extent:
5168 	write_lock(&em_tree->lock);
5169 	remove_extent_mapping(em_tree, em);
5170 	write_unlock(&em_tree->lock);
5171 
5172 	/* One for our allocation */
5173 	free_extent_map(em);
5174 	/* One for the tree reference */
5175 	free_extent_map(em);
5176 error:
5177 	kfree(devices_info);
5178 	return ret;
5179 }
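
/*
 * Worked sizing example for the allocator above, with made-up numbers:
 * a RAID10 data chunk on four writable devices, each with a 1G hole,
 * assuming max_chunk_size does not force a reduction.
 *
 *	dev_stripes = 1, ncopies = 2, nparity = 0, devs_increment = 2
 *	ndevs = 4 (already a multiple of devs_increment)
 *	stripe_size = 1G (smallest hole), num_stripes = 4 * 1 = 4
 *	data_stripes = (4 - 0) / 2 = 2
 *	chunk_size = stripe_size * data_stripes = 2G of logical space,
 *	consuming 1G of raw space on each of the four devices
 */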
5180 
5181 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5182 			     u64 chunk_offset, u64 chunk_size)
5183 {
5184 	struct btrfs_fs_info *fs_info = trans->fs_info;
5185 	struct btrfs_root *extent_root = fs_info->extent_root;
5186 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5187 	struct btrfs_key key;
5188 	struct btrfs_device *device;
5189 	struct btrfs_chunk *chunk;
5190 	struct btrfs_stripe *stripe;
5191 	struct extent_map *em;
5192 	struct map_lookup *map;
5193 	size_t item_size;
5194 	u64 dev_offset;
5195 	u64 stripe_size;
5196 	int i = 0;
5197 	int ret = 0;
5198 
5199 	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5200 	if (IS_ERR(em))
5201 		return PTR_ERR(em);
5202 
5203 	map = em->map_lookup;
5204 	item_size = btrfs_chunk_item_size(map->num_stripes);
5205 	stripe_size = em->orig_block_len;
5206 
5207 	chunk = kzalloc(item_size, GFP_NOFS);
5208 	if (!chunk) {
5209 		ret = -ENOMEM;
5210 		goto out;
5211 	}
5212 
5213 	/*
5214 	 * Take the device list mutex to prevent races with the final phase of
5215 	 * a device replace operation that replaces the device object associated
5216 	 * with the map's stripes, because the device object's id can change
5217 	 * at any time during that final phase of the device replace operation
5218 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
5219 	 */
5220 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
5221 	for (i = 0; i < map->num_stripes; i++) {
5222 		device = map->stripes[i].dev;
5223 		dev_offset = map->stripes[i].physical;
5224 
5225 		ret = btrfs_update_device(trans, device);
5226 		if (ret)
5227 			break;
5228 		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5229 					     dev_offset, stripe_size);
5230 		if (ret)
5231 			break;
5232 	}
5233 	if (ret) {
5234 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5235 		goto out;
5236 	}
5237 
5238 	stripe = &chunk->stripe;
5239 	for (i = 0; i < map->num_stripes; i++) {
5240 		device = map->stripes[i].dev;
5241 		dev_offset = map->stripes[i].physical;
5242 
5243 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5244 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5245 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5246 		stripe++;
5247 	}
5248 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5249 
5250 	btrfs_set_stack_chunk_length(chunk, chunk_size);
5251 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5252 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5253 	btrfs_set_stack_chunk_type(chunk, map->type);
5254 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5255 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5256 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5257 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5258 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5259 
5260 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5261 	key.type = BTRFS_CHUNK_ITEM_KEY;
5262 	key.offset = chunk_offset;
5263 
5264 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5265 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5266 		/*
5267 		 * TODO: Cleanup of inserted chunk root in case of
5268 		 * failure.
5269 		 */
5270 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5271 	}
5272 
5273 out:
5274 	kfree(chunk);
5275 	free_extent_map(em);
5276 	return ret;
5277 }
5278 
5279 /*
5280  * Chunk allocation falls into two parts. The first part does work
5281  * that makes the new allocated chunk usable, but does not do any operation
5282  * that modifies the chunk tree. The second part does the work that
5283  * requires modifying the chunk tree. This division is important for the
5284  * bootstrap process of adding storage to a seed btrfs.
5285  */
5286 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5287 {
5288 	u64 chunk_offset;
5289 
5290 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
5291 	chunk_offset = find_next_chunk(trans->fs_info);
5292 	return __btrfs_alloc_chunk(trans, chunk_offset, type);
5293 }
5294 
5295 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5296 {
5297 	struct btrfs_fs_info *fs_info = trans->fs_info;
5298 	u64 chunk_offset;
5299 	u64 sys_chunk_offset;
5300 	u64 alloc_profile;
5301 	int ret;
5302 
5303 	chunk_offset = find_next_chunk(fs_info);
5304 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5305 	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5306 	if (ret)
5307 		return ret;
5308 
5309 	sys_chunk_offset = find_next_chunk(fs_info);
5310 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5311 	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5312 	return ret;
5313 }
5314 
5315 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5316 {
5317 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5318 
5319 	return btrfs_raid_array[index].tolerated_failures;
5320 }
5321 
5322 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5323 {
5324 	struct extent_map *em;
5325 	struct map_lookup *map;
5326 	int readonly = 0;
5327 	int miss_ndevs = 0;
5328 	int i;
5329 
5330 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5331 	if (IS_ERR(em))
5332 		return 1;
5333 
5334 	map = em->map_lookup;
5335 	for (i = 0; i < map->num_stripes; i++) {
5336 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5337 					&map->stripes[i].dev->dev_state)) {
5338 			miss_ndevs++;
5339 			continue;
5340 		}
5341 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5342 					&map->stripes[i].dev->dev_state)) {
5343 			readonly = 1;
5344 			goto end;
5345 		}
5346 	}
5347 
5348 	/*
5349 	 * If the number of missing devices is larger than max errors,
5350 	 * we cannot write the data into that chunk successfully, so
5351 	 * set it readonly.
5352 	 */
5353 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5354 		readonly = 1;
5355 end:
5356 	free_extent_map(em);
5357 	return readonly;
5358 }
5359 
5360 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5361 {
5362 	struct extent_map *em;
5363 
5364 	while (1) {
5365 		write_lock(&tree->lock);
5366 		em = lookup_extent_mapping(tree, 0, (u64)-1);
5367 		if (em)
5368 			remove_extent_mapping(tree, em);
5369 		write_unlock(&tree->lock);
5370 		if (!em)
5371 			break;
5372 		/* once for us */
5373 		free_extent_map(em);
5374 		/* once for the tree */
5375 		free_extent_map(em);
5376 	}
5377 }
5378 
5379 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5380 {
5381 	struct extent_map *em;
5382 	struct map_lookup *map;
5383 	int ret;
5384 
5385 	em = btrfs_get_chunk_map(fs_info, logical, len);
5386 	if (IS_ERR(em))
5387 		/*
5388 		 * We could return errors for these cases, but that could get
5389 		 * ugly, and we'd probably do the same thing anyway (just exit
5390 		 * without doing anything else), so return 1 so the callers don't try
5391 		 * to use other copies.
5392 		 */
5393 		return 1;
5394 
5395 	map = em->map_lookup;
5396 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5397 		ret = map->num_stripes;
5398 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5399 		ret = map->sub_stripes;
5400 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5401 		ret = 2;
5402 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5403 		/*
5404 		 * There could be two corrupted data stripes, so we need
5405 		 * to retry in a loop in order to rebuild the correct data.
5406 		 *
5407 		 * Fail a stripe at a time on every retry except the
5408 		 * stripe under reconstruction.
5409 		 */
5410 		ret = map->num_stripes;
5411 	else
5412 		ret = 1;
5413 	free_extent_map(em);
5414 
5415 	down_read(&fs_info->dev_replace.rwsem);
5416 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5417 	    fs_info->dev_replace.tgtdev)
5418 		ret++;
5419 	up_read(&fs_info->dev_replace.rwsem);
5420 
5421 	return ret;
5422 }
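
/*
 * Example (hypothetical numbers): a RAID1 chunk with num_stripes == 2 yields
 * 2 copies above, and an ongoing dev-replace with a target device set bumps
 * the result to 3, as the target holds one more copy.
 */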
5423 
5424 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5425 				    u64 logical)
5426 {
5427 	struct extent_map *em;
5428 	struct map_lookup *map;
5429 	unsigned long len = fs_info->sectorsize;
5430 
5431 	em = btrfs_get_chunk_map(fs_info, logical, len);
5432 
5433 	if (!WARN_ON(IS_ERR(em))) {
5434 		map = em->map_lookup;
5435 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5436 			len = map->stripe_len * nr_data_stripes(map);
5437 		free_extent_map(em);
5438 	}
5439 	return len;
5440 }
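
/*
 * Illustration (hypothetical numbers): for a RAID5 chunk across 4 devices
 * with stripe_len 64K there are 3 data stripes, so the full stripe length
 * returned above is 192K.
 */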
5441 
5442 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5443 {
5444 	struct extent_map *em;
5445 	struct map_lookup *map;
5446 	int ret = 0;
5447 
5448 	em = btrfs_get_chunk_map(fs_info, logical, len);
5449 
5450 	if (!WARN_ON(IS_ERR(em))) {
5451 		map = em->map_lookup;
5452 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5453 			ret = 1;
5454 		free_extent_map(em);
5455 	}
5456 	return ret;
5457 }
5458 
5459 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5460 			    struct map_lookup *map, int first,
5461 			    int dev_replace_is_ongoing)
5462 {
5463 	int i;
5464 	int num_stripes;
5465 	int preferred_mirror;
5466 	int tolerance;
5467 	struct btrfs_device *srcdev;
5468 
5469 	ASSERT((map->type &
5470 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5471 
5472 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5473 		num_stripes = map->sub_stripes;
5474 	else
5475 		num_stripes = map->num_stripes;
5476 
5477 	preferred_mirror = first + current->pid % num_stripes;
5478 
5479 	if (dev_replace_is_ongoing &&
5480 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5481 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5482 		srcdev = fs_info->dev_replace.srcdev;
5483 	else
5484 		srcdev = NULL;
5485 
5486 	/*
5487 	 * Try to avoid the drive that is the source drive for a
5488 	 * dev-replace procedure; only choose it if no other non-missing
5489 	 * mirror is available.
5490 	 */
5491 	for (tolerance = 0; tolerance < 2; tolerance++) {
5492 		if (map->stripes[preferred_mirror].dev->bdev &&
5493 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5494 			return preferred_mirror;
5495 		for (i = first; i < first + num_stripes; i++) {
5496 			if (map->stripes[i].dev->bdev &&
5497 			    (tolerance || map->stripes[i].dev != srcdev))
5498 				return i;
5499 		}
5500 	}
5501 
5502 	/* We couldn't find one that doesn't fail. Just return something
5503 	 * and the IO error handling code will clean up eventually.
5504 	 */
5505 	return preferred_mirror;
5506 }
5507 
5508 static inline int parity_smaller(u64 a, u64 b)
5509 {
5510 	return a > b;
5511 }
5512 
5513 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5514 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5515 {
5516 	struct btrfs_bio_stripe s;
5517 	int i;
5518 	u64 l;
5519 	int again = 1;
5520 
5521 	while (again) {
5522 		again = 0;
5523 		for (i = 0; i < num_stripes - 1; i++) {
5524 			if (parity_smaller(bbio->raid_map[i],
5525 					   bbio->raid_map[i+1])) {
5526 				s = bbio->stripes[i];
5527 				l = bbio->raid_map[i];
5528 				bbio->stripes[i] = bbio->stripes[i+1];
5529 				bbio->raid_map[i] = bbio->raid_map[i+1];
5530 				bbio->stripes[i+1] = s;
5531 				bbio->raid_map[i+1] = l;
5532 
5533 				again = 1;
5534 			}
5535 		}
5536 	}
5537 }
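
/*
 * Sketch of the effect: raid_map entries for data stripes hold real logical
 * addresses, while parity entries are set to the special RAID5_P_STRIPE and
 * RAID6_Q_STRIPE markers by the raid_map build in __btrfs_map_block; those
 * markers compare larger than any logical address and therefore sort last.
 */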
5538 
5539 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5540 {
5541 	struct btrfs_bio *bbio = kzalloc(
5542 		 /* the size of the btrfs_bio */
5543 		sizeof(struct btrfs_bio) +
5544 		/* plus the variable array for the stripes */
5545 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5546 		/* plus the variable array for the tgt dev */
5547 		sizeof(int) * (real_stripes) +
5548 		/*
5549 		 * plus the raid_map, which includes both the tgt dev
5550 		 * and the stripes
5551 		 */
5552 		sizeof(u64) * (total_stripes),
5553 		GFP_NOFS|__GFP_NOFAIL);
5554 
5555 	atomic_set(&bbio->error, 0);
5556 	refcount_set(&bbio->refs, 1);
5557 
5558 	return bbio;
5559 }
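
/*
 * Layout of the single allocation above (a sketch):
 *
 *   [ struct btrfs_bio | btrfs_bio_stripe x total_stripes |
 *     int tgtdev_map[real_stripes] | u64 raid_map[total_stripes] ]
 *
 * __btrfs_map_block points bbio->tgtdev_map and bbio->raid_map into this
 * buffer when they are needed.
 */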
5560 
5561 void btrfs_get_bbio(struct btrfs_bio *bbio)
5562 {
5563 	WARN_ON(!refcount_read(&bbio->refs));
5564 	refcount_inc(&bbio->refs);
5565 }
5566 
5567 void btrfs_put_bbio(struct btrfs_bio *bbio)
5568 {
5569 	if (!bbio)
5570 		return;
5571 	if (refcount_dec_and_test(&bbio->refs))
5572 		kfree(bbio);
5573 }
5574 
5575 /*
5576  * Can REQ_OP_DISCARD be sent with other REQs like REQ_OP_WRITE?
5577  *
5578  * Note that discard is not sent to the target device of a device replace.
5579  */
5580 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5581 					 u64 logical, u64 length,
5582 					 struct btrfs_bio **bbio_ret)
5583 {
5584 	struct extent_map *em;
5585 	struct map_lookup *map;
5586 	struct btrfs_bio *bbio;
5587 	u64 offset;
5588 	u64 stripe_nr;
5589 	u64 stripe_nr_end;
5590 	u64 stripe_end_offset;
5591 	u64 stripe_cnt;
5592 	u64 stripe_len;
5593 	u64 stripe_offset;
5594 	u64 num_stripes;
5595 	u32 stripe_index;
5596 	u32 factor = 0;
5597 	u32 sub_stripes = 0;
5598 	u64 stripes_per_dev = 0;
5599 	u32 remaining_stripes = 0;
5600 	u32 last_stripe = 0;
5601 	int ret = 0;
5602 	int i;
5603 
5604 	/* Discard always returns a bbio */
5605 	ASSERT(bbio_ret);
5606 
5607 	em = btrfs_get_chunk_map(fs_info, logical, length);
5608 	if (IS_ERR(em))
5609 		return PTR_ERR(em);
5610 
5611 	map = em->map_lookup;
5612 	/* we don't discard raid56 yet */
5613 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5614 		ret = -EOPNOTSUPP;
5615 		goto out;
5616 	}
5617 
5618 	offset = logical - em->start;
5619 	length = min_t(u64, em->len - offset, length);
5620 
5621 	stripe_len = map->stripe_len;
5622 	/*
5623 	 * stripe_nr counts the total number of stripes we have to stride
5624 	 * to get to this block
5625 	 */
5626 	stripe_nr = div64_u64(offset, stripe_len);
5627 
5628 	/* stripe_offset is the offset of this block in its stripe */
5629 	stripe_offset = offset - stripe_nr * stripe_len;
5630 
5631 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5632 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5633 	stripe_cnt = stripe_nr_end - stripe_nr;
5634 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5635 			    (offset + length);
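	/*
	 * Worked example (hypothetical numbers): with stripe_len 64K,
	 * offset 96K and length 128K, stripe_nr is 1 and stripe_offset is
	 * 32K; stripe_nr_end is round_up(224K, 64K) / 64K = 4, so
	 * stripe_cnt is 3 and stripe_end_offset is 256K - 224K = 32K.
	 */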
5636 	/*
5637 	 * after this, stripe_nr is the number of stripes on this
5638 	 * device we have to walk to find the data, and stripe_index is
5639 	 * the number of our device in the stripe array
5640 	 */
5641 	num_stripes = 1;
5642 	stripe_index = 0;
5643 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5644 			 BTRFS_BLOCK_GROUP_RAID10)) {
5645 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5646 			sub_stripes = 1;
5647 		else
5648 			sub_stripes = map->sub_stripes;
5649 
5650 		factor = map->num_stripes / sub_stripes;
5651 		num_stripes = min_t(u64, map->num_stripes,
5652 				    sub_stripes * stripe_cnt);
5653 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5654 		stripe_index *= sub_stripes;
5655 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5656 					      &remaining_stripes);
5657 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5658 		last_stripe *= sub_stripes;
5659 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5660 				BTRFS_BLOCK_GROUP_DUP)) {
5661 		num_stripes = map->num_stripes;
5662 	} else {
5663 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5664 					&stripe_index);
5665 	}
5666 
5667 	bbio = alloc_btrfs_bio(num_stripes, 0);
5668 	if (!bbio) {
5669 		ret = -ENOMEM;
5670 		goto out;
5671 	}
5672 
5673 	for (i = 0; i < num_stripes; i++) {
5674 		bbio->stripes[i].physical =
5675 			map->stripes[stripe_index].physical +
5676 			stripe_offset + stripe_nr * map->stripe_len;
5677 		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5678 
5679 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5680 				 BTRFS_BLOCK_GROUP_RAID10)) {
5681 			bbio->stripes[i].length = stripes_per_dev *
5682 				map->stripe_len;
5683 
5684 			if (i / sub_stripes < remaining_stripes)
5685 				bbio->stripes[i].length +=
5686 					map->stripe_len;
5687 
5688 			/*
5689 			 * Special for the first stripe and
5690 			 * the last stripe:
5691 			 *
5692 			 * |-------|...|-------|
5693 			 *     |----------|
5694 			 *    off     end_off
5695 			 */
5696 			if (i < sub_stripes)
5697 				bbio->stripes[i].length -=
5698 					stripe_offset;
5699 
5700 			if (stripe_index >= last_stripe &&
5701 			    stripe_index <= (last_stripe +
5702 					     sub_stripes - 1))
5703 				bbio->stripes[i].length -=
5704 					stripe_end_offset;
5705 
5706 			if (i == sub_stripes - 1)
5707 				stripe_offset = 0;
5708 		} else {
5709 			bbio->stripes[i].length = length;
5710 		}
5711 
5712 		stripe_index++;
5713 		if (stripe_index == map->num_stripes) {
5714 			stripe_index = 0;
5715 			stripe_nr++;
5716 		}
5717 	}
5718 
5719 	*bbio_ret = bbio;
5720 	bbio->map_type = map->type;
5721 	bbio->num_stripes = num_stripes;
5722 out:
5723 	free_extent_map(em);
5724 	return ret;
5725 }
5726 
5727 /*
5728  * In dev-replace case, for repair case (that's the only case where the mirror
5729  * is selected explicitly when calling btrfs_map_block), blocks left of the
5730  * left cursor can also be read from the target drive.
5731  *
5732  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5733  * array of stripes.
5734  * For READ, it also needs to be supported using the same mirror number.
5735  *
5736  * If the requested block is not left of the left cursor, EIO is returned. This
5737  * can happen because btrfs_num_copies() returns one more in the dev-replace
5738  * case.
5739  */
5740 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5741 					 u64 logical, u64 length,
5742 					 u64 srcdev_devid, int *mirror_num,
5743 					 u64 *physical)
5744 {
5745 	struct btrfs_bio *bbio = NULL;
5746 	int num_stripes;
5747 	int index_srcdev = 0;
5748 	int found = 0;
5749 	u64 physical_of_found = 0;
5750 	int i;
5751 	int ret = 0;
5752 
5753 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5754 				logical, &length, &bbio, 0, 0);
5755 	if (ret) {
5756 		ASSERT(bbio == NULL);
5757 		return ret;
5758 	}
5759 
5760 	num_stripes = bbio->num_stripes;
5761 	if (*mirror_num > num_stripes) {
5762 		/*
5763 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5764 		 * which means that the requested area is not left of the left
5765 		 * cursor.
5766 		 */
5767 		btrfs_put_bbio(bbio);
5768 		return -EIO;
5769 	}
5770 
5771 	/*
5772 	 * Process the rest of the function using the mirror_num of the source
5773 	 * drive. Therefore look it up first. At the end, patch the device
5774 	 * pointer to that of the target drive.
5775 	 */
5776 	for (i = 0; i < num_stripes; i++) {
5777 		if (bbio->stripes[i].dev->devid != srcdev_devid)
5778 			continue;
5779 
5780 		/*
5781 		 * In case of DUP, in order to keep it simple, only add the
5782 		 * mirror with the lowest physical address
5783 		 */
5784 		if (found &&
5785 		    physical_of_found <= bbio->stripes[i].physical)
5786 			continue;
5787 
5788 		index_srcdev = i;
5789 		found = 1;
5790 		physical_of_found = bbio->stripes[i].physical;
5791 	}
5792 
5793 	btrfs_put_bbio(bbio);
5794 
5795 	ASSERT(found);
5796 	if (!found)
5797 		return -EIO;
5798 
5799 	*mirror_num = index_srcdev + 1;
5800 	*physical = physical_of_found;
5801 	return ret;
5802 }
5803 
5804 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5805 				      struct btrfs_bio **bbio_ret,
5806 				      struct btrfs_dev_replace *dev_replace,
5807 				      int *num_stripes_ret, int *max_errors_ret)
5808 {
5809 	struct btrfs_bio *bbio = *bbio_ret;
5810 	u64 srcdev_devid = dev_replace->srcdev->devid;
5811 	int tgtdev_indexes = 0;
5812 	int num_stripes = *num_stripes_ret;
5813 	int max_errors = *max_errors_ret;
5814 	int i;
5815 
5816 	if (op == BTRFS_MAP_WRITE) {
5817 		int index_where_to_add;
5818 
5819 		/*
5820 		 * duplicate the write operations while the dev replace
5821 		 * procedure is running. Since the copying of the old disk to
5822 		 * the new disk takes place at run time while the filesystem is
5823 		 * mounted writable, the regular write operations to the old
5824 		 * disk have to be duplicated to go to the new disk as well.
5825 		 *
5826 		 * Note that device->missing is handled by the caller, and that
5827 		 * the write to the old disk is already set up in the stripes
5828 		 * array.
5829 		 */
5830 		index_where_to_add = num_stripes;
5831 		for (i = 0; i < num_stripes; i++) {
5832 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5833 				/* write to new disk, too */
5834 				struct btrfs_bio_stripe *new =
5835 					bbio->stripes + index_where_to_add;
5836 				struct btrfs_bio_stripe *old =
5837 					bbio->stripes + i;
5838 
5839 				new->physical = old->physical;
5840 				new->length = old->length;
5841 				new->dev = dev_replace->tgtdev;
5842 				bbio->tgtdev_map[i] = index_where_to_add;
5843 				index_where_to_add++;
5844 				max_errors++;
5845 				tgtdev_indexes++;
5846 			}
5847 		}
5848 		num_stripes = index_where_to_add;
5849 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5850 		int index_srcdev = 0;
5851 		int found = 0;
5852 		u64 physical_of_found = 0;
5853 
5854 		/*
5855 		 * During the dev-replace procedure, the target drive can also
5856 		 * be used to read data in case it is needed to repair a corrupt
5857 		 * block elsewhere. This is possible if the requested area is
5858 		 * left of the left cursor. In this area, the target drive is a
5859 		 * full copy of the source drive.
5860 		 */
5861 		for (i = 0; i < num_stripes; i++) {
5862 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5863 				/*
5864 				 * In case of DUP, in order to keep it simple,
5865 				 * only add the mirror with the lowest physical
5866 				 * address
5867 				 */
5868 				if (found &&
5869 				    physical_of_found <=
5870 				     bbio->stripes[i].physical)
5871 					continue;
5872 				index_srcdev = i;
5873 				found = 1;
5874 				physical_of_found = bbio->stripes[i].physical;
5875 			}
5876 		}
5877 		if (found) {
5878 			struct btrfs_bio_stripe *tgtdev_stripe =
5879 				bbio->stripes + num_stripes;
5880 
5881 			tgtdev_stripe->physical = physical_of_found;
5882 			tgtdev_stripe->length =
5883 				bbio->stripes[index_srcdev].length;
5884 			tgtdev_stripe->dev = dev_replace->tgtdev;
5885 			bbio->tgtdev_map[index_srcdev] = num_stripes;
5886 
5887 			tgtdev_indexes++;
5888 			num_stripes++;
5889 		}
5890 	}
5891 
5892 	*num_stripes_ret = num_stripes;
5893 	*max_errors_ret = max_errors;
5894 	bbio->num_tgtdevs = tgtdev_indexes;
5895 	*bbio_ret = bbio;
5896 }
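
/*
 * Example (hypothetical setup): while replacing devid 1, a write to a RAID1
 * chunk with stripes on devids 1 and 2 gets a third stripe appended above,
 * pointing at the target device at the same physical offset as the devid 1
 * stripe; num_stripes becomes 3 and max_errors grows by 1.
 */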
5897 
5898 static bool need_full_stripe(enum btrfs_map_op op)
5899 {
5900 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5901 }
5902 
5903 /*
5904  * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5905  *		       tuple. This information is used to calculate how big a
5906  *		       particular bio can get before it straddles a stripe.
5907  *
5908  * @fs_info: the filesystem
5909  * @logical: address that we want to figure out the geometry of
5910  * @len:     the length of IO we are going to perform, starting at @logical
5911  * @op:      type of operation - write or read
5912  * @io_geom: pointer used to return values
5913  *
5914  * Returns < 0 in case a chunk for the given logical address cannot be found,
5915  * which usually shouldn't happen unless @logical is corrupted; 0 otherwise.
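 *
 * Example (hypothetical numbers): with stripe_len 64K and @logical 96K into
 * the chunk, stripe_nr is 1 and stripe_offset is 32K, so for a non-RAID56
 * profile a bio can grow by at most 32K before it would straddle into the
 * next stripe.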
5916  */
5917 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5918 			u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5919 {
5920 	struct extent_map *em;
5921 	struct map_lookup *map;
5922 	u64 offset;
5923 	u64 stripe_offset;
5924 	u64 stripe_nr;
5925 	u64 stripe_len;
5926 	u64 raid56_full_stripe_start = (u64)-1;
5927 	int data_stripes;
5928 	int ret = 0;
5929 
5930 	ASSERT(op != BTRFS_MAP_DISCARD);
5931 
5932 	em = btrfs_get_chunk_map(fs_info, logical, len);
5933 	if (IS_ERR(em))
5934 		return PTR_ERR(em);
5935 
5936 	map = em->map_lookup;
5937 	/* Offset of this logical address in the chunk */
5938 	offset = logical - em->start;
5939 	/* Len of a stripe in a chunk */
5940 	stripe_len = map->stripe_len;
5941 	/* Stripe in which this block falls */
5942 	stripe_nr = div64_u64(offset, stripe_len);
5943 	/* Offset of stripe in the chunk */
5944 	stripe_offset = stripe_nr * stripe_len;
5945 	if (offset < stripe_offset) {
5946 		btrfs_crit(fs_info,
5947 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5948 			stripe_offset, offset, em->start, logical, stripe_len);
5949 		ret = -EINVAL;
5950 		goto out;
5951 	}
5952 
5953 	/* stripe_offset is the offset of this block in its stripe */
5954 	stripe_offset = offset - stripe_offset;
5955 	data_stripes = nr_data_stripes(map);
5956 
5957 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5958 		u64 max_len = stripe_len - stripe_offset;
5959 
5960 		/*
5961 		 * In case of raid56, we need to know the stripe aligned start
5962 		 */
5963 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5964 			unsigned long full_stripe_len = stripe_len * data_stripes;
5965 			raid56_full_stripe_start = offset;
5966 
5967 			/*
5968 			 * Allow a write of a full stripe, but make sure we
5969 			 * don't allow straddling of stripes
5970 			 */
5971 			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5972 					full_stripe_len);
5973 			raid56_full_stripe_start *= full_stripe_len;
5974 
5975 			/*
5976 			 * For writes to RAID[56], allow a full stripeset across
5977 			 * all disks. For other RAID types and for RAID[56]
5978 			 * reads, just allow a single stripe (on a single disk).
5979 			 */
5980 			if (op == BTRFS_MAP_WRITE) {
5981 				max_len = stripe_len * data_stripes -
5982 					  (offset - raid56_full_stripe_start);
5983 			}
5984 		}
5985 		len = min_t(u64, em->len - offset, max_len);
5986 	} else {
5987 		len = em->len - offset;
5988 	}
5989 
5990 	io_geom->len = len;
5991 	io_geom->offset = offset;
5992 	io_geom->stripe_len = stripe_len;
5993 	io_geom->stripe_nr = stripe_nr;
5994 	io_geom->stripe_offset = stripe_offset;
5995 	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
5996 
5997 out:
5998 	/* once for us */
5999 	free_extent_map(em);
6000 	return ret;
6001 }
6002 
6003 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6004 			     enum btrfs_map_op op,
6005 			     u64 logical, u64 *length,
6006 			     struct btrfs_bio **bbio_ret,
6007 			     int mirror_num, int need_raid_map)
6008 {
6009 	struct extent_map *em;
6010 	struct map_lookup *map;
6011 	u64 stripe_offset;
6012 	u64 stripe_nr;
6013 	u64 stripe_len;
6014 	u32 stripe_index;
6015 	int data_stripes;
6016 	int i;
6017 	int ret = 0;
6018 	int num_stripes;
6019 	int max_errors = 0;
6020 	int tgtdev_indexes = 0;
6021 	struct btrfs_bio *bbio = NULL;
6022 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6023 	int dev_replace_is_ongoing = 0;
6024 	int num_alloc_stripes;
6025 	int patch_the_first_stripe_for_dev_replace = 0;
6026 	u64 physical_to_patch_in_first_stripe = 0;
6027 	u64 raid56_full_stripe_start = (u64)-1;
6028 	struct btrfs_io_geometry geom;
6029 
6030 	ASSERT(bbio_ret);
6031 
6032 	if (op == BTRFS_MAP_DISCARD)
6033 		return __btrfs_map_block_for_discard(fs_info, logical,
6034 						     *length, bbio_ret);
6035 
6036 	ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6037 	if (ret < 0)
6038 		return ret;
6039 
6040 	em = btrfs_get_chunk_map(fs_info, logical, *length);
6041 	ASSERT(!IS_ERR(em));
6042 	map = em->map_lookup;
6043 
6044 	*length = geom.len;
6045 	stripe_len = geom.stripe_len;
6046 	stripe_nr = geom.stripe_nr;
6047 	stripe_offset = geom.stripe_offset;
6048 	raid56_full_stripe_start = geom.raid56_stripe_offset;
6049 	data_stripes = nr_data_stripes(map);
6050 
6051 	down_read(&dev_replace->rwsem);
6052 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6053 	/*
6054 	 * Hold the semaphore for read during the whole operation; a write is
6055 	 * requested at commit time but must wait.
6056 	 */
6057 	if (!dev_replace_is_ongoing)
6058 		up_read(&dev_replace->rwsem);
6059 
6060 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6061 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6062 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6063 						    dev_replace->srcdev->devid,
6064 						    &mirror_num,
6065 					    &physical_to_patch_in_first_stripe);
6066 		if (ret)
6067 			goto out;
6068 		else
6069 			patch_the_first_stripe_for_dev_replace = 1;
6070 	} else if (mirror_num > map->num_stripes) {
6071 		mirror_num = 0;
6072 	}
6073 
6074 	num_stripes = 1;
6075 	stripe_index = 0;
6076 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6077 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6078 				&stripe_index);
6079 		if (!need_full_stripe(op))
6080 			mirror_num = 1;
6081 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6082 		if (need_full_stripe(op))
6083 			num_stripes = map->num_stripes;
6084 		else if (mirror_num)
6085 			stripe_index = mirror_num - 1;
6086 		else {
6087 			stripe_index = find_live_mirror(fs_info, map, 0,
6088 					    dev_replace_is_ongoing);
6089 			mirror_num = stripe_index + 1;
6090 		}
6091 
6092 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6093 		if (need_full_stripe(op)) {
6094 			num_stripes = map->num_stripes;
6095 		} else if (mirror_num) {
6096 			stripe_index = mirror_num - 1;
6097 		} else {
6098 			mirror_num = 1;
6099 		}
6100 
6101 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6102 		u32 factor = map->num_stripes / map->sub_stripes;
6103 
6104 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6105 		stripe_index *= map->sub_stripes;
6106 
6107 		if (need_full_stripe(op))
6108 			num_stripes = map->sub_stripes;
6109 		else if (mirror_num)
6110 			stripe_index += mirror_num - 1;
6111 		else {
6112 			int old_stripe_index = stripe_index;
6113 			stripe_index = find_live_mirror(fs_info, map,
6114 					      stripe_index,
6115 					      dev_replace_is_ongoing);
6116 			mirror_num = stripe_index - old_stripe_index + 1;
6117 		}
6118 
6119 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6120 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6121 			/* push stripe_nr back to the start of the full stripe */
6122 			stripe_nr = div64_u64(raid56_full_stripe_start,
6123 					stripe_len * data_stripes);
6124 
6125 			/* RAID[56] write or recovery. Return all stripes */
6126 			num_stripes = map->num_stripes;
6127 			max_errors = nr_parity_stripes(map);
6128 
6129 			*length = map->stripe_len;
6130 			stripe_index = 0;
6131 			stripe_offset = 0;
6132 		} else {
6133 			/*
6134 			 * Mirror #0 or #1 means the original data block.
6135 			 * Mirror #2 is RAID5 parity block.
6136 			 * Mirror #3 is RAID6 Q block.
6137 			 */
6138 			stripe_nr = div_u64_rem(stripe_nr,
6139 					data_stripes, &stripe_index);
6140 			if (mirror_num > 1)
6141 				stripe_index = data_stripes + mirror_num - 2;
6142 
6143 			/* We distribute the parity blocks across stripes */
6144 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6145 					&stripe_index);
6146 			if (!need_full_stripe(op) && mirror_num <= 1)
6147 				mirror_num = 1;
6148 		}
6149 	} else {
6150 		/*
6151 		 * after this, stripe_nr is the number of stripes on this
6152 		 * device we have to walk to find the data, and stripe_index is
6153 		 * the number of our device in the stripe array
6154 		 */
6155 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6156 				&stripe_index);
6157 		mirror_num = stripe_index + 1;
6158 	}
6159 	if (stripe_index >= map->num_stripes) {
6160 		btrfs_crit(fs_info,
6161 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6162 			   stripe_index, map->num_stripes);
6163 		ret = -EINVAL;
6164 		goto out;
6165 	}
6166 
6167 	num_alloc_stripes = num_stripes;
6168 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6169 		if (op == BTRFS_MAP_WRITE)
6170 			num_alloc_stripes <<= 1;
6171 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6172 			num_alloc_stripes++;
6173 		tgtdev_indexes = num_stripes;
6174 	}
6175 
6176 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6177 	if (!bbio) {
6178 		ret = -ENOMEM;
6179 		goto out;
6180 	}
6181 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
6182 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
6183 
6184 	/* build raid_map */
6185 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6186 	    (need_full_stripe(op) || mirror_num > 1)) {
6187 		u64 tmp;
6188 		unsigned rot;
6189 
6190 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
6191 				 sizeof(struct btrfs_bio_stripe) *
6192 				 num_alloc_stripes +
6193 				 sizeof(int) * tgtdev_indexes);
6194 
6195 		/* Work out the disk rotation on this stripe-set */
6196 		div_u64_rem(stripe_nr, num_stripes, &rot);
6197 
6198 		/* Fill in the logical address of each stripe */
6199 		tmp = stripe_nr * data_stripes;
6200 		for (i = 0; i < data_stripes; i++)
6201 			bbio->raid_map[(i+rot) % num_stripes] =
6202 				em->start + (tmp + i) * map->stripe_len;
6203 
6204 		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
6205 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6206 			bbio->raid_map[(i+rot+1) % num_stripes] =
6207 				RAID6_Q_STRIPE;
6208 	}
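
	/*
	 * Rotation example (hypothetical numbers): RAID5 over 3 devices
	 * with stripe_nr 1 gives rot 1, so the two data stripes land in
	 * raid_map slots 1 and 2 and the parity marker in slot 0; the
	 * sort below then moves the parity stripe to the end.
	 */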
6209 
6210 
6211 	for (i = 0; i < num_stripes; i++) {
6212 		bbio->stripes[i].physical =
6213 			map->stripes[stripe_index].physical +
6214 			stripe_offset +
6215 			stripe_nr * map->stripe_len;
6216 		bbio->stripes[i].dev =
6217 			map->stripes[stripe_index].dev;
6218 		stripe_index++;
6219 	}
6220 
6221 	if (need_full_stripe(op))
6222 		max_errors = btrfs_chunk_max_errors(map);
6223 
6224 	if (bbio->raid_map)
6225 		sort_parity_stripes(bbio, num_stripes);
6226 
6227 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6228 	    need_full_stripe(op)) {
6229 		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6230 					  &max_errors);
6231 	}
6232 
6233 	*bbio_ret = bbio;
6234 	bbio->map_type = map->type;
6235 	bbio->num_stripes = num_stripes;
6236 	bbio->max_errors = max_errors;
6237 	bbio->mirror_num = mirror_num;
6238 
6239 	/*
6240 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6241 	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
6242 	 * mirror_num == num_stripes + 1 && the dev_replace target drive is
6243 	 * available as a mirror.
6244 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6245 		WARN_ON(num_stripes > 1);
6246 		bbio->stripes[0].dev = dev_replace->tgtdev;
6247 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6248 		bbio->mirror_num = map->num_stripes + 1;
6249 	}
6250 out:
6251 	if (dev_replace_is_ongoing) {
6252 		lockdep_assert_held(&dev_replace->rwsem);
6253 		/* Unlock and let waiting writers proceed */
6254 		up_read(&dev_replace->rwsem);
6255 	}
6256 	free_extent_map(em);
6257 	return ret;
6258 }
6259 
6260 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6261 		      u64 logical, u64 *length,
6262 		      struct btrfs_bio **bbio_ret, int mirror_num)
6263 {
6264 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6265 				 mirror_num, 0);
6266 }
6267 
6268 /* For Scrub/replace */
6269 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6270 		     u64 logical, u64 *length,
6271 		     struct btrfs_bio **bbio_ret)
6272 {
6273 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6274 }
6275 
6276 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
6277 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
6278 {
6279 	struct extent_map *em;
6280 	struct map_lookup *map;
6281 	u64 *buf;
6282 	u64 bytenr;
6283 	u64 length;
6284 	u64 stripe_nr;
6285 	u64 rmap_len;
6286 	int i, j, nr = 0;
6287 
6288 	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
6289 	if (IS_ERR(em))
6290 		return -EIO;
6291 
6292 	map = em->map_lookup;
6293 	length = em->len;
6294 	rmap_len = map->stripe_len;
6295 
6296 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
6297 		length = div_u64(length, map->num_stripes / map->sub_stripes);
6298 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6299 		length = div_u64(length, map->num_stripes);
6300 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6301 		length = div_u64(length, nr_data_stripes(map));
6302 		rmap_len = map->stripe_len * nr_data_stripes(map);
6303 	}
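
	/*
	 * Example (hypothetical numbers): a 128K RAID0 chunk over 2 devices
	 * leaves length == 64K per device, so only physical offsets within
	 * that 64K window of a stripe reverse-map into this chunk.
	 */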
6304 
6305 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
6306 	BUG_ON(!buf); /* -ENOMEM */
6307 
6308 	for (i = 0; i < map->num_stripes; i++) {
6309 		if (map->stripes[i].physical > physical ||
6310 		    map->stripes[i].physical + length <= physical)
6311 			continue;
6312 
6313 		stripe_nr = physical - map->stripes[i].physical;
6314 		stripe_nr = div64_u64(stripe_nr, map->stripe_len);
6315 
6316 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6317 			stripe_nr = stripe_nr * map->num_stripes + i;
6318 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
6319 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6320 			stripe_nr = stripe_nr * map->num_stripes + i;
6321 		}
6322 		/* else if RAID[56], multiply by nr_data_stripes(); or just
6323 		 * use rmap_len below instead of map->stripe_len */
6324 
6325 		bytenr = chunk_start + stripe_nr * rmap_len;
6326 		WARN_ON(nr >= map->num_stripes);
6327 		for (j = 0; j < nr; j++) {
6328 			if (buf[j] == bytenr)
6329 				break;
6330 		}
6331 		if (j == nr) {
6332 			WARN_ON(nr >= map->num_stripes);
6333 			buf[nr++] = bytenr;
6334 		}
6335 	}
6336 
6337 	*logical = buf;
6338 	*naddrs = nr;
6339 	*stripe_len = rmap_len;
6340 
6341 	free_extent_map(em);
6342 	return 0;
6343 }
6344 
6345 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6346 {
6347 	bio->bi_private = bbio->private;
6348 	bio->bi_end_io = bbio->end_io;
6349 	bio_endio(bio);
6350 
6351 	btrfs_put_bbio(bbio);
6352 }
6353 
6354 static void btrfs_end_bio(struct bio *bio)
6355 {
6356 	struct btrfs_bio *bbio = bio->bi_private;
6357 	int is_orig_bio = 0;
6358 
6359 	if (bio->bi_status) {
6360 		atomic_inc(&bbio->error);
6361 		if (bio->bi_status == BLK_STS_IOERR ||
6362 		    bio->bi_status == BLK_STS_TARGET) {
6363 			unsigned int stripe_index =
6364 				btrfs_io_bio(bio)->stripe_index;
6365 			struct btrfs_device *dev;
6366 
6367 			BUG_ON(stripe_index >= bbio->num_stripes);
6368 			dev = bbio->stripes[stripe_index].dev;
6369 			if (dev->bdev) {
6370 				if (bio_op(bio) == REQ_OP_WRITE)
6371 					btrfs_dev_stat_inc_and_print(dev,
6372 						BTRFS_DEV_STAT_WRITE_ERRS);
6373 				else if (!(bio->bi_opf & REQ_RAHEAD))
6374 					btrfs_dev_stat_inc_and_print(dev,
6375 						BTRFS_DEV_STAT_READ_ERRS);
6376 				if (bio->bi_opf & REQ_PREFLUSH)
6377 					btrfs_dev_stat_inc_and_print(dev,
6378 						BTRFS_DEV_STAT_FLUSH_ERRS);
6379 			}
6380 		}
6381 	}
6382 
6383 	if (bio == bbio->orig_bio)
6384 		is_orig_bio = 1;
6385 
6386 	btrfs_bio_counter_dec(bbio->fs_info);
6387 
6388 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6389 		if (!is_orig_bio) {
6390 			bio_put(bio);
6391 			bio = bbio->orig_bio;
6392 		}
6393 
6394 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6395 		/* only send an error to the higher layers if it is
6396 		 * beyond the tolerance of the btrfs bio
6397 		 */
6398 		if (atomic_read(&bbio->error) > bbio->max_errors) {
6399 			bio->bi_status = BLK_STS_IOERR;
6400 		} else {
6401 			/*
6402 			 * This bio is actually up to date; we didn't
6403 			 * go over the max number of errors.
6404 			 */
6405 			bio->bi_status = BLK_STS_OK;
6406 		}
6407 
6408 		btrfs_end_bbio(bbio, bio);
6409 	} else if (!is_orig_bio) {
6410 		bio_put(bio);
6411 	}
6412 }
6413 
6414 /*
6415  * see run_scheduled_bios for a description of why bios are collected for
6416  * async submit.
6417  *
6418  * This will add one bio to the pending list for a device and make sure
6419  * the work struct is scheduled.
6420  */
6421 static noinline void btrfs_schedule_bio(struct btrfs_device *device,
6422 					struct bio *bio)
6423 {
6424 	struct btrfs_fs_info *fs_info = device->fs_info;
6425 	int should_queue = 1;
6426 	struct btrfs_pending_bios *pending_bios;
6427 
6428 	/* don't bother with additional async steps for reads, right now */
6429 	if (bio_op(bio) == REQ_OP_READ) {
6430 		btrfsic_submit_bio(bio);
6431 		return;
6432 	}
6433 
6434 	WARN_ON(bio->bi_next);
6435 	bio->bi_next = NULL;
6436 
6437 	spin_lock(&device->io_lock);
6438 	if (op_is_sync(bio->bi_opf))
6439 		pending_bios = &device->pending_sync_bios;
6440 	else
6441 		pending_bios = &device->pending_bios;
6442 
6443 	if (pending_bios->tail)
6444 		pending_bios->tail->bi_next = bio;
6445 
6446 	pending_bios->tail = bio;
6447 	if (!pending_bios->head)
6448 		pending_bios->head = bio;
6449 	if (device->running_pending)
6450 		should_queue = 0;
6451 
6452 	spin_unlock(&device->io_lock);
6453 
6454 	if (should_queue)
6455 		btrfs_queue_work(fs_info->submit_workers, &device->work);
6456 }
6457 
6458 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6459 			      u64 physical, int dev_nr, int async)
6460 {
6461 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6462 	struct btrfs_fs_info *fs_info = bbio->fs_info;
6463 
6464 	bio->bi_private = bbio;
6465 	btrfs_io_bio(bio)->stripe_index = dev_nr;
6466 	bio->bi_end_io = btrfs_end_bio;
6467 	bio->bi_iter.bi_sector = physical >> 9;
6468 	btrfs_debug_in_rcu(fs_info,
6469 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6470 		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6471 		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
6472 		bio->bi_iter.bi_size);
6473 	bio_set_dev(bio, dev->bdev);
6474 
6475 	btrfs_bio_counter_inc_noblocked(fs_info);
6476 
6477 	if (async)
6478 		btrfs_schedule_bio(dev, bio);
6479 	else
6480 		btrfsic_submit_bio(bio);
6481 }
6482 
6483 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6484 {
6485 	atomic_inc(&bbio->error);
6486 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6487 		/* Should be the original bio. */
6488 		WARN_ON(bio != bbio->orig_bio);
6489 
6490 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6491 		bio->bi_iter.bi_sector = logical >> 9;
6492 		if (atomic_read(&bbio->error) > bbio->max_errors)
6493 			bio->bi_status = BLK_STS_IOERR;
6494 		else
6495 			bio->bi_status = BLK_STS_OK;
6496 		btrfs_end_bbio(bbio, bio);
6497 	}
6498 }
6499 
6500 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6501 			   int mirror_num, int async_submit)
6502 {
6503 	struct btrfs_device *dev;
6504 	struct bio *first_bio = bio;
6505 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6506 	u64 length = 0;
6507 	u64 map_length;
6508 	int ret;
6509 	int dev_nr;
6510 	int total_devs;
6511 	struct btrfs_bio *bbio = NULL;
6512 
6513 	length = bio->bi_iter.bi_size;
6514 	map_length = length;
6515 
6516 	btrfs_bio_counter_inc_blocked(fs_info);
6517 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6518 				&map_length, &bbio, mirror_num, 1);
6519 	if (ret) {
6520 		btrfs_bio_counter_dec(fs_info);
6521 		return errno_to_blk_status(ret);
6522 	}
6523 
6524 	total_devs = bbio->num_stripes;
6525 	bbio->orig_bio = first_bio;
6526 	bbio->private = first_bio->bi_private;
6527 	bbio->end_io = first_bio->bi_end_io;
6528 	bbio->fs_info = fs_info;
6529 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6530 
6531 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6532 	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6533 		/* In this case, map_length has been set to the length of
6534 		   a single stripe, not the whole write */
6535 		if (bio_op(bio) == REQ_OP_WRITE) {
6536 			ret = raid56_parity_write(fs_info, bio, bbio,
6537 						  map_length);
6538 		} else {
6539 			ret = raid56_parity_recover(fs_info, bio, bbio,
6540 						    map_length, mirror_num, 1);
6541 		}
6542 
6543 		btrfs_bio_counter_dec(fs_info);
6544 		return errno_to_blk_status(ret);
6545 	}
6546 
6547 	if (map_length < length) {
6548 		btrfs_crit(fs_info,
6549 			   "mapping failed logical %llu bio len %llu len %llu",
6550 			   logical, length, map_length);
6551 		BUG();
6552 	}
6553 
6554 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6555 		dev = bbio->stripes[dev_nr].dev;
6556 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6557 						   &dev->dev_state) ||
6558 		    (bio_op(first_bio) == REQ_OP_WRITE &&
6559 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6560 			bbio_error(bbio, first_bio, logical);
6561 			continue;
6562 		}
6563 
6564 		if (dev_nr < total_devs - 1)
6565 			bio = btrfs_bio_clone(first_bio);
6566 		else
6567 			bio = first_bio;
6568 
6569 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6570 				  dev_nr, async_submit);
6571 	}
6572 	btrfs_bio_counter_dec(fs_info);
6573 	return BLK_STS_OK;
6574 }
6575 
6576 /*
6577  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6578  * return NULL.
6579  *
6580  * If devid and uuid are both specified, the match must be exact, otherwise
6581  * only devid is used.
6582  *
6583  * If @seed is true, traverse through the seed devices.
6584  */
6585 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6586 				       u64 devid, u8 *uuid, u8 *fsid,
6587 				       bool seed)
6588 {
6589 	struct btrfs_device *device;
6590 
6591 	while (fs_devices) {
6592 		if (!fsid ||
6593 		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6594 			list_for_each_entry(device, &fs_devices->devices,
6595 					    dev_list) {
6596 				if (device->devid == devid &&
6597 				    (!uuid || memcmp(device->uuid, uuid,
6598 						     BTRFS_UUID_SIZE) == 0))
6599 					return device;
6600 			}
6601 		}
6602 		if (seed)
6603 			fs_devices = fs_devices->seed;
6604 		else
6605 			return NULL;
6606 	}
6607 	return NULL;
6608 }
6609 
6610 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6611 					    u64 devid, u8 *dev_uuid)
6612 {
6613 	struct btrfs_device *device;
6614 
6615 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6616 	if (IS_ERR(device))
6617 		return device;
6618 
6619 	list_add(&device->dev_list, &fs_devices->devices);
6620 	device->fs_devices = fs_devices;
6621 	fs_devices->num_devices++;
6622 
6623 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6624 	fs_devices->missing_devices++;
6625 
6626 	return device;
6627 }
6628 
6629 /**
6630  * btrfs_alloc_device - allocate struct btrfs_device
6631  * @fs_info:	used only for generating a new devid, can be NULL if
6632  *		devid is provided (i.e. @devid != NULL).
6633  * @devid:	a pointer to devid for this device.  If NULL a new devid
6634  *		is generated.
6635  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6636  *		is generated.
6637  *
6638  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6639  * on error.  Returned struct is not linked onto any lists and must be
6640  * destroyed with btrfs_free_device.
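 *
 * Typical calls (a sketch): pass a known devid and uuid, as add_missing_dev()
 * does, or pass a non-NULL @fs_info with NULL @devid and @uuid to have a
 * fresh devid looked up and a random UUID generated.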
6641  */
6642 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6643 					const u64 *devid,
6644 					const u8 *uuid)
6645 {
6646 	struct btrfs_device *dev;
6647 	u64 tmp;
6648 
6649 	if (WARN_ON(!devid && !fs_info))
6650 		return ERR_PTR(-EINVAL);
6651 
6652 	dev = __alloc_device();
6653 	if (IS_ERR(dev))
6654 		return dev;
6655 
6656 	if (devid)
6657 		tmp = *devid;
6658 	else {
6659 		int ret;
6660 
6661 		ret = find_next_devid(fs_info, &tmp);
6662 		if (ret) {
6663 			btrfs_free_device(dev);
6664 			return ERR_PTR(ret);
6665 		}
6666 	}
6667 	dev->devid = tmp;
6668 
6669 	if (uuid)
6670 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6671 	else
6672 		generate_random_uuid(dev->uuid);
6673 
6674 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6675 			pending_bios_fn, NULL, NULL);
6676 
6677 	return dev;
6678 }
6679 
6680 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6681 					u64 devid, u8 *uuid, bool error)
6682 {
6683 	if (error)
6684 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6685 			      devid, uuid);
6686 	else
6687 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6688 			      devid, uuid);
6689 }
6690 
6691 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6692 {
6693 	int index = btrfs_bg_flags_to_raid_index(type);
6694 	int ncopies = btrfs_raid_array[index].ncopies;
6695 	int data_stripes;
6696 
6697 	switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6698 	case BTRFS_BLOCK_GROUP_RAID5:
6699 		data_stripes = num_stripes - 1;
6700 		break;
6701 	case BTRFS_BLOCK_GROUP_RAID6:
6702 		data_stripes = num_stripes - 2;
6703 		break;
6704 	default:
6705 		data_stripes = num_stripes / ncopies;
6706 		break;
6707 	}
6708 	return div_u64(chunk_len, data_stripes);
6709 }
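
/*
 * Example (hypothetical numbers): a 4G RAID6 chunk with num_stripes == 6 has
 * 4 data stripes, so each device contributes a 1G stripe; a RAID1 chunk has
 * data_stripes == 1 (2 stripes / ncopies 2), so each mirror holds the full
 * chunk length.
 */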
6710 
6711 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6712 			  struct btrfs_chunk *chunk)
6713 {
6714 	struct btrfs_fs_info *fs_info = leaf->fs_info;
6715 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6716 	struct map_lookup *map;
6717 	struct extent_map *em;
6718 	u64 logical;
6719 	u64 length;
6720 	u64 devid;
6721 	u8 uuid[BTRFS_UUID_SIZE];
6722 	int num_stripes;
6723 	int ret;
6724 	int i;
6725 
6726 	logical = key->offset;
6727 	length = btrfs_chunk_length(leaf, chunk);
6728 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6729 
6730 	/*
6731 	 * Only need to verify the chunk item if we're reading from the sys chunk
6732 	 * array, as a chunk item in a tree block is already verified by the tree-checker.
6733 	 */
6734 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6735 		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6736 		if (ret)
6737 			return ret;
6738 	}
6739 
6740 	read_lock(&map_tree->lock);
6741 	em = lookup_extent_mapping(map_tree, logical, 1);
6742 	read_unlock(&map_tree->lock);
6743 
6744 	/* already mapped? */
6745 	if (em && em->start <= logical && em->start + em->len > logical) {
6746 		free_extent_map(em);
6747 		return 0;
6748 	} else if (em) {
6749 		free_extent_map(em);
6750 	}
6751 
6752 	em = alloc_extent_map();
6753 	if (!em)
6754 		return -ENOMEM;
6755 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6756 	if (!map) {
6757 		free_extent_map(em);
6758 		return -ENOMEM;
6759 	}
6760 
6761 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6762 	em->map_lookup = map;
6763 	em->start = logical;
6764 	em->len = length;
6765 	em->orig_start = 0;
6766 	em->block_start = 0;
6767 	em->block_len = em->len;
6768 
6769 	map->num_stripes = num_stripes;
6770 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6771 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6772 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6773 	map->type = btrfs_chunk_type(leaf, chunk);
6774 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6775 	map->verified_stripes = 0;
6776 	em->orig_block_len = calc_stripe_length(map->type, em->len,
6777 						map->num_stripes);
6778 	for (i = 0; i < num_stripes; i++) {
6779 		map->stripes[i].physical =
6780 			btrfs_stripe_offset_nr(leaf, chunk, i);
6781 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6782 		read_extent_buffer(leaf, uuid, (unsigned long)
6783 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6784 				   BTRFS_UUID_SIZE);
6785 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6786 							devid, uuid, NULL, true);
6787 		if (!map->stripes[i].dev &&
6788 		    !btrfs_test_opt(fs_info, DEGRADED)) {
6789 			free_extent_map(em);
6790 			btrfs_report_missing_device(fs_info, devid, uuid, true);
6791 			return -ENOENT;
6792 		}
6793 		if (!map->stripes[i].dev) {
6794 			map->stripes[i].dev =
6795 				add_missing_dev(fs_info->fs_devices, devid,
6796 						uuid);
6797 			if (IS_ERR(map->stripes[i].dev)) {
6798 				free_extent_map(em);
6799 				btrfs_err(fs_info,
6800 					"failed to init missing dev %llu: %ld",
6801 					devid, PTR_ERR(map->stripes[i].dev));
6802 				return PTR_ERR(map->stripes[i].dev);
6803 			}
6804 			btrfs_report_missing_device(fs_info, devid, uuid, false);
6805 		}
6806 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6807 				&(map->stripes[i].dev->dev_state));
6808 
6809 	}
6810 
6811 	write_lock(&map_tree->lock);
6812 	ret = add_extent_mapping(map_tree, em, 0);
6813 	write_unlock(&map_tree->lock);
6814 	if (ret < 0) {
6815 		btrfs_err(fs_info,
6816 			  "failed to add chunk map, start=%llu len=%llu: %d",
6817 			  em->start, em->len, ret);
6818 	}
6819 	free_extent_map(em);
6820 
6821 	return ret;
6822 }
6823 
6824 static void fill_device_from_item(struct extent_buffer *leaf,
6825 				 struct btrfs_dev_item *dev_item,
6826 				 struct btrfs_device *device)
6827 {
6828 	unsigned long ptr;
6829 
6830 	device->devid = btrfs_device_id(leaf, dev_item);
6831 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6832 	device->total_bytes = device->disk_total_bytes;
6833 	device->commit_total_bytes = device->disk_total_bytes;
6834 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6835 	device->commit_bytes_used = device->bytes_used;
6836 	device->type = btrfs_device_type(leaf, dev_item);
6837 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6838 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6839 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6840 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6841 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6842 
6843 	ptr = btrfs_device_uuid(dev_item);
6844 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6845 }
6846 
6847 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6848 						  u8 *fsid)
6849 {
6850 	struct btrfs_fs_devices *fs_devices;
6851 	int ret;
6852 
6853 	lockdep_assert_held(&uuid_mutex);
6854 	ASSERT(fsid);
6855 
6856 	fs_devices = fs_info->fs_devices->seed;
6857 	while (fs_devices) {
6858 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6859 			return fs_devices;
6860 
6861 		fs_devices = fs_devices->seed;
6862 	}
6863 
6864 	fs_devices = find_fsid(fsid, NULL);
6865 	if (!fs_devices) {
6866 		if (!btrfs_test_opt(fs_info, DEGRADED))
6867 			return ERR_PTR(-ENOENT);
6868 
6869 		fs_devices = alloc_fs_devices(fsid, NULL);
6870 		if (IS_ERR(fs_devices))
6871 			return fs_devices;
6872 
6873 		fs_devices->seeding = 1;
6874 		fs_devices->opened = 1;
6875 		return fs_devices;
6876 	}
6877 
6878 	fs_devices = clone_fs_devices(fs_devices);
6879 	if (IS_ERR(fs_devices))
6880 		return fs_devices;
6881 
6882 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6883 	if (ret) {
6884 		free_fs_devices(fs_devices);
6885 		fs_devices = ERR_PTR(ret);
6886 		goto out;
6887 	}
6888 
6889 	if (!fs_devices->seeding) {
6890 		close_fs_devices(fs_devices);
6891 		free_fs_devices(fs_devices);
6892 		fs_devices = ERR_PTR(-EINVAL);
6893 		goto out;
6894 	}
6895 
6896 	fs_devices->seed = fs_info->fs_devices->seed;
6897 	fs_info->fs_devices->seed = fs_devices;
6898 out:
6899 	return fs_devices;
6900 }
6901 
6902 static int read_one_dev(struct extent_buffer *leaf,
6903 			struct btrfs_dev_item *dev_item)
6904 {
6905 	struct btrfs_fs_info *fs_info = leaf->fs_info;
6906 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6907 	struct btrfs_device *device;
6908 	u64 devid;
6909 	int ret;
6910 	u8 fs_uuid[BTRFS_FSID_SIZE];
6911 	u8 dev_uuid[BTRFS_UUID_SIZE];
6912 
6913 	devid = btrfs_device_id(leaf, dev_item);
6914 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6915 			   BTRFS_UUID_SIZE);
6916 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6917 			   BTRFS_FSID_SIZE);
6918 
6919 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6920 		fs_devices = open_seed_devices(fs_info, fs_uuid);
6921 		if (IS_ERR(fs_devices))
6922 			return PTR_ERR(fs_devices);
6923 	}
6924 
6925 	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6926 				   fs_uuid, true);
6927 	if (!device) {
6928 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
6929 			btrfs_report_missing_device(fs_info, devid,
6930 							dev_uuid, true);
6931 			return -ENOENT;
6932 		}
6933 
6934 		device = add_missing_dev(fs_devices, devid, dev_uuid);
6935 		if (IS_ERR(device)) {
6936 			btrfs_err(fs_info,
6937 				"failed to add missing dev %llu: %ld",
6938 				devid, PTR_ERR(device));
6939 			return PTR_ERR(device);
6940 		}
6941 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6942 	} else {
6943 		if (!device->bdev) {
6944 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
6945 				btrfs_report_missing_device(fs_info,
6946 						devid, dev_uuid, true);
6947 				return -ENOENT;
6948 			}
6949 			btrfs_report_missing_device(fs_info, devid,
6950 							dev_uuid, false);
6951 		}
6952 
6953 		if (!device->bdev &&
6954 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6955 			/*
6956 			 * This happens when a device that was properly set up
6957 			 * in the device info lists suddenly goes bad.
6958 			 * device->bdev is NULL, and so we have to set the
6959 			 * BTRFS_DEV_STATE_MISSING bit here.
6960 			 */
6961 			device->fs_devices->missing_devices++;
6962 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6963 		}
6964 
6965 		/* Move the device to its own fs_devices */
6966 		if (device->fs_devices != fs_devices) {
6967 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6968 							&device->dev_state));
6969 
6970 			list_move(&device->dev_list, &fs_devices->devices);
6971 			device->fs_devices->num_devices--;
6972 			fs_devices->num_devices++;
6973 
6974 			device->fs_devices->missing_devices--;
6975 			fs_devices->missing_devices++;
6976 
6977 			device->fs_devices = fs_devices;
6978 		}
6979 	}
6980 
6981 	if (device->fs_devices != fs_info->fs_devices) {
6982 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6983 		if (device->generation !=
6984 		    btrfs_device_generation(leaf, dev_item))
6985 			return -EINVAL;
6986 	}
6987 
6988 	fill_device_from_item(leaf, dev_item, device);
6989 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6990 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6991 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6992 		device->fs_devices->total_rw_bytes += device->total_bytes;
6993 		atomic64_add(device->total_bytes - device->bytes_used,
6994 				&fs_info->free_chunk_space);
6995 	}
6996 	ret = 0;
6997 	return ret;
6998 }
6999 
7000 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7001 {
7002 	struct btrfs_root *root = fs_info->tree_root;
7003 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7004 	struct extent_buffer *sb;
7005 	struct btrfs_disk_key *disk_key;
7006 	struct btrfs_chunk *chunk;
7007 	u8 *array_ptr;
7008 	unsigned long sb_array_offset;
7009 	int ret = 0;
7010 	u32 num_stripes;
7011 	u32 array_size;
7012 	u32 len = 0;
7013 	u32 cur_offset;
7014 	u64 type;
7015 	struct btrfs_key key;
7016 
7017 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7018 	/*
7019 	 * This will create an extent buffer of nodesize; the superblock size
7020 	 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7021 	 * overallocate, but we can keep it as-is as only the first page is used.
7022 	 */
7023 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
7024 	if (IS_ERR(sb))
7025 		return PTR_ERR(sb);
7026 	set_extent_buffer_uptodate(sb);
7027 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
7028 	/*
7029 	 * The sb extent buffer is artificial and just used to read the system array.
7030 	 * The set_extent_buffer_uptodate() call does not properly mark all its
7031 	 * pages up-to-date when the page is larger: the extent does not cover
7032 	 * the whole page and consequently check_page_uptodate does not find
7033 	 * all the page's extents up-to-date (the hole beyond sb), so
7034 	 * write_extent_buffer then triggers a WARN_ON.
7035 	 *
7036 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7037 	 * but sb spans only this function. Add an explicit SetPageUptodate call
7038 	 * to silence the warning eg. on PowerPC 64.
7039 	 */
7040 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7041 		SetPageUptodate(sb->pages[0]);
7042 
7043 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7044 	array_size = btrfs_super_sys_array_size(super_copy);
7045 
7046 	array_ptr = super_copy->sys_chunk_array;
7047 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7048 	cur_offset = 0;
7049 
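	/*
	 * The sys_chunk_array is a packed sequence of pairs (a sketch):
	 *
	 *   { btrfs_disk_key } { btrfs_chunk + num_stripes btrfs_stripe entries }
	 *   { btrfs_disk_key } { btrfs_chunk + ... } ...
	 *
	 * Each loop iteration below consumes one such key/chunk pair.
	 */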
7050 	while (cur_offset < array_size) {
7051 		disk_key = (struct btrfs_disk_key *)array_ptr;
7052 		len = sizeof(*disk_key);
7053 		if (cur_offset + len > array_size)
7054 			goto out_short_read;
7055 
7056 		btrfs_disk_key_to_cpu(&key, disk_key);
7057 
7058 		array_ptr += len;
7059 		sb_array_offset += len;
7060 		cur_offset += len;
7061 
7062 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
7063 			chunk = (struct btrfs_chunk *)sb_array_offset;
7064 			/*
7065 			 * At least one btrfs_chunk with one stripe must be
7066 			 * present; the exact stripe count check comes afterwards
7067 			 */
7068 			len = btrfs_chunk_item_size(1);
7069 			if (cur_offset + len > array_size)
7070 				goto out_short_read;
7071 
7072 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7073 			if (!num_stripes) {
7074 				btrfs_err(fs_info,
7075 					"invalid number of stripes %u in sys_array at offset %u",
7076 					num_stripes, cur_offset);
7077 				ret = -EIO;
7078 				break;
7079 			}
7080 
7081 			type = btrfs_chunk_type(sb, chunk);
7082 			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7083 				btrfs_err(fs_info,
7084 			    "invalid chunk type %llu in sys_array at offset %u",
7085 					type, cur_offset);
7086 				ret = -EIO;
7087 				break;
7088 			}
7089 
7090 			len = btrfs_chunk_item_size(num_stripes);
7091 			if (cur_offset + len > array_size)
7092 				goto out_short_read;
7093 
7094 			ret = read_one_chunk(&key, sb, chunk);
7095 			if (ret)
7096 				break;
7097 		} else {
7098 			btrfs_err(fs_info,
7099 			    "unexpected item type %u in sys_array at offset %u",
7100 				  (u32)key.type, cur_offset);
7101 			ret = -EIO;
7102 			break;
7103 		}
7104 		array_ptr += len;
7105 		sb_array_offset += len;
7106 		cur_offset += len;
7107 	}
7108 	clear_extent_buffer_uptodate(sb);
7109 	free_extent_buffer_stale(sb);
7110 	return ret;
7111 
7112 out_short_read:
7113 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7114 			len, cur_offset);
7115 	clear_extent_buffer_uptodate(sb);
7116 	free_extent_buffer_stale(sb);
7117 	return -EIO;
7118 }
7119 
7120 /*
7121  * Check if all chunks in the fs are OK for read-write degraded mount
7122  *
7123  * If the @failing_dev is specified, it's accounted as missing.
7124  *
7125  * Return true if all chunks meet the minimal RW mount requirements.
7126  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7127  */
7128 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7129 					struct btrfs_device *failing_dev)
7130 {
7131 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7132 	struct extent_map *em;
7133 	u64 next_start = 0;
7134 	bool ret = true;
7135 
7136 	read_lock(&map_tree->lock);
7137 	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7138 	read_unlock(&map_tree->lock);
7139 	/* No chunk at all? Return false anyway */
7140 	if (!em) {
7141 		ret = false;
7142 		goto out;
7143 	}
7144 	while (em) {
7145 		struct map_lookup *map;
7146 		int missing = 0;
7147 		int max_tolerated;
7148 		int i;
7149 
7150 		map = em->map_lookup;
7151 		max_tolerated =
7152 			btrfs_get_num_tolerated_disk_barrier_failures(
7153 					map->type);
7154 		for (i = 0; i < map->num_stripes; i++) {
7155 			struct btrfs_device *dev = map->stripes[i].dev;
7156 
7157 			if (!dev || !dev->bdev ||
7158 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7159 			    dev->last_flush_error)
7160 				missing++;
7161 			else if (failing_dev && failing_dev == dev)
7162 				missing++;
7163 		}
7164 		if (missing > max_tolerated) {
7165 			if (!failing_dev)
7166 				btrfs_warn(fs_info,
7167 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7168 				   em->start, missing, max_tolerated);
7169 			free_extent_map(em);
7170 			ret = false;
7171 			goto out;
7172 		}
7173 		next_start = extent_map_end(em);
7174 		free_extent_map(em);
7175 
7176 		read_lock(&map_tree->lock);
7177 		em = lookup_extent_mapping(map_tree, next_start,
7178 					   (u64)(-1) - next_start);
7179 		read_unlock(&map_tree->lock);
7180 	}
7181 out:
7182 	return ret;
7183 }
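
/*
 * For instance (hypothetical): a chunk whose profile tolerates one device
 * failure still passes the check above with a single missing or flush-failing
 * stripe device, while two such devices make the filesystem unsuitable for a
 * writable mount.
 */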
7184 
7185 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7186 {
7187 	struct btrfs_root *root = fs_info->chunk_root;
7188 	struct btrfs_path *path;
7189 	struct extent_buffer *leaf;
7190 	struct btrfs_key key;
7191 	struct btrfs_key found_key;
7192 	int ret;
7193 	int slot;
7194 	u64 total_dev = 0;
7195 
7196 	path = btrfs_alloc_path();
7197 	if (!path)
7198 		return -ENOMEM;
7199 
7200 	/*
7201 	 * uuid_mutex is needed only if we are mounting a sprout FS,
7202 	 * i.e. one grown from a seed device; otherwise it is unnecessary.
7203 	 */
7204 	mutex_lock(&uuid_mutex);
7205 	mutex_lock(&fs_info->chunk_mutex);
7206 
7207 	/*
7208 	 * Read all device items, and then all the chunk items. All
7209 	 * device items are found before any chunk item (their object id
7210 	 * is smaller than the lowest possible object id for a chunk
7211 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7212 	 */
7213 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7214 	key.offset = 0;
7215 	key.type = 0;
7216 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7217 	if (ret < 0)
7218 		goto error;
7219 	while (1) {
7220 		leaf = path->nodes[0];
7221 		slot = path->slots[0];
7222 		if (slot >= btrfs_header_nritems(leaf)) {
7223 			ret = btrfs_next_leaf(root, path);
7224 			if (ret == 0)
7225 				continue;
7226 			if (ret < 0)
7227 				goto error;
7228 			break;
7229 		}
7230 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7231 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7232 			struct btrfs_dev_item *dev_item;
7233 			dev_item = btrfs_item_ptr(leaf, slot,
7234 						  struct btrfs_dev_item);
7235 			ret = read_one_dev(leaf, dev_item);
7236 			if (ret)
7237 				goto error;
7238 			total_dev++;
7239 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7240 			struct btrfs_chunk *chunk;
7241 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7242 			ret = read_one_chunk(&found_key, leaf, chunk);
7243 			if (ret)
7244 				goto error;
7245 		}
7246 		path->slots[0]++;
7247 	}
7248 
7249 	/*
7250 	 * After loading chunk tree, we've got all device information,
7251 	 * do another round of validation checks.
7252 	 */
7253 	if (btrfs_super_num_devices(fs_info->super_copy) != total_dev) {
7254 		btrfs_err(fs_info,
7255 	   "super_num_devices %llu mismatch with num_devices %llu found in chunk tree",
7256 			  btrfs_super_num_devices(fs_info->super_copy),
7257 			  total_dev);
7258 		ret = -EINVAL;
7259 		goto error;
7260 	}
7261 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7262 	    fs_info->fs_devices->total_rw_bytes) {
7263 		btrfs_err(fs_info,
7264 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7265 			  btrfs_super_total_bytes(fs_info->super_copy),
7266 			  fs_info->fs_devices->total_rw_bytes);
7267 		ret = -EINVAL;
7268 		goto error;
7269 	}
7270 	ret = 0;
7271 error:
7272 	mutex_unlock(&fs_info->chunk_mutex);
7273 	mutex_unlock(&uuid_mutex);
7274 
7275 	btrfs_free_path(path);
7276 	return ret;
7277 }
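
/*
 * Editor's note: the single-pass walk above works because of key ordering.
 * Device items sort under BTRFS_DEV_ITEMS_OBJECTID (1) while chunk items
 * sort under BTRFS_FIRST_CHUNK_TREE_OBJECTID (256):
 *
 *	(1,   BTRFS_DEV_ITEM_KEY,   devid)         <- all devices first
 *	(256, BTRFS_CHUNK_ITEM_KEY, chunk_start)   <- then all chunks
 *
 * so by the time read_one_chunk() runs, every stripe can be resolved to a
 * struct btrfs_device populated by read_one_dev().
 */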
7278 
7279 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7280 {
7281 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7282 	struct btrfs_device *device;
7283 
7284 	while (fs_devices) {
7285 		mutex_lock(&fs_devices->device_list_mutex);
7286 		list_for_each_entry(device, &fs_devices->devices, dev_list)
7287 			device->fs_info = fs_info;
7288 		mutex_unlock(&fs_devices->device_list_mutex);
7289 
7290 		fs_devices = fs_devices->seed;
7291 	}
7292 }
7293 
7294 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7295 				 const struct btrfs_dev_stats_item *ptr,
7296 				 int index)
7297 {
7298 	u64 val;
7299 
7300 	read_extent_buffer(eb, &val,
7301 			   offsetof(struct btrfs_dev_stats_item, values) +
7302 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7303 			   sizeof(val));
7304 	return val;
7305 }
7306 
7307 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7308 				      struct btrfs_dev_stats_item *ptr,
7309 				      int index, u64 val)
7310 {
7311 	write_extent_buffer(eb, &val,
7312 			    offsetof(struct btrfs_dev_stats_item, values) +
7313 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7314 			    sizeof(val));
7315 }
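
/*
 * Editor's note: both accessors above address into the on-disk item, which
 * per ctree.h is simply a packed array of little-endian counters:
 *
 *	struct btrfs_dev_stats_item {
 *		__le64 values[BTRFS_DEV_STAT_VALUES_MAX];
 *	} __attribute__ ((__packed__));
 *
 * offsetof(..., values) is currently 0; keeping it in the arithmetic makes
 * the accessors robust should a field ever be added before the array.
 */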
7316 
7317 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7318 {
7319 	struct btrfs_key key;
7320 	struct btrfs_root *dev_root = fs_info->dev_root;
7321 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7322 	struct extent_buffer *eb;
7323 	int slot;
7324 	int ret = 0;
7325 	struct btrfs_device *device;
7326 	struct btrfs_path *path = NULL;
7327 	int i;
7328 
7329 	path = btrfs_alloc_path();
7330 	if (!path)
7331 		return -ENOMEM;
7332 
7333 	mutex_lock(&fs_devices->device_list_mutex);
7334 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7335 		int item_size;
7336 		struct btrfs_dev_stats_item *ptr;
7337 
7338 		key.objectid = BTRFS_DEV_STATS_OBJECTID;
7339 		key.type = BTRFS_PERSISTENT_ITEM_KEY;
7340 		key.offset = device->devid;
7341 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7342 		if (ret) {
7343 			for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7344 				btrfs_dev_stat_set(device, i, 0);
7345 			device->dev_stats_valid = 1;
7346 			btrfs_release_path(path);
7347 			continue;
7348 		}
7349 		slot = path->slots[0];
7350 		eb = path->nodes[0];
7351 		item_size = btrfs_item_size_nr(eb, slot);
7352 
7353 		ptr = btrfs_item_ptr(eb, slot,
7354 				     struct btrfs_dev_stats_item);
7355 
7356 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7357 			if (item_size >= (1 + i) * sizeof(__le64))
7358 				btrfs_dev_stat_set(device, i,
7359 					btrfs_dev_stats_value(eb, ptr, i));
7360 			else
7361 				btrfs_dev_stat_set(device, i, 0);
7362 		}
7363 
7364 		device->dev_stats_valid = 1;
7365 		btrfs_dev_stat_print_on_load(device);
7366 		btrfs_release_path(path);
7367 	}
7368 	mutex_unlock(&fs_devices->device_list_mutex);
7369 
7370 	btrfs_free_path(path);
7371 	return ret < 0 ? ret : 0;
7372 }
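
/*
 * Editor's note: comparing item_size against (1 + i) * sizeof(__le64)
 * above makes the load forward compatible.  For example, a 24-byte item
 * written by a (hypothetical) kernel that knew only three counters yields
 * values for i = 0..2 and zeroes for the rest; update_dev_stat_item()
 * below handles the reverse case by deleting undersized items and
 * rewriting them at the current full size.
 */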
7373 
7374 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7375 				struct btrfs_device *device)
7376 {
7377 	struct btrfs_fs_info *fs_info = trans->fs_info;
7378 	struct btrfs_root *dev_root = fs_info->dev_root;
7379 	struct btrfs_path *path;
7380 	struct btrfs_key key;
7381 	struct extent_buffer *eb;
7382 	struct btrfs_dev_stats_item *ptr;
7383 	int ret;
7384 	int i;
7385 
7386 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7387 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7388 	key.offset = device->devid;
7389 
7390 	path = btrfs_alloc_path();
7391 	if (!path)
7392 		return -ENOMEM;
7393 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7394 	if (ret < 0) {
7395 		btrfs_warn_in_rcu(fs_info,
7396 			"error %d while searching for dev_stats item for device %s",
7397 			      ret, rcu_str_deref(device->name));
7398 		goto out;
7399 	}
7400 
7401 	if (ret == 0 &&
7402 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7403 		/* need to delete old one and insert a new one */
7404 		ret = btrfs_del_item(trans, dev_root, path);
7405 		if (ret != 0) {
7406 			btrfs_warn_in_rcu(fs_info,
7407 				"delete too small dev_stats item for device %s failed %d",
7408 				      rcu_str_deref(device->name), ret);
7409 			goto out;
7410 		}
7411 		ret = 1;
7412 	}
7413 
7414 	if (ret == 1) {
7415 		/* need to insert a new item */
7416 		btrfs_release_path(path);
7417 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7418 					      &key, sizeof(*ptr));
7419 		if (ret < 0) {
7420 			btrfs_warn_in_rcu(fs_info,
7421 				"insert dev_stats item for device %s failed %d",
7422 				rcu_str_deref(device->name), ret);
7423 			goto out;
7424 		}
7425 	}
7426 
7427 	eb = path->nodes[0];
7428 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7429 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7430 		btrfs_set_dev_stats_value(eb, ptr, i,
7431 					  btrfs_dev_stat_read(device, i));
7432 	btrfs_mark_buffer_dirty(eb);
7433 
7434 out:
7435 	btrfs_free_path(path);
7436 	return ret;
7437 }
7438 
7439 /*
7440  * Called from commit_transaction(). Writes all changed device stats to disk.
7441  */
7442 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7443 {
7444 	struct btrfs_fs_info *fs_info = trans->fs_info;
7445 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7446 	struct btrfs_device *device;
7447 	int stats_cnt;
7448 	int ret = 0;
7449 
7450 	mutex_lock(&fs_devices->device_list_mutex);
7451 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7452 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7453 		if (!device->dev_stats_valid || stats_cnt == 0)
7454 			continue;
7455 
7457 		/*
7458 		 * There is a LOAD-LOAD control dependency between the value of
7459 		 * dev_stats_ccnt and updating the on-disk values which requires
7460 		 * reading the in-memory counters. Such control dependencies
7461 		 * require explicit read memory barriers.
7462 		 *
7463 		 * This memory barrier pairs with smp_mb__before_atomic in
7464 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7465 		 * barrier implied by atomic_xchg in
7466 		 * btrfs_dev_stats_read_and_reset.
7467 		 */
7468 		smp_rmb();
7469 
7470 		ret = update_dev_stat_item(trans, device);
7471 		if (!ret)
7472 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7473 	}
7474 	mutex_unlock(&fs_devices->device_list_mutex);
7475 
7476 	return ret;
7477 }
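
/*
 * Example (editor's sketch): the write side of the pairing described above
 * lives in volumes.h; btrfs_dev_stat_inc() does roughly:
 *
 *	atomic_inc(dev->dev_stat_values + index);
 *	smp_mb__before_atomic();   // order the value store before ccnt
 *	atomic_inc(&dev->dev_stats_ccnt);
 *
 * so once btrfs_run_dev_stats() samples a non-zero dev_stats_ccnt and
 * issues smp_rmb(), the counters read by update_dev_stat_item() are at
 * least as new as the sampled count.
 */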
7478 
7479 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7480 {
7481 	btrfs_dev_stat_inc(dev, index);
7482 	btrfs_dev_stat_print_on_error(dev);
7483 }
7484 
7485 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7486 {
7487 	if (!dev->dev_stats_valid)
7488 		return;
7489 	btrfs_err_rl_in_rcu(dev->fs_info,
7490 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7491 			   rcu_str_deref(dev->name),
7492 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7493 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7494 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7495 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7496 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7497 }
7498 
7499 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7500 {
7501 	int i;
7502 
7503 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7504 		if (btrfs_dev_stat_read(dev, i) != 0)
7505 			break;
7506 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7507 		return; /* all values == 0, suppress message */
7508 
7509 	btrfs_info_in_rcu(dev->fs_info,
7510 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7511 	       rcu_str_deref(dev->name),
7512 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7513 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7514 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7515 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7516 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7517 }
7518 
7519 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7520 			struct btrfs_ioctl_get_dev_stats *stats)
7521 {
7522 	struct btrfs_device *dev;
7523 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7524 	int i;
7525 
7526 	mutex_lock(&fs_devices->device_list_mutex);
7527 	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7528 				true);
7529 	mutex_unlock(&fs_devices->device_list_mutex);
7530 
7531 	if (!dev) {
7532 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7533 		return -ENODEV;
7534 	} else if (!dev->dev_stats_valid) {
7535 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7536 		return -ENODEV;
7537 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7538 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7539 			if (stats->nr_items > i)
7540 				stats->values[i] =
7541 					btrfs_dev_stat_read_and_reset(dev, i);
7542 			else
7543 				btrfs_dev_stat_set(dev, i, 0);
7544 		}
7545 	} else {
7546 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7547 			if (stats->nr_items > i)
7548 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7549 	}
7550 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7551 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7552 	return 0;
7553 }
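
/*
 * Example (editor's sketch, userspace): this handler backs the
 * BTRFS_IOC_GET_DEV_STATS ioctl from <linux/btrfs.h>.  A minimal caller:
 *
 *	struct btrfs_ioctl_get_dev_stats args = { 0 };
 *	int fd = open("/mnt", O_RDONLY);   // any path on the filesystem
 *
 *	args.devid = 1;
 *	args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
 *	if (fd >= 0 && ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &args) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)args.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 *
 * Setting BTRFS_DEV_STATS_RESET in args.flags zeroes the counters after
 * they are copied out (the read_and_reset branch above).
 */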
7554 
7555 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
7556 {
7557 	struct buffer_head *bh;
7558 	struct btrfs_super_block *disk_super;
7559 	int copy_num;
7560 
7561 	if (!bdev)
7562 		return;
7563 
7564 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7565 		copy_num++) {
7566 
7567 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7568 			continue;
7569 
7570 		disk_super = (struct btrfs_super_block *)bh->b_data;
7571 
7572 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7573 		set_buffer_dirty(bh);
7574 		sync_dirty_buffer(bh);
7575 		brelse(bh);
7576 	}
7577 
7578 	/* Notify udev that device has changed */
7579 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7580 
7581 	/* Update ctime/mtime for device path for libblkid */
7582 	update_dev_time(device_path);
7583 }
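
/*
 * Editor's note: btrfs_read_dev_one_super() locates each copy via
 * btrfs_sb_offset() (disk-io.h), which places the mirrors at exponentially
 * growing offsets:
 *
 *	copy 0:  64KiB
 *	copy 1:  64MiB
 *	copy 2: 256GiB
 *
 * Zeroing just the magic in every reachable copy is enough to make the
 * device unrecognizable as btrfs while leaving its other contents intact.
 */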
7584 
7585 /*
7586  * Update the size and bytes used for each device where it changed.  This is
7587  * delayed since we would otherwise get errors while writing out the
7588  * superblocks.
7589  *
7590  * Must be invoked during transaction commit.
7591  */
7592 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7593 {
7594 	struct btrfs_device *curr, *next;
7595 
7596 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7597 
7598 	if (list_empty(&trans->dev_update_list))
7599 		return;
7600 
7601 	/*
7602 	 * We don't need the device_list_mutex here.  This list is owned by the
7603 	 * transaction and the transaction must complete before the device is
7604 	 * released.
7605 	 */
7606 	mutex_lock(&trans->fs_info->chunk_mutex);
7607 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7608 				 post_commit_list) {
7609 		list_del_init(&curr->post_commit_list);
7610 		curr->commit_total_bytes = curr->disk_total_bytes;
7611 		curr->commit_bytes_used = curr->bytes_used;
7612 	}
7613 	mutex_unlock(&trans->fs_info->chunk_mutex);
7614 }
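
/*
 * Example (editor's sketch): the producer side is a resize path such as
 * btrfs_grow_device(), which queues the device roughly like:
 *
 *	if (list_empty(&device->post_commit_list))
 *		list_add_tail(&device->post_commit_list,
 *			      &trans->transaction->dev_update_list);
 *
 * commit_total_bytes/commit_bytes_used are the values copied into the
 * on-disk dev items, so deferring the copy to commit time keeps the
 * superblocks consistent with the committed transaction.
 */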
7615 
7616 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7617 {
7618 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7619 	while (fs_devices) {
7620 		fs_devices->fs_info = fs_info;
7621 		fs_devices = fs_devices->seed;
7622 	}
7623 }
7624 
7625 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7626 {
7627 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7628 	while (fs_devices) {
7629 		fs_devices->fs_info = NULL;
7630 		fs_devices = fs_devices->seed;
7631 	}
7632 }
7633 
7634 /*
7635  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7636  */
7637 int btrfs_bg_type_to_factor(u64 flags)
7638 {
7639 	const int index = btrfs_bg_flags_to_raid_index(flags);
7640 
7641 	return btrfs_raid_array[index].ncopies;
7642 }
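
/*
 * Example (editor's sketch): for BTRFS_BLOCK_GROUP_RAID1 the table entry
 * has ncopies == 2, so a 1GiB logical chunk occupies about 2GiB of raw
 * device space:
 *
 *	u64 raw_bytes = chunk_len * btrfs_bg_type_to_factor(flags);
 *
 * This holds only for the simple profiles named above; RAID5/6 pay their
 * overhead through nparity rather than ncopies, so this factor does not
 * describe them.
 */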
7643 
7646 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7647 				 u64 chunk_offset, u64 devid,
7648 				 u64 physical_offset, u64 physical_len)
7649 {
7650 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7651 	struct extent_map *em;
7652 	struct map_lookup *map;
7653 	struct btrfs_device *dev;
7654 	u64 stripe_len;
7655 	bool found = false;
7656 	int ret = 0;
7657 	int i;
7658 
7659 	read_lock(&em_tree->lock);
7660 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7661 	read_unlock(&em_tree->lock);
7662 
7663 	if (!em) {
7664 		btrfs_err(fs_info,
7665 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7666 			  physical_offset, devid);
7667 		ret = -EUCLEAN;
7668 		goto out;
7669 	}
7670 
7671 	map = em->map_lookup;
7672 	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7673 	if (physical_len != stripe_len) {
7674 		btrfs_err(fs_info,
7675 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7676 			  physical_offset, devid, em->start, physical_len,
7677 			  stripe_len);
7678 		ret = -EUCLEAN;
7679 		goto out;
7680 	}
7681 
7682 	for (i = 0; i < map->num_stripes; i++) {
7683 		if (map->stripes[i].dev->devid == devid &&
7684 		    map->stripes[i].physical == physical_offset) {
7685 			found = true;
7686 			if (map->verified_stripes >= map->num_stripes) {
7687 				btrfs_err(fs_info,
7688 				"too many dev extents for chunk %llu found",
7689 					  em->start);
7690 				ret = -EUCLEAN;
7691 				goto out;
7692 			}
7693 			map->verified_stripes++;
7694 			break;
7695 		}
7696 	}
7697 	if (!found) {
7698 		btrfs_err(fs_info,
7699 	"dev extent physical offset %llu devid %llu has no corresponding chunk stripe",
7700 			physical_offset, devid);
7701 		ret = -EUCLEAN;
7702 	}
7703 
7704 	/* Make sure no dev extent is beyond device boundary */
7705 	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7706 	if (!dev) {
7707 		btrfs_err(fs_info, "failed to find devid %llu", devid);
7708 		ret = -EUCLEAN;
7709 		goto out;
7710 	}
7711 
7712 	/* It's possible this device is a dummy for seed device */
7713 	if (dev->disk_total_bytes == 0) {
7714 		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
7715 					NULL, false);
7716 		if (!dev) {
7717 			btrfs_err(fs_info, "failed to find seed devid %llu",
7718 				  devid);
7719 			ret = -EUCLEAN;
7720 			goto out;
7721 		}
7722 	}
7723 
7724 	if (physical_offset + physical_len > dev->disk_total_bytes) {
7725 		btrfs_err(fs_info,
7726 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7727 			  devid, physical_offset, physical_len,
7728 			  dev->disk_total_bytes);
7729 		ret = -EUCLEAN;
7730 		goto out;
7731 	}
7732 out:
7733 	free_extent_map(em);
7734 	return ret;
7735 }
7736 
7737 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7738 {
7739 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7740 	struct extent_map *em;
7741 	struct rb_node *node;
7742 	int ret = 0;
7743 
7744 	read_lock(&em_tree->lock);
7745 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7746 		em = rb_entry(node, struct extent_map, rb_node);
7747 		if (em->map_lookup->num_stripes !=
7748 		    em->map_lookup->verified_stripes) {
7749 			btrfs_err(fs_info,
7750 			"chunk %llu has missing dev extent, have %d expect %d",
7751 				  em->start, em->map_lookup->verified_stripes,
7752 				  em->map_lookup->num_stripes);
7753 			ret = -EUCLEAN;
7754 			goto out;
7755 		}
7756 	}
7757 out:
7758 	read_unlock(&em_tree->lock);
7759 	return ret;
7760 }
7761 
7762 /*
7763  * Ensure that all dev extents are mapped to correct chunk, otherwise
7764  * later chunk allocation/free would cause unexpected behavior.
7765  *
7766  * NOTE: This will iterate through the whole device tree, which should be of
7767  * the same size level as the chunk tree.  This slightly increases mount time.
7768  */
7769 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7770 {
7771 	struct btrfs_path *path;
7772 	struct btrfs_root *root = fs_info->dev_root;
7773 	struct btrfs_key key;
7774 	u64 prev_devid = 0;
7775 	u64 prev_dev_ext_end = 0;
7776 	int ret = 0;
7777 
7778 	key.objectid = 1;
7779 	key.type = BTRFS_DEV_EXTENT_KEY;
7780 	key.offset = 0;
7781 
7782 	path = btrfs_alloc_path();
7783 	if (!path)
7784 		return -ENOMEM;
7785 
7786 	path->reada = READA_FORWARD;
7787 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7788 	if (ret < 0)
7789 		goto out;
7790 
7791 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7792 		ret = btrfs_next_item(root, path);
7793 		if (ret < 0)
7794 			goto out;
7795 		/* No dev extents at all? Not good */
7796 		if (ret > 0) {
7797 			ret = -EUCLEAN;
7798 			goto out;
7799 		}
7800 	}
7801 	while (1) {
7802 		struct extent_buffer *leaf = path->nodes[0];
7803 		struct btrfs_dev_extent *dext;
7804 		int slot = path->slots[0];
7805 		u64 chunk_offset;
7806 		u64 physical_offset;
7807 		u64 physical_len;
7808 		u64 devid;
7809 
7810 		btrfs_item_key_to_cpu(leaf, &key, slot);
7811 		if (key.type != BTRFS_DEV_EXTENT_KEY)
7812 			break;
7813 		devid = key.objectid;
7814 		physical_offset = key.offset;
7815 
7816 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7817 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7818 		physical_len = btrfs_dev_extent_length(leaf, dext);
7819 
7820 		/* Check if this dev extent overlaps with the previous one */
7821 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7822 			btrfs_err(fs_info,
7823 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7824 				  devid, physical_offset, prev_dev_ext_end);
7825 			ret = -EUCLEAN;
7826 			goto out;
7827 		}
7828 
7829 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7830 					    physical_offset, physical_len);
7831 		if (ret < 0)
7832 			goto out;
7833 		prev_devid = devid;
7834 		prev_dev_ext_end = physical_offset + physical_len;
7835 
7836 		ret = btrfs_next_item(root, path);
7837 		if (ret < 0)
7838 			goto out;
7839 		if (ret > 0) {
7840 			ret = 0;
7841 			break;
7842 		}
7843 	}
7844 
7845 	/* Ensure all chunks have corresponding dev extents */
7846 	ret = verify_chunk_dev_extent_mapping(fs_info);
7847 out:
7848 	btrfs_free_path(path);
7849 	return ret;
7850 }
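
/*
 * Editor's note: the walk above decodes dev extent items whose key is
 * (devid, BTRFS_DEV_EXTENT_KEY, physical_offset) and whose body, per
 * ctree.h, points back at the owning chunk:
 *
 *	struct btrfs_dev_extent {
 *		__le64 chunk_tree;
 *		__le64 chunk_objectid;
 *		__le64 chunk_offset;	// logical start of the chunk
 *		__le64 length;		// bytes covered on this device
 *		u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
 *	} __attribute__ ((__packed__));
 *
 * The (devid, physical_offset) key ordering is what makes the simple
 * prev_dev_ext_end comparison sufficient to detect overlaps.
 */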
7851 
7852 /*
7853  * Check whether the given block group or device is pinned by any inode being
7854  * used as a swapfile.
7855  */
7856 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7857 {
7858 	struct btrfs_swapfile_pin *sp;
7859 	struct rb_node *node;
7860 
7861 	spin_lock(&fs_info->swapfile_pins_lock);
7862 	node = fs_info->swapfile_pins.rb_node;
7863 	while (node) {
7864 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7865 		if (ptr < sp->ptr)
7866 			node = node->rb_left;
7867 		else if (ptr > sp->ptr)
7868 			node = node->rb_right;
7869 		else
7870 			break;
7871 	}
7872 	spin_unlock(&fs_info->swapfile_pins_lock);
7873 	return node != NULL;
7874 }
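
/*
 * Editor's note: the tree searched above holds struct btrfs_swapfile_pin
 * entries (ctree.h), keyed by the raw pointer of the pinned object:
 *
 *	struct btrfs_swapfile_pin {
 *		struct rb_node node;
 *		void *ptr;		// block group or btrfs_device
 *		struct inode *inode;	// swapfile holding the pin
 *		bool is_block_group;	// which type ptr points to
 *	};
 *
 * Pointer identity is a valid key because a pin is inserted before the
 * swapfile is activated and removed only when it is deactivated, so the
 * pinned object outlives its entry.
 */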
7875