xref: /openbmc/linux/fs/btrfs/volumes.c (revision 91c38504)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include <linux/namei.h>
18 #include "misc.h"
19 #include "ctree.h"
20 #include "extent_map.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "print-tree.h"
24 #include "volumes.h"
25 #include "raid56.h"
26 #include "async-thread.h"
27 #include "check-integrity.h"
28 #include "rcu-string.h"
29 #include "dev-replace.h"
30 #include "sysfs.h"
31 #include "tree-checker.h"
32 #include "space-info.h"
33 #include "block-group.h"
34 #include "discard.h"
35 #include "zoned.h"
36 
37 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
38 	[BTRFS_RAID_RAID10] = {
39 		.sub_stripes	= 2,
40 		.dev_stripes	= 1,
41 		.devs_max	= 0,	/* 0 == as many as possible */
42 		.devs_min	= 2,
43 		.tolerated_failures = 1,
44 		.devs_increment	= 2,
45 		.ncopies	= 2,
46 		.nparity        = 0,
47 		.raid_name	= "raid10",
48 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
49 		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
50 	},
51 	[BTRFS_RAID_RAID1] = {
52 		.sub_stripes	= 1,
53 		.dev_stripes	= 1,
54 		.devs_max	= 2,
55 		.devs_min	= 2,
56 		.tolerated_failures = 1,
57 		.devs_increment	= 2,
58 		.ncopies	= 2,
59 		.nparity        = 0,
60 		.raid_name	= "raid1",
61 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
62 		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
63 	},
64 	[BTRFS_RAID_RAID1C3] = {
65 		.sub_stripes	= 1,
66 		.dev_stripes	= 1,
67 		.devs_max	= 3,
68 		.devs_min	= 3,
69 		.tolerated_failures = 2,
70 		.devs_increment	= 3,
71 		.ncopies	= 3,
72 		.nparity        = 0,
73 		.raid_name	= "raid1c3",
74 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
75 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
76 	},
77 	[BTRFS_RAID_RAID1C4] = {
78 		.sub_stripes	= 1,
79 		.dev_stripes	= 1,
80 		.devs_max	= 4,
81 		.devs_min	= 4,
82 		.tolerated_failures = 3,
83 		.devs_increment	= 4,
84 		.ncopies	= 4,
85 		.nparity        = 0,
86 		.raid_name	= "raid1c4",
87 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
88 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
89 	},
90 	[BTRFS_RAID_DUP] = {
91 		.sub_stripes	= 1,
92 		.dev_stripes	= 2,
93 		.devs_max	= 1,
94 		.devs_min	= 1,
95 		.tolerated_failures = 0,
96 		.devs_increment	= 1,
97 		.ncopies	= 2,
98 		.nparity        = 0,
99 		.raid_name	= "dup",
100 		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
101 		.mindev_error	= 0,
102 	},
103 	[BTRFS_RAID_RAID0] = {
104 		.sub_stripes	= 1,
105 		.dev_stripes	= 1,
106 		.devs_max	= 0,
107 		.devs_min	= 1,
108 		.tolerated_failures = 0,
109 		.devs_increment	= 1,
110 		.ncopies	= 1,
111 		.nparity        = 0,
112 		.raid_name	= "raid0",
113 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
114 		.mindev_error	= 0,
115 	},
116 	[BTRFS_RAID_SINGLE] = {
117 		.sub_stripes	= 1,
118 		.dev_stripes	= 1,
119 		.devs_max	= 1,
120 		.devs_min	= 1,
121 		.tolerated_failures = 0,
122 		.devs_increment	= 1,
123 		.ncopies	= 1,
124 		.nparity        = 0,
125 		.raid_name	= "single",
126 		.bg_flag	= 0,
127 		.mindev_error	= 0,
128 	},
129 	[BTRFS_RAID_RAID5] = {
130 		.sub_stripes	= 1,
131 		.dev_stripes	= 1,
132 		.devs_max	= 0,
133 		.devs_min	= 2,
134 		.tolerated_failures = 1,
135 		.devs_increment	= 1,
136 		.ncopies	= 1,
137 		.nparity        = 1,
138 		.raid_name	= "raid5",
139 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
140 		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
141 	},
142 	[BTRFS_RAID_RAID6] = {
143 		.sub_stripes	= 1,
144 		.dev_stripes	= 1,
145 		.devs_max	= 0,
146 		.devs_min	= 3,
147 		.tolerated_failures = 2,
148 		.devs_increment	= 1,
149 		.ncopies	= 1,
150 		.nparity        = 2,
151 		.raid_name	= "raid6",
152 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
153 		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
154 	},
155 };
156 
157 /*
158  * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
159  * can be used as index to access btrfs_raid_array[].
160  */
161 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
162 {
163 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
164 		return BTRFS_RAID_RAID10;
165 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
166 		return BTRFS_RAID_RAID1;
167 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
168 		return BTRFS_RAID_RAID1C3;
169 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
170 		return BTRFS_RAID_RAID1C4;
171 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
172 		return BTRFS_RAID_DUP;
173 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
174 		return BTRFS_RAID_RAID0;
175 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
176 		return BTRFS_RAID_RAID5;
177 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
178 		return BTRFS_RAID_RAID6;
179 
180 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
181 }
182 
183 const char *btrfs_bg_type_to_raid_name(u64 flags)
184 {
185 	const int index = btrfs_bg_flags_to_raid_index(flags);
186 
187 	if (index >= BTRFS_NR_RAID_TYPES)
188 		return NULL;
189 
190 	return btrfs_raid_array[index].raid_name;
191 }
192 
193 /*
194  * Fill @buf with textual description of @bg_flags, no more than @size_buf
195  * bytes including terminating null byte.
196  */
197 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
198 {
199 	int i;
200 	int ret;
201 	char *bp = buf;
202 	u64 flags = bg_flags;
203 	u32 size_bp = size_buf;
204 
205 	if (!flags) {
206 		strcpy(bp, "NONE");
207 		return;
208 	}
209 
210 #define DESCRIBE_FLAG(flag, desc)						\
211 	do {								\
212 		if (flags & (flag)) {					\
213 			ret = snprintf(bp, size_bp, "%s|", (desc));	\
214 			if (ret < 0 || ret >= size_bp)			\
215 				goto out_overflow;			\
216 			size_bp -= ret;					\
217 			bp += ret;					\
218 			flags &= ~(flag);				\
219 		}							\
220 	} while (0)
221 
222 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
223 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
224 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
225 
226 	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
227 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
228 		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
229 			      btrfs_raid_array[i].raid_name);
230 #undef DESCRIBE_FLAG
231 
232 	if (flags) {
233 		ret = snprintf(bp, size_bp, "0x%llx|", flags);
234 		size_bp -= ret;
235 	}
236 
237 	if (size_bp < size_buf)
238 		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
239 
240 	/*
241 	 * The text is trimmed, it's up to the caller to provide sufficiently
242 	 * large buffer
243 	 */
244 out_overflow:;
245 }
246 
247 static int init_first_rw_device(struct btrfs_trans_handle *trans);
248 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
249 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
250 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
251 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
252 			     enum btrfs_map_op op,
253 			     u64 logical, u64 *length,
254 			     struct btrfs_io_context **bioc_ret,
255 			     int mirror_num, int need_raid_map);
256 
257 /*
258  * Device locking
259  * ==============
260  *
261  * There are several mutexes that protect manipulation of devices and low-level
262  * structures like chunks but not block groups, extents or files
263  *
264  * uuid_mutex (global lock)
265  * ------------------------
266  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
267  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
268  * device) or requested by the device= mount option
269  *
270  * the mutex can be very coarse and can cover long-running operations
271  *
272  * protects: updates to fs_devices counters like missing devices, rw devices,
273  * seeding, structure cloning, opening/closing devices at mount/umount time
274  *
275  * global::fs_devs - add, remove, updates to the global list
276  *
277  * does not protect: manipulation of the fs_devices::devices list in general
278  * but in mount context it could be used to exclude list modifications by eg.
279  * scan ioctl
280  *
281  * btrfs_device::name - renames (write side), read is RCU
282  *
283  * fs_devices::device_list_mutex (per-fs, with RCU)
284  * ------------------------------------------------
285  * protects updates to fs_devices::devices, ie. adding and deleting
286  *
287  * simple list traversal with read-only actions can be done with RCU protection
288  *
289  * may be used to exclude some operations from running concurrently without any
290  * modifications to the list (see write_all_supers)
291  *
292  * Is not required at mount and close times, because our device list is
293  * protected by the uuid_mutex at that point.
294  *
295  * balance_mutex
296  * -------------
297  * protects balance structures (status, state) and context accessed from
298  * several places (internally, ioctl)
299  *
300  * chunk_mutex
301  * -----------
302  * protects chunks, adding or removing during allocation, trim or when a new
303  * device is added/removed. Additionally it also protects post_commit_list of
304  * individual devices, since they can be added to the transaction's
305  * post_commit_list only with chunk_mutex held.
306  *
307  * cleaner_mutex
308  * -------------
309  * a big lock that is held by the cleaner thread and prevents running subvolume
310  * cleaning together with relocation or delayed iputs
311  *
312  *
313  * Lock nesting
314  * ============
315  *
316  * uuid_mutex
317  *   device_list_mutex
318  *     chunk_mutex
319  *   balance_mutex
320  *
321  *
322  * Exclusive operations
323  * ====================
324  *
325  * Maintains the exclusivity of the following operations that apply to the
326  * whole filesystem and cannot run in parallel.
327  *
328  * - Balance (*)
329  * - Device add
330  * - Device remove
331  * - Device replace (*)
332  * - Resize
333  *
334  * The device operations (as above) can be in one of the following states:
335  *
336  * - Running state
337  * - Paused state
338  * - Completed state
339  *
340  * Only device operations marked with (*) can go into the Paused state for the
341  * following reasons:
342  *
343  * - ioctl (only Balance can be Paused through ioctl)
344  * - filesystem remounted as read-only
345  * - filesystem unmounted and mounted as read-only
346  * - system power-cycle and filesystem mounted as read-only
347  * - filesystem or device errors leading to forced read-only
348  *
349  * The status of exclusive operation is set and cleared atomically.
350  * During the course of Paused state, fs_info::exclusive_operation remains set.
351  * A device operation in Paused or Running state can be canceled or resumed
352  * either by ioctl (Balance only) or when remounted as read-write.
353  * The exclusive status is cleared when the device operation is canceled or
354  * completed.
355  */
356 
357 DEFINE_MUTEX(uuid_mutex);
358 static LIST_HEAD(fs_uuids);
359 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
360 {
361 	return &fs_uuids;
362 }
363 
364 /*
365  * alloc_fs_devices - allocate struct btrfs_fs_devices
366  * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
367  * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
368  *
369  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
370  * The returned struct is not linked onto any lists and can be destroyed with
371  * kfree() right away.
372  */
373 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
374 						 const u8 *metadata_fsid)
375 {
376 	struct btrfs_fs_devices *fs_devs;
377 
378 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
379 	if (!fs_devs)
380 		return ERR_PTR(-ENOMEM);
381 
382 	mutex_init(&fs_devs->device_list_mutex);
383 
384 	INIT_LIST_HEAD(&fs_devs->devices);
385 	INIT_LIST_HEAD(&fs_devs->alloc_list);
386 	INIT_LIST_HEAD(&fs_devs->fs_list);
387 	INIT_LIST_HEAD(&fs_devs->seed_list);
388 	if (fsid)
389 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
390 
391 	if (metadata_fsid)
392 		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
393 	else if (fsid)
394 		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
395 
396 	return fs_devs;
397 }
398 
399 void btrfs_free_device(struct btrfs_device *device)
400 {
401 	WARN_ON(!list_empty(&device->post_commit_list));
402 	rcu_string_free(device->name);
403 	extent_io_tree_release(&device->alloc_state);
404 	bio_put(device->flush_bio);
405 	btrfs_destroy_dev_zone_info(device);
406 	kfree(device);
407 }
408 
409 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
410 {
411 	struct btrfs_device *device;
412 	WARN_ON(fs_devices->opened);
413 	while (!list_empty(&fs_devices->devices)) {
414 		device = list_entry(fs_devices->devices.next,
415 				    struct btrfs_device, dev_list);
416 		list_del(&device->dev_list);
417 		btrfs_free_device(device);
418 	}
419 	kfree(fs_devices);
420 }
421 
422 void __exit btrfs_cleanup_fs_uuids(void)
423 {
424 	struct btrfs_fs_devices *fs_devices;
425 
426 	while (!list_empty(&fs_uuids)) {
427 		fs_devices = list_entry(fs_uuids.next,
428 					struct btrfs_fs_devices, fs_list);
429 		list_del(&fs_devices->fs_list);
430 		free_fs_devices(fs_devices);
431 	}
432 }
433 
434 static noinline struct btrfs_fs_devices *find_fsid(
435 		const u8 *fsid, const u8 *metadata_fsid)
436 {
437 	struct btrfs_fs_devices *fs_devices;
438 
439 	ASSERT(fsid);
440 
441 	/* Handle non-split brain cases */
442 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
443 		if (metadata_fsid) {
444 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
445 			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
446 				      BTRFS_FSID_SIZE) == 0)
447 				return fs_devices;
448 		} else {
449 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
450 				return fs_devices;
451 		}
452 	}
453 	return NULL;
454 }
455 
456 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
457 				struct btrfs_super_block *disk_super)
458 {
459 
460 	struct btrfs_fs_devices *fs_devices;
461 
462 	/*
463 	 * Handle scanned device having completed its fsid change but
464 	 * belonging to a fs_devices that was created by first scanning
465 	 * a device which didn't have its fsid/metadata_uuid changed
466 	 * at all and the CHANGING_FSID_V2 flag set.
467 	 */
468 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
469 		if (fs_devices->fsid_change &&
470 		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
471 			   BTRFS_FSID_SIZE) == 0 &&
472 		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
473 			   BTRFS_FSID_SIZE) == 0) {
474 			return fs_devices;
475 		}
476 	}
477 	/*
478 	 * Handle scanned device having completed its fsid change but
479 	 * belonging to a fs_devices that was created by a device that
480 	 * has an outdated pair of fsid/metadata_uuid and
481 	 * CHANGING_FSID_V2 flag set.
482 	 */
483 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
484 		if (fs_devices->fsid_change &&
485 		    memcmp(fs_devices->metadata_uuid,
486 			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
487 		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
488 			   BTRFS_FSID_SIZE) == 0) {
489 			return fs_devices;
490 		}
491 	}
492 
493 	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
494 }
495 
496 
497 static int
498 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
499 		      int flush, struct block_device **bdev,
500 		      struct btrfs_super_block **disk_super)
501 {
502 	int ret;
503 
504 	*bdev = blkdev_get_by_path(device_path, flags, holder);
505 
506 	if (IS_ERR(*bdev)) {
507 		ret = PTR_ERR(*bdev);
508 		goto error;
509 	}
510 
511 	if (flush)
512 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
513 	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
514 	if (ret) {
515 		blkdev_put(*bdev, flags);
516 		goto error;
517 	}
518 	invalidate_bdev(*bdev);
519 	*disk_super = btrfs_read_dev_super(*bdev);
520 	if (IS_ERR(*disk_super)) {
521 		ret = PTR_ERR(*disk_super);
522 		blkdev_put(*bdev, flags);
523 		goto error;
524 	}
525 
526 	return 0;
527 
528 error:
529 	*bdev = NULL;
530 	return ret;
531 }
532 
533 /*
534  * Check if the device in the path matches the device in the given struct device.
535  *
536  * Returns:
537  *   true  If it is the same device.
538  *   false If it is not the same device or on error.
539  */
540 static bool device_matched(const struct btrfs_device *device, const char *path)
541 {
542 	char *device_name;
543 	dev_t dev_old;
544 	dev_t dev_new;
545 	int ret;
546 
547 	/*
548 	 * If we are looking for a device with the matching dev_t, then skip
549 	 * device without a name (a missing device).
550 	 */
551 	if (!device->name)
552 		return false;
553 
554 	device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
555 	if (!device_name)
556 		return false;
557 
558 	rcu_read_lock();
559 	scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
560 	rcu_read_unlock();
561 
562 	ret = lookup_bdev(device_name, &dev_old);
563 	kfree(device_name);
564 	if (ret)
565 		return false;
566 
567 	ret = lookup_bdev(path, &dev_new);
568 	if (ret)
569 		return false;
570 
571 	if (dev_old == dev_new)
572 		return true;
573 
574 	return false;
575 }
576 
577 /*
578  *  Search and remove all stale (devices which are not mounted) devices.
579  *  When both inputs are NULL, it will search and release all stale devices.
580  *  path:	Optional. When provided will it release all unmounted devices
581  *		matching this path only.
582  *  skip_dev:	Optional. Will skip this device when searching for the stale
583  *		devices.
584  *  Return:	0 for success or if @path is NULL.
585  * 		-EBUSY if @path is a mounted device.
586  * 		-ENOENT if @path does not match any device in the list.
587  */
588 static int btrfs_free_stale_devices(const char *path,
589 				     struct btrfs_device *skip_device)
590 {
591 	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
592 	struct btrfs_device *device, *tmp_device;
593 	int ret = 0;
594 
595 	lockdep_assert_held(&uuid_mutex);
596 
597 	if (path)
598 		ret = -ENOENT;
599 
600 	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
601 
602 		mutex_lock(&fs_devices->device_list_mutex);
603 		list_for_each_entry_safe(device, tmp_device,
604 					 &fs_devices->devices, dev_list) {
605 			if (skip_device && skip_device == device)
606 				continue;
607 			if (path && !device_matched(device, path))
608 				continue;
609 			if (fs_devices->opened) {
610 				/* for an already deleted device return 0 */
611 				if (path && ret != 0)
612 					ret = -EBUSY;
613 				break;
614 			}
615 
616 			/* delete the stale device */
617 			fs_devices->num_devices--;
618 			list_del(&device->dev_list);
619 			btrfs_free_device(device);
620 
621 			ret = 0;
622 		}
623 		mutex_unlock(&fs_devices->device_list_mutex);
624 
625 		if (fs_devices->num_devices == 0) {
626 			btrfs_sysfs_remove_fsid(fs_devices);
627 			list_del(&fs_devices->fs_list);
628 			free_fs_devices(fs_devices);
629 		}
630 	}
631 
632 	return ret;
633 }
634 
635 /*
636  * This is only used on mount, and we are protected from competing things
637  * messing with our fs_devices by the uuid_mutex, thus we do not need the
638  * fs_devices->device_list_mutex here.
639  */
640 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
641 			struct btrfs_device *device, fmode_t flags,
642 			void *holder)
643 {
644 	struct request_queue *q;
645 	struct block_device *bdev;
646 	struct btrfs_super_block *disk_super;
647 	u64 devid;
648 	int ret;
649 
650 	if (device->bdev)
651 		return -EINVAL;
652 	if (!device->name)
653 		return -EINVAL;
654 
655 	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
656 				    &bdev, &disk_super);
657 	if (ret)
658 		return ret;
659 
660 	devid = btrfs_stack_device_id(&disk_super->dev_item);
661 	if (devid != device->devid)
662 		goto error_free_page;
663 
664 	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
665 		goto error_free_page;
666 
667 	device->generation = btrfs_super_generation(disk_super);
668 
669 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
670 		if (btrfs_super_incompat_flags(disk_super) &
671 		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
672 			pr_err(
673 		"BTRFS: Invalid seeding and uuid-changed device detected\n");
674 			goto error_free_page;
675 		}
676 
677 		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
678 		fs_devices->seeding = true;
679 	} else {
680 		if (bdev_read_only(bdev))
681 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
682 		else
683 			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
684 	}
685 
686 	q = bdev_get_queue(bdev);
687 	if (!blk_queue_nonrot(q))
688 		fs_devices->rotating = true;
689 
690 	device->bdev = bdev;
691 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
692 	device->mode = flags;
693 
694 	fs_devices->open_devices++;
695 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
696 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
697 		fs_devices->rw_devices++;
698 		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
699 	}
700 	btrfs_release_disk_super(disk_super);
701 
702 	return 0;
703 
704 error_free_page:
705 	btrfs_release_disk_super(disk_super);
706 	blkdev_put(bdev, flags);
707 
708 	return -EINVAL;
709 }
710 
711 /*
712  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
713  * being created with a disk that has already completed its fsid change. Such
714  * disk can belong to an fs which has its FSID changed or to one which doesn't.
715  * Handle both cases here.
716  */
717 static struct btrfs_fs_devices *find_fsid_inprogress(
718 					struct btrfs_super_block *disk_super)
719 {
720 	struct btrfs_fs_devices *fs_devices;
721 
722 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
723 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
724 			   BTRFS_FSID_SIZE) != 0 &&
725 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
726 			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
727 			return fs_devices;
728 		}
729 	}
730 
731 	return find_fsid(disk_super->fsid, NULL);
732 }
733 
734 
735 static struct btrfs_fs_devices *find_fsid_changed(
736 					struct btrfs_super_block *disk_super)
737 {
738 	struct btrfs_fs_devices *fs_devices;
739 
740 	/*
741 	 * Handles the case where scanned device is part of an fs that had
742 	 * multiple successful changes of FSID but currently device didn't
743 	 * observe it. Meaning our fsid will be different than theirs. We need
744 	 * to handle two subcases :
745 	 *  1 - The fs still continues to have different METADATA/FSID uuids.
746 	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
747 	 *  are equal).
748 	 */
749 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
750 		/* Changed UUIDs */
751 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
752 			   BTRFS_FSID_SIZE) != 0 &&
753 		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
754 			   BTRFS_FSID_SIZE) == 0 &&
755 		    memcmp(fs_devices->fsid, disk_super->fsid,
756 			   BTRFS_FSID_SIZE) != 0)
757 			return fs_devices;
758 
759 		/* Unchanged UUIDs */
760 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
761 			   BTRFS_FSID_SIZE) == 0 &&
762 		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
763 			   BTRFS_FSID_SIZE) == 0)
764 			return fs_devices;
765 	}
766 
767 	return NULL;
768 }
769 
770 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
771 				struct btrfs_super_block *disk_super)
772 {
773 	struct btrfs_fs_devices *fs_devices;
774 
775 	/*
776 	 * Handle the case where the scanned device is part of an fs whose last
777 	 * metadata UUID change reverted it to the original FSID. At the same
778 	 * time * fs_devices was first created by another constitutent device
779 	 * which didn't fully observe the operation. This results in an
780 	 * btrfs_fs_devices created with metadata/fsid different AND
781 	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
782 	 * fs_devices equal to the FSID of the disk.
783 	 */
784 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
785 		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
786 			   BTRFS_FSID_SIZE) != 0 &&
787 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
788 			   BTRFS_FSID_SIZE) == 0 &&
789 		    fs_devices->fsid_change)
790 			return fs_devices;
791 	}
792 
793 	return NULL;
794 }
795 /*
796  * Add new device to list of registered devices
797  *
798  * Returns:
799  * device pointer which was just added or updated when successful
800  * error pointer when failed
801  */
802 static noinline struct btrfs_device *device_list_add(const char *path,
803 			   struct btrfs_super_block *disk_super,
804 			   bool *new_device_added)
805 {
806 	struct btrfs_device *device;
807 	struct btrfs_fs_devices *fs_devices = NULL;
808 	struct rcu_string *name;
809 	u64 found_transid = btrfs_super_generation(disk_super);
810 	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
811 	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
812 		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
813 	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
814 					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
815 
816 	if (fsid_change_in_progress) {
817 		if (!has_metadata_uuid)
818 			fs_devices = find_fsid_inprogress(disk_super);
819 		else
820 			fs_devices = find_fsid_changed(disk_super);
821 	} else if (has_metadata_uuid) {
822 		fs_devices = find_fsid_with_metadata_uuid(disk_super);
823 	} else {
824 		fs_devices = find_fsid_reverted_metadata(disk_super);
825 		if (!fs_devices)
826 			fs_devices = find_fsid(disk_super->fsid, NULL);
827 	}
828 
829 
830 	if (!fs_devices) {
831 		if (has_metadata_uuid)
832 			fs_devices = alloc_fs_devices(disk_super->fsid,
833 						      disk_super->metadata_uuid);
834 		else
835 			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
836 
837 		if (IS_ERR(fs_devices))
838 			return ERR_CAST(fs_devices);
839 
840 		fs_devices->fsid_change = fsid_change_in_progress;
841 
842 		mutex_lock(&fs_devices->device_list_mutex);
843 		list_add(&fs_devices->fs_list, &fs_uuids);
844 
845 		device = NULL;
846 	} else {
847 		struct btrfs_dev_lookup_args args = {
848 			.devid = devid,
849 			.uuid = disk_super->dev_item.uuid,
850 		};
851 
852 		mutex_lock(&fs_devices->device_list_mutex);
853 		device = btrfs_find_device(fs_devices, &args);
854 
855 		/*
856 		 * If this disk has been pulled into an fs devices created by
857 		 * a device which had the CHANGING_FSID_V2 flag then replace the
858 		 * metadata_uuid/fsid values of the fs_devices.
859 		 */
860 		if (fs_devices->fsid_change &&
861 		    found_transid > fs_devices->latest_generation) {
862 			memcpy(fs_devices->fsid, disk_super->fsid,
863 					BTRFS_FSID_SIZE);
864 
865 			if (has_metadata_uuid)
866 				memcpy(fs_devices->metadata_uuid,
867 				       disk_super->metadata_uuid,
868 				       BTRFS_FSID_SIZE);
869 			else
870 				memcpy(fs_devices->metadata_uuid,
871 				       disk_super->fsid, BTRFS_FSID_SIZE);
872 
873 			fs_devices->fsid_change = false;
874 		}
875 	}
876 
877 	if (!device) {
878 		if (fs_devices->opened) {
879 			mutex_unlock(&fs_devices->device_list_mutex);
880 			return ERR_PTR(-EBUSY);
881 		}
882 
883 		device = btrfs_alloc_device(NULL, &devid,
884 					    disk_super->dev_item.uuid);
885 		if (IS_ERR(device)) {
886 			mutex_unlock(&fs_devices->device_list_mutex);
887 			/* we can safely leave the fs_devices entry around */
888 			return device;
889 		}
890 
891 		name = rcu_string_strdup(path, GFP_NOFS);
892 		if (!name) {
893 			btrfs_free_device(device);
894 			mutex_unlock(&fs_devices->device_list_mutex);
895 			return ERR_PTR(-ENOMEM);
896 		}
897 		rcu_assign_pointer(device->name, name);
898 
899 		list_add_rcu(&device->dev_list, &fs_devices->devices);
900 		fs_devices->num_devices++;
901 
902 		device->fs_devices = fs_devices;
903 		*new_device_added = true;
904 
905 		if (disk_super->label[0])
906 			pr_info(
907 	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
908 				disk_super->label, devid, found_transid, path,
909 				current->comm, task_pid_nr(current));
910 		else
911 			pr_info(
912 	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
913 				disk_super->fsid, devid, found_transid, path,
914 				current->comm, task_pid_nr(current));
915 
916 	} else if (!device->name || strcmp(device->name->str, path)) {
917 		/*
918 		 * When FS is already mounted.
919 		 * 1. If you are here and if the device->name is NULL that
920 		 *    means this device was missing at time of FS mount.
921 		 * 2. If you are here and if the device->name is different
922 		 *    from 'path' that means either
923 		 *      a. The same device disappeared and reappeared with
924 		 *         different name. or
925 		 *      b. The missing-disk-which-was-replaced, has
926 		 *         reappeared now.
927 		 *
928 		 * We must allow 1 and 2a above. But 2b would be a spurious
929 		 * and unintentional.
930 		 *
931 		 * Further in case of 1 and 2a above, the disk at 'path'
932 		 * would have missed some transaction when it was away and
933 		 * in case of 2a the stale bdev has to be updated as well.
934 		 * 2b must not be allowed at all time.
935 		 */
936 
937 		/*
938 		 * For now, we do allow update to btrfs_fs_device through the
939 		 * btrfs dev scan cli after FS has been mounted.  We're still
940 		 * tracking a problem where systems fail mount by subvolume id
941 		 * when we reject replacement on a mounted FS.
942 		 */
943 		if (!fs_devices->opened && found_transid < device->generation) {
944 			/*
945 			 * That is if the FS is _not_ mounted and if you
946 			 * are here, that means there is more than one
947 			 * disk with same uuid and devid.We keep the one
948 			 * with larger generation number or the last-in if
949 			 * generation are equal.
950 			 */
951 			mutex_unlock(&fs_devices->device_list_mutex);
952 			return ERR_PTR(-EEXIST);
953 		}
954 
955 		/*
956 		 * We are going to replace the device path for a given devid,
957 		 * make sure it's the same device if the device is mounted
958 		 *
959 		 * NOTE: the device->fs_info may not be reliable here so pass
960 		 * in a NULL to message helpers instead. This avoids a possible
961 		 * use-after-free when the fs_info and fs_info->sb are already
962 		 * torn down.
963 		 */
964 		if (device->bdev) {
965 			int error;
966 			dev_t path_dev;
967 
968 			error = lookup_bdev(path, &path_dev);
969 			if (error) {
970 				mutex_unlock(&fs_devices->device_list_mutex);
971 				return ERR_PTR(error);
972 			}
973 
974 			if (device->bdev->bd_dev != path_dev) {
975 				mutex_unlock(&fs_devices->device_list_mutex);
976 				btrfs_warn_in_rcu(NULL,
977 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
978 						  path, devid, found_transid,
979 						  current->comm,
980 						  task_pid_nr(current));
981 				return ERR_PTR(-EEXIST);
982 			}
983 			btrfs_info_in_rcu(NULL,
984 	"devid %llu device path %s changed to %s scanned by %s (%d)",
985 					  devid, rcu_str_deref(device->name),
986 					  path, current->comm,
987 					  task_pid_nr(current));
988 		}
989 
990 		name = rcu_string_strdup(path, GFP_NOFS);
991 		if (!name) {
992 			mutex_unlock(&fs_devices->device_list_mutex);
993 			return ERR_PTR(-ENOMEM);
994 		}
995 		rcu_string_free(device->name);
996 		rcu_assign_pointer(device->name, name);
997 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
998 			fs_devices->missing_devices--;
999 			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1000 		}
1001 	}
1002 
1003 	/*
1004 	 * Unmount does not free the btrfs_device struct but would zero
1005 	 * generation along with most of the other members. So just update
1006 	 * it back. We need it to pick the disk with largest generation
1007 	 * (as above).
1008 	 */
1009 	if (!fs_devices->opened) {
1010 		device->generation = found_transid;
1011 		fs_devices->latest_generation = max_t(u64, found_transid,
1012 						fs_devices->latest_generation);
1013 	}
1014 
1015 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
1016 
1017 	mutex_unlock(&fs_devices->device_list_mutex);
1018 	return device;
1019 }
1020 
1021 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
1022 {
1023 	struct btrfs_fs_devices *fs_devices;
1024 	struct btrfs_device *device;
1025 	struct btrfs_device *orig_dev;
1026 	int ret = 0;
1027 
1028 	lockdep_assert_held(&uuid_mutex);
1029 
1030 	fs_devices = alloc_fs_devices(orig->fsid, NULL);
1031 	if (IS_ERR(fs_devices))
1032 		return fs_devices;
1033 
1034 	fs_devices->total_devices = orig->total_devices;
1035 
1036 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1037 		struct rcu_string *name;
1038 
1039 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
1040 					    orig_dev->uuid);
1041 		if (IS_ERR(device)) {
1042 			ret = PTR_ERR(device);
1043 			goto error;
1044 		}
1045 
1046 		/*
1047 		 * This is ok to do without rcu read locked because we hold the
1048 		 * uuid mutex so nothing we touch in here is going to disappear.
1049 		 */
1050 		if (orig_dev->name) {
1051 			name = rcu_string_strdup(orig_dev->name->str,
1052 					GFP_KERNEL);
1053 			if (!name) {
1054 				btrfs_free_device(device);
1055 				ret = -ENOMEM;
1056 				goto error;
1057 			}
1058 			rcu_assign_pointer(device->name, name);
1059 		}
1060 
1061 		list_add(&device->dev_list, &fs_devices->devices);
1062 		device->fs_devices = fs_devices;
1063 		fs_devices->num_devices++;
1064 	}
1065 	return fs_devices;
1066 error:
1067 	free_fs_devices(fs_devices);
1068 	return ERR_PTR(ret);
1069 }
1070 
1071 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1072 				      struct btrfs_device **latest_dev)
1073 {
1074 	struct btrfs_device *device, *next;
1075 
1076 	/* This is the initialized path, it is safe to release the devices. */
1077 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1078 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1079 			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1080 				      &device->dev_state) &&
1081 			    !test_bit(BTRFS_DEV_STATE_MISSING,
1082 				      &device->dev_state) &&
1083 			    (!*latest_dev ||
1084 			     device->generation > (*latest_dev)->generation)) {
1085 				*latest_dev = device;
1086 			}
1087 			continue;
1088 		}
1089 
1090 		/*
1091 		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1092 		 * in btrfs_init_dev_replace() so just continue.
1093 		 */
1094 		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1095 			continue;
1096 
1097 		if (device->bdev) {
1098 			blkdev_put(device->bdev, device->mode);
1099 			device->bdev = NULL;
1100 			fs_devices->open_devices--;
1101 		}
1102 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1103 			list_del_init(&device->dev_alloc_list);
1104 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1105 			fs_devices->rw_devices--;
1106 		}
1107 		list_del_init(&device->dev_list);
1108 		fs_devices->num_devices--;
1109 		btrfs_free_device(device);
1110 	}
1111 
1112 }
1113 
1114 /*
1115  * After we have read the system tree and know devids belonging to this
1116  * filesystem, remove the device which does not belong there.
1117  */
1118 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1119 {
1120 	struct btrfs_device *latest_dev = NULL;
1121 	struct btrfs_fs_devices *seed_dev;
1122 
1123 	mutex_lock(&uuid_mutex);
1124 	__btrfs_free_extra_devids(fs_devices, &latest_dev);
1125 
1126 	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1127 		__btrfs_free_extra_devids(seed_dev, &latest_dev);
1128 
1129 	fs_devices->latest_dev = latest_dev;
1130 
1131 	mutex_unlock(&uuid_mutex);
1132 }
1133 
1134 static void btrfs_close_bdev(struct btrfs_device *device)
1135 {
1136 	if (!device->bdev)
1137 		return;
1138 
1139 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1140 		sync_blockdev(device->bdev);
1141 		invalidate_bdev(device->bdev);
1142 	}
1143 
1144 	blkdev_put(device->bdev, device->mode);
1145 }
1146 
1147 static void btrfs_close_one_device(struct btrfs_device *device)
1148 {
1149 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1150 
1151 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1152 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1153 		list_del_init(&device->dev_alloc_list);
1154 		fs_devices->rw_devices--;
1155 	}
1156 
1157 	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1158 		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1159 
1160 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1161 		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1162 		fs_devices->missing_devices--;
1163 	}
1164 
1165 	btrfs_close_bdev(device);
1166 	if (device->bdev) {
1167 		fs_devices->open_devices--;
1168 		device->bdev = NULL;
1169 	}
1170 	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1171 	btrfs_destroy_dev_zone_info(device);
1172 
1173 	device->fs_info = NULL;
1174 	atomic_set(&device->dev_stats_ccnt, 0);
1175 	extent_io_tree_release(&device->alloc_state);
1176 
1177 	/*
1178 	 * Reset the flush error record. We might have a transient flush error
1179 	 * in this mount, and if so we aborted the current transaction and set
1180 	 * the fs to an error state, guaranteeing no super blocks can be further
1181 	 * committed. However that error might be transient and if we unmount the
1182 	 * filesystem and mount it again, we should allow the mount to succeed
1183 	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
1184 	 * filesystem again we still get flush errors, then we will again abort
1185 	 * any transaction and set the error state, guaranteeing no commits of
1186 	 * unsafe super blocks.
1187 	 */
1188 	device->last_flush_error = 0;
1189 
1190 	/* Verify the device is back in a pristine state  */
1191 	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1192 	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1193 	ASSERT(list_empty(&device->dev_alloc_list));
1194 	ASSERT(list_empty(&device->post_commit_list));
1195 	ASSERT(atomic_read(&device->reada_in_flight) == 0);
1196 }
1197 
1198 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1199 {
1200 	struct btrfs_device *device, *tmp;
1201 
1202 	lockdep_assert_held(&uuid_mutex);
1203 
1204 	if (--fs_devices->opened > 0)
1205 		return;
1206 
1207 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1208 		btrfs_close_one_device(device);
1209 
1210 	WARN_ON(fs_devices->open_devices);
1211 	WARN_ON(fs_devices->rw_devices);
1212 	fs_devices->opened = 0;
1213 	fs_devices->seeding = false;
1214 	fs_devices->fs_info = NULL;
1215 }
1216 
1217 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1218 {
1219 	LIST_HEAD(list);
1220 	struct btrfs_fs_devices *tmp;
1221 
1222 	mutex_lock(&uuid_mutex);
1223 	close_fs_devices(fs_devices);
1224 	if (!fs_devices->opened)
1225 		list_splice_init(&fs_devices->seed_list, &list);
1226 
1227 	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1228 		close_fs_devices(fs_devices);
1229 		list_del(&fs_devices->seed_list);
1230 		free_fs_devices(fs_devices);
1231 	}
1232 	mutex_unlock(&uuid_mutex);
1233 }
1234 
1235 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1236 				fmode_t flags, void *holder)
1237 {
1238 	struct btrfs_device *device;
1239 	struct btrfs_device *latest_dev = NULL;
1240 	struct btrfs_device *tmp_device;
1241 
1242 	flags |= FMODE_EXCL;
1243 
1244 	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1245 				 dev_list) {
1246 		int ret;
1247 
1248 		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1249 		if (ret == 0 &&
1250 		    (!latest_dev || device->generation > latest_dev->generation)) {
1251 			latest_dev = device;
1252 		} else if (ret == -ENODATA) {
1253 			fs_devices->num_devices--;
1254 			list_del(&device->dev_list);
1255 			btrfs_free_device(device);
1256 		}
1257 	}
1258 	if (fs_devices->open_devices == 0)
1259 		return -EINVAL;
1260 
1261 	fs_devices->opened = 1;
1262 	fs_devices->latest_dev = latest_dev;
1263 	fs_devices->total_rw_bytes = 0;
1264 	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1265 	fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1266 
1267 	return 0;
1268 }
1269 
1270 static int devid_cmp(void *priv, const struct list_head *a,
1271 		     const struct list_head *b)
1272 {
1273 	const struct btrfs_device *dev1, *dev2;
1274 
1275 	dev1 = list_entry(a, struct btrfs_device, dev_list);
1276 	dev2 = list_entry(b, struct btrfs_device, dev_list);
1277 
1278 	if (dev1->devid < dev2->devid)
1279 		return -1;
1280 	else if (dev1->devid > dev2->devid)
1281 		return 1;
1282 	return 0;
1283 }
1284 
1285 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1286 		       fmode_t flags, void *holder)
1287 {
1288 	int ret;
1289 
1290 	lockdep_assert_held(&uuid_mutex);
1291 	/*
1292 	 * The device_list_mutex cannot be taken here in case opening the
1293 	 * underlying device takes further locks like open_mutex.
1294 	 *
1295 	 * We also don't need the lock here as this is called during mount and
1296 	 * exclusion is provided by uuid_mutex
1297 	 */
1298 
1299 	if (fs_devices->opened) {
1300 		fs_devices->opened++;
1301 		ret = 0;
1302 	} else {
1303 		list_sort(NULL, &fs_devices->devices, devid_cmp);
1304 		ret = open_fs_devices(fs_devices, flags, holder);
1305 	}
1306 
1307 	return ret;
1308 }
1309 
1310 void btrfs_release_disk_super(struct btrfs_super_block *super)
1311 {
1312 	struct page *page = virt_to_page(super);
1313 
1314 	put_page(page);
1315 }
1316 
1317 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1318 						       u64 bytenr, u64 bytenr_orig)
1319 {
1320 	struct btrfs_super_block *disk_super;
1321 	struct page *page;
1322 	void *p;
1323 	pgoff_t index;
1324 
1325 	/* make sure our super fits in the device */
1326 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1327 		return ERR_PTR(-EINVAL);
1328 
1329 	/* make sure our super fits in the page */
1330 	if (sizeof(*disk_super) > PAGE_SIZE)
1331 		return ERR_PTR(-EINVAL);
1332 
1333 	/* make sure our super doesn't straddle pages on disk */
1334 	index = bytenr >> PAGE_SHIFT;
1335 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1336 		return ERR_PTR(-EINVAL);
1337 
1338 	/* pull in the page with our super */
1339 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1340 
1341 	if (IS_ERR(page))
1342 		return ERR_CAST(page);
1343 
1344 	p = page_address(page);
1345 
1346 	/* align our pointer to the offset of the super block */
1347 	disk_super = p + offset_in_page(bytenr);
1348 
1349 	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1350 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1351 		btrfs_release_disk_super(p);
1352 		return ERR_PTR(-EINVAL);
1353 	}
1354 
1355 	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1356 		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1357 
1358 	return disk_super;
1359 }
1360 
1361 int btrfs_forget_devices(const char *path)
1362 {
1363 	int ret;
1364 
1365 	mutex_lock(&uuid_mutex);
1366 	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1367 	mutex_unlock(&uuid_mutex);
1368 
1369 	return ret;
1370 }
1371 
1372 /*
1373  * Look for a btrfs signature on a device. This may be called out of the mount path
1374  * and we are not allowed to call set_blocksize during the scan. The superblock
1375  * is read via pagecache
1376  */
1377 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1378 					   void *holder)
1379 {
1380 	struct btrfs_super_block *disk_super;
1381 	bool new_device_added = false;
1382 	struct btrfs_device *device = NULL;
1383 	struct block_device *bdev;
1384 	u64 bytenr, bytenr_orig;
1385 	int ret;
1386 
1387 	lockdep_assert_held(&uuid_mutex);
1388 
1389 	/*
1390 	 * we would like to check all the supers, but that would make
1391 	 * a btrfs mount succeed after a mkfs from a different FS.
1392 	 * So, we need to add a special mount option to scan for
1393 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1394 	 */
1395 	flags |= FMODE_EXCL;
1396 
1397 	bdev = blkdev_get_by_path(path, flags, holder);
1398 	if (IS_ERR(bdev))
1399 		return ERR_CAST(bdev);
1400 
1401 	bytenr_orig = btrfs_sb_offset(0);
1402 	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1403 	if (ret) {
1404 		device = ERR_PTR(ret);
1405 		goto error_bdev_put;
1406 	}
1407 
1408 	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1409 	if (IS_ERR(disk_super)) {
1410 		device = ERR_CAST(disk_super);
1411 		goto error_bdev_put;
1412 	}
1413 
1414 	device = device_list_add(path, disk_super, &new_device_added);
1415 	if (!IS_ERR(device)) {
1416 		if (new_device_added)
1417 			btrfs_free_stale_devices(path, device);
1418 	}
1419 
1420 	btrfs_release_disk_super(disk_super);
1421 
1422 error_bdev_put:
1423 	blkdev_put(bdev, flags);
1424 
1425 	return device;
1426 }
1427 
1428 /*
1429  * Try to find a chunk that intersects [start, start + len] range and when one
1430  * such is found, record the end of it in *start
1431  */
1432 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1433 				    u64 len)
1434 {
1435 	u64 physical_start, physical_end;
1436 
1437 	lockdep_assert_held(&device->fs_info->chunk_mutex);
1438 
1439 	if (!find_first_extent_bit(&device->alloc_state, *start,
1440 				   &physical_start, &physical_end,
1441 				   CHUNK_ALLOCATED, NULL)) {
1442 
1443 		if (in_range(physical_start, *start, len) ||
1444 		    in_range(*start, physical_start,
1445 			     physical_end - physical_start)) {
1446 			*start = physical_end + 1;
1447 			return true;
1448 		}
1449 	}
1450 	return false;
1451 }
1452 
1453 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1454 {
1455 	switch (device->fs_devices->chunk_alloc_policy) {
1456 	case BTRFS_CHUNK_ALLOC_REGULAR:
1457 		/*
1458 		 * We don't want to overwrite the superblock on the drive nor
1459 		 * any area used by the boot loader (grub for example), so we
1460 		 * make sure to start at an offset of at least 1MB.
1461 		 */
1462 		return max_t(u64, start, SZ_1M);
1463 	case BTRFS_CHUNK_ALLOC_ZONED:
1464 		/*
1465 		 * We don't care about the starting region like regular
1466 		 * allocator, because we anyway use/reserve the first two zones
1467 		 * for superblock logging.
1468 		 */
1469 		return ALIGN(start, device->zone_info->zone_size);
1470 	default:
1471 		BUG();
1472 	}
1473 }
1474 
1475 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1476 					u64 *hole_start, u64 *hole_size,
1477 					u64 num_bytes)
1478 {
1479 	u64 zone_size = device->zone_info->zone_size;
1480 	u64 pos;
1481 	int ret;
1482 	bool changed = false;
1483 
1484 	ASSERT(IS_ALIGNED(*hole_start, zone_size));
1485 
1486 	while (*hole_size > 0) {
1487 		pos = btrfs_find_allocatable_zones(device, *hole_start,
1488 						   *hole_start + *hole_size,
1489 						   num_bytes);
1490 		if (pos != *hole_start) {
1491 			*hole_size = *hole_start + *hole_size - pos;
1492 			*hole_start = pos;
1493 			changed = true;
1494 			if (*hole_size < num_bytes)
1495 				break;
1496 		}
1497 
1498 		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1499 
1500 		/* Range is ensured to be empty */
1501 		if (!ret)
1502 			return changed;
1503 
1504 		/* Given hole range was invalid (outside of device) */
1505 		if (ret == -ERANGE) {
1506 			*hole_start += *hole_size;
1507 			*hole_size = 0;
1508 			return true;
1509 		}
1510 
1511 		*hole_start += zone_size;
1512 		*hole_size -= zone_size;
1513 		changed = true;
1514 	}
1515 
1516 	return changed;
1517 }
1518 
1519 /**
1520  * dev_extent_hole_check - check if specified hole is suitable for allocation
1521  * @device:	the device which we have the hole
1522  * @hole_start: starting position of the hole
1523  * @hole_size:	the size of the hole
1524  * @num_bytes:	the size of the free space that we need
1525  *
1526  * This function may modify @hole_start and @hole_size to reflect the suitable
1527  * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
1528  */
1529 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1530 				  u64 *hole_size, u64 num_bytes)
1531 {
1532 	bool changed = false;
1533 	u64 hole_end = *hole_start + *hole_size;
1534 
1535 	for (;;) {
1536 		/*
1537 		 * Check before we set max_hole_start, otherwise we could end up
1538 		 * sending back this offset anyway.
1539 		 */
1540 		if (contains_pending_extent(device, hole_start, *hole_size)) {
1541 			if (hole_end >= *hole_start)
1542 				*hole_size = hole_end - *hole_start;
1543 			else
1544 				*hole_size = 0;
1545 			changed = true;
1546 		}
1547 
1548 		switch (device->fs_devices->chunk_alloc_policy) {
1549 		case BTRFS_CHUNK_ALLOC_REGULAR:
1550 			/* No extra check */
1551 			break;
1552 		case BTRFS_CHUNK_ALLOC_ZONED:
1553 			if (dev_extent_hole_check_zoned(device, hole_start,
1554 							hole_size, num_bytes)) {
1555 				changed = true;
1556 				/*
1557 				 * The changed hole can contain pending extent.
1558 				 * Loop again to check that.
1559 				 */
1560 				continue;
1561 			}
1562 			break;
1563 		default:
1564 			BUG();
1565 		}
1566 
1567 		break;
1568 	}
1569 
1570 	return changed;
1571 }
1572 
1573 /*
1574  * find_free_dev_extent_start - find free space in the specified device
1575  * @device:	  the device which we search the free space in
1576  * @num_bytes:	  the size of the free space that we need
1577  * @search_start: the position from which to begin the search
1578  * @start:	  store the start of the free space.
1579  * @len:	  the size of the free space. that we find, or the size
1580  *		  of the max free space if we don't find suitable free space
1581  *
1582  * this uses a pretty simple search, the expectation is that it is
1583  * called very infrequently and that a given device has a small number
1584  * of extents
1585  *
1586  * @start is used to store the start of the free space if we find. But if we
1587  * don't find suitable free space, it will be used to store the start position
1588  * of the max free space.
1589  *
1590  * @len is used to store the size of the free space that we find.
1591  * But if we don't find suitable free space, it is used to store the size of
1592  * the max free space.
1593  *
1594  * NOTE: This function will search *commit* root of device tree, and does extra
1595  * check to ensure dev extents are not double allocated.
1596  * This makes the function safe to allocate dev extents but may not report
1597  * correct usable device space, as device extent freed in current transaction
1598  * is not reported as available.
1599  */
1600 static int find_free_dev_extent_start(struct btrfs_device *device,
1601 				u64 num_bytes, u64 search_start, u64 *start,
1602 				u64 *len)
1603 {
1604 	struct btrfs_fs_info *fs_info = device->fs_info;
1605 	struct btrfs_root *root = fs_info->dev_root;
1606 	struct btrfs_key key;
1607 	struct btrfs_dev_extent *dev_extent;
1608 	struct btrfs_path *path;
1609 	u64 hole_size;
1610 	u64 max_hole_start;
1611 	u64 max_hole_size;
1612 	u64 extent_end;
1613 	u64 search_end = device->total_bytes;
1614 	int ret;
1615 	int slot;
1616 	struct extent_buffer *l;
1617 
1618 	search_start = dev_extent_search_start(device, search_start);
1619 
1620 	WARN_ON(device->zone_info &&
1621 		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1622 
1623 	path = btrfs_alloc_path();
1624 	if (!path)
1625 		return -ENOMEM;
1626 
1627 	max_hole_start = search_start;
1628 	max_hole_size = 0;
1629 
1630 again:
1631 	if (search_start >= search_end ||
1632 		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1633 		ret = -ENOSPC;
1634 		goto out;
1635 	}
1636 
1637 	path->reada = READA_FORWARD;
1638 	path->search_commit_root = 1;
1639 	path->skip_locking = 1;
1640 
1641 	key.objectid = device->devid;
1642 	key.offset = search_start;
1643 	key.type = BTRFS_DEV_EXTENT_KEY;
1644 
1645 	ret = btrfs_search_backwards(root, &key, path);
1646 	if (ret < 0)
1647 		goto out;
1648 
1649 	while (1) {
1650 		l = path->nodes[0];
1651 		slot = path->slots[0];
1652 		if (slot >= btrfs_header_nritems(l)) {
1653 			ret = btrfs_next_leaf(root, path);
1654 			if (ret == 0)
1655 				continue;
1656 			if (ret < 0)
1657 				goto out;
1658 
1659 			break;
1660 		}
1661 		btrfs_item_key_to_cpu(l, &key, slot);
1662 
1663 		if (key.objectid < device->devid)
1664 			goto next;
1665 
1666 		if (key.objectid > device->devid)
1667 			break;
1668 
1669 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1670 			goto next;
1671 
1672 		if (key.offset > search_start) {
1673 			hole_size = key.offset - search_start;
1674 			dev_extent_hole_check(device, &search_start, &hole_size,
1675 					      num_bytes);
1676 
1677 			if (hole_size > max_hole_size) {
1678 				max_hole_start = search_start;
1679 				max_hole_size = hole_size;
1680 			}
1681 
1682 			/*
1683 			 * If this free space is greater than which we need,
1684 			 * it must be the max free space that we have found
1685 			 * until now, so max_hole_start must point to the start
1686 			 * of this free space and the length of this free space
1687 			 * is stored in max_hole_size. Thus, we return
1688 			 * max_hole_start and max_hole_size and go back to the
1689 			 * caller.
1690 			 */
1691 			if (hole_size >= num_bytes) {
1692 				ret = 0;
1693 				goto out;
1694 			}
1695 		}
1696 
1697 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1698 		extent_end = key.offset + btrfs_dev_extent_length(l,
1699 								  dev_extent);
1700 		if (extent_end > search_start)
1701 			search_start = extent_end;
1702 next:
1703 		path->slots[0]++;
1704 		cond_resched();
1705 	}
1706 
1707 	/*
1708 	 * At this point, search_start should be the end of
1709 	 * allocated dev extents, and when shrinking the device,
1710 	 * search_end may be smaller than search_start.
1711 	 */
1712 	if (search_end > search_start) {
1713 		hole_size = search_end - search_start;
1714 		if (dev_extent_hole_check(device, &search_start, &hole_size,
1715 					  num_bytes)) {
1716 			btrfs_release_path(path);
1717 			goto again;
1718 		}
1719 
1720 		if (hole_size > max_hole_size) {
1721 			max_hole_start = search_start;
1722 			max_hole_size = hole_size;
1723 		}
1724 	}
1725 
1726 	/* See above. */
1727 	if (max_hole_size < num_bytes)
1728 		ret = -ENOSPC;
1729 	else
1730 		ret = 0;
1731 
1732 out:
1733 	btrfs_free_path(path);
1734 	*start = max_hole_start;
1735 	if (len)
1736 		*len = max_hole_size;
1737 	return ret;
1738 }
1739 
1740 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1741 			 u64 *start, u64 *len)
1742 {
1743 	/* FIXME use last free of some kind */
1744 	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1745 }
1746 
1747 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1748 			  struct btrfs_device *device,
1749 			  u64 start, u64 *dev_extent_len)
1750 {
1751 	struct btrfs_fs_info *fs_info = device->fs_info;
1752 	struct btrfs_root *root = fs_info->dev_root;
1753 	int ret;
1754 	struct btrfs_path *path;
1755 	struct btrfs_key key;
1756 	struct btrfs_key found_key;
1757 	struct extent_buffer *leaf = NULL;
1758 	struct btrfs_dev_extent *extent = NULL;
1759 
1760 	path = btrfs_alloc_path();
1761 	if (!path)
1762 		return -ENOMEM;
1763 
1764 	key.objectid = device->devid;
1765 	key.offset = start;
1766 	key.type = BTRFS_DEV_EXTENT_KEY;
1767 again:
1768 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1769 	if (ret > 0) {
1770 		ret = btrfs_previous_item(root, path, key.objectid,
1771 					  BTRFS_DEV_EXTENT_KEY);
1772 		if (ret)
1773 			goto out;
1774 		leaf = path->nodes[0];
1775 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1776 		extent = btrfs_item_ptr(leaf, path->slots[0],
1777 					struct btrfs_dev_extent);
1778 		BUG_ON(found_key.offset > start || found_key.offset +
1779 		       btrfs_dev_extent_length(leaf, extent) < start);
1780 		key = found_key;
1781 		btrfs_release_path(path);
1782 		goto again;
1783 	} else if (ret == 0) {
1784 		leaf = path->nodes[0];
1785 		extent = btrfs_item_ptr(leaf, path->slots[0],
1786 					struct btrfs_dev_extent);
1787 	} else {
1788 		goto out;
1789 	}
1790 
1791 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1792 
1793 	ret = btrfs_del_item(trans, root, path);
1794 	if (ret == 0)
1795 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1796 out:
1797 	btrfs_free_path(path);
1798 	return ret;
1799 }
1800 
1801 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1802 {
1803 	struct extent_map_tree *em_tree;
1804 	struct extent_map *em;
1805 	struct rb_node *n;
1806 	u64 ret = 0;
1807 
1808 	em_tree = &fs_info->mapping_tree;
1809 	read_lock(&em_tree->lock);
1810 	n = rb_last(&em_tree->map.rb_root);
1811 	if (n) {
1812 		em = rb_entry(n, struct extent_map, rb_node);
1813 		ret = em->start + em->len;
1814 	}
1815 	read_unlock(&em_tree->lock);
1816 
1817 	return ret;
1818 }
1819 
1820 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1821 				    u64 *devid_ret)
1822 {
1823 	int ret;
1824 	struct btrfs_key key;
1825 	struct btrfs_key found_key;
1826 	struct btrfs_path *path;
1827 
1828 	path = btrfs_alloc_path();
1829 	if (!path)
1830 		return -ENOMEM;
1831 
1832 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1833 	key.type = BTRFS_DEV_ITEM_KEY;
1834 	key.offset = (u64)-1;
1835 
1836 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1837 	if (ret < 0)
1838 		goto error;
1839 
1840 	if (ret == 0) {
1841 		/* Corruption */
1842 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1843 		ret = -EUCLEAN;
1844 		goto error;
1845 	}
1846 
1847 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1848 				  BTRFS_DEV_ITEMS_OBJECTID,
1849 				  BTRFS_DEV_ITEM_KEY);
1850 	if (ret) {
1851 		*devid_ret = 1;
1852 	} else {
1853 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1854 				      path->slots[0]);
1855 		*devid_ret = found_key.offset + 1;
1856 	}
1857 	ret = 0;
1858 error:
1859 	btrfs_free_path(path);
1860 	return ret;
1861 }
1862 
1863 /*
1864  * the device information is stored in the chunk root
1865  * the btrfs_device struct should be fully filled in
1866  */
1867 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1868 			    struct btrfs_device *device)
1869 {
1870 	int ret;
1871 	struct btrfs_path *path;
1872 	struct btrfs_dev_item *dev_item;
1873 	struct extent_buffer *leaf;
1874 	struct btrfs_key key;
1875 	unsigned long ptr;
1876 
1877 	path = btrfs_alloc_path();
1878 	if (!path)
1879 		return -ENOMEM;
1880 
1881 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1882 	key.type = BTRFS_DEV_ITEM_KEY;
1883 	key.offset = device->devid;
1884 
1885 	btrfs_reserve_chunk_metadata(trans, true);
1886 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1887 				      &key, sizeof(*dev_item));
1888 	btrfs_trans_release_chunk_metadata(trans);
1889 	if (ret)
1890 		goto out;
1891 
1892 	leaf = path->nodes[0];
1893 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1894 
1895 	btrfs_set_device_id(leaf, dev_item, device->devid);
1896 	btrfs_set_device_generation(leaf, dev_item, 0);
1897 	btrfs_set_device_type(leaf, dev_item, device->type);
1898 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1899 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1900 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1901 	btrfs_set_device_total_bytes(leaf, dev_item,
1902 				     btrfs_device_get_disk_total_bytes(device));
1903 	btrfs_set_device_bytes_used(leaf, dev_item,
1904 				    btrfs_device_get_bytes_used(device));
1905 	btrfs_set_device_group(leaf, dev_item, 0);
1906 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1907 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1908 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1909 
1910 	ptr = btrfs_device_uuid(dev_item);
1911 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1912 	ptr = btrfs_device_fsid(dev_item);
1913 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1914 			    ptr, BTRFS_FSID_SIZE);
1915 	btrfs_mark_buffer_dirty(leaf);
1916 
1917 	ret = 0;
1918 out:
1919 	btrfs_free_path(path);
1920 	return ret;
1921 }
1922 
1923 /*
1924  * Function to update ctime/mtime for a given device path.
1925  * Mainly used for ctime/mtime based probe like libblkid.
1926  *
1927  * We don't care about errors here, this is just to be kind to userspace.
1928  */
1929 static void update_dev_time(const char *device_path)
1930 {
1931 	struct path path;
1932 	struct timespec64 now;
1933 	int ret;
1934 
1935 	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1936 	if (ret)
1937 		return;
1938 
1939 	now = current_time(d_inode(path.dentry));
1940 	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1941 	path_put(&path);
1942 }
1943 
1944 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1945 			     struct btrfs_device *device)
1946 {
1947 	struct btrfs_root *root = device->fs_info->chunk_root;
1948 	int ret;
1949 	struct btrfs_path *path;
1950 	struct btrfs_key key;
1951 
1952 	path = btrfs_alloc_path();
1953 	if (!path)
1954 		return -ENOMEM;
1955 
1956 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1957 	key.type = BTRFS_DEV_ITEM_KEY;
1958 	key.offset = device->devid;
1959 
1960 	btrfs_reserve_chunk_metadata(trans, false);
1961 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1962 	btrfs_trans_release_chunk_metadata(trans);
1963 	if (ret) {
1964 		if (ret > 0)
1965 			ret = -ENOENT;
1966 		goto out;
1967 	}
1968 
1969 	ret = btrfs_del_item(trans, root, path);
1970 out:
1971 	btrfs_free_path(path);
1972 	return ret;
1973 }
1974 
1975 /*
1976  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1977  * filesystem. It's up to the caller to adjust that number regarding eg. device
1978  * replace.
1979  */
1980 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1981 		u64 num_devices)
1982 {
1983 	u64 all_avail;
1984 	unsigned seq;
1985 	int i;
1986 
1987 	do {
1988 		seq = read_seqbegin(&fs_info->profiles_lock);
1989 
1990 		all_avail = fs_info->avail_data_alloc_bits |
1991 			    fs_info->avail_system_alloc_bits |
1992 			    fs_info->avail_metadata_alloc_bits;
1993 	} while (read_seqretry(&fs_info->profiles_lock, seq));
1994 
1995 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1996 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1997 			continue;
1998 
1999 		if (num_devices < btrfs_raid_array[i].devs_min)
2000 			return btrfs_raid_array[i].mindev_error;
2001 	}
2002 
2003 	return 0;
2004 }
2005 
2006 static struct btrfs_device * btrfs_find_next_active_device(
2007 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2008 {
2009 	struct btrfs_device *next_device;
2010 
2011 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2012 		if (next_device != device &&
2013 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2014 		    && next_device->bdev)
2015 			return next_device;
2016 	}
2017 
2018 	return NULL;
2019 }
2020 
2021 /*
2022  * Helper function to check if the given device is part of s_bdev / latest_dev
2023  * and replace it with the provided or the next active device, in the context
2024  * where this function called, there should be always be another device (or
2025  * this_dev) which is active.
2026  */
2027 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2028 					    struct btrfs_device *next_device)
2029 {
2030 	struct btrfs_fs_info *fs_info = device->fs_info;
2031 
2032 	if (!next_device)
2033 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2034 							    device);
2035 	ASSERT(next_device);
2036 
2037 	if (fs_info->sb->s_bdev &&
2038 			(fs_info->sb->s_bdev == device->bdev))
2039 		fs_info->sb->s_bdev = next_device->bdev;
2040 
2041 	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2042 		fs_info->fs_devices->latest_dev = next_device;
2043 }
2044 
2045 /*
2046  * Return btrfs_fs_devices::num_devices excluding the device that's being
2047  * currently replaced.
2048  */
2049 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2050 {
2051 	u64 num_devices = fs_info->fs_devices->num_devices;
2052 
2053 	down_read(&fs_info->dev_replace.rwsem);
2054 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2055 		ASSERT(num_devices > 1);
2056 		num_devices--;
2057 	}
2058 	up_read(&fs_info->dev_replace.rwsem);
2059 
2060 	return num_devices;
2061 }
2062 
2063 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2064 			       struct block_device *bdev,
2065 			       const char *device_path)
2066 {
2067 	struct btrfs_super_block *disk_super;
2068 	int copy_num;
2069 
2070 	if (!bdev)
2071 		return;
2072 
2073 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2074 		struct page *page;
2075 		int ret;
2076 
2077 		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2078 		if (IS_ERR(disk_super))
2079 			continue;
2080 
2081 		if (bdev_is_zoned(bdev)) {
2082 			btrfs_reset_sb_log_zones(bdev, copy_num);
2083 			continue;
2084 		}
2085 
2086 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2087 
2088 		page = virt_to_page(disk_super);
2089 		set_page_dirty(page);
2090 		lock_page(page);
2091 		/* write_on_page() unlocks the page */
2092 		ret = write_one_page(page);
2093 		if (ret)
2094 			btrfs_warn(fs_info,
2095 				"error clearing superblock number %d (%d)",
2096 				copy_num, ret);
2097 		btrfs_release_disk_super(disk_super);
2098 
2099 	}
2100 
2101 	/* Notify udev that device has changed */
2102 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2103 
2104 	/* Update ctime/mtime for device path for libblkid */
2105 	update_dev_time(device_path);
2106 }
2107 
2108 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2109 		    struct btrfs_dev_lookup_args *args,
2110 		    struct block_device **bdev, fmode_t *mode)
2111 {
2112 	struct btrfs_trans_handle *trans;
2113 	struct btrfs_device *device;
2114 	struct btrfs_fs_devices *cur_devices;
2115 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2116 	u64 num_devices;
2117 	int ret = 0;
2118 
2119 	/*
2120 	 * The device list in fs_devices is accessed without locks (neither
2121 	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2122 	 * filesystem and another device rm cannot run.
2123 	 */
2124 	num_devices = btrfs_num_devices(fs_info);
2125 
2126 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2127 	if (ret)
2128 		return ret;
2129 
2130 	device = btrfs_find_device(fs_info->fs_devices, args);
2131 	if (!device) {
2132 		if (args->missing)
2133 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2134 		else
2135 			ret = -ENOENT;
2136 		return ret;
2137 	}
2138 
2139 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2140 		btrfs_warn_in_rcu(fs_info,
2141 		  "cannot remove device %s (devid %llu) due to active swapfile",
2142 				  rcu_str_deref(device->name), device->devid);
2143 		return -ETXTBSY;
2144 	}
2145 
2146 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2147 		return BTRFS_ERROR_DEV_TGT_REPLACE;
2148 
2149 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2150 	    fs_info->fs_devices->rw_devices == 1)
2151 		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2152 
2153 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2154 		mutex_lock(&fs_info->chunk_mutex);
2155 		list_del_init(&device->dev_alloc_list);
2156 		device->fs_devices->rw_devices--;
2157 		mutex_unlock(&fs_info->chunk_mutex);
2158 	}
2159 
2160 	ret = btrfs_shrink_device(device, 0);
2161 	if (!ret)
2162 		btrfs_reada_remove_dev(device);
2163 	if (ret)
2164 		goto error_undo;
2165 
2166 	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2167 	if (IS_ERR(trans)) {
2168 		ret = PTR_ERR(trans);
2169 		goto error_undo;
2170 	}
2171 
2172 	ret = btrfs_rm_dev_item(trans, device);
2173 	if (ret) {
2174 		/* Any error in dev item removal is critical */
2175 		btrfs_crit(fs_info,
2176 			   "failed to remove device item for devid %llu: %d",
2177 			   device->devid, ret);
2178 		btrfs_abort_transaction(trans, ret);
2179 		btrfs_end_transaction(trans);
2180 		return ret;
2181 	}
2182 
2183 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2184 	btrfs_scrub_cancel_dev(device);
2185 
2186 	/*
2187 	 * the device list mutex makes sure that we don't change
2188 	 * the device list while someone else is writing out all
2189 	 * the device supers. Whoever is writing all supers, should
2190 	 * lock the device list mutex before getting the number of
2191 	 * devices in the super block (super_copy). Conversely,
2192 	 * whoever updates the number of devices in the super block
2193 	 * (super_copy) should hold the device list mutex.
2194 	 */
2195 
2196 	/*
2197 	 * In normal cases the cur_devices == fs_devices. But in case
2198 	 * of deleting a seed device, the cur_devices should point to
2199 	 * its own fs_devices listed under the fs_devices->seed.
2200 	 */
2201 	cur_devices = device->fs_devices;
2202 	mutex_lock(&fs_devices->device_list_mutex);
2203 	list_del_rcu(&device->dev_list);
2204 
2205 	cur_devices->num_devices--;
2206 	cur_devices->total_devices--;
2207 	/* Update total_devices of the parent fs_devices if it's seed */
2208 	if (cur_devices != fs_devices)
2209 		fs_devices->total_devices--;
2210 
2211 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2212 		cur_devices->missing_devices--;
2213 
2214 	btrfs_assign_next_active_device(device, NULL);
2215 
2216 	if (device->bdev) {
2217 		cur_devices->open_devices--;
2218 		/* remove sysfs entry */
2219 		btrfs_sysfs_remove_device(device);
2220 	}
2221 
2222 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2223 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2224 	mutex_unlock(&fs_devices->device_list_mutex);
2225 
2226 	/*
2227 	 * At this point, the device is zero sized and detached from the
2228 	 * devices list.  All that's left is to zero out the old supers and
2229 	 * free the device.
2230 	 *
2231 	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2232 	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2233 	 * block device and it's dependencies.  Instead just flush the device
2234 	 * and let the caller do the final blkdev_put.
2235 	 */
2236 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2237 		btrfs_scratch_superblocks(fs_info, device->bdev,
2238 					  device->name->str);
2239 		if (device->bdev) {
2240 			sync_blockdev(device->bdev);
2241 			invalidate_bdev(device->bdev);
2242 		}
2243 	}
2244 
2245 	*bdev = device->bdev;
2246 	*mode = device->mode;
2247 	synchronize_rcu();
2248 	btrfs_free_device(device);
2249 
2250 	if (cur_devices->open_devices == 0) {
2251 		list_del_init(&cur_devices->seed_list);
2252 		close_fs_devices(cur_devices);
2253 		free_fs_devices(cur_devices);
2254 	}
2255 
2256 	ret = btrfs_commit_transaction(trans);
2257 
2258 	return ret;
2259 
2260 error_undo:
2261 	btrfs_reada_undo_remove_dev(device);
2262 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2263 		mutex_lock(&fs_info->chunk_mutex);
2264 		list_add(&device->dev_alloc_list,
2265 			 &fs_devices->alloc_list);
2266 		device->fs_devices->rw_devices++;
2267 		mutex_unlock(&fs_info->chunk_mutex);
2268 	}
2269 	return ret;
2270 }
2271 
2272 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2273 {
2274 	struct btrfs_fs_devices *fs_devices;
2275 
2276 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2277 
2278 	/*
2279 	 * in case of fs with no seed, srcdev->fs_devices will point
2280 	 * to fs_devices of fs_info. However when the dev being replaced is
2281 	 * a seed dev it will point to the seed's local fs_devices. In short
2282 	 * srcdev will have its correct fs_devices in both the cases.
2283 	 */
2284 	fs_devices = srcdev->fs_devices;
2285 
2286 	list_del_rcu(&srcdev->dev_list);
2287 	list_del(&srcdev->dev_alloc_list);
2288 	fs_devices->num_devices--;
2289 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2290 		fs_devices->missing_devices--;
2291 
2292 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2293 		fs_devices->rw_devices--;
2294 
2295 	if (srcdev->bdev)
2296 		fs_devices->open_devices--;
2297 }
2298 
2299 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2300 {
2301 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2302 
2303 	mutex_lock(&uuid_mutex);
2304 
2305 	btrfs_close_bdev(srcdev);
2306 	synchronize_rcu();
2307 	btrfs_free_device(srcdev);
2308 
2309 	/* if this is no devs we rather delete the fs_devices */
2310 	if (!fs_devices->num_devices) {
2311 		/*
2312 		 * On a mounted FS, num_devices can't be zero unless it's a
2313 		 * seed. In case of a seed device being replaced, the replace
2314 		 * target added to the sprout FS, so there will be no more
2315 		 * device left under the seed FS.
2316 		 */
2317 		ASSERT(fs_devices->seeding);
2318 
2319 		list_del_init(&fs_devices->seed_list);
2320 		close_fs_devices(fs_devices);
2321 		free_fs_devices(fs_devices);
2322 	}
2323 	mutex_unlock(&uuid_mutex);
2324 }
2325 
2326 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2327 {
2328 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2329 
2330 	mutex_lock(&fs_devices->device_list_mutex);
2331 
2332 	btrfs_sysfs_remove_device(tgtdev);
2333 
2334 	if (tgtdev->bdev)
2335 		fs_devices->open_devices--;
2336 
2337 	fs_devices->num_devices--;
2338 
2339 	btrfs_assign_next_active_device(tgtdev, NULL);
2340 
2341 	list_del_rcu(&tgtdev->dev_list);
2342 
2343 	mutex_unlock(&fs_devices->device_list_mutex);
2344 
2345 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2346 				  tgtdev->name->str);
2347 
2348 	btrfs_close_bdev(tgtdev);
2349 	synchronize_rcu();
2350 	btrfs_free_device(tgtdev);
2351 }
2352 
2353 /**
2354  * Populate args from device at path
2355  *
2356  * @fs_info:	the filesystem
2357  * @args:	the args to populate
2358  * @path:	the path to the device
2359  *
2360  * This will read the super block of the device at @path and populate @args with
2361  * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2362  * lookup a device to operate on, but need to do it before we take any locks.
2363  * This properly handles the special case of "missing" that a user may pass in,
2364  * and does some basic sanity checks.  The caller must make sure that @path is
2365  * properly NUL terminated before calling in, and must call
2366  * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2367  * uuid buffers.
2368  *
2369  * Return: 0 for success, -errno for failure
2370  */
2371 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2372 				 struct btrfs_dev_lookup_args *args,
2373 				 const char *path)
2374 {
2375 	struct btrfs_super_block *disk_super;
2376 	struct block_device *bdev;
2377 	int ret;
2378 
2379 	if (!path || !path[0])
2380 		return -EINVAL;
2381 	if (!strcmp(path, "missing")) {
2382 		args->missing = true;
2383 		return 0;
2384 	}
2385 
2386 	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2387 	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2388 	if (!args->uuid || !args->fsid) {
2389 		btrfs_put_dev_args_from_path(args);
2390 		return -ENOMEM;
2391 	}
2392 
2393 	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2394 				    &bdev, &disk_super);
2395 	if (ret) {
2396 		btrfs_put_dev_args_from_path(args);
2397 		return ret;
2398 	}
2399 
2400 	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2401 	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2402 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2403 		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2404 	else
2405 		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2406 	btrfs_release_disk_super(disk_super);
2407 	blkdev_put(bdev, FMODE_READ);
2408 	return 0;
2409 }
2410 
2411 /*
2412  * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2413  * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2414  * that don't need to be freed.
2415  */
2416 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2417 {
2418 	kfree(args->uuid);
2419 	kfree(args->fsid);
2420 	args->uuid = NULL;
2421 	args->fsid = NULL;
2422 }
2423 
2424 struct btrfs_device *btrfs_find_device_by_devspec(
2425 		struct btrfs_fs_info *fs_info, u64 devid,
2426 		const char *device_path)
2427 {
2428 	BTRFS_DEV_LOOKUP_ARGS(args);
2429 	struct btrfs_device *device;
2430 	int ret;
2431 
2432 	if (devid) {
2433 		args.devid = devid;
2434 		device = btrfs_find_device(fs_info->fs_devices, &args);
2435 		if (!device)
2436 			return ERR_PTR(-ENOENT);
2437 		return device;
2438 	}
2439 
2440 	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2441 	if (ret)
2442 		return ERR_PTR(ret);
2443 	device = btrfs_find_device(fs_info->fs_devices, &args);
2444 	btrfs_put_dev_args_from_path(&args);
2445 	if (!device)
2446 		return ERR_PTR(-ENOENT);
2447 	return device;
2448 }
2449 
2450 /*
2451  * does all the dirty work required for changing file system's UUID.
2452  */
2453 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2454 {
2455 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2456 	struct btrfs_fs_devices *old_devices;
2457 	struct btrfs_fs_devices *seed_devices;
2458 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2459 	struct btrfs_device *device;
2460 	u64 super_flags;
2461 
2462 	lockdep_assert_held(&uuid_mutex);
2463 	if (!fs_devices->seeding)
2464 		return -EINVAL;
2465 
2466 	/*
2467 	 * Private copy of the seed devices, anchored at
2468 	 * fs_info->fs_devices->seed_list
2469 	 */
2470 	seed_devices = alloc_fs_devices(NULL, NULL);
2471 	if (IS_ERR(seed_devices))
2472 		return PTR_ERR(seed_devices);
2473 
2474 	/*
2475 	 * It's necessary to retain a copy of the original seed fs_devices in
2476 	 * fs_uuids so that filesystems which have been seeded can successfully
2477 	 * reference the seed device from open_seed_devices. This also supports
2478 	 * multiple fs seed.
2479 	 */
2480 	old_devices = clone_fs_devices(fs_devices);
2481 	if (IS_ERR(old_devices)) {
2482 		kfree(seed_devices);
2483 		return PTR_ERR(old_devices);
2484 	}
2485 
2486 	list_add(&old_devices->fs_list, &fs_uuids);
2487 
2488 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2489 	seed_devices->opened = 1;
2490 	INIT_LIST_HEAD(&seed_devices->devices);
2491 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2492 	mutex_init(&seed_devices->device_list_mutex);
2493 
2494 	mutex_lock(&fs_devices->device_list_mutex);
2495 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2496 			      synchronize_rcu);
2497 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2498 		device->fs_devices = seed_devices;
2499 
2500 	fs_devices->seeding = false;
2501 	fs_devices->num_devices = 0;
2502 	fs_devices->open_devices = 0;
2503 	fs_devices->missing_devices = 0;
2504 	fs_devices->rotating = false;
2505 	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2506 
2507 	generate_random_uuid(fs_devices->fsid);
2508 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2509 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2510 	mutex_unlock(&fs_devices->device_list_mutex);
2511 
2512 	super_flags = btrfs_super_flags(disk_super) &
2513 		      ~BTRFS_SUPER_FLAG_SEEDING;
2514 	btrfs_set_super_flags(disk_super, super_flags);
2515 
2516 	return 0;
2517 }
2518 
2519 /*
2520  * Store the expected generation for seed devices in device items.
2521  */
2522 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2523 {
2524 	BTRFS_DEV_LOOKUP_ARGS(args);
2525 	struct btrfs_fs_info *fs_info = trans->fs_info;
2526 	struct btrfs_root *root = fs_info->chunk_root;
2527 	struct btrfs_path *path;
2528 	struct extent_buffer *leaf;
2529 	struct btrfs_dev_item *dev_item;
2530 	struct btrfs_device *device;
2531 	struct btrfs_key key;
2532 	u8 fs_uuid[BTRFS_FSID_SIZE];
2533 	u8 dev_uuid[BTRFS_UUID_SIZE];
2534 	int ret;
2535 
2536 	path = btrfs_alloc_path();
2537 	if (!path)
2538 		return -ENOMEM;
2539 
2540 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2541 	key.offset = 0;
2542 	key.type = BTRFS_DEV_ITEM_KEY;
2543 
2544 	while (1) {
2545 		btrfs_reserve_chunk_metadata(trans, false);
2546 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2547 		btrfs_trans_release_chunk_metadata(trans);
2548 		if (ret < 0)
2549 			goto error;
2550 
2551 		leaf = path->nodes[0];
2552 next_slot:
2553 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2554 			ret = btrfs_next_leaf(root, path);
2555 			if (ret > 0)
2556 				break;
2557 			if (ret < 0)
2558 				goto error;
2559 			leaf = path->nodes[0];
2560 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2561 			btrfs_release_path(path);
2562 			continue;
2563 		}
2564 
2565 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2566 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2567 		    key.type != BTRFS_DEV_ITEM_KEY)
2568 			break;
2569 
2570 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2571 					  struct btrfs_dev_item);
2572 		args.devid = btrfs_device_id(leaf, dev_item);
2573 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2574 				   BTRFS_UUID_SIZE);
2575 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2576 				   BTRFS_FSID_SIZE);
2577 		args.uuid = dev_uuid;
2578 		args.fsid = fs_uuid;
2579 		device = btrfs_find_device(fs_info->fs_devices, &args);
2580 		BUG_ON(!device); /* Logic error */
2581 
2582 		if (device->fs_devices->seeding) {
2583 			btrfs_set_device_generation(leaf, dev_item,
2584 						    device->generation);
2585 			btrfs_mark_buffer_dirty(leaf);
2586 		}
2587 
2588 		path->slots[0]++;
2589 		goto next_slot;
2590 	}
2591 	ret = 0;
2592 error:
2593 	btrfs_free_path(path);
2594 	return ret;
2595 }
2596 
2597 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2598 {
2599 	struct btrfs_root *root = fs_info->dev_root;
2600 	struct request_queue *q;
2601 	struct btrfs_trans_handle *trans;
2602 	struct btrfs_device *device;
2603 	struct block_device *bdev;
2604 	struct super_block *sb = fs_info->sb;
2605 	struct rcu_string *name;
2606 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2607 	u64 orig_super_total_bytes;
2608 	u64 orig_super_num_devices;
2609 	int seeding_dev = 0;
2610 	int ret = 0;
2611 	bool locked = false;
2612 
2613 	if (sb_rdonly(sb) && !fs_devices->seeding)
2614 		return -EROFS;
2615 
2616 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2617 				  fs_info->bdev_holder);
2618 	if (IS_ERR(bdev))
2619 		return PTR_ERR(bdev);
2620 
2621 	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2622 		ret = -EINVAL;
2623 		goto error;
2624 	}
2625 
2626 	if (fs_devices->seeding) {
2627 		seeding_dev = 1;
2628 		down_write(&sb->s_umount);
2629 		mutex_lock(&uuid_mutex);
2630 		locked = true;
2631 	}
2632 
2633 	sync_blockdev(bdev);
2634 
2635 	rcu_read_lock();
2636 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2637 		if (device->bdev == bdev) {
2638 			ret = -EEXIST;
2639 			rcu_read_unlock();
2640 			goto error;
2641 		}
2642 	}
2643 	rcu_read_unlock();
2644 
2645 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2646 	if (IS_ERR(device)) {
2647 		/* we can safely leave the fs_devices entry around */
2648 		ret = PTR_ERR(device);
2649 		goto error;
2650 	}
2651 
2652 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2653 	if (!name) {
2654 		ret = -ENOMEM;
2655 		goto error_free_device;
2656 	}
2657 	rcu_assign_pointer(device->name, name);
2658 
2659 	device->fs_info = fs_info;
2660 	device->bdev = bdev;
2661 
2662 	ret = btrfs_get_dev_zone_info(device, false);
2663 	if (ret)
2664 		goto error_free_device;
2665 
2666 	trans = btrfs_start_transaction(root, 0);
2667 	if (IS_ERR(trans)) {
2668 		ret = PTR_ERR(trans);
2669 		goto error_free_zone;
2670 	}
2671 
2672 	q = bdev_get_queue(bdev);
2673 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2674 	device->generation = trans->transid;
2675 	device->io_width = fs_info->sectorsize;
2676 	device->io_align = fs_info->sectorsize;
2677 	device->sector_size = fs_info->sectorsize;
2678 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2679 					 fs_info->sectorsize);
2680 	device->disk_total_bytes = device->total_bytes;
2681 	device->commit_total_bytes = device->total_bytes;
2682 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2683 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2684 	device->mode = FMODE_EXCL;
2685 	device->dev_stats_valid = 1;
2686 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2687 
2688 	if (seeding_dev) {
2689 		btrfs_clear_sb_rdonly(sb);
2690 		ret = btrfs_prepare_sprout(fs_info);
2691 		if (ret) {
2692 			btrfs_abort_transaction(trans, ret);
2693 			goto error_trans;
2694 		}
2695 		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2696 						device);
2697 	}
2698 
2699 	device->fs_devices = fs_devices;
2700 
2701 	mutex_lock(&fs_devices->device_list_mutex);
2702 	mutex_lock(&fs_info->chunk_mutex);
2703 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2704 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2705 	fs_devices->num_devices++;
2706 	fs_devices->open_devices++;
2707 	fs_devices->rw_devices++;
2708 	fs_devices->total_devices++;
2709 	fs_devices->total_rw_bytes += device->total_bytes;
2710 
2711 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2712 
2713 	if (!blk_queue_nonrot(q))
2714 		fs_devices->rotating = true;
2715 
2716 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2717 	btrfs_set_super_total_bytes(fs_info->super_copy,
2718 		round_down(orig_super_total_bytes + device->total_bytes,
2719 			   fs_info->sectorsize));
2720 
2721 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2722 	btrfs_set_super_num_devices(fs_info->super_copy,
2723 				    orig_super_num_devices + 1);
2724 
2725 	/*
2726 	 * we've got more storage, clear any full flags on the space
2727 	 * infos
2728 	 */
2729 	btrfs_clear_space_info_full(fs_info);
2730 
2731 	mutex_unlock(&fs_info->chunk_mutex);
2732 
2733 	/* Add sysfs device entry */
2734 	btrfs_sysfs_add_device(device);
2735 
2736 	mutex_unlock(&fs_devices->device_list_mutex);
2737 
2738 	if (seeding_dev) {
2739 		mutex_lock(&fs_info->chunk_mutex);
2740 		ret = init_first_rw_device(trans);
2741 		mutex_unlock(&fs_info->chunk_mutex);
2742 		if (ret) {
2743 			btrfs_abort_transaction(trans, ret);
2744 			goto error_sysfs;
2745 		}
2746 	}
2747 
2748 	ret = btrfs_add_dev_item(trans, device);
2749 	if (ret) {
2750 		btrfs_abort_transaction(trans, ret);
2751 		goto error_sysfs;
2752 	}
2753 
2754 	if (seeding_dev) {
2755 		ret = btrfs_finish_sprout(trans);
2756 		if (ret) {
2757 			btrfs_abort_transaction(trans, ret);
2758 			goto error_sysfs;
2759 		}
2760 
2761 		/*
2762 		 * fs_devices now represents the newly sprouted filesystem and
2763 		 * its fsid has been changed by btrfs_prepare_sprout
2764 		 */
2765 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2766 	}
2767 
2768 	ret = btrfs_commit_transaction(trans);
2769 
2770 	if (seeding_dev) {
2771 		mutex_unlock(&uuid_mutex);
2772 		up_write(&sb->s_umount);
2773 		locked = false;
2774 
2775 		if (ret) /* transaction commit */
2776 			return ret;
2777 
2778 		ret = btrfs_relocate_sys_chunks(fs_info);
2779 		if (ret < 0)
2780 			btrfs_handle_fs_error(fs_info, ret,
2781 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2782 		trans = btrfs_attach_transaction(root);
2783 		if (IS_ERR(trans)) {
2784 			if (PTR_ERR(trans) == -ENOENT)
2785 				return 0;
2786 			ret = PTR_ERR(trans);
2787 			trans = NULL;
2788 			goto error_sysfs;
2789 		}
2790 		ret = btrfs_commit_transaction(trans);
2791 	}
2792 
2793 	/*
2794 	 * Now that we have written a new super block to this device, check all
2795 	 * other fs_devices list if device_path alienates any other scanned
2796 	 * device.
2797 	 * We can ignore the return value as it typically returns -EINVAL and
2798 	 * only succeeds if the device was an alien.
2799 	 */
2800 	btrfs_forget_devices(device_path);
2801 
2802 	/* Update ctime/mtime for blkid or udev */
2803 	update_dev_time(device_path);
2804 
2805 	return ret;
2806 
2807 error_sysfs:
2808 	btrfs_sysfs_remove_device(device);
2809 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2810 	mutex_lock(&fs_info->chunk_mutex);
2811 	list_del_rcu(&device->dev_list);
2812 	list_del(&device->dev_alloc_list);
2813 	fs_info->fs_devices->num_devices--;
2814 	fs_info->fs_devices->open_devices--;
2815 	fs_info->fs_devices->rw_devices--;
2816 	fs_info->fs_devices->total_devices--;
2817 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2818 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2819 	btrfs_set_super_total_bytes(fs_info->super_copy,
2820 				    orig_super_total_bytes);
2821 	btrfs_set_super_num_devices(fs_info->super_copy,
2822 				    orig_super_num_devices);
2823 	mutex_unlock(&fs_info->chunk_mutex);
2824 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2825 error_trans:
2826 	if (seeding_dev)
2827 		btrfs_set_sb_rdonly(sb);
2828 	if (trans)
2829 		btrfs_end_transaction(trans);
2830 error_free_zone:
2831 	btrfs_destroy_dev_zone_info(device);
2832 error_free_device:
2833 	btrfs_free_device(device);
2834 error:
2835 	blkdev_put(bdev, FMODE_EXCL);
2836 	if (locked) {
2837 		mutex_unlock(&uuid_mutex);
2838 		up_write(&sb->s_umount);
2839 	}
2840 	return ret;
2841 }
2842 
2843 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2844 					struct btrfs_device *device)
2845 {
2846 	int ret;
2847 	struct btrfs_path *path;
2848 	struct btrfs_root *root = device->fs_info->chunk_root;
2849 	struct btrfs_dev_item *dev_item;
2850 	struct extent_buffer *leaf;
2851 	struct btrfs_key key;
2852 
2853 	path = btrfs_alloc_path();
2854 	if (!path)
2855 		return -ENOMEM;
2856 
2857 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2858 	key.type = BTRFS_DEV_ITEM_KEY;
2859 	key.offset = device->devid;
2860 
2861 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2862 	if (ret < 0)
2863 		goto out;
2864 
2865 	if (ret > 0) {
2866 		ret = -ENOENT;
2867 		goto out;
2868 	}
2869 
2870 	leaf = path->nodes[0];
2871 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2872 
2873 	btrfs_set_device_id(leaf, dev_item, device->devid);
2874 	btrfs_set_device_type(leaf, dev_item, device->type);
2875 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2876 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2877 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2878 	btrfs_set_device_total_bytes(leaf, dev_item,
2879 				     btrfs_device_get_disk_total_bytes(device));
2880 	btrfs_set_device_bytes_used(leaf, dev_item,
2881 				    btrfs_device_get_bytes_used(device));
2882 	btrfs_mark_buffer_dirty(leaf);
2883 
2884 out:
2885 	btrfs_free_path(path);
2886 	return ret;
2887 }
2888 
2889 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2890 		      struct btrfs_device *device, u64 new_size)
2891 {
2892 	struct btrfs_fs_info *fs_info = device->fs_info;
2893 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2894 	u64 old_total;
2895 	u64 diff;
2896 	int ret;
2897 
2898 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2899 		return -EACCES;
2900 
2901 	new_size = round_down(new_size, fs_info->sectorsize);
2902 
2903 	mutex_lock(&fs_info->chunk_mutex);
2904 	old_total = btrfs_super_total_bytes(super_copy);
2905 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2906 
2907 	if (new_size <= device->total_bytes ||
2908 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2909 		mutex_unlock(&fs_info->chunk_mutex);
2910 		return -EINVAL;
2911 	}
2912 
2913 	btrfs_set_super_total_bytes(super_copy,
2914 			round_down(old_total + diff, fs_info->sectorsize));
2915 	device->fs_devices->total_rw_bytes += diff;
2916 
2917 	btrfs_device_set_total_bytes(device, new_size);
2918 	btrfs_device_set_disk_total_bytes(device, new_size);
2919 	btrfs_clear_space_info_full(device->fs_info);
2920 	if (list_empty(&device->post_commit_list))
2921 		list_add_tail(&device->post_commit_list,
2922 			      &trans->transaction->dev_update_list);
2923 	mutex_unlock(&fs_info->chunk_mutex);
2924 
2925 	btrfs_reserve_chunk_metadata(trans, false);
2926 	ret = btrfs_update_device(trans, device);
2927 	btrfs_trans_release_chunk_metadata(trans);
2928 
2929 	return ret;
2930 }
2931 
2932 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2933 {
2934 	struct btrfs_fs_info *fs_info = trans->fs_info;
2935 	struct btrfs_root *root = fs_info->chunk_root;
2936 	int ret;
2937 	struct btrfs_path *path;
2938 	struct btrfs_key key;
2939 
2940 	path = btrfs_alloc_path();
2941 	if (!path)
2942 		return -ENOMEM;
2943 
2944 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2945 	key.offset = chunk_offset;
2946 	key.type = BTRFS_CHUNK_ITEM_KEY;
2947 
2948 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2949 	if (ret < 0)
2950 		goto out;
2951 	else if (ret > 0) { /* Logic error or corruption */
2952 		btrfs_handle_fs_error(fs_info, -ENOENT,
2953 				      "Failed lookup while freeing chunk.");
2954 		ret = -ENOENT;
2955 		goto out;
2956 	}
2957 
2958 	ret = btrfs_del_item(trans, root, path);
2959 	if (ret < 0)
2960 		btrfs_handle_fs_error(fs_info, ret,
2961 				      "Failed to delete chunk item.");
2962 out:
2963 	btrfs_free_path(path);
2964 	return ret;
2965 }
2966 
2967 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2968 {
2969 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2970 	struct btrfs_disk_key *disk_key;
2971 	struct btrfs_chunk *chunk;
2972 	u8 *ptr;
2973 	int ret = 0;
2974 	u32 num_stripes;
2975 	u32 array_size;
2976 	u32 len = 0;
2977 	u32 cur;
2978 	struct btrfs_key key;
2979 
2980 	lockdep_assert_held(&fs_info->chunk_mutex);
2981 	array_size = btrfs_super_sys_array_size(super_copy);
2982 
2983 	ptr = super_copy->sys_chunk_array;
2984 	cur = 0;
2985 
2986 	while (cur < array_size) {
2987 		disk_key = (struct btrfs_disk_key *)ptr;
2988 		btrfs_disk_key_to_cpu(&key, disk_key);
2989 
2990 		len = sizeof(*disk_key);
2991 
2992 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2993 			chunk = (struct btrfs_chunk *)(ptr + len);
2994 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2995 			len += btrfs_chunk_item_size(num_stripes);
2996 		} else {
2997 			ret = -EIO;
2998 			break;
2999 		}
3000 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
3001 		    key.offset == chunk_offset) {
3002 			memmove(ptr, ptr + len, array_size - (cur + len));
3003 			array_size -= len;
3004 			btrfs_set_super_sys_array_size(super_copy, array_size);
3005 		} else {
3006 			ptr += len;
3007 			cur += len;
3008 		}
3009 	}
3010 	return ret;
3011 }
3012 
3013 /*
3014  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3015  * @logical: Logical block offset in bytes.
3016  * @length: Length of extent in bytes.
3017  *
3018  * Return: Chunk mapping or ERR_PTR.
3019  */
3020 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3021 				       u64 logical, u64 length)
3022 {
3023 	struct extent_map_tree *em_tree;
3024 	struct extent_map *em;
3025 
3026 	em_tree = &fs_info->mapping_tree;
3027 	read_lock(&em_tree->lock);
3028 	em = lookup_extent_mapping(em_tree, logical, length);
3029 	read_unlock(&em_tree->lock);
3030 
3031 	if (!em) {
3032 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3033 			   logical, length);
3034 		return ERR_PTR(-EINVAL);
3035 	}
3036 
3037 	if (em->start > logical || em->start + em->len < logical) {
3038 		btrfs_crit(fs_info,
3039 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3040 			   logical, length, em->start, em->start + em->len);
3041 		free_extent_map(em);
3042 		return ERR_PTR(-EINVAL);
3043 	}
3044 
3045 	/* callers are responsible for dropping em's ref. */
3046 	return em;
3047 }
3048 
3049 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3050 			     struct map_lookup *map, u64 chunk_offset)
3051 {
3052 	int i;
3053 
3054 	/*
3055 	 * Removing chunk items and updating the device items in the chunks btree
3056 	 * requires holding the chunk_mutex.
3057 	 * See the comment at btrfs_chunk_alloc() for the details.
3058 	 */
3059 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3060 
3061 	for (i = 0; i < map->num_stripes; i++) {
3062 		int ret;
3063 
3064 		ret = btrfs_update_device(trans, map->stripes[i].dev);
3065 		if (ret)
3066 			return ret;
3067 	}
3068 
3069 	return btrfs_free_chunk(trans, chunk_offset);
3070 }
3071 
3072 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3073 {
3074 	struct btrfs_fs_info *fs_info = trans->fs_info;
3075 	struct extent_map *em;
3076 	struct map_lookup *map;
3077 	u64 dev_extent_len = 0;
3078 	int i, ret = 0;
3079 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3080 
3081 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3082 	if (IS_ERR(em)) {
3083 		/*
3084 		 * This is a logic error, but we don't want to just rely on the
3085 		 * user having built with ASSERT enabled, so if ASSERT doesn't
3086 		 * do anything we still error out.
3087 		 */
3088 		ASSERT(0);
3089 		return PTR_ERR(em);
3090 	}
3091 	map = em->map_lookup;
3092 
3093 	/*
3094 	 * First delete the device extent items from the devices btree.
3095 	 * We take the device_list_mutex to avoid racing with the finishing phase
3096 	 * of a device replace operation. See the comment below before acquiring
3097 	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3098 	 * because that can result in a deadlock when deleting the device extent
3099 	 * items from the devices btree - COWing an extent buffer from the btree
3100 	 * may result in allocating a new metadata chunk, which would attempt to
3101 	 * lock again fs_info->chunk_mutex.
3102 	 */
3103 	mutex_lock(&fs_devices->device_list_mutex);
3104 	for (i = 0; i < map->num_stripes; i++) {
3105 		struct btrfs_device *device = map->stripes[i].dev;
3106 		ret = btrfs_free_dev_extent(trans, device,
3107 					    map->stripes[i].physical,
3108 					    &dev_extent_len);
3109 		if (ret) {
3110 			mutex_unlock(&fs_devices->device_list_mutex);
3111 			btrfs_abort_transaction(trans, ret);
3112 			goto out;
3113 		}
3114 
3115 		if (device->bytes_used > 0) {
3116 			mutex_lock(&fs_info->chunk_mutex);
3117 			btrfs_device_set_bytes_used(device,
3118 					device->bytes_used - dev_extent_len);
3119 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3120 			btrfs_clear_space_info_full(fs_info);
3121 			mutex_unlock(&fs_info->chunk_mutex);
3122 		}
3123 	}
3124 	mutex_unlock(&fs_devices->device_list_mutex);
3125 
3126 	/*
3127 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3128 	 *
3129 	 * 1) Just like with the first phase of the chunk allocation, we must
3130 	 *    reserve system space, do all chunk btree updates and deletions, and
3131 	 *    update the system chunk array in the superblock while holding this
3132 	 *    mutex. This is for similar reasons as explained on the comment at
3133 	 *    the top of btrfs_chunk_alloc();
3134 	 *
3135 	 * 2) Prevent races with the final phase of a device replace operation
3136 	 *    that replaces the device object associated with the map's stripes,
3137 	 *    because the device object's id can change at any time during that
3138 	 *    final phase of the device replace operation
3139 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3140 	 *    replaced device and then see it with an ID of
3141 	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3142 	 *    the device item, which does not exists on the chunk btree.
3143 	 *    The finishing phase of device replace acquires both the
3144 	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3145 	 *    safe by just acquiring the chunk_mutex.
3146 	 */
3147 	trans->removing_chunk = true;
3148 	mutex_lock(&fs_info->chunk_mutex);
3149 
3150 	check_system_chunk(trans, map->type);
3151 
3152 	ret = remove_chunk_item(trans, map, chunk_offset);
3153 	/*
3154 	 * Normally we should not get -ENOSPC since we reserved space before
3155 	 * through the call to check_system_chunk().
3156 	 *
3157 	 * Despite our system space_info having enough free space, we may not
3158 	 * be able to allocate extents from its block groups, because all have
3159 	 * an incompatible profile, which will force us to allocate a new system
3160 	 * block group with the right profile, or right after we called
3161 	 * check_system_space() above, a scrub turned the only system block group
3162 	 * with enough free space into RO mode.
3163 	 * This is explained with more detail at do_chunk_alloc().
3164 	 *
3165 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3166 	 */
3167 	if (ret == -ENOSPC) {
3168 		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3169 		struct btrfs_block_group *sys_bg;
3170 
3171 		sys_bg = btrfs_create_chunk(trans, sys_flags);
3172 		if (IS_ERR(sys_bg)) {
3173 			ret = PTR_ERR(sys_bg);
3174 			btrfs_abort_transaction(trans, ret);
3175 			goto out;
3176 		}
3177 
3178 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3179 		if (ret) {
3180 			btrfs_abort_transaction(trans, ret);
3181 			goto out;
3182 		}
3183 
3184 		ret = remove_chunk_item(trans, map, chunk_offset);
3185 		if (ret) {
3186 			btrfs_abort_transaction(trans, ret);
3187 			goto out;
3188 		}
3189 	} else if (ret) {
3190 		btrfs_abort_transaction(trans, ret);
3191 		goto out;
3192 	}
3193 
3194 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3195 
3196 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3197 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3198 		if (ret) {
3199 			btrfs_abort_transaction(trans, ret);
3200 			goto out;
3201 		}
3202 	}
3203 
3204 	mutex_unlock(&fs_info->chunk_mutex);
3205 	trans->removing_chunk = false;
3206 
3207 	/*
3208 	 * We are done with chunk btree updates and deletions, so release the
3209 	 * system space we previously reserved (with check_system_chunk()).
3210 	 */
3211 	btrfs_trans_release_chunk_metadata(trans);
3212 
3213 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3214 	if (ret) {
3215 		btrfs_abort_transaction(trans, ret);
3216 		goto out;
3217 	}
3218 
3219 out:
3220 	if (trans->removing_chunk) {
3221 		mutex_unlock(&fs_info->chunk_mutex);
3222 		trans->removing_chunk = false;
3223 	}
3224 	/* once for us */
3225 	free_extent_map(em);
3226 	return ret;
3227 }
3228 
3229 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3230 {
3231 	struct btrfs_root *root = fs_info->chunk_root;
3232 	struct btrfs_trans_handle *trans;
3233 	struct btrfs_block_group *block_group;
3234 	u64 length;
3235 	int ret;
3236 
3237 	/*
3238 	 * Prevent races with automatic removal of unused block groups.
3239 	 * After we relocate and before we remove the chunk with offset
3240 	 * chunk_offset, automatic removal of the block group can kick in,
3241 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3242 	 *
3243 	 * Make sure to acquire this mutex before doing a tree search (dev
3244 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3245 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3246 	 * we release the path used to search the chunk/dev tree and before
3247 	 * the current task acquires this mutex and calls us.
3248 	 */
3249 	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3250 
3251 	/* step one, relocate all the extents inside this chunk */
3252 	btrfs_scrub_pause(fs_info);
3253 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3254 	btrfs_scrub_continue(fs_info);
3255 	if (ret)
3256 		return ret;
3257 
3258 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3259 	if (!block_group)
3260 		return -ENOENT;
3261 	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3262 	length = block_group->length;
3263 	btrfs_put_block_group(block_group);
3264 
3265 	/*
3266 	 * On a zoned file system, discard the whole block group, this will
3267 	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3268 	 * resetting the zone fails, don't treat it as a fatal problem from the
3269 	 * filesystem's point of view.
3270 	 */
3271 	if (btrfs_is_zoned(fs_info)) {
3272 		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3273 		if (ret)
3274 			btrfs_info(fs_info,
3275 				"failed to reset zone %llu after relocation",
3276 				chunk_offset);
3277 	}
3278 
3279 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3280 						     chunk_offset);
3281 	if (IS_ERR(trans)) {
3282 		ret = PTR_ERR(trans);
3283 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3284 		return ret;
3285 	}
3286 
3287 	/*
3288 	 * step two, delete the device extents and the
3289 	 * chunk tree entries
3290 	 */
3291 	ret = btrfs_remove_chunk(trans, chunk_offset);
3292 	btrfs_end_transaction(trans);
3293 	return ret;
3294 }
3295 
3296 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3297 {
3298 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3299 	struct btrfs_path *path;
3300 	struct extent_buffer *leaf;
3301 	struct btrfs_chunk *chunk;
3302 	struct btrfs_key key;
3303 	struct btrfs_key found_key;
3304 	u64 chunk_type;
3305 	bool retried = false;
3306 	int failed = 0;
3307 	int ret;
3308 
3309 	path = btrfs_alloc_path();
3310 	if (!path)
3311 		return -ENOMEM;
3312 
3313 again:
3314 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3315 	key.offset = (u64)-1;
3316 	key.type = BTRFS_CHUNK_ITEM_KEY;
3317 
3318 	while (1) {
3319 		mutex_lock(&fs_info->reclaim_bgs_lock);
3320 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3321 		if (ret < 0) {
3322 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3323 			goto error;
3324 		}
3325 		BUG_ON(ret == 0); /* Corruption */
3326 
3327 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3328 					  key.type);
3329 		if (ret)
3330 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3331 		if (ret < 0)
3332 			goto error;
3333 		if (ret > 0)
3334 			break;
3335 
3336 		leaf = path->nodes[0];
3337 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3338 
3339 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3340 				       struct btrfs_chunk);
3341 		chunk_type = btrfs_chunk_type(leaf, chunk);
3342 		btrfs_release_path(path);
3343 
3344 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3345 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3346 			if (ret == -ENOSPC)
3347 				failed++;
3348 			else
3349 				BUG_ON(ret);
3350 		}
3351 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3352 
3353 		if (found_key.offset == 0)
3354 			break;
3355 		key.offset = found_key.offset - 1;
3356 	}
3357 	ret = 0;
3358 	if (failed && !retried) {
3359 		failed = 0;
3360 		retried = true;
3361 		goto again;
3362 	} else if (WARN_ON(failed && retried)) {
3363 		ret = -ENOSPC;
3364 	}
3365 error:
3366 	btrfs_free_path(path);
3367 	return ret;
3368 }
3369 
3370 /*
3371  * return 1 : allocate a data chunk successfully,
3372  * return <0: errors during allocating a data chunk,
3373  * return 0 : no need to allocate a data chunk.
3374  */
3375 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3376 				      u64 chunk_offset)
3377 {
3378 	struct btrfs_block_group *cache;
3379 	u64 bytes_used;
3380 	u64 chunk_type;
3381 
3382 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3383 	ASSERT(cache);
3384 	chunk_type = cache->flags;
3385 	btrfs_put_block_group(cache);
3386 
3387 	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3388 		return 0;
3389 
3390 	spin_lock(&fs_info->data_sinfo->lock);
3391 	bytes_used = fs_info->data_sinfo->bytes_used;
3392 	spin_unlock(&fs_info->data_sinfo->lock);
3393 
3394 	if (!bytes_used) {
3395 		struct btrfs_trans_handle *trans;
3396 		int ret;
3397 
3398 		trans =	btrfs_join_transaction(fs_info->tree_root);
3399 		if (IS_ERR(trans))
3400 			return PTR_ERR(trans);
3401 
3402 		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3403 		btrfs_end_transaction(trans);
3404 		if (ret < 0)
3405 			return ret;
3406 		return 1;
3407 	}
3408 
3409 	return 0;
3410 }
3411 
3412 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3413 			       struct btrfs_balance_control *bctl)
3414 {
3415 	struct btrfs_root *root = fs_info->tree_root;
3416 	struct btrfs_trans_handle *trans;
3417 	struct btrfs_balance_item *item;
3418 	struct btrfs_disk_balance_args disk_bargs;
3419 	struct btrfs_path *path;
3420 	struct extent_buffer *leaf;
3421 	struct btrfs_key key;
3422 	int ret, err;
3423 
3424 	path = btrfs_alloc_path();
3425 	if (!path)
3426 		return -ENOMEM;
3427 
3428 	trans = btrfs_start_transaction(root, 0);
3429 	if (IS_ERR(trans)) {
3430 		btrfs_free_path(path);
3431 		return PTR_ERR(trans);
3432 	}
3433 
3434 	key.objectid = BTRFS_BALANCE_OBJECTID;
3435 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3436 	key.offset = 0;
3437 
3438 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3439 				      sizeof(*item));
3440 	if (ret)
3441 		goto out;
3442 
3443 	leaf = path->nodes[0];
3444 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3445 
3446 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3447 
3448 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3449 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3450 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3451 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3452 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3453 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3454 
3455 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3456 
3457 	btrfs_mark_buffer_dirty(leaf);
3458 out:
3459 	btrfs_free_path(path);
3460 	err = btrfs_commit_transaction(trans);
3461 	if (err && !ret)
3462 		ret = err;
3463 	return ret;
3464 }
3465 
3466 static int del_balance_item(struct btrfs_fs_info *fs_info)
3467 {
3468 	struct btrfs_root *root = fs_info->tree_root;
3469 	struct btrfs_trans_handle *trans;
3470 	struct btrfs_path *path;
3471 	struct btrfs_key key;
3472 	int ret, err;
3473 
3474 	path = btrfs_alloc_path();
3475 	if (!path)
3476 		return -ENOMEM;
3477 
3478 	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3479 	if (IS_ERR(trans)) {
3480 		btrfs_free_path(path);
3481 		return PTR_ERR(trans);
3482 	}
3483 
3484 	key.objectid = BTRFS_BALANCE_OBJECTID;
3485 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3486 	key.offset = 0;
3487 
3488 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3489 	if (ret < 0)
3490 		goto out;
3491 	if (ret > 0) {
3492 		ret = -ENOENT;
3493 		goto out;
3494 	}
3495 
3496 	ret = btrfs_del_item(trans, root, path);
3497 out:
3498 	btrfs_free_path(path);
3499 	err = btrfs_commit_transaction(trans);
3500 	if (err && !ret)
3501 		ret = err;
3502 	return ret;
3503 }
3504 
3505 /*
3506  * This is a heuristic used to reduce the number of chunks balanced on
3507  * resume after balance was interrupted.
3508  */
3509 static void update_balance_args(struct btrfs_balance_control *bctl)
3510 {
3511 	/*
3512 	 * Turn on soft mode for chunk types that were being converted.
3513 	 */
3514 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3515 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3516 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3517 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3518 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3519 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3520 
3521 	/*
3522 	 * Turn on usage filter if is not already used.  The idea is
3523 	 * that chunks that we have already balanced should be
3524 	 * reasonably full.  Don't do it for chunks that are being
3525 	 * converted - that will keep us from relocating unconverted
3526 	 * (albeit full) chunks.
3527 	 */
3528 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3529 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3530 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3531 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3532 		bctl->data.usage = 90;
3533 	}
3534 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3535 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3536 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3537 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3538 		bctl->sys.usage = 90;
3539 	}
3540 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3541 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3542 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3543 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3544 		bctl->meta.usage = 90;
3545 	}
3546 }
3547 
3548 /*
3549  * Clear the balance status in fs_info and delete the balance item from disk.
3550  */
3551 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3552 {
3553 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3554 	int ret;
3555 
3556 	BUG_ON(!fs_info->balance_ctl);
3557 
3558 	spin_lock(&fs_info->balance_lock);
3559 	fs_info->balance_ctl = NULL;
3560 	spin_unlock(&fs_info->balance_lock);
3561 
3562 	kfree(bctl);
3563 	ret = del_balance_item(fs_info);
3564 	if (ret)
3565 		btrfs_handle_fs_error(fs_info, ret, NULL);
3566 }
3567 
3568 /*
3569  * Balance filters.  Return 1 if chunk should be filtered out
3570  * (should not be balanced).
3571  */
3572 static int chunk_profiles_filter(u64 chunk_type,
3573 				 struct btrfs_balance_args *bargs)
3574 {
3575 	chunk_type = chunk_to_extended(chunk_type) &
3576 				BTRFS_EXTENDED_PROFILE_MASK;
3577 
3578 	if (bargs->profiles & chunk_type)
3579 		return 0;
3580 
3581 	return 1;
3582 }
3583 
3584 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3585 			      struct btrfs_balance_args *bargs)
3586 {
3587 	struct btrfs_block_group *cache;
3588 	u64 chunk_used;
3589 	u64 user_thresh_min;
3590 	u64 user_thresh_max;
3591 	int ret = 1;
3592 
3593 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3594 	chunk_used = cache->used;
3595 
3596 	if (bargs->usage_min == 0)
3597 		user_thresh_min = 0;
3598 	else
3599 		user_thresh_min = div_factor_fine(cache->length,
3600 						  bargs->usage_min);
3601 
3602 	if (bargs->usage_max == 0)
3603 		user_thresh_max = 1;
3604 	else if (bargs->usage_max > 100)
3605 		user_thresh_max = cache->length;
3606 	else
3607 		user_thresh_max = div_factor_fine(cache->length,
3608 						  bargs->usage_max);
3609 
3610 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3611 		ret = 0;
3612 
3613 	btrfs_put_block_group(cache);
3614 	return ret;
3615 }
3616 
3617 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3618 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3619 {
3620 	struct btrfs_block_group *cache;
3621 	u64 chunk_used, user_thresh;
3622 	int ret = 1;
3623 
3624 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3625 	chunk_used = cache->used;
3626 
3627 	if (bargs->usage_min == 0)
3628 		user_thresh = 1;
3629 	else if (bargs->usage > 100)
3630 		user_thresh = cache->length;
3631 	else
3632 		user_thresh = div_factor_fine(cache->length, bargs->usage);
3633 
3634 	if (chunk_used < user_thresh)
3635 		ret = 0;
3636 
3637 	btrfs_put_block_group(cache);
3638 	return ret;
3639 }
3640 
3641 static int chunk_devid_filter(struct extent_buffer *leaf,
3642 			      struct btrfs_chunk *chunk,
3643 			      struct btrfs_balance_args *bargs)
3644 {
3645 	struct btrfs_stripe *stripe;
3646 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3647 	int i;
3648 
3649 	for (i = 0; i < num_stripes; i++) {
3650 		stripe = btrfs_stripe_nr(chunk, i);
3651 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3652 			return 0;
3653 	}
3654 
3655 	return 1;
3656 }
3657 
3658 static u64 calc_data_stripes(u64 type, int num_stripes)
3659 {
3660 	const int index = btrfs_bg_flags_to_raid_index(type);
3661 	const int ncopies = btrfs_raid_array[index].ncopies;
3662 	const int nparity = btrfs_raid_array[index].nparity;
3663 
3664 	return (num_stripes - nparity) / ncopies;
3665 }
3666 
3667 /* [pstart, pend) */
3668 static int chunk_drange_filter(struct extent_buffer *leaf,
3669 			       struct btrfs_chunk *chunk,
3670 			       struct btrfs_balance_args *bargs)
3671 {
3672 	struct btrfs_stripe *stripe;
3673 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3674 	u64 stripe_offset;
3675 	u64 stripe_length;
3676 	u64 type;
3677 	int factor;
3678 	int i;
3679 
3680 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3681 		return 0;
3682 
3683 	type = btrfs_chunk_type(leaf, chunk);
3684 	factor = calc_data_stripes(type, num_stripes);
3685 
3686 	for (i = 0; i < num_stripes; i++) {
3687 		stripe = btrfs_stripe_nr(chunk, i);
3688 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3689 			continue;
3690 
3691 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3692 		stripe_length = btrfs_chunk_length(leaf, chunk);
3693 		stripe_length = div_u64(stripe_length, factor);
3694 
3695 		if (stripe_offset < bargs->pend &&
3696 		    stripe_offset + stripe_length > bargs->pstart)
3697 			return 0;
3698 	}
3699 
3700 	return 1;
3701 }
3702 
3703 /* [vstart, vend) */
3704 static int chunk_vrange_filter(struct extent_buffer *leaf,
3705 			       struct btrfs_chunk *chunk,
3706 			       u64 chunk_offset,
3707 			       struct btrfs_balance_args *bargs)
3708 {
3709 	if (chunk_offset < bargs->vend &&
3710 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3711 		/* at least part of the chunk is inside this vrange */
3712 		return 0;
3713 
3714 	return 1;
3715 }
3716 
3717 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3718 			       struct btrfs_chunk *chunk,
3719 			       struct btrfs_balance_args *bargs)
3720 {
3721 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3722 
3723 	if (bargs->stripes_min <= num_stripes
3724 			&& num_stripes <= bargs->stripes_max)
3725 		return 0;
3726 
3727 	return 1;
3728 }
3729 
3730 static int chunk_soft_convert_filter(u64 chunk_type,
3731 				     struct btrfs_balance_args *bargs)
3732 {
3733 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3734 		return 0;
3735 
3736 	chunk_type = chunk_to_extended(chunk_type) &
3737 				BTRFS_EXTENDED_PROFILE_MASK;
3738 
3739 	if (bargs->target == chunk_type)
3740 		return 1;
3741 
3742 	return 0;
3743 }
3744 
3745 static int should_balance_chunk(struct extent_buffer *leaf,
3746 				struct btrfs_chunk *chunk, u64 chunk_offset)
3747 {
3748 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3749 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3750 	struct btrfs_balance_args *bargs = NULL;
3751 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3752 
3753 	/* type filter */
3754 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3755 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3756 		return 0;
3757 	}
3758 
3759 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3760 		bargs = &bctl->data;
3761 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3762 		bargs = &bctl->sys;
3763 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3764 		bargs = &bctl->meta;
3765 
3766 	/* profiles filter */
3767 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3768 	    chunk_profiles_filter(chunk_type, bargs)) {
3769 		return 0;
3770 	}
3771 
3772 	/* usage filter */
3773 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3774 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3775 		return 0;
3776 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3777 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3778 		return 0;
3779 	}
3780 
3781 	/* devid filter */
3782 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3783 	    chunk_devid_filter(leaf, chunk, bargs)) {
3784 		return 0;
3785 	}
3786 
3787 	/* drange filter, makes sense only with devid filter */
3788 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3789 	    chunk_drange_filter(leaf, chunk, bargs)) {
3790 		return 0;
3791 	}
3792 
3793 	/* vrange filter */
3794 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3795 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3796 		return 0;
3797 	}
3798 
3799 	/* stripes filter */
3800 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3801 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3802 		return 0;
3803 	}
3804 
3805 	/* soft profile changing mode */
3806 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3807 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3808 		return 0;
3809 	}
3810 
3811 	/*
3812 	 * limited by count, must be the last filter
3813 	 */
3814 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3815 		if (bargs->limit == 0)
3816 			return 0;
3817 		else
3818 			bargs->limit--;
3819 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3820 		/*
3821 		 * Same logic as the 'limit' filter; the minimum cannot be
3822 		 * determined here because we do not have the global information
3823 		 * about the count of all chunks that satisfy the filters.
3824 		 */
3825 		if (bargs->limit_max == 0)
3826 			return 0;
3827 		else
3828 			bargs->limit_max--;
3829 	}
3830 
3831 	return 1;
3832 }
3833 
3834 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3835 {
3836 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3837 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3838 	u64 chunk_type;
3839 	struct btrfs_chunk *chunk;
3840 	struct btrfs_path *path = NULL;
3841 	struct btrfs_key key;
3842 	struct btrfs_key found_key;
3843 	struct extent_buffer *leaf;
3844 	int slot;
3845 	int ret;
3846 	int enospc_errors = 0;
3847 	bool counting = true;
3848 	/* The single value limit and min/max limits use the same bytes in the */
3849 	u64 limit_data = bctl->data.limit;
3850 	u64 limit_meta = bctl->meta.limit;
3851 	u64 limit_sys = bctl->sys.limit;
3852 	u32 count_data = 0;
3853 	u32 count_meta = 0;
3854 	u32 count_sys = 0;
3855 	int chunk_reserved = 0;
3856 
3857 	path = btrfs_alloc_path();
3858 	if (!path) {
3859 		ret = -ENOMEM;
3860 		goto error;
3861 	}
3862 
3863 	/* zero out stat counters */
3864 	spin_lock(&fs_info->balance_lock);
3865 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3866 	spin_unlock(&fs_info->balance_lock);
3867 again:
3868 	if (!counting) {
3869 		/*
3870 		 * The single value limit and min/max limits use the same bytes
3871 		 * in the
3872 		 */
3873 		bctl->data.limit = limit_data;
3874 		bctl->meta.limit = limit_meta;
3875 		bctl->sys.limit = limit_sys;
3876 	}
3877 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3878 	key.offset = (u64)-1;
3879 	key.type = BTRFS_CHUNK_ITEM_KEY;
3880 
3881 	while (1) {
3882 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3883 		    atomic_read(&fs_info->balance_cancel_req)) {
3884 			ret = -ECANCELED;
3885 			goto error;
3886 		}
3887 
3888 		mutex_lock(&fs_info->reclaim_bgs_lock);
3889 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3890 		if (ret < 0) {
3891 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3892 			goto error;
3893 		}
3894 
3895 		/*
3896 		 * this shouldn't happen, it means the last relocate
3897 		 * failed
3898 		 */
3899 		if (ret == 0)
3900 			BUG(); /* FIXME break ? */
3901 
3902 		ret = btrfs_previous_item(chunk_root, path, 0,
3903 					  BTRFS_CHUNK_ITEM_KEY);
3904 		if (ret) {
3905 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3906 			ret = 0;
3907 			break;
3908 		}
3909 
3910 		leaf = path->nodes[0];
3911 		slot = path->slots[0];
3912 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3913 
3914 		if (found_key.objectid != key.objectid) {
3915 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3916 			break;
3917 		}
3918 
3919 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3920 		chunk_type = btrfs_chunk_type(leaf, chunk);
3921 
3922 		if (!counting) {
3923 			spin_lock(&fs_info->balance_lock);
3924 			bctl->stat.considered++;
3925 			spin_unlock(&fs_info->balance_lock);
3926 		}
3927 
3928 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3929 
3930 		btrfs_release_path(path);
3931 		if (!ret) {
3932 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3933 			goto loop;
3934 		}
3935 
3936 		if (counting) {
3937 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3938 			spin_lock(&fs_info->balance_lock);
3939 			bctl->stat.expected++;
3940 			spin_unlock(&fs_info->balance_lock);
3941 
3942 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3943 				count_data++;
3944 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3945 				count_sys++;
3946 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3947 				count_meta++;
3948 
3949 			goto loop;
3950 		}
3951 
3952 		/*
3953 		 * Apply limit_min filter, no need to check if the LIMITS
3954 		 * filter is used, limit_min is 0 by default
3955 		 */
3956 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3957 					count_data < bctl->data.limit_min)
3958 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3959 					count_meta < bctl->meta.limit_min)
3960 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3961 					count_sys < bctl->sys.limit_min)) {
3962 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3963 			goto loop;
3964 		}
3965 
3966 		if (!chunk_reserved) {
3967 			/*
3968 			 * We may be relocating the only data chunk we have,
3969 			 * which could potentially end up with losing data's
3970 			 * raid profile, so lets allocate an empty one in
3971 			 * advance.
3972 			 */
3973 			ret = btrfs_may_alloc_data_chunk(fs_info,
3974 							 found_key.offset);
3975 			if (ret < 0) {
3976 				mutex_unlock(&fs_info->reclaim_bgs_lock);
3977 				goto error;
3978 			} else if (ret == 1) {
3979 				chunk_reserved = 1;
3980 			}
3981 		}
3982 
3983 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3984 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3985 		if (ret == -ENOSPC) {
3986 			enospc_errors++;
3987 		} else if (ret == -ETXTBSY) {
3988 			btrfs_info(fs_info,
3989 	   "skipping relocation of block group %llu due to active swapfile",
3990 				   found_key.offset);
3991 			ret = 0;
3992 		} else if (ret) {
3993 			goto error;
3994 		} else {
3995 			spin_lock(&fs_info->balance_lock);
3996 			bctl->stat.completed++;
3997 			spin_unlock(&fs_info->balance_lock);
3998 		}
3999 loop:
4000 		if (found_key.offset == 0)
4001 			break;
4002 		key.offset = found_key.offset - 1;
4003 	}
4004 
4005 	if (counting) {
4006 		btrfs_release_path(path);
4007 		counting = false;
4008 		goto again;
4009 	}
4010 error:
4011 	btrfs_free_path(path);
4012 	if (enospc_errors) {
4013 		btrfs_info(fs_info, "%d enospc errors during balance",
4014 			   enospc_errors);
4015 		if (!ret)
4016 			ret = -ENOSPC;
4017 	}
4018 
4019 	return ret;
4020 }
4021 
4022 /**
4023  * alloc_profile_is_valid - see if a given profile is valid and reduced
4024  * @flags: profile to validate
4025  * @extended: if true @flags is treated as an extended profile
4026  */
4027 static int alloc_profile_is_valid(u64 flags, int extended)
4028 {
4029 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4030 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
4031 
4032 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4033 
4034 	/* 1) check that all other bits are zeroed */
4035 	if (flags & ~mask)
4036 		return 0;
4037 
4038 	/* 2) see if profile is reduced */
4039 	if (flags == 0)
4040 		return !extended; /* "0" is valid for usual profiles */
4041 
4042 	return has_single_bit_set(flags);
4043 }
4044 
4045 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4046 {
4047 	/* cancel requested || normal exit path */
4048 	return atomic_read(&fs_info->balance_cancel_req) ||
4049 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4050 		 atomic_read(&fs_info->balance_cancel_req) == 0);
4051 }
4052 
4053 /*
4054  * Validate target profile against allowed profiles and return true if it's OK.
4055  * Otherwise print the error message and return false.
4056  */
4057 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4058 		const struct btrfs_balance_args *bargs,
4059 		u64 allowed, const char *type)
4060 {
4061 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4062 		return true;
4063 
4064 	if (fs_info->sectorsize < PAGE_SIZE &&
4065 		bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
4066 		btrfs_err(fs_info,
4067 		"RAID56 is not yet supported for sectorsize %u with page size %lu",
4068 			  fs_info->sectorsize, PAGE_SIZE);
4069 		return false;
4070 	}
4071 	/* Profile is valid and does not have bits outside of the allowed set */
4072 	if (alloc_profile_is_valid(bargs->target, 1) &&
4073 	    (bargs->target & ~allowed) == 0)
4074 		return true;
4075 
4076 	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4077 			type, btrfs_bg_type_to_raid_name(bargs->target));
4078 	return false;
4079 }
4080 
4081 /*
4082  * Fill @buf with textual description of balance filter flags @bargs, up to
4083  * @size_buf including the terminating null. The output may be trimmed if it
4084  * does not fit into the provided buffer.
4085  */
4086 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4087 				 u32 size_buf)
4088 {
4089 	int ret;
4090 	u32 size_bp = size_buf;
4091 	char *bp = buf;
4092 	u64 flags = bargs->flags;
4093 	char tmp_buf[128] = {'\0'};
4094 
4095 	if (!flags)
4096 		return;
4097 
4098 #define CHECK_APPEND_NOARG(a)						\
4099 	do {								\
4100 		ret = snprintf(bp, size_bp, (a));			\
4101 		if (ret < 0 || ret >= size_bp)				\
4102 			goto out_overflow;				\
4103 		size_bp -= ret;						\
4104 		bp += ret;						\
4105 	} while (0)
4106 
4107 #define CHECK_APPEND_1ARG(a, v1)					\
4108 	do {								\
4109 		ret = snprintf(bp, size_bp, (a), (v1));			\
4110 		if (ret < 0 || ret >= size_bp)				\
4111 			goto out_overflow;				\
4112 		size_bp -= ret;						\
4113 		bp += ret;						\
4114 	} while (0)
4115 
4116 #define CHECK_APPEND_2ARG(a, v1, v2)					\
4117 	do {								\
4118 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4119 		if (ret < 0 || ret >= size_bp)				\
4120 			goto out_overflow;				\
4121 		size_bp -= ret;						\
4122 		bp += ret;						\
4123 	} while (0)
4124 
4125 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4126 		CHECK_APPEND_1ARG("convert=%s,",
4127 				  btrfs_bg_type_to_raid_name(bargs->target));
4128 
4129 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4130 		CHECK_APPEND_NOARG("soft,");
4131 
4132 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4133 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4134 					    sizeof(tmp_buf));
4135 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4136 	}
4137 
4138 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4139 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4140 
4141 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4142 		CHECK_APPEND_2ARG("usage=%u..%u,",
4143 				  bargs->usage_min, bargs->usage_max);
4144 
4145 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4146 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4147 
4148 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4149 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4150 				  bargs->pstart, bargs->pend);
4151 
4152 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4153 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4154 				  bargs->vstart, bargs->vend);
4155 
4156 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4157 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4158 
4159 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4160 		CHECK_APPEND_2ARG("limit=%u..%u,",
4161 				bargs->limit_min, bargs->limit_max);
4162 
4163 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4164 		CHECK_APPEND_2ARG("stripes=%u..%u,",
4165 				  bargs->stripes_min, bargs->stripes_max);
4166 
4167 #undef CHECK_APPEND_2ARG
4168 #undef CHECK_APPEND_1ARG
4169 #undef CHECK_APPEND_NOARG
4170 
4171 out_overflow:
4172 
4173 	if (size_bp < size_buf)
4174 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4175 	else
4176 		buf[0] = '\0';
4177 }
4178 
4179 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4180 {
4181 	u32 size_buf = 1024;
4182 	char tmp_buf[192] = {'\0'};
4183 	char *buf;
4184 	char *bp;
4185 	u32 size_bp = size_buf;
4186 	int ret;
4187 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4188 
4189 	buf = kzalloc(size_buf, GFP_KERNEL);
4190 	if (!buf)
4191 		return;
4192 
4193 	bp = buf;
4194 
4195 #define CHECK_APPEND_1ARG(a, v1)					\
4196 	do {								\
4197 		ret = snprintf(bp, size_bp, (a), (v1));			\
4198 		if (ret < 0 || ret >= size_bp)				\
4199 			goto out_overflow;				\
4200 		size_bp -= ret;						\
4201 		bp += ret;						\
4202 	} while (0)
4203 
4204 	if (bctl->flags & BTRFS_BALANCE_FORCE)
4205 		CHECK_APPEND_1ARG("%s", "-f ");
4206 
4207 	if (bctl->flags & BTRFS_BALANCE_DATA) {
4208 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4209 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4210 	}
4211 
4212 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4213 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4214 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4215 	}
4216 
4217 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4218 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4219 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4220 	}
4221 
4222 #undef CHECK_APPEND_1ARG
4223 
4224 out_overflow:
4225 
4226 	if (size_bp < size_buf)
4227 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4228 	btrfs_info(fs_info, "balance: %s %s",
4229 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4230 		   "resume" : "start", buf);
4231 
4232 	kfree(buf);
4233 }
4234 
4235 /*
4236  * Should be called with balance mutexe held
4237  */
4238 int btrfs_balance(struct btrfs_fs_info *fs_info,
4239 		  struct btrfs_balance_control *bctl,
4240 		  struct btrfs_ioctl_balance_args *bargs)
4241 {
4242 	u64 meta_target, data_target;
4243 	u64 allowed;
4244 	int mixed = 0;
4245 	int ret;
4246 	u64 num_devices;
4247 	unsigned seq;
4248 	bool reducing_redundancy;
4249 	int i;
4250 
4251 	if (btrfs_fs_closing(fs_info) ||
4252 	    atomic_read(&fs_info->balance_pause_req) ||
4253 	    btrfs_should_cancel_balance(fs_info)) {
4254 		ret = -EINVAL;
4255 		goto out;
4256 	}
4257 
4258 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4259 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4260 		mixed = 1;
4261 
4262 	/*
4263 	 * In case of mixed groups both data and meta should be picked,
4264 	 * and identical options should be given for both of them.
4265 	 */
4266 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4267 	if (mixed && (bctl->flags & allowed)) {
4268 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4269 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4270 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4271 			btrfs_err(fs_info,
4272 	  "balance: mixed groups data and metadata options must be the same");
4273 			ret = -EINVAL;
4274 			goto out;
4275 		}
4276 	}
4277 
4278 	/*
4279 	 * rw_devices will not change at the moment, device add/delete/replace
4280 	 * are exclusive
4281 	 */
4282 	num_devices = fs_info->fs_devices->rw_devices;
4283 
4284 	/*
4285 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4286 	 * special bit for it, to make it easier to distinguish.  Thus we need
4287 	 * to set it manually, or balance would refuse the profile.
4288 	 */
4289 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4290 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4291 		if (num_devices >= btrfs_raid_array[i].devs_min)
4292 			allowed |= btrfs_raid_array[i].bg_flag;
4293 
4294 	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4295 	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4296 	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4297 		ret = -EINVAL;
4298 		goto out;
4299 	}
4300 
4301 	/*
4302 	 * Allow to reduce metadata or system integrity only if force set for
4303 	 * profiles with redundancy (copies, parity)
4304 	 */
4305 	allowed = 0;
4306 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4307 		if (btrfs_raid_array[i].ncopies >= 2 ||
4308 		    btrfs_raid_array[i].tolerated_failures >= 1)
4309 			allowed |= btrfs_raid_array[i].bg_flag;
4310 	}
4311 	do {
4312 		seq = read_seqbegin(&fs_info->profiles_lock);
4313 
4314 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4315 		     (fs_info->avail_system_alloc_bits & allowed) &&
4316 		     !(bctl->sys.target & allowed)) ||
4317 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4318 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4319 		     !(bctl->meta.target & allowed)))
4320 			reducing_redundancy = true;
4321 		else
4322 			reducing_redundancy = false;
4323 
4324 		/* if we're not converting, the target field is uninitialized */
4325 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4326 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4327 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4328 			bctl->data.target : fs_info->avail_data_alloc_bits;
4329 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4330 
4331 	if (reducing_redundancy) {
4332 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4333 			btrfs_info(fs_info,
4334 			   "balance: force reducing metadata redundancy");
4335 		} else {
4336 			btrfs_err(fs_info,
4337 	"balance: reduces metadata redundancy, use --force if you want this");
4338 			ret = -EINVAL;
4339 			goto out;
4340 		}
4341 	}
4342 
4343 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4344 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4345 		btrfs_warn(fs_info,
4346 	"balance: metadata profile %s has lower redundancy than data profile %s",
4347 				btrfs_bg_type_to_raid_name(meta_target),
4348 				btrfs_bg_type_to_raid_name(data_target));
4349 	}
4350 
4351 	ret = insert_balance_item(fs_info, bctl);
4352 	if (ret && ret != -EEXIST)
4353 		goto out;
4354 
4355 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4356 		BUG_ON(ret == -EEXIST);
4357 		BUG_ON(fs_info->balance_ctl);
4358 		spin_lock(&fs_info->balance_lock);
4359 		fs_info->balance_ctl = bctl;
4360 		spin_unlock(&fs_info->balance_lock);
4361 	} else {
4362 		BUG_ON(ret != -EEXIST);
4363 		spin_lock(&fs_info->balance_lock);
4364 		update_balance_args(bctl);
4365 		spin_unlock(&fs_info->balance_lock);
4366 	}
4367 
4368 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4369 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4370 	describe_balance_start_or_resume(fs_info);
4371 	mutex_unlock(&fs_info->balance_mutex);
4372 
4373 	ret = __btrfs_balance(fs_info);
4374 
4375 	mutex_lock(&fs_info->balance_mutex);
4376 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4377 		btrfs_info(fs_info, "balance: paused");
4378 	/*
4379 	 * Balance can be canceled by:
4380 	 *
4381 	 * - Regular cancel request
4382 	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4383 	 *
4384 	 * - Fatal signal to "btrfs" process
4385 	 *   Either the signal caught by wait_reserve_ticket() and callers
4386 	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4387 	 *   got -ECANCELED.
4388 	 *   Either way, in this case balance_cancel_req = 0, and
4389 	 *   ret == -EINTR or ret == -ECANCELED.
4390 	 *
4391 	 * So here we only check the return value to catch canceled balance.
4392 	 */
4393 	else if (ret == -ECANCELED || ret == -EINTR)
4394 		btrfs_info(fs_info, "balance: canceled");
4395 	else
4396 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4397 
4398 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4399 
4400 	if (bargs) {
4401 		memset(bargs, 0, sizeof(*bargs));
4402 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4403 	}
4404 
4405 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4406 	    balance_need_close(fs_info)) {
4407 		reset_balance_state(fs_info);
4408 		btrfs_exclop_finish(fs_info);
4409 	}
4410 
4411 	wake_up(&fs_info->balance_wait_q);
4412 
4413 	return ret;
4414 out:
4415 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4416 		reset_balance_state(fs_info);
4417 	else
4418 		kfree(bctl);
4419 	btrfs_exclop_finish(fs_info);
4420 
4421 	return ret;
4422 }
4423 
4424 static int balance_kthread(void *data)
4425 {
4426 	struct btrfs_fs_info *fs_info = data;
4427 	int ret = 0;
4428 
4429 	sb_start_write(fs_info->sb);
4430 	mutex_lock(&fs_info->balance_mutex);
4431 	if (fs_info->balance_ctl)
4432 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4433 	mutex_unlock(&fs_info->balance_mutex);
4434 	sb_end_write(fs_info->sb);
4435 
4436 	return ret;
4437 }
4438 
4439 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4440 {
4441 	struct task_struct *tsk;
4442 
4443 	mutex_lock(&fs_info->balance_mutex);
4444 	if (!fs_info->balance_ctl) {
4445 		mutex_unlock(&fs_info->balance_mutex);
4446 		return 0;
4447 	}
4448 	mutex_unlock(&fs_info->balance_mutex);
4449 
4450 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4451 		btrfs_info(fs_info, "balance: resume skipped");
4452 		return 0;
4453 	}
4454 
4455 	/*
4456 	 * A ro->rw remount sequence should continue with the paused balance
4457 	 * regardless of who pauses it, system or the user as of now, so set
4458 	 * the resume flag.
4459 	 */
4460 	spin_lock(&fs_info->balance_lock);
4461 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4462 	spin_unlock(&fs_info->balance_lock);
4463 
4464 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4465 	return PTR_ERR_OR_ZERO(tsk);
4466 }
4467 
4468 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4469 {
4470 	struct btrfs_balance_control *bctl;
4471 	struct btrfs_balance_item *item;
4472 	struct btrfs_disk_balance_args disk_bargs;
4473 	struct btrfs_path *path;
4474 	struct extent_buffer *leaf;
4475 	struct btrfs_key key;
4476 	int ret;
4477 
4478 	path = btrfs_alloc_path();
4479 	if (!path)
4480 		return -ENOMEM;
4481 
4482 	key.objectid = BTRFS_BALANCE_OBJECTID;
4483 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4484 	key.offset = 0;
4485 
4486 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4487 	if (ret < 0)
4488 		goto out;
4489 	if (ret > 0) { /* ret = -ENOENT; */
4490 		ret = 0;
4491 		goto out;
4492 	}
4493 
4494 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4495 	if (!bctl) {
4496 		ret = -ENOMEM;
4497 		goto out;
4498 	}
4499 
4500 	leaf = path->nodes[0];
4501 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4502 
4503 	bctl->flags = btrfs_balance_flags(leaf, item);
4504 	bctl->flags |= BTRFS_BALANCE_RESUME;
4505 
4506 	btrfs_balance_data(leaf, item, &disk_bargs);
4507 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4508 	btrfs_balance_meta(leaf, item, &disk_bargs);
4509 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4510 	btrfs_balance_sys(leaf, item, &disk_bargs);
4511 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4512 
4513 	/*
4514 	 * This should never happen, as the paused balance state is recovered
4515 	 * during mount without any chance of other exclusive ops to collide.
4516 	 *
4517 	 * This gives the exclusive op status to balance and keeps in paused
4518 	 * state until user intervention (cancel or umount). If the ownership
4519 	 * cannot be assigned, show a message but do not fail. The balance
4520 	 * is in a paused state and must have fs_info::balance_ctl properly
4521 	 * set up.
4522 	 */
4523 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4524 		btrfs_warn(fs_info,
4525 	"balance: cannot set exclusive op status, resume manually");
4526 
4527 	btrfs_release_path(path);
4528 
4529 	mutex_lock(&fs_info->balance_mutex);
4530 	BUG_ON(fs_info->balance_ctl);
4531 	spin_lock(&fs_info->balance_lock);
4532 	fs_info->balance_ctl = bctl;
4533 	spin_unlock(&fs_info->balance_lock);
4534 	mutex_unlock(&fs_info->balance_mutex);
4535 out:
4536 	btrfs_free_path(path);
4537 	return ret;
4538 }
4539 
4540 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4541 {
4542 	int ret = 0;
4543 
4544 	mutex_lock(&fs_info->balance_mutex);
4545 	if (!fs_info->balance_ctl) {
4546 		mutex_unlock(&fs_info->balance_mutex);
4547 		return -ENOTCONN;
4548 	}
4549 
4550 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4551 		atomic_inc(&fs_info->balance_pause_req);
4552 		mutex_unlock(&fs_info->balance_mutex);
4553 
4554 		wait_event(fs_info->balance_wait_q,
4555 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4556 
4557 		mutex_lock(&fs_info->balance_mutex);
4558 		/* we are good with balance_ctl ripped off from under us */
4559 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4560 		atomic_dec(&fs_info->balance_pause_req);
4561 	} else {
4562 		ret = -ENOTCONN;
4563 	}
4564 
4565 	mutex_unlock(&fs_info->balance_mutex);
4566 	return ret;
4567 }
4568 
4569 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4570 {
4571 	mutex_lock(&fs_info->balance_mutex);
4572 	if (!fs_info->balance_ctl) {
4573 		mutex_unlock(&fs_info->balance_mutex);
4574 		return -ENOTCONN;
4575 	}
4576 
4577 	/*
4578 	 * A paused balance with the item stored on disk can be resumed at
4579 	 * mount time if the mount is read-write. Otherwise it's still paused
4580 	 * and we must not allow cancelling as it deletes the item.
4581 	 */
4582 	if (sb_rdonly(fs_info->sb)) {
4583 		mutex_unlock(&fs_info->balance_mutex);
4584 		return -EROFS;
4585 	}
4586 
4587 	atomic_inc(&fs_info->balance_cancel_req);
4588 	/*
4589 	 * if we are running just wait and return, balance item is
4590 	 * deleted in btrfs_balance in this case
4591 	 */
4592 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4593 		mutex_unlock(&fs_info->balance_mutex);
4594 		wait_event(fs_info->balance_wait_q,
4595 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4596 		mutex_lock(&fs_info->balance_mutex);
4597 	} else {
4598 		mutex_unlock(&fs_info->balance_mutex);
4599 		/*
4600 		 * Lock released to allow other waiters to continue, we'll
4601 		 * reexamine the status again.
4602 		 */
4603 		mutex_lock(&fs_info->balance_mutex);
4604 
4605 		if (fs_info->balance_ctl) {
4606 			reset_balance_state(fs_info);
4607 			btrfs_exclop_finish(fs_info);
4608 			btrfs_info(fs_info, "balance: canceled");
4609 		}
4610 	}
4611 
4612 	BUG_ON(fs_info->balance_ctl ||
4613 		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4614 	atomic_dec(&fs_info->balance_cancel_req);
4615 	mutex_unlock(&fs_info->balance_mutex);
4616 	return 0;
4617 }
4618 
4619 int btrfs_uuid_scan_kthread(void *data)
4620 {
4621 	struct btrfs_fs_info *fs_info = data;
4622 	struct btrfs_root *root = fs_info->tree_root;
4623 	struct btrfs_key key;
4624 	struct btrfs_path *path = NULL;
4625 	int ret = 0;
4626 	struct extent_buffer *eb;
4627 	int slot;
4628 	struct btrfs_root_item root_item;
4629 	u32 item_size;
4630 	struct btrfs_trans_handle *trans = NULL;
4631 	bool closing = false;
4632 
4633 	path = btrfs_alloc_path();
4634 	if (!path) {
4635 		ret = -ENOMEM;
4636 		goto out;
4637 	}
4638 
4639 	key.objectid = 0;
4640 	key.type = BTRFS_ROOT_ITEM_KEY;
4641 	key.offset = 0;
4642 
4643 	while (1) {
4644 		if (btrfs_fs_closing(fs_info)) {
4645 			closing = true;
4646 			break;
4647 		}
4648 		ret = btrfs_search_forward(root, &key, path,
4649 				BTRFS_OLDEST_GENERATION);
4650 		if (ret) {
4651 			if (ret > 0)
4652 				ret = 0;
4653 			break;
4654 		}
4655 
4656 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4657 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4658 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4659 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4660 			goto skip;
4661 
4662 		eb = path->nodes[0];
4663 		slot = path->slots[0];
4664 		item_size = btrfs_item_size_nr(eb, slot);
4665 		if (item_size < sizeof(root_item))
4666 			goto skip;
4667 
4668 		read_extent_buffer(eb, &root_item,
4669 				   btrfs_item_ptr_offset(eb, slot),
4670 				   (int)sizeof(root_item));
4671 		if (btrfs_root_refs(&root_item) == 0)
4672 			goto skip;
4673 
4674 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4675 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4676 			if (trans)
4677 				goto update_tree;
4678 
4679 			btrfs_release_path(path);
4680 			/*
4681 			 * 1 - subvol uuid item
4682 			 * 1 - received_subvol uuid item
4683 			 */
4684 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4685 			if (IS_ERR(trans)) {
4686 				ret = PTR_ERR(trans);
4687 				break;
4688 			}
4689 			continue;
4690 		} else {
4691 			goto skip;
4692 		}
4693 update_tree:
4694 		btrfs_release_path(path);
4695 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4696 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4697 						  BTRFS_UUID_KEY_SUBVOL,
4698 						  key.objectid);
4699 			if (ret < 0) {
4700 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4701 					ret);
4702 				break;
4703 			}
4704 		}
4705 
4706 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4707 			ret = btrfs_uuid_tree_add(trans,
4708 						  root_item.received_uuid,
4709 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4710 						  key.objectid);
4711 			if (ret < 0) {
4712 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4713 					ret);
4714 				break;
4715 			}
4716 		}
4717 
4718 skip:
4719 		btrfs_release_path(path);
4720 		if (trans) {
4721 			ret = btrfs_end_transaction(trans);
4722 			trans = NULL;
4723 			if (ret)
4724 				break;
4725 		}
4726 
4727 		if (key.offset < (u64)-1) {
4728 			key.offset++;
4729 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4730 			key.offset = 0;
4731 			key.type = BTRFS_ROOT_ITEM_KEY;
4732 		} else if (key.objectid < (u64)-1) {
4733 			key.offset = 0;
4734 			key.type = BTRFS_ROOT_ITEM_KEY;
4735 			key.objectid++;
4736 		} else {
4737 			break;
4738 		}
4739 		cond_resched();
4740 	}
4741 
4742 out:
4743 	btrfs_free_path(path);
4744 	if (trans && !IS_ERR(trans))
4745 		btrfs_end_transaction(trans);
4746 	if (ret)
4747 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4748 	else if (!closing)
4749 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4750 	up(&fs_info->uuid_tree_rescan_sem);
4751 	return 0;
4752 }
4753 
4754 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4755 {
4756 	struct btrfs_trans_handle *trans;
4757 	struct btrfs_root *tree_root = fs_info->tree_root;
4758 	struct btrfs_root *uuid_root;
4759 	struct task_struct *task;
4760 	int ret;
4761 
4762 	/*
4763 	 * 1 - root node
4764 	 * 1 - root item
4765 	 */
4766 	trans = btrfs_start_transaction(tree_root, 2);
4767 	if (IS_ERR(trans))
4768 		return PTR_ERR(trans);
4769 
4770 	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4771 	if (IS_ERR(uuid_root)) {
4772 		ret = PTR_ERR(uuid_root);
4773 		btrfs_abort_transaction(trans, ret);
4774 		btrfs_end_transaction(trans);
4775 		return ret;
4776 	}
4777 
4778 	fs_info->uuid_root = uuid_root;
4779 
4780 	ret = btrfs_commit_transaction(trans);
4781 	if (ret)
4782 		return ret;
4783 
4784 	down(&fs_info->uuid_tree_rescan_sem);
4785 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4786 	if (IS_ERR(task)) {
4787 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4788 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4789 		up(&fs_info->uuid_tree_rescan_sem);
4790 		return PTR_ERR(task);
4791 	}
4792 
4793 	return 0;
4794 }
4795 
4796 /*
4797  * shrinking a device means finding all of the device extents past
4798  * the new size, and then following the back refs to the chunks.
4799  * The chunk relocation code actually frees the device extent
4800  */
4801 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4802 {
4803 	struct btrfs_fs_info *fs_info = device->fs_info;
4804 	struct btrfs_root *root = fs_info->dev_root;
4805 	struct btrfs_trans_handle *trans;
4806 	struct btrfs_dev_extent *dev_extent = NULL;
4807 	struct btrfs_path *path;
4808 	u64 length;
4809 	u64 chunk_offset;
4810 	int ret;
4811 	int slot;
4812 	int failed = 0;
4813 	bool retried = false;
4814 	struct extent_buffer *l;
4815 	struct btrfs_key key;
4816 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4817 	u64 old_total = btrfs_super_total_bytes(super_copy);
4818 	u64 old_size = btrfs_device_get_total_bytes(device);
4819 	u64 diff;
4820 	u64 start;
4821 
4822 	new_size = round_down(new_size, fs_info->sectorsize);
4823 	start = new_size;
4824 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4825 
4826 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4827 		return -EINVAL;
4828 
4829 	path = btrfs_alloc_path();
4830 	if (!path)
4831 		return -ENOMEM;
4832 
4833 	path->reada = READA_BACK;
4834 
4835 	trans = btrfs_start_transaction(root, 0);
4836 	if (IS_ERR(trans)) {
4837 		btrfs_free_path(path);
4838 		return PTR_ERR(trans);
4839 	}
4840 
4841 	mutex_lock(&fs_info->chunk_mutex);
4842 
4843 	btrfs_device_set_total_bytes(device, new_size);
4844 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4845 		device->fs_devices->total_rw_bytes -= diff;
4846 		atomic64_sub(diff, &fs_info->free_chunk_space);
4847 	}
4848 
4849 	/*
4850 	 * Once the device's size has been set to the new size, ensure all
4851 	 * in-memory chunks are synced to disk so that the loop below sees them
4852 	 * and relocates them accordingly.
4853 	 */
4854 	if (contains_pending_extent(device, &start, diff)) {
4855 		mutex_unlock(&fs_info->chunk_mutex);
4856 		ret = btrfs_commit_transaction(trans);
4857 		if (ret)
4858 			goto done;
4859 	} else {
4860 		mutex_unlock(&fs_info->chunk_mutex);
4861 		btrfs_end_transaction(trans);
4862 	}
4863 
4864 again:
4865 	key.objectid = device->devid;
4866 	key.offset = (u64)-1;
4867 	key.type = BTRFS_DEV_EXTENT_KEY;
4868 
4869 	do {
4870 		mutex_lock(&fs_info->reclaim_bgs_lock);
4871 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4872 		if (ret < 0) {
4873 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4874 			goto done;
4875 		}
4876 
4877 		ret = btrfs_previous_item(root, path, 0, key.type);
4878 		if (ret) {
4879 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4880 			if (ret < 0)
4881 				goto done;
4882 			ret = 0;
4883 			btrfs_release_path(path);
4884 			break;
4885 		}
4886 
4887 		l = path->nodes[0];
4888 		slot = path->slots[0];
4889 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4890 
4891 		if (key.objectid != device->devid) {
4892 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4893 			btrfs_release_path(path);
4894 			break;
4895 		}
4896 
4897 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4898 		length = btrfs_dev_extent_length(l, dev_extent);
4899 
4900 		if (key.offset + length <= new_size) {
4901 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4902 			btrfs_release_path(path);
4903 			break;
4904 		}
4905 
4906 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4907 		btrfs_release_path(path);
4908 
4909 		/*
4910 		 * We may be relocating the only data chunk we have,
4911 		 * which could potentially end up with losing data's
4912 		 * raid profile, so lets allocate an empty one in
4913 		 * advance.
4914 		 */
4915 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4916 		if (ret < 0) {
4917 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4918 			goto done;
4919 		}
4920 
4921 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4922 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4923 		if (ret == -ENOSPC) {
4924 			failed++;
4925 		} else if (ret) {
4926 			if (ret == -ETXTBSY) {
4927 				btrfs_warn(fs_info,
4928 		   "could not shrink block group %llu due to active swapfile",
4929 					   chunk_offset);
4930 			}
4931 			goto done;
4932 		}
4933 	} while (key.offset-- > 0);
4934 
4935 	if (failed && !retried) {
4936 		failed = 0;
4937 		retried = true;
4938 		goto again;
4939 	} else if (failed && retried) {
4940 		ret = -ENOSPC;
4941 		goto done;
4942 	}
4943 
4944 	/* Shrinking succeeded, else we would be at "done". */
4945 	trans = btrfs_start_transaction(root, 0);
4946 	if (IS_ERR(trans)) {
4947 		ret = PTR_ERR(trans);
4948 		goto done;
4949 	}
4950 
4951 	mutex_lock(&fs_info->chunk_mutex);
4952 	/* Clear all state bits beyond the shrunk device size */
4953 	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4954 			  CHUNK_STATE_MASK);
4955 
4956 	btrfs_device_set_disk_total_bytes(device, new_size);
4957 	if (list_empty(&device->post_commit_list))
4958 		list_add_tail(&device->post_commit_list,
4959 			      &trans->transaction->dev_update_list);
4960 
4961 	WARN_ON(diff > old_total);
4962 	btrfs_set_super_total_bytes(super_copy,
4963 			round_down(old_total - diff, fs_info->sectorsize));
4964 	mutex_unlock(&fs_info->chunk_mutex);
4965 
4966 	btrfs_reserve_chunk_metadata(trans, false);
4967 	/* Now btrfs_update_device() will change the on-disk size. */
4968 	ret = btrfs_update_device(trans, device);
4969 	btrfs_trans_release_chunk_metadata(trans);
4970 	if (ret < 0) {
4971 		btrfs_abort_transaction(trans, ret);
4972 		btrfs_end_transaction(trans);
4973 	} else {
4974 		ret = btrfs_commit_transaction(trans);
4975 	}
4976 done:
4977 	btrfs_free_path(path);
4978 	if (ret) {
4979 		mutex_lock(&fs_info->chunk_mutex);
4980 		btrfs_device_set_total_bytes(device, old_size);
4981 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4982 			device->fs_devices->total_rw_bytes += diff;
4983 		atomic64_add(diff, &fs_info->free_chunk_space);
4984 		mutex_unlock(&fs_info->chunk_mutex);
4985 	}
4986 	return ret;
4987 }
4988 
4989 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4990 			   struct btrfs_key *key,
4991 			   struct btrfs_chunk *chunk, int item_size)
4992 {
4993 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4994 	struct btrfs_disk_key disk_key;
4995 	u32 array_size;
4996 	u8 *ptr;
4997 
4998 	lockdep_assert_held(&fs_info->chunk_mutex);
4999 
5000 	array_size = btrfs_super_sys_array_size(super_copy);
5001 	if (array_size + item_size + sizeof(disk_key)
5002 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
5003 		return -EFBIG;
5004 
5005 	ptr = super_copy->sys_chunk_array + array_size;
5006 	btrfs_cpu_key_to_disk(&disk_key, key);
5007 	memcpy(ptr, &disk_key, sizeof(disk_key));
5008 	ptr += sizeof(disk_key);
5009 	memcpy(ptr, chunk, item_size);
5010 	item_size += sizeof(disk_key);
5011 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5012 
5013 	return 0;
5014 }
5015 
5016 /*
5017  * sort the devices in descending order by max_avail, total_avail
5018  */
5019 static int btrfs_cmp_device_info(const void *a, const void *b)
5020 {
5021 	const struct btrfs_device_info *di_a = a;
5022 	const struct btrfs_device_info *di_b = b;
5023 
5024 	if (di_a->max_avail > di_b->max_avail)
5025 		return -1;
5026 	if (di_a->max_avail < di_b->max_avail)
5027 		return 1;
5028 	if (di_a->total_avail > di_b->total_avail)
5029 		return -1;
5030 	if (di_a->total_avail < di_b->total_avail)
5031 		return 1;
5032 	return 0;
5033 }
5034 
5035 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5036 {
5037 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5038 		return;
5039 
5040 	btrfs_set_fs_incompat(info, RAID56);
5041 }
5042 
5043 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5044 {
5045 	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5046 		return;
5047 
5048 	btrfs_set_fs_incompat(info, RAID1C34);
5049 }
5050 
5051 /*
5052  * Structure used internally for btrfs_create_chunk() function.
5053  * Wraps needed parameters.
5054  */
5055 struct alloc_chunk_ctl {
5056 	u64 start;
5057 	u64 type;
5058 	/* Total number of stripes to allocate */
5059 	int num_stripes;
5060 	/* sub_stripes info for map */
5061 	int sub_stripes;
5062 	/* Stripes per device */
5063 	int dev_stripes;
5064 	/* Maximum number of devices to use */
5065 	int devs_max;
5066 	/* Minimum number of devices to use */
5067 	int devs_min;
5068 	/* ndevs has to be a multiple of this */
5069 	int devs_increment;
5070 	/* Number of copies */
5071 	int ncopies;
5072 	/* Number of stripes worth of bytes to store parity information */
5073 	int nparity;
5074 	u64 max_stripe_size;
5075 	u64 max_chunk_size;
5076 	u64 dev_extent_min;
5077 	u64 stripe_size;
5078 	u64 chunk_size;
5079 	int ndevs;
5080 };
5081 
5082 static void init_alloc_chunk_ctl_policy_regular(
5083 				struct btrfs_fs_devices *fs_devices,
5084 				struct alloc_chunk_ctl *ctl)
5085 {
5086 	u64 type = ctl->type;
5087 
5088 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5089 		ctl->max_stripe_size = SZ_1G;
5090 		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5091 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5092 		/* For larger filesystems, use larger metadata chunks */
5093 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5094 			ctl->max_stripe_size = SZ_1G;
5095 		else
5096 			ctl->max_stripe_size = SZ_256M;
5097 		ctl->max_chunk_size = ctl->max_stripe_size;
5098 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5099 		ctl->max_stripe_size = SZ_32M;
5100 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5101 		ctl->devs_max = min_t(int, ctl->devs_max,
5102 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5103 	} else {
5104 		BUG();
5105 	}
5106 
5107 	/* We don't want a chunk larger than 10% of writable space */
5108 	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5109 				  ctl->max_chunk_size);
5110 	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5111 }
5112 
5113 static void init_alloc_chunk_ctl_policy_zoned(
5114 				      struct btrfs_fs_devices *fs_devices,
5115 				      struct alloc_chunk_ctl *ctl)
5116 {
5117 	u64 zone_size = fs_devices->fs_info->zone_size;
5118 	u64 limit;
5119 	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5120 	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5121 	u64 min_chunk_size = min_data_stripes * zone_size;
5122 	u64 type = ctl->type;
5123 
5124 	ctl->max_stripe_size = zone_size;
5125 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5126 		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5127 						 zone_size);
5128 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5129 		ctl->max_chunk_size = ctl->max_stripe_size;
5130 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5131 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5132 		ctl->devs_max = min_t(int, ctl->devs_max,
5133 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5134 	} else {
5135 		BUG();
5136 	}
5137 
5138 	/* We don't want a chunk larger than 10% of writable space */
5139 	limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5140 			       zone_size),
5141 		    min_chunk_size);
5142 	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5143 	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5144 }
5145 
5146 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5147 				 struct alloc_chunk_ctl *ctl)
5148 {
5149 	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5150 
5151 	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5152 	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5153 	ctl->devs_max = btrfs_raid_array[index].devs_max;
5154 	if (!ctl->devs_max)
5155 		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5156 	ctl->devs_min = btrfs_raid_array[index].devs_min;
5157 	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5158 	ctl->ncopies = btrfs_raid_array[index].ncopies;
5159 	ctl->nparity = btrfs_raid_array[index].nparity;
5160 	ctl->ndevs = 0;
5161 
5162 	switch (fs_devices->chunk_alloc_policy) {
5163 	case BTRFS_CHUNK_ALLOC_REGULAR:
5164 		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5165 		break;
5166 	case BTRFS_CHUNK_ALLOC_ZONED:
5167 		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5168 		break;
5169 	default:
5170 		BUG();
5171 	}
5172 }
5173 
5174 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5175 			      struct alloc_chunk_ctl *ctl,
5176 			      struct btrfs_device_info *devices_info)
5177 {
5178 	struct btrfs_fs_info *info = fs_devices->fs_info;
5179 	struct btrfs_device *device;
5180 	u64 total_avail;
5181 	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5182 	int ret;
5183 	int ndevs = 0;
5184 	u64 max_avail;
5185 	u64 dev_offset;
5186 
5187 	/*
5188 	 * in the first pass through the devices list, we gather information
5189 	 * about the available holes on each device.
5190 	 */
5191 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5192 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5193 			WARN(1, KERN_ERR
5194 			       "BTRFS: read-only device in alloc_list\n");
5195 			continue;
5196 		}
5197 
5198 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5199 					&device->dev_state) ||
5200 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5201 			continue;
5202 
5203 		if (device->total_bytes > device->bytes_used)
5204 			total_avail = device->total_bytes - device->bytes_used;
5205 		else
5206 			total_avail = 0;
5207 
5208 		/* If there is no space on this device, skip it. */
5209 		if (total_avail < ctl->dev_extent_min)
5210 			continue;
5211 
5212 		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5213 					   &max_avail);
5214 		if (ret && ret != -ENOSPC)
5215 			return ret;
5216 
5217 		if (ret == 0)
5218 			max_avail = dev_extent_want;
5219 
5220 		if (max_avail < ctl->dev_extent_min) {
5221 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5222 				btrfs_debug(info,
5223 			"%s: devid %llu has no free space, have=%llu want=%llu",
5224 					    __func__, device->devid, max_avail,
5225 					    ctl->dev_extent_min);
5226 			continue;
5227 		}
5228 
5229 		if (ndevs == fs_devices->rw_devices) {
5230 			WARN(1, "%s: found more than %llu devices\n",
5231 			     __func__, fs_devices->rw_devices);
5232 			break;
5233 		}
5234 		devices_info[ndevs].dev_offset = dev_offset;
5235 		devices_info[ndevs].max_avail = max_avail;
5236 		devices_info[ndevs].total_avail = total_avail;
5237 		devices_info[ndevs].dev = device;
5238 		++ndevs;
5239 	}
5240 	ctl->ndevs = ndevs;
5241 
5242 	/*
5243 	 * now sort the devices by hole size / available space
5244 	 */
5245 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5246 	     btrfs_cmp_device_info, NULL);
5247 
5248 	return 0;
5249 }
5250 
5251 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5252 				      struct btrfs_device_info *devices_info)
5253 {
5254 	/* Number of stripes that count for block group size */
5255 	int data_stripes;
5256 
5257 	/*
5258 	 * The primary goal is to maximize the number of stripes, so use as
5259 	 * many devices as possible, even if the stripes are not maximum sized.
5260 	 *
5261 	 * The DUP profile stores more than one stripe per device, the
5262 	 * max_avail is the total size so we have to adjust.
5263 	 */
5264 	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5265 				   ctl->dev_stripes);
5266 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5267 
5268 	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5269 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5270 
5271 	/*
5272 	 * Use the number of data stripes to figure out how big this chunk is
5273 	 * really going to be in terms of logical address space, and compare
5274 	 * that answer with the max chunk size. If it's higher, we try to
5275 	 * reduce stripe_size.
5276 	 */
5277 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5278 		/*
5279 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5280 		 * then use it, unless it ends up being even bigger than the
5281 		 * previous value we had already.
5282 		 */
5283 		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5284 							data_stripes), SZ_16M),
5285 				       ctl->stripe_size);
5286 	}
5287 
5288 	/* Align to BTRFS_STRIPE_LEN */
5289 	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5290 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5291 
5292 	return 0;
5293 }
5294 
5295 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5296 				    struct btrfs_device_info *devices_info)
5297 {
5298 	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5299 	/* Number of stripes that count for block group size */
5300 	int data_stripes;
5301 
5302 	/*
5303 	 * It should hold because:
5304 	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5305 	 */
5306 	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5307 
5308 	ctl->stripe_size = zone_size;
5309 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5310 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5311 
5312 	/* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
5313 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5314 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5315 					     ctl->stripe_size) + ctl->nparity,
5316 				     ctl->dev_stripes);
5317 		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5318 		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5319 		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5320 	}
5321 
5322 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5323 
5324 	return 0;
5325 }
5326 
5327 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5328 			      struct alloc_chunk_ctl *ctl,
5329 			      struct btrfs_device_info *devices_info)
5330 {
5331 	struct btrfs_fs_info *info = fs_devices->fs_info;
5332 
5333 	/*
5334 	 * Round down to number of usable stripes, devs_increment can be any
5335 	 * number so we can't use round_down() that requires power of 2, while
5336 	 * rounddown is safe.
5337 	 */
5338 	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5339 
5340 	if (ctl->ndevs < ctl->devs_min) {
5341 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5342 			btrfs_debug(info,
5343 	"%s: not enough devices with free space: have=%d minimum required=%d",
5344 				    __func__, ctl->ndevs, ctl->devs_min);
5345 		}
5346 		return -ENOSPC;
5347 	}
5348 
5349 	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5350 
5351 	switch (fs_devices->chunk_alloc_policy) {
5352 	case BTRFS_CHUNK_ALLOC_REGULAR:
5353 		return decide_stripe_size_regular(ctl, devices_info);
5354 	case BTRFS_CHUNK_ALLOC_ZONED:
5355 		return decide_stripe_size_zoned(ctl, devices_info);
5356 	default:
5357 		BUG();
5358 	}
5359 }
5360 
5361 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5362 			struct alloc_chunk_ctl *ctl,
5363 			struct btrfs_device_info *devices_info)
5364 {
5365 	struct btrfs_fs_info *info = trans->fs_info;
5366 	struct map_lookup *map = NULL;
5367 	struct extent_map_tree *em_tree;
5368 	struct btrfs_block_group *block_group;
5369 	struct extent_map *em;
5370 	u64 start = ctl->start;
5371 	u64 type = ctl->type;
5372 	int ret;
5373 	int i;
5374 	int j;
5375 
5376 	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5377 	if (!map)
5378 		return ERR_PTR(-ENOMEM);
5379 	map->num_stripes = ctl->num_stripes;
5380 
5381 	for (i = 0; i < ctl->ndevs; ++i) {
5382 		for (j = 0; j < ctl->dev_stripes; ++j) {
5383 			int s = i * ctl->dev_stripes + j;
5384 			map->stripes[s].dev = devices_info[i].dev;
5385 			map->stripes[s].physical = devices_info[i].dev_offset +
5386 						   j * ctl->stripe_size;
5387 		}
5388 	}
5389 	map->stripe_len = BTRFS_STRIPE_LEN;
5390 	map->io_align = BTRFS_STRIPE_LEN;
5391 	map->io_width = BTRFS_STRIPE_LEN;
5392 	map->type = type;
5393 	map->sub_stripes = ctl->sub_stripes;
5394 
5395 	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5396 
5397 	em = alloc_extent_map();
5398 	if (!em) {
5399 		kfree(map);
5400 		return ERR_PTR(-ENOMEM);
5401 	}
5402 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5403 	em->map_lookup = map;
5404 	em->start = start;
5405 	em->len = ctl->chunk_size;
5406 	em->block_start = 0;
5407 	em->block_len = em->len;
5408 	em->orig_block_len = ctl->stripe_size;
5409 
5410 	em_tree = &info->mapping_tree;
5411 	write_lock(&em_tree->lock);
5412 	ret = add_extent_mapping(em_tree, em, 0);
5413 	if (ret) {
5414 		write_unlock(&em_tree->lock);
5415 		free_extent_map(em);
5416 		return ERR_PTR(ret);
5417 	}
5418 	write_unlock(&em_tree->lock);
5419 
5420 	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5421 	if (IS_ERR(block_group))
5422 		goto error_del_extent;
5423 
5424 	for (i = 0; i < map->num_stripes; i++) {
5425 		struct btrfs_device *dev = map->stripes[i].dev;
5426 
5427 		btrfs_device_set_bytes_used(dev,
5428 					    dev->bytes_used + ctl->stripe_size);
5429 		if (list_empty(&dev->post_commit_list))
5430 			list_add_tail(&dev->post_commit_list,
5431 				      &trans->transaction->dev_update_list);
5432 	}
5433 
5434 	atomic64_sub(ctl->stripe_size * map->num_stripes,
5435 		     &info->free_chunk_space);
5436 
5437 	free_extent_map(em);
5438 	check_raid56_incompat_flag(info, type);
5439 	check_raid1c34_incompat_flag(info, type);
5440 
5441 	return block_group;
5442 
5443 error_del_extent:
5444 	write_lock(&em_tree->lock);
5445 	remove_extent_mapping(em_tree, em);
5446 	write_unlock(&em_tree->lock);
5447 
5448 	/* One for our allocation */
5449 	free_extent_map(em);
5450 	/* One for the tree reference */
5451 	free_extent_map(em);
5452 
5453 	return block_group;
5454 }
5455 
5456 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5457 					    u64 type)
5458 {
5459 	struct btrfs_fs_info *info = trans->fs_info;
5460 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5461 	struct btrfs_device_info *devices_info = NULL;
5462 	struct alloc_chunk_ctl ctl;
5463 	struct btrfs_block_group *block_group;
5464 	int ret;
5465 
5466 	lockdep_assert_held(&info->chunk_mutex);
5467 
5468 	if (!alloc_profile_is_valid(type, 0)) {
5469 		ASSERT(0);
5470 		return ERR_PTR(-EINVAL);
5471 	}
5472 
5473 	if (list_empty(&fs_devices->alloc_list)) {
5474 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5475 			btrfs_debug(info, "%s: no writable device", __func__);
5476 		return ERR_PTR(-ENOSPC);
5477 	}
5478 
5479 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5480 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5481 		ASSERT(0);
5482 		return ERR_PTR(-EINVAL);
5483 	}
5484 
5485 	ctl.start = find_next_chunk(info);
5486 	ctl.type = type;
5487 	init_alloc_chunk_ctl(fs_devices, &ctl);
5488 
5489 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5490 			       GFP_NOFS);
5491 	if (!devices_info)
5492 		return ERR_PTR(-ENOMEM);
5493 
5494 	ret = gather_device_info(fs_devices, &ctl, devices_info);
5495 	if (ret < 0) {
5496 		block_group = ERR_PTR(ret);
5497 		goto out;
5498 	}
5499 
5500 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5501 	if (ret < 0) {
5502 		block_group = ERR_PTR(ret);
5503 		goto out;
5504 	}
5505 
5506 	block_group = create_chunk(trans, &ctl, devices_info);
5507 
5508 out:
5509 	kfree(devices_info);
5510 	return block_group;
5511 }
5512 
5513 /*
5514  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the
5515  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5516  * chunks.
5517  *
5518  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5519  * phases.
5520  */
5521 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5522 				     struct btrfs_block_group *bg)
5523 {
5524 	struct btrfs_fs_info *fs_info = trans->fs_info;
5525 	struct btrfs_root *extent_root = fs_info->extent_root;
5526 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5527 	struct btrfs_key key;
5528 	struct btrfs_chunk *chunk;
5529 	struct btrfs_stripe *stripe;
5530 	struct extent_map *em;
5531 	struct map_lookup *map;
5532 	size_t item_size;
5533 	int i;
5534 	int ret;
5535 
5536 	/*
5537 	 * We take the chunk_mutex for 2 reasons:
5538 	 *
5539 	 * 1) Updates and insertions in the chunk btree must be done while holding
5540 	 *    the chunk_mutex, as well as updating the system chunk array in the
5541 	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5542 	 *    details;
5543 	 *
5544 	 * 2) To prevent races with the final phase of a device replace operation
5545 	 *    that replaces the device object associated with the map's stripes,
5546 	 *    because the device object's id can change at any time during that
5547 	 *    final phase of the device replace operation
5548 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5549 	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5550 	 *    which would cause a failure when updating the device item, which does
5551 	 *    not exists, or persisting a stripe of the chunk item with such ID.
5552 	 *    Here we can't use the device_list_mutex because our caller already
5553 	 *    has locked the chunk_mutex, and the final phase of device replace
5554 	 *    acquires both mutexes - first the device_list_mutex and then the
5555 	 *    chunk_mutex. Using any of those two mutexes protects us from a
5556 	 *    concurrent device replace.
5557 	 */
5558 	lockdep_assert_held(&fs_info->chunk_mutex);
5559 
5560 	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5561 	if (IS_ERR(em)) {
5562 		ret = PTR_ERR(em);
5563 		btrfs_abort_transaction(trans, ret);
5564 		return ret;
5565 	}
5566 
5567 	map = em->map_lookup;
5568 	item_size = btrfs_chunk_item_size(map->num_stripes);
5569 
5570 	chunk = kzalloc(item_size, GFP_NOFS);
5571 	if (!chunk) {
5572 		ret = -ENOMEM;
5573 		btrfs_abort_transaction(trans, ret);
5574 		goto out;
5575 	}
5576 
5577 	for (i = 0; i < map->num_stripes; i++) {
5578 		struct btrfs_device *device = map->stripes[i].dev;
5579 
5580 		ret = btrfs_update_device(trans, device);
5581 		if (ret)
5582 			goto out;
5583 	}
5584 
5585 	stripe = &chunk->stripe;
5586 	for (i = 0; i < map->num_stripes; i++) {
5587 		struct btrfs_device *device = map->stripes[i].dev;
5588 		const u64 dev_offset = map->stripes[i].physical;
5589 
5590 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5591 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5592 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5593 		stripe++;
5594 	}
5595 
5596 	btrfs_set_stack_chunk_length(chunk, bg->length);
5597 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5598 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5599 	btrfs_set_stack_chunk_type(chunk, map->type);
5600 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5601 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5602 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5603 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5604 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5605 
5606 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5607 	key.type = BTRFS_CHUNK_ITEM_KEY;
5608 	key.offset = bg->start;
5609 
5610 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5611 	if (ret)
5612 		goto out;
5613 
5614 	bg->chunk_item_inserted = 1;
5615 
5616 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5617 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5618 		if (ret)
5619 			goto out;
5620 	}
5621 
5622 out:
5623 	kfree(chunk);
5624 	free_extent_map(em);
5625 	return ret;
5626 }
5627 
5628 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5629 {
5630 	struct btrfs_fs_info *fs_info = trans->fs_info;
5631 	u64 alloc_profile;
5632 	struct btrfs_block_group *meta_bg;
5633 	struct btrfs_block_group *sys_bg;
5634 
5635 	/*
5636 	 * When adding a new device for sprouting, the seed device is read-only
5637 	 * so we must first allocate a metadata and a system chunk. But before
5638 	 * adding the block group items to the extent, device and chunk btrees,
5639 	 * we must first:
5640 	 *
5641 	 * 1) Create both chunks without doing any changes to the btrees, as
5642 	 *    otherwise we would get -ENOSPC since the block groups from the
5643 	 *    seed device are read-only;
5644 	 *
5645 	 * 2) Add the device item for the new sprout device - finishing the setup
5646 	 *    of a new block group requires updating the device item in the chunk
5647 	 *    btree, so it must exist when we attempt to do it. The previous step
5648 	 *    ensures this does not fail with -ENOSPC.
5649 	 *
5650 	 * After that we can add the block group items to their btrees:
5651 	 * update existing device item in the chunk btree, add a new block group
5652 	 * item to the extent btree, add a new chunk item to the chunk btree and
5653 	 * finally add the new device extent items to the devices btree.
5654 	 */
5655 
5656 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5657 	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5658 	if (IS_ERR(meta_bg))
5659 		return PTR_ERR(meta_bg);
5660 
5661 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5662 	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5663 	if (IS_ERR(sys_bg))
5664 		return PTR_ERR(sys_bg);
5665 
5666 	return 0;
5667 }
5668 
5669 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5670 {
5671 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5672 
5673 	return btrfs_raid_array[index].tolerated_failures;
5674 }
5675 
5676 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5677 {
5678 	struct extent_map *em;
5679 	struct map_lookup *map;
5680 	int readonly = 0;
5681 	int miss_ndevs = 0;
5682 	int i;
5683 
5684 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5685 	if (IS_ERR(em))
5686 		return 1;
5687 
5688 	map = em->map_lookup;
5689 	for (i = 0; i < map->num_stripes; i++) {
5690 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5691 					&map->stripes[i].dev->dev_state)) {
5692 			miss_ndevs++;
5693 			continue;
5694 		}
5695 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5696 					&map->stripes[i].dev->dev_state)) {
5697 			readonly = 1;
5698 			goto end;
5699 		}
5700 	}
5701 
5702 	/*
5703 	 * If the number of missing devices is larger than max errors,
5704 	 * we can not write the data into that chunk successfully, so
5705 	 * set it readonly.
5706 	 */
5707 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5708 		readonly = 1;
5709 end:
5710 	free_extent_map(em);
5711 	return readonly;
5712 }
5713 
5714 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5715 {
5716 	struct extent_map *em;
5717 
5718 	while (1) {
5719 		write_lock(&tree->lock);
5720 		em = lookup_extent_mapping(tree, 0, (u64)-1);
5721 		if (em)
5722 			remove_extent_mapping(tree, em);
5723 		write_unlock(&tree->lock);
5724 		if (!em)
5725 			break;
5726 		/* once for us */
5727 		free_extent_map(em);
5728 		/* once for the tree */
5729 		free_extent_map(em);
5730 	}
5731 }
5732 
5733 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5734 {
5735 	struct extent_map *em;
5736 	struct map_lookup *map;
5737 	int ret;
5738 
5739 	em = btrfs_get_chunk_map(fs_info, logical, len);
5740 	if (IS_ERR(em))
5741 		/*
5742 		 * We could return errors for these cases, but that could get
5743 		 * ugly and we'd probably do the same thing which is just not do
5744 		 * anything else and exit, so return 1 so the callers don't try
5745 		 * to use other copies.
5746 		 */
5747 		return 1;
5748 
5749 	map = em->map_lookup;
5750 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5751 		ret = map->num_stripes;
5752 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5753 		ret = map->sub_stripes;
5754 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5755 		ret = 2;
5756 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5757 		/*
5758 		 * There could be two corrupted data stripes, we need
5759 		 * to loop retry in order to rebuild the correct data.
5760 		 *
5761 		 * Fail a stripe at a time on every retry except the
5762 		 * stripe under reconstruction.
5763 		 */
5764 		ret = map->num_stripes;
5765 	else
5766 		ret = 1;
5767 	free_extent_map(em);
5768 
5769 	down_read(&fs_info->dev_replace.rwsem);
5770 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5771 	    fs_info->dev_replace.tgtdev)
5772 		ret++;
5773 	up_read(&fs_info->dev_replace.rwsem);
5774 
5775 	return ret;
5776 }
5777 
5778 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5779 				    u64 logical)
5780 {
5781 	struct extent_map *em;
5782 	struct map_lookup *map;
5783 	unsigned long len = fs_info->sectorsize;
5784 
5785 	em = btrfs_get_chunk_map(fs_info, logical, len);
5786 
5787 	if (!WARN_ON(IS_ERR(em))) {
5788 		map = em->map_lookup;
5789 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5790 			len = map->stripe_len * nr_data_stripes(map);
5791 		free_extent_map(em);
5792 	}
5793 	return len;
5794 }
5795 
5796 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5797 {
5798 	struct extent_map *em;
5799 	struct map_lookup *map;
5800 	int ret = 0;
5801 
5802 	em = btrfs_get_chunk_map(fs_info, logical, len);
5803 
5804 	if(!WARN_ON(IS_ERR(em))) {
5805 		map = em->map_lookup;
5806 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5807 			ret = 1;
5808 		free_extent_map(em);
5809 	}
5810 	return ret;
5811 }
5812 
5813 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5814 			    struct map_lookup *map, int first,
5815 			    int dev_replace_is_ongoing)
5816 {
5817 	int i;
5818 	int num_stripes;
5819 	int preferred_mirror;
5820 	int tolerance;
5821 	struct btrfs_device *srcdev;
5822 
5823 	ASSERT((map->type &
5824 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5825 
5826 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5827 		num_stripes = map->sub_stripes;
5828 	else
5829 		num_stripes = map->num_stripes;
5830 
5831 	switch (fs_info->fs_devices->read_policy) {
5832 	default:
5833 		/* Shouldn't happen, just warn and use pid instead of failing */
5834 		btrfs_warn_rl(fs_info,
5835 			      "unknown read_policy type %u, reset to pid",
5836 			      fs_info->fs_devices->read_policy);
5837 		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5838 		fallthrough;
5839 	case BTRFS_READ_POLICY_PID:
5840 		preferred_mirror = first + (current->pid % num_stripes);
5841 		break;
5842 	}
5843 
5844 	if (dev_replace_is_ongoing &&
5845 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5846 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5847 		srcdev = fs_info->dev_replace.srcdev;
5848 	else
5849 		srcdev = NULL;
5850 
5851 	/*
5852 	 * try to avoid the drive that is the source drive for a
5853 	 * dev-replace procedure, only choose it if no other non-missing
5854 	 * mirror is available
5855 	 */
5856 	for (tolerance = 0; tolerance < 2; tolerance++) {
5857 		if (map->stripes[preferred_mirror].dev->bdev &&
5858 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5859 			return preferred_mirror;
5860 		for (i = first; i < first + num_stripes; i++) {
5861 			if (map->stripes[i].dev->bdev &&
5862 			    (tolerance || map->stripes[i].dev != srcdev))
5863 				return i;
5864 		}
5865 	}
5866 
5867 	/* we couldn't find one that doesn't fail.  Just return something
5868 	 * and the io error handling code will clean up eventually
5869 	 */
5870 	return preferred_mirror;
5871 }
5872 
5873 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5874 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5875 {
5876 	int i;
5877 	int again = 1;
5878 
5879 	while (again) {
5880 		again = 0;
5881 		for (i = 0; i < num_stripes - 1; i++) {
5882 			/* Swap if parity is on a smaller index */
5883 			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5884 				swap(bioc->stripes[i], bioc->stripes[i + 1]);
5885 				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5886 				again = 1;
5887 			}
5888 		}
5889 	}
5890 }
5891 
5892 static struct btrfs_io_context *alloc_btrfs_io_context(int total_stripes,
5893 						       int real_stripes)
5894 {
5895 	struct btrfs_io_context *bioc = kzalloc(
5896 		 /* The size of btrfs_io_context */
5897 		sizeof(struct btrfs_io_context) +
5898 		/* Plus the variable array for the stripes */
5899 		sizeof(struct btrfs_io_stripe) * (total_stripes) +
5900 		/* Plus the variable array for the tgt dev */
5901 		sizeof(int) * (real_stripes) +
5902 		/*
5903 		 * Plus the raid_map, which includes both the tgt dev
5904 		 * and the stripes.
5905 		 */
5906 		sizeof(u64) * (total_stripes),
5907 		GFP_NOFS|__GFP_NOFAIL);
5908 
5909 	atomic_set(&bioc->error, 0);
5910 	refcount_set(&bioc->refs, 1);
5911 
5912 	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5913 	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
5914 
5915 	return bioc;
5916 }
5917 
5918 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5919 {
5920 	WARN_ON(!refcount_read(&bioc->refs));
5921 	refcount_inc(&bioc->refs);
5922 }
5923 
5924 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5925 {
5926 	if (!bioc)
5927 		return;
5928 	if (refcount_dec_and_test(&bioc->refs))
5929 		kfree(bioc);
5930 }
5931 
5932 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5933 /*
5934  * Please note that, discard won't be sent to target device of device
5935  * replace.
5936  */
5937 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5938 					 u64 logical, u64 *length_ret,
5939 					 struct btrfs_io_context **bioc_ret)
5940 {
5941 	struct extent_map *em;
5942 	struct map_lookup *map;
5943 	struct btrfs_io_context *bioc;
5944 	u64 length = *length_ret;
5945 	u64 offset;
5946 	u64 stripe_nr;
5947 	u64 stripe_nr_end;
5948 	u64 stripe_end_offset;
5949 	u64 stripe_cnt;
5950 	u64 stripe_len;
5951 	u64 stripe_offset;
5952 	u64 num_stripes;
5953 	u32 stripe_index;
5954 	u32 factor = 0;
5955 	u32 sub_stripes = 0;
5956 	u64 stripes_per_dev = 0;
5957 	u32 remaining_stripes = 0;
5958 	u32 last_stripe = 0;
5959 	int ret = 0;
5960 	int i;
5961 
5962 	/* Discard always returns a bioc. */
5963 	ASSERT(bioc_ret);
5964 
5965 	em = btrfs_get_chunk_map(fs_info, logical, length);
5966 	if (IS_ERR(em))
5967 		return PTR_ERR(em);
5968 
5969 	map = em->map_lookup;
5970 	/* we don't discard raid56 yet */
5971 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5972 		ret = -EOPNOTSUPP;
5973 		goto out;
5974 	}
5975 
5976 	offset = logical - em->start;
5977 	length = min_t(u64, em->start + em->len - logical, length);
5978 	*length_ret = length;
5979 
5980 	stripe_len = map->stripe_len;
5981 	/*
5982 	 * stripe_nr counts the total number of stripes we have to stride
5983 	 * to get to this block
5984 	 */
5985 	stripe_nr = div64_u64(offset, stripe_len);
5986 
5987 	/* stripe_offset is the offset of this block in its stripe */
5988 	stripe_offset = offset - stripe_nr * stripe_len;
5989 
5990 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5991 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5992 	stripe_cnt = stripe_nr_end - stripe_nr;
5993 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5994 			    (offset + length);
5995 	/*
5996 	 * after this, stripe_nr is the number of stripes on this
5997 	 * device we have to walk to find the data, and stripe_index is
5998 	 * the number of our device in the stripe array
5999 	 */
6000 	num_stripes = 1;
6001 	stripe_index = 0;
6002 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6003 			 BTRFS_BLOCK_GROUP_RAID10)) {
6004 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6005 			sub_stripes = 1;
6006 		else
6007 			sub_stripes = map->sub_stripes;
6008 
6009 		factor = map->num_stripes / sub_stripes;
6010 		num_stripes = min_t(u64, map->num_stripes,
6011 				    sub_stripes * stripe_cnt);
6012 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6013 		stripe_index *= sub_stripes;
6014 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6015 					      &remaining_stripes);
6016 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6017 		last_stripe *= sub_stripes;
6018 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6019 				BTRFS_BLOCK_GROUP_DUP)) {
6020 		num_stripes = map->num_stripes;
6021 	} else {
6022 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6023 					&stripe_index);
6024 	}
6025 
6026 	bioc = alloc_btrfs_io_context(num_stripes, 0);
6027 	if (!bioc) {
6028 		ret = -ENOMEM;
6029 		goto out;
6030 	}
6031 
6032 	for (i = 0; i < num_stripes; i++) {
6033 		bioc->stripes[i].physical =
6034 			map->stripes[stripe_index].physical +
6035 			stripe_offset + stripe_nr * map->stripe_len;
6036 		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6037 
6038 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6039 				 BTRFS_BLOCK_GROUP_RAID10)) {
6040 			bioc->stripes[i].length = stripes_per_dev *
6041 				map->stripe_len;
6042 
6043 			if (i / sub_stripes < remaining_stripes)
6044 				bioc->stripes[i].length += map->stripe_len;
6045 
6046 			/*
6047 			 * Special for the first stripe and
6048 			 * the last stripe:
6049 			 *
6050 			 * |-------|...|-------|
6051 			 *     |----------|
6052 			 *    off     end_off
6053 			 */
6054 			if (i < sub_stripes)
6055 				bioc->stripes[i].length -= stripe_offset;
6056 
6057 			if (stripe_index >= last_stripe &&
6058 			    stripe_index <= (last_stripe +
6059 					     sub_stripes - 1))
6060 				bioc->stripes[i].length -= stripe_end_offset;
6061 
6062 			if (i == sub_stripes - 1)
6063 				stripe_offset = 0;
6064 		} else {
6065 			bioc->stripes[i].length = length;
6066 		}
6067 
6068 		stripe_index++;
6069 		if (stripe_index == map->num_stripes) {
6070 			stripe_index = 0;
6071 			stripe_nr++;
6072 		}
6073 	}
6074 
6075 	*bioc_ret = bioc;
6076 	bioc->map_type = map->type;
6077 	bioc->num_stripes = num_stripes;
6078 out:
6079 	free_extent_map(em);
6080 	return ret;
6081 }
6082 
6083 /*
6084  * In dev-replace case, for repair case (that's the only case where the mirror
6085  * is selected explicitly when calling btrfs_map_block), blocks left of the
6086  * left cursor can also be read from the target drive.
6087  *
6088  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6089  * array of stripes.
6090  * For READ, it also needs to be supported using the same mirror number.
6091  *
6092  * If the requested block is not left of the left cursor, EIO is returned. This
6093  * can happen because btrfs_num_copies() returns one more in the dev-replace
6094  * case.
6095  */
6096 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6097 					 u64 logical, u64 length,
6098 					 u64 srcdev_devid, int *mirror_num,
6099 					 u64 *physical)
6100 {
6101 	struct btrfs_io_context *bioc = NULL;
6102 	int num_stripes;
6103 	int index_srcdev = 0;
6104 	int found = 0;
6105 	u64 physical_of_found = 0;
6106 	int i;
6107 	int ret = 0;
6108 
6109 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6110 				logical, &length, &bioc, 0, 0);
6111 	if (ret) {
6112 		ASSERT(bioc == NULL);
6113 		return ret;
6114 	}
6115 
6116 	num_stripes = bioc->num_stripes;
6117 	if (*mirror_num > num_stripes) {
6118 		/*
6119 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6120 		 * that means that the requested area is not left of the left
6121 		 * cursor
6122 		 */
6123 		btrfs_put_bioc(bioc);
6124 		return -EIO;
6125 	}
6126 
6127 	/*
6128 	 * process the rest of the function using the mirror_num of the source
6129 	 * drive. Therefore look it up first.  At the end, patch the device
6130 	 * pointer to the one of the target drive.
6131 	 */
6132 	for (i = 0; i < num_stripes; i++) {
6133 		if (bioc->stripes[i].dev->devid != srcdev_devid)
6134 			continue;
6135 
6136 		/*
6137 		 * In case of DUP, in order to keep it simple, only add the
6138 		 * mirror with the lowest physical address
6139 		 */
6140 		if (found &&
6141 		    physical_of_found <= bioc->stripes[i].physical)
6142 			continue;
6143 
6144 		index_srcdev = i;
6145 		found = 1;
6146 		physical_of_found = bioc->stripes[i].physical;
6147 	}
6148 
6149 	btrfs_put_bioc(bioc);
6150 
6151 	ASSERT(found);
6152 	if (!found)
6153 		return -EIO;
6154 
6155 	*mirror_num = index_srcdev + 1;
6156 	*physical = physical_of_found;
6157 	return ret;
6158 }
6159 
6160 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6161 {
6162 	struct btrfs_block_group *cache;
6163 	bool ret;
6164 
6165 	/* Non zoned filesystem does not use "to_copy" flag */
6166 	if (!btrfs_is_zoned(fs_info))
6167 		return false;
6168 
6169 	cache = btrfs_lookup_block_group(fs_info, logical);
6170 
6171 	spin_lock(&cache->lock);
6172 	ret = cache->to_copy;
6173 	spin_unlock(&cache->lock);
6174 
6175 	btrfs_put_block_group(cache);
6176 	return ret;
6177 }
6178 
6179 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6180 				      struct btrfs_io_context **bioc_ret,
6181 				      struct btrfs_dev_replace *dev_replace,
6182 				      u64 logical,
6183 				      int *num_stripes_ret, int *max_errors_ret)
6184 {
6185 	struct btrfs_io_context *bioc = *bioc_ret;
6186 	u64 srcdev_devid = dev_replace->srcdev->devid;
6187 	int tgtdev_indexes = 0;
6188 	int num_stripes = *num_stripes_ret;
6189 	int max_errors = *max_errors_ret;
6190 	int i;
6191 
6192 	if (op == BTRFS_MAP_WRITE) {
6193 		int index_where_to_add;
6194 
6195 		/*
6196 		 * A block group which have "to_copy" set will eventually
6197 		 * copied by dev-replace process. We can avoid cloning IO here.
6198 		 */
6199 		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6200 			return;
6201 
6202 		/*
6203 		 * duplicate the write operations while the dev replace
6204 		 * procedure is running. Since the copying of the old disk to
6205 		 * the new disk takes place at run time while the filesystem is
6206 		 * mounted writable, the regular write operations to the old
6207 		 * disk have to be duplicated to go to the new disk as well.
6208 		 *
6209 		 * Note that device->missing is handled by the caller, and that
6210 		 * the write to the old disk is already set up in the stripes
6211 		 * array.
6212 		 */
6213 		index_where_to_add = num_stripes;
6214 		for (i = 0; i < num_stripes; i++) {
6215 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6216 				/* write to new disk, too */
6217 				struct btrfs_io_stripe *new =
6218 					bioc->stripes + index_where_to_add;
6219 				struct btrfs_io_stripe *old =
6220 					bioc->stripes + i;
6221 
6222 				new->physical = old->physical;
6223 				new->length = old->length;
6224 				new->dev = dev_replace->tgtdev;
6225 				bioc->tgtdev_map[i] = index_where_to_add;
6226 				index_where_to_add++;
6227 				max_errors++;
6228 				tgtdev_indexes++;
6229 			}
6230 		}
6231 		num_stripes = index_where_to_add;
6232 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6233 		int index_srcdev = 0;
6234 		int found = 0;
6235 		u64 physical_of_found = 0;
6236 
6237 		/*
6238 		 * During the dev-replace procedure, the target drive can also
6239 		 * be used to read data in case it is needed to repair a corrupt
6240 		 * block elsewhere. This is possible if the requested area is
6241 		 * left of the left cursor. In this area, the target drive is a
6242 		 * full copy of the source drive.
6243 		 */
6244 		for (i = 0; i < num_stripes; i++) {
6245 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6246 				/*
6247 				 * In case of DUP, in order to keep it simple,
6248 				 * only add the mirror with the lowest physical
6249 				 * address
6250 				 */
6251 				if (found &&
6252 				    physical_of_found <= bioc->stripes[i].physical)
6253 					continue;
6254 				index_srcdev = i;
6255 				found = 1;
6256 				physical_of_found = bioc->stripes[i].physical;
6257 			}
6258 		}
6259 		if (found) {
6260 			struct btrfs_io_stripe *tgtdev_stripe =
6261 				bioc->stripes + num_stripes;
6262 
6263 			tgtdev_stripe->physical = physical_of_found;
6264 			tgtdev_stripe->length =
6265 				bioc->stripes[index_srcdev].length;
6266 			tgtdev_stripe->dev = dev_replace->tgtdev;
6267 			bioc->tgtdev_map[index_srcdev] = num_stripes;
6268 
6269 			tgtdev_indexes++;
6270 			num_stripes++;
6271 		}
6272 	}
6273 
6274 	*num_stripes_ret = num_stripes;
6275 	*max_errors_ret = max_errors;
6276 	bioc->num_tgtdevs = tgtdev_indexes;
6277 	*bioc_ret = bioc;
6278 }
6279 
6280 static bool need_full_stripe(enum btrfs_map_op op)
6281 {
6282 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6283 }
6284 
6285 /*
6286  * Calculate the geometry of a particular (address, len) tuple. This
6287  * information is used to calculate how big a particular bio can get before it
6288  * straddles a stripe.
6289  *
6290  * @fs_info: the filesystem
6291  * @em:      mapping containing the logical extent
6292  * @op:      type of operation - write or read
6293  * @logical: address that we want to figure out the geometry of
6294  * @io_geom: pointer used to return values
6295  *
6296  * Returns < 0 in case a chunk for the given logical address cannot be found,
6297  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6298  */
6299 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6300 			  enum btrfs_map_op op, u64 logical,
6301 			  struct btrfs_io_geometry *io_geom)
6302 {
6303 	struct map_lookup *map;
6304 	u64 len;
6305 	u64 offset;
6306 	u64 stripe_offset;
6307 	u64 stripe_nr;
6308 	u64 stripe_len;
6309 	u64 raid56_full_stripe_start = (u64)-1;
6310 	int data_stripes;
6311 
6312 	ASSERT(op != BTRFS_MAP_DISCARD);
6313 
6314 	map = em->map_lookup;
6315 	/* Offset of this logical address in the chunk */
6316 	offset = logical - em->start;
6317 	/* Len of a stripe in a chunk */
6318 	stripe_len = map->stripe_len;
6319 	/* Stripe where this block falls in */
6320 	stripe_nr = div64_u64(offset, stripe_len);
6321 	/* Offset of stripe in the chunk */
6322 	stripe_offset = stripe_nr * stripe_len;
6323 	if (offset < stripe_offset) {
6324 		btrfs_crit(fs_info,
6325 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6326 			stripe_offset, offset, em->start, logical, stripe_len);
6327 		return -EINVAL;
6328 	}
6329 
6330 	/* stripe_offset is the offset of this block in its stripe */
6331 	stripe_offset = offset - stripe_offset;
6332 	data_stripes = nr_data_stripes(map);
6333 
6334 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6335 		u64 max_len = stripe_len - stripe_offset;
6336 
6337 		/*
6338 		 * In case of raid56, we need to know the stripe aligned start
6339 		 */
6340 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6341 			unsigned long full_stripe_len = stripe_len * data_stripes;
6342 			raid56_full_stripe_start = offset;
6343 
6344 			/*
6345 			 * Allow a write of a full stripe, but make sure we
6346 			 * don't allow straddling of stripes
6347 			 */
6348 			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6349 					full_stripe_len);
6350 			raid56_full_stripe_start *= full_stripe_len;
6351 
6352 			/*
6353 			 * For writes to RAID[56], allow a full stripeset across
6354 			 * all disks. For other RAID types and for RAID[56]
6355 			 * reads, just allow a single stripe (on a single disk).
6356 			 */
6357 			if (op == BTRFS_MAP_WRITE) {
6358 				max_len = stripe_len * data_stripes -
6359 					  (offset - raid56_full_stripe_start);
6360 			}
6361 		}
6362 		len = min_t(u64, em->len - offset, max_len);
6363 	} else {
6364 		len = em->len - offset;
6365 	}
6366 
6367 	io_geom->len = len;
6368 	io_geom->offset = offset;
6369 	io_geom->stripe_len = stripe_len;
6370 	io_geom->stripe_nr = stripe_nr;
6371 	io_geom->stripe_offset = stripe_offset;
6372 	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6373 
6374 	return 0;
6375 }
6376 
6377 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6378 			     enum btrfs_map_op op,
6379 			     u64 logical, u64 *length,
6380 			     struct btrfs_io_context **bioc_ret,
6381 			     int mirror_num, int need_raid_map)
6382 {
6383 	struct extent_map *em;
6384 	struct map_lookup *map;
6385 	u64 stripe_offset;
6386 	u64 stripe_nr;
6387 	u64 stripe_len;
6388 	u32 stripe_index;
6389 	int data_stripes;
6390 	int i;
6391 	int ret = 0;
6392 	int num_stripes;
6393 	int max_errors = 0;
6394 	int tgtdev_indexes = 0;
6395 	struct btrfs_io_context *bioc = NULL;
6396 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6397 	int dev_replace_is_ongoing = 0;
6398 	int num_alloc_stripes;
6399 	int patch_the_first_stripe_for_dev_replace = 0;
6400 	u64 physical_to_patch_in_first_stripe = 0;
6401 	u64 raid56_full_stripe_start = (u64)-1;
6402 	struct btrfs_io_geometry geom;
6403 
6404 	ASSERT(bioc_ret);
6405 	ASSERT(op != BTRFS_MAP_DISCARD);
6406 
6407 	em = btrfs_get_chunk_map(fs_info, logical, *length);
6408 	ASSERT(!IS_ERR(em));
6409 
6410 	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6411 	if (ret < 0)
6412 		return ret;
6413 
6414 	map = em->map_lookup;
6415 
6416 	*length = geom.len;
6417 	stripe_len = geom.stripe_len;
6418 	stripe_nr = geom.stripe_nr;
6419 	stripe_offset = geom.stripe_offset;
6420 	raid56_full_stripe_start = geom.raid56_stripe_offset;
6421 	data_stripes = nr_data_stripes(map);
6422 
6423 	down_read(&dev_replace->rwsem);
6424 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6425 	/*
6426 	 * Hold the semaphore for read during the whole operation, write is
6427 	 * requested at commit time but must wait.
6428 	 */
6429 	if (!dev_replace_is_ongoing)
6430 		up_read(&dev_replace->rwsem);
6431 
6432 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6433 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6434 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6435 						    dev_replace->srcdev->devid,
6436 						    &mirror_num,
6437 					    &physical_to_patch_in_first_stripe);
6438 		if (ret)
6439 			goto out;
6440 		else
6441 			patch_the_first_stripe_for_dev_replace = 1;
6442 	} else if (mirror_num > map->num_stripes) {
6443 		mirror_num = 0;
6444 	}
6445 
6446 	num_stripes = 1;
6447 	stripe_index = 0;
6448 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6449 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6450 				&stripe_index);
6451 		if (!need_full_stripe(op))
6452 			mirror_num = 1;
6453 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6454 		if (need_full_stripe(op))
6455 			num_stripes = map->num_stripes;
6456 		else if (mirror_num)
6457 			stripe_index = mirror_num - 1;
6458 		else {
6459 			stripe_index = find_live_mirror(fs_info, map, 0,
6460 					    dev_replace_is_ongoing);
6461 			mirror_num = stripe_index + 1;
6462 		}
6463 
6464 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6465 		if (need_full_stripe(op)) {
6466 			num_stripes = map->num_stripes;
6467 		} else if (mirror_num) {
6468 			stripe_index = mirror_num - 1;
6469 		} else {
6470 			mirror_num = 1;
6471 		}
6472 
6473 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6474 		u32 factor = map->num_stripes / map->sub_stripes;
6475 
6476 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6477 		stripe_index *= map->sub_stripes;
6478 
6479 		if (need_full_stripe(op))
6480 			num_stripes = map->sub_stripes;
6481 		else if (mirror_num)
6482 			stripe_index += mirror_num - 1;
6483 		else {
6484 			int old_stripe_index = stripe_index;
6485 			stripe_index = find_live_mirror(fs_info, map,
6486 					      stripe_index,
6487 					      dev_replace_is_ongoing);
6488 			mirror_num = stripe_index - old_stripe_index + 1;
6489 		}
6490 
6491 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6492 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6493 			/* push stripe_nr back to the start of the full stripe */
6494 			stripe_nr = div64_u64(raid56_full_stripe_start,
6495 					stripe_len * data_stripes);
6496 
6497 			/* RAID[56] write or recovery. Return all stripes */
6498 			num_stripes = map->num_stripes;
6499 			max_errors = nr_parity_stripes(map);
6500 
6501 			*length = map->stripe_len;
6502 			stripe_index = 0;
6503 			stripe_offset = 0;
6504 		} else {
6505 			/*
6506 			 * Mirror #0 or #1 means the original data block.
6507 			 * Mirror #2 is RAID5 parity block.
6508 			 * Mirror #3 is RAID6 Q block.
6509 			 */
6510 			stripe_nr = div_u64_rem(stripe_nr,
6511 					data_stripes, &stripe_index);
6512 			if (mirror_num > 1)
6513 				stripe_index = data_stripes + mirror_num - 2;
6514 
6515 			/* We distribute the parity blocks across stripes */
6516 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6517 					&stripe_index);
6518 			if (!need_full_stripe(op) && mirror_num <= 1)
6519 				mirror_num = 1;
6520 		}
6521 	} else {
6522 		/*
6523 		 * after this, stripe_nr is the number of stripes on this
6524 		 * device we have to walk to find the data, and stripe_index is
6525 		 * the number of our device in the stripe array
6526 		 */
6527 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6528 				&stripe_index);
6529 		mirror_num = stripe_index + 1;
6530 	}
6531 	if (stripe_index >= map->num_stripes) {
6532 		btrfs_crit(fs_info,
6533 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6534 			   stripe_index, map->num_stripes);
6535 		ret = -EINVAL;
6536 		goto out;
6537 	}
6538 
6539 	num_alloc_stripes = num_stripes;
6540 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6541 		if (op == BTRFS_MAP_WRITE)
6542 			num_alloc_stripes <<= 1;
6543 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6544 			num_alloc_stripes++;
6545 		tgtdev_indexes = num_stripes;
6546 	}
6547 
6548 	bioc = alloc_btrfs_io_context(num_alloc_stripes, tgtdev_indexes);
6549 	if (!bioc) {
6550 		ret = -ENOMEM;
6551 		goto out;
6552 	}
6553 
6554 	for (i = 0; i < num_stripes; i++) {
6555 		bioc->stripes[i].physical = map->stripes[stripe_index].physical +
6556 			stripe_offset + stripe_nr * map->stripe_len;
6557 		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6558 		stripe_index++;
6559 	}
6560 
6561 	/* Build raid_map */
6562 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6563 	    (need_full_stripe(op) || mirror_num > 1)) {
6564 		u64 tmp;
6565 		unsigned rot;
6566 
6567 		/* Work out the disk rotation on this stripe-set */
6568 		div_u64_rem(stripe_nr, num_stripes, &rot);
6569 
6570 		/* Fill in the logical address of each stripe */
6571 		tmp = stripe_nr * data_stripes;
6572 		for (i = 0; i < data_stripes; i++)
6573 			bioc->raid_map[(i + rot) % num_stripes] =
6574 				em->start + (tmp + i) * map->stripe_len;
6575 
6576 		bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6577 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6578 			bioc->raid_map[(i + rot + 1) % num_stripes] =
6579 				RAID6_Q_STRIPE;
6580 
6581 		sort_parity_stripes(bioc, num_stripes);
6582 	}
6583 
6584 	if (need_full_stripe(op))
6585 		max_errors = btrfs_chunk_max_errors(map);
6586 
6587 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6588 	    need_full_stripe(op)) {
6589 		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6590 					  &num_stripes, &max_errors);
6591 	}
6592 
6593 	*bioc_ret = bioc;
6594 	bioc->map_type = map->type;
6595 	bioc->num_stripes = num_stripes;
6596 	bioc->max_errors = max_errors;
6597 	bioc->mirror_num = mirror_num;
6598 
6599 	/*
6600 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6601 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
6602 	 * available as a mirror
6603 	 */
6604 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6605 		WARN_ON(num_stripes > 1);
6606 		bioc->stripes[0].dev = dev_replace->tgtdev;
6607 		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6608 		bioc->mirror_num = map->num_stripes + 1;
6609 	}
6610 out:
6611 	if (dev_replace_is_ongoing) {
6612 		lockdep_assert_held(&dev_replace->rwsem);
6613 		/* Unlock and let waiting writers proceed */
6614 		up_read(&dev_replace->rwsem);
6615 	}
6616 	free_extent_map(em);
6617 	return ret;
6618 }
6619 
6620 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6621 		      u64 logical, u64 *length,
6622 		      struct btrfs_io_context **bioc_ret, int mirror_num)
6623 {
6624 	if (op == BTRFS_MAP_DISCARD)
6625 		return __btrfs_map_block_for_discard(fs_info, logical,
6626 						     length, bioc_ret);
6627 
6628 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6629 				 mirror_num, 0);
6630 }
6631 
6632 /* For Scrub/replace */
6633 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6634 		     u64 logical, u64 *length,
6635 		     struct btrfs_io_context **bioc_ret)
6636 {
6637 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
6638 }
6639 
6640 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
6641 {
6642 	bio->bi_private = bioc->private;
6643 	bio->bi_end_io = bioc->end_io;
6644 	bio_endio(bio);
6645 
6646 	btrfs_put_bioc(bioc);
6647 }
6648 
6649 static void btrfs_end_bio(struct bio *bio)
6650 {
6651 	struct btrfs_io_context *bioc = bio->bi_private;
6652 	int is_orig_bio = 0;
6653 
6654 	if (bio->bi_status) {
6655 		atomic_inc(&bioc->error);
6656 		if (bio->bi_status == BLK_STS_IOERR ||
6657 		    bio->bi_status == BLK_STS_TARGET) {
6658 			struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6659 
6660 			ASSERT(dev->bdev);
6661 			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6662 				btrfs_dev_stat_inc_and_print(dev,
6663 						BTRFS_DEV_STAT_WRITE_ERRS);
6664 			else if (!(bio->bi_opf & REQ_RAHEAD))
6665 				btrfs_dev_stat_inc_and_print(dev,
6666 						BTRFS_DEV_STAT_READ_ERRS);
6667 			if (bio->bi_opf & REQ_PREFLUSH)
6668 				btrfs_dev_stat_inc_and_print(dev,
6669 						BTRFS_DEV_STAT_FLUSH_ERRS);
6670 		}
6671 	}
6672 
6673 	if (bio == bioc->orig_bio)
6674 		is_orig_bio = 1;
6675 
6676 	btrfs_bio_counter_dec(bioc->fs_info);
6677 
6678 	if (atomic_dec_and_test(&bioc->stripes_pending)) {
6679 		if (!is_orig_bio) {
6680 			bio_put(bio);
6681 			bio = bioc->orig_bio;
6682 		}
6683 
6684 		btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
6685 		/* only send an error to the higher layers if it is
6686 		 * beyond the tolerance of the btrfs bio
6687 		 */
6688 		if (atomic_read(&bioc->error) > bioc->max_errors) {
6689 			bio->bi_status = BLK_STS_IOERR;
6690 		} else {
6691 			/*
6692 			 * this bio is actually up to date, we didn't
6693 			 * go over the max number of errors
6694 			 */
6695 			bio->bi_status = BLK_STS_OK;
6696 		}
6697 
6698 		btrfs_end_bioc(bioc, bio);
6699 	} else if (!is_orig_bio) {
6700 		bio_put(bio);
6701 	}
6702 }
6703 
6704 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
6705 			      u64 physical, struct btrfs_device *dev)
6706 {
6707 	struct btrfs_fs_info *fs_info = bioc->fs_info;
6708 
6709 	bio->bi_private = bioc;
6710 	btrfs_io_bio(bio)->device = dev;
6711 	bio->bi_end_io = btrfs_end_bio;
6712 	bio->bi_iter.bi_sector = physical >> 9;
6713 	/*
6714 	 * For zone append writing, bi_sector must point the beginning of the
6715 	 * zone
6716 	 */
6717 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6718 		if (btrfs_dev_is_sequential(dev, physical)) {
6719 			u64 zone_start = round_down(physical, fs_info->zone_size);
6720 
6721 			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6722 		} else {
6723 			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6724 			bio->bi_opf |= REQ_OP_WRITE;
6725 		}
6726 	}
6727 	btrfs_debug_in_rcu(fs_info,
6728 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6729 		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6730 		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6731 		dev->devid, bio->bi_iter.bi_size);
6732 	bio_set_dev(bio, dev->bdev);
6733 
6734 	btrfs_bio_counter_inc_noblocked(fs_info);
6735 
6736 	btrfsic_submit_bio(bio);
6737 }
6738 
6739 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
6740 {
6741 	atomic_inc(&bioc->error);
6742 	if (atomic_dec_and_test(&bioc->stripes_pending)) {
6743 		/* Should be the original bio. */
6744 		WARN_ON(bio != bioc->orig_bio);
6745 
6746 		btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
6747 		bio->bi_iter.bi_sector = logical >> 9;
6748 		if (atomic_read(&bioc->error) > bioc->max_errors)
6749 			bio->bi_status = BLK_STS_IOERR;
6750 		else
6751 			bio->bi_status = BLK_STS_OK;
6752 		btrfs_end_bioc(bioc, bio);
6753 	}
6754 }
6755 
6756 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6757 			   int mirror_num)
6758 {
6759 	struct btrfs_device *dev;
6760 	struct bio *first_bio = bio;
6761 	u64 logical = bio->bi_iter.bi_sector << 9;
6762 	u64 length = 0;
6763 	u64 map_length;
6764 	int ret;
6765 	int dev_nr;
6766 	int total_devs;
6767 	struct btrfs_io_context *bioc = NULL;
6768 
6769 	length = bio->bi_iter.bi_size;
6770 	map_length = length;
6771 
6772 	btrfs_bio_counter_inc_blocked(fs_info);
6773 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6774 				&map_length, &bioc, mirror_num, 1);
6775 	if (ret) {
6776 		btrfs_bio_counter_dec(fs_info);
6777 		return errno_to_blk_status(ret);
6778 	}
6779 
6780 	total_devs = bioc->num_stripes;
6781 	bioc->orig_bio = first_bio;
6782 	bioc->private = first_bio->bi_private;
6783 	bioc->end_io = first_bio->bi_end_io;
6784 	bioc->fs_info = fs_info;
6785 	atomic_set(&bioc->stripes_pending, bioc->num_stripes);
6786 
6787 	if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6788 	    ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6789 		/* In this case, map_length has been set to the length of
6790 		   a single stripe; not the whole write */
6791 		if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6792 			ret = raid56_parity_write(fs_info, bio, bioc,
6793 						  map_length);
6794 		} else {
6795 			ret = raid56_parity_recover(fs_info, bio, bioc,
6796 						    map_length, mirror_num, 1);
6797 		}
6798 
6799 		btrfs_bio_counter_dec(fs_info);
6800 		return errno_to_blk_status(ret);
6801 	}
6802 
6803 	if (map_length < length) {
6804 		btrfs_crit(fs_info,
6805 			   "mapping failed logical %llu bio len %llu len %llu",
6806 			   logical, length, map_length);
6807 		BUG();
6808 	}
6809 
6810 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6811 		dev = bioc->stripes[dev_nr].dev;
6812 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6813 						   &dev->dev_state) ||
6814 		    (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6815 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6816 			bioc_error(bioc, first_bio, logical);
6817 			continue;
6818 		}
6819 
6820 		if (dev_nr < total_devs - 1)
6821 			bio = btrfs_bio_clone(first_bio);
6822 		else
6823 			bio = first_bio;
6824 
6825 		submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
6826 	}
6827 	btrfs_bio_counter_dec(fs_info);
6828 	return BLK_STS_OK;
6829 }
6830 
6831 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6832 				      const struct btrfs_fs_devices *fs_devices)
6833 {
6834 	if (args->fsid == NULL)
6835 		return true;
6836 	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6837 		return true;
6838 	return false;
6839 }
6840 
6841 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6842 				  const struct btrfs_device *device)
6843 {
6844 	if (args->missing) {
6845 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6846 		    !device->bdev)
6847 			return true;
6848 		return false;
6849 	}
6850 
6851 	if (device->devid != args->devid)
6852 		return false;
6853 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6854 		return false;
6855 	return true;
6856 }
6857 
6858 /*
6859  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6860  * return NULL.
6861  *
6862  * If devid and uuid are both specified, the match must be exact, otherwise
6863  * only devid is used.
6864  */
6865 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6866 				       const struct btrfs_dev_lookup_args *args)
6867 {
6868 	struct btrfs_device *device;
6869 	struct btrfs_fs_devices *seed_devs;
6870 
6871 	if (dev_args_match_fs_devices(args, fs_devices)) {
6872 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6873 			if (dev_args_match_device(args, device))
6874 				return device;
6875 		}
6876 	}
6877 
6878 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6879 		if (!dev_args_match_fs_devices(args, seed_devs))
6880 			continue;
6881 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6882 			if (dev_args_match_device(args, device))
6883 				return device;
6884 		}
6885 	}
6886 
6887 	return NULL;
6888 }
6889 
6890 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6891 					    u64 devid, u8 *dev_uuid)
6892 {
6893 	struct btrfs_device *device;
6894 	unsigned int nofs_flag;
6895 
6896 	/*
6897 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6898 	 * allocation, however we don't want to change btrfs_alloc_device() to
6899 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6900 	 * places.
6901 	 */
6902 	nofs_flag = memalloc_nofs_save();
6903 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6904 	memalloc_nofs_restore(nofs_flag);
6905 	if (IS_ERR(device))
6906 		return device;
6907 
6908 	list_add(&device->dev_list, &fs_devices->devices);
6909 	device->fs_devices = fs_devices;
6910 	fs_devices->num_devices++;
6911 
6912 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6913 	fs_devices->missing_devices++;
6914 
6915 	return device;
6916 }
6917 
6918 /**
6919  * btrfs_alloc_device - allocate struct btrfs_device
6920  * @fs_info:	used only for generating a new devid, can be NULL if
6921  *		devid is provided (i.e. @devid != NULL).
6922  * @devid:	a pointer to devid for this device.  If NULL a new devid
6923  *		is generated.
6924  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6925  *		is generated.
6926  *
6927  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6928  * on error.  Returned struct is not linked onto any lists and must be
6929  * destroyed with btrfs_free_device.
6930  */
6931 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6932 					const u64 *devid,
6933 					const u8 *uuid)
6934 {
6935 	struct btrfs_device *dev;
6936 	u64 tmp;
6937 
6938 	if (WARN_ON(!devid && !fs_info))
6939 		return ERR_PTR(-EINVAL);
6940 
6941 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6942 	if (!dev)
6943 		return ERR_PTR(-ENOMEM);
6944 
6945 	/*
6946 	 * Preallocate a bio that's always going to be used for flushing device
6947 	 * barriers and matches the device lifespan
6948 	 */
6949 	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6950 	if (!dev->flush_bio) {
6951 		kfree(dev);
6952 		return ERR_PTR(-ENOMEM);
6953 	}
6954 
6955 	INIT_LIST_HEAD(&dev->dev_list);
6956 	INIT_LIST_HEAD(&dev->dev_alloc_list);
6957 	INIT_LIST_HEAD(&dev->post_commit_list);
6958 
6959 	atomic_set(&dev->reada_in_flight, 0);
6960 	atomic_set(&dev->dev_stats_ccnt, 0);
6961 	btrfs_device_data_ordered_init(dev);
6962 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6963 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6964 	extent_io_tree_init(fs_info, &dev->alloc_state,
6965 			    IO_TREE_DEVICE_ALLOC_STATE, NULL);
6966 
6967 	if (devid)
6968 		tmp = *devid;
6969 	else {
6970 		int ret;
6971 
6972 		ret = find_next_devid(fs_info, &tmp);
6973 		if (ret) {
6974 			btrfs_free_device(dev);
6975 			return ERR_PTR(ret);
6976 		}
6977 	}
6978 	dev->devid = tmp;
6979 
6980 	if (uuid)
6981 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6982 	else
6983 		generate_random_uuid(dev->uuid);
6984 
6985 	return dev;
6986 }
6987 
6988 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6989 					u64 devid, u8 *uuid, bool error)
6990 {
6991 	if (error)
6992 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6993 			      devid, uuid);
6994 	else
6995 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6996 			      devid, uuid);
6997 }
6998 
6999 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7000 {
7001 	const int data_stripes = calc_data_stripes(type, num_stripes);
7002 
7003 	return div_u64(chunk_len, data_stripes);
7004 }
7005 
7006 #if BITS_PER_LONG == 32
7007 /*
7008  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
7009  * can't be accessed on 32bit systems.
7010  *
7011  * This function do mount time check to reject the fs if it already has
7012  * metadata chunk beyond that limit.
7013  */
7014 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7015 				  u64 logical, u64 length, u64 type)
7016 {
7017 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7018 		return 0;
7019 
7020 	if (logical + length < MAX_LFS_FILESIZE)
7021 		return 0;
7022 
7023 	btrfs_err_32bit_limit(fs_info);
7024 	return -EOVERFLOW;
7025 }
7026 
7027 /*
7028  * This is to give early warning for any metadata chunk reaching
7029  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7030  * Although we can still access the metadata, it's not going to be possible
7031  * once the limit is reached.
7032  */
7033 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7034 				  u64 logical, u64 length, u64 type)
7035 {
7036 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7037 		return;
7038 
7039 	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7040 		return;
7041 
7042 	btrfs_warn_32bit_limit(fs_info);
7043 }
7044 #endif
7045 
7046 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7047 			  struct btrfs_chunk *chunk)
7048 {
7049 	BTRFS_DEV_LOOKUP_ARGS(args);
7050 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7051 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7052 	struct map_lookup *map;
7053 	struct extent_map *em;
7054 	u64 logical;
7055 	u64 length;
7056 	u64 devid;
7057 	u64 type;
7058 	u8 uuid[BTRFS_UUID_SIZE];
7059 	int num_stripes;
7060 	int ret;
7061 	int i;
7062 
7063 	logical = key->offset;
7064 	length = btrfs_chunk_length(leaf, chunk);
7065 	type = btrfs_chunk_type(leaf, chunk);
7066 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7067 
7068 #if BITS_PER_LONG == 32
7069 	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7070 	if (ret < 0)
7071 		return ret;
7072 	warn_32bit_meta_chunk(fs_info, logical, length, type);
7073 #endif
7074 
7075 	/*
7076 	 * Only need to verify chunk item if we're reading from sys chunk array,
7077 	 * as chunk item in tree block is already verified by tree-checker.
7078 	 */
7079 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7080 		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7081 		if (ret)
7082 			return ret;
7083 	}
7084 
7085 	read_lock(&map_tree->lock);
7086 	em = lookup_extent_mapping(map_tree, logical, 1);
7087 	read_unlock(&map_tree->lock);
7088 
7089 	/* already mapped? */
7090 	if (em && em->start <= logical && em->start + em->len > logical) {
7091 		free_extent_map(em);
7092 		return 0;
7093 	} else if (em) {
7094 		free_extent_map(em);
7095 	}
7096 
7097 	em = alloc_extent_map();
7098 	if (!em)
7099 		return -ENOMEM;
7100 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7101 	if (!map) {
7102 		free_extent_map(em);
7103 		return -ENOMEM;
7104 	}
7105 
7106 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7107 	em->map_lookup = map;
7108 	em->start = logical;
7109 	em->len = length;
7110 	em->orig_start = 0;
7111 	em->block_start = 0;
7112 	em->block_len = em->len;
7113 
7114 	map->num_stripes = num_stripes;
7115 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
7116 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
7117 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7118 	map->type = type;
7119 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7120 	map->verified_stripes = 0;
7121 	em->orig_block_len = calc_stripe_length(type, em->len,
7122 						map->num_stripes);
7123 	for (i = 0; i < num_stripes; i++) {
7124 		map->stripes[i].physical =
7125 			btrfs_stripe_offset_nr(leaf, chunk, i);
7126 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7127 		args.devid = devid;
7128 		read_extent_buffer(leaf, uuid, (unsigned long)
7129 				   btrfs_stripe_dev_uuid_nr(chunk, i),
7130 				   BTRFS_UUID_SIZE);
7131 		args.uuid = uuid;
7132 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7133 		if (!map->stripes[i].dev &&
7134 		    !btrfs_test_opt(fs_info, DEGRADED)) {
7135 			free_extent_map(em);
7136 			btrfs_report_missing_device(fs_info, devid, uuid, true);
7137 			return -ENOENT;
7138 		}
7139 		if (!map->stripes[i].dev) {
7140 			map->stripes[i].dev =
7141 				add_missing_dev(fs_info->fs_devices, devid,
7142 						uuid);
7143 			if (IS_ERR(map->stripes[i].dev)) {
7144 				free_extent_map(em);
7145 				btrfs_err(fs_info,
7146 					"failed to init missing dev %llu: %ld",
7147 					devid, PTR_ERR(map->stripes[i].dev));
7148 				return PTR_ERR(map->stripes[i].dev);
7149 			}
7150 			btrfs_report_missing_device(fs_info, devid, uuid, false);
7151 		}
7152 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7153 				&(map->stripes[i].dev->dev_state));
7154 
7155 	}
7156 
7157 	write_lock(&map_tree->lock);
7158 	ret = add_extent_mapping(map_tree, em, 0);
7159 	write_unlock(&map_tree->lock);
7160 	if (ret < 0) {
7161 		btrfs_err(fs_info,
7162 			  "failed to add chunk map, start=%llu len=%llu: %d",
7163 			  em->start, em->len, ret);
7164 	}
7165 	free_extent_map(em);
7166 
7167 	return ret;
7168 }
7169 
7170 static void fill_device_from_item(struct extent_buffer *leaf,
7171 				 struct btrfs_dev_item *dev_item,
7172 				 struct btrfs_device *device)
7173 {
7174 	unsigned long ptr;
7175 
7176 	device->devid = btrfs_device_id(leaf, dev_item);
7177 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7178 	device->total_bytes = device->disk_total_bytes;
7179 	device->commit_total_bytes = device->disk_total_bytes;
7180 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7181 	device->commit_bytes_used = device->bytes_used;
7182 	device->type = btrfs_device_type(leaf, dev_item);
7183 	device->io_align = btrfs_device_io_align(leaf, dev_item);
7184 	device->io_width = btrfs_device_io_width(leaf, dev_item);
7185 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7186 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7187 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7188 
7189 	ptr = btrfs_device_uuid(dev_item);
7190 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7191 }
7192 
7193 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7194 						  u8 *fsid)
7195 {
7196 	struct btrfs_fs_devices *fs_devices;
7197 	int ret;
7198 
7199 	lockdep_assert_held(&uuid_mutex);
7200 	ASSERT(fsid);
7201 
7202 	/* This will match only for multi-device seed fs */
7203 	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7204 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7205 			return fs_devices;
7206 
7207 
7208 	fs_devices = find_fsid(fsid, NULL);
7209 	if (!fs_devices) {
7210 		if (!btrfs_test_opt(fs_info, DEGRADED))
7211 			return ERR_PTR(-ENOENT);
7212 
7213 		fs_devices = alloc_fs_devices(fsid, NULL);
7214 		if (IS_ERR(fs_devices))
7215 			return fs_devices;
7216 
7217 		fs_devices->seeding = true;
7218 		fs_devices->opened = 1;
7219 		return fs_devices;
7220 	}
7221 
7222 	/*
7223 	 * Upon first call for a seed fs fsid, just create a private copy of the
7224 	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7225 	 */
7226 	fs_devices = clone_fs_devices(fs_devices);
7227 	if (IS_ERR(fs_devices))
7228 		return fs_devices;
7229 
7230 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7231 	if (ret) {
7232 		free_fs_devices(fs_devices);
7233 		return ERR_PTR(ret);
7234 	}
7235 
7236 	if (!fs_devices->seeding) {
7237 		close_fs_devices(fs_devices);
7238 		free_fs_devices(fs_devices);
7239 		return ERR_PTR(-EINVAL);
7240 	}
7241 
7242 	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7243 
7244 	return fs_devices;
7245 }
7246 
7247 static int read_one_dev(struct extent_buffer *leaf,
7248 			struct btrfs_dev_item *dev_item)
7249 {
7250 	BTRFS_DEV_LOOKUP_ARGS(args);
7251 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7252 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7253 	struct btrfs_device *device;
7254 	u64 devid;
7255 	int ret;
7256 	u8 fs_uuid[BTRFS_FSID_SIZE];
7257 	u8 dev_uuid[BTRFS_UUID_SIZE];
7258 
7259 	devid = args.devid = btrfs_device_id(leaf, dev_item);
7260 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7261 			   BTRFS_UUID_SIZE);
7262 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7263 			   BTRFS_FSID_SIZE);
7264 	args.uuid = dev_uuid;
7265 	args.fsid = fs_uuid;
7266 
7267 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7268 		fs_devices = open_seed_devices(fs_info, fs_uuid);
7269 		if (IS_ERR(fs_devices))
7270 			return PTR_ERR(fs_devices);
7271 	}
7272 
7273 	device = btrfs_find_device(fs_info->fs_devices, &args);
7274 	if (!device) {
7275 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7276 			btrfs_report_missing_device(fs_info, devid,
7277 							dev_uuid, true);
7278 			return -ENOENT;
7279 		}
7280 
7281 		device = add_missing_dev(fs_devices, devid, dev_uuid);
7282 		if (IS_ERR(device)) {
7283 			btrfs_err(fs_info,
7284 				"failed to add missing dev %llu: %ld",
7285 				devid, PTR_ERR(device));
7286 			return PTR_ERR(device);
7287 		}
7288 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7289 	} else {
7290 		if (!device->bdev) {
7291 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7292 				btrfs_report_missing_device(fs_info,
7293 						devid, dev_uuid, true);
7294 				return -ENOENT;
7295 			}
7296 			btrfs_report_missing_device(fs_info, devid,
7297 							dev_uuid, false);
7298 		}
7299 
7300 		if (!device->bdev &&
7301 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7302 			/*
7303 			 * this happens when a device that was properly setup
7304 			 * in the device info lists suddenly goes bad.
7305 			 * device->bdev is NULL, and so we have to set
7306 			 * device->missing to one here
7307 			 */
7308 			device->fs_devices->missing_devices++;
7309 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7310 		}
7311 
7312 		/* Move the device to its own fs_devices */
7313 		if (device->fs_devices != fs_devices) {
7314 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7315 							&device->dev_state));
7316 
7317 			list_move(&device->dev_list, &fs_devices->devices);
7318 			device->fs_devices->num_devices--;
7319 			fs_devices->num_devices++;
7320 
7321 			device->fs_devices->missing_devices--;
7322 			fs_devices->missing_devices++;
7323 
7324 			device->fs_devices = fs_devices;
7325 		}
7326 	}
7327 
7328 	if (device->fs_devices != fs_info->fs_devices) {
7329 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7330 		if (device->generation !=
7331 		    btrfs_device_generation(leaf, dev_item))
7332 			return -EINVAL;
7333 	}
7334 
7335 	fill_device_from_item(leaf, dev_item, device);
7336 	if (device->bdev) {
7337 		u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7338 
7339 		if (device->total_bytes > max_total_bytes) {
7340 			btrfs_err(fs_info,
7341 			"device total_bytes should be at most %llu but found %llu",
7342 				  max_total_bytes, device->total_bytes);
7343 			return -EINVAL;
7344 		}
7345 	}
7346 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7347 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7348 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7349 		device->fs_devices->total_rw_bytes += device->total_bytes;
7350 		atomic64_add(device->total_bytes - device->bytes_used,
7351 				&fs_info->free_chunk_space);
7352 	}
7353 	ret = 0;
7354 	return ret;
7355 }
7356 
7357 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7358 {
7359 	struct btrfs_root *root = fs_info->tree_root;
7360 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7361 	struct extent_buffer *sb;
7362 	struct btrfs_disk_key *disk_key;
7363 	struct btrfs_chunk *chunk;
7364 	u8 *array_ptr;
7365 	unsigned long sb_array_offset;
7366 	int ret = 0;
7367 	u32 num_stripes;
7368 	u32 array_size;
7369 	u32 len = 0;
7370 	u32 cur_offset;
7371 	u64 type;
7372 	struct btrfs_key key;
7373 
7374 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7375 	/*
7376 	 * This will create extent buffer of nodesize, superblock size is
7377 	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7378 	 * overallocate but we can keep it as-is, only the first page is used.
7379 	 */
7380 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7381 					  root->root_key.objectid, 0);
7382 	if (IS_ERR(sb))
7383 		return PTR_ERR(sb);
7384 	set_extent_buffer_uptodate(sb);
7385 	/*
7386 	 * The sb extent buffer is artificial and just used to read the system array.
7387 	 * set_extent_buffer_uptodate() call does not properly mark all it's
7388 	 * pages up-to-date when the page is larger: extent does not cover the
7389 	 * whole page and consequently check_page_uptodate does not find all
7390 	 * the page's extents up-to-date (the hole beyond sb),
7391 	 * write_extent_buffer then triggers a WARN_ON.
7392 	 *
7393 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7394 	 * but sb spans only this function. Add an explicit SetPageUptodate call
7395 	 * to silence the warning eg. on PowerPC 64.
7396 	 */
7397 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7398 		SetPageUptodate(sb->pages[0]);
7399 
7400 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7401 	array_size = btrfs_super_sys_array_size(super_copy);
7402 
7403 	array_ptr = super_copy->sys_chunk_array;
7404 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7405 	cur_offset = 0;
7406 
7407 	while (cur_offset < array_size) {
7408 		disk_key = (struct btrfs_disk_key *)array_ptr;
7409 		len = sizeof(*disk_key);
7410 		if (cur_offset + len > array_size)
7411 			goto out_short_read;
7412 
7413 		btrfs_disk_key_to_cpu(&key, disk_key);
7414 
7415 		array_ptr += len;
7416 		sb_array_offset += len;
7417 		cur_offset += len;
7418 
7419 		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7420 			btrfs_err(fs_info,
7421 			    "unexpected item type %u in sys_array at offset %u",
7422 				  (u32)key.type, cur_offset);
7423 			ret = -EIO;
7424 			break;
7425 		}
7426 
7427 		chunk = (struct btrfs_chunk *)sb_array_offset;
7428 		/*
7429 		 * At least one btrfs_chunk with one stripe must be present,
7430 		 * exact stripe count check comes afterwards
7431 		 */
7432 		len = btrfs_chunk_item_size(1);
7433 		if (cur_offset + len > array_size)
7434 			goto out_short_read;
7435 
7436 		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7437 		if (!num_stripes) {
7438 			btrfs_err(fs_info,
7439 			"invalid number of stripes %u in sys_array at offset %u",
7440 				  num_stripes, cur_offset);
7441 			ret = -EIO;
7442 			break;
7443 		}
7444 
7445 		type = btrfs_chunk_type(sb, chunk);
7446 		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7447 			btrfs_err(fs_info,
7448 			"invalid chunk type %llu in sys_array at offset %u",
7449 				  type, cur_offset);
7450 			ret = -EIO;
7451 			break;
7452 		}
7453 
7454 		len = btrfs_chunk_item_size(num_stripes);
7455 		if (cur_offset + len > array_size)
7456 			goto out_short_read;
7457 
7458 		ret = read_one_chunk(&key, sb, chunk);
7459 		if (ret)
7460 			break;
7461 
7462 		array_ptr += len;
7463 		sb_array_offset += len;
7464 		cur_offset += len;
7465 	}
7466 	clear_extent_buffer_uptodate(sb);
7467 	free_extent_buffer_stale(sb);
7468 	return ret;
7469 
7470 out_short_read:
7471 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7472 			len, cur_offset);
7473 	clear_extent_buffer_uptodate(sb);
7474 	free_extent_buffer_stale(sb);
7475 	return -EIO;
7476 }
7477 
7478 /*
7479  * Check if all chunks in the fs are OK for read-write degraded mount
7480  *
7481  * If the @failing_dev is specified, it's accounted as missing.
7482  *
7483  * Return true if all chunks meet the minimal RW mount requirements.
7484  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7485  */
7486 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7487 					struct btrfs_device *failing_dev)
7488 {
7489 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7490 	struct extent_map *em;
7491 	u64 next_start = 0;
7492 	bool ret = true;
7493 
7494 	read_lock(&map_tree->lock);
7495 	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7496 	read_unlock(&map_tree->lock);
7497 	/* No chunk at all? Return false anyway */
7498 	if (!em) {
7499 		ret = false;
7500 		goto out;
7501 	}
7502 	while (em) {
7503 		struct map_lookup *map;
7504 		int missing = 0;
7505 		int max_tolerated;
7506 		int i;
7507 
7508 		map = em->map_lookup;
7509 		max_tolerated =
7510 			btrfs_get_num_tolerated_disk_barrier_failures(
7511 					map->type);
7512 		for (i = 0; i < map->num_stripes; i++) {
7513 			struct btrfs_device *dev = map->stripes[i].dev;
7514 
7515 			if (!dev || !dev->bdev ||
7516 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7517 			    dev->last_flush_error)
7518 				missing++;
7519 			else if (failing_dev && failing_dev == dev)
7520 				missing++;
7521 		}
7522 		if (missing > max_tolerated) {
7523 			if (!failing_dev)
7524 				btrfs_warn(fs_info,
7525 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7526 				   em->start, missing, max_tolerated);
7527 			free_extent_map(em);
7528 			ret = false;
7529 			goto out;
7530 		}
7531 		next_start = extent_map_end(em);
7532 		free_extent_map(em);
7533 
7534 		read_lock(&map_tree->lock);
7535 		em = lookup_extent_mapping(map_tree, next_start,
7536 					   (u64)(-1) - next_start);
7537 		read_unlock(&map_tree->lock);
7538 	}
7539 out:
7540 	return ret;
7541 }
7542 
7543 static void readahead_tree_node_children(struct extent_buffer *node)
7544 {
7545 	int i;
7546 	const int nr_items = btrfs_header_nritems(node);
7547 
7548 	for (i = 0; i < nr_items; i++)
7549 		btrfs_readahead_node_child(node, i);
7550 }
7551 
7552 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7553 {
7554 	struct btrfs_root *root = fs_info->chunk_root;
7555 	struct btrfs_path *path;
7556 	struct extent_buffer *leaf;
7557 	struct btrfs_key key;
7558 	struct btrfs_key found_key;
7559 	int ret;
7560 	int slot;
7561 	u64 total_dev = 0;
7562 	u64 last_ra_node = 0;
7563 
7564 	path = btrfs_alloc_path();
7565 	if (!path)
7566 		return -ENOMEM;
7567 
7568 	/*
7569 	 * uuid_mutex is needed only if we are mounting a sprout FS
7570 	 * otherwise we don't need it.
7571 	 */
7572 	mutex_lock(&uuid_mutex);
7573 
7574 	/*
7575 	 * It is possible for mount and umount to race in such a way that
7576 	 * we execute this code path, but open_fs_devices failed to clear
7577 	 * total_rw_bytes. We certainly want it cleared before reading the
7578 	 * device items, so clear it here.
7579 	 */
7580 	fs_info->fs_devices->total_rw_bytes = 0;
7581 
7582 	/*
7583 	 * Lockdep complains about possible circular locking dependency between
7584 	 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7585 	 * used for freeze procection of a fs (struct super_block.s_writers),
7586 	 * which we take when starting a transaction, and extent buffers of the
7587 	 * chunk tree if we call read_one_dev() while holding a lock on an
7588 	 * extent buffer of the chunk tree. Since we are mounting the filesystem
7589 	 * and at this point there can't be any concurrent task modifying the
7590 	 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7591 	 */
7592 	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7593 	path->skip_locking = 1;
7594 
7595 	/*
7596 	 * Read all device items, and then all the chunk items. All
7597 	 * device items are found before any chunk item (their object id
7598 	 * is smaller than the lowest possible object id for a chunk
7599 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7600 	 */
7601 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7602 	key.offset = 0;
7603 	key.type = 0;
7604 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7605 	if (ret < 0)
7606 		goto error;
7607 	while (1) {
7608 		struct extent_buffer *node;
7609 
7610 		leaf = path->nodes[0];
7611 		slot = path->slots[0];
7612 		if (slot >= btrfs_header_nritems(leaf)) {
7613 			ret = btrfs_next_leaf(root, path);
7614 			if (ret == 0)
7615 				continue;
7616 			if (ret < 0)
7617 				goto error;
7618 			break;
7619 		}
7620 		node = path->nodes[1];
7621 		if (node) {
7622 			if (last_ra_node != node->start) {
7623 				readahead_tree_node_children(node);
7624 				last_ra_node = node->start;
7625 			}
7626 		}
7627 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7628 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7629 			struct btrfs_dev_item *dev_item;
7630 			dev_item = btrfs_item_ptr(leaf, slot,
7631 						  struct btrfs_dev_item);
7632 			ret = read_one_dev(leaf, dev_item);
7633 			if (ret)
7634 				goto error;
7635 			total_dev++;
7636 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7637 			struct btrfs_chunk *chunk;
7638 
7639 			/*
7640 			 * We are only called at mount time, so no need to take
7641 			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7642 			 * we always lock first fs_info->chunk_mutex before
7643 			 * acquiring any locks on the chunk tree. This is a
7644 			 * requirement for chunk allocation, see the comment on
7645 			 * top of btrfs_chunk_alloc() for details.
7646 			 */
7647 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7648 			ret = read_one_chunk(&found_key, leaf, chunk);
7649 			if (ret)
7650 				goto error;
7651 		}
7652 		path->slots[0]++;
7653 	}
7654 
7655 	/*
7656 	 * After loading chunk tree, we've got all device information,
7657 	 * do another round of validation checks.
7658 	 */
7659 	if (total_dev != fs_info->fs_devices->total_devices) {
7660 		btrfs_warn(fs_info,
7661 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7662 			  btrfs_super_num_devices(fs_info->super_copy),
7663 			  total_dev);
7664 		fs_info->fs_devices->total_devices = total_dev;
7665 		btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7666 	}
7667 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7668 	    fs_info->fs_devices->total_rw_bytes) {
7669 		btrfs_err(fs_info,
7670 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7671 			  btrfs_super_total_bytes(fs_info->super_copy),
7672 			  fs_info->fs_devices->total_rw_bytes);
7673 		ret = -EINVAL;
7674 		goto error;
7675 	}
7676 	ret = 0;
7677 error:
7678 	mutex_unlock(&uuid_mutex);
7679 
7680 	btrfs_free_path(path);
7681 	return ret;
7682 }
7683 
7684 int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7685 {
7686 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7687 	struct btrfs_device *device;
7688 	int ret = 0;
7689 
7690 	fs_devices->fs_info = fs_info;
7691 
7692 	mutex_lock(&fs_devices->device_list_mutex);
7693 	list_for_each_entry(device, &fs_devices->devices, dev_list)
7694 		device->fs_info = fs_info;
7695 
7696 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7697 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7698 			device->fs_info = fs_info;
7699 			ret = btrfs_get_dev_zone_info(device, false);
7700 			if (ret)
7701 				break;
7702 		}
7703 
7704 		seed_devs->fs_info = fs_info;
7705 	}
7706 	mutex_unlock(&fs_devices->device_list_mutex);
7707 
7708 	return ret;
7709 }
7710 
7711 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7712 				 const struct btrfs_dev_stats_item *ptr,
7713 				 int index)
7714 {
7715 	u64 val;
7716 
7717 	read_extent_buffer(eb, &val,
7718 			   offsetof(struct btrfs_dev_stats_item, values) +
7719 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7720 			   sizeof(val));
7721 	return val;
7722 }
7723 
7724 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7725 				      struct btrfs_dev_stats_item *ptr,
7726 				      int index, u64 val)
7727 {
7728 	write_extent_buffer(eb, &val,
7729 			    offsetof(struct btrfs_dev_stats_item, values) +
7730 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7731 			    sizeof(val));
7732 }
7733 
7734 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7735 				       struct btrfs_path *path)
7736 {
7737 	struct btrfs_dev_stats_item *ptr;
7738 	struct extent_buffer *eb;
7739 	struct btrfs_key key;
7740 	int item_size;
7741 	int i, ret, slot;
7742 
7743 	if (!device->fs_info->dev_root)
7744 		return 0;
7745 
7746 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7747 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7748 	key.offset = device->devid;
7749 	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7750 	if (ret) {
7751 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7752 			btrfs_dev_stat_set(device, i, 0);
7753 		device->dev_stats_valid = 1;
7754 		btrfs_release_path(path);
7755 		return ret < 0 ? ret : 0;
7756 	}
7757 	slot = path->slots[0];
7758 	eb = path->nodes[0];
7759 	item_size = btrfs_item_size_nr(eb, slot);
7760 
7761 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7762 
7763 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7764 		if (item_size >= (1 + i) * sizeof(__le64))
7765 			btrfs_dev_stat_set(device, i,
7766 					   btrfs_dev_stats_value(eb, ptr, i));
7767 		else
7768 			btrfs_dev_stat_set(device, i, 0);
7769 	}
7770 
7771 	device->dev_stats_valid = 1;
7772 	btrfs_dev_stat_print_on_load(device);
7773 	btrfs_release_path(path);
7774 
7775 	return 0;
7776 }
7777 
7778 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7779 {
7780 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7781 	struct btrfs_device *device;
7782 	struct btrfs_path *path = NULL;
7783 	int ret = 0;
7784 
7785 	path = btrfs_alloc_path();
7786 	if (!path)
7787 		return -ENOMEM;
7788 
7789 	mutex_lock(&fs_devices->device_list_mutex);
7790 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7791 		ret = btrfs_device_init_dev_stats(device, path);
7792 		if (ret)
7793 			goto out;
7794 	}
7795 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7796 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7797 			ret = btrfs_device_init_dev_stats(device, path);
7798 			if (ret)
7799 				goto out;
7800 		}
7801 	}
7802 out:
7803 	mutex_unlock(&fs_devices->device_list_mutex);
7804 
7805 	btrfs_free_path(path);
7806 	return ret;
7807 }
7808 
7809 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7810 				struct btrfs_device *device)
7811 {
7812 	struct btrfs_fs_info *fs_info = trans->fs_info;
7813 	struct btrfs_root *dev_root = fs_info->dev_root;
7814 	struct btrfs_path *path;
7815 	struct btrfs_key key;
7816 	struct extent_buffer *eb;
7817 	struct btrfs_dev_stats_item *ptr;
7818 	int ret;
7819 	int i;
7820 
7821 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7822 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7823 	key.offset = device->devid;
7824 
7825 	path = btrfs_alloc_path();
7826 	if (!path)
7827 		return -ENOMEM;
7828 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7829 	if (ret < 0) {
7830 		btrfs_warn_in_rcu(fs_info,
7831 			"error %d while searching for dev_stats item for device %s",
7832 			      ret, rcu_str_deref(device->name));
7833 		goto out;
7834 	}
7835 
7836 	if (ret == 0 &&
7837 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7838 		/* need to delete old one and insert a new one */
7839 		ret = btrfs_del_item(trans, dev_root, path);
7840 		if (ret != 0) {
7841 			btrfs_warn_in_rcu(fs_info,
7842 				"delete too small dev_stats item for device %s failed %d",
7843 				      rcu_str_deref(device->name), ret);
7844 			goto out;
7845 		}
7846 		ret = 1;
7847 	}
7848 
7849 	if (ret == 1) {
7850 		/* need to insert a new item */
7851 		btrfs_release_path(path);
7852 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7853 					      &key, sizeof(*ptr));
7854 		if (ret < 0) {
7855 			btrfs_warn_in_rcu(fs_info,
7856 				"insert dev_stats item for device %s failed %d",
7857 				rcu_str_deref(device->name), ret);
7858 			goto out;
7859 		}
7860 	}
7861 
7862 	eb = path->nodes[0];
7863 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7864 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7865 		btrfs_set_dev_stats_value(eb, ptr, i,
7866 					  btrfs_dev_stat_read(device, i));
7867 	btrfs_mark_buffer_dirty(eb);
7868 
7869 out:
7870 	btrfs_free_path(path);
7871 	return ret;
7872 }
7873 
7874 /*
7875  * called from commit_transaction. Writes all changed device stats to disk.
7876  */
7877 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7878 {
7879 	struct btrfs_fs_info *fs_info = trans->fs_info;
7880 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7881 	struct btrfs_device *device;
7882 	int stats_cnt;
7883 	int ret = 0;
7884 
7885 	mutex_lock(&fs_devices->device_list_mutex);
7886 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7887 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7888 		if (!device->dev_stats_valid || stats_cnt == 0)
7889 			continue;
7890 
7891 
7892 		/*
7893 		 * There is a LOAD-LOAD control dependency between the value of
7894 		 * dev_stats_ccnt and updating the on-disk values which requires
7895 		 * reading the in-memory counters. Such control dependencies
7896 		 * require explicit read memory barriers.
7897 		 *
7898 		 * This memory barriers pairs with smp_mb__before_atomic in
7899 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7900 		 * barrier implied by atomic_xchg in
7901 		 * btrfs_dev_stats_read_and_reset
7902 		 */
7903 		smp_rmb();
7904 
7905 		ret = update_dev_stat_item(trans, device);
7906 		if (!ret)
7907 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7908 	}
7909 	mutex_unlock(&fs_devices->device_list_mutex);
7910 
7911 	return ret;
7912 }
7913 
7914 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7915 {
7916 	btrfs_dev_stat_inc(dev, index);
7917 	btrfs_dev_stat_print_on_error(dev);
7918 }
7919 
7920 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7921 {
7922 	if (!dev->dev_stats_valid)
7923 		return;
7924 	btrfs_err_rl_in_rcu(dev->fs_info,
7925 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7926 			   rcu_str_deref(dev->name),
7927 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7928 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7929 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7930 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7931 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7932 }
7933 
7934 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7935 {
7936 	int i;
7937 
7938 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7939 		if (btrfs_dev_stat_read(dev, i) != 0)
7940 			break;
7941 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7942 		return; /* all values == 0, suppress message */
7943 
7944 	btrfs_info_in_rcu(dev->fs_info,
7945 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7946 	       rcu_str_deref(dev->name),
7947 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7948 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7949 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7950 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7951 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7952 }
7953 
7954 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7955 			struct btrfs_ioctl_get_dev_stats *stats)
7956 {
7957 	BTRFS_DEV_LOOKUP_ARGS(args);
7958 	struct btrfs_device *dev;
7959 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7960 	int i;
7961 
7962 	mutex_lock(&fs_devices->device_list_mutex);
7963 	args.devid = stats->devid;
7964 	dev = btrfs_find_device(fs_info->fs_devices, &args);
7965 	mutex_unlock(&fs_devices->device_list_mutex);
7966 
7967 	if (!dev) {
7968 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7969 		return -ENODEV;
7970 	} else if (!dev->dev_stats_valid) {
7971 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7972 		return -ENODEV;
7973 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7974 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7975 			if (stats->nr_items > i)
7976 				stats->values[i] =
7977 					btrfs_dev_stat_read_and_reset(dev, i);
7978 			else
7979 				btrfs_dev_stat_set(dev, i, 0);
7980 		}
7981 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7982 			   current->comm, task_pid_nr(current));
7983 	} else {
7984 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7985 			if (stats->nr_items > i)
7986 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7987 	}
7988 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7989 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7990 	return 0;
7991 }
7992 
7993 /*
7994  * Update the size and bytes used for each device where it changed.  This is
7995  * delayed since we would otherwise get errors while writing out the
7996  * superblocks.
7997  *
7998  * Must be invoked during transaction commit.
7999  */
8000 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
8001 {
8002 	struct btrfs_device *curr, *next;
8003 
8004 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
8005 
8006 	if (list_empty(&trans->dev_update_list))
8007 		return;
8008 
8009 	/*
8010 	 * We don't need the device_list_mutex here.  This list is owned by the
8011 	 * transaction and the transaction must complete before the device is
8012 	 * released.
8013 	 */
8014 	mutex_lock(&trans->fs_info->chunk_mutex);
8015 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
8016 				 post_commit_list) {
8017 		list_del_init(&curr->post_commit_list);
8018 		curr->commit_total_bytes = curr->disk_total_bytes;
8019 		curr->commit_bytes_used = curr->bytes_used;
8020 	}
8021 	mutex_unlock(&trans->fs_info->chunk_mutex);
8022 }
8023 
8024 /*
8025  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
8026  */
8027 int btrfs_bg_type_to_factor(u64 flags)
8028 {
8029 	const int index = btrfs_bg_flags_to_raid_index(flags);
8030 
8031 	return btrfs_raid_array[index].ncopies;
8032 }
8033 
8034 
8035 
8036 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
8037 				 u64 chunk_offset, u64 devid,
8038 				 u64 physical_offset, u64 physical_len)
8039 {
8040 	struct btrfs_dev_lookup_args args = { .devid = devid };
8041 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8042 	struct extent_map *em;
8043 	struct map_lookup *map;
8044 	struct btrfs_device *dev;
8045 	u64 stripe_len;
8046 	bool found = false;
8047 	int ret = 0;
8048 	int i;
8049 
8050 	read_lock(&em_tree->lock);
8051 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
8052 	read_unlock(&em_tree->lock);
8053 
8054 	if (!em) {
8055 		btrfs_err(fs_info,
8056 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8057 			  physical_offset, devid);
8058 		ret = -EUCLEAN;
8059 		goto out;
8060 	}
8061 
8062 	map = em->map_lookup;
8063 	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
8064 	if (physical_len != stripe_len) {
8065 		btrfs_err(fs_info,
8066 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8067 			  physical_offset, devid, em->start, physical_len,
8068 			  stripe_len);
8069 		ret = -EUCLEAN;
8070 		goto out;
8071 	}
8072 
8073 	for (i = 0; i < map->num_stripes; i++) {
8074 		if (map->stripes[i].dev->devid == devid &&
8075 		    map->stripes[i].physical == physical_offset) {
8076 			found = true;
8077 			if (map->verified_stripes >= map->num_stripes) {
8078 				btrfs_err(fs_info,
8079 				"too many dev extents for chunk %llu found",
8080 					  em->start);
8081 				ret = -EUCLEAN;
8082 				goto out;
8083 			}
8084 			map->verified_stripes++;
8085 			break;
8086 		}
8087 	}
8088 	if (!found) {
8089 		btrfs_err(fs_info,
8090 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
8091 			physical_offset, devid);
8092 		ret = -EUCLEAN;
8093 	}
8094 
8095 	/* Make sure no dev extent is beyond device boundary */
8096 	dev = btrfs_find_device(fs_info->fs_devices, &args);
8097 	if (!dev) {
8098 		btrfs_err(fs_info, "failed to find devid %llu", devid);
8099 		ret = -EUCLEAN;
8100 		goto out;
8101 	}
8102 
8103 	if (physical_offset + physical_len > dev->disk_total_bytes) {
8104 		btrfs_err(fs_info,
8105 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8106 			  devid, physical_offset, physical_len,
8107 			  dev->disk_total_bytes);
8108 		ret = -EUCLEAN;
8109 		goto out;
8110 	}
8111 
8112 	if (dev->zone_info) {
8113 		u64 zone_size = dev->zone_info->zone_size;
8114 
8115 		if (!IS_ALIGNED(physical_offset, zone_size) ||
8116 		    !IS_ALIGNED(physical_len, zone_size)) {
8117 			btrfs_err(fs_info,
8118 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8119 				  devid, physical_offset, physical_len);
8120 			ret = -EUCLEAN;
8121 			goto out;
8122 		}
8123 	}
8124 
8125 out:
8126 	free_extent_map(em);
8127 	return ret;
8128 }
8129 
8130 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8131 {
8132 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8133 	struct extent_map *em;
8134 	struct rb_node *node;
8135 	int ret = 0;
8136 
8137 	read_lock(&em_tree->lock);
8138 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8139 		em = rb_entry(node, struct extent_map, rb_node);
8140 		if (em->map_lookup->num_stripes !=
8141 		    em->map_lookup->verified_stripes) {
8142 			btrfs_err(fs_info,
8143 			"chunk %llu has missing dev extent, have %d expect %d",
8144 				  em->start, em->map_lookup->verified_stripes,
8145 				  em->map_lookup->num_stripes);
8146 			ret = -EUCLEAN;
8147 			goto out;
8148 		}
8149 	}
8150 out:
8151 	read_unlock(&em_tree->lock);
8152 	return ret;
8153 }
8154 
8155 /*
8156  * Ensure that all dev extents are mapped to correct chunk, otherwise
8157  * later chunk allocation/free would cause unexpected behavior.
8158  *
8159  * NOTE: This will iterate through the whole device tree, which should be of
8160  * the same size level as the chunk tree.  This slightly increases mount time.
8161  */
8162 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8163 {
8164 	struct btrfs_path *path;
8165 	struct btrfs_root *root = fs_info->dev_root;
8166 	struct btrfs_key key;
8167 	u64 prev_devid = 0;
8168 	u64 prev_dev_ext_end = 0;
8169 	int ret = 0;
8170 
8171 	/*
8172 	 * We don't have a dev_root because we mounted with ignorebadroots and
8173 	 * failed to load the root, so we want to skip the verification in this
8174 	 * case for sure.
8175 	 *
8176 	 * However if the dev root is fine, but the tree itself is corrupted
8177 	 * we'd still fail to mount.  This verification is only to make sure
8178 	 * writes can happen safely, so instead just bypass this check
8179 	 * completely in the case of IGNOREBADROOTS.
8180 	 */
8181 	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8182 		return 0;
8183 
8184 	key.objectid = 1;
8185 	key.type = BTRFS_DEV_EXTENT_KEY;
8186 	key.offset = 0;
8187 
8188 	path = btrfs_alloc_path();
8189 	if (!path)
8190 		return -ENOMEM;
8191 
8192 	path->reada = READA_FORWARD;
8193 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8194 	if (ret < 0)
8195 		goto out;
8196 
8197 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8198 		ret = btrfs_next_leaf(root, path);
8199 		if (ret < 0)
8200 			goto out;
8201 		/* No dev extents at all? Not good */
8202 		if (ret > 0) {
8203 			ret = -EUCLEAN;
8204 			goto out;
8205 		}
8206 	}
8207 	while (1) {
8208 		struct extent_buffer *leaf = path->nodes[0];
8209 		struct btrfs_dev_extent *dext;
8210 		int slot = path->slots[0];
8211 		u64 chunk_offset;
8212 		u64 physical_offset;
8213 		u64 physical_len;
8214 		u64 devid;
8215 
8216 		btrfs_item_key_to_cpu(leaf, &key, slot);
8217 		if (key.type != BTRFS_DEV_EXTENT_KEY)
8218 			break;
8219 		devid = key.objectid;
8220 		physical_offset = key.offset;
8221 
8222 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8223 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8224 		physical_len = btrfs_dev_extent_length(leaf, dext);
8225 
8226 		/* Check if this dev extent overlaps with the previous one */
8227 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8228 			btrfs_err(fs_info,
8229 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8230 				  devid, physical_offset, prev_dev_ext_end);
8231 			ret = -EUCLEAN;
8232 			goto out;
8233 		}
8234 
8235 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8236 					    physical_offset, physical_len);
8237 		if (ret < 0)
8238 			goto out;
8239 		prev_devid = devid;
8240 		prev_dev_ext_end = physical_offset + physical_len;
8241 
8242 		ret = btrfs_next_item(root, path);
8243 		if (ret < 0)
8244 			goto out;
8245 		if (ret > 0) {
8246 			ret = 0;
8247 			break;
8248 		}
8249 	}
8250 
8251 	/* Ensure all chunks have corresponding dev extents */
8252 	ret = verify_chunk_dev_extent_mapping(fs_info);
8253 out:
8254 	btrfs_free_path(path);
8255 	return ret;
8256 }
8257 
8258 /*
8259  * Check whether the given block group or device is pinned by any inode being
8260  * used as a swapfile.
8261  */
8262 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8263 {
8264 	struct btrfs_swapfile_pin *sp;
8265 	struct rb_node *node;
8266 
8267 	spin_lock(&fs_info->swapfile_pins_lock);
8268 	node = fs_info->swapfile_pins.rb_node;
8269 	while (node) {
8270 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8271 		if (ptr < sp->ptr)
8272 			node = node->rb_left;
8273 		else if (ptr > sp->ptr)
8274 			node = node->rb_right;
8275 		else
8276 			break;
8277 	}
8278 	spin_unlock(&fs_info->swapfile_pins_lock);
8279 	return node != NULL;
8280 }
8281 
8282 static int relocating_repair_kthread(void *data)
8283 {
8284 	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8285 	struct btrfs_fs_info *fs_info = cache->fs_info;
8286 	u64 target;
8287 	int ret = 0;
8288 
8289 	target = cache->start;
8290 	btrfs_put_block_group(cache);
8291 
8292 	sb_start_write(fs_info->sb);
8293 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8294 		btrfs_info(fs_info,
8295 			   "zoned: skip relocating block group %llu to repair: EBUSY",
8296 			   target);
8297 		sb_end_write(fs_info->sb);
8298 		return -EBUSY;
8299 	}
8300 
8301 	mutex_lock(&fs_info->reclaim_bgs_lock);
8302 
8303 	/* Ensure block group still exists */
8304 	cache = btrfs_lookup_block_group(fs_info, target);
8305 	if (!cache)
8306 		goto out;
8307 
8308 	if (!cache->relocating_repair)
8309 		goto out;
8310 
8311 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8312 	if (ret < 0)
8313 		goto out;
8314 
8315 	btrfs_info(fs_info,
8316 		   "zoned: relocating block group %llu to repair IO failure",
8317 		   target);
8318 	ret = btrfs_relocate_chunk(fs_info, target);
8319 
8320 out:
8321 	if (cache)
8322 		btrfs_put_block_group(cache);
8323 	mutex_unlock(&fs_info->reclaim_bgs_lock);
8324 	btrfs_exclop_finish(fs_info);
8325 	sb_end_write(fs_info->sb);
8326 
8327 	return ret;
8328 }
8329 
8330 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8331 {
8332 	struct btrfs_block_group *cache;
8333 
8334 	/* Do not attempt to repair in degraded state */
8335 	if (btrfs_test_opt(fs_info, DEGRADED))
8336 		return 0;
8337 
8338 	cache = btrfs_lookup_block_group(fs_info, logical);
8339 	if (!cache)
8340 		return 0;
8341 
8342 	spin_lock(&cache->lock);
8343 	if (cache->relocating_repair) {
8344 		spin_unlock(&cache->lock);
8345 		btrfs_put_block_group(cache);
8346 		return 0;
8347 	}
8348 	cache->relocating_repair = 1;
8349 	spin_unlock(&cache->lock);
8350 
8351 	kthread_run(relocating_repair_kthread, cache,
8352 		    "btrfs-relocating-repair");
8353 
8354 	return 0;
8355 }
8356