xref: /openbmc/linux/fs/btrfs/volumes.c (revision 1a15eb72)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include "misc.h"
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "dev-replace.h"
29 #include "sysfs.h"
30 #include "tree-checker.h"
31 #include "space-info.h"
32 #include "block-group.h"
33 #include "discard.h"
34 #include "zoned.h"
35 
36 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
37 	[BTRFS_RAID_RAID10] = {
38 		.sub_stripes	= 2,
39 		.dev_stripes	= 1,
40 		.devs_max	= 0,	/* 0 == as many as possible */
41 		.devs_min	= 2,
42 		.tolerated_failures = 1,
43 		.devs_increment	= 2,
44 		.ncopies	= 2,
45 		.nparity        = 0,
46 		.raid_name	= "raid10",
47 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
48 		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
49 	},
50 	[BTRFS_RAID_RAID1] = {
51 		.sub_stripes	= 1,
52 		.dev_stripes	= 1,
53 		.devs_max	= 2,
54 		.devs_min	= 2,
55 		.tolerated_failures = 1,
56 		.devs_increment	= 2,
57 		.ncopies	= 2,
58 		.nparity        = 0,
59 		.raid_name	= "raid1",
60 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
61 		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
62 	},
63 	[BTRFS_RAID_RAID1C3] = {
64 		.sub_stripes	= 1,
65 		.dev_stripes	= 1,
66 		.devs_max	= 3,
67 		.devs_min	= 3,
68 		.tolerated_failures = 2,
69 		.devs_increment	= 3,
70 		.ncopies	= 3,
71 		.nparity        = 0,
72 		.raid_name	= "raid1c3",
73 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
74 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
75 	},
76 	[BTRFS_RAID_RAID1C4] = {
77 		.sub_stripes	= 1,
78 		.dev_stripes	= 1,
79 		.devs_max	= 4,
80 		.devs_min	= 4,
81 		.tolerated_failures = 3,
82 		.devs_increment	= 4,
83 		.ncopies	= 4,
84 		.nparity        = 0,
85 		.raid_name	= "raid1c4",
86 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
87 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
88 	},
89 	[BTRFS_RAID_DUP] = {
90 		.sub_stripes	= 1,
91 		.dev_stripes	= 2,
92 		.devs_max	= 1,
93 		.devs_min	= 1,
94 		.tolerated_failures = 0,
95 		.devs_increment	= 1,
96 		.ncopies	= 2,
97 		.nparity        = 0,
98 		.raid_name	= "dup",
99 		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
100 		.mindev_error	= 0,
101 	},
102 	[BTRFS_RAID_RAID0] = {
103 		.sub_stripes	= 1,
104 		.dev_stripes	= 1,
105 		.devs_max	= 0,
106 		.devs_min	= 1,
107 		.tolerated_failures = 0,
108 		.devs_increment	= 1,
109 		.ncopies	= 1,
110 		.nparity        = 0,
111 		.raid_name	= "raid0",
112 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
113 		.mindev_error	= 0,
114 	},
115 	[BTRFS_RAID_SINGLE] = {
116 		.sub_stripes	= 1,
117 		.dev_stripes	= 1,
118 		.devs_max	= 1,
119 		.devs_min	= 1,
120 		.tolerated_failures = 0,
121 		.devs_increment	= 1,
122 		.ncopies	= 1,
123 		.nparity        = 0,
124 		.raid_name	= "single",
125 		.bg_flag	= 0,
126 		.mindev_error	= 0,
127 	},
128 	[BTRFS_RAID_RAID5] = {
129 		.sub_stripes	= 1,
130 		.dev_stripes	= 1,
131 		.devs_max	= 0,
132 		.devs_min	= 2,
133 		.tolerated_failures = 1,
134 		.devs_increment	= 1,
135 		.ncopies	= 1,
136 		.nparity        = 1,
137 		.raid_name	= "raid5",
138 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
139 		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
140 	},
141 	[BTRFS_RAID_RAID6] = {
142 		.sub_stripes	= 1,
143 		.dev_stripes	= 1,
144 		.devs_max	= 0,
145 		.devs_min	= 3,
146 		.tolerated_failures = 2,
147 		.devs_increment	= 1,
148 		.ncopies	= 1,
149 		.nparity        = 2,
150 		.raid_name	= "raid6",
151 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
152 		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
153 	},
154 };
155 
156 /*
157  * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
158  * can be used as an index to access btrfs_raid_array[].
159  */
160 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
161 {
162 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
163 		return BTRFS_RAID_RAID10;
164 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
165 		return BTRFS_RAID_RAID1;
166 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
167 		return BTRFS_RAID_RAID1C3;
168 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
169 		return BTRFS_RAID_RAID1C4;
170 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
171 		return BTRFS_RAID_DUP;
172 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
173 		return BTRFS_RAID_RAID0;
174 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
175 		return BTRFS_RAID_RAID5;
176 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
177 		return BTRFS_RAID_RAID6;
178 
179 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
180 }
181 
182 const char *btrfs_bg_type_to_raid_name(u64 flags)
183 {
184 	const int index = btrfs_bg_flags_to_raid_index(flags);
185 
186 	if (index >= BTRFS_NR_RAID_TYPES)
187 		return NULL;
188 
189 	return btrfs_raid_array[index].raid_name;
190 }
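
/*
 * Illustrative sketch (annotation, not part of the original file): how the
 * RAID attribute table and the helpers above are typically consulted. The
 * function name example_describe_profile() is hypothetical.
 */
#if 0
static void example_describe_profile(void)
{
	const enum btrfs_raid_types idx =
		btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID6);

	/* Prints: raid6: 2 parity stripes, tolerates 2 missing devices */
	pr_info("%s: %d parity stripes, tolerates %d missing devices\n",
		btrfs_raid_array[idx].raid_name,
		btrfs_raid_array[idx].nparity,
		btrfs_raid_array[idx].tolerated_failures);
}
#endif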
191 
192 /*
193  * Fill @buf with a textual description of @bg_flags, using no more than
194  * @size_buf bytes including the terminating null byte.
195  */
196 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
197 {
198 	int i;
199 	int ret;
200 	char *bp = buf;
201 	u64 flags = bg_flags;
202 	u32 size_bp = size_buf;
203 
204 	if (!flags) {
205 		strcpy(bp, "NONE");
206 		return;
207 	}
208 
209 #define DESCRIBE_FLAG(flag, desc)						\
210 	do {								\
211 		if (flags & (flag)) {					\
212 			ret = snprintf(bp, size_bp, "%s|", (desc));	\
213 			if (ret < 0 || ret >= size_bp)			\
214 				goto out_overflow;			\
215 			size_bp -= ret;					\
216 			bp += ret;					\
217 			flags &= ~(flag);				\
218 		}							\
219 	} while (0)
220 
221 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
222 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
223 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
224 
225 	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
226 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
227 		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
228 			      btrfs_raid_array[i].raid_name);
229 #undef DESCRIBE_FLAG
230 
231 	if (flags) {
232 		ret = snprintf(bp, size_bp, "0x%llx|", flags);
233 		size_bp -= ret;
234 	}
235 
236 	if (size_bp < size_buf)
237 		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
238 
239 	/*
240 	 * The text is trimmed; it's up to the caller to provide a sufficiently
241 	 * large buffer.
242 	 */
243 out_overflow:;
244 }
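
/*
 * Illustrative sketch (annotation): typical use of the helper above. The
 * function name example_print_profile(), buffer name and size are all
 * hypothetical.
 */
#if 0
static void example_print_profile(struct btrfs_fs_info *fs_info)
{
	char profile[64];

	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_METADATA |
				    BTRFS_BLOCK_GROUP_RAID1,
				    profile, sizeof(profile));
	/* profile now contains "metadata|raid1" */
	btrfs_info(fs_info, "profile: %s", profile);
}
#endif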
245 
246 static int init_first_rw_device(struct btrfs_trans_handle *trans);
247 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
248 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
249 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
250 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
251 			     enum btrfs_map_op op,
252 			     u64 logical, u64 *length,
253 			     struct btrfs_io_context **bioc_ret,
254 			     int mirror_num, int need_raid_map);
255 
256 /*
257  * Device locking
258  * ==============
259  *
260  * There are several mutexes that protect manipulation of devices and low-level
261  * structures like chunks but not block groups, extents or files
262  *
263  * uuid_mutex (global lock)
264  * ------------------------
265  * protects the fs_uuids list that tracks all per-fs fs_devices, populated by
266  * the SCAN_DEV ioctl registration or at mount time, either implicitly (the
267  * first device) or as requested by the device= mount option
268  *
269  * the mutex can be very coarse and can cover long-running operations
270  *
271  * protects: updates to fs_devices counters like missing devices, rw devices,
272  * seeding, structure cloning, opening/closing devices at mount/umount time
273  *
274  * global::fs_devs - add, remove, updates to the global list
275  *
276  * does not protect: manipulation of the fs_devices::devices list in general
277  * but in the mount context it could be used to exclude list modifications
278  * by, e.g., the scan ioctl
279  *
280  * btrfs_device::name - renames (write side), read is RCU
281  *
282  * fs_devices::device_list_mutex (per-fs, with RCU)
283  * ------------------------------------------------
284  * protects updates to fs_devices::devices, ie. adding and deleting
285  *
286  * simple list traversal with read-only actions can be done with RCU protection
287  *
288  * may be used to exclude some operations from running concurrently without any
289  * modifications to the list (see write_all_supers)
290  *
291  * Is not required at mount and close times, because our device list is
292  * protected by the uuid_mutex at that point.
293  *
294  * balance_mutex
295  * -------------
296  * protects balance structures (status, state) and context accessed from
297  * several places (internally, ioctl)
298  *
299  * chunk_mutex
300  * -----------
301  * protects chunks, adding or removing during allocation, trim or when a new
302  * device is added/removed. Additionally it also protects post_commit_list of
303  * individual devices, since they can be added to the transaction's
304  * post_commit_list only with chunk_mutex held.
305  *
306  * cleaner_mutex
307  * -------------
308  * a big lock that is held by the cleaner thread and prevents running subvolume
309  * cleaning together with relocation or delayed iputs
310  *
311  *
312  * Lock nesting
313  * ============
314  *
315  * uuid_mutex
316  *   device_list_mutex
317  *     chunk_mutex
318  *   balance_mutex
319  *
320  *
321  * Exclusive operations
322  * ====================
323  *
324  * Maintains the exclusivity of the following operations that apply to the
325  * whole filesystem and cannot run in parallel.
326  *
327  * - Balance (*)
328  * - Device add
329  * - Device remove
330  * - Device replace (*)
331  * - Resize
332  *
333  * The device operations (as above) can be in one of the following states:
334  *
335  * - Running state
336  * - Paused state
337  * - Completed state
338  *
339  * Only device operations marked with (*) can go into the Paused state for the
340  * following reasons:
341  *
342  * - ioctl (only Balance can be Paused through ioctl)
343  * - filesystem remounted as read-only
344  * - filesystem unmounted and mounted as read-only
345  * - system power-cycle and filesystem mounted as read-only
346  * - filesystem or device errors leading to forced read-only
347  *
348  * The status of exclusive operation is set and cleared atomically.
349  * During the course of Paused state, fs_info::exclusive_operation remains set.
350  * A device operation in Paused or Running state can be canceled or resumed
351  * either by ioctl (Balance only) or when remounted as read-write.
352  * The exclusive status is cleared when the device operation is canceled or
353  * completed.
354  */
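
/*
 * Illustrative sketch (annotation): a fragment showing how a code path that
 * needs both the global lock and a per-fs device list lock must follow the
 * nesting order documented above.
 */
#if 0
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_devices->device_list_mutex);
	/* ... modify fs_devices->devices ... */
	mutex_unlock(&fs_devices->device_list_mutex);
	mutex_unlock(&uuid_mutex);
#endif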
355 
356 DEFINE_MUTEX(uuid_mutex);
357 static LIST_HEAD(fs_uuids);
358 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
359 {
360 	return &fs_uuids;
361 }
362 
363 /*
364  * alloc_fs_devices - allocate struct btrfs_fs_devices
365  * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
366  * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
367  *
368  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
369  * The returned struct is not linked onto any lists and can be destroyed with
370  * kfree() right away.
371  */
372 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
373 						 const u8 *metadata_fsid)
374 {
375 	struct btrfs_fs_devices *fs_devs;
376 
377 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
378 	if (!fs_devs)
379 		return ERR_PTR(-ENOMEM);
380 
381 	mutex_init(&fs_devs->device_list_mutex);
382 
383 	INIT_LIST_HEAD(&fs_devs->devices);
384 	INIT_LIST_HEAD(&fs_devs->alloc_list);
385 	INIT_LIST_HEAD(&fs_devs->fs_list);
386 	INIT_LIST_HEAD(&fs_devs->seed_list);
387 	if (fsid)
388 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
389 
390 	if (metadata_fsid)
391 		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
392 	else if (fsid)
393 		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
394 
395 	return fs_devs;
396 }
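
/*
 * Illustrative sketch (annotation): a fragment showing that callers must
 * check the result of alloc_fs_devices() with IS_ERR() before use, as
 * device_list_add() and clone_fs_devices() below do.
 */
#if 0
	fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
	if (IS_ERR(fs_devices))
		return ERR_CAST(fs_devices);
#endif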
397 
398 void btrfs_free_device(struct btrfs_device *device)
399 {
400 	WARN_ON(!list_empty(&device->post_commit_list));
401 	rcu_string_free(device->name);
402 	extent_io_tree_release(&device->alloc_state);
403 	bio_put(device->flush_bio);
404 	btrfs_destroy_dev_zone_info(device);
405 	kfree(device);
406 }
407 
408 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
409 {
410 	struct btrfs_device *device;
411 	WARN_ON(fs_devices->opened);
412 	while (!list_empty(&fs_devices->devices)) {
413 		device = list_entry(fs_devices->devices.next,
414 				    struct btrfs_device, dev_list);
415 		list_del(&device->dev_list);
416 		btrfs_free_device(device);
417 	}
418 	kfree(fs_devices);
419 }
420 
421 void __exit btrfs_cleanup_fs_uuids(void)
422 {
423 	struct btrfs_fs_devices *fs_devices;
424 
425 	while (!list_empty(&fs_uuids)) {
426 		fs_devices = list_entry(fs_uuids.next,
427 					struct btrfs_fs_devices, fs_list);
428 		list_del(&fs_devices->fs_list);
429 		free_fs_devices(fs_devices);
430 	}
431 }
432 
433 static noinline struct btrfs_fs_devices *find_fsid(
434 		const u8 *fsid, const u8 *metadata_fsid)
435 {
436 	struct btrfs_fs_devices *fs_devices;
437 
438 	ASSERT(fsid);
439 
440 	/* Handle non-split-brain cases */
441 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
442 		if (metadata_fsid) {
443 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
444 			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
445 				      BTRFS_FSID_SIZE) == 0)
446 				return fs_devices;
447 		} else {
448 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
449 				return fs_devices;
450 		}
451 	}
452 	return NULL;
453 }
454 
455 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
456 				struct btrfs_super_block *disk_super)
457 {
458 
459 	struct btrfs_fs_devices *fs_devices;
460 
461 	/*
462 	 * Handle scanned device having completed its fsid change but
463 	 * belonging to a fs_devices that was created by first scanning
464 	 * a device which didn't have its fsid/metadata_uuid changed
465 	 * at all but had the CHANGING_FSID_V2 flag set.
466 	 */
467 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
468 		if (fs_devices->fsid_change &&
469 		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
470 			   BTRFS_FSID_SIZE) == 0 &&
471 		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
472 			   BTRFS_FSID_SIZE) == 0) {
473 			return fs_devices;
474 		}
475 	}
476 	/*
477 	 * Handle scanned device having completed its fsid change but
478 	 * belonging to a fs_devices that was created by a device that
479 	 * has an outdated pair of fsid/metadata_uuid and
480 	 * CHANGING_FSID_V2 flag set.
481 	 */
482 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
483 		if (fs_devices->fsid_change &&
484 		    memcmp(fs_devices->metadata_uuid,
485 			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
486 		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
487 			   BTRFS_FSID_SIZE) == 0) {
488 			return fs_devices;
489 		}
490 	}
491 
492 	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
493 }
494 
495 
496 static int
497 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
498 		      int flush, struct block_device **bdev,
499 		      struct btrfs_super_block **disk_super)
500 {
501 	int ret;
502 
503 	*bdev = blkdev_get_by_path(device_path, flags, holder);
504 
505 	if (IS_ERR(*bdev)) {
506 		ret = PTR_ERR(*bdev);
507 		goto error;
508 	}
509 
510 	if (flush)
511 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
512 	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
513 	if (ret) {
514 		blkdev_put(*bdev, flags);
515 		goto error;
516 	}
517 	invalidate_bdev(*bdev);
518 	*disk_super = btrfs_read_dev_super(*bdev);
519 	if (IS_ERR(*disk_super)) {
520 		ret = PTR_ERR(*disk_super);
521 		blkdev_put(*bdev, flags);
522 		goto error;
523 	}
524 
525 	return 0;
526 
527 error:
528 	*bdev = NULL;
529 	return ret;
530 }
531 
532 static bool device_path_matched(const char *path, struct btrfs_device *device)
533 {
534 	int found;
535 
536 	rcu_read_lock();
537 	found = strcmp(rcu_str_deref(device->name), path);
538 	rcu_read_unlock();
539 
540 	return found == 0;
541 }
542 
543 /*
544  *  Search and remove all stale devices (devices which are not mounted).
545  *  When both inputs are NULL, it will search and release all stale devices.
546  *  path:	Optional. When provided, it will release all unmounted devices
547  *		matching this path only.
548  *  skip_device: Optional. Will skip this device when searching for the stale
549  *		devices.
550  *  Return:	0 for success or if @path is NULL.
551  * 		-EBUSY if @path is a mounted device.
552  * 		-ENOENT if @path does not match any device in the list.
553  */
554 static int btrfs_free_stale_devices(const char *path,
555 				     struct btrfs_device *skip_device)
556 {
557 	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
558 	struct btrfs_device *device, *tmp_device;
559 	int ret = 0;
560 
561 	lockdep_assert_held(&uuid_mutex);
562 
563 	if (path)
564 		ret = -ENOENT;
565 
566 	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
567 
568 		mutex_lock(&fs_devices->device_list_mutex);
569 		list_for_each_entry_safe(device, tmp_device,
570 					 &fs_devices->devices, dev_list) {
571 			if (skip_device && skip_device == device)
572 				continue;
573 			if (path && !device->name)
574 				continue;
575 			if (path && !device_path_matched(path, device))
576 				continue;
577 			if (fs_devices->opened) {
578 				/* for an already deleted device return 0 */
579 				if (path && ret != 0)
580 					ret = -EBUSY;
581 				break;
582 			}
583 
584 			/* delete the stale device */
585 			fs_devices->num_devices--;
586 			list_del(&device->dev_list);
587 			btrfs_free_device(device);
588 
589 			ret = 0;
590 		}
591 		mutex_unlock(&fs_devices->device_list_mutex);
592 
593 		if (fs_devices->num_devices == 0) {
594 			btrfs_sysfs_remove_fsid(fs_devices);
595 			list_del(&fs_devices->fs_list);
596 			free_fs_devices(fs_devices);
597 		}
598 	}
599 
600 	return ret;
601 }
602 
603 /*
604  * This is only used on mount, and we are protected from competing things
605  * messing with our fs_devices by the uuid_mutex, thus we do not need the
606  * fs_devices->device_list_mutex here.
607  */
608 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
609 			struct btrfs_device *device, fmode_t flags,
610 			void *holder)
611 {
612 	struct request_queue *q;
613 	struct block_device *bdev;
614 	struct btrfs_super_block *disk_super;
615 	u64 devid;
616 	int ret;
617 
618 	if (device->bdev)
619 		return -EINVAL;
620 	if (!device->name)
621 		return -EINVAL;
622 
623 	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
624 				    &bdev, &disk_super);
625 	if (ret)
626 		return ret;
627 
628 	devid = btrfs_stack_device_id(&disk_super->dev_item);
629 	if (devid != device->devid)
630 		goto error_free_page;
631 
632 	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
633 		goto error_free_page;
634 
635 	device->generation = btrfs_super_generation(disk_super);
636 
637 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
638 		if (btrfs_super_incompat_flags(disk_super) &
639 		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
640 			pr_err(
641 		"BTRFS: Invalid seeding and uuid-changed device detected\n");
642 			goto error_free_page;
643 		}
644 
645 		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
646 		fs_devices->seeding = true;
647 	} else {
648 		if (bdev_read_only(bdev))
649 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
650 		else
651 			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
652 	}
653 
654 	q = bdev_get_queue(bdev);
655 	if (!blk_queue_nonrot(q))
656 		fs_devices->rotating = true;
657 
658 	device->bdev = bdev;
659 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
660 	device->mode = flags;
661 
662 	fs_devices->open_devices++;
663 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
664 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
665 		fs_devices->rw_devices++;
666 		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
667 	}
668 	btrfs_release_disk_super(disk_super);
669 
670 	return 0;
671 
672 error_free_page:
673 	btrfs_release_disk_super(disk_super);
674 	blkdev_put(bdev, flags);
675 
676 	return -EINVAL;
677 }
678 
679 /*
680  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
681  * being created with a disk that has already completed its fsid change. Such
682  * disk can belong to an fs which has its FSID changed or to one which doesn't.
683  * a disk can belong to an fs which has had its FSID changed or to one which hasn't.
684  */
685 static struct btrfs_fs_devices *find_fsid_inprogress(
686 					struct btrfs_super_block *disk_super)
687 {
688 	struct btrfs_fs_devices *fs_devices;
689 
690 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
691 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
692 			   BTRFS_FSID_SIZE) != 0 &&
693 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
694 			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
695 			return fs_devices;
696 		}
697 	}
698 
699 	return find_fsid(disk_super->fsid, NULL);
700 }
701 
702 
703 static struct btrfs_fs_devices *find_fsid_changed(
704 					struct btrfs_super_block *disk_super)
705 {
706 	struct btrfs_fs_devices *fs_devices;
707 
708 	/*
709 	 * Handles the case where the scanned device is part of an fs that had
710 	 * multiple successful changes of FSID but the current device didn't
711 	 * observe it, meaning our fsid will be different from theirs. We need
712 	 * to handle two subcases:
713 	 *  1 - The fs still continues to have different METADATA/FSID uuids.
714 	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
715 	 *  are equal).
716 	 */
717 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
718 		/* Changed UUIDs */
719 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
720 			   BTRFS_FSID_SIZE) != 0 &&
721 		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
722 			   BTRFS_FSID_SIZE) == 0 &&
723 		    memcmp(fs_devices->fsid, disk_super->fsid,
724 			   BTRFS_FSID_SIZE) != 0)
725 			return fs_devices;
726 
727 		/* Unchanged UUIDs */
728 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
729 			   BTRFS_FSID_SIZE) == 0 &&
730 		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
731 			   BTRFS_FSID_SIZE) == 0)
732 			return fs_devices;
733 	}
734 
735 	return NULL;
736 }
737 
738 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
739 				struct btrfs_super_block *disk_super)
740 {
741 	struct btrfs_fs_devices *fs_devices;
742 
743 	/*
744 	 * Handle the case where the scanned device is part of an fs whose last
745 	 * metadata UUID change reverted it to the original FSID. At the same
746 	 * time fs_devices was first created by another constituent device
747 	 * which didn't fully observe the operation. This results in a
748 	 * btrfs_fs_devices created with metadata/fsid different AND
749 	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
750 	 * fs_devices equal to the FSID of the disk.
751 	 */
752 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
753 		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
754 			   BTRFS_FSID_SIZE) != 0 &&
755 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
756 			   BTRFS_FSID_SIZE) == 0 &&
757 		    fs_devices->fsid_change)
758 			return fs_devices;
759 	}
760 
761 	return NULL;
762 }
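
/*
 * Annotation: device_list_add() below picks among the fs_devices finders
 * above based on the scanned super block's flags:
 *
 *   CHANGING_FSID_V2 set, METADATA_UUID unset -> find_fsid_inprogress()
 *   CHANGING_FSID_V2 set, METADATA_UUID set   -> find_fsid_changed()
 *   METADATA_UUID set only                    -> find_fsid_with_metadata_uuid()
 *   neither flag set                          -> find_fsid_reverted_metadata(),
 *                                                falling back to find_fsid()
 */
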
763 /*
764  * Add new device to list of registered devices
765  *
766  * Returns:
767  * device pointer which was just added or updated when successful
768  * error pointer when failed
769  */
770 static noinline struct btrfs_device *device_list_add(const char *path,
771 			   struct btrfs_super_block *disk_super,
772 			   bool *new_device_added)
773 {
774 	struct btrfs_device *device;
775 	struct btrfs_fs_devices *fs_devices = NULL;
776 	struct rcu_string *name;
777 	u64 found_transid = btrfs_super_generation(disk_super);
778 	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
779 	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
780 		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
781 	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
782 					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
783 
784 	if (fsid_change_in_progress) {
785 		if (!has_metadata_uuid)
786 			fs_devices = find_fsid_inprogress(disk_super);
787 		else
788 			fs_devices = find_fsid_changed(disk_super);
789 	} else if (has_metadata_uuid) {
790 		fs_devices = find_fsid_with_metadata_uuid(disk_super);
791 	} else {
792 		fs_devices = find_fsid_reverted_metadata(disk_super);
793 		if (!fs_devices)
794 			fs_devices = find_fsid(disk_super->fsid, NULL);
795 	}
796 
797 
798 	if (!fs_devices) {
799 		if (has_metadata_uuid)
800 			fs_devices = alloc_fs_devices(disk_super->fsid,
801 						      disk_super->metadata_uuid);
802 		else
803 			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
804 
805 		if (IS_ERR(fs_devices))
806 			return ERR_CAST(fs_devices);
807 
808 		fs_devices->fsid_change = fsid_change_in_progress;
809 
810 		mutex_lock(&fs_devices->device_list_mutex);
811 		list_add(&fs_devices->fs_list, &fs_uuids);
812 
813 		device = NULL;
814 	} else {
815 		struct btrfs_dev_lookup_args args = {
816 			.devid = devid,
817 			.uuid = disk_super->dev_item.uuid,
818 		};
819 
820 		mutex_lock(&fs_devices->device_list_mutex);
821 		device = btrfs_find_device(fs_devices, &args);
822 
823 		/*
824 		 * If this disk has been pulled into an fs_devices created by
825 		 * a device which had the CHANGING_FSID_V2 flag then replace the
826 		 * metadata_uuid/fsid values of the fs_devices.
827 		 */
828 		if (fs_devices->fsid_change &&
829 		    found_transid > fs_devices->latest_generation) {
830 			memcpy(fs_devices->fsid, disk_super->fsid,
831 					BTRFS_FSID_SIZE);
832 
833 			if (has_metadata_uuid)
834 				memcpy(fs_devices->metadata_uuid,
835 				       disk_super->metadata_uuid,
836 				       BTRFS_FSID_SIZE);
837 			else
838 				memcpy(fs_devices->metadata_uuid,
839 				       disk_super->fsid, BTRFS_FSID_SIZE);
840 
841 			fs_devices->fsid_change = false;
842 		}
843 	}
844 
845 	if (!device) {
846 		if (fs_devices->opened) {
847 			mutex_unlock(&fs_devices->device_list_mutex);
848 			return ERR_PTR(-EBUSY);
849 		}
850 
851 		device = btrfs_alloc_device(NULL, &devid,
852 					    disk_super->dev_item.uuid);
853 		if (IS_ERR(device)) {
854 			mutex_unlock(&fs_devices->device_list_mutex);
855 			/* we can safely leave the fs_devices entry around */
856 			return device;
857 		}
858 
859 		name = rcu_string_strdup(path, GFP_NOFS);
860 		if (!name) {
861 			btrfs_free_device(device);
862 			mutex_unlock(&fs_devices->device_list_mutex);
863 			return ERR_PTR(-ENOMEM);
864 		}
865 		rcu_assign_pointer(device->name, name);
866 
867 		list_add_rcu(&device->dev_list, &fs_devices->devices);
868 		fs_devices->num_devices++;
869 
870 		device->fs_devices = fs_devices;
871 		*new_device_added = true;
872 
873 		if (disk_super->label[0])
874 			pr_info(
875 	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
876 				disk_super->label, devid, found_transid, path,
877 				current->comm, task_pid_nr(current));
878 		else
879 			pr_info(
880 	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
881 				disk_super->fsid, devid, found_transid, path,
882 				current->comm, task_pid_nr(current));
883 
884 	} else if (!device->name || strcmp(device->name->str, path)) {
885 		/*
886 		 * When the FS is already mounted:
887 		 * 1. If you are here and if the device->name is NULL that
888 		 *    means this device was missing at the time of FS mount.
889 		 * 2. If you are here and if the device->name is different
890 		 *    from 'path' that means either
891 		 *      a. The same device disappeared and reappeared with
892 		 *         different name. or
893 		 *      b. The missing-disk-which-was-replaced has
894 		 *         reappeared now.
895 		 *
896 		 * We must allow 1 and 2a above. But 2b would be spurious
897 		 * and unintentional.
898 		 *
899 		 * Further in case of 1 and 2a above, the disk at 'path'
900 		 * would have missed some transaction when it was away and
901 		 * in case of 2a the stale bdev has to be updated as well.
902 		 * 2b must not be allowed at any time.
903 		 */
904 
905 		/*
906 		 * For now, we do allow update to btrfs_fs_device through the
907 		 * btrfs dev scan cli after FS has been mounted.  We're still
908 		 * tracking a problem where systems fail mount by subvolume id
909 		 * when we reject replacement on a mounted FS.
910 		 */
911 		if (!fs_devices->opened && found_transid < device->generation) {
912 			/*
913 			 * That is, if the FS is _not_ mounted and you are
914 			 * here, that means there is more than one disk with
915 			 * the same uuid and devid. We keep the one with the
916 			 * larger generation number, or the last-in if the
917 			 * generations are equal.
918 			 */
919 			mutex_unlock(&fs_devices->device_list_mutex);
920 			return ERR_PTR(-EEXIST);
921 		}
922 
923 		/*
924 		 * We are going to replace the device path for a given devid,
925 		 * make sure it's the same device if the device is mounted
926 		 */
927 		if (device->bdev) {
928 			int error;
929 			dev_t path_dev;
930 
931 			error = lookup_bdev(path, &path_dev);
932 			if (error) {
933 				mutex_unlock(&fs_devices->device_list_mutex);
934 				return ERR_PTR(error);
935 			}
936 
937 			if (device->bdev->bd_dev != path_dev) {
938 				mutex_unlock(&fs_devices->device_list_mutex);
939 				/*
940 				 * device->fs_info may not be reliable here, so
941 				 * pass in a NULL instead. This avoids a
942 				 * possible use-after-free when the fs_info and
943 				 * fs_info->sb are already torn down.
944 				 */
945 				btrfs_warn_in_rcu(NULL,
946 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
947 						  path, devid, found_transid,
948 						  current->comm,
949 						  task_pid_nr(current));
950 				return ERR_PTR(-EEXIST);
951 			}
952 			btrfs_info_in_rcu(device->fs_info,
953 	"devid %llu device path %s changed to %s scanned by %s (%d)",
954 					  devid, rcu_str_deref(device->name),
955 					  path, current->comm,
956 					  task_pid_nr(current));
957 		}
958 
959 		name = rcu_string_strdup(path, GFP_NOFS);
960 		if (!name) {
961 			mutex_unlock(&fs_devices->device_list_mutex);
962 			return ERR_PTR(-ENOMEM);
963 		}
964 		rcu_string_free(device->name);
965 		rcu_assign_pointer(device->name, name);
966 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
967 			fs_devices->missing_devices--;
968 			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
969 		}
970 	}
971 
972 	/*
973 	 * Unmount does not free the btrfs_device struct but would zero
974 	 * generation along with most of the other members. So just update
975 	 * it back. We need it to pick the disk with the largest generation
976 	 * (as above).
977 	 */
978 	if (!fs_devices->opened) {
979 		device->generation = found_transid;
980 		fs_devices->latest_generation = max_t(u64, found_transid,
981 						fs_devices->latest_generation);
982 	}
983 
984 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
985 
986 	mutex_unlock(&fs_devices->device_list_mutex);
987 	return device;
988 }
989 
990 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
991 {
992 	struct btrfs_fs_devices *fs_devices;
993 	struct btrfs_device *device;
994 	struct btrfs_device *orig_dev;
995 	int ret = 0;
996 
997 	lockdep_assert_held(&uuid_mutex);
998 
999 	fs_devices = alloc_fs_devices(orig->fsid, NULL);
1000 	if (IS_ERR(fs_devices))
1001 		return fs_devices;
1002 
1003 	fs_devices->total_devices = orig->total_devices;
1004 
1005 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1006 		struct rcu_string *name;
1007 
1008 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
1009 					    orig_dev->uuid);
1010 		if (IS_ERR(device)) {
1011 			ret = PTR_ERR(device);
1012 			goto error;
1013 		}
1014 
1015 		/*
1016 		 * This is ok to do without the rcu read lock held because we hold
1017 		 * the uuid_mutex, so nothing we touch in here is going to disappear.
1018 		 */
1019 		if (orig_dev->name) {
1020 			name = rcu_string_strdup(orig_dev->name->str,
1021 					GFP_KERNEL);
1022 			if (!name) {
1023 				btrfs_free_device(device);
1024 				ret = -ENOMEM;
1025 				goto error;
1026 			}
1027 			rcu_assign_pointer(device->name, name);
1028 		}
1029 
1030 		list_add(&device->dev_list, &fs_devices->devices);
1031 		device->fs_devices = fs_devices;
1032 		fs_devices->num_devices++;
1033 	}
1034 	return fs_devices;
1035 error:
1036 	free_fs_devices(fs_devices);
1037 	return ERR_PTR(ret);
1038 }
1039 
1040 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1041 				      struct btrfs_device **latest_dev)
1042 {
1043 	struct btrfs_device *device, *next;
1044 
1045 	/* This is the initialized path, it is safe to release the devices. */
1046 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1047 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1048 			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1049 				      &device->dev_state) &&
1050 			    !test_bit(BTRFS_DEV_STATE_MISSING,
1051 				      &device->dev_state) &&
1052 			    (!*latest_dev ||
1053 			     device->generation > (*latest_dev)->generation)) {
1054 				*latest_dev = device;
1055 			}
1056 			continue;
1057 		}
1058 
1059 		/*
1060 		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
1061 		 * in btrfs_init_dev_replace(), so just continue.
1062 		 */
1063 		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1064 			continue;
1065 
1066 		if (device->bdev) {
1067 			blkdev_put(device->bdev, device->mode);
1068 			device->bdev = NULL;
1069 			fs_devices->open_devices--;
1070 		}
1071 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1072 			list_del_init(&device->dev_alloc_list);
1073 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1074 			fs_devices->rw_devices--;
1075 		}
1076 		list_del_init(&device->dev_list);
1077 		fs_devices->num_devices--;
1078 		btrfs_free_device(device);
1079 	}
1080 
1081 }
1082 
1083 /*
1084  * After we have read the system tree and know devids belonging to this
1085  * filesystem, remove any device which does not belong there.
1086  */
1087 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1088 {
1089 	struct btrfs_device *latest_dev = NULL;
1090 	struct btrfs_fs_devices *seed_dev;
1091 
1092 	mutex_lock(&uuid_mutex);
1093 	__btrfs_free_extra_devids(fs_devices, &latest_dev);
1094 
1095 	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1096 		__btrfs_free_extra_devids(seed_dev, &latest_dev);
1097 
1098 	fs_devices->latest_dev = latest_dev;
1099 
1100 	mutex_unlock(&uuid_mutex);
1101 }
1102 
1103 static void btrfs_close_bdev(struct btrfs_device *device)
1104 {
1105 	if (!device->bdev)
1106 		return;
1107 
1108 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1109 		sync_blockdev(device->bdev);
1110 		invalidate_bdev(device->bdev);
1111 	}
1112 
1113 	blkdev_put(device->bdev, device->mode);
1114 }
1115 
1116 static void btrfs_close_one_device(struct btrfs_device *device)
1117 {
1118 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1119 
1120 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1121 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1122 		list_del_init(&device->dev_alloc_list);
1123 		fs_devices->rw_devices--;
1124 	}
1125 
1126 	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1127 		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1128 
1129 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1130 		fs_devices->missing_devices--;
1131 
1132 	btrfs_close_bdev(device);
1133 	if (device->bdev) {
1134 		fs_devices->open_devices--;
1135 		device->bdev = NULL;
1136 	}
1137 	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1138 	btrfs_destroy_dev_zone_info(device);
1139 
1140 	device->fs_info = NULL;
1141 	atomic_set(&device->dev_stats_ccnt, 0);
1142 	extent_io_tree_release(&device->alloc_state);
1143 
1144 	/*
1145 	 * Reset the flush error record. We might have a transient flush error
1146 	 * in this mount, and if so we aborted the current transaction and set
1147 	 * the fs to an error state, guaranteeing no super blocks can be further
1148 	 * committed. However that error might be transient and if we unmount the
1149 	 * filesystem and mount it again, we should allow the mount to succeed
1150 	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
1151 	 * filesystem again we still get flush errors, then we will again abort
1152 	 * any transaction and set the error state, guaranteeing no commits of
1153 	 * unsafe super blocks.
1154 	 */
1155 	device->last_flush_error = 0;
1156 
1157 	/* Verify the device is back in a pristine state */
1158 	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1159 	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1160 	ASSERT(list_empty(&device->dev_alloc_list));
1161 	ASSERT(list_empty(&device->post_commit_list));
1162 	ASSERT(atomic_read(&device->reada_in_flight) == 0);
1163 }
1164 
1165 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1166 {
1167 	struct btrfs_device *device, *tmp;
1168 
1169 	lockdep_assert_held(&uuid_mutex);
1170 
1171 	if (--fs_devices->opened > 0)
1172 		return;
1173 
1174 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1175 		btrfs_close_one_device(device);
1176 
1177 	WARN_ON(fs_devices->open_devices);
1178 	WARN_ON(fs_devices->rw_devices);
1179 	fs_devices->opened = 0;
1180 	fs_devices->seeding = false;
1181 	fs_devices->fs_info = NULL;
1182 }
1183 
1184 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1185 {
1186 	LIST_HEAD(list);
1187 	struct btrfs_fs_devices *tmp;
1188 
1189 	mutex_lock(&uuid_mutex);
1190 	close_fs_devices(fs_devices);
1191 	if (!fs_devices->opened)
1192 		list_splice_init(&fs_devices->seed_list, &list);
1193 
1194 	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1195 		close_fs_devices(fs_devices);
1196 		list_del(&fs_devices->seed_list);
1197 		free_fs_devices(fs_devices);
1198 	}
1199 	mutex_unlock(&uuid_mutex);
1200 }
1201 
1202 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1203 				fmode_t flags, void *holder)
1204 {
1205 	struct btrfs_device *device;
1206 	struct btrfs_device *latest_dev = NULL;
1207 	struct btrfs_device *tmp_device;
1208 
1209 	flags |= FMODE_EXCL;
1210 
1211 	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1212 				 dev_list) {
1213 		int ret;
1214 
1215 		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1216 		if (ret == 0 &&
1217 		    (!latest_dev || device->generation > latest_dev->generation)) {
1218 			latest_dev = device;
1219 		} else if (ret == -ENODATA) {
1220 			fs_devices->num_devices--;
1221 			list_del(&device->dev_list);
1222 			btrfs_free_device(device);
1223 		}
1224 	}
1225 	if (fs_devices->open_devices == 0)
1226 		return -EINVAL;
1227 
1228 	fs_devices->opened = 1;
1229 	fs_devices->latest_dev = latest_dev;
1230 	fs_devices->total_rw_bytes = 0;
1231 	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1232 	fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1233 
1234 	return 0;
1235 }
1236 
1237 static int devid_cmp(void *priv, const struct list_head *a,
1238 		     const struct list_head *b)
1239 {
1240 	const struct btrfs_device *dev1, *dev2;
1241 
1242 	dev1 = list_entry(a, struct btrfs_device, dev_list);
1243 	dev2 = list_entry(b, struct btrfs_device, dev_list);
1244 
1245 	if (dev1->devid < dev2->devid)
1246 		return -1;
1247 	else if (dev1->devid > dev2->devid)
1248 		return 1;
1249 	return 0;
1250 }
1251 
1252 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1253 		       fmode_t flags, void *holder)
1254 {
1255 	int ret;
1256 
1257 	lockdep_assert_held(&uuid_mutex);
1258 	/*
1259 	 * The device_list_mutex cannot be taken here in case opening the
1260 	 * underlying device takes further locks like open_mutex.
1261 	 *
1262 	 * We also don't need the lock here as this is called during mount and
1263 	 * exclusion is provided by uuid_mutex
1264 	 */
1265 
1266 	if (fs_devices->opened) {
1267 		fs_devices->opened++;
1268 		ret = 0;
1269 	} else {
1270 		list_sort(NULL, &fs_devices->devices, devid_cmp);
1271 		ret = open_fs_devices(fs_devices, flags, holder);
1272 	}
1273 
1274 	return ret;
1275 }
1276 
1277 void btrfs_release_disk_super(struct btrfs_super_block *super)
1278 {
1279 	struct page *page = virt_to_page(super);
1280 
1281 	put_page(page);
1282 }
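
/*
 * Annotation: the super block handed out by btrfs_read_disk_super() below
 * lives in a pagecache page, so the put_page() above drops the reference
 * taken by read_cache_page_gfp().
 */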
1283 
1284 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1285 						       u64 bytenr, u64 bytenr_orig)
1286 {
1287 	struct btrfs_super_block *disk_super;
1288 	struct page *page;
1289 	void *p;
1290 	pgoff_t index;
1291 
1292 	/* make sure our super fits in the device */
1293 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1294 		return ERR_PTR(-EINVAL);
1295 
1296 	/* make sure our super fits in the page */
1297 	if (sizeof(*disk_super) > PAGE_SIZE)
1298 		return ERR_PTR(-EINVAL);
1299 
1300 	/* make sure our super doesn't straddle pages on disk */
1301 	index = bytenr >> PAGE_SHIFT;
1302 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1303 		return ERR_PTR(-EINVAL);
1304 
1305 	/* pull in the page with our super */
1306 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1307 
1308 	if (IS_ERR(page))
1309 		return ERR_CAST(page);
1310 
1311 	p = page_address(page);
1312 
1313 	/* align our pointer to the offset of the super block */
1314 	disk_super = p + offset_in_page(bytenr);
1315 
1316 	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1317 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1318 		btrfs_release_disk_super(p);
1319 		return ERR_PTR(-EINVAL);
1320 	}
1321 
1322 	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1323 		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1324 
1325 	return disk_super;
1326 }
1327 
1328 int btrfs_forget_devices(const char *path)
1329 {
1330 	int ret;
1331 
1332 	mutex_lock(&uuid_mutex);
1333 	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1334 	mutex_unlock(&uuid_mutex);
1335 
1336 	return ret;
1337 }
1338 
1339 /*
1340  * Look for a btrfs signature on a device. This may be called outside of the
1341  * mount path and we are not allowed to call set_blocksize during the scan.
1342  * The superblock is read via the pagecache.
1343  */
1344 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1345 					   void *holder)
1346 {
1347 	struct btrfs_super_block *disk_super;
1348 	bool new_device_added = false;
1349 	struct btrfs_device *device = NULL;
1350 	struct block_device *bdev;
1351 	u64 bytenr, bytenr_orig;
1352 	int ret;
1353 
1354 	lockdep_assert_held(&uuid_mutex);
1355 
1356 	/*
1357 	 * we would like to check all the supers, but that would make
1358 	 * a btrfs mount succeed after a mkfs from a different FS.
1359 	 * So, we need to add a special mount option to scan for
1360 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1361 	 */
1362 	flags |= FMODE_EXCL;
1363 
1364 	bdev = blkdev_get_by_path(path, flags, holder);
1365 	if (IS_ERR(bdev))
1366 		return ERR_CAST(bdev);
1367 
1368 	bytenr_orig = btrfs_sb_offset(0);
1369 	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1370 	if (ret) {
1371 		device = ERR_PTR(ret);
		/* do not leak the just-opened bdev on error */
		goto error_bdev_put;
	}
1372 
1373 	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1374 	if (IS_ERR(disk_super)) {
1375 		device = ERR_CAST(disk_super);
1376 		goto error_bdev_put;
1377 	}
1378 
1379 	device = device_list_add(path, disk_super, &new_device_added);
1380 	if (!IS_ERR(device)) {
1381 		if (new_device_added)
1382 			btrfs_free_stale_devices(path, device);
1383 	}
1384 
1385 	btrfs_release_disk_super(disk_super);
1386 
1387 error_bdev_put:
1388 	blkdev_put(bdev, flags);
1389 
1390 	return device;
1391 }
1392 
1393 /*
1394  * Try to find a chunk that intersects the [start, start + len] range and,
1395  * when one such is found, record the end of it in *start
1396  */
1397 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1398 				    u64 len)
1399 {
1400 	u64 physical_start, physical_end;
1401 
1402 	lockdep_assert_held(&device->fs_info->chunk_mutex);
1403 
1404 	if (!find_first_extent_bit(&device->alloc_state, *start,
1405 				   &physical_start, &physical_end,
1406 				   CHUNK_ALLOCATED, NULL)) {
1407 
1408 		if (in_range(physical_start, *start, len) ||
1409 		    in_range(*start, physical_start,
1410 			     physical_end - physical_start)) {
1411 			*start = physical_end + 1;
1412 			return true;
1413 		}
1414 	}
1415 	return false;
1416 }
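
/*
 * Worked example (annotation): with a chunk recorded at [16M, 32M) in
 * device->alloc_state, a call with *start = 20M and len = 8M overlaps the
 * chunk, so *start is advanced to physical_end + 1 (32M) and the function
 * returns true.
 */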
1417 
1418 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1419 {
1420 	switch (device->fs_devices->chunk_alloc_policy) {
1421 	case BTRFS_CHUNK_ALLOC_REGULAR:
1422 		/*
1423 		 * We don't want to overwrite the superblock on the drive nor
1424 		 * any area used by the boot loader (grub for example), so we
1425 		 * make sure to start at an offset of at least 1MB.
1426 		 */
1427 		return max_t(u64, start, SZ_1M);
1428 	case BTRFS_CHUNK_ALLOC_ZONED:
1429 		/*
1430 		 * Unlike the regular allocator, we don't care about the
1431 		 * starting region, because we use/reserve the first two
1432 		 * zones for superblock logging anyway.
1433 		 */
1434 		return ALIGN(start, device->zone_info->zone_size);
1435 	default:
1436 		BUG();
1437 	}
1438 }
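
/*
 * Examples (annotation): with the REGULAR policy, start = 0 is bumped to
 * SZ_1M; with the ZONED policy and a 256M zone size, start = 300M is
 * aligned up to 512M.
 */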
1439 
1440 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1441 					u64 *hole_start, u64 *hole_size,
1442 					u64 num_bytes)
1443 {
1444 	u64 zone_size = device->zone_info->zone_size;
1445 	u64 pos;
1446 	int ret;
1447 	bool changed = false;
1448 
1449 	ASSERT(IS_ALIGNED(*hole_start, zone_size));
1450 
1451 	while (*hole_size > 0) {
1452 		pos = btrfs_find_allocatable_zones(device, *hole_start,
1453 						   *hole_start + *hole_size,
1454 						   num_bytes);
1455 		if (pos != *hole_start) {
1456 			*hole_size = *hole_start + *hole_size - pos;
1457 			*hole_start = pos;
1458 			changed = true;
1459 			if (*hole_size < num_bytes)
1460 				break;
1461 		}
1462 
1463 		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1464 
1465 		/* Range is ensured to be empty */
1466 		if (!ret)
1467 			return changed;
1468 
1469 		/* Given hole range was invalid (outside of device) */
1470 		if (ret == -ERANGE) {
1471 			*hole_start += *hole_size;
1472 			*hole_size = 0;
1473 			return true;
1474 		}
1475 
1476 		*hole_start += zone_size;
1477 		*hole_size -= zone_size;
1478 		changed = true;
1479 	}
1480 
1481 	return changed;
1482 }
1483 
1484 /**
1485  * dev_extent_hole_check - check if specified hole is suitable for allocation
1486  * @device:	the device which we have the hole
1487  * @hole_start: starting position of the hole
1488  * @hole_size:	the size of the hole
1489  * @num_bytes:	the size of the free space that we need
1490  *
1491  * This function may modify @hole_start and @hole_size to reflect the suitable
1492  * position for allocation. Returns true if the hole position was updated, false otherwise.
1493  */
1494 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1495 				  u64 *hole_size, u64 num_bytes)
1496 {
1497 	bool changed = false;
1498 	u64 hole_end = *hole_start + *hole_size;
1499 
1500 	for (;;) {
1501 		/*
1502 		 * Check before we set max_hole_start, otherwise we could end up
1503 		 * sending back this offset anyway.
1504 		 */
1505 		if (contains_pending_extent(device, hole_start, *hole_size)) {
1506 			if (hole_end >= *hole_start)
1507 				*hole_size = hole_end - *hole_start;
1508 			else
1509 				*hole_size = 0;
1510 			changed = true;
1511 		}
1512 
1513 		switch (device->fs_devices->chunk_alloc_policy) {
1514 		case BTRFS_CHUNK_ALLOC_REGULAR:
1515 			/* No extra check */
1516 			break;
1517 		case BTRFS_CHUNK_ALLOC_ZONED:
1518 			if (dev_extent_hole_check_zoned(device, hole_start,
1519 							hole_size, num_bytes)) {
1520 				changed = true;
1521 				/*
1522 				 * The changed hole can contain a pending extent.
1523 				 * Loop again to check that.
1524 				 */
1525 				continue;
1526 			}
1527 			break;
1528 		default:
1529 			BUG();
1530 		}
1531 
1532 		break;
1533 	}
1534 
1535 	return changed;
1536 }
1537 
1538 /*
1539  * find_free_dev_extent_start - find free space in the specified device
1540  * @device:	  the device in which we search for the free space
1541  * @num_bytes:	  the size of the free space that we need
1542  * @search_start: the position from which to begin the search
1543  * @start:	  store the start of the free space.
1544  * @len:	  the size of the free space that we find, or the size
1545  *		  of the max free space if we don't find suitable free space
1546  *
1547  * this uses a pretty simple search; the expectation is that it is
1548  * called very infrequently and that a given device has a small number
1549  * of extents
1550  *
1551  * @start is used to store the start of the free space if we find one. But if we
1552  * don't find suitable free space, it will be used to store the start position
1553  * of the max free space.
1554  *
1555  * @len is used to store the size of the free space that we find.
1556  * But if we don't find suitable free space, it is used to store the size of
1557  * the max free space.
1558  *
1559  * NOTE: This function will search *commit* root of device tree, and does extra
1560  * check to ensure dev extents are not double allocated.
1561  * This makes the function safe to allocate dev extents but may not report
1562  * correct usable device space, as device extent freed in current transaction
1563  * is not reported as available.
1564  */
1565 static int find_free_dev_extent_start(struct btrfs_device *device,
1566 				u64 num_bytes, u64 search_start, u64 *start,
1567 				u64 *len)
1568 {
1569 	struct btrfs_fs_info *fs_info = device->fs_info;
1570 	struct btrfs_root *root = fs_info->dev_root;
1571 	struct btrfs_key key;
1572 	struct btrfs_dev_extent *dev_extent;
1573 	struct btrfs_path *path;
1574 	u64 hole_size;
1575 	u64 max_hole_start;
1576 	u64 max_hole_size;
1577 	u64 extent_end;
1578 	u64 search_end = device->total_bytes;
1579 	int ret;
1580 	int slot;
1581 	struct extent_buffer *l;
1582 
1583 	search_start = dev_extent_search_start(device, search_start);
1584 
1585 	WARN_ON(device->zone_info &&
1586 		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1587 
1588 	path = btrfs_alloc_path();
1589 	if (!path)
1590 		return -ENOMEM;
1591 
1592 	max_hole_start = search_start;
1593 	max_hole_size = 0;
1594 
1595 again:
1596 	if (search_start >= search_end ||
1597 		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1598 		ret = -ENOSPC;
1599 		goto out;
1600 	}
1601 
1602 	path->reada = READA_FORWARD;
1603 	path->search_commit_root = 1;
1604 	path->skip_locking = 1;
1605 
1606 	key.objectid = device->devid;
1607 	key.offset = search_start;
1608 	key.type = BTRFS_DEV_EXTENT_KEY;
1609 
1610 	ret = btrfs_search_backwards(root, &key, path);
1611 	if (ret < 0)
1612 		goto out;
1613 
1614 	while (1) {
1615 		l = path->nodes[0];
1616 		slot = path->slots[0];
1617 		if (slot >= btrfs_header_nritems(l)) {
1618 			ret = btrfs_next_leaf(root, path);
1619 			if (ret == 0)
1620 				continue;
1621 			if (ret < 0)
1622 				goto out;
1623 
1624 			break;
1625 		}
1626 		btrfs_item_key_to_cpu(l, &key, slot);
1627 
1628 		if (key.objectid < device->devid)
1629 			goto next;
1630 
1631 		if (key.objectid > device->devid)
1632 			break;
1633 
1634 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1635 			goto next;
1636 
1637 		if (key.offset > search_start) {
1638 			hole_size = key.offset - search_start;
1639 			dev_extent_hole_check(device, &search_start, &hole_size,
1640 					      num_bytes);
1641 
1642 			if (hole_size > max_hole_size) {
1643 				max_hole_start = search_start;
1644 				max_hole_size = hole_size;
1645 			}
1646 
1647 			/*
1648 			 * If this free space is greater than what we need,
1649 			 * it must be the max free space that we have found
1650 			 * until now, so max_hole_start must point to the start
1651 			 * of this free space and the length of this free space
1652 			 * is stored in max_hole_size. Thus, we return
1653 			 * max_hole_start and max_hole_size and go back to the
1654 			 * caller.
1655 			 */
1656 			if (hole_size >= num_bytes) {
1657 				ret = 0;
1658 				goto out;
1659 			}
1660 		}
1661 
1662 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1663 		extent_end = key.offset + btrfs_dev_extent_length(l,
1664 								  dev_extent);
1665 		if (extent_end > search_start)
1666 			search_start = extent_end;
1667 next:
1668 		path->slots[0]++;
1669 		cond_resched();
1670 	}
1671 
1672 	/*
1673 	 * At this point, search_start should be the end of
1674 	 * allocated dev extents, and when shrinking the device,
1675 	 * search_end may be smaller than search_start.
1676 	 */
1677 	if (search_end > search_start) {
1678 		hole_size = search_end - search_start;
1679 		if (dev_extent_hole_check(device, &search_start, &hole_size,
1680 					  num_bytes)) {
1681 			btrfs_release_path(path);
1682 			goto again;
1683 		}
1684 
1685 		if (hole_size > max_hole_size) {
1686 			max_hole_start = search_start;
1687 			max_hole_size = hole_size;
1688 		}
1689 	}
1690 
1691 	/* See above. */
1692 	if (max_hole_size < num_bytes)
1693 		ret = -ENOSPC;
1694 	else
1695 		ret = 0;
1696 
1697 out:
1698 	btrfs_free_path(path);
1699 	*start = max_hole_start;
1700 	if (len)
1701 		*len = max_hole_size;
1702 	return ret;
1703 }
1704 
1705 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1706 			 u64 *start, u64 *len)
1707 {
1708 	/* FIXME use last free of some kind */
1709 	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1710 }
1711 
1712 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1713 			  struct btrfs_device *device,
1714 			  u64 start, u64 *dev_extent_len)
1715 {
1716 	struct btrfs_fs_info *fs_info = device->fs_info;
1717 	struct btrfs_root *root = fs_info->dev_root;
1718 	int ret;
1719 	struct btrfs_path *path;
1720 	struct btrfs_key key;
1721 	struct btrfs_key found_key;
1722 	struct extent_buffer *leaf = NULL;
1723 	struct btrfs_dev_extent *extent = NULL;
1724 
1725 	path = btrfs_alloc_path();
1726 	if (!path)
1727 		return -ENOMEM;
1728 
1729 	key.objectid = device->devid;
1730 	key.offset = start;
1731 	key.type = BTRFS_DEV_EXTENT_KEY;
1732 again:
1733 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1734 	if (ret > 0) {
1735 		ret = btrfs_previous_item(root, path, key.objectid,
1736 					  BTRFS_DEV_EXTENT_KEY);
1737 		if (ret)
1738 			goto out;
1739 		leaf = path->nodes[0];
1740 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1741 		extent = btrfs_item_ptr(leaf, path->slots[0],
1742 					struct btrfs_dev_extent);
1743 		BUG_ON(found_key.offset > start || found_key.offset +
1744 		       btrfs_dev_extent_length(leaf, extent) < start);
1745 		key = found_key;
1746 		btrfs_release_path(path);
1747 		goto again;
1748 	} else if (ret == 0) {
1749 		leaf = path->nodes[0];
1750 		extent = btrfs_item_ptr(leaf, path->slots[0],
1751 					struct btrfs_dev_extent);
1752 	} else {
1753 		goto out;
1754 	}
1755 
1756 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1757 
1758 	ret = btrfs_del_item(trans, root, path);
1759 	if (ret == 0)
1760 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1761 out:
1762 	btrfs_free_path(path);
1763 	return ret;
1764 }
1765 
1766 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1767 {
1768 	struct extent_map_tree *em_tree;
1769 	struct extent_map *em;
1770 	struct rb_node *n;
1771 	u64 ret = 0;
1772 
1773 	em_tree = &fs_info->mapping_tree;
1774 	read_lock(&em_tree->lock);
1775 	n = rb_last(&em_tree->map.rb_root);
1776 	if (n) {
1777 		em = rb_entry(n, struct extent_map, rb_node);
1778 		ret = em->start + em->len;
1779 	}
1780 	read_unlock(&em_tree->lock);
1781 
1782 	return ret;
1783 }
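
/*
 * Example (annotation): with chunk mappings [0, 1G) and [1G, 3G) in the
 * tree, the right-most extent_map has start = 1G and len = 2G, so the next
 * chunk offset returned above is 3G.
 */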
1784 
1785 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1786 				    u64 *devid_ret)
1787 {
1788 	int ret;
1789 	struct btrfs_key key;
1790 	struct btrfs_key found_key;
1791 	struct btrfs_path *path;
1792 
1793 	path = btrfs_alloc_path();
1794 	if (!path)
1795 		return -ENOMEM;
1796 
1797 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1798 	key.type = BTRFS_DEV_ITEM_KEY;
1799 	key.offset = (u64)-1;
1800 
1801 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1802 	if (ret < 0)
1803 		goto error;
1804 
1805 	if (ret == 0) {
1806 		/* Corruption */
1807 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1808 		ret = -EUCLEAN;
1809 		goto error;
1810 	}
1811 
1812 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1813 				  BTRFS_DEV_ITEMS_OBJECTID,
1814 				  BTRFS_DEV_ITEM_KEY);
1815 	if (ret) {
1816 		*devid_ret = 1;
1817 	} else {
1818 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1819 				      path->slots[0]);
1820 		*devid_ret = found_key.offset + 1;
1821 	}
1822 	ret = 0;
1823 error:
1824 	btrfs_free_path(path);
1825 	return ret;
1826 }
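
/*
 * Walk-through of the lookup above: the search key uses offset (u64)-1,
 * which can never match an existing devid (an exact match is treated as
 * corruption), so btrfs_search_slot() positions the path just past the last
 * DEV_ITEM and btrfs_previous_item() steps back onto it.  E.g. with devids
 * 1, 2 and 5 in the chunk tree, found_key.offset == 5 and *devid_ret == 6;
 * on an fs with no DEV_ITEMs the fallback is devid 1.
 */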
1827 
1828 /*
1829  * The device information is stored in the chunk root.
1830  * The btrfs_device struct passed in should be fully filled in.
1831  */
1832 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1833 			    struct btrfs_device *device)
1834 {
1835 	int ret;
1836 	struct btrfs_path *path;
1837 	struct btrfs_dev_item *dev_item;
1838 	struct extent_buffer *leaf;
1839 	struct btrfs_key key;
1840 	unsigned long ptr;
1841 
1842 	path = btrfs_alloc_path();
1843 	if (!path)
1844 		return -ENOMEM;
1845 
1846 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1847 	key.type = BTRFS_DEV_ITEM_KEY;
1848 	key.offset = device->devid;
1849 
1850 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1851 				      &key, sizeof(*dev_item));
1852 	if (ret)
1853 		goto out;
1854 
1855 	leaf = path->nodes[0];
1856 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1857 
1858 	btrfs_set_device_id(leaf, dev_item, device->devid);
1859 	btrfs_set_device_generation(leaf, dev_item, 0);
1860 	btrfs_set_device_type(leaf, dev_item, device->type);
1861 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1862 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1863 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1864 	btrfs_set_device_total_bytes(leaf, dev_item,
1865 				     btrfs_device_get_disk_total_bytes(device));
1866 	btrfs_set_device_bytes_used(leaf, dev_item,
1867 				    btrfs_device_get_bytes_used(device));
1868 	btrfs_set_device_group(leaf, dev_item, 0);
1869 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1870 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1871 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1872 
1873 	ptr = btrfs_device_uuid(dev_item);
1874 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1875 	ptr = btrfs_device_fsid(dev_item);
1876 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1877 			    ptr, BTRFS_FSID_SIZE);
1878 	btrfs_mark_buffer_dirty(leaf);
1879 
1880 	ret = 0;
1881 out:
1882 	btrfs_free_path(path);
1883 	return ret;
1884 }
1885 
1886 /*
1887  * Function to update ctime/mtime for a given device path.
1888  * Mainly used for ctime/mtime based probe like libblkid.
1889  */
1890 static void update_dev_time(struct block_device *bdev)
1891 {
1892 	struct inode *inode = bdev->bd_inode;
1893 	struct timespec64 now;
1894 
1895 	/* Shouldn't happen but just in case. */
1896 	if (!inode)
1897 		return;
1898 
1899 	now = current_time(inode);
1900 	generic_update_time(inode, &now, S_MTIME | S_CTIME);
1901 }
1902 
1903 static int btrfs_rm_dev_item(struct btrfs_device *device)
1904 {
1905 	struct btrfs_root *root = device->fs_info->chunk_root;
1906 	int ret;
1907 	struct btrfs_path *path;
1908 	struct btrfs_key key;
1909 	struct btrfs_trans_handle *trans;
1910 
1911 	path = btrfs_alloc_path();
1912 	if (!path)
1913 		return -ENOMEM;
1914 
1915 	trans = btrfs_start_transaction(root, 0);
1916 	if (IS_ERR(trans)) {
1917 		btrfs_free_path(path);
1918 		return PTR_ERR(trans);
1919 	}
1920 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1921 	key.type = BTRFS_DEV_ITEM_KEY;
1922 	key.offset = device->devid;
1923 
1924 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1925 	if (ret) {
1926 		if (ret > 0)
1927 			ret = -ENOENT;
1928 		btrfs_abort_transaction(trans, ret);
1929 		btrfs_end_transaction(trans);
1930 		goto out;
1931 	}
1932 
1933 	ret = btrfs_del_item(trans, root, path);
1934 	if (ret) {
1935 		btrfs_abort_transaction(trans, ret);
1936 		btrfs_end_transaction(trans);
1937 	}
1938 
1939 out:
1940 	btrfs_free_path(path);
1941 	if (!ret)
1942 		ret = btrfs_commit_transaction(trans);
1943 	return ret;
1944 }
1945 
1946 /*
1947  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1948  * filesystem. It's up to the caller to adjust that number regarding eg. device
1949  * replace.
1950  */
1951 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1952 		u64 num_devices)
1953 {
1954 	u64 all_avail;
1955 	unsigned seq;
1956 	int i;
1957 
1958 	do {
1959 		seq = read_seqbegin(&fs_info->profiles_lock);
1960 
1961 		all_avail = fs_info->avail_data_alloc_bits |
1962 			    fs_info->avail_system_alloc_bits |
1963 			    fs_info->avail_metadata_alloc_bits;
1964 	} while (read_seqretry(&fs_info->profiles_lock, seq));
1965 
1966 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1967 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
1968 			continue;
1969 
1970 		if (num_devices < btrfs_raid_array[i].devs_min)
1971 			return btrfs_raid_array[i].mindev_error;
1972 	}
1973 
1974 	return 0;
1975 }
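
/*
 * Worked example for the check above: on a two-device filesystem with raid1
 * block groups (devs_min == 2), removing a device passes num_devices == 1
 * here, which is below devs_min, so the raid1 mindev_error is returned and
 * the removal is refused.
 */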
1976 
1977 static struct btrfs_device * btrfs_find_next_active_device(
1978 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1979 {
1980 	struct btrfs_device *next_device;
1981 
1982 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1983 		if (next_device != device &&
1984 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1985 		    && next_device->bdev)
1986 			return next_device;
1987 	}
1988 
1989 	return NULL;
1990 }
1991 
1992 /*
1993  * Helper function to check if the given device is part of s_bdev / latest_dev
1994  * and replace it with the provided or the next active device. In the context
1995  * where this function is called, there should always be another active device
1996  * (or next_device) available.
1997  */
1998 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1999 					    struct btrfs_device *next_device)
2000 {
2001 	struct btrfs_fs_info *fs_info = device->fs_info;
2002 
2003 	if (!next_device)
2004 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2005 							    device);
2006 	ASSERT(next_device);
2007 
2008 	if (fs_info->sb->s_bdev &&
2009 			(fs_info->sb->s_bdev == device->bdev))
2010 		fs_info->sb->s_bdev = next_device->bdev;
2011 
2012 	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
2013 		fs_info->fs_devices->latest_dev = next_device;
2014 }
2015 
2016 /*
2017  * Return btrfs_fs_devices::num_devices excluding the device that is
2018  * currently being replaced.
2019  */
2020 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2021 {
2022 	u64 num_devices = fs_info->fs_devices->num_devices;
2023 
2024 	down_read(&fs_info->dev_replace.rwsem);
2025 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2026 		ASSERT(num_devices > 1);
2027 		num_devices--;
2028 	}
2029 	up_read(&fs_info->dev_replace.rwsem);
2030 
2031 	return num_devices;
2032 }
2033 
2034 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2035 			       struct block_device *bdev,
2036 			       const char *device_path)
2037 {
2038 	struct btrfs_super_block *disk_super;
2039 	int copy_num;
2040 
2041 	if (!bdev)
2042 		return;
2043 
2044 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2045 		struct page *page;
2046 		int ret;
2047 
2048 		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2049 		if (IS_ERR(disk_super))
2050 			continue;
2051 
2052 		if (bdev_is_zoned(bdev)) {
2053 			btrfs_reset_sb_log_zones(bdev, copy_num);
2054 			continue;
2055 		}
2056 
2057 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2058 
2059 		page = virt_to_page(disk_super);
2060 		set_page_dirty(page);
2061 		lock_page(page);
2062 		/* write_one_page() unlocks the page */
2063 		ret = write_one_page(page);
2064 		if (ret)
2065 			btrfs_warn(fs_info,
2066 				"error clearing superblock number %d (%d)",
2067 				copy_num, ret);
2068 		btrfs_release_disk_super(disk_super);
2069 
2070 	}
2071 
2072 	/* Notify udev that device has changed */
2073 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2074 
2075 	/* Update ctime/mtime for device path for libblkid */
2076 	update_dev_time(bdev);
2077 }
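
/*
 * Note on the scratching above: for regular devices only the superblock
 * magic is wiped, which is enough for a later scan to reject every copy
 * (the read path treats a bad magic as "no btrfs superblock here"), so the
 * device stops being detected as btrfs even though the rest of each
 * superblock stays intact.  Zoned devices instead get their superblock log
 * zones reset.
 */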
2078 
2079 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2080 		    struct btrfs_dev_lookup_args *args,
2081 		    struct block_device **bdev, fmode_t *mode)
2082 {
2083 	struct btrfs_device *device;
2084 	struct btrfs_fs_devices *cur_devices;
2085 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2086 	u64 num_devices;
2087 	int ret = 0;
2088 
2089 	/*
2090 	 * The device list in fs_devices is accessed without locks (neither
2091 	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2092 	 * filesystem and another device rm cannot run.
2093 	 */
2094 	num_devices = btrfs_num_devices(fs_info);
2095 
2096 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2097 	if (ret)
2098 		goto out;
2099 
2100 	device = btrfs_find_device(fs_info->fs_devices, args);
2101 	if (!device) {
2102 		if (args->missing)
2103 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2104 		else
2105 			ret = -ENOENT;
2106 		goto out;
2107 	}
2108 
2109 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2110 		btrfs_warn_in_rcu(fs_info,
2111 		  "cannot remove device %s (devid %llu) due to active swapfile",
2112 				  rcu_str_deref(device->name), device->devid);
2113 		ret = -ETXTBSY;
2114 		goto out;
2115 	}
2116 
2117 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2118 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2119 		goto out;
2120 	}
2121 
2122 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2123 	    fs_info->fs_devices->rw_devices == 1) {
2124 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2125 		goto out;
2126 	}
2127 
2128 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2129 		mutex_lock(&fs_info->chunk_mutex);
2130 		list_del_init(&device->dev_alloc_list);
2131 		device->fs_devices->rw_devices--;
2132 		mutex_unlock(&fs_info->chunk_mutex);
2133 	}
2134 
2135 	ret = btrfs_shrink_device(device, 0);
2136 	if (!ret)
2137 		btrfs_reada_remove_dev(device);
2138 	if (ret)
2139 		goto error_undo;
2140 
2141 	/*
2142 	 * TODO: the superblock still includes this device in its num_devices
2143 	 * counter although write_all_supers() is not locked out. This
2144 	 * could give a filesystem state which requires a degraded mount.
2145 	 */
2146 	ret = btrfs_rm_dev_item(device);
2147 	if (ret)
2148 		goto error_undo;
2149 
2150 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2151 	btrfs_scrub_cancel_dev(device);
2152 
2153 	/*
2154 	 * The device list mutex makes sure that we don't change
2155 	 * the device list while someone else is writing out all
2156 	 * the device supers. Whoever is writing all supers should
2157 	 * lock the device list mutex before getting the number of
2158 	 * devices in the super block (super_copy). Conversely,
2159 	 * whoever updates the number of devices in the super block
2160 	 * (super_copy) should hold the device list mutex.
2161 	 */
2162 
2163 	/*
2164 	 * In normal cases cur_devices == fs_devices. But when deleting
2165 	 * a seed device, cur_devices points to the seed's own fs_devices,
2166 	 * listed under fs_devices->seed_list.
2167 	 */
2168 	cur_devices = device->fs_devices;
2169 	mutex_lock(&fs_devices->device_list_mutex);
2170 	list_del_rcu(&device->dev_list);
2171 
2172 	cur_devices->num_devices--;
2173 	cur_devices->total_devices--;
2174 	/* Update total_devices of the parent fs_devices if it's seed */
2175 	if (cur_devices != fs_devices)
2176 		fs_devices->total_devices--;
2177 
2178 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2179 		cur_devices->missing_devices--;
2180 
2181 	btrfs_assign_next_active_device(device, NULL);
2182 
2183 	if (device->bdev) {
2184 		cur_devices->open_devices--;
2185 		/* remove sysfs entry */
2186 		btrfs_sysfs_remove_device(device);
2187 	}
2188 
2189 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2190 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2191 	mutex_unlock(&fs_devices->device_list_mutex);
2192 
2193 	/*
2194 	 * At this point, the device is zero sized and detached from the
2195 	 * devices list.  All that's left is to zero out the old supers and
2196 	 * free the device.
2197 	 *
2198 	 * We cannot call btrfs_close_bdev() here because we're holding the sb
2199 	 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2200 	 * block device and its dependencies.  Instead just flush the device
2201 	 * and let the caller do the final blkdev_put.
2202 	 */
2203 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2204 		btrfs_scratch_superblocks(fs_info, device->bdev,
2205 					  device->name->str);
2206 		if (device->bdev) {
2207 			sync_blockdev(device->bdev);
2208 			invalidate_bdev(device->bdev);
2209 		}
2210 	}
2211 
2212 	*bdev = device->bdev;
2213 	*mode = device->mode;
2214 	synchronize_rcu();
2215 	btrfs_free_device(device);
2216 
2217 	/*
2218 	 * This can happen if cur_devices is the private seed devices list.  We
2219 	 * cannot call close_fs_devices() here because it expects the uuid_mutex
2220 	 * to be held, but in fact we don't need that for the private
2221 	 * seed_devices, we can simply decrement cur_devices->opened and then
2222 	 * remove it from our list and free the fs_devices.
2223 	 */
2224 	if (cur_devices->num_devices == 0) {
2225 		list_del_init(&cur_devices->seed_list);
2226 		ASSERT(cur_devices->opened == 1);
2227 		cur_devices->opened--;
2228 		free_fs_devices(cur_devices);
2229 	}
2230 
2231 out:
2232 	return ret;
2233 
2234 error_undo:
2235 	btrfs_reada_undo_remove_dev(device);
2236 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2237 		mutex_lock(&fs_info->chunk_mutex);
2238 		list_add(&device->dev_alloc_list,
2239 			 &fs_devices->alloc_list);
2240 		device->fs_devices->rw_devices++;
2241 		mutex_unlock(&fs_info->chunk_mutex);
2242 	}
2243 	goto out;
2244 }
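
/*
 * Sketch of the expected caller side (illustrative, not in this file),
 * following the comment in btrfs_rm_device() about leaving the final
 * blkdev_put() to the caller:
 *
 *	struct block_device *bdev = NULL;
 *	fmode_t mode;
 *
 *	ret = btrfs_rm_device(fs_info, &args, &bdev, &mode);
 *	... drop the locks that conflict with ->open_mutex ...
 *	if (bdev)
 *		blkdev_put(bdev, mode);
 */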
2245 
2246 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2247 {
2248 	struct btrfs_fs_devices *fs_devices;
2249 
2250 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2251 
2252 	/*
2253 	 * In the case of an fs with no seed, srcdev->fs_devices points
2254 	 * to the fs_devices of fs_info. However, when the device being
2255 	 * replaced is a seed device, it points to the seed's local
2256 	 * fs_devices. In short, srcdev has the correct fs_devices in both cases.
2257 	 */
2258 	fs_devices = srcdev->fs_devices;
2259 
2260 	list_del_rcu(&srcdev->dev_list);
2261 	list_del(&srcdev->dev_alloc_list);
2262 	fs_devices->num_devices--;
2263 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2264 		fs_devices->missing_devices--;
2265 
2266 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2267 		fs_devices->rw_devices--;
2268 
2269 	if (srcdev->bdev)
2270 		fs_devices->open_devices--;
2271 }
2272 
2273 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2274 {
2275 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2276 
2277 	mutex_lock(&uuid_mutex);
2278 
2279 	btrfs_close_bdev(srcdev);
2280 	synchronize_rcu();
2281 	btrfs_free_device(srcdev);
2282 
2283 	/* If there are no devices left we'd rather delete the fs_devices */
2284 	if (!fs_devices->num_devices) {
2285 		/*
2286 		 * On a mounted FS, num_devices can't be zero unless it's a
2287 		 * seed. In case of a seed device being replaced, the replace
2288 		 * target is added to the sprout FS, so there will be no
2289 		 * devices left under the seed FS.
2290 		 */
2291 		ASSERT(fs_devices->seeding);
2292 
2293 		list_del_init(&fs_devices->seed_list);
2294 		close_fs_devices(fs_devices);
2295 		free_fs_devices(fs_devices);
2296 	}
2297 	mutex_unlock(&uuid_mutex);
2298 }
2299 
2300 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2301 {
2302 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2303 
2304 	mutex_lock(&fs_devices->device_list_mutex);
2305 
2306 	btrfs_sysfs_remove_device(tgtdev);
2307 
2308 	if (tgtdev->bdev)
2309 		fs_devices->open_devices--;
2310 
2311 	fs_devices->num_devices--;
2312 
2313 	btrfs_assign_next_active_device(tgtdev, NULL);
2314 
2315 	list_del_rcu(&tgtdev->dev_list);
2316 
2317 	mutex_unlock(&fs_devices->device_list_mutex);
2318 
2319 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2320 				  tgtdev->name->str);
2321 
2322 	btrfs_close_bdev(tgtdev);
2323 	synchronize_rcu();
2324 	btrfs_free_device(tgtdev);
2325 }
2326 
2327 /**
2328  * Populate args from device at path
2329  *
2330  * @fs_info:	the filesystem
2331  * @args:	the args to populate
2332  * @path:	the path to the device
2333  *
2334  * This will read the super block of the device at @path and populate @args with
2335  * the devid, fsid, and uuid.  This is meant to be used for ioctls that need to
2336  * lookup a device to operate on, but need to do it before we take any locks.
2337  * This properly handles the special case of "missing" that a user may pass in,
2338  * and does some basic sanity checks.  The caller must make sure that @path is
2339  * properly NUL terminated before calling in, and must call
2340  * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2341  * uuid buffers.
2342  *
2343  * Return: 0 for success, -errno for failure
2344  */
2345 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2346 				 struct btrfs_dev_lookup_args *args,
2347 				 const char *path)
2348 {
2349 	struct btrfs_super_block *disk_super;
2350 	struct block_device *bdev;
2351 	int ret;
2352 
2353 	if (!path || !path[0])
2354 		return -EINVAL;
2355 	if (!strcmp(path, "missing")) {
2356 		args->missing = true;
2357 		return 0;
2358 	}
2359 
2360 	args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2361 	args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2362 	if (!args->uuid || !args->fsid) {
2363 		btrfs_put_dev_args_from_path(args);
2364 		return -ENOMEM;
2365 	}
2366 
2367 	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2368 				    &bdev, &disk_super);
2369 	if (ret) {
		/* Don't leak the uuid/fsid buffers allocated above. */
		btrfs_put_dev_args_from_path(args);
2370 		return ret;
	}
2371 	args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2372 	memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2373 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2374 		memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2375 	else
2376 		memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2377 	btrfs_release_disk_super(disk_super);
2378 	blkdev_put(bdev, FMODE_READ);
2379 	return 0;
2380 }
2381 
2382 /*
2383  * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2384  * allocate our ->uuid and ->fsid pointers; everybody else uses local variables
2385  * that don't need to be freed.
2386  */
2387 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2388 {
2389 	kfree(args->uuid);
2390 	kfree(args->fsid);
2391 	args->uuid = NULL;
2392 	args->fsid = NULL;
2393 }
2394 
2395 struct btrfs_device *btrfs_find_device_by_devspec(
2396 		struct btrfs_fs_info *fs_info, u64 devid,
2397 		const char *device_path)
2398 {
2399 	BTRFS_DEV_LOOKUP_ARGS(args);
2400 	struct btrfs_device *device;
2401 	int ret;
2402 
2403 	if (devid) {
2404 		args.devid = devid;
2405 		device = btrfs_find_device(fs_info->fs_devices, &args);
2406 		if (!device)
2407 			return ERR_PTR(-ENOENT);
2408 		return device;
2409 	}
2410 
2411 	ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2412 	if (ret)
2413 		return ERR_PTR(ret);
2414 	device = btrfs_find_device(fs_info->fs_devices, &args);
2415 	btrfs_put_dev_args_from_path(&args);
2416 	if (!device)
2417 		return ERR_PTR(-ENOENT);
2418 	return device;
2419 }
2420 
2421 /*
2422  * Does all the dirty work required for changing the filesystem's UUID.
2423  */
2424 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2425 {
2426 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2427 	struct btrfs_fs_devices *old_devices;
2428 	struct btrfs_fs_devices *seed_devices;
2429 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2430 	struct btrfs_device *device;
2431 	u64 super_flags;
2432 
2433 	lockdep_assert_held(&uuid_mutex);
2434 	if (!fs_devices->seeding)
2435 		return -EINVAL;
2436 
2437 	/*
2438 	 * Private copy of the seed devices, anchored at
2439 	 * fs_info->fs_devices->seed_list
2440 	 */
2441 	seed_devices = alloc_fs_devices(NULL, NULL);
2442 	if (IS_ERR(seed_devices))
2443 		return PTR_ERR(seed_devices);
2444 
2445 	/*
2446 	 * It's necessary to retain a copy of the original seed fs_devices in
2447 	 * fs_uuids so that filesystems which have been seeded can successfully
2448 	 * reference the seed device from open_seed_devices. This also supports
2449 	 * multiple fs seed.
2450 	 */
2451 	old_devices = clone_fs_devices(fs_devices);
2452 	if (IS_ERR(old_devices)) {
2453 		kfree(seed_devices);
2454 		return PTR_ERR(old_devices);
2455 	}
2456 
2457 	list_add(&old_devices->fs_list, &fs_uuids);
2458 
2459 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2460 	seed_devices->opened = 1;
2461 	INIT_LIST_HEAD(&seed_devices->devices);
2462 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2463 	mutex_init(&seed_devices->device_list_mutex);
2464 
2465 	mutex_lock(&fs_devices->device_list_mutex);
2466 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2467 			      synchronize_rcu);
2468 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2469 		device->fs_devices = seed_devices;
2470 
2471 	fs_devices->seeding = false;
2472 	fs_devices->num_devices = 0;
2473 	fs_devices->open_devices = 0;
2474 	fs_devices->missing_devices = 0;
2475 	fs_devices->rotating = false;
2476 	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2477 
2478 	generate_random_uuid(fs_devices->fsid);
2479 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2480 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2481 	mutex_unlock(&fs_devices->device_list_mutex);
2482 
2483 	super_flags = btrfs_super_flags(disk_super) &
2484 		      ~BTRFS_SUPER_FLAG_SEEDING;
2485 	btrfs_set_super_flags(disk_super, super_flags);
2486 
2487 	return 0;
2488 }
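
/*
 * After btrfs_prepare_sprout() the device lists look roughly like this
 * (illustrative):
 *
 *	fs_info->fs_devices            (fresh fsid, 0 devices, seeding == false)
 *	    seed_list -> seed_devices  (all the old devices, opened == 1)
 *
 * The caller (btrfs_init_new_device()) then adds the new writable device to
 * the now-empty sprout fs_devices.
 */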
2489 
2490 /*
2491  * Store the expected generation for seed devices in device items.
2492  */
2493 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2494 {
2495 	BTRFS_DEV_LOOKUP_ARGS(args);
2496 	struct btrfs_fs_info *fs_info = trans->fs_info;
2497 	struct btrfs_root *root = fs_info->chunk_root;
2498 	struct btrfs_path *path;
2499 	struct extent_buffer *leaf;
2500 	struct btrfs_dev_item *dev_item;
2501 	struct btrfs_device *device;
2502 	struct btrfs_key key;
2503 	u8 fs_uuid[BTRFS_FSID_SIZE];
2504 	u8 dev_uuid[BTRFS_UUID_SIZE];
2505 	int ret;
2506 
2507 	path = btrfs_alloc_path();
2508 	if (!path)
2509 		return -ENOMEM;
2510 
2511 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2512 	key.offset = 0;
2513 	key.type = BTRFS_DEV_ITEM_KEY;
2514 
2515 	while (1) {
2516 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2517 		if (ret < 0)
2518 			goto error;
2519 
2520 		leaf = path->nodes[0];
2521 next_slot:
2522 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2523 			ret = btrfs_next_leaf(root, path);
2524 			if (ret > 0)
2525 				break;
2526 			if (ret < 0)
2527 				goto error;
2528 			leaf = path->nodes[0];
2529 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2530 			btrfs_release_path(path);
2531 			continue;
2532 		}
2533 
2534 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2535 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2536 		    key.type != BTRFS_DEV_ITEM_KEY)
2537 			break;
2538 
2539 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2540 					  struct btrfs_dev_item);
2541 		args.devid = btrfs_device_id(leaf, dev_item);
2542 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2543 				   BTRFS_UUID_SIZE);
2544 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2545 				   BTRFS_FSID_SIZE);
2546 		args.uuid = dev_uuid;
2547 		args.fsid = fs_uuid;
2548 		device = btrfs_find_device(fs_info->fs_devices, &args);
2549 		BUG_ON(!device); /* Logic error */
2550 
2551 		if (device->fs_devices->seeding) {
2552 			btrfs_set_device_generation(leaf, dev_item,
2553 						    device->generation);
2554 			btrfs_mark_buffer_dirty(leaf);
2555 		}
2556 
2557 		path->slots[0]++;
2558 		goto next_slot;
2559 	}
2560 	ret = 0;
2561 error:
2562 	btrfs_free_path(path);
2563 	return ret;
2564 }
2565 
2566 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2567 {
2568 	struct btrfs_root *root = fs_info->dev_root;
2569 	struct request_queue *q;
2570 	struct btrfs_trans_handle *trans;
2571 	struct btrfs_device *device;
2572 	struct block_device *bdev;
2573 	struct super_block *sb = fs_info->sb;
2574 	struct rcu_string *name;
2575 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2576 	u64 orig_super_total_bytes;
2577 	u64 orig_super_num_devices;
2578 	int seeding_dev = 0;
2579 	int ret = 0;
2580 	bool locked = false;
2581 
2582 	if (sb_rdonly(sb) && !fs_devices->seeding)
2583 		return -EROFS;
2584 
2585 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2586 				  fs_info->bdev_holder);
2587 	if (IS_ERR(bdev))
2588 		return PTR_ERR(bdev);
2589 
2590 	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2591 		ret = -EINVAL;
2592 		goto error;
2593 	}
2594 
2595 	if (fs_devices->seeding) {
2596 		seeding_dev = 1;
2597 		down_write(&sb->s_umount);
2598 		mutex_lock(&uuid_mutex);
2599 		locked = true;
2600 	}
2601 
2602 	sync_blockdev(bdev);
2603 
2604 	rcu_read_lock();
2605 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2606 		if (device->bdev == bdev) {
2607 			ret = -EEXIST;
2608 			rcu_read_unlock();
2609 			goto error;
2610 		}
2611 	}
2612 	rcu_read_unlock();
2613 
2614 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2615 	if (IS_ERR(device)) {
2616 		/* we can safely leave the fs_devices entry around */
2617 		ret = PTR_ERR(device);
2618 		goto error;
2619 	}
2620 
2621 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2622 	if (!name) {
2623 		ret = -ENOMEM;
2624 		goto error_free_device;
2625 	}
2626 	rcu_assign_pointer(device->name, name);
2627 
2628 	device->fs_info = fs_info;
2629 	device->bdev = bdev;
2630 
2631 	ret = btrfs_get_dev_zone_info(device);
2632 	if (ret)
2633 		goto error_free_device;
2634 
2635 	trans = btrfs_start_transaction(root, 0);
2636 	if (IS_ERR(trans)) {
2637 		ret = PTR_ERR(trans);
2638 		goto error_free_zone;
2639 	}
2640 
2641 	q = bdev_get_queue(bdev);
2642 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2643 	device->generation = trans->transid;
2644 	device->io_width = fs_info->sectorsize;
2645 	device->io_align = fs_info->sectorsize;
2646 	device->sector_size = fs_info->sectorsize;
2647 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2648 					 fs_info->sectorsize);
2649 	device->disk_total_bytes = device->total_bytes;
2650 	device->commit_total_bytes = device->total_bytes;
2651 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2652 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2653 	device->mode = FMODE_EXCL;
2654 	device->dev_stats_valid = 1;
2655 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2656 
2657 	if (seeding_dev) {
2658 		btrfs_clear_sb_rdonly(sb);
2659 		ret = btrfs_prepare_sprout(fs_info);
2660 		if (ret) {
2661 			btrfs_abort_transaction(trans, ret);
2662 			goto error_trans;
2663 		}
2664 		btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2665 						device);
2666 	}
2667 
2668 	device->fs_devices = fs_devices;
2669 
2670 	mutex_lock(&fs_devices->device_list_mutex);
2671 	mutex_lock(&fs_info->chunk_mutex);
2672 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2673 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2674 	fs_devices->num_devices++;
2675 	fs_devices->open_devices++;
2676 	fs_devices->rw_devices++;
2677 	fs_devices->total_devices++;
2678 	fs_devices->total_rw_bytes += device->total_bytes;
2679 
2680 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2681 
2682 	if (!blk_queue_nonrot(q))
2683 		fs_devices->rotating = true;
2684 
2685 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2686 	btrfs_set_super_total_bytes(fs_info->super_copy,
2687 		round_down(orig_super_total_bytes + device->total_bytes,
2688 			   fs_info->sectorsize));
2689 
2690 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2691 	btrfs_set_super_num_devices(fs_info->super_copy,
2692 				    orig_super_num_devices + 1);
2693 
2694 	/*
2695 	 * we've got more storage, clear any full flags on the space
2696 	 * infos
2697 	 */
2698 	btrfs_clear_space_info_full(fs_info);
2699 
2700 	mutex_unlock(&fs_info->chunk_mutex);
2701 
2702 	/* Add sysfs device entry */
2703 	btrfs_sysfs_add_device(device);
2704 
2705 	mutex_unlock(&fs_devices->device_list_mutex);
2706 
2707 	if (seeding_dev) {
2708 		mutex_lock(&fs_info->chunk_mutex);
2709 		ret = init_first_rw_device(trans);
2710 		mutex_unlock(&fs_info->chunk_mutex);
2711 		if (ret) {
2712 			btrfs_abort_transaction(trans, ret);
2713 			goto error_sysfs;
2714 		}
2715 	}
2716 
2717 	ret = btrfs_add_dev_item(trans, device);
2718 	if (ret) {
2719 		btrfs_abort_transaction(trans, ret);
2720 		goto error_sysfs;
2721 	}
2722 
2723 	if (seeding_dev) {
2724 		ret = btrfs_finish_sprout(trans);
2725 		if (ret) {
2726 			btrfs_abort_transaction(trans, ret);
2727 			goto error_sysfs;
2728 		}
2729 
2730 		/*
2731 		 * fs_devices now represents the newly sprouted filesystem and
2732 		 * its fsid has been changed by btrfs_prepare_sprout
2733 		 */
2734 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2735 	}
2736 
2737 	ret = btrfs_commit_transaction(trans);
2738 
2739 	if (seeding_dev) {
2740 		mutex_unlock(&uuid_mutex);
2741 		up_write(&sb->s_umount);
2742 		locked = false;
2743 
2744 		if (ret) /* transaction commit */
2745 			return ret;
2746 
2747 		ret = btrfs_relocate_sys_chunks(fs_info);
2748 		if (ret < 0)
2749 			btrfs_handle_fs_error(fs_info, ret,
2750 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2751 		trans = btrfs_attach_transaction(root);
2752 		if (IS_ERR(trans)) {
2753 			if (PTR_ERR(trans) == -ENOENT)
2754 				return 0;
2755 			ret = PTR_ERR(trans);
2756 			trans = NULL;
2757 			goto error_sysfs;
2758 		}
2759 		ret = btrfs_commit_transaction(trans);
2760 	}
2761 
2762 	/*
2763 	 * Now that we have written a new super block to this device, check all
2764 	 * other fs_devices lists to see if device_path alienates any other
2765 	 * scanned device.
2766 	 * We can ignore the return value as it typically returns -EINVAL and
2767 	 * only succeeds if the device was an alien.
2768 	 */
2769 	btrfs_forget_devices(device_path);
2770 
2771 	/* Update ctime/mtime for blkid or udev */
2772 	update_dev_time(bdev);
2773 
2774 	return ret;
2775 
2776 error_sysfs:
2777 	btrfs_sysfs_remove_device(device);
2778 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2779 	mutex_lock(&fs_info->chunk_mutex);
2780 	list_del_rcu(&device->dev_list);
2781 	list_del(&device->dev_alloc_list);
2782 	fs_info->fs_devices->num_devices--;
2783 	fs_info->fs_devices->open_devices--;
2784 	fs_info->fs_devices->rw_devices--;
2785 	fs_info->fs_devices->total_devices--;
2786 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2787 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2788 	btrfs_set_super_total_bytes(fs_info->super_copy,
2789 				    orig_super_total_bytes);
2790 	btrfs_set_super_num_devices(fs_info->super_copy,
2791 				    orig_super_num_devices);
2792 	mutex_unlock(&fs_info->chunk_mutex);
2793 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2794 error_trans:
2795 	if (seeding_dev)
2796 		btrfs_set_sb_rdonly(sb);
2797 	if (trans)
2798 		btrfs_end_transaction(trans);
2799 error_free_zone:
2800 	btrfs_destroy_dev_zone_info(device);
2801 error_free_device:
2802 	btrfs_free_device(device);
2803 error:
2804 	blkdev_put(bdev, FMODE_EXCL);
2805 	if (locked) {
2806 		mutex_unlock(&uuid_mutex);
2807 		up_write(&sb->s_umount);
2808 	}
2809 	return ret;
2810 }
2811 
2812 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2813 					struct btrfs_device *device)
2814 {
2815 	int ret;
2816 	struct btrfs_path *path;
2817 	struct btrfs_root *root = device->fs_info->chunk_root;
2818 	struct btrfs_dev_item *dev_item;
2819 	struct extent_buffer *leaf;
2820 	struct btrfs_key key;
2821 
2822 	path = btrfs_alloc_path();
2823 	if (!path)
2824 		return -ENOMEM;
2825 
2826 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2827 	key.type = BTRFS_DEV_ITEM_KEY;
2828 	key.offset = device->devid;
2829 
2830 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2831 	if (ret < 0)
2832 		goto out;
2833 
2834 	if (ret > 0) {
2835 		ret = -ENOENT;
2836 		goto out;
2837 	}
2838 
2839 	leaf = path->nodes[0];
2840 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2841 
2842 	btrfs_set_device_id(leaf, dev_item, device->devid);
2843 	btrfs_set_device_type(leaf, dev_item, device->type);
2844 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2845 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2846 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2847 	btrfs_set_device_total_bytes(leaf, dev_item,
2848 				     btrfs_device_get_disk_total_bytes(device));
2849 	btrfs_set_device_bytes_used(leaf, dev_item,
2850 				    btrfs_device_get_bytes_used(device));
2851 	btrfs_mark_buffer_dirty(leaf);
2852 
2853 out:
2854 	btrfs_free_path(path);
2855 	return ret;
2856 }
2857 
2858 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2859 		      struct btrfs_device *device, u64 new_size)
2860 {
2861 	struct btrfs_fs_info *fs_info = device->fs_info;
2862 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2863 	u64 old_total;
2864 	u64 diff;
2865 
2866 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2867 		return -EACCES;
2868 
2869 	new_size = round_down(new_size, fs_info->sectorsize);
2870 
2871 	mutex_lock(&fs_info->chunk_mutex);
2872 	old_total = btrfs_super_total_bytes(super_copy);
2873 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2874 
2875 	if (new_size <= device->total_bytes ||
2876 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2877 		mutex_unlock(&fs_info->chunk_mutex);
2878 		return -EINVAL;
2879 	}
2880 
2881 	btrfs_set_super_total_bytes(super_copy,
2882 			round_down(old_total + diff, fs_info->sectorsize));
2883 	device->fs_devices->total_rw_bytes += diff;
2884 
2885 	btrfs_device_set_total_bytes(device, new_size);
2886 	btrfs_device_set_disk_total_bytes(device, new_size);
2887 	btrfs_clear_space_info_full(device->fs_info);
2888 	if (list_empty(&device->post_commit_list))
2889 		list_add_tail(&device->post_commit_list,
2890 			      &trans->transaction->dev_update_list);
2891 	mutex_unlock(&fs_info->chunk_mutex);
2892 
2893 	return btrfs_update_device(trans, device);
2894 }
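
/*
 * Rounding example for btrfs_grow_device(), assuming a 4KiB sectorsize:
 * growing a 10GiB device with new_size == 20GiB + 100 bytes first rounds
 * new_size down to 20GiB, so diff == 10GiB and the super block's
 * total_bytes grows by exactly 10GiB, staying sector aligned.
 */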
2895 
2896 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2897 {
2898 	struct btrfs_fs_info *fs_info = trans->fs_info;
2899 	struct btrfs_root *root = fs_info->chunk_root;
2900 	int ret;
2901 	struct btrfs_path *path;
2902 	struct btrfs_key key;
2903 
2904 	path = btrfs_alloc_path();
2905 	if (!path)
2906 		return -ENOMEM;
2907 
2908 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2909 	key.offset = chunk_offset;
2910 	key.type = BTRFS_CHUNK_ITEM_KEY;
2911 
2912 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2913 	if (ret < 0)
2914 		goto out;
2915 	else if (ret > 0) { /* Logic error or corruption */
2916 		btrfs_handle_fs_error(fs_info, -ENOENT,
2917 				      "Failed lookup while freeing chunk.");
2918 		ret = -ENOENT;
2919 		goto out;
2920 	}
2921 
2922 	ret = btrfs_del_item(trans, root, path);
2923 	if (ret < 0)
2924 		btrfs_handle_fs_error(fs_info, ret,
2925 				      "Failed to delete chunk item.");
2926 out:
2927 	btrfs_free_path(path);
2928 	return ret;
2929 }
2930 
2931 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2932 {
2933 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2934 	struct btrfs_disk_key *disk_key;
2935 	struct btrfs_chunk *chunk;
2936 	u8 *ptr;
2937 	int ret = 0;
2938 	u32 num_stripes;
2939 	u32 array_size;
2940 	u32 len = 0;
2941 	u32 cur;
2942 	struct btrfs_key key;
2943 
2944 	lockdep_assert_held(&fs_info->chunk_mutex);
2945 	array_size = btrfs_super_sys_array_size(super_copy);
2946 
2947 	ptr = super_copy->sys_chunk_array;
2948 	cur = 0;
2949 
2950 	while (cur < array_size) {
2951 		disk_key = (struct btrfs_disk_key *)ptr;
2952 		btrfs_disk_key_to_cpu(&key, disk_key);
2953 
2954 		len = sizeof(*disk_key);
2955 
2956 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2957 			chunk = (struct btrfs_chunk *)(ptr + len);
2958 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2959 			len += btrfs_chunk_item_size(num_stripes);
2960 		} else {
2961 			ret = -EIO;
2962 			break;
2963 		}
2964 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2965 		    key.offset == chunk_offset) {
2966 			memmove(ptr, ptr + len, array_size - (cur + len));
2967 			array_size -= len;
2968 			btrfs_set_super_sys_array_size(super_copy, array_size);
2969 		} else {
2970 			ptr += len;
2971 			cur += len;
2972 		}
2973 	}
2974 	return ret;
2975 }
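
/*
 * Layout walked by btrfs_del_sys_chunk() above: sys_chunk_array is a packed
 * sequence of (disk key, chunk item) pairs, where each chunk item embeds its
 * stripes, so every entry spans:
 *
 *	len = sizeof(struct btrfs_disk_key) +
 *	      btrfs_chunk_item_size(num_stripes);
 *
 * Deleting an entry memmove()s the tail of the array down over it and
 * shrinks super_sys_array_size by len.
 */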
2976 
2977 /*
2978  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @fs_info: Filesystem containing the chunk mapping.
2979  * @logical: Logical block offset in bytes.
2980  * @length: Length of extent in bytes.
2981  *
2982  * Return: Chunk mapping or ERR_PTR.
2983  */
2984 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2985 				       u64 logical, u64 length)
2986 {
2987 	struct extent_map_tree *em_tree;
2988 	struct extent_map *em;
2989 
2990 	em_tree = &fs_info->mapping_tree;
2991 	read_lock(&em_tree->lock);
2992 	em = lookup_extent_mapping(em_tree, logical, length);
2993 	read_unlock(&em_tree->lock);
2994 
2995 	if (!em) {
2996 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2997 			   logical, length);
2998 		return ERR_PTR(-EINVAL);
2999 	}
3000 
3001 	if (em->start > logical || em->start + em->len < logical) {
3002 		btrfs_crit(fs_info,
3003 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3004 			   logical, length, em->start, em->start + em->len);
3005 		free_extent_map(em);
3006 		return ERR_PTR(-EINVAL);
3007 	}
3008 
3009 	/* callers are responsible for dropping em's ref. */
3010 	return em;
3011 }
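
/*
 * Typical usage of btrfs_get_chunk_map(), mirroring btrfs_remove_chunk()
 * below:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... use map->num_stripes and map->stripes[i] ...
 *	free_extent_map(em);
 */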
3012 
3013 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3014 			     struct map_lookup *map, u64 chunk_offset)
3015 {
3016 	int i;
3017 
3018 	/*
3019 	 * Removing chunk items and updating the device items in the chunks btree
3020 	 * requires holding the chunk_mutex.
3021 	 * See the comment at btrfs_chunk_alloc() for the details.
3022 	 */
3023 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3024 
3025 	for (i = 0; i < map->num_stripes; i++) {
3026 		int ret;
3027 
3028 		ret = btrfs_update_device(trans, map->stripes[i].dev);
3029 		if (ret)
3030 			return ret;
3031 	}
3032 
3033 	return btrfs_free_chunk(trans, chunk_offset);
3034 }
3035 
3036 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3037 {
3038 	struct btrfs_fs_info *fs_info = trans->fs_info;
3039 	struct extent_map *em;
3040 	struct map_lookup *map;
3041 	u64 dev_extent_len = 0;
3042 	int i, ret = 0;
3043 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3044 
3045 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3046 	if (IS_ERR(em)) {
3047 		/*
3048 		 * This is a logic error, but we don't want to just rely on the
3049 		 * user having built with ASSERT enabled, so if ASSERT doesn't
3050 		 * do anything we still error out.
3051 		 */
3052 		ASSERT(0);
3053 		return PTR_ERR(em);
3054 	}
3055 	map = em->map_lookup;
3056 
3057 	/*
3058 	 * First delete the device extent items from the devices btree.
3059 	 * We take the device_list_mutex to avoid racing with the finishing phase
3060 	 * of a device replace operation. See the comment below before acquiring
3061 	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3062 	 * because that can result in a deadlock when deleting the device extent
3063 	 * items from the devices btree - COWing an extent buffer from the btree
3064 	 * may result in allocating a new metadata chunk, which would attempt to
3065 	 * lock fs_info->chunk_mutex again.
3066 	 */
3067 	mutex_lock(&fs_devices->device_list_mutex);
3068 	for (i = 0; i < map->num_stripes; i++) {
3069 		struct btrfs_device *device = map->stripes[i].dev;
3070 		ret = btrfs_free_dev_extent(trans, device,
3071 					    map->stripes[i].physical,
3072 					    &dev_extent_len);
3073 		if (ret) {
3074 			mutex_unlock(&fs_devices->device_list_mutex);
3075 			btrfs_abort_transaction(trans, ret);
3076 			goto out;
3077 		}
3078 
3079 		if (device->bytes_used > 0) {
3080 			mutex_lock(&fs_info->chunk_mutex);
3081 			btrfs_device_set_bytes_used(device,
3082 					device->bytes_used - dev_extent_len);
3083 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3084 			btrfs_clear_space_info_full(fs_info);
3085 			mutex_unlock(&fs_info->chunk_mutex);
3086 		}
3087 	}
3088 	mutex_unlock(&fs_devices->device_list_mutex);
3089 
3090 	/*
3091 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3092 	 *
3093 	 * 1) Just like with the first phase of the chunk allocation, we must
3094 	 *    reserve system space, do all chunk btree updates and deletions, and
3095 	 *    update the system chunk array in the superblock while holding this
3096 	 *    mutex. This is for similar reasons as explained on the comment at
3097 	 *    the top of btrfs_chunk_alloc();
3098 	 *
3099 	 * 2) Prevent races with the final phase of a device replace operation
3100 	 *    that replaces the device object associated with the map's stripes,
3101 	 *    because the device object's id can change at any time during that
3102 	 *    final phase of the device replace operation
3103 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3104 	 *    replaced device and then see it with an ID of
3105 	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3106 	 *    the device item, which does not exist in the chunk btree.
3107 	 *    The finishing phase of device replace acquires both the
3108 	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3109 	 *    safe by just acquiring the chunk_mutex.
3110 	 */
3111 	trans->removing_chunk = true;
3112 	mutex_lock(&fs_info->chunk_mutex);
3113 
3114 	check_system_chunk(trans, map->type);
3115 
3116 	ret = remove_chunk_item(trans, map, chunk_offset);
3117 	/*
3118 	 * Normally we should not get -ENOSPC since we reserved space before
3119 	 * through the call to check_system_chunk().
3120 	 *
3121 	 * Despite our system space_info having enough free space, we may not
3122 	 * be able to allocate extents from its block groups, because all have
3123 	 * an incompatible profile, which will force us to allocate a new system
3124 	 * block group with the right profile, or right after we called
3125 	 * check_system_chunk() above, a scrub turned the only system block group
3126 	 * with enough free space into RO mode.
3127 	 * This is explained with more detail at do_chunk_alloc().
3128 	 *
3129 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3130 	 */
3131 	if (ret == -ENOSPC) {
3132 		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3133 		struct btrfs_block_group *sys_bg;
3134 
3135 		sys_bg = btrfs_create_chunk(trans, sys_flags);
3136 		if (IS_ERR(sys_bg)) {
3137 			ret = PTR_ERR(sys_bg);
3138 			btrfs_abort_transaction(trans, ret);
3139 			goto out;
3140 		}
3141 
3142 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3143 		if (ret) {
3144 			btrfs_abort_transaction(trans, ret);
3145 			goto out;
3146 		}
3147 
3148 		ret = remove_chunk_item(trans, map, chunk_offset);
3149 		if (ret) {
3150 			btrfs_abort_transaction(trans, ret);
3151 			goto out;
3152 		}
3153 	} else if (ret) {
3154 		btrfs_abort_transaction(trans, ret);
3155 		goto out;
3156 	}
3157 
3158 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3159 
3160 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3161 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3162 		if (ret) {
3163 			btrfs_abort_transaction(trans, ret);
3164 			goto out;
3165 		}
3166 	}
3167 
3168 	mutex_unlock(&fs_info->chunk_mutex);
3169 	trans->removing_chunk = false;
3170 
3171 	/*
3172 	 * We are done with chunk btree updates and deletions, so release the
3173 	 * system space we previously reserved (with check_system_chunk()).
3174 	 */
3175 	btrfs_trans_release_chunk_metadata(trans);
3176 
3177 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3178 	if (ret) {
3179 		btrfs_abort_transaction(trans, ret);
3180 		goto out;
3181 	}
3182 
3183 out:
3184 	if (trans->removing_chunk) {
3185 		mutex_unlock(&fs_info->chunk_mutex);
3186 		trans->removing_chunk = false;
3187 	}
3188 	/* once for us */
3189 	free_extent_map(em);
3190 	return ret;
3191 }
3192 
3193 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3194 {
3195 	struct btrfs_root *root = fs_info->chunk_root;
3196 	struct btrfs_trans_handle *trans;
3197 	struct btrfs_block_group *block_group;
3198 	u64 length;
3199 	int ret;
3200 
3201 	/*
3202 	 * Prevent races with automatic removal of unused block groups.
3203 	 * After we relocate and before we remove the chunk with offset
3204 	 * chunk_offset, automatic removal of the block group can kick in,
3205 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3206 	 *
3207 	 * Make sure to acquire this mutex before doing a tree search (dev
3208 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3209 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3210 	 * we release the path used to search the chunk/dev tree and before
3211 	 * the current task acquires this mutex and calls us.
3212 	 */
3213 	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3214 
3215 	/* step one, relocate all the extents inside this chunk */
3216 	btrfs_scrub_pause(fs_info);
3217 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3218 	btrfs_scrub_continue(fs_info);
3219 	if (ret)
3220 		return ret;
3221 
3222 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3223 	if (!block_group)
3224 		return -ENOENT;
3225 	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3226 	length = block_group->length;
3227 	btrfs_put_block_group(block_group);
3228 
3229 	/*
3230 	 * On a zoned file system, discard the whole block group; this will
3231 	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3232 	 * resetting the zone fails, don't treat it as a fatal problem from the
3233 	 * filesystem's point of view.
3234 	 */
3235 	if (btrfs_is_zoned(fs_info)) {
3236 		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3237 		if (ret)
3238 			btrfs_info(fs_info,
3239 				"failed to reset zone %llu after relocation",
3240 				chunk_offset);
3241 	}
3242 
3243 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3244 						     chunk_offset);
3245 	if (IS_ERR(trans)) {
3246 		ret = PTR_ERR(trans);
3247 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3248 		return ret;
3249 	}
3250 
3251 	/*
3252 	 * step two, delete the device extents and the
3253 	 * chunk tree entries
3254 	 */
3255 	ret = btrfs_remove_chunk(trans, chunk_offset);
3256 	btrfs_end_transaction(trans);
3257 	return ret;
3258 }
3259 
3260 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3261 {
3262 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3263 	struct btrfs_path *path;
3264 	struct extent_buffer *leaf;
3265 	struct btrfs_chunk *chunk;
3266 	struct btrfs_key key;
3267 	struct btrfs_key found_key;
3268 	u64 chunk_type;
3269 	bool retried = false;
3270 	int failed = 0;
3271 	int ret;
3272 
3273 	path = btrfs_alloc_path();
3274 	if (!path)
3275 		return -ENOMEM;
3276 
3277 again:
3278 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3279 	key.offset = (u64)-1;
3280 	key.type = BTRFS_CHUNK_ITEM_KEY;
3281 
3282 	while (1) {
3283 		mutex_lock(&fs_info->reclaim_bgs_lock);
3284 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3285 		if (ret < 0) {
3286 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3287 			goto error;
3288 		}
3289 		BUG_ON(ret == 0); /* Corruption */
3290 
3291 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3292 					  key.type);
3293 		if (ret)
3294 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3295 		if (ret < 0)
3296 			goto error;
3297 		if (ret > 0)
3298 			break;
3299 
3300 		leaf = path->nodes[0];
3301 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3302 
3303 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3304 				       struct btrfs_chunk);
3305 		chunk_type = btrfs_chunk_type(leaf, chunk);
3306 		btrfs_release_path(path);
3307 
3308 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3309 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3310 			if (ret == -ENOSPC)
3311 				failed++;
3312 			else
3313 				BUG_ON(ret);
3314 		}
3315 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3316 
3317 		if (found_key.offset == 0)
3318 			break;
3319 		key.offset = found_key.offset - 1;
3320 	}
3321 	ret = 0;
3322 	if (failed && !retried) {
3323 		failed = 0;
3324 		retried = true;
3325 		goto again;
3326 	} else if (WARN_ON(failed && retried)) {
3327 		ret = -ENOSPC;
3328 	}
3329 error:
3330 	btrfs_free_path(path);
3331 	return ret;
3332 }
3333 
3334 /*
3335  * Return 1 : allocated a data chunk successfully,
3336  * Return <0: error during data chunk allocation,
3337  * Return 0 : no need to allocate a data chunk.
3338  */
3339 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3340 				      u64 chunk_offset)
3341 {
3342 	struct btrfs_block_group *cache;
3343 	u64 bytes_used;
3344 	u64 chunk_type;
3345 
3346 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3347 	ASSERT(cache);
3348 	chunk_type = cache->flags;
3349 	btrfs_put_block_group(cache);
3350 
3351 	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3352 		return 0;
3353 
3354 	spin_lock(&fs_info->data_sinfo->lock);
3355 	bytes_used = fs_info->data_sinfo->bytes_used;
3356 	spin_unlock(&fs_info->data_sinfo->lock);
3357 
3358 	if (!bytes_used) {
3359 		struct btrfs_trans_handle *trans;
3360 		int ret;
3361 
3362 		trans =	btrfs_join_transaction(fs_info->tree_root);
3363 		if (IS_ERR(trans))
3364 			return PTR_ERR(trans);
3365 
3366 		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3367 		btrfs_end_transaction(trans);
3368 		if (ret < 0)
3369 			return ret;
3370 		return 1;
3371 	}
3372 
3373 	return 0;
3374 }
3375 
3376 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3377 			       struct btrfs_balance_control *bctl)
3378 {
3379 	struct btrfs_root *root = fs_info->tree_root;
3380 	struct btrfs_trans_handle *trans;
3381 	struct btrfs_balance_item *item;
3382 	struct btrfs_disk_balance_args disk_bargs;
3383 	struct btrfs_path *path;
3384 	struct extent_buffer *leaf;
3385 	struct btrfs_key key;
3386 	int ret, err;
3387 
3388 	path = btrfs_alloc_path();
3389 	if (!path)
3390 		return -ENOMEM;
3391 
3392 	trans = btrfs_start_transaction(root, 0);
3393 	if (IS_ERR(trans)) {
3394 		btrfs_free_path(path);
3395 		return PTR_ERR(trans);
3396 	}
3397 
3398 	key.objectid = BTRFS_BALANCE_OBJECTID;
3399 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3400 	key.offset = 0;
3401 
3402 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3403 				      sizeof(*item));
3404 	if (ret)
3405 		goto out;
3406 
3407 	leaf = path->nodes[0];
3408 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3409 
3410 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3411 
3412 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3413 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3414 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3415 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3416 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3417 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3418 
3419 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3420 
3421 	btrfs_mark_buffer_dirty(leaf);
3422 out:
3423 	btrfs_free_path(path);
3424 	err = btrfs_commit_transaction(trans);
3425 	if (err && !ret)
3426 		ret = err;
3427 	return ret;
3428 }
3429 
3430 static int del_balance_item(struct btrfs_fs_info *fs_info)
3431 {
3432 	struct btrfs_root *root = fs_info->tree_root;
3433 	struct btrfs_trans_handle *trans;
3434 	struct btrfs_path *path;
3435 	struct btrfs_key key;
3436 	int ret, err;
3437 
3438 	path = btrfs_alloc_path();
3439 	if (!path)
3440 		return -ENOMEM;
3441 
3442 	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3443 	if (IS_ERR(trans)) {
3444 		btrfs_free_path(path);
3445 		return PTR_ERR(trans);
3446 	}
3447 
3448 	key.objectid = BTRFS_BALANCE_OBJECTID;
3449 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3450 	key.offset = 0;
3451 
3452 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3453 	if (ret < 0)
3454 		goto out;
3455 	if (ret > 0) {
3456 		ret = -ENOENT;
3457 		goto out;
3458 	}
3459 
3460 	ret = btrfs_del_item(trans, root, path);
3461 out:
3462 	btrfs_free_path(path);
3463 	err = btrfs_commit_transaction(trans);
3464 	if (err && !ret)
3465 		ret = err;
3466 	return ret;
3467 }
3468 
3469 /*
3470  * This is a heuristic used to reduce the number of chunks balanced on
3471  * resume after balance was interrupted.
3472  */
3473 static void update_balance_args(struct btrfs_balance_control *bctl)
3474 {
3475 	/*
3476 	 * Turn on soft mode for chunk types that were being converted.
3477 	 */
3478 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3479 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3480 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3481 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3482 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3483 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3484 
3485 	/*
3486 	 * Turn on usage filter if it is not already in use.  The idea is
3487 	 * that chunks that we have already balanced should be
3488 	 * reasonably full.  Don't do it for chunks that are being
3489 	 * converted - that will keep us from relocating unconverted
3490 	 * (albeit full) chunks.
3491 	 */
3492 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3493 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3494 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3495 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3496 		bctl->data.usage = 90;
3497 	}
3498 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3499 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3500 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3501 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3502 		bctl->sys.usage = 90;
3503 	}
3504 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3505 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3506 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3507 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3508 		bctl->meta.usage = 90;
3509 	}
3510 }
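
/*
 * Example of the heuristic above: a balance started with -dconvert=raid1
 * and interrupted resumes with BTRFS_BALANCE_ARGS_SOFT set, so data chunks
 * already converted to raid1 are skipped.  Chunk types that were not being
 * converted get usage=90 instead, which filters out chunks more than 90%
 * full -- those were presumably balanced before the interruption.
 */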
3511 
3512 /*
3513  * Clear the balance status in fs_info and delete the balance item from disk.
3514  */
3515 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3516 {
3517 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3518 	int ret;
3519 
3520 	BUG_ON(!fs_info->balance_ctl);
3521 
3522 	spin_lock(&fs_info->balance_lock);
3523 	fs_info->balance_ctl = NULL;
3524 	spin_unlock(&fs_info->balance_lock);
3525 
3526 	kfree(bctl);
3527 	ret = del_balance_item(fs_info);
3528 	if (ret)
3529 		btrfs_handle_fs_error(fs_info, ret, NULL);
3530 }
3531 
3532 /*
3533  * Balance filters.  Return 1 if chunk should be filtered out
3534  * (should not be balanced).
3535  */
3536 static int chunk_profiles_filter(u64 chunk_type,
3537 				 struct btrfs_balance_args *bargs)
3538 {
3539 	chunk_type = chunk_to_extended(chunk_type) &
3540 				BTRFS_EXTENDED_PROFILE_MASK;
3541 
3542 	if (bargs->profiles & chunk_type)
3543 		return 0;
3544 
3545 	return 1;
3546 }
3547 
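/*
 * Filter a chunk by its usage percentage: the chunk passes (return 0) only
 * if user_thresh_min <= used < user_thresh_max, where the thresholds are
 * computed as a percentage of the chunk length (div_factor_fine() computes
 * num * factor / 100).
 */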
3548 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3549 			      struct btrfs_balance_args *bargs)
3550 {
3551 	struct btrfs_block_group *cache;
3552 	u64 chunk_used;
3553 	u64 user_thresh_min;
3554 	u64 user_thresh_max;
3555 	int ret = 1;
3556 
3557 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3558 	chunk_used = cache->used;
3559 
3560 	if (bargs->usage_min == 0)
3561 		user_thresh_min = 0;
3562 	else
3563 		user_thresh_min = div_factor_fine(cache->length,
3564 						  bargs->usage_min);
3565 
3566 	if (bargs->usage_max == 0)
3567 		user_thresh_max = 1;
3568 	else if (bargs->usage_max > 100)
3569 		user_thresh_max = cache->length;
3570 	else
3571 		user_thresh_max = div_factor_fine(cache->length,
3572 						  bargs->usage_max);
3573 
3574 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3575 		ret = 0;
3576 
3577 	btrfs_put_block_group(cache);
3578 	return ret;
3579 }
3580 
3581 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3582 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3583 {
3584 	struct btrfs_block_group *cache;
3585 	u64 chunk_used, user_thresh;
3586 	int ret = 1;
3587 
3588 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3589 	chunk_used = cache->used;
3590 
3591 	if (bargs->usage_min == 0)
3592 		user_thresh = 1;
3593 	else if (bargs->usage > 100)
3594 		user_thresh = cache->length;
3595 	else
3596 		user_thresh = div_factor_fine(cache->length, bargs->usage);
3597 
3598 	if (chunk_used < user_thresh)
3599 		ret = 0;
3600 
3601 	btrfs_put_block_group(cache);
3602 	return ret;
3603 }
3604 
3605 static int chunk_devid_filter(struct extent_buffer *leaf,
3606 			      struct btrfs_chunk *chunk,
3607 			      struct btrfs_balance_args *bargs)
3608 {
3609 	struct btrfs_stripe *stripe;
3610 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3611 	int i;
3612 
3613 	for (i = 0; i < num_stripes; i++) {
3614 		stripe = btrfs_stripe_nr(chunk, i);
3615 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3616 			return 0;
3617 	}
3618 
3619 	return 1;
3620 }
3621 
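/*
 * Number of stripes in a chunk that hold data, per the raid attr table.
 * Illustrative examples: RAID10 with 4 stripes has ncopies = 2 and
 * nparity = 0, giving (4 - 0) / 2 = 2 data stripes; RAID6 with 6 stripes
 * has ncopies = 1 and nparity = 2, giving (6 - 2) / 1 = 4 data stripes.
 */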
3622 static u64 calc_data_stripes(u64 type, int num_stripes)
3623 {
3624 	const int index = btrfs_bg_flags_to_raid_index(type);
3625 	const int ncopies = btrfs_raid_array[index].ncopies;
3626 	const int nparity = btrfs_raid_array[index].nparity;
3627 
3628 	return (num_stripes - nparity) / ncopies;
3629 }
3630 
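/*
 * Filter by physical byte range on a given device: the chunk passes if any
 * of its stripes on bargs->devid overlaps [pstart, pend).  The per-stripe
 * length is the chunk length divided by the number of data stripes.
 */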
3631 /* [pstart, pend) */
3632 static int chunk_drange_filter(struct extent_buffer *leaf,
3633 			       struct btrfs_chunk *chunk,
3634 			       struct btrfs_balance_args *bargs)
3635 {
3636 	struct btrfs_stripe *stripe;
3637 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3638 	u64 stripe_offset;
3639 	u64 stripe_length;
3640 	u64 type;
3641 	int factor;
3642 	int i;
3643 
3644 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3645 		return 0;
3646 
3647 	type = btrfs_chunk_type(leaf, chunk);
3648 	factor = calc_data_stripes(type, num_stripes);
3649 
3650 	for (i = 0; i < num_stripes; i++) {
3651 		stripe = btrfs_stripe_nr(chunk, i);
3652 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3653 			continue;
3654 
3655 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3656 		stripe_length = btrfs_chunk_length(leaf, chunk);
3657 		stripe_length = div_u64(stripe_length, factor);
3658 
3659 		if (stripe_offset < bargs->pend &&
3660 		    stripe_offset + stripe_length > bargs->pstart)
3661 			return 0;
3662 	}
3663 
3664 	return 1;
3665 }
3666 
3667 /* [vstart, vend) */
3668 static int chunk_vrange_filter(struct extent_buffer *leaf,
3669 			       struct btrfs_chunk *chunk,
3670 			       u64 chunk_offset,
3671 			       struct btrfs_balance_args *bargs)
3672 {
3673 	if (chunk_offset < bargs->vend &&
3674 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3675 		/* at least part of the chunk is inside this vrange */
3676 		return 0;
3677 
3678 	return 1;
3679 }
3680 
3681 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3682 			       struct btrfs_chunk *chunk,
3683 			       struct btrfs_balance_args *bargs)
3684 {
3685 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3686 
3687 	if (bargs->stripes_min <= num_stripes
3688 			&& num_stripes <= bargs->stripes_max)
3689 		return 0;
3690 
3691 	return 1;
3692 }
3693 
3694 static int chunk_soft_convert_filter(u64 chunk_type,
3695 				     struct btrfs_balance_args *bargs)
3696 {
3697 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3698 		return 0;
3699 
3700 	chunk_type = chunk_to_extended(chunk_type) &
3701 				BTRFS_EXTENDED_PROFILE_MASK;
3702 
3703 	if (bargs->target == chunk_type)
3704 		return 1;
3705 
3706 	return 0;
3707 }
3708 
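/*
 * Decide whether a chunk should be relocated: apply each enabled balance
 * filter in turn and return 1 to balance the chunk, 0 to skip it.
 */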
3709 static int should_balance_chunk(struct extent_buffer *leaf,
3710 				struct btrfs_chunk *chunk, u64 chunk_offset)
3711 {
3712 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3713 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3714 	struct btrfs_balance_args *bargs = NULL;
3715 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3716 
3717 	/* type filter */
3718 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3719 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3720 		return 0;
3721 	}
3722 
3723 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3724 		bargs = &bctl->data;
3725 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3726 		bargs = &bctl->sys;
3727 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3728 		bargs = &bctl->meta;
3729 
3730 	/* profiles filter */
3731 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3732 	    chunk_profiles_filter(chunk_type, bargs)) {
3733 		return 0;
3734 	}
3735 
3736 	/* usage filter */
3737 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3738 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3739 		return 0;
3740 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3741 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3742 		return 0;
3743 	}
3744 
3745 	/* devid filter */
3746 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3747 	    chunk_devid_filter(leaf, chunk, bargs)) {
3748 		return 0;
3749 	}
3750 
3751 	/* drange filter, makes sense only with devid filter */
3752 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3753 	    chunk_drange_filter(leaf, chunk, bargs)) {
3754 		return 0;
3755 	}
3756 
3757 	/* vrange filter */
3758 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3759 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3760 		return 0;
3761 	}
3762 
3763 	/* stripes filter */
3764 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3765 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3766 		return 0;
3767 	}
3768 
3769 	/* soft profile changing mode */
3770 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3771 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3772 		return 0;
3773 	}
3774 
3775 	/*
3776 	 * Limited by count; must be the last filter since it consumes the limit
3777 	 */
3778 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3779 		if (bargs->limit == 0)
3780 			return 0;
3781 		else
3782 			bargs->limit--;
3783 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3784 		/*
3785 		 * Same logic as the 'limit' filter; the minimum cannot be
3786 		 * determined here because we do not have the global information
3787 		 * about the count of all chunks that satisfy the filters.
3788 		 */
3789 		if (bargs->limit_max == 0)
3790 			return 0;
3791 		else
3792 			bargs->limit_max--;
3793 	}
3794 
3795 	return 1;
3796 }
3797 
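/*
 * Main balance loop.  This runs in two passes over the chunk tree: a
 * counting pass that only tallies how many chunks match the filters
 * (bctl->stat.expected) and a second pass that actually relocates the
 * matching chunks, honouring pause/cancel requests between chunks.
 */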
3798 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3799 {
3800 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3801 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3802 	u64 chunk_type;
3803 	struct btrfs_chunk *chunk;
3804 	struct btrfs_path *path = NULL;
3805 	struct btrfs_key key;
3806 	struct btrfs_key found_key;
3807 	struct extent_buffer *leaf;
3808 	int slot;
3809 	int ret;
3810 	int enospc_errors = 0;
3811 	bool counting = true;
3812 	/*
	 * The single value limit and min/max limits use the same bytes in the
	 * balance args (they share a union), so save them here and restore
	 * before the relocation pass.
	 */
3813 	u64 limit_data = bctl->data.limit;
3814 	u64 limit_meta = bctl->meta.limit;
3815 	u64 limit_sys = bctl->sys.limit;
3816 	u32 count_data = 0;
3817 	u32 count_meta = 0;
3818 	u32 count_sys = 0;
3819 	int chunk_reserved = 0;
3820 
3821 	path = btrfs_alloc_path();
3822 	if (!path) {
3823 		ret = -ENOMEM;
3824 		goto error;
3825 	}
3826 
3827 	/* zero out stat counters */
3828 	spin_lock(&fs_info->balance_lock);
3829 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3830 	spin_unlock(&fs_info->balance_lock);
3831 again:
3832 	if (!counting) {
3833 		/*
3834 		 * The single value limit and min/max limits share bytes in the
3835 		 * balance args (a union), so restore the saved values here.
3836 		 */
3837 		bctl->data.limit = limit_data;
3838 		bctl->meta.limit = limit_meta;
3839 		bctl->sys.limit = limit_sys;
3840 	}
3841 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3842 	key.offset = (u64)-1;
3843 	key.type = BTRFS_CHUNK_ITEM_KEY;
3844 
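	/*
	 * Walk the chunk items backwards: start the search at the largest
	 * possible offset and step to the previous item on each iteration
	 * (key.offset = found_key.offset - 1 at the bottom of the loop).
	 */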
3845 	while (1) {
3846 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3847 		    atomic_read(&fs_info->balance_cancel_req)) {
3848 			ret = -ECANCELED;
3849 			goto error;
3850 		}
3851 
3852 		mutex_lock(&fs_info->reclaim_bgs_lock);
3853 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3854 		if (ret < 0) {
3855 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3856 			goto error;
3857 		}
3858 
3859 		/*
3860 		 * This shouldn't happen; it means the last chunk relocation
3861 		 * failed.
3862 		 */
3863 		if (ret == 0)
3864 			BUG(); /* FIXME break ? */
3865 
3866 		ret = btrfs_previous_item(chunk_root, path, 0,
3867 					  BTRFS_CHUNK_ITEM_KEY);
3868 		if (ret) {
3869 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3870 			ret = 0;
3871 			break;
3872 		}
3873 
3874 		leaf = path->nodes[0];
3875 		slot = path->slots[0];
3876 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3877 
3878 		if (found_key.objectid != key.objectid) {
3879 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3880 			break;
3881 		}
3882 
3883 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3884 		chunk_type = btrfs_chunk_type(leaf, chunk);
3885 
3886 		if (!counting) {
3887 			spin_lock(&fs_info->balance_lock);
3888 			bctl->stat.considered++;
3889 			spin_unlock(&fs_info->balance_lock);
3890 		}
3891 
3892 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3893 
3894 		btrfs_release_path(path);
3895 		if (!ret) {
3896 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3897 			goto loop;
3898 		}
3899 
3900 		if (counting) {
3901 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3902 			spin_lock(&fs_info->balance_lock);
3903 			bctl->stat.expected++;
3904 			spin_unlock(&fs_info->balance_lock);
3905 
3906 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3907 				count_data++;
3908 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3909 				count_sys++;
3910 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3911 				count_meta++;
3912 
3913 			goto loop;
3914 		}
3915 
3916 		/*
3917 		 * Apply the limit_min filter; no need to check whether the
3918 		 * LIMITS filter is used, as limit_min is 0 by default.
3919 		 */
3920 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3921 					count_data < bctl->data.limit_min)
3922 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3923 					count_meta < bctl->meta.limit_min)
3924 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3925 					count_sys < bctl->sys.limit_min)) {
3926 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3927 			goto loop;
3928 		}
3929 
3930 		if (!chunk_reserved) {
3931 			/*
3932 			 * We may be relocating the only data chunk we have,
3933 			 * which could potentially end up losing the data's
3934 			 * raid profile, so let's allocate an empty one in
3935 			 * advance.
3936 			 */
3937 			ret = btrfs_may_alloc_data_chunk(fs_info,
3938 							 found_key.offset);
3939 			if (ret < 0) {
3940 				mutex_unlock(&fs_info->reclaim_bgs_lock);
3941 				goto error;
3942 			} else if (ret == 1) {
3943 				chunk_reserved = 1;
3944 			}
3945 		}
3946 
3947 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3948 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3949 		if (ret == -ENOSPC) {
3950 			enospc_errors++;
3951 		} else if (ret == -ETXTBSY) {
3952 			btrfs_info(fs_info,
3953 	   "skipping relocation of block group %llu due to active swapfile",
3954 				   found_key.offset);
3955 			ret = 0;
3956 		} else if (ret) {
3957 			goto error;
3958 		} else {
3959 			spin_lock(&fs_info->balance_lock);
3960 			bctl->stat.completed++;
3961 			spin_unlock(&fs_info->balance_lock);
3962 		}
3963 loop:
3964 		if (found_key.offset == 0)
3965 			break;
3966 		key.offset = found_key.offset - 1;
3967 	}
3968 
3969 	if (counting) {
3970 		btrfs_release_path(path);
3971 		counting = false;
3972 		goto again;
3973 	}
3974 error:
3975 	btrfs_free_path(path);
3976 	if (enospc_errors) {
3977 		btrfs_info(fs_info, "%d enospc errors during balance",
3978 			   enospc_errors);
3979 		if (!ret)
3980 			ret = -ENOSPC;
3981 	}
3982 
3983 	return ret;
3984 }
3985 
3986 /**
3987  * alloc_profile_is_valid - see if a given profile is valid and reduced
3988  * @flags: profile to validate
3989  * @extended: if true @flags is treated as an extended profile
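 *
 * A reduced profile has at most one profile bit set; e.g. a value with
 * both RAID0 and RAID1 bits set is rejected by has_single_bit_set().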
3990  */
3991 static int alloc_profile_is_valid(u64 flags, int extended)
3992 {
3993 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3994 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3995 
3996 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3997 
3998 	/* 1) check that all other bits are zeroed */
3999 	if (flags & ~mask)
4000 		return 0;
4001 
4002 	/* 2) see if profile is reduced */
4003 	if (flags == 0)
4004 		return !extended; /* "0" is valid for usual profiles */
4005 
4006 	return has_single_bit_set(flags);
4007 }
4008 
4009 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4010 {
4011 	/* cancel requested || normal exit path */
4012 	return atomic_read(&fs_info->balance_cancel_req) ||
4013 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4014 		 atomic_read(&fs_info->balance_cancel_req) == 0);
4015 }
4016 
4017 /*
4018  * Validate target profile against allowed profiles and return true if it's OK.
4019  * Otherwise print the error message and return false.
4020  */
4021 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4022 		const struct btrfs_balance_args *bargs,
4023 		u64 allowed, const char *type)
4024 {
4025 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4026 		return true;
4027 
4028 	if (fs_info->sectorsize < PAGE_SIZE &&
4029 		bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
4030 		btrfs_err(fs_info,
4031 		"RAID56 is not yet supported for sectorsize %u with page size %lu",
4032 			  fs_info->sectorsize, PAGE_SIZE);
4033 		return false;
4034 	}
4035 	/* Profile is valid and does not have bits outside of the allowed set */
4036 	if (alloc_profile_is_valid(bargs->target, 1) &&
4037 	    (bargs->target & ~allowed) == 0)
4038 		return true;
4039 
4040 	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4041 			type, btrfs_bg_type_to_raid_name(bargs->target));
4042 	return false;
4043 }
4044 
4045 /*
4046  * Fill @buf with textual description of balance filter flags @bargs, up to
4047  * @size_buf including the terminating null. The output may be trimmed if it
4048  * does not fit into the provided buffer.
4049  */
4050 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4051 				 u32 size_buf)
4052 {
4053 	int ret;
4054 	u32 size_bp = size_buf;
4055 	char *bp = buf;
4056 	u64 flags = bargs->flags;
4057 	char tmp_buf[128] = {'\0'};
4058 
4059 	if (!flags)
4060 		return;
4061 
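	/*
	 * Each CHECK_APPEND_* macro snprintf()s into the remaining buffer
	 * space, advances bp and shrinks size_bp on success, and jumps to
	 * out_overflow when the output would be truncated.
	 */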
4062 #define CHECK_APPEND_NOARG(a)						\
4063 	do {								\
4064 		ret = snprintf(bp, size_bp, (a));			\
4065 		if (ret < 0 || ret >= size_bp)				\
4066 			goto out_overflow;				\
4067 		size_bp -= ret;						\
4068 		bp += ret;						\
4069 	} while (0)
4070 
4071 #define CHECK_APPEND_1ARG(a, v1)					\
4072 	do {								\
4073 		ret = snprintf(bp, size_bp, (a), (v1));			\
4074 		if (ret < 0 || ret >= size_bp)				\
4075 			goto out_overflow;				\
4076 		size_bp -= ret;						\
4077 		bp += ret;						\
4078 	} while (0)
4079 
4080 #define CHECK_APPEND_2ARG(a, v1, v2)					\
4081 	do {								\
4082 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4083 		if (ret < 0 || ret >= size_bp)				\
4084 			goto out_overflow;				\
4085 		size_bp -= ret;						\
4086 		bp += ret;						\
4087 	} while (0)
4088 
4089 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4090 		CHECK_APPEND_1ARG("convert=%s,",
4091 				  btrfs_bg_type_to_raid_name(bargs->target));
4092 
4093 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4094 		CHECK_APPEND_NOARG("soft,");
4095 
4096 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4097 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4098 					    sizeof(tmp_buf));
4099 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4100 	}
4101 
4102 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4103 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4104 
4105 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4106 		CHECK_APPEND_2ARG("usage=%u..%u,",
4107 				  bargs->usage_min, bargs->usage_max);
4108 
4109 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4110 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4111 
4112 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4113 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4114 				  bargs->pstart, bargs->pend);
4115 
4116 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4117 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4118 				  bargs->vstart, bargs->vend);
4119 
4120 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4121 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4122 
4123 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4124 		CHECK_APPEND_2ARG("limit=%u..%u,",
4125 				bargs->limit_min, bargs->limit_max);
4126 
4127 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4128 		CHECK_APPEND_2ARG("stripes=%u..%u,",
4129 				  bargs->stripes_min, bargs->stripes_max);
4130 
4131 #undef CHECK_APPEND_2ARG
4132 #undef CHECK_APPEND_1ARG
4133 #undef CHECK_APPEND_NOARG
4134 
4135 out_overflow:
4136 
4137 	if (size_bp < size_buf)
4138 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4139 	else
4140 		buf[0] = '\0';
4141 }
4142 
4143 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4144 {
4145 	u32 size_buf = 1024;
4146 	char tmp_buf[192] = {'\0'};
4147 	char *buf;
4148 	char *bp;
4149 	u32 size_bp = size_buf;
4150 	int ret;
4151 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4152 
4153 	buf = kzalloc(size_buf, GFP_KERNEL);
4154 	if (!buf)
4155 		return;
4156 
4157 	bp = buf;
4158 
4159 #define CHECK_APPEND_1ARG(a, v1)					\
4160 	do {								\
4161 		ret = snprintf(bp, size_bp, (a), (v1));			\
4162 		if (ret < 0 || ret >= size_bp)				\
4163 			goto out_overflow;				\
4164 		size_bp -= ret;						\
4165 		bp += ret;						\
4166 	} while (0)
4167 
4168 	if (bctl->flags & BTRFS_BALANCE_FORCE)
4169 		CHECK_APPEND_1ARG("%s", "-f ");
4170 
4171 	if (bctl->flags & BTRFS_BALANCE_DATA) {
4172 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4173 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4174 	}
4175 
4176 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4177 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4178 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4179 	}
4180 
4181 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4182 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4183 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4184 	}
4185 
4186 #undef CHECK_APPEND_1ARG
4187 
4188 out_overflow:
4189 
4190 	if (size_bp < size_buf)
4191 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4192 	btrfs_info(fs_info, "balance: %s %s",
4193 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4194 		   "resume" : "start", buf);
4195 
4196 	kfree(buf);
4197 }
4198 
4199 /*
4200  * Should be called with the balance mutex held.
4201  */
4202 int btrfs_balance(struct btrfs_fs_info *fs_info,
4203 		  struct btrfs_balance_control *bctl,
4204 		  struct btrfs_ioctl_balance_args *bargs)
4205 {
4206 	u64 meta_target, data_target;
4207 	u64 allowed;
4208 	int mixed = 0;
4209 	int ret;
4210 	u64 num_devices;
4211 	unsigned seq;
4212 	bool reducing_redundancy;
4213 	int i;
4214 
4215 	if (btrfs_fs_closing(fs_info) ||
4216 	    atomic_read(&fs_info->balance_pause_req) ||
4217 	    btrfs_should_cancel_balance(fs_info)) {
4218 		ret = -EINVAL;
4219 		goto out;
4220 	}
4221 
4222 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4223 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4224 		mixed = 1;
4225 
4226 	/*
4227 	 * In case of mixed groups both data and meta should be picked,
4228 	 * and identical options should be given for both of them.
4229 	 */
4230 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4231 	if (mixed && (bctl->flags & allowed)) {
4232 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4233 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4234 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4235 			btrfs_err(fs_info,
4236 	  "balance: mixed groups data and metadata options must be the same");
4237 			ret = -EINVAL;
4238 			goto out;
4239 		}
4240 	}
4241 
4242 	/*
4243 	 * rw_devices will not change at the moment, device add/delete/replace
4244 	 * are exclusive
4245 	 */
4246 	num_devices = fs_info->fs_devices->rw_devices;
4247 
4248 	/*
4249 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4250 	 * special bit for it, to make it easier to distinguish.  Thus we need
4251 	 * to set it manually, or balance would refuse the profile.
4252 	 */
4253 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4254 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4255 		if (num_devices >= btrfs_raid_array[i].devs_min)
4256 			allowed |= btrfs_raid_array[i].bg_flag;
4257 
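	/*
	 * E.g. with num_devices == 2, profiles with devs_min <= 2 (single,
	 * dup, raid0, raid1, raid10, raid5) become allowed convert targets,
	 * while raid1c3, raid1c4 and raid6 remain disallowed.
	 */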
4258 	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4259 	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4260 	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4261 		ret = -EINVAL;
4262 		goto out;
4263 	}
4264 
4265 	/*
4266 	 * Allow to reduce metadata or system integrity only if force set for
4267 	 * profiles with redundancy (copies, parity)
4268 	 */
4269 	allowed = 0;
4270 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4271 		if (btrfs_raid_array[i].ncopies >= 2 ||
4272 		    btrfs_raid_array[i].tolerated_failures >= 1)
4273 			allowed |= btrfs_raid_array[i].bg_flag;
4274 	}
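	/*
	 * Sample the currently available allocation bits under the profiles
	 * seqlock; read_seqretry() restarts the loop if a concurrent profile
	 * update raced with us, so the snapshot below is consistent.
	 */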
4275 	do {
4276 		seq = read_seqbegin(&fs_info->profiles_lock);
4277 
4278 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4279 		     (fs_info->avail_system_alloc_bits & allowed) &&
4280 		     !(bctl->sys.target & allowed)) ||
4281 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4282 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4283 		     !(bctl->meta.target & allowed)))
4284 			reducing_redundancy = true;
4285 		else
4286 			reducing_redundancy = false;
4287 
4288 		/* if we're not converting, the target field is uninitialized */
4289 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4290 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4291 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4292 			bctl->data.target : fs_info->avail_data_alloc_bits;
4293 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4294 
4295 	if (reducing_redundancy) {
4296 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4297 			btrfs_info(fs_info,
4298 			   "balance: force reducing metadata redundancy");
4299 		} else {
4300 			btrfs_err(fs_info,
4301 	"balance: reduces metadata redundancy, use --force if you want this");
4302 			ret = -EINVAL;
4303 			goto out;
4304 		}
4305 	}
4306 
4307 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4308 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4309 		btrfs_warn(fs_info,
4310 	"balance: metadata profile %s has lower redundancy than data profile %s",
4311 				btrfs_bg_type_to_raid_name(meta_target),
4312 				btrfs_bg_type_to_raid_name(data_target));
4313 	}
4314 
4315 	ret = insert_balance_item(fs_info, bctl);
4316 	if (ret && ret != -EEXIST)
4317 		goto out;
4318 
4319 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4320 		BUG_ON(ret == -EEXIST);
4321 		BUG_ON(fs_info->balance_ctl);
4322 		spin_lock(&fs_info->balance_lock);
4323 		fs_info->balance_ctl = bctl;
4324 		spin_unlock(&fs_info->balance_lock);
4325 	} else {
4326 		BUG_ON(ret != -EEXIST);
4327 		spin_lock(&fs_info->balance_lock);
4328 		update_balance_args(bctl);
4329 		spin_unlock(&fs_info->balance_lock);
4330 	}
4331 
4332 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4333 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4334 	describe_balance_start_or_resume(fs_info);
4335 	mutex_unlock(&fs_info->balance_mutex);
4336 
4337 	ret = __btrfs_balance(fs_info);
4338 
4339 	mutex_lock(&fs_info->balance_mutex);
4340 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4341 		btrfs_info(fs_info, "balance: paused");
4342 	/*
4343 	 * Balance can be canceled by:
4344 	 *
4345 	 * - Regular cancel request
4346 	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4347 	 *
4348 	 * - Fatal signal to "btrfs" process
4349 	 *   Either the signal caught by wait_reserve_ticket() and callers
4350 	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4351 	 *   got -ECANCELED.
4352 	 *   Either way, in this case balance_cancel_req = 0, and
4353 	 *   ret == -EINTR or ret == -ECANCELED.
4354 	 *
4355 	 * So here we only check the return value to catch canceled balance.
4356 	 */
4357 	else if (ret == -ECANCELED || ret == -EINTR)
4358 		btrfs_info(fs_info, "balance: canceled");
4359 	else
4360 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4361 
4362 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4363 
4364 	if (bargs) {
4365 		memset(bargs, 0, sizeof(*bargs));
4366 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4367 	}
4368 
4369 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4370 	    balance_need_close(fs_info)) {
4371 		reset_balance_state(fs_info);
4372 		btrfs_exclop_finish(fs_info);
4373 	}
4374 
4375 	wake_up(&fs_info->balance_wait_q);
4376 
4377 	return ret;
4378 out:
4379 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4380 		reset_balance_state(fs_info);
4381 	else
4382 		kfree(bctl);
4383 	btrfs_exclop_finish(fs_info);
4384 
4385 	return ret;
4386 }
4387 
4388 static int balance_kthread(void *data)
4389 {
4390 	struct btrfs_fs_info *fs_info = data;
4391 	int ret = 0;
4392 
4393 	mutex_lock(&fs_info->balance_mutex);
4394 	if (fs_info->balance_ctl)
4395 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4396 	mutex_unlock(&fs_info->balance_mutex);
4397 
4398 	return ret;
4399 }
4400 
4401 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4402 {
4403 	struct task_struct *tsk;
4404 
4405 	mutex_lock(&fs_info->balance_mutex);
4406 	if (!fs_info->balance_ctl) {
4407 		mutex_unlock(&fs_info->balance_mutex);
4408 		return 0;
4409 	}
4410 	mutex_unlock(&fs_info->balance_mutex);
4411 
4412 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4413 		btrfs_info(fs_info, "balance: resume skipped");
4414 		return 0;
4415 	}
4416 
4417 	/*
4418 	 * A ro->rw remount sequence should continue with the paused balance
4419 	 * regardless of who paused it (the system or the user), so set
4420 	 * the resume flag.
4421 	 */
4422 	spin_lock(&fs_info->balance_lock);
4423 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4424 	spin_unlock(&fs_info->balance_lock);
4425 
4426 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4427 	return PTR_ERR_OR_ZERO(tsk);
4428 }
4429 
4430 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4431 {
4432 	struct btrfs_balance_control *bctl;
4433 	struct btrfs_balance_item *item;
4434 	struct btrfs_disk_balance_args disk_bargs;
4435 	struct btrfs_path *path;
4436 	struct extent_buffer *leaf;
4437 	struct btrfs_key key;
4438 	int ret;
4439 
4440 	path = btrfs_alloc_path();
4441 	if (!path)
4442 		return -ENOMEM;
4443 
4444 	key.objectid = BTRFS_BALANCE_OBJECTID;
4445 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4446 	key.offset = 0;
4447 
4448 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4449 	if (ret < 0)
4450 		goto out;
4451 	if (ret > 0) { /* ret = -ENOENT; */
4452 		ret = 0;
4453 		goto out;
4454 	}
4455 
4456 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4457 	if (!bctl) {
4458 		ret = -ENOMEM;
4459 		goto out;
4460 	}
4461 
4462 	leaf = path->nodes[0];
4463 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4464 
4465 	bctl->flags = btrfs_balance_flags(leaf, item);
4466 	bctl->flags |= BTRFS_BALANCE_RESUME;
4467 
4468 	btrfs_balance_data(leaf, item, &disk_bargs);
4469 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4470 	btrfs_balance_meta(leaf, item, &disk_bargs);
4471 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4472 	btrfs_balance_sys(leaf, item, &disk_bargs);
4473 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4474 
4475 	/*
4476 	 * This should never happen, as the paused balance state is recovered
4477 	 * during mount without any chance of other exclusive ops to collide.
4478 	 *
4479 	 * This gives the exclusive op status to balance and keeps in paused
4480 	 * state until user intervention (cancel or umount). If the ownership
4481 	 * cannot be assigned, show a message but do not fail. The balance
4482 	 * is in a paused state and must have fs_info::balance_ctl properly
4483 	 * set up.
4484 	 */
4485 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4486 		btrfs_warn(fs_info,
4487 	"balance: cannot set exclusive op status, resume manually");
4488 
4489 	btrfs_release_path(path);
4490 
4491 	mutex_lock(&fs_info->balance_mutex);
4492 	BUG_ON(fs_info->balance_ctl);
4493 	spin_lock(&fs_info->balance_lock);
4494 	fs_info->balance_ctl = bctl;
4495 	spin_unlock(&fs_info->balance_lock);
4496 	mutex_unlock(&fs_info->balance_mutex);
4497 out:
4498 	btrfs_free_path(path);
4499 	return ret;
4500 }
4501 
4502 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4503 {
4504 	int ret = 0;
4505 
4506 	mutex_lock(&fs_info->balance_mutex);
4507 	if (!fs_info->balance_ctl) {
4508 		mutex_unlock(&fs_info->balance_mutex);
4509 		return -ENOTCONN;
4510 	}
4511 
4512 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4513 		atomic_inc(&fs_info->balance_pause_req);
4514 		mutex_unlock(&fs_info->balance_mutex);
4515 
4516 		wait_event(fs_info->balance_wait_q,
4517 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4518 
4519 		mutex_lock(&fs_info->balance_mutex);
4520 		/* we are good with balance_ctl ripped off from under us */
4521 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4522 		atomic_dec(&fs_info->balance_pause_req);
4523 	} else {
4524 		ret = -ENOTCONN;
4525 	}
4526 
4527 	mutex_unlock(&fs_info->balance_mutex);
4528 	return ret;
4529 }
4530 
4531 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4532 {
4533 	mutex_lock(&fs_info->balance_mutex);
4534 	if (!fs_info->balance_ctl) {
4535 		mutex_unlock(&fs_info->balance_mutex);
4536 		return -ENOTCONN;
4537 	}
4538 
4539 	/*
4540 	 * A paused balance with the item stored on disk can be resumed at
4541 	 * mount time if the mount is read-write. Otherwise it's still paused
4542 	 * and we must not allow cancelling as it deletes the item.
4543 	 */
4544 	if (sb_rdonly(fs_info->sb)) {
4545 		mutex_unlock(&fs_info->balance_mutex);
4546 		return -EROFS;
4547 	}
4548 
4549 	atomic_inc(&fs_info->balance_cancel_req);
4550 	/*
4551 	 * If balance is running, just wait and return; the balance item is
4552 	 * deleted in btrfs_balance() in this case.
4553 	 */
4554 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4555 		mutex_unlock(&fs_info->balance_mutex);
4556 		wait_event(fs_info->balance_wait_q,
4557 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4558 		mutex_lock(&fs_info->balance_mutex);
4559 	} else {
4560 		mutex_unlock(&fs_info->balance_mutex);
4561 		/*
4562 		 * Lock released to allow other waiters to continue; we'll
4563 		 * reexamine the status afterwards.
4564 		 */
4565 		mutex_lock(&fs_info->balance_mutex);
4566 
4567 		if (fs_info->balance_ctl) {
4568 			reset_balance_state(fs_info);
4569 			btrfs_exclop_finish(fs_info);
4570 			btrfs_info(fs_info, "balance: canceled");
4571 		}
4572 	}
4573 
4574 	BUG_ON(fs_info->balance_ctl ||
4575 		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4576 	atomic_dec(&fs_info->balance_cancel_req);
4577 	mutex_unlock(&fs_info->balance_mutex);
4578 	return 0;
4579 }
4580 
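/*
 * Worker for the btrfs-uuid kthread: walk all root items and add missing
 * UUID tree entries for each subvolume's uuid and received_uuid.
 */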
4581 int btrfs_uuid_scan_kthread(void *data)
4582 {
4583 	struct btrfs_fs_info *fs_info = data;
4584 	struct btrfs_root *root = fs_info->tree_root;
4585 	struct btrfs_key key;
4586 	struct btrfs_path *path = NULL;
4587 	int ret = 0;
4588 	struct extent_buffer *eb;
4589 	int slot;
4590 	struct btrfs_root_item root_item;
4591 	u32 item_size;
4592 	struct btrfs_trans_handle *trans = NULL;
4593 	bool closing = false;
4594 
4595 	path = btrfs_alloc_path();
4596 	if (!path) {
4597 		ret = -ENOMEM;
4598 		goto out;
4599 	}
4600 
4601 	key.objectid = 0;
4602 	key.type = BTRFS_ROOT_ITEM_KEY;
4603 	key.offset = 0;
4604 
4605 	while (1) {
4606 		if (btrfs_fs_closing(fs_info)) {
4607 			closing = true;
4608 			break;
4609 		}
4610 		ret = btrfs_search_forward(root, &key, path,
4611 				BTRFS_OLDEST_GENERATION);
4612 		if (ret) {
4613 			if (ret > 0)
4614 				ret = 0;
4615 			break;
4616 		}
4617 
4618 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4619 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4620 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4621 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4622 			goto skip;
4623 
4624 		eb = path->nodes[0];
4625 		slot = path->slots[0];
4626 		item_size = btrfs_item_size_nr(eb, slot);
4627 		if (item_size < sizeof(root_item))
4628 			goto skip;
4629 
4630 		read_extent_buffer(eb, &root_item,
4631 				   btrfs_item_ptr_offset(eb, slot),
4632 				   (int)sizeof(root_item));
4633 		if (btrfs_root_refs(&root_item) == 0)
4634 			goto skip;
4635 
4636 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4637 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4638 			if (trans)
4639 				goto update_tree;
4640 
4641 			btrfs_release_path(path);
4642 			/*
4643 			 * 1 - subvol uuid item
4644 			 * 1 - received_subvol uuid item
4645 			 */
4646 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4647 			if (IS_ERR(trans)) {
4648 				ret = PTR_ERR(trans);
4649 				break;
4650 			}
4651 			continue;
4652 		} else {
4653 			goto skip;
4654 		}
4655 update_tree:
4656 		btrfs_release_path(path);
4657 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4658 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4659 						  BTRFS_UUID_KEY_SUBVOL,
4660 						  key.objectid);
4661 			if (ret < 0) {
4662 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4663 					ret);
4664 				break;
4665 			}
4666 		}
4667 
4668 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4669 			ret = btrfs_uuid_tree_add(trans,
4670 						  root_item.received_uuid,
4671 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4672 						  key.objectid);
4673 			if (ret < 0) {
4674 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4675 					ret);
4676 				break;
4677 			}
4678 		}
4679 
4680 skip:
4681 		btrfs_release_path(path);
4682 		if (trans) {
4683 			ret = btrfs_end_transaction(trans);
4684 			trans = NULL;
4685 			if (ret)
4686 				break;
4687 		}
4688 
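		/*
		 * Advance to the next key in (objectid, type, offset) order
		 * for the next btrfs_search_forward() iteration; stop once
		 * the whole keyspace has been covered.
		 */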
4689 		if (key.offset < (u64)-1) {
4690 			key.offset++;
4691 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4692 			key.offset = 0;
4693 			key.type = BTRFS_ROOT_ITEM_KEY;
4694 		} else if (key.objectid < (u64)-1) {
4695 			key.offset = 0;
4696 			key.type = BTRFS_ROOT_ITEM_KEY;
4697 			key.objectid++;
4698 		} else {
4699 			break;
4700 		}
4701 		cond_resched();
4702 	}
4703 
4704 out:
4705 	btrfs_free_path(path);
4706 	if (trans && !IS_ERR(trans))
4707 		btrfs_end_transaction(trans);
4708 	if (ret)
4709 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4710 	else if (!closing)
4711 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4712 	up(&fs_info->uuid_tree_rescan_sem);
4713 	return 0;
4714 }
4715 
4716 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4717 {
4718 	struct btrfs_trans_handle *trans;
4719 	struct btrfs_root *tree_root = fs_info->tree_root;
4720 	struct btrfs_root *uuid_root;
4721 	struct task_struct *task;
4722 	int ret;
4723 
4724 	/*
4725 	 * 1 - root node
4726 	 * 1 - root item
4727 	 */
4728 	trans = btrfs_start_transaction(tree_root, 2);
4729 	if (IS_ERR(trans))
4730 		return PTR_ERR(trans);
4731 
4732 	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4733 	if (IS_ERR(uuid_root)) {
4734 		ret = PTR_ERR(uuid_root);
4735 		btrfs_abort_transaction(trans, ret);
4736 		btrfs_end_transaction(trans);
4737 		return ret;
4738 	}
4739 
4740 	fs_info->uuid_root = uuid_root;
4741 
4742 	ret = btrfs_commit_transaction(trans);
4743 	if (ret)
4744 		return ret;
4745 
4746 	down(&fs_info->uuid_tree_rescan_sem);
4747 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4748 	if (IS_ERR(task)) {
4749 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4750 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4751 		up(&fs_info->uuid_tree_rescan_sem);
4752 		return PTR_ERR(task);
4753 	}
4754 
4755 	return 0;
4756 }
4757 
4758 /*
4759  * Shrinking a device means finding all of the device extents past
4760  * the new size, and then following the back refs to the chunks.
4761  * The chunk relocation code actually frees the device extents.
4762  */
4763 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4764 {
4765 	struct btrfs_fs_info *fs_info = device->fs_info;
4766 	struct btrfs_root *root = fs_info->dev_root;
4767 	struct btrfs_trans_handle *trans;
4768 	struct btrfs_dev_extent *dev_extent = NULL;
4769 	struct btrfs_path *path;
4770 	u64 length;
4771 	u64 chunk_offset;
4772 	int ret;
4773 	int slot;
4774 	int failed = 0;
4775 	bool retried = false;
4776 	struct extent_buffer *l;
4777 	struct btrfs_key key;
4778 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4779 	u64 old_total = btrfs_super_total_bytes(super_copy);
4780 	u64 old_size = btrfs_device_get_total_bytes(device);
4781 	u64 diff;
4782 	u64 start;
4783 
4784 	new_size = round_down(new_size, fs_info->sectorsize);
4785 	start = new_size;
4786 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4787 
4788 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4789 		return -EINVAL;
4790 
4791 	path = btrfs_alloc_path();
4792 	if (!path)
4793 		return -ENOMEM;
4794 
4795 	path->reada = READA_BACK;
4796 
4797 	trans = btrfs_start_transaction(root, 0);
4798 	if (IS_ERR(trans)) {
4799 		btrfs_free_path(path);
4800 		return PTR_ERR(trans);
4801 	}
4802 
4803 	mutex_lock(&fs_info->chunk_mutex);
4804 
4805 	btrfs_device_set_total_bytes(device, new_size);
4806 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4807 		device->fs_devices->total_rw_bytes -= diff;
4808 		atomic64_sub(diff, &fs_info->free_chunk_space);
4809 	}
4810 
4811 	/*
4812 	 * Once the device's size has been set to the new size, ensure all
4813 	 * in-memory chunks are synced to disk so that the loop below sees them
4814 	 * and relocates them accordingly.
4815 	 */
4816 	if (contains_pending_extent(device, &start, diff)) {
4817 		mutex_unlock(&fs_info->chunk_mutex);
4818 		ret = btrfs_commit_transaction(trans);
4819 		if (ret)
4820 			goto done;
4821 	} else {
4822 		mutex_unlock(&fs_info->chunk_mutex);
4823 		btrfs_end_transaction(trans);
4824 	}
4825 
4826 again:
4827 	key.objectid = device->devid;
4828 	key.offset = (u64)-1;
4829 	key.type = BTRFS_DEV_EXTENT_KEY;
4830 
4831 	do {
4832 		mutex_lock(&fs_info->reclaim_bgs_lock);
4833 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4834 		if (ret < 0) {
4835 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4836 			goto done;
4837 		}
4838 
4839 		ret = btrfs_previous_item(root, path, 0, key.type);
4840 		if (ret) {
4841 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4842 			if (ret < 0)
4843 				goto done;
4844 			ret = 0;
4845 			btrfs_release_path(path);
4846 			break;
4847 		}
4848 
4849 		l = path->nodes[0];
4850 		slot = path->slots[0];
4851 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4852 
4853 		if (key.objectid != device->devid) {
4854 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4855 			btrfs_release_path(path);
4856 			break;
4857 		}
4858 
4859 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4860 		length = btrfs_dev_extent_length(l, dev_extent);
4861 
4862 		if (key.offset + length <= new_size) {
4863 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4864 			btrfs_release_path(path);
4865 			break;
4866 		}
4867 
4868 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4869 		btrfs_release_path(path);
4870 
4871 		/*
4872 		 * We may be relocating the only data chunk we have,
4873 		 * which could potentially end up losing the data's
4874 		 * raid profile, so let's allocate an empty one in
4875 		 * advance.
4876 		 */
4877 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4878 		if (ret < 0) {
4879 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4880 			goto done;
4881 		}
4882 
4883 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4884 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4885 		if (ret == -ENOSPC) {
4886 			failed++;
4887 		} else if (ret) {
4888 			if (ret == -ETXTBSY) {
4889 				btrfs_warn(fs_info,
4890 		   "could not shrink block group %llu due to active swapfile",
4891 					   chunk_offset);
4892 			}
4893 			goto done;
4894 		}
4895 	} while (key.offset-- > 0);
4896 
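	/*
	 * Relocating one chunk can free space that lets a previously failed
	 * (-ENOSPC) chunk relocate, so retry the whole scan exactly once
	 * before giving up with -ENOSPC.
	 */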
4897 	if (failed && !retried) {
4898 		failed = 0;
4899 		retried = true;
4900 		goto again;
4901 	} else if (failed && retried) {
4902 		ret = -ENOSPC;
4903 		goto done;
4904 	}
4905 
4906 	/* Shrinking succeeded, else we would be at "done". */
4907 	trans = btrfs_start_transaction(root, 0);
4908 	if (IS_ERR(trans)) {
4909 		ret = PTR_ERR(trans);
4910 		goto done;
4911 	}
4912 
4913 	mutex_lock(&fs_info->chunk_mutex);
4914 	/* Clear all state bits beyond the shrunk device size */
4915 	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4916 			  CHUNK_STATE_MASK);
4917 
4918 	btrfs_device_set_disk_total_bytes(device, new_size);
4919 	if (list_empty(&device->post_commit_list))
4920 		list_add_tail(&device->post_commit_list,
4921 			      &trans->transaction->dev_update_list);
4922 
4923 	WARN_ON(diff > old_total);
4924 	btrfs_set_super_total_bytes(super_copy,
4925 			round_down(old_total - diff, fs_info->sectorsize));
4926 	mutex_unlock(&fs_info->chunk_mutex);
4927 
4928 	/* Now btrfs_update_device() will change the on-disk size. */
4929 	ret = btrfs_update_device(trans, device);
4930 	if (ret < 0) {
4931 		btrfs_abort_transaction(trans, ret);
4932 		btrfs_end_transaction(trans);
4933 	} else {
4934 		ret = btrfs_commit_transaction(trans);
4935 	}
4936 done:
4937 	btrfs_free_path(path);
4938 	if (ret) {
4939 		mutex_lock(&fs_info->chunk_mutex);
4940 		btrfs_device_set_total_bytes(device, old_size);
4941 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4942 			device->fs_devices->total_rw_bytes += diff;
4943 		atomic64_add(diff, &fs_info->free_chunk_space);
4944 		mutex_unlock(&fs_info->chunk_mutex);
4945 	}
4946 	return ret;
4947 }
4948 
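/*
 * Append a chunk item to the superblock's sys_chunk_array.  Each entry is
 * a packed btrfs_disk_key immediately followed by the chunk item itself,
 * which is why item_size grows by sizeof(disk_key) before the array size
 * is updated.
 */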
4949 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4950 			   struct btrfs_key *key,
4951 			   struct btrfs_chunk *chunk, int item_size)
4952 {
4953 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4954 	struct btrfs_disk_key disk_key;
4955 	u32 array_size;
4956 	u8 *ptr;
4957 
4958 	lockdep_assert_held(&fs_info->chunk_mutex);
4959 
4960 	array_size = btrfs_super_sys_array_size(super_copy);
4961 	if (array_size + item_size + sizeof(disk_key)
4962 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
4963 		return -EFBIG;
4964 
4965 	ptr = super_copy->sys_chunk_array + array_size;
4966 	btrfs_cpu_key_to_disk(&disk_key, key);
4967 	memcpy(ptr, &disk_key, sizeof(disk_key));
4968 	ptr += sizeof(disk_key);
4969 	memcpy(ptr, chunk, item_size);
4970 	item_size += sizeof(disk_key);
4971 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4972 
4973 	return 0;
4974 }
4975 
4976 /*
4977  * sort the devices in descending order by max_avail, total_avail
4978  */
4979 static int btrfs_cmp_device_info(const void *a, const void *b)
4980 {
4981 	const struct btrfs_device_info *di_a = a;
4982 	const struct btrfs_device_info *di_b = b;
4983 
4984 	if (di_a->max_avail > di_b->max_avail)
4985 		return -1;
4986 	if (di_a->max_avail < di_b->max_avail)
4987 		return 1;
4988 	if (di_a->total_avail > di_b->total_avail)
4989 		return -1;
4990 	if (di_a->total_avail < di_b->total_avail)
4991 		return 1;
4992 	return 0;
4993 }
4994 
4995 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4996 {
4997 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4998 		return;
4999 
5000 	btrfs_set_fs_incompat(info, RAID56);
5001 }
5002 
5003 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5004 {
5005 	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5006 		return;
5007 
5008 	btrfs_set_fs_incompat(info, RAID1C34);
5009 }
5010 
5011 /*
5012  * Structure used internally by the btrfs_create_chunk() function.
5013  * Wraps needed parameters.
5014  */
5015 struct alloc_chunk_ctl {
5016 	u64 start;
5017 	u64 type;
5018 	/* Total number of stripes to allocate */
5019 	int num_stripes;
5020 	/* sub_stripes info for map */
5021 	int sub_stripes;
5022 	/* Stripes per device */
5023 	int dev_stripes;
5024 	/* Maximum number of devices to use */
5025 	int devs_max;
5026 	/* Minimum number of devices to use */
5027 	int devs_min;
5028 	/* ndevs has to be a multiple of this */
5029 	int devs_increment;
5030 	/* Number of copies */
5031 	int ncopies;
5032 	/* Number of stripes worth of bytes to store parity information */
5033 	int nparity;
5034 	u64 max_stripe_size;
5035 	u64 max_chunk_size;
5036 	u64 dev_extent_min;
5037 	u64 stripe_size;
5038 	u64 chunk_size;
5039 	int ndevs;
5040 };
5041 
5042 static void init_alloc_chunk_ctl_policy_regular(
5043 				struct btrfs_fs_devices *fs_devices,
5044 				struct alloc_chunk_ctl *ctl)
5045 {
5046 	u64 type = ctl->type;
5047 
5048 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5049 		ctl->max_stripe_size = SZ_1G;
5050 		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5051 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5052 		/* For larger filesystems, use larger metadata chunks */
5053 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5054 			ctl->max_stripe_size = SZ_1G;
5055 		else
5056 			ctl->max_stripe_size = SZ_256M;
5057 		ctl->max_chunk_size = ctl->max_stripe_size;
5058 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5059 		ctl->max_stripe_size = SZ_32M;
5060 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5061 		ctl->devs_max = min_t(int, ctl->devs_max,
5062 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5063 	} else {
5064 		BUG();
5065 	}
5066 
5067 	/* We don't want a chunk larger than 10% of writable space */
5068 	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
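	/* Note: div_factor(x, 1) is x * 1 / 10, i.e. 10% of x. */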
5069 				  ctl->max_chunk_size);
5070 	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5071 }
5072 
5073 static void init_alloc_chunk_ctl_policy_zoned(
5074 				      struct btrfs_fs_devices *fs_devices,
5075 				      struct alloc_chunk_ctl *ctl)
5076 {
5077 	u64 zone_size = fs_devices->fs_info->zone_size;
5078 	u64 limit;
5079 	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5080 	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5081 	u64 min_chunk_size = min_data_stripes * zone_size;
5082 	u64 type = ctl->type;
5083 
5084 	ctl->max_stripe_size = zone_size;
5085 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5086 		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5087 						 zone_size);
5088 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5089 		ctl->max_chunk_size = ctl->max_stripe_size;
5090 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5091 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5092 		ctl->devs_max = min_t(int, ctl->devs_max,
5093 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5094 	} else {
5095 		BUG();
5096 	}
5097 
5098 	/* We don't want a chunk larger than 10% of writable space */
5099 	limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5100 			       zone_size),
5101 		    min_chunk_size);
5102 	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5103 	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5104 }
5105 
5106 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5107 				 struct alloc_chunk_ctl *ctl)
5108 {
5109 	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5110 
5111 	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5112 	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5113 	ctl->devs_max = btrfs_raid_array[index].devs_max;
5114 	if (!ctl->devs_max)
5115 		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5116 	ctl->devs_min = btrfs_raid_array[index].devs_min;
5117 	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5118 	ctl->ncopies = btrfs_raid_array[index].ncopies;
5119 	ctl->nparity = btrfs_raid_array[index].nparity;
5120 	ctl->ndevs = 0;
5121 
5122 	switch (fs_devices->chunk_alloc_policy) {
5123 	case BTRFS_CHUNK_ALLOC_REGULAR:
5124 		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5125 		break;
5126 	case BTRFS_CHUNK_ALLOC_ZONED:
5127 		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5128 		break;
5129 	default:
5130 		BUG();
5131 	}
5132 }
5133 
5134 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5135 			      struct alloc_chunk_ctl *ctl,
5136 			      struct btrfs_device_info *devices_info)
5137 {
5138 	struct btrfs_fs_info *info = fs_devices->fs_info;
5139 	struct btrfs_device *device;
5140 	u64 total_avail;
5141 	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5142 	int ret;
5143 	int ndevs = 0;
5144 	u64 max_avail;
5145 	u64 dev_offset;
5146 
5147 	/*
5148 	 * in the first pass through the devices list, we gather information
5149 	 * about the available holes on each device.
5150 	 */
5151 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5152 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5153 			WARN(1, KERN_ERR
5154 			       "BTRFS: read-only device in alloc_list\n");
5155 			continue;
5156 		}
5157 
5158 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5159 					&device->dev_state) ||
5160 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5161 			continue;
5162 
5163 		if (device->total_bytes > device->bytes_used)
5164 			total_avail = device->total_bytes - device->bytes_used;
5165 		else
5166 			total_avail = 0;
5167 
5168 		/* If there is not enough space on this device, skip it. */
5169 		if (total_avail < ctl->dev_extent_min)
5170 			continue;
5171 
5172 		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5173 					   &max_avail);
5174 		if (ret && ret != -ENOSPC)
5175 			return ret;
5176 
5177 		if (ret == 0)
5178 			max_avail = dev_extent_want;
5179 
5180 		if (max_avail < ctl->dev_extent_min) {
5181 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5182 				btrfs_debug(info,
5183 			"%s: devid %llu has no free space, have=%llu want=%llu",
5184 					    __func__, device->devid, max_avail,
5185 					    ctl->dev_extent_min);
5186 			continue;
5187 		}
5188 
5189 		if (ndevs == fs_devices->rw_devices) {
5190 			WARN(1, "%s: found more than %llu devices\n",
5191 			     __func__, fs_devices->rw_devices);
5192 			break;
5193 		}
5194 		devices_info[ndevs].dev_offset = dev_offset;
5195 		devices_info[ndevs].max_avail = max_avail;
5196 		devices_info[ndevs].total_avail = total_avail;
5197 		devices_info[ndevs].dev = device;
5198 		++ndevs;
5199 	}
5200 	ctl->ndevs = ndevs;
5201 
5202 	/*
5203 	 * now sort the devices by hole size / available space
5204 	 */
5205 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5206 	     btrfs_cmp_device_info, NULL);
5207 
5208 	return 0;
5209 }
5210 
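/*
 * Illustrative example: for DUP (dev_stripes == 2) the smallest usable
 * hole sets the stripe size, so max_avail of 10GiB on that device gives
 * stripe_size = 5GiB before the max_chunk_size clamp and the final
 * round_down() to BTRFS_STRIPE_LEN below.
 */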
5211 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5212 				      struct btrfs_device_info *devices_info)
5213 {
5214 	/* Number of stripes that count for block group size */
5215 	int data_stripes;
5216 
5217 	/*
5218 	 * The primary goal is to maximize the number of stripes, so use as
5219 	 * many devices as possible, even if the stripes are not maximum sized.
5220 	 *
5221 	 * The DUP profile stores more than one stripe per device; since
5222 	 * max_avail is the total size, we have to adjust.
5223 	 */
5224 	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5225 				   ctl->dev_stripes);
5226 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5227 
5228 	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5229 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5230 
5231 	/*
5232 	 * Use the number of data stripes to figure out how big this chunk is
5233 	 * really going to be in terms of logical address space, and compare
5234 	 * that answer with the max chunk size. If it's higher, we try to
5235 	 * reduce stripe_size.
5236 	 */
5237 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5238 		/*
5239 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5240 		 * then use it, unless it ends up being even bigger than the
5241 		 * previous value we had already.
5242 		 */
5243 		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5244 							data_stripes), SZ_16M),
5245 				       ctl->stripe_size);
5246 	}
5247 
5248 	/* Align to BTRFS_STRIPE_LEN */
5249 	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5250 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5251 
5252 	return 0;
5253 }
5254 
5255 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5256 				    struct btrfs_device_info *devices_info)
5257 {
5258 	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5259 	/* Number of stripes that count for block group size */
5260 	int data_stripes;
5261 
5262 	/*
5263 	 * It should hold because:
5264 	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5265 	 */
5266 	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5267 
5268 	ctl->stripe_size = zone_size;
5269 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5270 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5271 
5272 	/* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
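	/*
	 * Derivation: chunk_size = stripe_size * data_stripes must not exceed
	 * max_chunk_size, and data_stripes = (ndevs * dev_stripes - nparity) /
	 * ncopies, so solve for ndevs as done below.
	 */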
5273 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5274 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5275 					     ctl->stripe_size) + ctl->nparity,
5276 				     ctl->dev_stripes);
5277 		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5278 		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5279 		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5280 	}
5281 
5282 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5283 
5284 	return 0;
5285 }
5286 
5287 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5288 			      struct alloc_chunk_ctl *ctl,
5289 			      struct btrfs_device_info *devices_info)
5290 {
5291 	struct btrfs_fs_info *info = fs_devices->fs_info;
5292 
5293 	/*
5294 	 * Round down to the number of usable stripes: devs_increment can be
5295 	 * any number, so we can't use round_down(), which requires a power of
5296 	 * 2, while rounddown() is safe.
5297 	 */
5298 	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5299 
5300 	if (ctl->ndevs < ctl->devs_min) {
5301 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5302 			btrfs_debug(info,
5303 	"%s: not enough devices with free space: have=%d minimum required=%d",
5304 				    __func__, ctl->ndevs, ctl->devs_min);
5305 		}
5306 		return -ENOSPC;
5307 	}
5308 
5309 	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5310 
5311 	switch (fs_devices->chunk_alloc_policy) {
5312 	case BTRFS_CHUNK_ALLOC_REGULAR:
5313 		return decide_stripe_size_regular(ctl, devices_info);
5314 	case BTRFS_CHUNK_ALLOC_ZONED:
5315 		return decide_stripe_size_zoned(ctl, devices_info);
5316 	default:
5317 		BUG();
5318 	}
5319 }
5320 
5321 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5322 			struct alloc_chunk_ctl *ctl,
5323 			struct btrfs_device_info *devices_info)
5324 {
5325 	struct btrfs_fs_info *info = trans->fs_info;
5326 	struct map_lookup *map = NULL;
5327 	struct extent_map_tree *em_tree;
5328 	struct btrfs_block_group *block_group;
5329 	struct extent_map *em;
5330 	u64 start = ctl->start;
5331 	u64 type = ctl->type;
5332 	int ret;
5333 	int i;
5334 	int j;
5335 
5336 	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5337 	if (!map)
5338 		return ERR_PTR(-ENOMEM);
5339 	map->num_stripes = ctl->num_stripes;
5340 
5341 	for (i = 0; i < ctl->ndevs; ++i) {
5342 		for (j = 0; j < ctl->dev_stripes; ++j) {
5343 			int s = i * ctl->dev_stripes + j;
5344 			map->stripes[s].dev = devices_info[i].dev;
5345 			map->stripes[s].physical = devices_info[i].dev_offset +
5346 						   j * ctl->stripe_size;
5347 		}
5348 	}
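	/*
	 * Illustrative layout: for DUP (ndevs = 1, dev_stripes = 2), stripes
	 * 0 and 1 both land on device 0, at dev_offset and
	 * dev_offset + stripe_size respectively.
	 */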
5349 	map->stripe_len = BTRFS_STRIPE_LEN;
5350 	map->io_align = BTRFS_STRIPE_LEN;
5351 	map->io_width = BTRFS_STRIPE_LEN;
5352 	map->type = type;
5353 	map->sub_stripes = ctl->sub_stripes;
5354 
5355 	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5356 
5357 	em = alloc_extent_map();
5358 	if (!em) {
5359 		kfree(map);
5360 		return ERR_PTR(-ENOMEM);
5361 	}
5362 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5363 	em->map_lookup = map;
5364 	em->start = start;
5365 	em->len = ctl->chunk_size;
5366 	em->block_start = 0;
5367 	em->block_len = em->len;
5368 	em->orig_block_len = ctl->stripe_size;
5369 
5370 	em_tree = &info->mapping_tree;
5371 	write_lock(&em_tree->lock);
5372 	ret = add_extent_mapping(em_tree, em, 0);
5373 	if (ret) {
5374 		write_unlock(&em_tree->lock);
5375 		free_extent_map(em);
5376 		return ERR_PTR(ret);
5377 	}
5378 	write_unlock(&em_tree->lock);
5379 
5380 	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5381 	if (IS_ERR(block_group))
5382 		goto error_del_extent;
5383 
5384 	for (i = 0; i < map->num_stripes; i++) {
5385 		struct btrfs_device *dev = map->stripes[i].dev;
5386 
5387 		btrfs_device_set_bytes_used(dev,
5388 					    dev->bytes_used + ctl->stripe_size);
5389 		if (list_empty(&dev->post_commit_list))
5390 			list_add_tail(&dev->post_commit_list,
5391 				      &trans->transaction->dev_update_list);
5392 	}
5393 
5394 	atomic64_sub(ctl->stripe_size * map->num_stripes,
5395 		     &info->free_chunk_space);
5396 
5397 	free_extent_map(em);
5398 	check_raid56_incompat_flag(info, type);
5399 	check_raid1c34_incompat_flag(info, type);
5400 
5401 	return block_group;
5402 
5403 error_del_extent:
5404 	write_lock(&em_tree->lock);
5405 	remove_extent_mapping(em_tree, em);
5406 	write_unlock(&em_tree->lock);
5407 
5408 	/* One for our allocation */
5409 	free_extent_map(em);
5410 	/* One for the tree reference */
5411 	free_extent_map(em);
5412 
5413 	return block_group;
5414 }
5415 
5416 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5417 					    u64 type)
5418 {
5419 	struct btrfs_fs_info *info = trans->fs_info;
5420 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5421 	struct btrfs_device_info *devices_info = NULL;
5422 	struct alloc_chunk_ctl ctl;
5423 	struct btrfs_block_group *block_group;
5424 	int ret;
5425 
5426 	lockdep_assert_held(&info->chunk_mutex);
5427 
5428 	if (!alloc_profile_is_valid(type, 0)) {
5429 		ASSERT(0);
5430 		return ERR_PTR(-EINVAL);
5431 	}
5432 
5433 	if (list_empty(&fs_devices->alloc_list)) {
5434 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5435 			btrfs_debug(info, "%s: no writable device", __func__);
5436 		return ERR_PTR(-ENOSPC);
5437 	}
5438 
5439 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5440 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5441 		ASSERT(0);
5442 		return ERR_PTR(-EINVAL);
5443 	}
5444 
5445 	ctl.start = find_next_chunk(info);
5446 	ctl.type = type;
5447 	init_alloc_chunk_ctl(fs_devices, &ctl);
5448 
5449 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5450 			       GFP_NOFS);
5451 	if (!devices_info)
5452 		return ERR_PTR(-ENOMEM);
5453 
5454 	ret = gather_device_info(fs_devices, &ctl, devices_info);
5455 	if (ret < 0) {
5456 		block_group = ERR_PTR(ret);
5457 		goto out;
5458 	}
5459 
5460 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5461 	if (ret < 0) {
5462 		block_group = ERR_PTR(ret);
5463 		goto out;
5464 	}
5465 
5466 	block_group = create_chunk(trans, &ctl, devices_info);
5467 
5468 out:
5469 	kfree(devices_info);
5470 	return block_group;
5471 }
5472 
5473 /*
5474  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5475  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5476  * system chunks.
5477  *
5478  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5479  * phases.
5480  */
5481 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5482 				     struct btrfs_block_group *bg)
5483 {
5484 	struct btrfs_fs_info *fs_info = trans->fs_info;
5485 	struct btrfs_root *extent_root = fs_info->extent_root;
5486 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5487 	struct btrfs_key key;
5488 	struct btrfs_chunk *chunk;
5489 	struct btrfs_stripe *stripe;
5490 	struct extent_map *em;
5491 	struct map_lookup *map;
5492 	size_t item_size;
5493 	int i;
5494 	int ret;
5495 
5496 	/*
5497 	 * We take the chunk_mutex for 2 reasons:
5498 	 *
5499 	 * 1) Updates and insertions in the chunk btree must be done while holding
5500 	 *    the chunk_mutex, as well as updating the system chunk array in the
5501 	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5502 	 *    details;
5503 	 *
5504 	 * 2) To prevent races with the final phase of a device replace operation
5505 	 *    that replaces the device object associated with the map's stripes,
5506 	 *    because the device object's id can change at any time during that
5507 	 *    final phase of the device replace operation
5508 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5509 	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5510 	 *    which would cause a failure when updating the device item, which does
5511 	 *    not exist, or persisting a stripe of the chunk item with such an ID.
5512 	 *    Here we can't use the device_list_mutex because our caller already
5513 	 *    has locked the chunk_mutex, and the final phase of device replace
5514 	 *    acquires both mutexes - first the device_list_mutex and then the
5515 	 *    chunk_mutex. Using any of those two mutexes protects us from a
5516 	 *    concurrent device replace.
5517 	 */
5518 	lockdep_assert_held(&fs_info->chunk_mutex);
5519 
5520 	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5521 	if (IS_ERR(em)) {
5522 		ret = PTR_ERR(em);
5523 		btrfs_abort_transaction(trans, ret);
5524 		return ret;
5525 	}
5526 
5527 	map = em->map_lookup;
5528 	item_size = btrfs_chunk_item_size(map->num_stripes);
5529 
5530 	chunk = kzalloc(item_size, GFP_NOFS);
5531 	if (!chunk) {
5532 		ret = -ENOMEM;
5533 		btrfs_abort_transaction(trans, ret);
5534 		goto out;
5535 	}
5536 
5537 	for (i = 0; i < map->num_stripes; i++) {
5538 		struct btrfs_device *device = map->stripes[i].dev;
5539 
5540 		ret = btrfs_update_device(trans, device);
5541 		if (ret)
5542 			goto out;
5543 	}
5544 
5545 	stripe = &chunk->stripe;
5546 	for (i = 0; i < map->num_stripes; i++) {
5547 		struct btrfs_device *device = map->stripes[i].dev;
5548 		const u64 dev_offset = map->stripes[i].physical;
5549 
5550 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5551 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5552 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5553 		stripe++;
5554 	}
5555 
5556 	btrfs_set_stack_chunk_length(chunk, bg->length);
5557 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5558 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5559 	btrfs_set_stack_chunk_type(chunk, map->type);
5560 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5561 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5562 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5563 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5564 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5565 
5566 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5567 	key.type = BTRFS_CHUNK_ITEM_KEY;
5568 	key.offset = bg->start;
5569 
5570 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5571 	if (ret)
5572 		goto out;
5573 
5574 	bg->chunk_item_inserted = 1;
5575 
5576 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5577 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5578 		if (ret)
5579 			goto out;
5580 	}
5581 
5582 out:
5583 	kfree(chunk);
5584 	free_extent_map(em);
5585 	return ret;
5586 }
5587 
5588 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5589 {
5590 	struct btrfs_fs_info *fs_info = trans->fs_info;
5591 	u64 alloc_profile;
5592 	struct btrfs_block_group *meta_bg;
5593 	struct btrfs_block_group *sys_bg;
5594 
5595 	/*
5596 	 * When adding a new device for sprouting, the seed device is read-only
5597 	 * so we must first allocate a metadata and a system chunk. But before
5598 	 * adding the block group items to the extent, device and chunk btrees,
5599 	 * we must first:
5600 	 *
5601 	 * 1) Create both chunks without doing any changes to the btrees, as
5602 	 *    otherwise we would get -ENOSPC since the block groups from the
5603 	 *    seed device are read-only;
5604 	 *
5605 	 * 2) Add the device item for the new sprout device - finishing the setup
5606 	 *    of a new block group requires updating the device item in the chunk
5607 	 *    btree, so it must exist when we attempt to do it. The previous step
5608 	 *    ensures this does not fail with -ENOSPC.
5609 	 *
5610 	 * After that we can add the block group items to their btrees:
5611 	 * update existing device item in the chunk btree, add a new block group
5612 	 * item to the extent btree, add a new chunk item to the chunk btree and
5613 	 * finally add the new device extent items to the devices btree.
5614 	 */
5615 
5616 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5617 	meta_bg = btrfs_create_chunk(trans, alloc_profile);
5618 	if (IS_ERR(meta_bg))
5619 		return PTR_ERR(meta_bg);
5620 
5621 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5622 	sys_bg = btrfs_create_chunk(trans, alloc_profile);
5623 	if (IS_ERR(sys_bg))
5624 		return PTR_ERR(sys_bg);
5625 
5626 	return 0;
5627 }
5628 
5629 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5630 {
5631 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5632 
5633 	return btrfs_raid_array[index].tolerated_failures;
5634 }
5635 
5636 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5637 {
5638 	struct extent_map *em;
5639 	struct map_lookup *map;
5640 	int miss_ndevs = 0;
5641 	int i;
5642 	bool ret = true;
5643 
5644 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5645 	if (IS_ERR(em))
5646 		return false;
5647 
5648 	map = em->map_lookup;
5649 	for (i = 0; i < map->num_stripes; i++) {
5650 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5651 					&map->stripes[i].dev->dev_state)) {
5652 			miss_ndevs++;
5653 			continue;
5654 		}
5655 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5656 					&map->stripes[i].dev->dev_state)) {
5657 			ret = false;
5658 			goto end;
5659 		}
5660 	}
5661 
5662 	/*
5663 	 * If the number of missing devices is larger than max errors, we
5664 	 * cannot write the data into that chunk successfully.
5665 	 */
5666 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5667 		ret = false;
5668 end:
5669 	free_extent_map(em);
5670 	return ret;
5671 }
5672 
5673 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5674 {
5675 	struct extent_map *em;
5676 
5677 	while (1) {
5678 		write_lock(&tree->lock);
5679 		em = lookup_extent_mapping(tree, 0, (u64)-1);
5680 		if (em)
5681 			remove_extent_mapping(tree, em);
5682 		write_unlock(&tree->lock);
5683 		if (!em)
5684 			break;
5685 		/* once for us */
5686 		free_extent_map(em);
5687 		/* once for the tree */
5688 		free_extent_map(em);
5689 	}
5690 }
5691 
5692 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5693 {
5694 	struct extent_map *em;
5695 	struct map_lookup *map;
5696 	int ret;
5697 
5698 	em = btrfs_get_chunk_map(fs_info, logical, len);
5699 	if (IS_ERR(em))
5700 		/*
5701 		 * We could return errors for these cases, but that could get
5702 		 * ugly and we'd probably do the same thing anyway: do nothing
5703 		 * else and exit. So return 1 so the callers don't try to use
5704 		 * other copies.
5705 		 */
5706 		return 1;
5707 
5708 	map = em->map_lookup;
5709 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5710 		ret = map->num_stripes;
5711 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5712 		ret = map->sub_stripes;
5713 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5714 		ret = 2;
5715 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5716 		/*
5717 		 * There could be two corrupted data stripes; we need
5718 		 * to retry in a loop in order to rebuild the correct data.
5719 		 *
5720 		 * Fail a stripe at a time on every retry except the
5721 		 * stripe under reconstruction.
5722 		 */
5723 		ret = map->num_stripes;
5724 	else
5725 		ret = 1;
5726 	free_extent_map(em);
5727 
5728 	down_read(&fs_info->dev_replace.rwsem);
5729 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5730 	    fs_info->dev_replace.tgtdev)
5731 		ret++;
5732 	up_read(&fs_info->dev_replace.rwsem);
5733 
5734 	return ret;
5735 }
5736 
5737 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5738 				    u64 logical)
5739 {
5740 	struct extent_map *em;
5741 	struct map_lookup *map;
5742 	unsigned long len = fs_info->sectorsize;
5743 
5744 	em = btrfs_get_chunk_map(fs_info, logical, len);
5745 
5746 	if (!WARN_ON(IS_ERR(em))) {
5747 		map = em->map_lookup;
5748 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5749 			len = map->stripe_len * nr_data_stripes(map);
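		/*
		 * E.g. for RAID5 on 3 devices: nr_data_stripes() = 2, so a
		 * 64K stripe_len yields a 128K full stripe.
		 */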
5750 		free_extent_map(em);
5751 	}
5752 	return len;
5753 }
5754 
5755 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5756 {
5757 	struct extent_map *em;
5758 	struct map_lookup *map;
5759 	int ret = 0;
5760 
5761 	em = btrfs_get_chunk_map(fs_info, logical, len);
5762 
5763 	if (!WARN_ON(IS_ERR(em))) {
5764 		map = em->map_lookup;
5765 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5766 			ret = 1;
5767 		free_extent_map(em);
5768 	}
5769 	return ret;
5770 }
5771 
5772 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5773 			    struct map_lookup *map, int first,
5774 			    int dev_replace_is_ongoing)
5775 {
5776 	int i;
5777 	int num_stripes;
5778 	int preferred_mirror;
5779 	int tolerance;
5780 	struct btrfs_device *srcdev;
5781 
5782 	ASSERT((map->type &
5783 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5784 
5785 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5786 		num_stripes = map->sub_stripes;
5787 	else
5788 		num_stripes = map->num_stripes;
5789 
5790 	switch (fs_info->fs_devices->read_policy) {
5791 	default:
5792 		/* Shouldn't happen, just warn and use pid instead of failing */
5793 		btrfs_warn_rl(fs_info,
5794 			      "unknown read_policy type %u, reset to pid",
5795 			      fs_info->fs_devices->read_policy);
5796 		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5797 		fallthrough;
5798 	case BTRFS_READ_POLICY_PID:
5799 		preferred_mirror = first + (current->pid % num_stripes);
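		/*
		 * E.g. for RAID1 (num_stripes = 2): even PIDs read from
		 * mirror 'first', odd PIDs from 'first + 1', spreading
		 * readers across both copies.
		 */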
5800 		break;
5801 	}
5802 
5803 	if (dev_replace_is_ongoing &&
5804 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5805 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5806 		srcdev = fs_info->dev_replace.srcdev;
5807 	else
5808 		srcdev = NULL;
5809 
5810 	/*
5811 	 * Try to avoid the drive that is the source drive for a
5812 	 * dev-replace procedure; only choose it if no other non-missing
5813 	 * mirror is available.
5814 	 */
5815 	for (tolerance = 0; tolerance < 2; tolerance++) {
5816 		if (map->stripes[preferred_mirror].dev->bdev &&
5817 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5818 			return preferred_mirror;
5819 		for (i = first; i < first + num_stripes; i++) {
5820 			if (map->stripes[i].dev->bdev &&
5821 			    (tolerance || map->stripes[i].dev != srcdev))
5822 				return i;
5823 		}
5824 	}
5825 
5826 	/* We couldn't find one that doesn't fail. Just return something
5827 	 * and the IO error handling code will clean up eventually.
5828 	 */
5829 	return preferred_mirror;
5830 }
5831 
5832 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5833 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5834 {
5835 	int i;
5836 	int again = 1;
5837 
5838 	while (again) {
5839 		again = 0;
5840 		for (i = 0; i < num_stripes - 1; i++) {
5841 			/* Swap if parity is on a smaller index */
5842 			if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5843 				swap(bioc->stripes[i], bioc->stripes[i + 1]);
5844 				swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5845 				again = 1;
5846 			}
5847 		}
5848 	}
5849 }
5850 
5851 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5852 						       int total_stripes,
5853 						       int real_stripes)
5854 {
5855 	struct btrfs_io_context *bioc = kzalloc(
5856 		 /* The size of btrfs_io_context */
5857 		sizeof(struct btrfs_io_context) +
5858 		/* Plus the variable array for the stripes */
5859 		sizeof(struct btrfs_io_stripe) * (total_stripes) +
5860 		/* Plus the variable array for the tgt dev */
5861 		sizeof(int) * (real_stripes) +
5862 		/*
5863 		 * Plus the raid_map, which includes both the tgt dev
5864 		 * and the stripes.
5865 		 */
5866 		sizeof(u64) * (total_stripes),
5867 		GFP_NOFS|__GFP_NOFAIL);
5868 
5869 	atomic_set(&bioc->error, 0);
5870 	refcount_set(&bioc->refs, 1);
5871 
5872 	bioc->fs_info = fs_info;
5873 	bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5874 	bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
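	/*
	 * Layout of the single allocation above (illustrative):
	 *
	 *   [struct btrfs_io_context]
	 *   [total_stripes x struct btrfs_io_stripe]  <- bioc->stripes
	 *   [real_stripes  x int]                     <- bioc->tgtdev_map
	 *   [total_stripes x u64]                     <- bioc->raid_map
	 */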
5875 
5876 	return bioc;
5877 }
5878 
5879 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5880 {
5881 	WARN_ON(!refcount_read(&bioc->refs));
5882 	refcount_inc(&bioc->refs);
5883 }
5884 
5885 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5886 {
5887 	if (!bioc)
5888 		return;
5889 	if (refcount_dec_and_test(&bioc->refs))
5890 		kfree(bioc);
5891 }
5892 
5893 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5894 /*
5895  * Note that discard won't be sent to the target device of a device
5896  * replace.
5897  */
5898 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5899 					 u64 logical, u64 *length_ret,
5900 					 struct btrfs_io_context **bioc_ret)
5901 {
5902 	struct extent_map *em;
5903 	struct map_lookup *map;
5904 	struct btrfs_io_context *bioc;
5905 	u64 length = *length_ret;
5906 	u64 offset;
5907 	u64 stripe_nr;
5908 	u64 stripe_nr_end;
5909 	u64 stripe_end_offset;
5910 	u64 stripe_cnt;
5911 	u64 stripe_len;
5912 	u64 stripe_offset;
5913 	u64 num_stripes;
5914 	u32 stripe_index;
5915 	u32 factor = 0;
5916 	u32 sub_stripes = 0;
5917 	u64 stripes_per_dev = 0;
5918 	u32 remaining_stripes = 0;
5919 	u32 last_stripe = 0;
5920 	int ret = 0;
5921 	int i;
5922 
5923 	/* Discard always returns a bioc. */
5924 	ASSERT(bioc_ret);
5925 
5926 	em = btrfs_get_chunk_map(fs_info, logical, length);
5927 	if (IS_ERR(em))
5928 		return PTR_ERR(em);
5929 
5930 	map = em->map_lookup;
5931 	/* We don't discard RAID56 yet */
5932 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5933 		ret = -EOPNOTSUPP;
5934 		goto out;
5935 	}
5936 
5937 	offset = logical - em->start;
5938 	length = min_t(u64, em->start + em->len - logical, length);
5939 	*length_ret = length;
5940 
5941 	stripe_len = map->stripe_len;
5942 	/*
5943 	 * stripe_nr counts the total number of stripes we have to stride
5944 	 * to get to this block
5945 	 */
5946 	stripe_nr = div64_u64(offset, stripe_len);
5947 
5948 	/* stripe_offset is the offset of this block in its stripe */
5949 	stripe_offset = offset - stripe_nr * stripe_len;
5950 
5951 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5952 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5953 	stripe_cnt = stripe_nr_end - stripe_nr;
5954 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5955 			    (offset + length);
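	/*
	 * Worked example (illustrative): stripe_len = 64K, offset = 96K,
	 * length = 192K. Then stripe_nr = 1, stripe_offset = 32K,
	 * stripe_nr_end = 320K / 64K = 5, stripe_cnt = 4 and
	 * stripe_end_offset = 320K - 288K = 32K.
	 */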
5956 	/*
5957 	 * after this, stripe_nr is the number of stripes on this
5958 	 * device we have to walk to find the data, and stripe_index is
5959 	 * the number of our device in the stripe array
5960 	 */
5961 	num_stripes = 1;
5962 	stripe_index = 0;
5963 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5964 			 BTRFS_BLOCK_GROUP_RAID10)) {
5965 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5966 			sub_stripes = 1;
5967 		else
5968 			sub_stripes = map->sub_stripes;
5969 
5970 		factor = map->num_stripes / sub_stripes;
5971 		num_stripes = min_t(u64, map->num_stripes,
5972 				    sub_stripes * stripe_cnt);
5973 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5974 		stripe_index *= sub_stripes;
5975 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5976 					      &remaining_stripes);
5977 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5978 		last_stripe *= sub_stripes;
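		/*
		 * Continuing the example above for RAID0 on 3 devices
		 * (sub_stripes = 1, factor = 3): num_stripes = min(3, 4) = 3,
		 * stripe_nr = 1 / 3 = 0 with stripe_index = 1,
		 * stripes_per_dev = 4 / 3 = 1 with remaining_stripes = 1,
		 * and last_stripe = (5 - 1) % 3 = 1.
		 */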
5979 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5980 				BTRFS_BLOCK_GROUP_DUP)) {
5981 		num_stripes = map->num_stripes;
5982 	} else {
5983 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5984 					&stripe_index);
5985 	}
5986 
5987 	bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0);
5988 	if (!bioc) {
5989 		ret = -ENOMEM;
5990 		goto out;
5991 	}
5992 
5993 	for (i = 0; i < num_stripes; i++) {
5994 		bioc->stripes[i].physical =
5995 			map->stripes[stripe_index].physical +
5996 			stripe_offset + stripe_nr * map->stripe_len;
5997 		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
5998 
5999 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6000 				 BTRFS_BLOCK_GROUP_RAID10)) {
6001 			bioc->stripes[i].length = stripes_per_dev *
6002 				map->stripe_len;
6003 
6004 			if (i / sub_stripes < remaining_stripes)
6005 				bioc->stripes[i].length += map->stripe_len;
6006 
6007 			/*
6008 			 * Special for the first stripe and
6009 			 * the last stripe:
6010 			 *
6011 			 * |-------|...|-------|
6012 			 *     |----------|
6013 			 *    off     end_off
6014 			 */
6015 			if (i < sub_stripes)
6016 				bioc->stripes[i].length -= stripe_offset;
6017 
6018 			if (stripe_index >= last_stripe &&
6019 			    stripe_index <= (last_stripe +
6020 					     sub_stripes - 1))
6021 				bioc->stripes[i].length -= stripe_end_offset;
6022 
6023 			if (i == sub_stripes - 1)
6024 				stripe_offset = 0;
6025 		} else {
6026 			bioc->stripes[i].length = length;
6027 		}
6028 
6029 		stripe_index++;
6030 		if (stripe_index == map->num_stripes) {
6031 			stripe_index = 0;
6032 			stripe_nr++;
6033 		}
6034 	}
6035 
6036 	*bioc_ret = bioc;
6037 	bioc->map_type = map->type;
6038 	bioc->num_stripes = num_stripes;
6039 out:
6040 	free_extent_map(em);
6041 	return ret;
6042 }
6043 
6044 /*
6045  * In the dev-replace case, for the repair case (the only case where the mirror
6046  * is selected explicitly when calling btrfs_map_block), blocks left of the
6047  * left cursor can also be read from the target drive.
6048  *
6049  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6050  * array of stripes.
6051  * For READ, it also needs to be supported using the same mirror number.
6052  *
6053  * If the requested block is not left of the left cursor, EIO is returned. This
6054  * can happen because btrfs_num_copies() returns one more in the dev-replace
6055  * case.
6056  */
6057 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6058 					 u64 logical, u64 length,
6059 					 u64 srcdev_devid, int *mirror_num,
6060 					 u64 *physical)
6061 {
6062 	struct btrfs_io_context *bioc = NULL;
6063 	int num_stripes;
6064 	int index_srcdev = 0;
6065 	int found = 0;
6066 	u64 physical_of_found = 0;
6067 	int i;
6068 	int ret = 0;
6069 
6070 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6071 				logical, &length, &bioc, 0, 0);
6072 	if (ret) {
6073 		ASSERT(bioc == NULL);
6074 		return ret;
6075 	}
6076 
6077 	num_stripes = bioc->num_stripes;
6078 	if (*mirror_num > num_stripes) {
6079 		/*
6080 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6081 		 * which means that the requested area is not left of the left
6082 		 * cursor
6083 		 */
6084 		btrfs_put_bioc(bioc);
6085 		return -EIO;
6086 	}
6087 
6088 	/*
6089 	 * Process the rest of the function using the mirror_num of the source
6090 	 * drive. Therefore look it up first. At the end, patch the device
6091 	 * pointer to that of the target drive.
6092 	 */
6093 	for (i = 0; i < num_stripes; i++) {
6094 		if (bioc->stripes[i].dev->devid != srcdev_devid)
6095 			continue;
6096 
6097 		/*
6098 		 * In case of DUP, in order to keep it simple, only add the
6099 		 * mirror with the lowest physical address
6100 		 */
6101 		if (found &&
6102 		    physical_of_found <= bioc->stripes[i].physical)
6103 			continue;
6104 
6105 		index_srcdev = i;
6106 		found = 1;
6107 		physical_of_found = bioc->stripes[i].physical;
6108 	}
6109 
6110 	btrfs_put_bioc(bioc);
6111 
6112 	ASSERT(found);
6113 	if (!found)
6114 		return -EIO;
6115 
6116 	*mirror_num = index_srcdev + 1;
6117 	*physical = physical_of_found;
6118 	return ret;
6119 }
6120 
6121 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6122 {
6123 	struct btrfs_block_group *cache;
6124 	bool ret;
6125 
6126 	/* A non-zoned filesystem does not use the "to_copy" flag */
6127 	if (!btrfs_is_zoned(fs_info))
6128 		return false;
6129 
6130 	cache = btrfs_lookup_block_group(fs_info, logical);
6131 
6132 	spin_lock(&cache->lock);
6133 	ret = cache->to_copy;
6134 	spin_unlock(&cache->lock);
6135 
6136 	btrfs_put_block_group(cache);
6137 	return ret;
6138 }
6139 
6140 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6141 				      struct btrfs_io_context **bioc_ret,
6142 				      struct btrfs_dev_replace *dev_replace,
6143 				      u64 logical,
6144 				      int *num_stripes_ret, int *max_errors_ret)
6145 {
6146 	struct btrfs_io_context *bioc = *bioc_ret;
6147 	u64 srcdev_devid = dev_replace->srcdev->devid;
6148 	int tgtdev_indexes = 0;
6149 	int num_stripes = *num_stripes_ret;
6150 	int max_errors = *max_errors_ret;
6151 	int i;
6152 
6153 	if (op == BTRFS_MAP_WRITE) {
6154 		int index_where_to_add;
6155 
6156 		/*
6157 		 * A block group which has "to_copy" set will eventually be copied
6158 		 * by the dev-replace process, so we can avoid cloning the IO here.
6159 		 */
6160 		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6161 			return;
6162 
6163 		/*
6164 		 * Duplicate the write operations while the dev replace
6165 		 * procedure is running. Since the copying of the old disk to
6166 		 * the new disk takes place at run time while the filesystem is
6167 		 * mounted writable, the regular write operations to the old
6168 		 * disk have to be duplicated to go to the new disk as well.
6169 		 *
6170 		 * Note that device->missing is handled by the caller, and that
6171 		 * the write to the old disk is already set up in the stripes
6172 		 * array.
6173 		 */
6174 		index_where_to_add = num_stripes;
6175 		for (i = 0; i < num_stripes; i++) {
6176 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6177 				/* write to new disk, too */
6178 				struct btrfs_io_stripe *new =
6179 					bioc->stripes + index_where_to_add;
6180 				struct btrfs_io_stripe *old =
6181 					bioc->stripes + i;
6182 
6183 				new->physical = old->physical;
6184 				new->length = old->length;
6185 				new->dev = dev_replace->tgtdev;
6186 				bioc->tgtdev_map[i] = index_where_to_add;
6187 				index_where_to_add++;
6188 				max_errors++;
6189 				tgtdev_indexes++;
6190 			}
6191 		}
6192 		num_stripes = index_where_to_add;
6193 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6194 		int index_srcdev = 0;
6195 		int found = 0;
6196 		u64 physical_of_found = 0;
6197 
6198 		/*
6199 		 * During the dev-replace procedure, the target drive can also
6200 		 * be used to read data in case it is needed to repair a corrupt
6201 		 * block elsewhere. This is possible if the requested area is
6202 		 * left of the left cursor. In this area, the target drive is a
6203 		 * full copy of the source drive.
6204 		 */
6205 		for (i = 0; i < num_stripes; i++) {
6206 			if (bioc->stripes[i].dev->devid == srcdev_devid) {
6207 				/*
6208 				 * In case of DUP, in order to keep it simple,
6209 				 * only add the mirror with the lowest physical
6210 				 * address
6211 				 */
6212 				if (found &&
6213 				    physical_of_found <= bioc->stripes[i].physical)
6214 					continue;
6215 				index_srcdev = i;
6216 				found = 1;
6217 				physical_of_found = bioc->stripes[i].physical;
6218 			}
6219 		}
6220 		if (found) {
6221 			struct btrfs_io_stripe *tgtdev_stripe =
6222 				bioc->stripes + num_stripes;
6223 
6224 			tgtdev_stripe->physical = physical_of_found;
6225 			tgtdev_stripe->length =
6226 				bioc->stripes[index_srcdev].length;
6227 			tgtdev_stripe->dev = dev_replace->tgtdev;
6228 			bioc->tgtdev_map[index_srcdev] = num_stripes;
6229 
6230 			tgtdev_indexes++;
6231 			num_stripes++;
6232 		}
6233 	}
6234 
6235 	*num_stripes_ret = num_stripes;
6236 	*max_errors_ret = max_errors;
6237 	bioc->num_tgtdevs = tgtdev_indexes;
6238 	*bioc_ret = bioc;
6239 }
6240 
6241 static bool need_full_stripe(enum btrfs_map_op op)
6242 {
6243 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6244 }
6245 
6246 /*
6247  * Calculate the geometry of a particular (address, len) tuple. This
6248  * information is used to calculate how big a particular bio can get before it
6249  * straddles a stripe.
6250  *
6251  * @fs_info: the filesystem
6252  * @em:      mapping containing the logical extent
6253  * @op:      type of operation - write or read
6254  * @logical: address that we want to figure out the geometry of
6255  * @io_geom: pointer used to return values
6256  *
6257  * Returns < 0 in case a chunk for the given logical address cannot be found
6258  * (usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6259  */
6260 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6261 			  enum btrfs_map_op op, u64 logical,
6262 			  struct btrfs_io_geometry *io_geom)
6263 {
6264 	struct map_lookup *map;
6265 	u64 len;
6266 	u64 offset;
6267 	u64 stripe_offset;
6268 	u64 stripe_nr;
6269 	u64 stripe_len;
6270 	u64 raid56_full_stripe_start = (u64)-1;
6271 	int data_stripes;
6272 
6273 	ASSERT(op != BTRFS_MAP_DISCARD);
6274 
6275 	map = em->map_lookup;
6276 	/* Offset of this logical address in the chunk */
6277 	offset = logical - em->start;
6278 	/* Len of a stripe in a chunk */
6279 	stripe_len = map->stripe_len;
6280 	/* Stripe where this block falls in */
6281 	stripe_nr = div64_u64(offset, stripe_len);
6282 	/* Offset of stripe in the chunk */
6283 	stripe_offset = stripe_nr * stripe_len;
6284 	if (offset < stripe_offset) {
6285 		btrfs_crit(fs_info,
6286 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6287 			stripe_offset, offset, em->start, logical, stripe_len);
6288 		return -EINVAL;
6289 	}
6290 
6291 	/* stripe_offset is the offset of this block in its stripe */
6292 	stripe_offset = offset - stripe_offset;
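	/*
	 * E.g. with stripe_len = 64K and offset = 160K: stripe_nr = 2, the
	 * containing stripe starts at 128K within the chunk, so stripe_offset
	 * ends up as 160K - 128K = 32K into that stripe.
	 */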
6293 	data_stripes = nr_data_stripes(map);
6294 
6295 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6296 		u64 max_len = stripe_len - stripe_offset;
6297 
6298 		/*
6299 		 * In case of raid56, we need to know the stripe aligned start
6300 		 */
6301 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6302 			unsigned long full_stripe_len = stripe_len * data_stripes;
6303 			raid56_full_stripe_start = offset;
6304 
6305 			/*
6306 			 * Allow a write of a full stripe, but make sure we
6307 			 * don't allow straddling of stripes
6308 			 */
6309 			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6310 					full_stripe_len);
6311 			raid56_full_stripe_start *= full_stripe_len;
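			/*
			 * E.g. with data_stripes = 4 and stripe_len = 64K,
			 * full_stripe_len = 256K, so an offset of 300K
			 * rounds down to raid56_full_stripe_start = 256K.
			 */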
6312 
6313 			/*
6314 			 * For writes to RAID[56], allow a full stripeset across
6315 			 * all disks. For other RAID types and for RAID[56]
6316 			 * reads, just allow a single stripe (on a single disk).
6317 			 */
6318 			if (op == BTRFS_MAP_WRITE) {
6319 				max_len = stripe_len * data_stripes -
6320 					  (offset - raid56_full_stripe_start);
6321 			}
6322 		}
6323 		len = min_t(u64, em->len - offset, max_len);
6324 	} else {
6325 		len = em->len - offset;
6326 	}
6327 
6328 	io_geom->len = len;
6329 	io_geom->offset = offset;
6330 	io_geom->stripe_len = stripe_len;
6331 	io_geom->stripe_nr = stripe_nr;
6332 	io_geom->stripe_offset = stripe_offset;
6333 	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6334 
6335 	return 0;
6336 }
6337 
6338 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6339 			     enum btrfs_map_op op,
6340 			     u64 logical, u64 *length,
6341 			     struct btrfs_io_context **bioc_ret,
6342 			     int mirror_num, int need_raid_map)
6343 {
6344 	struct extent_map *em;
6345 	struct map_lookup *map;
6346 	u64 stripe_offset;
6347 	u64 stripe_nr;
6348 	u64 stripe_len;
6349 	u32 stripe_index;
6350 	int data_stripes;
6351 	int i;
6352 	int ret = 0;
6353 	int num_stripes;
6354 	int max_errors = 0;
6355 	int tgtdev_indexes = 0;
6356 	struct btrfs_io_context *bioc = NULL;
6357 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6358 	int dev_replace_is_ongoing = 0;
6359 	int num_alloc_stripes;
6360 	int patch_the_first_stripe_for_dev_replace = 0;
6361 	u64 physical_to_patch_in_first_stripe = 0;
6362 	u64 raid56_full_stripe_start = (u64)-1;
6363 	struct btrfs_io_geometry geom;
6364 
6365 	ASSERT(bioc_ret);
6366 	ASSERT(op != BTRFS_MAP_DISCARD);
6367 
6368 	em = btrfs_get_chunk_map(fs_info, logical, *length);
6369 	ASSERT(!IS_ERR(em));
6370 
6371 	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6372 	if (ret < 0)
6373 		return ret;
6374 
6375 	map = em->map_lookup;
6376 
6377 	*length = geom.len;
6378 	stripe_len = geom.stripe_len;
6379 	stripe_nr = geom.stripe_nr;
6380 	stripe_offset = geom.stripe_offset;
6381 	raid56_full_stripe_start = geom.raid56_stripe_offset;
6382 	data_stripes = nr_data_stripes(map);
6383 
6384 	down_read(&dev_replace->rwsem);
6385 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6386 	/*
6387 	 * Hold the semaphore for read during the whole operation, write is
6388 	 * requested at commit time but must wait.
6389 	 */
6390 	if (!dev_replace_is_ongoing)
6391 		up_read(&dev_replace->rwsem);
6392 
6393 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6394 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6395 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6396 						    dev_replace->srcdev->devid,
6397 						    &mirror_num,
6398 					    &physical_to_patch_in_first_stripe);
6399 		if (ret)
6400 			goto out;
6401 		else
6402 			patch_the_first_stripe_for_dev_replace = 1;
6403 	} else if (mirror_num > map->num_stripes) {
6404 		mirror_num = 0;
6405 	}
6406 
6407 	num_stripes = 1;
6408 	stripe_index = 0;
6409 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6410 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6411 				&stripe_index);
6412 		if (!need_full_stripe(op))
6413 			mirror_num = 1;
6414 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6415 		if (need_full_stripe(op))
6416 			num_stripes = map->num_stripes;
6417 		else if (mirror_num)
6418 			stripe_index = mirror_num - 1;
6419 		else {
6420 			stripe_index = find_live_mirror(fs_info, map, 0,
6421 					    dev_replace_is_ongoing);
6422 			mirror_num = stripe_index + 1;
6423 		}
6424 
6425 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6426 		if (need_full_stripe(op)) {
6427 			num_stripes = map->num_stripes;
6428 		} else if (mirror_num) {
6429 			stripe_index = mirror_num - 1;
6430 		} else {
6431 			mirror_num = 1;
6432 		}
6433 
6434 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6435 		u32 factor = map->num_stripes / map->sub_stripes;
6436 
6437 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6438 		stripe_index *= map->sub_stripes;
6439 
6440 		if (need_full_stripe(op))
6441 			num_stripes = map->sub_stripes;
6442 		else if (mirror_num)
6443 			stripe_index += mirror_num - 1;
6444 		else {
6445 			int old_stripe_index = stripe_index;
6446 			stripe_index = find_live_mirror(fs_info, map,
6447 					      stripe_index,
6448 					      dev_replace_is_ongoing);
6449 			mirror_num = stripe_index - old_stripe_index + 1;
6450 		}
6451 
6452 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6453 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6454 			/* push stripe_nr back to the start of the full stripe */
6455 			stripe_nr = div64_u64(raid56_full_stripe_start,
6456 					stripe_len * data_stripes);
6457 
6458 			/* RAID[56] write or recovery. Return all stripes */
6459 			num_stripes = map->num_stripes;
6460 			max_errors = nr_parity_stripes(map);
6461 
6462 			*length = map->stripe_len;
6463 			stripe_index = 0;
6464 			stripe_offset = 0;
6465 		} else {
6466 			/*
6467 			 * Mirror #0 or #1 means the original data block.
6468 			 * Mirror #2 is RAID5 parity block.
6469 			 * Mirror #3 is RAID6 Q block.
6470 			 */
6471 			stripe_nr = div_u64_rem(stripe_nr,
6472 					data_stripes, &stripe_index);
6473 			if (mirror_num > 1)
6474 				stripe_index = data_stripes + mirror_num - 2;
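			/*
			 * E.g. for RAID6 with data_stripes = 4: mirror_num = 2
			 * maps to stripe_index = 4 (the P stripe) and
			 * mirror_num = 3 to stripe_index = 5 (the Q stripe).
			 */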
6475 
6476 			/* We distribute the parity blocks across stripes */
6477 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6478 					&stripe_index);
6479 			if (!need_full_stripe(op) && mirror_num <= 1)
6480 				mirror_num = 1;
6481 		}
6482 	} else {
6483 		/*
6484 		 * after this, stripe_nr is the number of stripes on this
6485 		 * device we have to walk to find the data, and stripe_index is
6486 		 * the number of our device in the stripe array
6487 		 */
6488 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6489 				&stripe_index);
6490 		mirror_num = stripe_index + 1;
6491 	}
6492 	if (stripe_index >= map->num_stripes) {
6493 		btrfs_crit(fs_info,
6494 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6495 			   stripe_index, map->num_stripes);
6496 		ret = -EINVAL;
6497 		goto out;
6498 	}
6499 
6500 	num_alloc_stripes = num_stripes;
6501 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6502 		if (op == BTRFS_MAP_WRITE)
6503 			num_alloc_stripes <<= 1;
6504 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6505 			num_alloc_stripes++;
6506 		tgtdev_indexes = num_stripes;
6507 	}
6508 
6509 	bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6510 	if (!bioc) {
6511 		ret = -ENOMEM;
6512 		goto out;
6513 	}
6514 
6515 	for (i = 0; i < num_stripes; i++) {
6516 		bioc->stripes[i].physical = map->stripes[stripe_index].physical +
6517 			stripe_offset + stripe_nr * map->stripe_len;
6518 		bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6519 		stripe_index++;
6520 	}
6521 
6522 	/* Build raid_map */
6523 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6524 	    (need_full_stripe(op) || mirror_num > 1)) {
6525 		u64 tmp;
6526 		unsigned rot;
6527 
6528 		/* Work out the disk rotation on this stripe-set */
6529 		div_u64_rem(stripe_nr, num_stripes, &rot);
6530 
6531 		/* Fill in the logical address of each stripe */
6532 		tmp = stripe_nr * data_stripes;
6533 		for (i = 0; i < data_stripes; i++)
6534 			bioc->raid_map[(i + rot) % num_stripes] =
6535 				em->start + (tmp + i) * map->stripe_len;
6536 
6537 		bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6538 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6539 			bioc->raid_map[(i + rot + 1) % num_stripes] =
6540 				RAID6_Q_STRIPE;
6541 
6542 		sort_parity_stripes(bioc, num_stripes);
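		/*
		 * E.g. RAID5 on 3 devices (data_stripes = 2), full stripe
		 * number 1: rot = 1, so raid_map[1] and raid_map[2] receive
		 * the two data stripe logical addresses and raid_map[0] gets
		 * RAID5_P_STRIPE; the sort then bubbles the parity stripe,
		 * whose raid_map value is a huge sentinel, to the end.
		 */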
6543 	}
6544 
6545 	if (need_full_stripe(op))
6546 		max_errors = btrfs_chunk_max_errors(map);
6547 
6548 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6549 	    need_full_stripe(op)) {
6550 		handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6551 					  &num_stripes, &max_errors);
6552 	}
6553 
6554 	*bioc_ret = bioc;
6555 	bioc->map_type = map->type;
6556 	bioc->num_stripes = num_stripes;
6557 	bioc->max_errors = max_errors;
6558 	bioc->mirror_num = mirror_num;
6559 
6560 	/*
6561 	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
6562 	 * mirror_num == num_stripes + 1 && the dev_replace target drive is
6563 	 * available as a mirror.
6564 	 */
6565 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6566 		WARN_ON(num_stripes > 1);
6567 		bioc->stripes[0].dev = dev_replace->tgtdev;
6568 		bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6569 		bioc->mirror_num = map->num_stripes + 1;
6570 	}
6571 out:
6572 	if (dev_replace_is_ongoing) {
6573 		lockdep_assert_held(&dev_replace->rwsem);
6574 		/* Unlock and let waiting writers proceed */
6575 		up_read(&dev_replace->rwsem);
6576 	}
6577 	free_extent_map(em);
6578 	return ret;
6579 }
6580 
6581 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6582 		      u64 logical, u64 *length,
6583 		      struct btrfs_io_context **bioc_ret, int mirror_num)
6584 {
6585 	if (op == BTRFS_MAP_DISCARD)
6586 		return __btrfs_map_block_for_discard(fs_info, logical,
6587 						     length, bioc_ret);
6588 
6589 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6590 				 mirror_num, 0);
6591 }
6592 
6593 /* For Scrub/replace */
6594 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6595 		     u64 logical, u64 *length,
6596 		     struct btrfs_io_context **bioc_ret)
6597 {
6598 	return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
6599 }
6600 
6601 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
6602 {
6603 	bio->bi_private = bioc->private;
6604 	bio->bi_end_io = bioc->end_io;
6605 	bio_endio(bio);
6606 
6607 	btrfs_put_bioc(bioc);
6608 }
6609 
6610 static void btrfs_end_bio(struct bio *bio)
6611 {
6612 	struct btrfs_io_context *bioc = bio->bi_private;
6613 	int is_orig_bio = 0;
6614 
6615 	if (bio->bi_status) {
6616 		atomic_inc(&bioc->error);
6617 		if (bio->bi_status == BLK_STS_IOERR ||
6618 		    bio->bi_status == BLK_STS_TARGET) {
6619 			struct btrfs_device *dev = btrfs_bio(bio)->device;
6620 
6621 			ASSERT(dev->bdev);
6622 			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6623 				btrfs_dev_stat_inc_and_print(dev,
6624 						BTRFS_DEV_STAT_WRITE_ERRS);
6625 			else if (!(bio->bi_opf & REQ_RAHEAD))
6626 				btrfs_dev_stat_inc_and_print(dev,
6627 						BTRFS_DEV_STAT_READ_ERRS);
6628 			if (bio->bi_opf & REQ_PREFLUSH)
6629 				btrfs_dev_stat_inc_and_print(dev,
6630 						BTRFS_DEV_STAT_FLUSH_ERRS);
6631 		}
6632 	}
6633 
6634 	if (bio == bioc->orig_bio)
6635 		is_orig_bio = 1;
6636 
6637 	btrfs_bio_counter_dec(bioc->fs_info);
6638 
6639 	if (atomic_dec_and_test(&bioc->stripes_pending)) {
6640 		if (!is_orig_bio) {
6641 			bio_put(bio);
6642 			bio = bioc->orig_bio;
6643 		}
6644 
6645 		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
6646 		/* Only send an error to the higher layers if it is
6647 		 * beyond the tolerance of the btrfs bio.
6648 		 */
6649 		if (atomic_read(&bioc->error) > bioc->max_errors) {
6650 			bio->bi_status = BLK_STS_IOERR;
6651 		} else {
6652 			/*
6653 			 * this bio is actually up to date, we didn't
6654 			 * go over the max number of errors
6655 			 */
6656 			bio->bi_status = BLK_STS_OK;
6657 		}
6658 
6659 		btrfs_end_bioc(bioc, bio);
6660 	} else if (!is_orig_bio) {
6661 		bio_put(bio);
6662 	}
6663 }
6664 
6665 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
6666 			      u64 physical, struct btrfs_device *dev)
6667 {
6668 	struct btrfs_fs_info *fs_info = bioc->fs_info;
6669 
6670 	bio->bi_private = bioc;
6671 	btrfs_bio(bio)->device = dev;
6672 	bio->bi_end_io = btrfs_end_bio;
6673 	bio->bi_iter.bi_sector = physical >> 9;
6674 	/*
6675 	 * For zone append writing, bi_sector must point to the beginning of the
6676 	 * zone
6677 	 */
6678 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6679 		if (btrfs_dev_is_sequential(dev, physical)) {
6680 			u64 zone_start = round_down(physical, fs_info->zone_size);
6681 
6682 			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6683 		} else {
6684 			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6685 			bio->bi_opf |= REQ_OP_WRITE;
6686 		}
6687 	}
6688 	btrfs_debug_in_rcu(fs_info,
6689 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6690 		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6691 		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6692 		dev->devid, bio->bi_iter.bi_size);
6693 	bio_set_dev(bio, dev->bdev);
6694 
6695 	btrfs_bio_counter_inc_noblocked(fs_info);
6696 
6697 	btrfsic_submit_bio(bio);
6698 }
6699 
6700 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
6701 {
6702 	atomic_inc(&bioc->error);
6703 	if (atomic_dec_and_test(&bioc->stripes_pending)) {
6704 		/* Should be the original bio. */
6705 		WARN_ON(bio != bioc->orig_bio);
6706 
6707 		btrfs_bio(bio)->mirror_num = bioc->mirror_num;
6708 		bio->bi_iter.bi_sector = logical >> 9;
6709 		if (atomic_read(&bioc->error) > bioc->max_errors)
6710 			bio->bi_status = BLK_STS_IOERR;
6711 		else
6712 			bio->bi_status = BLK_STS_OK;
6713 		btrfs_end_bioc(bioc, bio);
6714 	}
6715 }
6716 
6717 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6718 			   int mirror_num)
6719 {
6720 	struct btrfs_device *dev;
6721 	struct bio *first_bio = bio;
6722 	u64 logical = bio->bi_iter.bi_sector << 9;
6723 	u64 length = 0;
6724 	u64 map_length;
6725 	int ret;
6726 	int dev_nr;
6727 	int total_devs;
6728 	struct btrfs_io_context *bioc = NULL;
6729 
6730 	length = bio->bi_iter.bi_size;
6731 	map_length = length;
6732 
6733 	btrfs_bio_counter_inc_blocked(fs_info);
6734 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6735 				&map_length, &bioc, mirror_num, 1);
6736 	if (ret) {
6737 		btrfs_bio_counter_dec(fs_info);
6738 		return errno_to_blk_status(ret);
6739 	}
6740 
6741 	total_devs = bioc->num_stripes;
6742 	bioc->orig_bio = first_bio;
6743 	bioc->private = first_bio->bi_private;
6744 	bioc->end_io = first_bio->bi_end_io;
6745 	atomic_set(&bioc->stripes_pending, bioc->num_stripes);
6746 
6747 	if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6748 	    ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6749 		/* In this case, map_length has been set to the length of
6750 		 * a single stripe, not the whole write */
6751 		if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6752 			ret = raid56_parity_write(bio, bioc, map_length);
6753 		} else {
6754 			ret = raid56_parity_recover(bio, bioc, map_length,
6755 						    mirror_num, 1);
6756 		}
6757 
6758 		btrfs_bio_counter_dec(fs_info);
6759 		return errno_to_blk_status(ret);
6760 	}
6761 
6762 	if (map_length < length) {
6763 		btrfs_crit(fs_info,
6764 			   "mapping failed logical %llu bio len %llu len %llu",
6765 			   logical, length, map_length);
6766 		BUG();
6767 	}
6768 
6769 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6770 		dev = bioc->stripes[dev_nr].dev;
6771 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6772 						   &dev->dev_state) ||
6773 		    (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6774 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6775 			bioc_error(bioc, first_bio, logical);
6776 			continue;
6777 		}
6778 
6779 		if (dev_nr < total_devs - 1)
6780 			bio = btrfs_bio_clone(first_bio);
6781 		else
6782 			bio = first_bio;
6783 
6784 		submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
6785 	}
6786 	btrfs_bio_counter_dec(fs_info);
6787 	return BLK_STS_OK;
6788 }
6789 
6790 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6791 				      const struct btrfs_fs_devices *fs_devices)
6792 {
6793 	if (args->fsid == NULL)
6794 		return true;
6795 	if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6796 		return true;
6797 	return false;
6798 }
6799 
6800 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6801 				  const struct btrfs_device *device)
6802 {
6803 	ASSERT((args->devid != (u64)-1) || args->missing);
6804 
6805 	if ((args->devid != (u64)-1) && device->devid != args->devid)
6806 		return false;
6807 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6808 		return false;
6809 	if (!args->missing)
6810 		return true;
6811 	if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6812 	    !device->bdev)
6813 		return true;
6814 	return false;
6815 }
6816 
6817 /*
6818  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6819  * return NULL.
6820  *
6821  * If devid and uuid are both specified, the match must be exact, otherwise
6822  * only devid is used.
6823  */
6824 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6825 				       const struct btrfs_dev_lookup_args *args)
6826 {
6827 	struct btrfs_device *device;
6828 	struct btrfs_fs_devices *seed_devs;
6829 
6830 	if (dev_args_match_fs_devices(args, fs_devices)) {
6831 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6832 			if (dev_args_match_device(args, device))
6833 				return device;
6834 		}
6835 	}
6836 
6837 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6838 		if (!dev_args_match_fs_devices(args, seed_devs))
6839 			continue;
6840 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
6841 			if (dev_args_match_device(args, device))
6842 				return device;
6843 		}
6844 	}
6845 
6846 	return NULL;
6847 }
6848 
6849 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6850 					    u64 devid, u8 *dev_uuid)
6851 {
6852 	struct btrfs_device *device;
6853 	unsigned int nofs_flag;
6854 
6855 	/*
6856 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6857 	 * allocation; however, we don't want to change btrfs_alloc_device() to
6858 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6859 	 * places.
6860 	 */
6861 	nofs_flag = memalloc_nofs_save();
6862 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6863 	memalloc_nofs_restore(nofs_flag);
6864 	if (IS_ERR(device))
6865 		return device;
6866 
6867 	list_add(&device->dev_list, &fs_devices->devices);
6868 	device->fs_devices = fs_devices;
6869 	fs_devices->num_devices++;
6870 
6871 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6872 	fs_devices->missing_devices++;
6873 
6874 	return device;
6875 }
6876 
6877 /**
6878  * btrfs_alloc_device - allocate struct btrfs_device
6879  * @fs_info:	used only for generating a new devid, can be NULL if
6880  *		devid is provided (i.e. @devid != NULL).
6881  * @devid:	a pointer to devid for this device.  If NULL a new devid
6882  *		is generated.
6883  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6884  *		is generated.
6885  *
6886  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6887  * on error.  Returned struct is not linked onto any lists and must be
6888  * destroyed with btrfs_free_device.
6889  */
6890 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6891 					const u64 *devid,
6892 					const u8 *uuid)
6893 {
6894 	struct btrfs_device *dev;
6895 	u64 tmp;
6896 
6897 	if (WARN_ON(!devid && !fs_info))
6898 		return ERR_PTR(-EINVAL);
6899 
6900 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6901 	if (!dev)
6902 		return ERR_PTR(-ENOMEM);
6903 
6904 	/*
6905 	 * Preallocate a bio that's always going to be used for flushing device
6906 	 * barriers and matches the device lifespan
6907 	 */
6908 	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6909 	if (!dev->flush_bio) {
6910 		kfree(dev);
6911 		return ERR_PTR(-ENOMEM);
6912 	}
6913 
6914 	INIT_LIST_HEAD(&dev->dev_list);
6915 	INIT_LIST_HEAD(&dev->dev_alloc_list);
6916 	INIT_LIST_HEAD(&dev->post_commit_list);
6917 
6918 	atomic_set(&dev->reada_in_flight, 0);
6919 	atomic_set(&dev->dev_stats_ccnt, 0);
6920 	btrfs_device_data_ordered_init(dev);
6921 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6922 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6923 	extent_io_tree_init(fs_info, &dev->alloc_state,
6924 			    IO_TREE_DEVICE_ALLOC_STATE, NULL);
6925 
6926 	if (devid)
6927 		tmp = *devid;
6928 	else {
6929 		int ret;
6930 
6931 		ret = find_next_devid(fs_info, &tmp);
6932 		if (ret) {
6933 			btrfs_free_device(dev);
6934 			return ERR_PTR(ret);
6935 		}
6936 	}
6937 	dev->devid = tmp;
6938 
6939 	if (uuid)
6940 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6941 	else
6942 		generate_random_uuid(dev->uuid);
6943 
6944 	return dev;
6945 }
6946 
6947 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6948 					u64 devid, u8 *uuid, bool error)
6949 {
6950 	if (error)
6951 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6952 			      devid, uuid);
6953 	else
6954 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6955 			      devid, uuid);
6956 }
6957 
6958 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6959 {
6960 	const int data_stripes = calc_data_stripes(type, num_stripes);
6961 
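	/*
	 * E.g. a 1G RAID0 chunk over 2 devices has 2 data stripes, so each
	 * device holds 512M; a 1G RAID1 chunk has a single data stripe and
	 * each device holds the full 1G.
	 */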
6962 	return div_u64(chunk_len, data_stripes);
6963 }
6964 
6965 #if BITS_PER_LONG == 32
6966 /*
6967  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6968  * can't be accessed on 32bit systems.
6969  *
6970  * This function does a mount time check to reject the fs if it already has
6971  * a metadata chunk beyond that limit.
6972  */
6973 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6974 				  u64 logical, u64 length, u64 type)
6975 {
6976 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6977 		return 0;
6978 
6979 	if (logical + length < MAX_LFS_FILESIZE)
6980 		return 0;
6981 
6982 	btrfs_err_32bit_limit(fs_info);
6983 	return -EOVERFLOW;
6984 }
6985 
6986 /*
6987  * This is to give early warning for any metadata chunk reaching
6988  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6989  * Although we can still access the metadata, accessing it is not going to
6990  * be possible once the limit is reached.
6991  */
6992 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6993 				  u64 logical, u64 length, u64 type)
6994 {
6995 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6996 		return;
6997 
6998 	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6999 		return;
7000 
7001 	btrfs_warn_32bit_limit(fs_info);
7002 }
7003 #endif
7004 
7005 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7006 			  struct btrfs_chunk *chunk)
7007 {
7008 	BTRFS_DEV_LOOKUP_ARGS(args);
7009 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7010 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7011 	struct map_lookup *map;
7012 	struct extent_map *em;
7013 	u64 logical;
7014 	u64 length;
7015 	u64 devid;
7016 	u64 type;
7017 	u8 uuid[BTRFS_UUID_SIZE];
7018 	int num_stripes;
7019 	int ret;
7020 	int i;
7021 
7022 	logical = key->offset;
7023 	length = btrfs_chunk_length(leaf, chunk);
7024 	type = btrfs_chunk_type(leaf, chunk);
7025 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7026 
7027 #if BITS_PER_LONG == 32
7028 	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7029 	if (ret < 0)
7030 		return ret;
7031 	warn_32bit_meta_chunk(fs_info, logical, length, type);
7032 #endif
7033 
7034 	/*
7035 	 * We only need to verify the chunk item if reading from the sys chunk
7036 	 * array; chunk items in tree blocks are already verified by the tree-checker.
7037 	 */
7038 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7039 		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7040 		if (ret)
7041 			return ret;
7042 	}
7043 
7044 	read_lock(&map_tree->lock);
7045 	em = lookup_extent_mapping(map_tree, logical, 1);
7046 	read_unlock(&map_tree->lock);
7047 
7048 	/* already mapped? */
7049 	if (em && em->start <= logical && em->start + em->len > logical) {
7050 		free_extent_map(em);
7051 		return 0;
7052 	} else if (em) {
7053 		free_extent_map(em);
7054 	}
7055 
7056 	em = alloc_extent_map();
7057 	if (!em)
7058 		return -ENOMEM;
7059 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7060 	if (!map) {
7061 		free_extent_map(em);
7062 		return -ENOMEM;
7063 	}
7064 
7065 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7066 	em->map_lookup = map;
7067 	em->start = logical;
7068 	em->len = length;
7069 	em->orig_start = 0;
7070 	em->block_start = 0;
7071 	em->block_len = em->len;
7072 
7073 	map->num_stripes = num_stripes;
7074 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
7075 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
7076 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7077 	map->type = type;
7078 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7079 	map->verified_stripes = 0;
7080 	em->orig_block_len = calc_stripe_length(type, em->len,
7081 						map->num_stripes);
7082 	for (i = 0; i < num_stripes; i++) {
7083 		map->stripes[i].physical =
7084 			btrfs_stripe_offset_nr(leaf, chunk, i);
7085 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7086 		args.devid = devid;
7087 		read_extent_buffer(leaf, uuid, (unsigned long)
7088 				   btrfs_stripe_dev_uuid_nr(chunk, i),
7089 				   BTRFS_UUID_SIZE);
7090 		args.uuid = uuid;
7091 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7092 		if (!map->stripes[i].dev &&
7093 		    !btrfs_test_opt(fs_info, DEGRADED)) {
7094 			free_extent_map(em);
7095 			btrfs_report_missing_device(fs_info, devid, uuid, true);
7096 			return -ENOENT;
7097 		}
7098 		if (!map->stripes[i].dev) {
7099 			map->stripes[i].dev =
7100 				add_missing_dev(fs_info->fs_devices, devid,
7101 						uuid);
7102 			if (IS_ERR(map->stripes[i].dev)) {
7103 				free_extent_map(em);
7104 				btrfs_err(fs_info,
7105 					"failed to init missing dev %llu: %ld",
7106 					devid, PTR_ERR(map->stripes[i].dev));
7107 				return PTR_ERR(map->stripes[i].dev);
7108 			}
7109 			btrfs_report_missing_device(fs_info, devid, uuid, false);
7110 		}
7111 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7112 				&(map->stripes[i].dev->dev_state));
7114 	}
7115 
7116 	write_lock(&map_tree->lock);
7117 	ret = add_extent_mapping(map_tree, em, 0);
7118 	write_unlock(&map_tree->lock);
7119 	if (ret < 0) {
7120 		btrfs_err(fs_info,
7121 			  "failed to add chunk map, start=%llu len=%llu: %d",
7122 			  em->start, em->len, ret);
7123 	}
7124 	free_extent_map(em);
7125 
7126 	return ret;
7127 }
7128 
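/*
 * Copy the fields of an on-disk device item into the in-memory
 * btrfs_device, including the device UUID.
 */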
7129 static void fill_device_from_item(struct extent_buffer *leaf,
7130 				 struct btrfs_dev_item *dev_item,
7131 				 struct btrfs_device *device)
7132 {
7133 	unsigned long ptr;
7134 
7135 	device->devid = btrfs_device_id(leaf, dev_item);
7136 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7137 	device->total_bytes = device->disk_total_bytes;
7138 	device->commit_total_bytes = device->disk_total_bytes;
7139 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7140 	device->commit_bytes_used = device->bytes_used;
7141 	device->type = btrfs_device_type(leaf, dev_item);
7142 	device->io_align = btrfs_device_io_align(leaf, dev_item);
7143 	device->io_width = btrfs_device_io_width(leaf, dev_item);
7144 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7145 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7146 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7147 
7148 	ptr = btrfs_device_uuid(dev_item);
7149 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7150 }
7151 
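/*
 * Find or open the fs_devices of a seed filesystem given its fsid.  On
 * the first call for a given seed fsid this clones the registered
 * fs_devices, opens its devices and anchors the clone at
 * fs_info->fs_devices->seed_list; subsequent calls find that clone
 * directly.  With -o degraded a completely missing seed fs is
 * represented by an empty seeding fs_devices instead of failing.
 */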
7152 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7153 						  u8 *fsid)
7154 {
7155 	struct btrfs_fs_devices *fs_devices;
7156 	int ret;
7157 
7158 	lockdep_assert_held(&uuid_mutex);
7159 	ASSERT(fsid);
7160 
7161 	/* This will match only for a multi-device seed fs */
7162 	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7163 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7164 			return fs_devices;
7165 
7167 	fs_devices = find_fsid(fsid, NULL);
7168 	if (!fs_devices) {
7169 		if (!btrfs_test_opt(fs_info, DEGRADED))
7170 			return ERR_PTR(-ENOENT);
7171 
7172 		fs_devices = alloc_fs_devices(fsid, NULL);
7173 		if (IS_ERR(fs_devices))
7174 			return fs_devices;
7175 
7176 		fs_devices->seeding = true;
7177 		fs_devices->opened = 1;
7178 		return fs_devices;
7179 	}
7180 
7181 	/*
7182 	 * Upon first call for a seed fs fsid, just create a private copy of the
7183 	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7184 	 */
7185 	fs_devices = clone_fs_devices(fs_devices);
7186 	if (IS_ERR(fs_devices))
7187 		return fs_devices;
7188 
7189 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7190 	if (ret) {
7191 		free_fs_devices(fs_devices);
7192 		return ERR_PTR(ret);
7193 	}
7194 
7195 	if (!fs_devices->seeding) {
7196 		close_fs_devices(fs_devices);
7197 		free_fs_devices(fs_devices);
7198 		return ERR_PTR(-EINVAL);
7199 	}
7200 
7201 	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7202 
7203 	return fs_devices;
7204 }
7205 
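/*
 * Read one device item and connect it to the matching in-memory
 * btrfs_device.  Items belonging to a seed filesystem are resolved via
 * open_seed_devices().  A device without a backing block device is only
 * accepted with -o degraded and is marked missing.
 */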
7206 static int read_one_dev(struct extent_buffer *leaf,
7207 			struct btrfs_dev_item *dev_item)
7208 {
7209 	BTRFS_DEV_LOOKUP_ARGS(args);
7210 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7211 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7212 	struct btrfs_device *device;
7213 	u64 devid;
7215 	u8 fs_uuid[BTRFS_FSID_SIZE];
7216 	u8 dev_uuid[BTRFS_UUID_SIZE];
7217 
7218 	devid = args.devid = btrfs_device_id(leaf, dev_item);
7219 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7220 			   BTRFS_UUID_SIZE);
7221 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7222 			   BTRFS_FSID_SIZE);
7223 	args.uuid = dev_uuid;
7224 	args.fsid = fs_uuid;
7225 
7226 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7227 		fs_devices = open_seed_devices(fs_info, fs_uuid);
7228 		if (IS_ERR(fs_devices))
7229 			return PTR_ERR(fs_devices);
7230 	}
7231 
7232 	device = btrfs_find_device(fs_info->fs_devices, &args);
7233 	if (!device) {
7234 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7235 			btrfs_report_missing_device(fs_info, devid,
7236 							dev_uuid, true);
7237 			return -ENOENT;
7238 		}
7239 
7240 		device = add_missing_dev(fs_devices, devid, dev_uuid);
7241 		if (IS_ERR(device)) {
7242 			btrfs_err(fs_info,
7243 				"failed to add missing dev %llu: %ld",
7244 				devid, PTR_ERR(device));
7245 			return PTR_ERR(device);
7246 		}
7247 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7248 	} else {
7249 		if (!device->bdev) {
7250 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7251 				btrfs_report_missing_device(fs_info,
7252 						devid, dev_uuid, true);
7253 				return -ENOENT;
7254 			}
7255 			btrfs_report_missing_device(fs_info, devid,
7256 							dev_uuid, false);
7257 		}
7258 
7259 		if (!device->bdev &&
7260 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7261 			/*
7262 			 * This happens when a device that was properly set up
7263 			 * in the device info lists suddenly goes bad.
7264 			 * device->bdev is NULL, so we have to mark it with
7265 			 * the BTRFS_DEV_STATE_MISSING bit here.
7266 			 */
7267 			device->fs_devices->missing_devices++;
7268 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7269 		}
7270 
7271 		/* Move the device to its own fs_devices */
7272 		if (device->fs_devices != fs_devices) {
7273 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7274 							&device->dev_state));
7275 
7276 			list_move(&device->dev_list, &fs_devices->devices);
7277 			device->fs_devices->num_devices--;
7278 			fs_devices->num_devices++;
7279 
7280 			device->fs_devices->missing_devices--;
7281 			fs_devices->missing_devices++;
7282 
7283 			device->fs_devices = fs_devices;
7284 		}
7285 	}
7286 
7287 	if (device->fs_devices != fs_info->fs_devices) {
7288 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7289 		if (device->generation !=
7290 		    btrfs_device_generation(leaf, dev_item))
7291 			return -EINVAL;
7292 	}
7293 
7294 	fill_device_from_item(leaf, dev_item, device);
7295 	if (device->bdev) {
7296 		u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7297 
7298 		if (device->total_bytes > max_total_bytes) {
7299 			btrfs_err(fs_info,
7300 			"device total_bytes should be at most %llu but found %llu",
7301 				  max_total_bytes, device->total_bytes);
7302 			return -EINVAL;
7303 		}
7304 	}
7305 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7306 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7307 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7308 		device->fs_devices->total_rw_bytes += device->total_bytes;
7309 		atomic64_add(device->total_bytes - device->bytes_used,
7310 				&fs_info->free_chunk_space);
7311 	}
7312 	return 0;
7314 }
7315 
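/*
 * Read the chunk items embedded in the superblock's sys_chunk_array and
 * map them via read_one_chunk().  These are the SYSTEM chunks required
 * to bootstrap reading the chunk tree itself.
 */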
7316 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7317 {
7318 	struct btrfs_root *root = fs_info->tree_root;
7319 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7320 	struct extent_buffer *sb;
7321 	struct btrfs_disk_key *disk_key;
7322 	struct btrfs_chunk *chunk;
7323 	u8 *array_ptr;
7324 	unsigned long sb_array_offset;
7325 	int ret = 0;
7326 	u32 num_stripes;
7327 	u32 array_size;
7328 	u32 len = 0;
7329 	u32 cur_offset;
7330 	u64 type;
7331 	struct btrfs_key key;
7332 
7333 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7334 	/*
7335 	 * This will create an extent buffer of nodesize; the superblock size
7336 	 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7337 	 * overallocate, but we can keep it as-is since only the first page is used.
7338 	 */
7339 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7340 					  root->root_key.objectid, 0);
7341 	if (IS_ERR(sb))
7342 		return PTR_ERR(sb);
7343 	set_extent_buffer_uptodate(sb);
7344 	/*
7345 	 * The sb extent buffer is artificial and just used to read the system array.
7346 	 * The set_extent_buffer_uptodate() call does not properly mark all of
7347 	 * its pages up-to-date when the page is larger: the extent does not
7348 	 * cover the whole page and consequently check_page_uptodate() does not
7349 	 * find all the page's extents up-to-date (the hole beyond sb),
7350 	 * write_extent_buffer() then triggers a WARN_ON.
7351 	 *
7352 	 * Regular short extents go through the mark_extent_buffer_dirty/writeback
7353 	 * cycle, but sb spans only this function. Add an explicit SetPageUptodate()
7354 	 * call to silence the warning, e.g. on PowerPC 64.
7355 	 */
7356 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7357 		SetPageUptodate(sb->pages[0]);
7358 
7359 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7360 	array_size = btrfs_super_sys_array_size(super_copy);
7361 
7362 	array_ptr = super_copy->sys_chunk_array;
7363 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7364 	cur_offset = 0;
7365 
7366 	while (cur_offset < array_size) {
7367 		disk_key = (struct btrfs_disk_key *)array_ptr;
7368 		len = sizeof(*disk_key);
7369 		if (cur_offset + len > array_size)
7370 			goto out_short_read;
7371 
7372 		btrfs_disk_key_to_cpu(&key, disk_key);
7373 
7374 		array_ptr += len;
7375 		sb_array_offset += len;
7376 		cur_offset += len;
7377 
7378 		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7379 			btrfs_err(fs_info,
7380 			    "unexpected item type %u in sys_array at offset %u",
7381 				  (u32)key.type, cur_offset);
7382 			ret = -EIO;
7383 			break;
7384 		}
7385 
7386 		chunk = (struct btrfs_chunk *)sb_array_offset;
7387 		/*
7388 		 * At least one btrfs_chunk with one stripe must be present;
7389 		 * the exact stripe count check comes afterwards.
7390 		 */
7391 		len = btrfs_chunk_item_size(1);
7392 		if (cur_offset + len > array_size)
7393 			goto out_short_read;
7394 
7395 		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7396 		if (!num_stripes) {
7397 			btrfs_err(fs_info,
7398 			"invalid number of stripes %u in sys_array at offset %u",
7399 				  num_stripes, cur_offset);
7400 			ret = -EIO;
7401 			break;
7402 		}
7403 
7404 		type = btrfs_chunk_type(sb, chunk);
7405 		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7406 			btrfs_err(fs_info,
7407 			"invalid chunk type %llu in sys_array at offset %u",
7408 				  type, cur_offset);
7409 			ret = -EIO;
7410 			break;
7411 		}
7412 
7413 		len = btrfs_chunk_item_size(num_stripes);
7414 		if (cur_offset + len > array_size)
7415 			goto out_short_read;
7416 
7417 		ret = read_one_chunk(&key, sb, chunk);
7418 		if (ret)
7419 			break;
7420 
7421 		array_ptr += len;
7422 		sb_array_offset += len;
7423 		cur_offset += len;
7424 	}
7425 	clear_extent_buffer_uptodate(sb);
7426 	free_extent_buffer_stale(sb);
7427 	return ret;
7428 
7429 out_short_read:
7430 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7431 			len, cur_offset);
7432 	clear_extent_buffer_uptodate(sb);
7433 	free_extent_buffer_stale(sb);
7434 	return -EIO;
7435 }
7436 
7437 /*
7438  * Check if all chunks in the fs are OK for a read-write degraded mount
7439  *
7440  * If @failing_dev is specified, it is accounted as missing.
7441  *
7442  * Return true if all chunks meet the minimal RW mount requirements,
7443  * false if any chunk doesn't.
7444  */
7445 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7446 					struct btrfs_device *failing_dev)
7447 {
7448 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7449 	struct extent_map *em;
7450 	u64 next_start = 0;
7451 	bool ret = true;
7452 
7453 	read_lock(&map_tree->lock);
7454 	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7455 	read_unlock(&map_tree->lock);
7456 	/* No chunk at all? Return false anyway */
7457 	if (!em) {
7458 		ret = false;
7459 		goto out;
7460 	}
7461 	while (em) {
7462 		struct map_lookup *map;
7463 		int missing = 0;
7464 		int max_tolerated;
7465 		int i;
7466 
7467 		map = em->map_lookup;
7468 		max_tolerated =
7469 			btrfs_get_num_tolerated_disk_barrier_failures(
7470 					map->type);
7471 		for (i = 0; i < map->num_stripes; i++) {
7472 			struct btrfs_device *dev = map->stripes[i].dev;
7473 
7474 			if (!dev || !dev->bdev ||
7475 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7476 			    dev->last_flush_error)
7477 				missing++;
7478 			else if (failing_dev && failing_dev == dev)
7479 				missing++;
7480 		}
7481 		if (missing > max_tolerated) {
7482 			if (!failing_dev)
7483 				btrfs_warn(fs_info,
7484 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7485 				   em->start, missing, max_tolerated);
7486 			free_extent_map(em);
7487 			ret = false;
7488 			goto out;
7489 		}
7490 		next_start = extent_map_end(em);
7491 		free_extent_map(em);
7492 
7493 		read_lock(&map_tree->lock);
7494 		em = lookup_extent_mapping(map_tree, next_start,
7495 					   (u64)(-1) - next_start);
7496 		read_unlock(&map_tree->lock);
7497 	}
7498 out:
7499 	return ret;
7500 }
7501 
7502 static void readahead_tree_node_children(struct extent_buffer *node)
7503 {
7504 	int i;
7505 	const int nr_items = btrfs_header_nritems(node);
7506 
7507 	for (i = 0; i < nr_items; i++)
7508 		btrfs_readahead_node_child(node, i);
7509 }
7510 
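/*
 * Read all device items and then all chunk items from the chunk tree at
 * mount time, then cross-check the device count and total_rw_bytes
 * accumulated on the way against the superblock.
 */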
7511 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7512 {
7513 	struct btrfs_root *root = fs_info->chunk_root;
7514 	struct btrfs_path *path;
7515 	struct extent_buffer *leaf;
7516 	struct btrfs_key key;
7517 	struct btrfs_key found_key;
7518 	int ret;
7519 	int slot;
7520 	u64 total_dev = 0;
7521 	u64 last_ra_node = 0;
7522 
7523 	path = btrfs_alloc_path();
7524 	if (!path)
7525 		return -ENOMEM;
7526 
7527 	/*
7528 	 * The uuid_mutex is only required when mounting a sprout FS, but we
7529 	 * take it unconditionally here.
7530 	 */
7531 	mutex_lock(&uuid_mutex);
7532 
7533 	/*
7534 	 * It is possible for mount and umount to race in such a way that
7535 	 * we execute this code path, but open_fs_devices failed to clear
7536 	 * total_rw_bytes. We certainly want it cleared before reading the
7537 	 * device items, so clear it here.
7538 	 */
7539 	fs_info->fs_devices->total_rw_bytes = 0;
7540 
7541 	/*
7542 	 * Read all device items, and then all the chunk items. All
7543 	 * device items are found before any chunk item (their object id
7544 	 * is smaller than the lowest possible object id for a chunk
7545 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7546 	 */
7547 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7548 	key.offset = 0;
7549 	key.type = 0;
7550 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7551 	if (ret < 0)
7552 		goto error;
7553 	while (1) {
7554 		struct extent_buffer *node;
7555 
7556 		leaf = path->nodes[0];
7557 		slot = path->slots[0];
7558 		if (slot >= btrfs_header_nritems(leaf)) {
7559 			ret = btrfs_next_leaf(root, path);
7560 			if (ret == 0)
7561 				continue;
7562 			if (ret < 0)
7563 				goto error;
7564 			break;
7565 		}
7566 		/*
7567 		 * The nodes on level 1 are not locked but we don't need to lock
7568 		 * them during mount time as nothing else can access the tree.
7569 		 */
7570 		node = path->nodes[1];
7571 		if (node) {
7572 			if (last_ra_node != node->start) {
7573 				readahead_tree_node_children(node);
7574 				last_ra_node = node->start;
7575 			}
7576 		}
7577 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7578 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7579 			struct btrfs_dev_item *dev_item;
7580 			dev_item = btrfs_item_ptr(leaf, slot,
7581 						  struct btrfs_dev_item);
7582 			ret = read_one_dev(leaf, dev_item);
7583 			if (ret)
7584 				goto error;
7585 			total_dev++;
7586 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7587 			struct btrfs_chunk *chunk;
7588 
7589 			/*
7590 			 * We are only called at mount time, so no need to take
7591 			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7592 			 * we always lock first fs_info->chunk_mutex before
7593 			 * acquiring any locks on the chunk tree. This is a
7594 			 * requirement for chunk allocation, see the comment on
7595 			 * top of btrfs_chunk_alloc() for details.
7596 			 */
7597 			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7598 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7599 			ret = read_one_chunk(&found_key, leaf, chunk);
7600 			if (ret)
7601 				goto error;
7602 		}
7603 		path->slots[0]++;
7604 	}
7605 
7606 	/*
7607 	 * After loading the chunk tree we have all device information, so do
7608 	 * another round of validation checks.
7609 	 */
7610 	if (total_dev != fs_info->fs_devices->total_devices) {
7611 		btrfs_err(fs_info,
7612 	   "super_num_devices %llu mismatch with num_devices %llu found here",
7613 			  btrfs_super_num_devices(fs_info->super_copy),
7614 			  total_dev);
7615 		ret = -EINVAL;
7616 		goto error;
7617 	}
7618 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7619 	    fs_info->fs_devices->total_rw_bytes) {
7620 		btrfs_err(fs_info,
7621 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7622 			  btrfs_super_total_bytes(fs_info->super_copy),
7623 			  fs_info->fs_devices->total_rw_bytes);
7624 		ret = -EINVAL;
7625 		goto error;
7626 	}
7627 	ret = 0;
7628 error:
7629 	mutex_unlock(&uuid_mutex);
7630 
7631 	btrfs_free_path(path);
7632 	return ret;
7633 }
7634 
7635 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7636 {
7637 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7638 	struct btrfs_device *device;
7639 
7640 	fs_devices->fs_info = fs_info;
7641 
7642 	mutex_lock(&fs_devices->device_list_mutex);
7643 	list_for_each_entry(device, &fs_devices->devices, dev_list)
7644 		device->fs_info = fs_info;
7645 
7646 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7647 		list_for_each_entry(device, &seed_devs->devices, dev_list)
7648 			device->fs_info = fs_info;
7649 
7650 		seed_devs->fs_info = fs_info;
7651 	}
7652 	mutex_unlock(&fs_devices->device_list_mutex);
7653 }
7654 
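/*
 * Helpers to read/write a single u64 counter in the values[] array of an
 * on-disk dev_stats item.
 */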
7655 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7656 				 const struct btrfs_dev_stats_item *ptr,
7657 				 int index)
7658 {
7659 	u64 val;
7660 
7661 	read_extent_buffer(eb, &val,
7662 			   offsetof(struct btrfs_dev_stats_item, values) +
7663 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7664 			   sizeof(val));
7665 	return val;
7666 }
7667 
7668 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7669 				      struct btrfs_dev_stats_item *ptr,
7670 				      int index, u64 val)
7671 {
7672 	write_extent_buffer(eb, &val,
7673 			    offsetof(struct btrfs_dev_stats_item, values) +
7674 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7675 			    sizeof(val));
7676 }
7677 
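/*
 * Load the persistent statistics item of @device from the dev root,
 * zeroing any counters that a shorter on-disk item does not cover.  If
 * no item exists (yet), all counters start from zero.
 */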
7678 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7679 				       struct btrfs_path *path)
7680 {
7681 	struct btrfs_dev_stats_item *ptr;
7682 	struct extent_buffer *eb;
7683 	struct btrfs_key key;
7684 	int item_size;
7685 	int i, ret, slot;
7686 
7687 	if (!device->fs_info->dev_root)
7688 		return 0;
7689 
7690 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7691 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7692 	key.offset = device->devid;
7693 	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7694 	if (ret) {
7695 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7696 			btrfs_dev_stat_set(device, i, 0);
7697 		device->dev_stats_valid = 1;
7698 		btrfs_release_path(path);
7699 		return ret < 0 ? ret : 0;
7700 	}
7701 	slot = path->slots[0];
7702 	eb = path->nodes[0];
7703 	item_size = btrfs_item_size_nr(eb, slot);
7704 
7705 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7706 
7707 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7708 		if (item_size >= (1 + i) * sizeof(__le64))
7709 			btrfs_dev_stat_set(device, i,
7710 					   btrfs_dev_stats_value(eb, ptr, i));
7711 		else
7712 			btrfs_dev_stat_set(device, i, 0);
7713 	}
7714 
7715 	device->dev_stats_valid = 1;
7716 	btrfs_dev_stat_print_on_load(device);
7717 	btrfs_release_path(path);
7718 
7719 	return 0;
7720 }
7721 
7722 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7723 {
7724 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7725 	struct btrfs_device *device;
7726 	struct btrfs_path *path = NULL;
7727 	int ret = 0;
7728 
7729 	path = btrfs_alloc_path();
7730 	if (!path)
7731 		return -ENOMEM;
7732 
7733 	mutex_lock(&fs_devices->device_list_mutex);
7734 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7735 		ret = btrfs_device_init_dev_stats(device, path);
7736 		if (ret)
7737 			goto out;
7738 	}
7739 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7740 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7741 			ret = btrfs_device_init_dev_stats(device, path);
7742 			if (ret)
7743 				goto out;
7744 		}
7745 	}
7746 out:
7747 	mutex_unlock(&fs_devices->device_list_mutex);
7748 
7749 	btrfs_free_path(path);
7750 	return ret;
7751 }
7752 
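/*
 * Write the in-memory statistics of @device back to its dev_stats item,
 * recreating the item if the existing one is too small to hold all
 * counters.
 */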
7753 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7754 				struct btrfs_device *device)
7755 {
7756 	struct btrfs_fs_info *fs_info = trans->fs_info;
7757 	struct btrfs_root *dev_root = fs_info->dev_root;
7758 	struct btrfs_path *path;
7759 	struct btrfs_key key;
7760 	struct extent_buffer *eb;
7761 	struct btrfs_dev_stats_item *ptr;
7762 	int ret;
7763 	int i;
7764 
7765 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7766 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7767 	key.offset = device->devid;
7768 
7769 	path = btrfs_alloc_path();
7770 	if (!path)
7771 		return -ENOMEM;
7772 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7773 	if (ret < 0) {
7774 		btrfs_warn_in_rcu(fs_info,
7775 			"error %d while searching for dev_stats item for device %s",
7776 			      ret, rcu_str_deref(device->name));
7777 		goto out;
7778 	}
7779 
7780 	if (ret == 0 &&
7781 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7782 		/* need to delete old one and insert a new one */
7783 		ret = btrfs_del_item(trans, dev_root, path);
7784 		if (ret != 0) {
7785 			btrfs_warn_in_rcu(fs_info,
7786 				"delete too small dev_stats item for device %s failed %d",
7787 				      rcu_str_deref(device->name), ret);
7788 			goto out;
7789 		}
7790 		ret = 1;
7791 	}
7792 
7793 	if (ret == 1) {
7794 		/* need to insert a new item */
7795 		btrfs_release_path(path);
7796 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7797 					      &key, sizeof(*ptr));
7798 		if (ret < 0) {
7799 			btrfs_warn_in_rcu(fs_info,
7800 				"insert dev_stats item for device %s failed %d",
7801 				rcu_str_deref(device->name), ret);
7802 			goto out;
7803 		}
7804 	}
7805 
7806 	eb = path->nodes[0];
7807 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7808 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7809 		btrfs_set_dev_stats_value(eb, ptr, i,
7810 					  btrfs_dev_stat_read(device, i));
7811 	btrfs_mark_buffer_dirty(eb);
7812 
7813 out:
7814 	btrfs_free_path(path);
7815 	return ret;
7816 }
7817 
7818 /*
7819  * Called from commit_transaction(). Writes all changed device stats to disk.
7820  */
7821 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7822 {
7823 	struct btrfs_fs_info *fs_info = trans->fs_info;
7824 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7825 	struct btrfs_device *device;
7826 	int stats_cnt;
7827 	int ret = 0;
7828 
7829 	mutex_lock(&fs_devices->device_list_mutex);
7830 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7831 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7832 		if (!device->dev_stats_valid || stats_cnt == 0)
7833 			continue;
7834 
7836 		/*
7837 		 * There is a LOAD-LOAD control dependency between the value of
7838 		 * dev_stats_ccnt and updating the on-disk values which requires
7839 		 * reading the in-memory counters. Such control dependencies
7840 		 * require explicit read memory barriers.
7841 		 *
7842 		 * This memory barrier pairs with smp_mb__before_atomic() in
7843 		 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
7844 		 * barrier implied by atomic_xchg() in
7845 		 * btrfs_dev_stats_read_and_reset().
7846 		 */
7847 		smp_rmb();
7848 
7849 		ret = update_dev_stat_item(trans, device);
7850 		if (!ret)
7851 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7852 	}
7853 	mutex_unlock(&fs_devices->device_list_mutex);
7854 
7855 	return ret;
7856 }
7857 
7858 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7859 {
7860 	btrfs_dev_stat_inc(dev, index);
7861 	btrfs_dev_stat_print_on_error(dev);
7862 }
7863 
7864 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7865 {
7866 	if (!dev->dev_stats_valid)
7867 		return;
7868 	btrfs_err_rl_in_rcu(dev->fs_info,
7869 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7870 			   rcu_str_deref(dev->name),
7871 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7872 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7873 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7874 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7875 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7876 }
7877 
7878 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7879 {
7880 	int i;
7881 
7882 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7883 		if (btrfs_dev_stat_read(dev, i) != 0)
7884 			break;
7885 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7886 		return; /* all values == 0, suppress message */
7887 
7888 	btrfs_info_in_rcu(dev->fs_info,
7889 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7890 	       rcu_str_deref(dev->name),
7891 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7892 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7893 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7894 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7895 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7896 }
7897 
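/*
 * Fill the given btrfs_ioctl_get_dev_stats structure for the device
 * identified by stats->devid.  With BTRFS_DEV_STATS_RESET the counters
 * are read and zeroed in one atomic step.
 */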
7898 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7899 			struct btrfs_ioctl_get_dev_stats *stats)
7900 {
7901 	BTRFS_DEV_LOOKUP_ARGS(args);
7902 	struct btrfs_device *dev;
7903 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7904 	int i;
7905 
7906 	mutex_lock(&fs_devices->device_list_mutex);
7907 	args.devid = stats->devid;
7908 	dev = btrfs_find_device(fs_info->fs_devices, &args);
7909 	mutex_unlock(&fs_devices->device_list_mutex);
7910 
7911 	if (!dev) {
7912 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7913 		return -ENODEV;
7914 	} else if (!dev->dev_stats_valid) {
7915 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7916 		return -ENODEV;
7917 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7918 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7919 			if (stats->nr_items > i)
7920 				stats->values[i] =
7921 					btrfs_dev_stat_read_and_reset(dev, i);
7922 			else
7923 				btrfs_dev_stat_set(dev, i, 0);
7924 		}
7925 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7926 			   current->comm, task_pid_nr(current));
7927 	} else {
7928 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7929 			if (stats->nr_items > i)
7930 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7931 	}
7932 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7933 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7934 	return 0;
7935 }
7936 
7937 /*
7938  * Update the size and bytes used for each device where it changed.  This is
7939  * delayed since we would otherwise get errors while writing out the
7940  * superblocks.
7941  *
7942  * Must be invoked during transaction commit.
7943  */
7944 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7945 {
7946 	struct btrfs_device *curr, *next;
7947 
7948 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7949 
7950 	if (list_empty(&trans->dev_update_list))
7951 		return;
7952 
7953 	/*
7954 	 * We don't need the device_list_mutex here.  This list is owned by the
7955 	 * transaction and the transaction must complete before the device is
7956 	 * released.
7957 	 */
7958 	mutex_lock(&trans->fs_info->chunk_mutex);
7959 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7960 				 post_commit_list) {
7961 		list_del_init(&curr->post_commit_list);
7962 		curr->commit_total_bytes = curr->disk_total_bytes;
7963 		curr->commit_bytes_used = curr->bytes_used;
7964 	}
7965 	mutex_unlock(&trans->fs_info->chunk_mutex);
7966 }
7967 
7968 /*
7969  * Multiplicity factor (ncopies) for simple profiles: DUP, RAID1-like and RAID10.
7970  */
7971 int btrfs_bg_type_to_factor(u64 flags)
7972 {
7973 	const int index = btrfs_bg_flags_to_raid_index(flags);
7974 
7975 	return btrfs_raid_array[index].ncopies;
7976 }
7977 
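/*
 * Verify that one dev extent matches its chunk: the chunk must exist,
 * the per-device stripe length must match the dev extent length, one of
 * the chunk's stripes must sit at exactly this (devid, physical) pair,
 * and the extent must fit within the device (and be zone aligned on
 * zoned devices).
 */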
7980 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7981 				 u64 chunk_offset, u64 devid,
7982 				 u64 physical_offset, u64 physical_len)
7983 {
7984 	struct btrfs_dev_lookup_args args = { .devid = devid };
7985 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7986 	struct extent_map *em;
7987 	struct map_lookup *map;
7988 	struct btrfs_device *dev;
7989 	u64 stripe_len;
7990 	bool found = false;
7991 	int ret = 0;
7992 	int i;
7993 
7994 	read_lock(&em_tree->lock);
7995 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7996 	read_unlock(&em_tree->lock);
7997 
7998 	if (!em) {
7999 		btrfs_err(fs_info,
8000 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8001 			  physical_offset, devid);
8002 		ret = -EUCLEAN;
8003 		goto out;
8004 	}
8005 
8006 	map = em->map_lookup;
8007 	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
8008 	if (physical_len != stripe_len) {
8009 		btrfs_err(fs_info,
8010 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8011 			  physical_offset, devid, em->start, physical_len,
8012 			  stripe_len);
8013 		ret = -EUCLEAN;
8014 		goto out;
8015 	}
8016 
8017 	for (i = 0; i < map->num_stripes; i++) {
8018 		if (map->stripes[i].dev->devid == devid &&
8019 		    map->stripes[i].physical == physical_offset) {
8020 			found = true;
8021 			if (map->verified_stripes >= map->num_stripes) {
8022 				btrfs_err(fs_info,
8023 				"too many dev extents for chunk %llu found",
8024 					  em->start);
8025 				ret = -EUCLEAN;
8026 				goto out;
8027 			}
8028 			map->verified_stripes++;
8029 			break;
8030 		}
8031 	}
8032 	if (!found) {
8033 		btrfs_err(fs_info,
8034 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
8035 			physical_offset, devid);
8036 		ret = -EUCLEAN;
8037 	}
8038 
8039 	/* Make sure no dev extent is beyond device boundary */
8040 	dev = btrfs_find_device(fs_info->fs_devices, &args);
8041 	if (!dev) {
8042 		btrfs_err(fs_info, "failed to find devid %llu", devid);
8043 		ret = -EUCLEAN;
8044 		goto out;
8045 	}
8046 
8047 	if (physical_offset + physical_len > dev->disk_total_bytes) {
8048 		btrfs_err(fs_info,
8049 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8050 			  devid, physical_offset, physical_len,
8051 			  dev->disk_total_bytes);
8052 		ret = -EUCLEAN;
8053 		goto out;
8054 	}
8055 
8056 	if (dev->zone_info) {
8057 		u64 zone_size = dev->zone_info->zone_size;
8058 
8059 		if (!IS_ALIGNED(physical_offset, zone_size) ||
8060 		    !IS_ALIGNED(physical_len, zone_size)) {
8061 			btrfs_err(fs_info,
8062 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8063 				  devid, physical_offset, physical_len);
8064 			ret = -EUCLEAN;
8065 			goto out;
8066 		}
8067 	}
8068 
8069 out:
8070 	free_extent_map(em);
8071 	return ret;
8072 }
8073 
8074 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8075 {
8076 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8077 	struct extent_map *em;
8078 	struct rb_node *node;
8079 	int ret = 0;
8080 
8081 	read_lock(&em_tree->lock);
8082 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8083 		em = rb_entry(node, struct extent_map, rb_node);
8084 		if (em->map_lookup->num_stripes !=
8085 		    em->map_lookup->verified_stripes) {
8086 			btrfs_err(fs_info,
8087 			"chunk %llu has missing dev extent, have %d expect %d",
8088 				  em->start, em->map_lookup->verified_stripes,
8089 				  em->map_lookup->num_stripes);
8090 			ret = -EUCLEAN;
8091 			goto out;
8092 		}
8093 	}
8094 out:
8095 	read_unlock(&em_tree->lock);
8096 	return ret;
8097 }
8098 
8099 /*
8100  * Ensure that all dev extents are mapped to correct chunk, otherwise
8101  * later chunk allocation/free would cause unexpected behavior.
8102  *
8103  * NOTE: This will iterate through the whole device tree, which should be
8104  * roughly the same size as the chunk tree.  This slightly increases mount time.
8105  */
8106 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8107 {
8108 	struct btrfs_path *path;
8109 	struct btrfs_root *root = fs_info->dev_root;
8110 	struct btrfs_key key;
8111 	u64 prev_devid = 0;
8112 	u64 prev_dev_ext_end = 0;
8113 	int ret = 0;
8114 
8115 	/*
8116 	 * We don't have a dev_root because we mounted with ignorebadroots and
8117 	 * failed to load the root, so we want to skip the verification in this
8118 	 * case for sure.
8119 	 *
8120 	 * However if the dev root is fine, but the tree itself is corrupted
8121 	 * we'd still fail to mount.  This verification is only to make sure
8122 	 * writes can happen safely, so instead just bypass this check
8123 	 * completely in the case of IGNOREBADROOTS.
8124 	 */
8125 	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8126 		return 0;
8127 
8128 	key.objectid = 1;
8129 	key.type = BTRFS_DEV_EXTENT_KEY;
8130 	key.offset = 0;
8131 
8132 	path = btrfs_alloc_path();
8133 	if (!path)
8134 		return -ENOMEM;
8135 
8136 	path->reada = READA_FORWARD;
8137 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8138 	if (ret < 0)
8139 		goto out;
8140 
8141 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8142 		ret = btrfs_next_leaf(root, path);
8143 		if (ret < 0)
8144 			goto out;
8145 		/* No dev extents at all? Not good */
8146 		if (ret > 0) {
8147 			ret = -EUCLEAN;
8148 			goto out;
8149 		}
8150 	}
8151 	while (1) {
8152 		struct extent_buffer *leaf = path->nodes[0];
8153 		struct btrfs_dev_extent *dext;
8154 		int slot = path->slots[0];
8155 		u64 chunk_offset;
8156 		u64 physical_offset;
8157 		u64 physical_len;
8158 		u64 devid;
8159 
8160 		btrfs_item_key_to_cpu(leaf, &key, slot);
8161 		if (key.type != BTRFS_DEV_EXTENT_KEY)
8162 			break;
8163 		devid = key.objectid;
8164 		physical_offset = key.offset;
8165 
8166 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8167 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8168 		physical_len = btrfs_dev_extent_length(leaf, dext);
8169 
8170 		/* Check if this dev extent overlaps with the previous one */
8171 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8172 			btrfs_err(fs_info,
8173 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8174 				  devid, physical_offset, prev_dev_ext_end);
8175 			ret = -EUCLEAN;
8176 			goto out;
8177 		}
8178 
8179 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8180 					    physical_offset, physical_len);
8181 		if (ret < 0)
8182 			goto out;
8183 		prev_devid = devid;
8184 		prev_dev_ext_end = physical_offset + physical_len;
8185 
8186 		ret = btrfs_next_item(root, path);
8187 		if (ret < 0)
8188 			goto out;
8189 		if (ret > 0) {
8190 			ret = 0;
8191 			break;
8192 		}
8193 	}
8194 
8195 	/* Ensure all chunks have corresponding dev extents */
8196 	ret = verify_chunk_dev_extent_mapping(fs_info);
8197 out:
8198 	btrfs_free_path(path);
8199 	return ret;
8200 }
8201 
8202 /*
8203  * Check whether the given block group or device is pinned by any inode being
8204  * used as a swapfile.
8205  */
8206 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8207 {
8208 	struct btrfs_swapfile_pin *sp;
8209 	struct rb_node *node;
8210 
8211 	spin_lock(&fs_info->swapfile_pins_lock);
8212 	node = fs_info->swapfile_pins.rb_node;
8213 	while (node) {
8214 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8215 		if (ptr < sp->ptr)
8216 			node = node->rb_left;
8217 		else if (ptr > sp->ptr)
8218 			node = node->rb_right;
8219 		else
8220 			break;
8221 	}
8222 	spin_unlock(&fs_info->swapfile_pins_lock);
8223 	return node != NULL;
8224 }
8225 
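/*
 * Kthread doing the work for btrfs_repair_one_zone(): grab the balance
 * exclusive op, re-check that the block group still exists and is still
 * flagged for repair, then relocate it.
 */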
8226 static int relocating_repair_kthread(void *data)
8227 {
8228 	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8229 	struct btrfs_fs_info *fs_info = cache->fs_info;
8230 	u64 target;
8231 	int ret = 0;
8232 
8233 	target = cache->start;
8234 	btrfs_put_block_group(cache);
8235 
8236 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8237 		btrfs_info(fs_info,
8238 			   "zoned: skip relocating block group %llu to repair: EBUSY",
8239 			   target);
8240 		return -EBUSY;
8241 	}
8242 
8243 	mutex_lock(&fs_info->reclaim_bgs_lock);
8244 
8245 	/* Ensure block group still exists */
8246 	cache = btrfs_lookup_block_group(fs_info, target);
8247 	if (!cache)
8248 		goto out;
8249 
8250 	if (!cache->relocating_repair)
8251 		goto out;
8252 
8253 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8254 	if (ret < 0)
8255 		goto out;
8256 
8257 	btrfs_info(fs_info,
8258 		   "zoned: relocating block group %llu to repair IO failure",
8259 		   target);
8260 	ret = btrfs_relocate_chunk(fs_info, target);
8261 
8262 out:
8263 	if (cache)
8264 		btrfs_put_block_group(cache);
8265 	mutex_unlock(&fs_info->reclaim_bgs_lock);
8266 	btrfs_exclop_finish(fs_info);
8267 
8268 	return ret;
8269 }
8270 
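/*
 * Repair an IO failure on a zoned filesystem by relocating the affected
 * block group from a background kthread.  This is a no-op on degraded
 * mounts or when a repair for the block group is already in flight.
 */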
8271 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8272 {
8273 	struct btrfs_block_group *cache;
8274 
8275 	/* Do not attempt to repair in degraded state */
8276 	if (btrfs_test_opt(fs_info, DEGRADED))
8277 		return 0;
8278 
8279 	cache = btrfs_lookup_block_group(fs_info, logical);
8280 	if (!cache)
8281 		return 0;
8282 
8283 	spin_lock(&cache->lock);
8284 	if (cache->relocating_repair) {
8285 		spin_unlock(&cache->lock);
8286 		btrfs_put_block_group(cache);
8287 		return 0;
8288 	}
8289 	cache->relocating_repair = 1;
8290 	spin_unlock(&cache->lock);
8291 
8292 	kthread_run(relocating_repair_kthread, cache,
8293 		    "btrfs-relocating-repair");
8294 
8295 	return 0;
8296 }
8297