xref: /openbmc/linux/fs/btrfs/volumes.c (revision 0661cb2a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include "misc.h"
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "dev-replace.h"
29 #include "sysfs.h"
30 #include "tree-checker.h"
31 #include "space-info.h"
32 #include "block-group.h"
33 #include "discard.h"
34 #include "zoned.h"
35 
36 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
37 	[BTRFS_RAID_RAID10] = {
38 		.sub_stripes	= 2,
39 		.dev_stripes	= 1,
40 		.devs_max	= 0,	/* 0 == as many as possible */
41 		.devs_min	= 4,
42 		.tolerated_failures = 1,
43 		.devs_increment	= 2,
44 		.ncopies	= 2,
45 		.nparity        = 0,
46 		.raid_name	= "raid10",
47 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
48 		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
49 	},
50 	[BTRFS_RAID_RAID1] = {
51 		.sub_stripes	= 1,
52 		.dev_stripes	= 1,
53 		.devs_max	= 2,
54 		.devs_min	= 2,
55 		.tolerated_failures = 1,
56 		.devs_increment	= 2,
57 		.ncopies	= 2,
58 		.nparity        = 0,
59 		.raid_name	= "raid1",
60 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
61 		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
62 	},
63 	[BTRFS_RAID_RAID1C3] = {
64 		.sub_stripes	= 1,
65 		.dev_stripes	= 1,
66 		.devs_max	= 3,
67 		.devs_min	= 3,
68 		.tolerated_failures = 2,
69 		.devs_increment	= 3,
70 		.ncopies	= 3,
71 		.nparity        = 0,
72 		.raid_name	= "raid1c3",
73 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C3,
74 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
75 	},
76 	[BTRFS_RAID_RAID1C4] = {
77 		.sub_stripes	= 1,
78 		.dev_stripes	= 1,
79 		.devs_max	= 4,
80 		.devs_min	= 4,
81 		.tolerated_failures = 3,
82 		.devs_increment	= 4,
83 		.ncopies	= 4,
84 		.nparity        = 0,
85 		.raid_name	= "raid1c4",
86 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1C4,
87 		.mindev_error	= BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
88 	},
89 	[BTRFS_RAID_DUP] = {
90 		.sub_stripes	= 1,
91 		.dev_stripes	= 2,
92 		.devs_max	= 1,
93 		.devs_min	= 1,
94 		.tolerated_failures = 0,
95 		.devs_increment	= 1,
96 		.ncopies	= 2,
97 		.nparity        = 0,
98 		.raid_name	= "dup",
99 		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
100 		.mindev_error	= 0,
101 	},
102 	[BTRFS_RAID_RAID0] = {
103 		.sub_stripes	= 1,
104 		.dev_stripes	= 1,
105 		.devs_max	= 0,
106 		.devs_min	= 2,
107 		.tolerated_failures = 0,
108 		.devs_increment	= 1,
109 		.ncopies	= 1,
110 		.nparity        = 0,
111 		.raid_name	= "raid0",
112 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
113 		.mindev_error	= 0,
114 	},
115 	[BTRFS_RAID_SINGLE] = {
116 		.sub_stripes	= 1,
117 		.dev_stripes	= 1,
118 		.devs_max	= 1,
119 		.devs_min	= 1,
120 		.tolerated_failures = 0,
121 		.devs_increment	= 1,
122 		.ncopies	= 1,
123 		.nparity        = 0,
124 		.raid_name	= "single",
125 		.bg_flag	= 0,
126 		.mindev_error	= 0,
127 	},
128 	[BTRFS_RAID_RAID5] = {
129 		.sub_stripes	= 1,
130 		.dev_stripes	= 1,
131 		.devs_max	= 0,
132 		.devs_min	= 2,
133 		.tolerated_failures = 1,
134 		.devs_increment	= 1,
135 		.ncopies	= 1,
136 		.nparity        = 1,
137 		.raid_name	= "raid5",
138 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
139 		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
140 	},
141 	[BTRFS_RAID_RAID6] = {
142 		.sub_stripes	= 1,
143 		.dev_stripes	= 1,
144 		.devs_max	= 0,
145 		.devs_min	= 3,
146 		.tolerated_failures = 2,
147 		.devs_increment	= 1,
148 		.ncopies	= 1,
149 		.nparity        = 2,
150 		.raid_name	= "raid6",
151 		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
152 		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
153 	},
154 };
155 
156 const char *btrfs_bg_type_to_raid_name(u64 flags)
157 {
158 	const int index = btrfs_bg_flags_to_raid_index(flags);
159 
160 	if (index >= BTRFS_NR_RAID_TYPES)
161 		return NULL;
162 
163 	return btrfs_raid_array[index].raid_name;
164 }
165 
166 /*
167  * Fill @buf with textual description of @bg_flags, no more than @size_buf
168  * bytes including terminating null byte.
169  */
170 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
171 {
172 	int i;
173 	int ret;
174 	char *bp = buf;
175 	u64 flags = bg_flags;
176 	u32 size_bp = size_buf;
177 
178 	if (!flags) {
179 		strcpy(bp, "NONE");
180 		return;
181 	}
182 
183 #define DESCRIBE_FLAG(flag, desc)						\
184 	do {								\
185 		if (flags & (flag)) {					\
186 			ret = snprintf(bp, size_bp, "%s|", (desc));	\
187 			if (ret < 0 || ret >= size_bp)			\
188 				goto out_overflow;			\
189 			size_bp -= ret;					\
190 			bp += ret;					\
191 			flags &= ~(flag);				\
192 		}							\
193 	} while (0)
194 
195 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
196 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
197 	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
198 
199 	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
200 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
201 		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
202 			      btrfs_raid_array[i].raid_name);
203 #undef DESCRIBE_FLAG
204 
205 	if (flags) {
206 		ret = snprintf(bp, size_bp, "0x%llx|", flags);
207 		size_bp -= ret;
208 	}
209 
210 	if (size_bp < size_buf)
211 		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
212 
213 	/*
214 	 * The text is trimmed, it's up to the caller to provide sufficiently
215 	 * large buffer
216 	 */
217 out_overflow:;
218 }
219 
220 static int init_first_rw_device(struct btrfs_trans_handle *trans);
221 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
222 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
223 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
224 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
225 			     enum btrfs_map_op op,
226 			     u64 logical, u64 *length,
227 			     struct btrfs_bio **bbio_ret,
228 			     int mirror_num, int need_raid_map);
229 
230 /*
231  * Device locking
232  * ==============
233  *
234  * There are several mutexes that protect manipulation of devices and low-level
235  * structures like chunks but not block groups, extents or files
236  *
237  * uuid_mutex (global lock)
238  * ------------------------
239  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
240  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
241  * device) or requested by the device= mount option
242  *
243  * the mutex can be very coarse and can cover long-running operations
244  *
245  * protects: updates to fs_devices counters like missing devices, rw devices,
246  * seeding, structure cloning, opening/closing devices at mount/umount time
247  *
248  * global::fs_devs - add, remove, updates to the global list
249  *
250  * does not protect: manipulation of the fs_devices::devices list in general
251  * but in mount context it could be used to exclude list modifications by eg.
252  * scan ioctl
253  *
254  * btrfs_device::name - renames (write side), read is RCU
255  *
256  * fs_devices::device_list_mutex (per-fs, with RCU)
257  * ------------------------------------------------
258  * protects updates to fs_devices::devices, ie. adding and deleting
259  *
260  * simple list traversal with read-only actions can be done with RCU protection
261  *
262  * may be used to exclude some operations from running concurrently without any
263  * modifications to the list (see write_all_supers)
264  *
265  * Is not required at mount and close times, because our device list is
266  * protected by the uuid_mutex at that point.
267  *
268  * balance_mutex
269  * -------------
270  * protects balance structures (status, state) and context accessed from
271  * several places (internally, ioctl)
272  *
273  * chunk_mutex
274  * -----------
275  * protects chunks, adding or removing during allocation, trim or when a new
276  * device is added/removed. Additionally it also protects post_commit_list of
277  * individual devices, since they can be added to the transaction's
278  * post_commit_list only with chunk_mutex held.
279  *
280  * cleaner_mutex
281  * -------------
282  * a big lock that is held by the cleaner thread and prevents running subvolume
283  * cleaning together with relocation or delayed iputs
284  *
285  *
286  * Lock nesting
287  * ============
288  *
289  * uuid_mutex
290  *   device_list_mutex
291  *     chunk_mutex
292  *   balance_mutex
293  *
294  *
295  * Exclusive operations
296  * ====================
297  *
298  * Maintains the exclusivity of the following operations that apply to the
299  * whole filesystem and cannot run in parallel.
300  *
301  * - Balance (*)
302  * - Device add
303  * - Device remove
304  * - Device replace (*)
305  * - Resize
306  *
307  * The device operations (as above) can be in one of the following states:
308  *
309  * - Running state
310  * - Paused state
311  * - Completed state
312  *
313  * Only device operations marked with (*) can go into the Paused state for the
314  * following reasons:
315  *
316  * - ioctl (only Balance can be Paused through ioctl)
317  * - filesystem remounted as read-only
318  * - filesystem unmounted and mounted as read-only
319  * - system power-cycle and filesystem mounted as read-only
320  * - filesystem or device errors leading to forced read-only
321  *
322  * The status of exclusive operation is set and cleared atomically.
323  * During the course of Paused state, fs_info::exclusive_operation remains set.
324  * A device operation in Paused or Running state can be canceled or resumed
325  * either by ioctl (Balance only) or when remounted as read-write.
326  * The exclusive status is cleared when the device operation is canceled or
327  * completed.
328  */
329 
330 DEFINE_MUTEX(uuid_mutex);
331 static LIST_HEAD(fs_uuids);
332 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
333 {
334 	return &fs_uuids;
335 }
336 
337 /*
338  * alloc_fs_devices - allocate struct btrfs_fs_devices
339  * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
340  * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
341  *
342  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
343  * The returned struct is not linked onto any lists and can be destroyed with
344  * kfree() right away.
345  */
346 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
347 						 const u8 *metadata_fsid)
348 {
349 	struct btrfs_fs_devices *fs_devs;
350 
351 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
352 	if (!fs_devs)
353 		return ERR_PTR(-ENOMEM);
354 
355 	mutex_init(&fs_devs->device_list_mutex);
356 
357 	INIT_LIST_HEAD(&fs_devs->devices);
358 	INIT_LIST_HEAD(&fs_devs->alloc_list);
359 	INIT_LIST_HEAD(&fs_devs->fs_list);
360 	INIT_LIST_HEAD(&fs_devs->seed_list);
361 	if (fsid)
362 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
363 
364 	if (metadata_fsid)
365 		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
366 	else if (fsid)
367 		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
368 
369 	return fs_devs;
370 }
371 
372 void btrfs_free_device(struct btrfs_device *device)
373 {
374 	WARN_ON(!list_empty(&device->post_commit_list));
375 	rcu_string_free(device->name);
376 	extent_io_tree_release(&device->alloc_state);
377 	bio_put(device->flush_bio);
378 	btrfs_destroy_dev_zone_info(device);
379 	kfree(device);
380 }
381 
382 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
383 {
384 	struct btrfs_device *device;
385 	WARN_ON(fs_devices->opened);
386 	while (!list_empty(&fs_devices->devices)) {
387 		device = list_entry(fs_devices->devices.next,
388 				    struct btrfs_device, dev_list);
389 		list_del(&device->dev_list);
390 		btrfs_free_device(device);
391 	}
392 	kfree(fs_devices);
393 }
394 
395 void __exit btrfs_cleanup_fs_uuids(void)
396 {
397 	struct btrfs_fs_devices *fs_devices;
398 
399 	while (!list_empty(&fs_uuids)) {
400 		fs_devices = list_entry(fs_uuids.next,
401 					struct btrfs_fs_devices, fs_list);
402 		list_del(&fs_devices->fs_list);
403 		free_fs_devices(fs_devices);
404 	}
405 }
406 
407 /*
408  * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
409  * Returned struct is not linked onto any lists and must be destroyed using
410  * btrfs_free_device.
411  */
412 static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
413 {
414 	struct btrfs_device *dev;
415 
416 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
417 	if (!dev)
418 		return ERR_PTR(-ENOMEM);
419 
420 	/*
421 	 * Preallocate a bio that's always going to be used for flushing device
422 	 * barriers and matches the device lifespan
423 	 */
424 	dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
425 	if (!dev->flush_bio) {
426 		kfree(dev);
427 		return ERR_PTR(-ENOMEM);
428 	}
429 
430 	INIT_LIST_HEAD(&dev->dev_list);
431 	INIT_LIST_HEAD(&dev->dev_alloc_list);
432 	INIT_LIST_HEAD(&dev->post_commit_list);
433 
434 	atomic_set(&dev->reada_in_flight, 0);
435 	atomic_set(&dev->dev_stats_ccnt, 0);
436 	btrfs_device_data_ordered_init(dev);
437 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
438 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
439 	extent_io_tree_init(fs_info, &dev->alloc_state,
440 			    IO_TREE_DEVICE_ALLOC_STATE, NULL);
441 
442 	return dev;
443 }
444 
445 static noinline struct btrfs_fs_devices *find_fsid(
446 		const u8 *fsid, const u8 *metadata_fsid)
447 {
448 	struct btrfs_fs_devices *fs_devices;
449 
450 	ASSERT(fsid);
451 
452 	/* Handle non-split brain cases */
453 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
454 		if (metadata_fsid) {
455 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
456 			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
457 				      BTRFS_FSID_SIZE) == 0)
458 				return fs_devices;
459 		} else {
460 			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
461 				return fs_devices;
462 		}
463 	}
464 	return NULL;
465 }
466 
467 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
468 				struct btrfs_super_block *disk_super)
469 {
470 
471 	struct btrfs_fs_devices *fs_devices;
472 
473 	/*
474 	 * Handle scanned device having completed its fsid change but
475 	 * belonging to a fs_devices that was created by first scanning
476 	 * a device which didn't have its fsid/metadata_uuid changed
477 	 * at all and the CHANGING_FSID_V2 flag set.
478 	 */
479 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
480 		if (fs_devices->fsid_change &&
481 		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
482 			   BTRFS_FSID_SIZE) == 0 &&
483 		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
484 			   BTRFS_FSID_SIZE) == 0) {
485 			return fs_devices;
486 		}
487 	}
488 	/*
489 	 * Handle scanned device having completed its fsid change but
490 	 * belonging to a fs_devices that was created by a device that
491 	 * has an outdated pair of fsid/metadata_uuid and
492 	 * CHANGING_FSID_V2 flag set.
493 	 */
494 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
495 		if (fs_devices->fsid_change &&
496 		    memcmp(fs_devices->metadata_uuid,
497 			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
498 		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
499 			   BTRFS_FSID_SIZE) == 0) {
500 			return fs_devices;
501 		}
502 	}
503 
504 	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
505 }
506 
507 
508 static int
509 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
510 		      int flush, struct block_device **bdev,
511 		      struct btrfs_super_block **disk_super)
512 {
513 	int ret;
514 
515 	*bdev = blkdev_get_by_path(device_path, flags, holder);
516 
517 	if (IS_ERR(*bdev)) {
518 		ret = PTR_ERR(*bdev);
519 		goto error;
520 	}
521 
522 	if (flush)
523 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
524 	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
525 	if (ret) {
526 		blkdev_put(*bdev, flags);
527 		goto error;
528 	}
529 	invalidate_bdev(*bdev);
530 	*disk_super = btrfs_read_dev_super(*bdev);
531 	if (IS_ERR(*disk_super)) {
532 		ret = PTR_ERR(*disk_super);
533 		blkdev_put(*bdev, flags);
534 		goto error;
535 	}
536 
537 	return 0;
538 
539 error:
540 	*bdev = NULL;
541 	return ret;
542 }
543 
544 static bool device_path_matched(const char *path, struct btrfs_device *device)
545 {
546 	int found;
547 
548 	rcu_read_lock();
549 	found = strcmp(rcu_str_deref(device->name), path);
550 	rcu_read_unlock();
551 
552 	return found == 0;
553 }
554 
555 /*
556  *  Search and remove all stale (devices which are not mounted) devices.
557  *  When both inputs are NULL, it will search and release all stale devices.
558  *  path:	Optional. When provided will it release all unmounted devices
559  *		matching this path only.
560  *  skip_dev:	Optional. Will skip this device when searching for the stale
561  *		devices.
562  *  Return:	0 for success or if @path is NULL.
563  * 		-EBUSY if @path is a mounted device.
564  * 		-ENOENT if @path does not match any device in the list.
565  */
566 static int btrfs_free_stale_devices(const char *path,
567 				     struct btrfs_device *skip_device)
568 {
569 	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
570 	struct btrfs_device *device, *tmp_device;
571 	int ret = 0;
572 
573 	if (path)
574 		ret = -ENOENT;
575 
576 	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
577 
578 		mutex_lock(&fs_devices->device_list_mutex);
579 		list_for_each_entry_safe(device, tmp_device,
580 					 &fs_devices->devices, dev_list) {
581 			if (skip_device && skip_device == device)
582 				continue;
583 			if (path && !device->name)
584 				continue;
585 			if (path && !device_path_matched(path, device))
586 				continue;
587 			if (fs_devices->opened) {
588 				/* for an already deleted device return 0 */
589 				if (path && ret != 0)
590 					ret = -EBUSY;
591 				break;
592 			}
593 
594 			/* delete the stale device */
595 			fs_devices->num_devices--;
596 			list_del(&device->dev_list);
597 			btrfs_free_device(device);
598 
599 			ret = 0;
600 		}
601 		mutex_unlock(&fs_devices->device_list_mutex);
602 
603 		if (fs_devices->num_devices == 0) {
604 			btrfs_sysfs_remove_fsid(fs_devices);
605 			list_del(&fs_devices->fs_list);
606 			free_fs_devices(fs_devices);
607 		}
608 	}
609 
610 	return ret;
611 }
612 
613 /*
614  * This is only used on mount, and we are protected from competing things
615  * messing with our fs_devices by the uuid_mutex, thus we do not need the
616  * fs_devices->device_list_mutex here.
617  */
618 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
619 			struct btrfs_device *device, fmode_t flags,
620 			void *holder)
621 {
622 	struct request_queue *q;
623 	struct block_device *bdev;
624 	struct btrfs_super_block *disk_super;
625 	u64 devid;
626 	int ret;
627 
628 	if (device->bdev)
629 		return -EINVAL;
630 	if (!device->name)
631 		return -EINVAL;
632 
633 	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
634 				    &bdev, &disk_super);
635 	if (ret)
636 		return ret;
637 
638 	devid = btrfs_stack_device_id(&disk_super->dev_item);
639 	if (devid != device->devid)
640 		goto error_free_page;
641 
642 	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
643 		goto error_free_page;
644 
645 	device->generation = btrfs_super_generation(disk_super);
646 
647 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
648 		if (btrfs_super_incompat_flags(disk_super) &
649 		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
650 			pr_err(
651 		"BTRFS: Invalid seeding and uuid-changed device detected\n");
652 			goto error_free_page;
653 		}
654 
655 		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
656 		fs_devices->seeding = true;
657 	} else {
658 		if (bdev_read_only(bdev))
659 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
660 		else
661 			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
662 	}
663 
664 	q = bdev_get_queue(bdev);
665 	if (!blk_queue_nonrot(q))
666 		fs_devices->rotating = true;
667 
668 	device->bdev = bdev;
669 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
670 	device->mode = flags;
671 
672 	fs_devices->open_devices++;
673 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
674 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
675 		fs_devices->rw_devices++;
676 		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
677 	}
678 	btrfs_release_disk_super(disk_super);
679 
680 	return 0;
681 
682 error_free_page:
683 	btrfs_release_disk_super(disk_super);
684 	blkdev_put(bdev, flags);
685 
686 	return -EINVAL;
687 }
688 
689 /*
690  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
691  * being created with a disk that has already completed its fsid change. Such
692  * disk can belong to an fs which has its FSID changed or to one which doesn't.
693  * Handle both cases here.
694  */
695 static struct btrfs_fs_devices *find_fsid_inprogress(
696 					struct btrfs_super_block *disk_super)
697 {
698 	struct btrfs_fs_devices *fs_devices;
699 
700 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
701 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
702 			   BTRFS_FSID_SIZE) != 0 &&
703 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
704 			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
705 			return fs_devices;
706 		}
707 	}
708 
709 	return find_fsid(disk_super->fsid, NULL);
710 }
711 
712 
713 static struct btrfs_fs_devices *find_fsid_changed(
714 					struct btrfs_super_block *disk_super)
715 {
716 	struct btrfs_fs_devices *fs_devices;
717 
718 	/*
719 	 * Handles the case where scanned device is part of an fs that had
720 	 * multiple successful changes of FSID but currently device didn't
721 	 * observe it. Meaning our fsid will be different than theirs. We need
722 	 * to handle two subcases :
723 	 *  1 - The fs still continues to have different METADATA/FSID uuids.
724 	 *  2 - The fs is switched back to its original FSID (METADATA/FSID
725 	 *  are equal).
726 	 */
727 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
728 		/* Changed UUIDs */
729 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
730 			   BTRFS_FSID_SIZE) != 0 &&
731 		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
732 			   BTRFS_FSID_SIZE) == 0 &&
733 		    memcmp(fs_devices->fsid, disk_super->fsid,
734 			   BTRFS_FSID_SIZE) != 0)
735 			return fs_devices;
736 
737 		/* Unchanged UUIDs */
738 		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
739 			   BTRFS_FSID_SIZE) == 0 &&
740 		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
741 			   BTRFS_FSID_SIZE) == 0)
742 			return fs_devices;
743 	}
744 
745 	return NULL;
746 }
747 
748 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
749 				struct btrfs_super_block *disk_super)
750 {
751 	struct btrfs_fs_devices *fs_devices;
752 
753 	/*
754 	 * Handle the case where the scanned device is part of an fs whose last
755 	 * metadata UUID change reverted it to the original FSID. At the same
756 	 * time * fs_devices was first created by another constitutent device
757 	 * which didn't fully observe the operation. This results in an
758 	 * btrfs_fs_devices created with metadata/fsid different AND
759 	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
760 	 * fs_devices equal to the FSID of the disk.
761 	 */
762 	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
763 		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
764 			   BTRFS_FSID_SIZE) != 0 &&
765 		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
766 			   BTRFS_FSID_SIZE) == 0 &&
767 		    fs_devices->fsid_change)
768 			return fs_devices;
769 	}
770 
771 	return NULL;
772 }
773 /*
774  * Add new device to list of registered devices
775  *
776  * Returns:
777  * device pointer which was just added or updated when successful
778  * error pointer when failed
779  */
780 static noinline struct btrfs_device *device_list_add(const char *path,
781 			   struct btrfs_super_block *disk_super,
782 			   bool *new_device_added)
783 {
784 	struct btrfs_device *device;
785 	struct btrfs_fs_devices *fs_devices = NULL;
786 	struct rcu_string *name;
787 	u64 found_transid = btrfs_super_generation(disk_super);
788 	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
789 	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
790 		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
791 	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
792 					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
793 
794 	if (fsid_change_in_progress) {
795 		if (!has_metadata_uuid)
796 			fs_devices = find_fsid_inprogress(disk_super);
797 		else
798 			fs_devices = find_fsid_changed(disk_super);
799 	} else if (has_metadata_uuid) {
800 		fs_devices = find_fsid_with_metadata_uuid(disk_super);
801 	} else {
802 		fs_devices = find_fsid_reverted_metadata(disk_super);
803 		if (!fs_devices)
804 			fs_devices = find_fsid(disk_super->fsid, NULL);
805 	}
806 
807 
808 	if (!fs_devices) {
809 		if (has_metadata_uuid)
810 			fs_devices = alloc_fs_devices(disk_super->fsid,
811 						      disk_super->metadata_uuid);
812 		else
813 			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
814 
815 		if (IS_ERR(fs_devices))
816 			return ERR_CAST(fs_devices);
817 
818 		fs_devices->fsid_change = fsid_change_in_progress;
819 
820 		mutex_lock(&fs_devices->device_list_mutex);
821 		list_add(&fs_devices->fs_list, &fs_uuids);
822 
823 		device = NULL;
824 	} else {
825 		mutex_lock(&fs_devices->device_list_mutex);
826 		device = btrfs_find_device(fs_devices, devid,
827 				disk_super->dev_item.uuid, NULL);
828 
829 		/*
830 		 * If this disk has been pulled into an fs devices created by
831 		 * a device which had the CHANGING_FSID_V2 flag then replace the
832 		 * metadata_uuid/fsid values of the fs_devices.
833 		 */
834 		if (fs_devices->fsid_change &&
835 		    found_transid > fs_devices->latest_generation) {
836 			memcpy(fs_devices->fsid, disk_super->fsid,
837 					BTRFS_FSID_SIZE);
838 
839 			if (has_metadata_uuid)
840 				memcpy(fs_devices->metadata_uuid,
841 				       disk_super->metadata_uuid,
842 				       BTRFS_FSID_SIZE);
843 			else
844 				memcpy(fs_devices->metadata_uuid,
845 				       disk_super->fsid, BTRFS_FSID_SIZE);
846 
847 			fs_devices->fsid_change = false;
848 		}
849 	}
850 
851 	if (!device) {
852 		if (fs_devices->opened) {
853 			mutex_unlock(&fs_devices->device_list_mutex);
854 			return ERR_PTR(-EBUSY);
855 		}
856 
857 		device = btrfs_alloc_device(NULL, &devid,
858 					    disk_super->dev_item.uuid);
859 		if (IS_ERR(device)) {
860 			mutex_unlock(&fs_devices->device_list_mutex);
861 			/* we can safely leave the fs_devices entry around */
862 			return device;
863 		}
864 
865 		name = rcu_string_strdup(path, GFP_NOFS);
866 		if (!name) {
867 			btrfs_free_device(device);
868 			mutex_unlock(&fs_devices->device_list_mutex);
869 			return ERR_PTR(-ENOMEM);
870 		}
871 		rcu_assign_pointer(device->name, name);
872 
873 		list_add_rcu(&device->dev_list, &fs_devices->devices);
874 		fs_devices->num_devices++;
875 
876 		device->fs_devices = fs_devices;
877 		*new_device_added = true;
878 
879 		if (disk_super->label[0])
880 			pr_info(
881 	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
882 				disk_super->label, devid, found_transid, path,
883 				current->comm, task_pid_nr(current));
884 		else
885 			pr_info(
886 	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
887 				disk_super->fsid, devid, found_transid, path,
888 				current->comm, task_pid_nr(current));
889 
890 	} else if (!device->name || strcmp(device->name->str, path)) {
891 		/*
892 		 * When FS is already mounted.
893 		 * 1. If you are here and if the device->name is NULL that
894 		 *    means this device was missing at time of FS mount.
895 		 * 2. If you are here and if the device->name is different
896 		 *    from 'path' that means either
897 		 *      a. The same device disappeared and reappeared with
898 		 *         different name. or
899 		 *      b. The missing-disk-which-was-replaced, has
900 		 *         reappeared now.
901 		 *
902 		 * We must allow 1 and 2a above. But 2b would be a spurious
903 		 * and unintentional.
904 		 *
905 		 * Further in case of 1 and 2a above, the disk at 'path'
906 		 * would have missed some transaction when it was away and
907 		 * in case of 2a the stale bdev has to be updated as well.
908 		 * 2b must not be allowed at all time.
909 		 */
910 
911 		/*
912 		 * For now, we do allow update to btrfs_fs_device through the
913 		 * btrfs dev scan cli after FS has been mounted.  We're still
914 		 * tracking a problem where systems fail mount by subvolume id
915 		 * when we reject replacement on a mounted FS.
916 		 */
917 		if (!fs_devices->opened && found_transid < device->generation) {
918 			/*
919 			 * That is if the FS is _not_ mounted and if you
920 			 * are here, that means there is more than one
921 			 * disk with same uuid and devid.We keep the one
922 			 * with larger generation number or the last-in if
923 			 * generation are equal.
924 			 */
925 			mutex_unlock(&fs_devices->device_list_mutex);
926 			return ERR_PTR(-EEXIST);
927 		}
928 
929 		/*
930 		 * We are going to replace the device path for a given devid,
931 		 * make sure it's the same device if the device is mounted
932 		 */
933 		if (device->bdev) {
934 			int error;
935 			dev_t path_dev;
936 
937 			error = lookup_bdev(path, &path_dev);
938 			if (error) {
939 				mutex_unlock(&fs_devices->device_list_mutex);
940 				return ERR_PTR(error);
941 			}
942 
943 			if (device->bdev->bd_dev != path_dev) {
944 				mutex_unlock(&fs_devices->device_list_mutex);
945 				/*
946 				 * device->fs_info may not be reliable here, so
947 				 * pass in a NULL instead. This avoids a
948 				 * possible use-after-free when the fs_info and
949 				 * fs_info->sb are already torn down.
950 				 */
951 				btrfs_warn_in_rcu(NULL,
952 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
953 						  path, devid, found_transid,
954 						  current->comm,
955 						  task_pid_nr(current));
956 				return ERR_PTR(-EEXIST);
957 			}
958 			btrfs_info_in_rcu(device->fs_info,
959 	"devid %llu device path %s changed to %s scanned by %s (%d)",
960 					  devid, rcu_str_deref(device->name),
961 					  path, current->comm,
962 					  task_pid_nr(current));
963 		}
964 
965 		name = rcu_string_strdup(path, GFP_NOFS);
966 		if (!name) {
967 			mutex_unlock(&fs_devices->device_list_mutex);
968 			return ERR_PTR(-ENOMEM);
969 		}
970 		rcu_string_free(device->name);
971 		rcu_assign_pointer(device->name, name);
972 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
973 			fs_devices->missing_devices--;
974 			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
975 		}
976 	}
977 
978 	/*
979 	 * Unmount does not free the btrfs_device struct but would zero
980 	 * generation along with most of the other members. So just update
981 	 * it back. We need it to pick the disk with largest generation
982 	 * (as above).
983 	 */
984 	if (!fs_devices->opened) {
985 		device->generation = found_transid;
986 		fs_devices->latest_generation = max_t(u64, found_transid,
987 						fs_devices->latest_generation);
988 	}
989 
990 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
991 
992 	mutex_unlock(&fs_devices->device_list_mutex);
993 	return device;
994 }
995 
996 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
997 {
998 	struct btrfs_fs_devices *fs_devices;
999 	struct btrfs_device *device;
1000 	struct btrfs_device *orig_dev;
1001 	int ret = 0;
1002 
1003 	fs_devices = alloc_fs_devices(orig->fsid, NULL);
1004 	if (IS_ERR(fs_devices))
1005 		return fs_devices;
1006 
1007 	mutex_lock(&orig->device_list_mutex);
1008 	fs_devices->total_devices = orig->total_devices;
1009 
1010 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1011 		struct rcu_string *name;
1012 
1013 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
1014 					    orig_dev->uuid);
1015 		if (IS_ERR(device)) {
1016 			ret = PTR_ERR(device);
1017 			goto error;
1018 		}
1019 
1020 		/*
1021 		 * This is ok to do without rcu read locked because we hold the
1022 		 * uuid mutex so nothing we touch in here is going to disappear.
1023 		 */
1024 		if (orig_dev->name) {
1025 			name = rcu_string_strdup(orig_dev->name->str,
1026 					GFP_KERNEL);
1027 			if (!name) {
1028 				btrfs_free_device(device);
1029 				ret = -ENOMEM;
1030 				goto error;
1031 			}
1032 			rcu_assign_pointer(device->name, name);
1033 		}
1034 
1035 		list_add(&device->dev_list, &fs_devices->devices);
1036 		device->fs_devices = fs_devices;
1037 		fs_devices->num_devices++;
1038 	}
1039 	mutex_unlock(&orig->device_list_mutex);
1040 	return fs_devices;
1041 error:
1042 	mutex_unlock(&orig->device_list_mutex);
1043 	free_fs_devices(fs_devices);
1044 	return ERR_PTR(ret);
1045 }
1046 
1047 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1048 				      struct btrfs_device **latest_dev)
1049 {
1050 	struct btrfs_device *device, *next;
1051 
1052 	/* This is the initialized path, it is safe to release the devices. */
1053 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1054 		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1055 			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1056 				      &device->dev_state) &&
1057 			    !test_bit(BTRFS_DEV_STATE_MISSING,
1058 				      &device->dev_state) &&
1059 			    (!*latest_dev ||
1060 			     device->generation > (*latest_dev)->generation)) {
1061 				*latest_dev = device;
1062 			}
1063 			continue;
1064 		}
1065 
1066 		/*
1067 		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1068 		 * in btrfs_init_dev_replace() so just continue.
1069 		 */
1070 		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1071 			continue;
1072 
1073 		if (device->bdev) {
1074 			blkdev_put(device->bdev, device->mode);
1075 			device->bdev = NULL;
1076 			fs_devices->open_devices--;
1077 		}
1078 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1079 			list_del_init(&device->dev_alloc_list);
1080 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1081 		}
1082 		list_del_init(&device->dev_list);
1083 		fs_devices->num_devices--;
1084 		btrfs_free_device(device);
1085 	}
1086 
1087 }
1088 
1089 /*
1090  * After we have read the system tree and know devids belonging to this
1091  * filesystem, remove the device which does not belong there.
1092  */
1093 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1094 {
1095 	struct btrfs_device *latest_dev = NULL;
1096 	struct btrfs_fs_devices *seed_dev;
1097 
1098 	mutex_lock(&uuid_mutex);
1099 	__btrfs_free_extra_devids(fs_devices, &latest_dev);
1100 
1101 	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1102 		__btrfs_free_extra_devids(seed_dev, &latest_dev);
1103 
1104 	fs_devices->latest_bdev = latest_dev->bdev;
1105 
1106 	mutex_unlock(&uuid_mutex);
1107 }
1108 
1109 static void btrfs_close_bdev(struct btrfs_device *device)
1110 {
1111 	if (!device->bdev)
1112 		return;
1113 
1114 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1115 		sync_blockdev(device->bdev);
1116 		invalidate_bdev(device->bdev);
1117 	}
1118 
1119 	blkdev_put(device->bdev, device->mode);
1120 }
1121 
1122 static void btrfs_close_one_device(struct btrfs_device *device)
1123 {
1124 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
1125 
1126 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1127 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
1128 		list_del_init(&device->dev_alloc_list);
1129 		fs_devices->rw_devices--;
1130 	}
1131 
1132 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1133 		fs_devices->missing_devices--;
1134 
1135 	btrfs_close_bdev(device);
1136 	if (device->bdev) {
1137 		fs_devices->open_devices--;
1138 		device->bdev = NULL;
1139 	}
1140 	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1141 	btrfs_destroy_dev_zone_info(device);
1142 
1143 	device->fs_info = NULL;
1144 	atomic_set(&device->dev_stats_ccnt, 0);
1145 	extent_io_tree_release(&device->alloc_state);
1146 
1147 	/* Verify the device is back in a pristine state  */
1148 	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1149 	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1150 	ASSERT(list_empty(&device->dev_alloc_list));
1151 	ASSERT(list_empty(&device->post_commit_list));
1152 	ASSERT(atomic_read(&device->reada_in_flight) == 0);
1153 }
1154 
1155 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1156 {
1157 	struct btrfs_device *device, *tmp;
1158 
1159 	lockdep_assert_held(&uuid_mutex);
1160 
1161 	if (--fs_devices->opened > 0)
1162 		return;
1163 
1164 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1165 		btrfs_close_one_device(device);
1166 
1167 	WARN_ON(fs_devices->open_devices);
1168 	WARN_ON(fs_devices->rw_devices);
1169 	fs_devices->opened = 0;
1170 	fs_devices->seeding = false;
1171 	fs_devices->fs_info = NULL;
1172 }
1173 
1174 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1175 {
1176 	LIST_HEAD(list);
1177 	struct btrfs_fs_devices *tmp;
1178 
1179 	mutex_lock(&uuid_mutex);
1180 	close_fs_devices(fs_devices);
1181 	if (!fs_devices->opened)
1182 		list_splice_init(&fs_devices->seed_list, &list);
1183 
1184 	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1185 		close_fs_devices(fs_devices);
1186 		list_del(&fs_devices->seed_list);
1187 		free_fs_devices(fs_devices);
1188 	}
1189 	mutex_unlock(&uuid_mutex);
1190 }
1191 
1192 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1193 				fmode_t flags, void *holder)
1194 {
1195 	struct btrfs_device *device;
1196 	struct btrfs_device *latest_dev = NULL;
1197 	struct btrfs_device *tmp_device;
1198 
1199 	flags |= FMODE_EXCL;
1200 
1201 	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1202 				 dev_list) {
1203 		int ret;
1204 
1205 		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1206 		if (ret == 0 &&
1207 		    (!latest_dev || device->generation > latest_dev->generation)) {
1208 			latest_dev = device;
1209 		} else if (ret == -ENODATA) {
1210 			fs_devices->num_devices--;
1211 			list_del(&device->dev_list);
1212 			btrfs_free_device(device);
1213 		}
1214 	}
1215 	if (fs_devices->open_devices == 0)
1216 		return -EINVAL;
1217 
1218 	fs_devices->opened = 1;
1219 	fs_devices->latest_bdev = latest_dev->bdev;
1220 	fs_devices->total_rw_bytes = 0;
1221 	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1222 	fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1223 
1224 	return 0;
1225 }
1226 
1227 static int devid_cmp(void *priv, const struct list_head *a,
1228 		     const struct list_head *b)
1229 {
1230 	struct btrfs_device *dev1, *dev2;
1231 
1232 	dev1 = list_entry(a, struct btrfs_device, dev_list);
1233 	dev2 = list_entry(b, struct btrfs_device, dev_list);
1234 
1235 	if (dev1->devid < dev2->devid)
1236 		return -1;
1237 	else if (dev1->devid > dev2->devid)
1238 		return 1;
1239 	return 0;
1240 }
1241 
1242 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1243 		       fmode_t flags, void *holder)
1244 {
1245 	int ret;
1246 
1247 	lockdep_assert_held(&uuid_mutex);
1248 	/*
1249 	 * The device_list_mutex cannot be taken here in case opening the
1250 	 * underlying device takes further locks like open_mutex.
1251 	 *
1252 	 * We also don't need the lock here as this is called during mount and
1253 	 * exclusion is provided by uuid_mutex
1254 	 */
1255 
1256 	if (fs_devices->opened) {
1257 		fs_devices->opened++;
1258 		ret = 0;
1259 	} else {
1260 		list_sort(NULL, &fs_devices->devices, devid_cmp);
1261 		ret = open_fs_devices(fs_devices, flags, holder);
1262 	}
1263 
1264 	return ret;
1265 }
1266 
1267 void btrfs_release_disk_super(struct btrfs_super_block *super)
1268 {
1269 	struct page *page = virt_to_page(super);
1270 
1271 	put_page(page);
1272 }
1273 
1274 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1275 						       u64 bytenr, u64 bytenr_orig)
1276 {
1277 	struct btrfs_super_block *disk_super;
1278 	struct page *page;
1279 	void *p;
1280 	pgoff_t index;
1281 
1282 	/* make sure our super fits in the device */
1283 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1284 		return ERR_PTR(-EINVAL);
1285 
1286 	/* make sure our super fits in the page */
1287 	if (sizeof(*disk_super) > PAGE_SIZE)
1288 		return ERR_PTR(-EINVAL);
1289 
1290 	/* make sure our super doesn't straddle pages on disk */
1291 	index = bytenr >> PAGE_SHIFT;
1292 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1293 		return ERR_PTR(-EINVAL);
1294 
1295 	/* pull in the page with our super */
1296 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1297 
1298 	if (IS_ERR(page))
1299 		return ERR_CAST(page);
1300 
1301 	p = page_address(page);
1302 
1303 	/* align our pointer to the offset of the super block */
1304 	disk_super = p + offset_in_page(bytenr);
1305 
1306 	if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1307 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1308 		btrfs_release_disk_super(p);
1309 		return ERR_PTR(-EINVAL);
1310 	}
1311 
1312 	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1313 		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1314 
1315 	return disk_super;
1316 }
1317 
1318 int btrfs_forget_devices(const char *path)
1319 {
1320 	int ret;
1321 
1322 	mutex_lock(&uuid_mutex);
1323 	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1324 	mutex_unlock(&uuid_mutex);
1325 
1326 	return ret;
1327 }
1328 
1329 /*
1330  * Look for a btrfs signature on a device. This may be called out of the mount path
1331  * and we are not allowed to call set_blocksize during the scan. The superblock
1332  * is read via pagecache
1333  */
1334 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1335 					   void *holder)
1336 {
1337 	struct btrfs_super_block *disk_super;
1338 	bool new_device_added = false;
1339 	struct btrfs_device *device = NULL;
1340 	struct block_device *bdev;
1341 	u64 bytenr, bytenr_orig;
1342 	int ret;
1343 
1344 	lockdep_assert_held(&uuid_mutex);
1345 
1346 	/*
1347 	 * we would like to check all the supers, but that would make
1348 	 * a btrfs mount succeed after a mkfs from a different FS.
1349 	 * So, we need to add a special mount option to scan for
1350 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1351 	 */
1352 	flags |= FMODE_EXCL;
1353 
1354 	bdev = blkdev_get_by_path(path, flags, holder);
1355 	if (IS_ERR(bdev))
1356 		return ERR_CAST(bdev);
1357 
1358 	bytenr_orig = btrfs_sb_offset(0);
1359 	ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1360 	if (ret)
1361 		return ERR_PTR(ret);
1362 
1363 	disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1364 	if (IS_ERR(disk_super)) {
1365 		device = ERR_CAST(disk_super);
1366 		goto error_bdev_put;
1367 	}
1368 
1369 	device = device_list_add(path, disk_super, &new_device_added);
1370 	if (!IS_ERR(device)) {
1371 		if (new_device_added)
1372 			btrfs_free_stale_devices(path, device);
1373 	}
1374 
1375 	btrfs_release_disk_super(disk_super);
1376 
1377 error_bdev_put:
1378 	blkdev_put(bdev, flags);
1379 
1380 	return device;
1381 }
1382 
1383 /*
1384  * Try to find a chunk that intersects [start, start + len] range and when one
1385  * such is found, record the end of it in *start
1386  */
1387 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1388 				    u64 len)
1389 {
1390 	u64 physical_start, physical_end;
1391 
1392 	lockdep_assert_held(&device->fs_info->chunk_mutex);
1393 
1394 	if (!find_first_extent_bit(&device->alloc_state, *start,
1395 				   &physical_start, &physical_end,
1396 				   CHUNK_ALLOCATED, NULL)) {
1397 
1398 		if (in_range(physical_start, *start, len) ||
1399 		    in_range(*start, physical_start,
1400 			     physical_end - physical_start)) {
1401 			*start = physical_end + 1;
1402 			return true;
1403 		}
1404 	}
1405 	return false;
1406 }
1407 
1408 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1409 {
1410 	switch (device->fs_devices->chunk_alloc_policy) {
1411 	case BTRFS_CHUNK_ALLOC_REGULAR:
1412 		/*
1413 		 * We don't want to overwrite the superblock on the drive nor
1414 		 * any area used by the boot loader (grub for example), so we
1415 		 * make sure to start at an offset of at least 1MB.
1416 		 */
1417 		return max_t(u64, start, SZ_1M);
1418 	case BTRFS_CHUNK_ALLOC_ZONED:
1419 		/*
1420 		 * We don't care about the starting region like regular
1421 		 * allocator, because we anyway use/reserve the first two zones
1422 		 * for superblock logging.
1423 		 */
1424 		return ALIGN(start, device->zone_info->zone_size);
1425 	default:
1426 		BUG();
1427 	}
1428 }
1429 
1430 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1431 					u64 *hole_start, u64 *hole_size,
1432 					u64 num_bytes)
1433 {
1434 	u64 zone_size = device->zone_info->zone_size;
1435 	u64 pos;
1436 	int ret;
1437 	bool changed = false;
1438 
1439 	ASSERT(IS_ALIGNED(*hole_start, zone_size));
1440 
1441 	while (*hole_size > 0) {
1442 		pos = btrfs_find_allocatable_zones(device, *hole_start,
1443 						   *hole_start + *hole_size,
1444 						   num_bytes);
1445 		if (pos != *hole_start) {
1446 			*hole_size = *hole_start + *hole_size - pos;
1447 			*hole_start = pos;
1448 			changed = true;
1449 			if (*hole_size < num_bytes)
1450 				break;
1451 		}
1452 
1453 		ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1454 
1455 		/* Range is ensured to be empty */
1456 		if (!ret)
1457 			return changed;
1458 
1459 		/* Given hole range was invalid (outside of device) */
1460 		if (ret == -ERANGE) {
1461 			*hole_start += *hole_size;
1462 			*hole_size = 0;
1463 			return true;
1464 		}
1465 
1466 		*hole_start += zone_size;
1467 		*hole_size -= zone_size;
1468 		changed = true;
1469 	}
1470 
1471 	return changed;
1472 }
1473 
1474 /**
1475  * dev_extent_hole_check - check if specified hole is suitable for allocation
1476  * @device:	the device which we have the hole
1477  * @hole_start: starting position of the hole
1478  * @hole_size:	the size of the hole
1479  * @num_bytes:	the size of the free space that we need
1480  *
1481  * This function may modify @hole_start and @hole_size to reflect the suitable
1482  * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
1483  */
1484 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1485 				  u64 *hole_size, u64 num_bytes)
1486 {
1487 	bool changed = false;
1488 	u64 hole_end = *hole_start + *hole_size;
1489 
1490 	for (;;) {
1491 		/*
1492 		 * Check before we set max_hole_start, otherwise we could end up
1493 		 * sending back this offset anyway.
1494 		 */
1495 		if (contains_pending_extent(device, hole_start, *hole_size)) {
1496 			if (hole_end >= *hole_start)
1497 				*hole_size = hole_end - *hole_start;
1498 			else
1499 				*hole_size = 0;
1500 			changed = true;
1501 		}
1502 
1503 		switch (device->fs_devices->chunk_alloc_policy) {
1504 		case BTRFS_CHUNK_ALLOC_REGULAR:
1505 			/* No extra check */
1506 			break;
1507 		case BTRFS_CHUNK_ALLOC_ZONED:
1508 			if (dev_extent_hole_check_zoned(device, hole_start,
1509 							hole_size, num_bytes)) {
1510 				changed = true;
1511 				/*
1512 				 * The changed hole can contain pending extent.
1513 				 * Loop again to check that.
1514 				 */
1515 				continue;
1516 			}
1517 			break;
1518 		default:
1519 			BUG();
1520 		}
1521 
1522 		break;
1523 	}
1524 
1525 	return changed;
1526 }
1527 
1528 /*
1529  * find_free_dev_extent_start - find free space in the specified device
1530  * @device:	  the device which we search the free space in
1531  * @num_bytes:	  the size of the free space that we need
1532  * @search_start: the position from which to begin the search
1533  * @start:	  store the start of the free space.
1534  * @len:	  the size of the free space. that we find, or the size
1535  *		  of the max free space if we don't find suitable free space
1536  *
1537  * this uses a pretty simple search, the expectation is that it is
1538  * called very infrequently and that a given device has a small number
1539  * of extents
1540  *
1541  * @start is used to store the start of the free space if we find. But if we
1542  * don't find suitable free space, it will be used to store the start position
1543  * of the max free space.
1544  *
1545  * @len is used to store the size of the free space that we find.
1546  * But if we don't find suitable free space, it is used to store the size of
1547  * the max free space.
1548  *
1549  * NOTE: This function will search *commit* root of device tree, and does extra
1550  * check to ensure dev extents are not double allocated.
1551  * This makes the function safe to allocate dev extents but may not report
1552  * correct usable device space, as device extent freed in current transaction
1553  * is not reported as available.
1554  */
1555 static int find_free_dev_extent_start(struct btrfs_device *device,
1556 				u64 num_bytes, u64 search_start, u64 *start,
1557 				u64 *len)
1558 {
1559 	struct btrfs_fs_info *fs_info = device->fs_info;
1560 	struct btrfs_root *root = fs_info->dev_root;
1561 	struct btrfs_key key;
1562 	struct btrfs_dev_extent *dev_extent;
1563 	struct btrfs_path *path;
1564 	u64 hole_size;
1565 	u64 max_hole_start;
1566 	u64 max_hole_size;
1567 	u64 extent_end;
1568 	u64 search_end = device->total_bytes;
1569 	int ret;
1570 	int slot;
1571 	struct extent_buffer *l;
1572 
1573 	search_start = dev_extent_search_start(device, search_start);
1574 
1575 	WARN_ON(device->zone_info &&
1576 		!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1577 
1578 	path = btrfs_alloc_path();
1579 	if (!path)
1580 		return -ENOMEM;
1581 
1582 	max_hole_start = search_start;
1583 	max_hole_size = 0;
1584 
1585 again:
1586 	if (search_start >= search_end ||
1587 		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1588 		ret = -ENOSPC;
1589 		goto out;
1590 	}
1591 
1592 	path->reada = READA_FORWARD;
1593 	path->search_commit_root = 1;
1594 	path->skip_locking = 1;
1595 
1596 	key.objectid = device->devid;
1597 	key.offset = search_start;
1598 	key.type = BTRFS_DEV_EXTENT_KEY;
1599 
1600 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1601 	if (ret < 0)
1602 		goto out;
1603 	if (ret > 0) {
1604 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1605 		if (ret < 0)
1606 			goto out;
1607 	}
1608 
1609 	while (1) {
1610 		l = path->nodes[0];
1611 		slot = path->slots[0];
1612 		if (slot >= btrfs_header_nritems(l)) {
1613 			ret = btrfs_next_leaf(root, path);
1614 			if (ret == 0)
1615 				continue;
1616 			if (ret < 0)
1617 				goto out;
1618 
1619 			break;
1620 		}
1621 		btrfs_item_key_to_cpu(l, &key, slot);
1622 
1623 		if (key.objectid < device->devid)
1624 			goto next;
1625 
1626 		if (key.objectid > device->devid)
1627 			break;
1628 
1629 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1630 			goto next;
1631 
1632 		if (key.offset > search_start) {
1633 			hole_size = key.offset - search_start;
1634 			dev_extent_hole_check(device, &search_start, &hole_size,
1635 					      num_bytes);
1636 
1637 			if (hole_size > max_hole_size) {
1638 				max_hole_start = search_start;
1639 				max_hole_size = hole_size;
1640 			}
1641 
1642 			/*
1643 			 * If this free space is greater than which we need,
1644 			 * it must be the max free space that we have found
1645 			 * until now, so max_hole_start must point to the start
1646 			 * of this free space and the length of this free space
1647 			 * is stored in max_hole_size. Thus, we return
1648 			 * max_hole_start and max_hole_size and go back to the
1649 			 * caller.
1650 			 */
1651 			if (hole_size >= num_bytes) {
1652 				ret = 0;
1653 				goto out;
1654 			}
1655 		}
1656 
1657 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1658 		extent_end = key.offset + btrfs_dev_extent_length(l,
1659 								  dev_extent);
1660 		if (extent_end > search_start)
1661 			search_start = extent_end;
1662 next:
1663 		path->slots[0]++;
1664 		cond_resched();
1665 	}
1666 
1667 	/*
1668 	 * At this point, search_start should be the end of
1669 	 * allocated dev extents, and when shrinking the device,
1670 	 * search_end may be smaller than search_start.
1671 	 */
1672 	if (search_end > search_start) {
1673 		hole_size = search_end - search_start;
1674 		if (dev_extent_hole_check(device, &search_start, &hole_size,
1675 					  num_bytes)) {
1676 			btrfs_release_path(path);
1677 			goto again;
1678 		}
1679 
1680 		if (hole_size > max_hole_size) {
1681 			max_hole_start = search_start;
1682 			max_hole_size = hole_size;
1683 		}
1684 	}
1685 
1686 	/* See above. */
1687 	if (max_hole_size < num_bytes)
1688 		ret = -ENOSPC;
1689 	else
1690 		ret = 0;
1691 
1692 out:
1693 	btrfs_free_path(path);
1694 	*start = max_hole_start;
1695 	if (len)
1696 		*len = max_hole_size;
1697 	return ret;
1698 }
1699 
1700 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1701 			 u64 *start, u64 *len)
1702 {
1703 	/* FIXME use last free of some kind */
1704 	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1705 }
1706 
1707 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1708 			  struct btrfs_device *device,
1709 			  u64 start, u64 *dev_extent_len)
1710 {
1711 	struct btrfs_fs_info *fs_info = device->fs_info;
1712 	struct btrfs_root *root = fs_info->dev_root;
1713 	int ret;
1714 	struct btrfs_path *path;
1715 	struct btrfs_key key;
1716 	struct btrfs_key found_key;
1717 	struct extent_buffer *leaf = NULL;
1718 	struct btrfs_dev_extent *extent = NULL;
1719 
1720 	path = btrfs_alloc_path();
1721 	if (!path)
1722 		return -ENOMEM;
1723 
1724 	key.objectid = device->devid;
1725 	key.offset = start;
1726 	key.type = BTRFS_DEV_EXTENT_KEY;
1727 again:
1728 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1729 	if (ret > 0) {
1730 		ret = btrfs_previous_item(root, path, key.objectid,
1731 					  BTRFS_DEV_EXTENT_KEY);
1732 		if (ret)
1733 			goto out;
1734 		leaf = path->nodes[0];
1735 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1736 		extent = btrfs_item_ptr(leaf, path->slots[0],
1737 					struct btrfs_dev_extent);
1738 		BUG_ON(found_key.offset > start || found_key.offset +
1739 		       btrfs_dev_extent_length(leaf, extent) < start);
1740 		key = found_key;
1741 		btrfs_release_path(path);
1742 		goto again;
1743 	} else if (ret == 0) {
1744 		leaf = path->nodes[0];
1745 		extent = btrfs_item_ptr(leaf, path->slots[0],
1746 					struct btrfs_dev_extent);
1747 	} else {
1748 		goto out;
1749 	}
1750 
1751 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1752 
1753 	ret = btrfs_del_item(trans, root, path);
1754 	if (ret == 0)
1755 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1756 out:
1757 	btrfs_free_path(path);
1758 	return ret;
1759 }
1760 
1761 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1762 				  struct btrfs_device *device,
1763 				  u64 chunk_offset, u64 start, u64 num_bytes)
1764 {
1765 	int ret;
1766 	struct btrfs_path *path;
1767 	struct btrfs_fs_info *fs_info = device->fs_info;
1768 	struct btrfs_root *root = fs_info->dev_root;
1769 	struct btrfs_dev_extent *extent;
1770 	struct extent_buffer *leaf;
1771 	struct btrfs_key key;
1772 
1773 	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1774 	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1775 	path = btrfs_alloc_path();
1776 	if (!path)
1777 		return -ENOMEM;
1778 
1779 	key.objectid = device->devid;
1780 	key.offset = start;
1781 	key.type = BTRFS_DEV_EXTENT_KEY;
1782 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1783 				      sizeof(*extent));
1784 	if (ret)
1785 		goto out;
1786 
1787 	leaf = path->nodes[0];
1788 	extent = btrfs_item_ptr(leaf, path->slots[0],
1789 				struct btrfs_dev_extent);
1790 	btrfs_set_dev_extent_chunk_tree(leaf, extent,
1791 					BTRFS_CHUNK_TREE_OBJECTID);
1792 	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1793 					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1794 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1795 
1796 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1797 	btrfs_mark_buffer_dirty(leaf);
1798 out:
1799 	btrfs_free_path(path);
1800 	return ret;
1801 }
1802 
1803 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1804 {
1805 	struct extent_map_tree *em_tree;
1806 	struct extent_map *em;
1807 	struct rb_node *n;
1808 	u64 ret = 0;
1809 
1810 	em_tree = &fs_info->mapping_tree;
1811 	read_lock(&em_tree->lock);
1812 	n = rb_last(&em_tree->map.rb_root);
1813 	if (n) {
1814 		em = rb_entry(n, struct extent_map, rb_node);
1815 		ret = em->start + em->len;
1816 	}
1817 	read_unlock(&em_tree->lock);
1818 
1819 	return ret;
1820 }
1821 
1822 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1823 				    u64 *devid_ret)
1824 {
1825 	int ret;
1826 	struct btrfs_key key;
1827 	struct btrfs_key found_key;
1828 	struct btrfs_path *path;
1829 
1830 	path = btrfs_alloc_path();
1831 	if (!path)
1832 		return -ENOMEM;
1833 
1834 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1835 	key.type = BTRFS_DEV_ITEM_KEY;
1836 	key.offset = (u64)-1;
1837 
1838 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1839 	if (ret < 0)
1840 		goto error;
1841 
1842 	if (ret == 0) {
1843 		/* Corruption */
1844 		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1845 		ret = -EUCLEAN;
1846 		goto error;
1847 	}
1848 
1849 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1850 				  BTRFS_DEV_ITEMS_OBJECTID,
1851 				  BTRFS_DEV_ITEM_KEY);
1852 	if (ret) {
1853 		*devid_ret = 1;
1854 	} else {
1855 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1856 				      path->slots[0]);
1857 		*devid_ret = found_key.offset + 1;
1858 	}
1859 	ret = 0;
1860 error:
1861 	btrfs_free_path(path);
1862 	return ret;
1863 }
1864 
1865 /*
1866  * the device information is stored in the chunk root
1867  * the btrfs_device struct should be fully filled in
1868  */
1869 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1870 			    struct btrfs_device *device)
1871 {
1872 	int ret;
1873 	struct btrfs_path *path;
1874 	struct btrfs_dev_item *dev_item;
1875 	struct extent_buffer *leaf;
1876 	struct btrfs_key key;
1877 	unsigned long ptr;
1878 
1879 	path = btrfs_alloc_path();
1880 	if (!path)
1881 		return -ENOMEM;
1882 
1883 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1884 	key.type = BTRFS_DEV_ITEM_KEY;
1885 	key.offset = device->devid;
1886 
1887 	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1888 				      &key, sizeof(*dev_item));
1889 	if (ret)
1890 		goto out;
1891 
1892 	leaf = path->nodes[0];
1893 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1894 
1895 	btrfs_set_device_id(leaf, dev_item, device->devid);
1896 	btrfs_set_device_generation(leaf, dev_item, 0);
1897 	btrfs_set_device_type(leaf, dev_item, device->type);
1898 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1899 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1900 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1901 	btrfs_set_device_total_bytes(leaf, dev_item,
1902 				     btrfs_device_get_disk_total_bytes(device));
1903 	btrfs_set_device_bytes_used(leaf, dev_item,
1904 				    btrfs_device_get_bytes_used(device));
1905 	btrfs_set_device_group(leaf, dev_item, 0);
1906 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1907 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1908 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1909 
1910 	ptr = btrfs_device_uuid(dev_item);
1911 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1912 	ptr = btrfs_device_fsid(dev_item);
1913 	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1914 			    ptr, BTRFS_FSID_SIZE);
1915 	btrfs_mark_buffer_dirty(leaf);
1916 
1917 	ret = 0;
1918 out:
1919 	btrfs_free_path(path);
1920 	return ret;
1921 }
1922 
1923 /*
1924  * Function to update ctime/mtime for a given device path.
1925  * Mainly used for ctime/mtime based probe like libblkid.
1926  */
1927 static void update_dev_time(const char *path_name)
1928 {
1929 	struct file *filp;
1930 
1931 	filp = filp_open(path_name, O_RDWR, 0);
1932 	if (IS_ERR(filp))
1933 		return;
1934 	file_update_time(filp);
1935 	filp_close(filp, NULL);
1936 }
1937 
1938 static int btrfs_rm_dev_item(struct btrfs_device *device)
1939 {
1940 	struct btrfs_root *root = device->fs_info->chunk_root;
1941 	int ret;
1942 	struct btrfs_path *path;
1943 	struct btrfs_key key;
1944 	struct btrfs_trans_handle *trans;
1945 
1946 	path = btrfs_alloc_path();
1947 	if (!path)
1948 		return -ENOMEM;
1949 
1950 	trans = btrfs_start_transaction(root, 0);
1951 	if (IS_ERR(trans)) {
1952 		btrfs_free_path(path);
1953 		return PTR_ERR(trans);
1954 	}
1955 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1956 	key.type = BTRFS_DEV_ITEM_KEY;
1957 	key.offset = device->devid;
1958 
1959 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1960 	if (ret) {
1961 		if (ret > 0)
1962 			ret = -ENOENT;
1963 		btrfs_abort_transaction(trans, ret);
1964 		btrfs_end_transaction(trans);
1965 		goto out;
1966 	}
1967 
1968 	ret = btrfs_del_item(trans, root, path);
1969 	if (ret) {
1970 		btrfs_abort_transaction(trans, ret);
1971 		btrfs_end_transaction(trans);
1972 	}
1973 
1974 out:
1975 	btrfs_free_path(path);
1976 	if (!ret)
1977 		ret = btrfs_commit_transaction(trans);
1978 	return ret;
1979 }
1980 
1981 /*
1982  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1983  * filesystem. It's up to the caller to adjust that number regarding eg. device
1984  * replace.
1985  */
1986 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1987 		u64 num_devices)
1988 {
1989 	u64 all_avail;
1990 	unsigned seq;
1991 	int i;
1992 
1993 	do {
1994 		seq = read_seqbegin(&fs_info->profiles_lock);
1995 
1996 		all_avail = fs_info->avail_data_alloc_bits |
1997 			    fs_info->avail_system_alloc_bits |
1998 			    fs_info->avail_metadata_alloc_bits;
1999 	} while (read_seqretry(&fs_info->profiles_lock, seq));
2000 
2001 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2002 		if (!(all_avail & btrfs_raid_array[i].bg_flag))
2003 			continue;
2004 
2005 		if (num_devices < btrfs_raid_array[i].devs_min) {
2006 			int ret = btrfs_raid_array[i].mindev_error;
2007 
2008 			if (ret)
2009 				return ret;
2010 		}
2011 	}
2012 
2013 	return 0;
2014 }
2015 
2016 static struct btrfs_device * btrfs_find_next_active_device(
2017 		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
2018 {
2019 	struct btrfs_device *next_device;
2020 
2021 	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
2022 		if (next_device != device &&
2023 		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
2024 		    && next_device->bdev)
2025 			return next_device;
2026 	}
2027 
2028 	return NULL;
2029 }
2030 
2031 /*
2032  * Helper function to check if the given device is part of s_bdev / latest_bdev
2033  * and replace it with the provided or the next active device, in the context
2034  * where this function called, there should be always be another device (or
2035  * this_dev) which is active.
2036  */
2037 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
2038 					    struct btrfs_device *next_device)
2039 {
2040 	struct btrfs_fs_info *fs_info = device->fs_info;
2041 
2042 	if (!next_device)
2043 		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
2044 							    device);
2045 	ASSERT(next_device);
2046 
2047 	if (fs_info->sb->s_bdev &&
2048 			(fs_info->sb->s_bdev == device->bdev))
2049 		fs_info->sb->s_bdev = next_device->bdev;
2050 
2051 	if (fs_info->fs_devices->latest_bdev == device->bdev)
2052 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2053 }
2054 
2055 /*
2056  * Return btrfs_fs_devices::num_devices excluding the device that's being
2057  * currently replaced.
2058  */
2059 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2060 {
2061 	u64 num_devices = fs_info->fs_devices->num_devices;
2062 
2063 	down_read(&fs_info->dev_replace.rwsem);
2064 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2065 		ASSERT(num_devices > 1);
2066 		num_devices--;
2067 	}
2068 	up_read(&fs_info->dev_replace.rwsem);
2069 
2070 	return num_devices;
2071 }
2072 
2073 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2074 			       struct block_device *bdev,
2075 			       const char *device_path)
2076 {
2077 	struct btrfs_super_block *disk_super;
2078 	int copy_num;
2079 
2080 	if (!bdev)
2081 		return;
2082 
2083 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2084 		struct page *page;
2085 		int ret;
2086 
2087 		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2088 		if (IS_ERR(disk_super))
2089 			continue;
2090 
2091 		if (bdev_is_zoned(bdev)) {
2092 			btrfs_reset_sb_log_zones(bdev, copy_num);
2093 			continue;
2094 		}
2095 
2096 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2097 
2098 		page = virt_to_page(disk_super);
2099 		set_page_dirty(page);
2100 		lock_page(page);
2101 		/* write_on_page() unlocks the page */
2102 		ret = write_one_page(page);
2103 		if (ret)
2104 			btrfs_warn(fs_info,
2105 				"error clearing superblock number %d (%d)",
2106 				copy_num, ret);
2107 		btrfs_release_disk_super(disk_super);
2108 
2109 	}
2110 
2111 	/* Notify udev that device has changed */
2112 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2113 
2114 	/* Update ctime/mtime for device path for libblkid */
2115 	update_dev_time(device_path);
2116 }
2117 
2118 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2119 		    u64 devid)
2120 {
2121 	struct btrfs_device *device;
2122 	struct btrfs_fs_devices *cur_devices;
2123 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2124 	u64 num_devices;
2125 	int ret = 0;
2126 
2127 	mutex_lock(&uuid_mutex);
2128 
2129 	num_devices = btrfs_num_devices(fs_info);
2130 
2131 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2132 	if (ret)
2133 		goto out;
2134 
2135 	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2136 
2137 	if (IS_ERR(device)) {
2138 		if (PTR_ERR(device) == -ENOENT &&
2139 		    strcmp(device_path, "missing") == 0)
2140 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2141 		else
2142 			ret = PTR_ERR(device);
2143 		goto out;
2144 	}
2145 
2146 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
2147 		btrfs_warn_in_rcu(fs_info,
2148 		  "cannot remove device %s (devid %llu) due to active swapfile",
2149 				  rcu_str_deref(device->name), device->devid);
2150 		ret = -ETXTBSY;
2151 		goto out;
2152 	}
2153 
2154 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2155 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2156 		goto out;
2157 	}
2158 
2159 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2160 	    fs_info->fs_devices->rw_devices == 1) {
2161 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2162 		goto out;
2163 	}
2164 
2165 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2166 		mutex_lock(&fs_info->chunk_mutex);
2167 		list_del_init(&device->dev_alloc_list);
2168 		device->fs_devices->rw_devices--;
2169 		mutex_unlock(&fs_info->chunk_mutex);
2170 	}
2171 
2172 	mutex_unlock(&uuid_mutex);
2173 	ret = btrfs_shrink_device(device, 0);
2174 	if (!ret)
2175 		btrfs_reada_remove_dev(device);
2176 	mutex_lock(&uuid_mutex);
2177 	if (ret)
2178 		goto error_undo;
2179 
2180 	/*
2181 	 * TODO: the superblock still includes this device in its num_devices
2182 	 * counter although write_all_supers() is not locked out. This
2183 	 * could give a filesystem state which requires a degraded mount.
2184 	 */
2185 	ret = btrfs_rm_dev_item(device);
2186 	if (ret)
2187 		goto error_undo;
2188 
2189 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2190 	btrfs_scrub_cancel_dev(device);
2191 
2192 	/*
2193 	 * the device list mutex makes sure that we don't change
2194 	 * the device list while someone else is writing out all
2195 	 * the device supers. Whoever is writing all supers, should
2196 	 * lock the device list mutex before getting the number of
2197 	 * devices in the super block (super_copy). Conversely,
2198 	 * whoever updates the number of devices in the super block
2199 	 * (super_copy) should hold the device list mutex.
2200 	 */
2201 
2202 	/*
2203 	 * In normal cases the cur_devices == fs_devices. But in case
2204 	 * of deleting a seed device, the cur_devices should point to
2205 	 * its own fs_devices listed under the fs_devices->seed.
2206 	 */
2207 	cur_devices = device->fs_devices;
2208 	mutex_lock(&fs_devices->device_list_mutex);
2209 	list_del_rcu(&device->dev_list);
2210 
2211 	cur_devices->num_devices--;
2212 	cur_devices->total_devices--;
2213 	/* Update total_devices of the parent fs_devices if it's seed */
2214 	if (cur_devices != fs_devices)
2215 		fs_devices->total_devices--;
2216 
2217 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2218 		cur_devices->missing_devices--;
2219 
2220 	btrfs_assign_next_active_device(device, NULL);
2221 
2222 	if (device->bdev) {
2223 		cur_devices->open_devices--;
2224 		/* remove sysfs entry */
2225 		btrfs_sysfs_remove_device(device);
2226 	}
2227 
2228 	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2229 	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2230 	mutex_unlock(&fs_devices->device_list_mutex);
2231 
2232 	/*
2233 	 * at this point, the device is zero sized and detached from
2234 	 * the devices list.  All that's left is to zero out the old
2235 	 * supers and free the device.
2236 	 */
2237 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2238 		btrfs_scratch_superblocks(fs_info, device->bdev,
2239 					  device->name->str);
2240 
2241 	btrfs_close_bdev(device);
2242 	synchronize_rcu();
2243 	btrfs_free_device(device);
2244 
2245 	if (cur_devices->open_devices == 0) {
2246 		list_del_init(&cur_devices->seed_list);
2247 		close_fs_devices(cur_devices);
2248 		free_fs_devices(cur_devices);
2249 	}
2250 
2251 out:
2252 	mutex_unlock(&uuid_mutex);
2253 	return ret;
2254 
2255 error_undo:
2256 	btrfs_reada_undo_remove_dev(device);
2257 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2258 		mutex_lock(&fs_info->chunk_mutex);
2259 		list_add(&device->dev_alloc_list,
2260 			 &fs_devices->alloc_list);
2261 		device->fs_devices->rw_devices++;
2262 		mutex_unlock(&fs_info->chunk_mutex);
2263 	}
2264 	goto out;
2265 }
2266 
2267 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2268 {
2269 	struct btrfs_fs_devices *fs_devices;
2270 
2271 	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2272 
2273 	/*
2274 	 * in case of fs with no seed, srcdev->fs_devices will point
2275 	 * to fs_devices of fs_info. However when the dev being replaced is
2276 	 * a seed dev it will point to the seed's local fs_devices. In short
2277 	 * srcdev will have its correct fs_devices in both the cases.
2278 	 */
2279 	fs_devices = srcdev->fs_devices;
2280 
2281 	list_del_rcu(&srcdev->dev_list);
2282 	list_del(&srcdev->dev_alloc_list);
2283 	fs_devices->num_devices--;
2284 	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2285 		fs_devices->missing_devices--;
2286 
2287 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2288 		fs_devices->rw_devices--;
2289 
2290 	if (srcdev->bdev)
2291 		fs_devices->open_devices--;
2292 }
2293 
2294 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2295 {
2296 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2297 
2298 	mutex_lock(&uuid_mutex);
2299 
2300 	btrfs_close_bdev(srcdev);
2301 	synchronize_rcu();
2302 	btrfs_free_device(srcdev);
2303 
2304 	/* if this is no devs we rather delete the fs_devices */
2305 	if (!fs_devices->num_devices) {
2306 		/*
2307 		 * On a mounted FS, num_devices can't be zero unless it's a
2308 		 * seed. In case of a seed device being replaced, the replace
2309 		 * target added to the sprout FS, so there will be no more
2310 		 * device left under the seed FS.
2311 		 */
2312 		ASSERT(fs_devices->seeding);
2313 
2314 		list_del_init(&fs_devices->seed_list);
2315 		close_fs_devices(fs_devices);
2316 		free_fs_devices(fs_devices);
2317 	}
2318 	mutex_unlock(&uuid_mutex);
2319 }
2320 
2321 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2322 {
2323 	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2324 
2325 	mutex_lock(&fs_devices->device_list_mutex);
2326 
2327 	btrfs_sysfs_remove_device(tgtdev);
2328 
2329 	if (tgtdev->bdev)
2330 		fs_devices->open_devices--;
2331 
2332 	fs_devices->num_devices--;
2333 
2334 	btrfs_assign_next_active_device(tgtdev, NULL);
2335 
2336 	list_del_rcu(&tgtdev->dev_list);
2337 
2338 	mutex_unlock(&fs_devices->device_list_mutex);
2339 
2340 	/*
2341 	 * The update_dev_time() with in btrfs_scratch_superblocks()
2342 	 * may lead to a call to btrfs_show_devname() which will try
2343 	 * to hold device_list_mutex. And here this device
2344 	 * is already out of device list, so we don't have to hold
2345 	 * the device_list_mutex lock.
2346 	 */
2347 	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2348 				  tgtdev->name->str);
2349 
2350 	btrfs_close_bdev(tgtdev);
2351 	synchronize_rcu();
2352 	btrfs_free_device(tgtdev);
2353 }
2354 
2355 static struct btrfs_device *btrfs_find_device_by_path(
2356 		struct btrfs_fs_info *fs_info, const char *device_path)
2357 {
2358 	int ret = 0;
2359 	struct btrfs_super_block *disk_super;
2360 	u64 devid;
2361 	u8 *dev_uuid;
2362 	struct block_device *bdev;
2363 	struct btrfs_device *device;
2364 
2365 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2366 				    fs_info->bdev_holder, 0, &bdev, &disk_super);
2367 	if (ret)
2368 		return ERR_PTR(ret);
2369 
2370 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2371 	dev_uuid = disk_super->dev_item.uuid;
2372 	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2373 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2374 					   disk_super->metadata_uuid);
2375 	else
2376 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2377 					   disk_super->fsid);
2378 
2379 	btrfs_release_disk_super(disk_super);
2380 	if (!device)
2381 		device = ERR_PTR(-ENOENT);
2382 	blkdev_put(bdev, FMODE_READ);
2383 	return device;
2384 }
2385 
2386 /*
2387  * Lookup a device given by device id, or the path if the id is 0.
2388  */
2389 struct btrfs_device *btrfs_find_device_by_devspec(
2390 		struct btrfs_fs_info *fs_info, u64 devid,
2391 		const char *device_path)
2392 {
2393 	struct btrfs_device *device;
2394 
2395 	if (devid) {
2396 		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2397 					   NULL);
2398 		if (!device)
2399 			return ERR_PTR(-ENOENT);
2400 		return device;
2401 	}
2402 
2403 	if (!device_path || !device_path[0])
2404 		return ERR_PTR(-EINVAL);
2405 
2406 	if (strcmp(device_path, "missing") == 0) {
2407 		/* Find first missing device */
2408 		list_for_each_entry(device, &fs_info->fs_devices->devices,
2409 				    dev_list) {
2410 			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2411 				     &device->dev_state) && !device->bdev)
2412 				return device;
2413 		}
2414 		return ERR_PTR(-ENOENT);
2415 	}
2416 
2417 	return btrfs_find_device_by_path(fs_info, device_path);
2418 }
2419 
2420 /*
2421  * does all the dirty work required for changing file system's UUID.
2422  */
2423 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2424 {
2425 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2426 	struct btrfs_fs_devices *old_devices;
2427 	struct btrfs_fs_devices *seed_devices;
2428 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2429 	struct btrfs_device *device;
2430 	u64 super_flags;
2431 
2432 	lockdep_assert_held(&uuid_mutex);
2433 	if (!fs_devices->seeding)
2434 		return -EINVAL;
2435 
2436 	/*
2437 	 * Private copy of the seed devices, anchored at
2438 	 * fs_info->fs_devices->seed_list
2439 	 */
2440 	seed_devices = alloc_fs_devices(NULL, NULL);
2441 	if (IS_ERR(seed_devices))
2442 		return PTR_ERR(seed_devices);
2443 
2444 	/*
2445 	 * It's necessary to retain a copy of the original seed fs_devices in
2446 	 * fs_uuids so that filesystems which have been seeded can successfully
2447 	 * reference the seed device from open_seed_devices. This also supports
2448 	 * multiple fs seed.
2449 	 */
2450 	old_devices = clone_fs_devices(fs_devices);
2451 	if (IS_ERR(old_devices)) {
2452 		kfree(seed_devices);
2453 		return PTR_ERR(old_devices);
2454 	}
2455 
2456 	list_add(&old_devices->fs_list, &fs_uuids);
2457 
2458 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2459 	seed_devices->opened = 1;
2460 	INIT_LIST_HEAD(&seed_devices->devices);
2461 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2462 	mutex_init(&seed_devices->device_list_mutex);
2463 
2464 	mutex_lock(&fs_devices->device_list_mutex);
2465 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2466 			      synchronize_rcu);
2467 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2468 		device->fs_devices = seed_devices;
2469 
2470 	fs_devices->seeding = false;
2471 	fs_devices->num_devices = 0;
2472 	fs_devices->open_devices = 0;
2473 	fs_devices->missing_devices = 0;
2474 	fs_devices->rotating = false;
2475 	list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2476 
2477 	generate_random_uuid(fs_devices->fsid);
2478 	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2479 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2480 	mutex_unlock(&fs_devices->device_list_mutex);
2481 
2482 	super_flags = btrfs_super_flags(disk_super) &
2483 		      ~BTRFS_SUPER_FLAG_SEEDING;
2484 	btrfs_set_super_flags(disk_super, super_flags);
2485 
2486 	return 0;
2487 }
2488 
2489 /*
2490  * Store the expected generation for seed devices in device items.
2491  */
2492 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2493 {
2494 	struct btrfs_fs_info *fs_info = trans->fs_info;
2495 	struct btrfs_root *root = fs_info->chunk_root;
2496 	struct btrfs_path *path;
2497 	struct extent_buffer *leaf;
2498 	struct btrfs_dev_item *dev_item;
2499 	struct btrfs_device *device;
2500 	struct btrfs_key key;
2501 	u8 fs_uuid[BTRFS_FSID_SIZE];
2502 	u8 dev_uuid[BTRFS_UUID_SIZE];
2503 	u64 devid;
2504 	int ret;
2505 
2506 	path = btrfs_alloc_path();
2507 	if (!path)
2508 		return -ENOMEM;
2509 
2510 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2511 	key.offset = 0;
2512 	key.type = BTRFS_DEV_ITEM_KEY;
2513 
2514 	while (1) {
2515 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2516 		if (ret < 0)
2517 			goto error;
2518 
2519 		leaf = path->nodes[0];
2520 next_slot:
2521 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2522 			ret = btrfs_next_leaf(root, path);
2523 			if (ret > 0)
2524 				break;
2525 			if (ret < 0)
2526 				goto error;
2527 			leaf = path->nodes[0];
2528 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2529 			btrfs_release_path(path);
2530 			continue;
2531 		}
2532 
2533 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2534 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2535 		    key.type != BTRFS_DEV_ITEM_KEY)
2536 			break;
2537 
2538 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2539 					  struct btrfs_dev_item);
2540 		devid = btrfs_device_id(leaf, dev_item);
2541 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2542 				   BTRFS_UUID_SIZE);
2543 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2544 				   BTRFS_FSID_SIZE);
2545 		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2546 					   fs_uuid);
2547 		BUG_ON(!device); /* Logic error */
2548 
2549 		if (device->fs_devices->seeding) {
2550 			btrfs_set_device_generation(leaf, dev_item,
2551 						    device->generation);
2552 			btrfs_mark_buffer_dirty(leaf);
2553 		}
2554 
2555 		path->slots[0]++;
2556 		goto next_slot;
2557 	}
2558 	ret = 0;
2559 error:
2560 	btrfs_free_path(path);
2561 	return ret;
2562 }
2563 
2564 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2565 {
2566 	struct btrfs_root *root = fs_info->dev_root;
2567 	struct request_queue *q;
2568 	struct btrfs_trans_handle *trans;
2569 	struct btrfs_device *device;
2570 	struct block_device *bdev;
2571 	struct super_block *sb = fs_info->sb;
2572 	struct rcu_string *name;
2573 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2574 	u64 orig_super_total_bytes;
2575 	u64 orig_super_num_devices;
2576 	int seeding_dev = 0;
2577 	int ret = 0;
2578 	bool locked = false;
2579 
2580 	if (sb_rdonly(sb) && !fs_devices->seeding)
2581 		return -EROFS;
2582 
2583 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2584 				  fs_info->bdev_holder);
2585 	if (IS_ERR(bdev))
2586 		return PTR_ERR(bdev);
2587 
2588 	if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2589 		ret = -EINVAL;
2590 		goto error;
2591 	}
2592 
2593 	if (fs_devices->seeding) {
2594 		seeding_dev = 1;
2595 		down_write(&sb->s_umount);
2596 		mutex_lock(&uuid_mutex);
2597 		locked = true;
2598 	}
2599 
2600 	sync_blockdev(bdev);
2601 
2602 	rcu_read_lock();
2603 	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2604 		if (device->bdev == bdev) {
2605 			ret = -EEXIST;
2606 			rcu_read_unlock();
2607 			goto error;
2608 		}
2609 	}
2610 	rcu_read_unlock();
2611 
2612 	device = btrfs_alloc_device(fs_info, NULL, NULL);
2613 	if (IS_ERR(device)) {
2614 		/* we can safely leave the fs_devices entry around */
2615 		ret = PTR_ERR(device);
2616 		goto error;
2617 	}
2618 
2619 	name = rcu_string_strdup(device_path, GFP_KERNEL);
2620 	if (!name) {
2621 		ret = -ENOMEM;
2622 		goto error_free_device;
2623 	}
2624 	rcu_assign_pointer(device->name, name);
2625 
2626 	device->fs_info = fs_info;
2627 	device->bdev = bdev;
2628 
2629 	ret = btrfs_get_dev_zone_info(device);
2630 	if (ret)
2631 		goto error_free_device;
2632 
2633 	trans = btrfs_start_transaction(root, 0);
2634 	if (IS_ERR(trans)) {
2635 		ret = PTR_ERR(trans);
2636 		goto error_free_zone;
2637 	}
2638 
2639 	q = bdev_get_queue(bdev);
2640 	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2641 	device->generation = trans->transid;
2642 	device->io_width = fs_info->sectorsize;
2643 	device->io_align = fs_info->sectorsize;
2644 	device->sector_size = fs_info->sectorsize;
2645 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2646 					 fs_info->sectorsize);
2647 	device->disk_total_bytes = device->total_bytes;
2648 	device->commit_total_bytes = device->total_bytes;
2649 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2650 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2651 	device->mode = FMODE_EXCL;
2652 	device->dev_stats_valid = 1;
2653 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2654 
2655 	if (seeding_dev) {
2656 		btrfs_clear_sb_rdonly(sb);
2657 		ret = btrfs_prepare_sprout(fs_info);
2658 		if (ret) {
2659 			btrfs_abort_transaction(trans, ret);
2660 			goto error_trans;
2661 		}
2662 	}
2663 
2664 	device->fs_devices = fs_devices;
2665 
2666 	mutex_lock(&fs_devices->device_list_mutex);
2667 	mutex_lock(&fs_info->chunk_mutex);
2668 	list_add_rcu(&device->dev_list, &fs_devices->devices);
2669 	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2670 	fs_devices->num_devices++;
2671 	fs_devices->open_devices++;
2672 	fs_devices->rw_devices++;
2673 	fs_devices->total_devices++;
2674 	fs_devices->total_rw_bytes += device->total_bytes;
2675 
2676 	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2677 
2678 	if (!blk_queue_nonrot(q))
2679 		fs_devices->rotating = true;
2680 
2681 	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2682 	btrfs_set_super_total_bytes(fs_info->super_copy,
2683 		round_down(orig_super_total_bytes + device->total_bytes,
2684 			   fs_info->sectorsize));
2685 
2686 	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2687 	btrfs_set_super_num_devices(fs_info->super_copy,
2688 				    orig_super_num_devices + 1);
2689 
2690 	/*
2691 	 * we've got more storage, clear any full flags on the space
2692 	 * infos
2693 	 */
2694 	btrfs_clear_space_info_full(fs_info);
2695 
2696 	mutex_unlock(&fs_info->chunk_mutex);
2697 
2698 	/* Add sysfs device entry */
2699 	btrfs_sysfs_add_device(device);
2700 
2701 	mutex_unlock(&fs_devices->device_list_mutex);
2702 
2703 	if (seeding_dev) {
2704 		mutex_lock(&fs_info->chunk_mutex);
2705 		ret = init_first_rw_device(trans);
2706 		mutex_unlock(&fs_info->chunk_mutex);
2707 		if (ret) {
2708 			btrfs_abort_transaction(trans, ret);
2709 			goto error_sysfs;
2710 		}
2711 	}
2712 
2713 	ret = btrfs_add_dev_item(trans, device);
2714 	if (ret) {
2715 		btrfs_abort_transaction(trans, ret);
2716 		goto error_sysfs;
2717 	}
2718 
2719 	if (seeding_dev) {
2720 		ret = btrfs_finish_sprout(trans);
2721 		if (ret) {
2722 			btrfs_abort_transaction(trans, ret);
2723 			goto error_sysfs;
2724 		}
2725 
2726 		/*
2727 		 * fs_devices now represents the newly sprouted filesystem and
2728 		 * its fsid has been changed by btrfs_prepare_sprout
2729 		 */
2730 		btrfs_sysfs_update_sprout_fsid(fs_devices);
2731 	}
2732 
2733 	ret = btrfs_commit_transaction(trans);
2734 
2735 	if (seeding_dev) {
2736 		mutex_unlock(&uuid_mutex);
2737 		up_write(&sb->s_umount);
2738 		locked = false;
2739 
2740 		if (ret) /* transaction commit */
2741 			return ret;
2742 
2743 		ret = btrfs_relocate_sys_chunks(fs_info);
2744 		if (ret < 0)
2745 			btrfs_handle_fs_error(fs_info, ret,
2746 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2747 		trans = btrfs_attach_transaction(root);
2748 		if (IS_ERR(trans)) {
2749 			if (PTR_ERR(trans) == -ENOENT)
2750 				return 0;
2751 			ret = PTR_ERR(trans);
2752 			trans = NULL;
2753 			goto error_sysfs;
2754 		}
2755 		ret = btrfs_commit_transaction(trans);
2756 	}
2757 
2758 	/*
2759 	 * Now that we have written a new super block to this device, check all
2760 	 * other fs_devices list if device_path alienates any other scanned
2761 	 * device.
2762 	 * We can ignore the return value as it typically returns -EINVAL and
2763 	 * only succeeds if the device was an alien.
2764 	 */
2765 	btrfs_forget_devices(device_path);
2766 
2767 	/* Update ctime/mtime for blkid or udev */
2768 	update_dev_time(device_path);
2769 
2770 	return ret;
2771 
2772 error_sysfs:
2773 	btrfs_sysfs_remove_device(device);
2774 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2775 	mutex_lock(&fs_info->chunk_mutex);
2776 	list_del_rcu(&device->dev_list);
2777 	list_del(&device->dev_alloc_list);
2778 	fs_info->fs_devices->num_devices--;
2779 	fs_info->fs_devices->open_devices--;
2780 	fs_info->fs_devices->rw_devices--;
2781 	fs_info->fs_devices->total_devices--;
2782 	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2783 	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2784 	btrfs_set_super_total_bytes(fs_info->super_copy,
2785 				    orig_super_total_bytes);
2786 	btrfs_set_super_num_devices(fs_info->super_copy,
2787 				    orig_super_num_devices);
2788 	mutex_unlock(&fs_info->chunk_mutex);
2789 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2790 error_trans:
2791 	if (seeding_dev)
2792 		btrfs_set_sb_rdonly(sb);
2793 	if (trans)
2794 		btrfs_end_transaction(trans);
2795 error_free_zone:
2796 	btrfs_destroy_dev_zone_info(device);
2797 error_free_device:
2798 	btrfs_free_device(device);
2799 error:
2800 	blkdev_put(bdev, FMODE_EXCL);
2801 	if (locked) {
2802 		mutex_unlock(&uuid_mutex);
2803 		up_write(&sb->s_umount);
2804 	}
2805 	return ret;
2806 }
2807 
2808 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2809 					struct btrfs_device *device)
2810 {
2811 	int ret;
2812 	struct btrfs_path *path;
2813 	struct btrfs_root *root = device->fs_info->chunk_root;
2814 	struct btrfs_dev_item *dev_item;
2815 	struct extent_buffer *leaf;
2816 	struct btrfs_key key;
2817 
2818 	path = btrfs_alloc_path();
2819 	if (!path)
2820 		return -ENOMEM;
2821 
2822 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2823 	key.type = BTRFS_DEV_ITEM_KEY;
2824 	key.offset = device->devid;
2825 
2826 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2827 	if (ret < 0)
2828 		goto out;
2829 
2830 	if (ret > 0) {
2831 		ret = -ENOENT;
2832 		goto out;
2833 	}
2834 
2835 	leaf = path->nodes[0];
2836 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2837 
2838 	btrfs_set_device_id(leaf, dev_item, device->devid);
2839 	btrfs_set_device_type(leaf, dev_item, device->type);
2840 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2841 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2842 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2843 	btrfs_set_device_total_bytes(leaf, dev_item,
2844 				     btrfs_device_get_disk_total_bytes(device));
2845 	btrfs_set_device_bytes_used(leaf, dev_item,
2846 				    btrfs_device_get_bytes_used(device));
2847 	btrfs_mark_buffer_dirty(leaf);
2848 
2849 out:
2850 	btrfs_free_path(path);
2851 	return ret;
2852 }
2853 
2854 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2855 		      struct btrfs_device *device, u64 new_size)
2856 {
2857 	struct btrfs_fs_info *fs_info = device->fs_info;
2858 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2859 	u64 old_total;
2860 	u64 diff;
2861 
2862 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2863 		return -EACCES;
2864 
2865 	new_size = round_down(new_size, fs_info->sectorsize);
2866 
2867 	mutex_lock(&fs_info->chunk_mutex);
2868 	old_total = btrfs_super_total_bytes(super_copy);
2869 	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2870 
2871 	if (new_size <= device->total_bytes ||
2872 	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2873 		mutex_unlock(&fs_info->chunk_mutex);
2874 		return -EINVAL;
2875 	}
2876 
2877 	btrfs_set_super_total_bytes(super_copy,
2878 			round_down(old_total + diff, fs_info->sectorsize));
2879 	device->fs_devices->total_rw_bytes += diff;
2880 
2881 	btrfs_device_set_total_bytes(device, new_size);
2882 	btrfs_device_set_disk_total_bytes(device, new_size);
2883 	btrfs_clear_space_info_full(device->fs_info);
2884 	if (list_empty(&device->post_commit_list))
2885 		list_add_tail(&device->post_commit_list,
2886 			      &trans->transaction->dev_update_list);
2887 	mutex_unlock(&fs_info->chunk_mutex);
2888 
2889 	return btrfs_update_device(trans, device);
2890 }
2891 
2892 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2893 {
2894 	struct btrfs_fs_info *fs_info = trans->fs_info;
2895 	struct btrfs_root *root = fs_info->chunk_root;
2896 	int ret;
2897 	struct btrfs_path *path;
2898 	struct btrfs_key key;
2899 
2900 	path = btrfs_alloc_path();
2901 	if (!path)
2902 		return -ENOMEM;
2903 
2904 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2905 	key.offset = chunk_offset;
2906 	key.type = BTRFS_CHUNK_ITEM_KEY;
2907 
2908 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2909 	if (ret < 0)
2910 		goto out;
2911 	else if (ret > 0) { /* Logic error or corruption */
2912 		btrfs_handle_fs_error(fs_info, -ENOENT,
2913 				      "Failed lookup while freeing chunk.");
2914 		ret = -ENOENT;
2915 		goto out;
2916 	}
2917 
2918 	ret = btrfs_del_item(trans, root, path);
2919 	if (ret < 0)
2920 		btrfs_handle_fs_error(fs_info, ret,
2921 				      "Failed to delete chunk item.");
2922 out:
2923 	btrfs_free_path(path);
2924 	return ret;
2925 }
2926 
2927 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2928 {
2929 	struct btrfs_super_block *super_copy = fs_info->super_copy;
2930 	struct btrfs_disk_key *disk_key;
2931 	struct btrfs_chunk *chunk;
2932 	u8 *ptr;
2933 	int ret = 0;
2934 	u32 num_stripes;
2935 	u32 array_size;
2936 	u32 len = 0;
2937 	u32 cur;
2938 	struct btrfs_key key;
2939 
2940 	lockdep_assert_held(&fs_info->chunk_mutex);
2941 	array_size = btrfs_super_sys_array_size(super_copy);
2942 
2943 	ptr = super_copy->sys_chunk_array;
2944 	cur = 0;
2945 
2946 	while (cur < array_size) {
2947 		disk_key = (struct btrfs_disk_key *)ptr;
2948 		btrfs_disk_key_to_cpu(&key, disk_key);
2949 
2950 		len = sizeof(*disk_key);
2951 
2952 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2953 			chunk = (struct btrfs_chunk *)(ptr + len);
2954 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2955 			len += btrfs_chunk_item_size(num_stripes);
2956 		} else {
2957 			ret = -EIO;
2958 			break;
2959 		}
2960 		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2961 		    key.offset == chunk_offset) {
2962 			memmove(ptr, ptr + len, array_size - (cur + len));
2963 			array_size -= len;
2964 			btrfs_set_super_sys_array_size(super_copy, array_size);
2965 		} else {
2966 			ptr += len;
2967 			cur += len;
2968 		}
2969 	}
2970 	return ret;
2971 }
2972 
2973 /*
2974  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2975  * @logical: Logical block offset in bytes.
2976  * @length: Length of extent in bytes.
2977  *
2978  * Return: Chunk mapping or ERR_PTR.
2979  */
2980 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2981 				       u64 logical, u64 length)
2982 {
2983 	struct extent_map_tree *em_tree;
2984 	struct extent_map *em;
2985 
2986 	em_tree = &fs_info->mapping_tree;
2987 	read_lock(&em_tree->lock);
2988 	em = lookup_extent_mapping(em_tree, logical, length);
2989 	read_unlock(&em_tree->lock);
2990 
2991 	if (!em) {
2992 		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2993 			   logical, length);
2994 		return ERR_PTR(-EINVAL);
2995 	}
2996 
2997 	if (em->start > logical || em->start + em->len < logical) {
2998 		btrfs_crit(fs_info,
2999 			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3000 			   logical, length, em->start, em->start + em->len);
3001 		free_extent_map(em);
3002 		return ERR_PTR(-EINVAL);
3003 	}
3004 
3005 	/* callers are responsible for dropping em's ref. */
3006 	return em;
3007 }
3008 
3009 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3010 			     struct map_lookup *map, u64 chunk_offset)
3011 {
3012 	int i;
3013 
3014 	/*
3015 	 * Removing chunk items and updating the device items in the chunks btree
3016 	 * requires holding the chunk_mutex.
3017 	 * See the comment at btrfs_chunk_alloc() for the details.
3018 	 */
3019 	lockdep_assert_held(&trans->fs_info->chunk_mutex);
3020 
3021 	for (i = 0; i < map->num_stripes; i++) {
3022 		int ret;
3023 
3024 		ret = btrfs_update_device(trans, map->stripes[i].dev);
3025 		if (ret)
3026 			return ret;
3027 	}
3028 
3029 	return btrfs_free_chunk(trans, chunk_offset);
3030 }
3031 
3032 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3033 {
3034 	struct btrfs_fs_info *fs_info = trans->fs_info;
3035 	struct extent_map *em;
3036 	struct map_lookup *map;
3037 	u64 dev_extent_len = 0;
3038 	int i, ret = 0;
3039 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3040 
3041 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3042 	if (IS_ERR(em)) {
3043 		/*
3044 		 * This is a logic error, but we don't want to just rely on the
3045 		 * user having built with ASSERT enabled, so if ASSERT doesn't
3046 		 * do anything we still error out.
3047 		 */
3048 		ASSERT(0);
3049 		return PTR_ERR(em);
3050 	}
3051 	map = em->map_lookup;
3052 
3053 	/*
3054 	 * First delete the device extent items from the devices btree.
3055 	 * We take the device_list_mutex to avoid racing with the finishing phase
3056 	 * of a device replace operation. See the comment below before acquiring
3057 	 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3058 	 * because that can result in a deadlock when deleting the device extent
3059 	 * items from the devices btree - COWing an extent buffer from the btree
3060 	 * may result in allocating a new metadata chunk, which would attempt to
3061 	 * lock again fs_info->chunk_mutex.
3062 	 */
3063 	mutex_lock(&fs_devices->device_list_mutex);
3064 	for (i = 0; i < map->num_stripes; i++) {
3065 		struct btrfs_device *device = map->stripes[i].dev;
3066 		ret = btrfs_free_dev_extent(trans, device,
3067 					    map->stripes[i].physical,
3068 					    &dev_extent_len);
3069 		if (ret) {
3070 			mutex_unlock(&fs_devices->device_list_mutex);
3071 			btrfs_abort_transaction(trans, ret);
3072 			goto out;
3073 		}
3074 
3075 		if (device->bytes_used > 0) {
3076 			mutex_lock(&fs_info->chunk_mutex);
3077 			btrfs_device_set_bytes_used(device,
3078 					device->bytes_used - dev_extent_len);
3079 			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3080 			btrfs_clear_space_info_full(fs_info);
3081 			mutex_unlock(&fs_info->chunk_mutex);
3082 		}
3083 	}
3084 	mutex_unlock(&fs_devices->device_list_mutex);
3085 
3086 	/*
3087 	 * We acquire fs_info->chunk_mutex for 2 reasons:
3088 	 *
3089 	 * 1) Just like with the first phase of the chunk allocation, we must
3090 	 *    reserve system space, do all chunk btree updates and deletions, and
3091 	 *    update the system chunk array in the superblock while holding this
3092 	 *    mutex. This is for similar reasons as explained on the comment at
3093 	 *    the top of btrfs_chunk_alloc();
3094 	 *
3095 	 * 2) Prevent races with the final phase of a device replace operation
3096 	 *    that replaces the device object associated with the map's stripes,
3097 	 *    because the device object's id can change at any time during that
3098 	 *    final phase of the device replace operation
3099 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3100 	 *    replaced device and then see it with an ID of
3101 	 *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3102 	 *    the device item, which does not exists on the chunk btree.
3103 	 *    The finishing phase of device replace acquires both the
3104 	 *    device_list_mutex and the chunk_mutex, in that order, so we are
3105 	 *    safe by just acquiring the chunk_mutex.
3106 	 */
3107 	trans->removing_chunk = true;
3108 	mutex_lock(&fs_info->chunk_mutex);
3109 
3110 	check_system_chunk(trans, map->type);
3111 
3112 	ret = remove_chunk_item(trans, map, chunk_offset);
3113 	/*
3114 	 * Normally we should not get -ENOSPC since we reserved space before
3115 	 * through the call to check_system_chunk().
3116 	 *
3117 	 * Despite our system space_info having enough free space, we may not
3118 	 * be able to allocate extents from its block groups, because all have
3119 	 * an incompatible profile, which will force us to allocate a new system
3120 	 * block group with the right profile, or right after we called
3121 	 * check_system_space() above, a scrub turned the only system block group
3122 	 * with enough free space into RO mode.
3123 	 * This is explained with more detail at do_chunk_alloc().
3124 	 *
3125 	 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3126 	 */
3127 	if (ret == -ENOSPC) {
3128 		const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3129 		struct btrfs_block_group *sys_bg;
3130 
3131 		sys_bg = btrfs_alloc_chunk(trans, sys_flags);
3132 		if (IS_ERR(sys_bg)) {
3133 			ret = PTR_ERR(sys_bg);
3134 			btrfs_abort_transaction(trans, ret);
3135 			goto out;
3136 		}
3137 
3138 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3139 		if (ret) {
3140 			btrfs_abort_transaction(trans, ret);
3141 			goto out;
3142 		}
3143 
3144 		ret = remove_chunk_item(trans, map, chunk_offset);
3145 		if (ret) {
3146 			btrfs_abort_transaction(trans, ret);
3147 			goto out;
3148 		}
3149 	} else if (ret) {
3150 		btrfs_abort_transaction(trans, ret);
3151 		goto out;
3152 	}
3153 
3154 	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3155 
3156 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3157 		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3158 		if (ret) {
3159 			btrfs_abort_transaction(trans, ret);
3160 			goto out;
3161 		}
3162 	}
3163 
3164 	mutex_unlock(&fs_info->chunk_mutex);
3165 	trans->removing_chunk = false;
3166 
3167 	/*
3168 	 * We are done with chunk btree updates and deletions, so release the
3169 	 * system space we previously reserved (with check_system_chunk()).
3170 	 */
3171 	btrfs_trans_release_chunk_metadata(trans);
3172 
3173 	ret = btrfs_remove_block_group(trans, chunk_offset, em);
3174 	if (ret) {
3175 		btrfs_abort_transaction(trans, ret);
3176 		goto out;
3177 	}
3178 
3179 out:
3180 	if (trans->removing_chunk) {
3181 		mutex_unlock(&fs_info->chunk_mutex);
3182 		trans->removing_chunk = false;
3183 	}
3184 	/* once for us */
3185 	free_extent_map(em);
3186 	return ret;
3187 }
3188 
3189 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3190 {
3191 	struct btrfs_root *root = fs_info->chunk_root;
3192 	struct btrfs_trans_handle *trans;
3193 	struct btrfs_block_group *block_group;
3194 	u64 length;
3195 	int ret;
3196 
3197 	/*
3198 	 * Prevent races with automatic removal of unused block groups.
3199 	 * After we relocate and before we remove the chunk with offset
3200 	 * chunk_offset, automatic removal of the block group can kick in,
3201 	 * resulting in a failure when calling btrfs_remove_chunk() below.
3202 	 *
3203 	 * Make sure to acquire this mutex before doing a tree search (dev
3204 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3205 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3206 	 * we release the path used to search the chunk/dev tree and before
3207 	 * the current task acquires this mutex and calls us.
3208 	 */
3209 	lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3210 
3211 	/* step one, relocate all the extents inside this chunk */
3212 	btrfs_scrub_pause(fs_info);
3213 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3214 	btrfs_scrub_continue(fs_info);
3215 	if (ret)
3216 		return ret;
3217 
3218 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3219 	if (!block_group)
3220 		return -ENOENT;
3221 	btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3222 	length = block_group->length;
3223 	btrfs_put_block_group(block_group);
3224 
3225 	/*
3226 	 * On a zoned file system, discard the whole block group, this will
3227 	 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3228 	 * resetting the zone fails, don't treat it as a fatal problem from the
3229 	 * filesystem's point of view.
3230 	 */
3231 	if (btrfs_is_zoned(fs_info)) {
3232 		ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3233 		if (ret)
3234 			btrfs_info(fs_info,
3235 				"failed to reset zone %llu after relocation",
3236 				chunk_offset);
3237 	}
3238 
3239 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
3240 						     chunk_offset);
3241 	if (IS_ERR(trans)) {
3242 		ret = PTR_ERR(trans);
3243 		btrfs_handle_fs_error(root->fs_info, ret, NULL);
3244 		return ret;
3245 	}
3246 
3247 	/*
3248 	 * step two, delete the device extents and the
3249 	 * chunk tree entries
3250 	 */
3251 	ret = btrfs_remove_chunk(trans, chunk_offset);
3252 	btrfs_end_transaction(trans);
3253 	return ret;
3254 }
3255 
3256 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3257 {
3258 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3259 	struct btrfs_path *path;
3260 	struct extent_buffer *leaf;
3261 	struct btrfs_chunk *chunk;
3262 	struct btrfs_key key;
3263 	struct btrfs_key found_key;
3264 	u64 chunk_type;
3265 	bool retried = false;
3266 	int failed = 0;
3267 	int ret;
3268 
3269 	path = btrfs_alloc_path();
3270 	if (!path)
3271 		return -ENOMEM;
3272 
3273 again:
3274 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3275 	key.offset = (u64)-1;
3276 	key.type = BTRFS_CHUNK_ITEM_KEY;
3277 
3278 	while (1) {
3279 		mutex_lock(&fs_info->reclaim_bgs_lock);
3280 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3281 		if (ret < 0) {
3282 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3283 			goto error;
3284 		}
3285 		BUG_ON(ret == 0); /* Corruption */
3286 
3287 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
3288 					  key.type);
3289 		if (ret)
3290 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3291 		if (ret < 0)
3292 			goto error;
3293 		if (ret > 0)
3294 			break;
3295 
3296 		leaf = path->nodes[0];
3297 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3298 
3299 		chunk = btrfs_item_ptr(leaf, path->slots[0],
3300 				       struct btrfs_chunk);
3301 		chunk_type = btrfs_chunk_type(leaf, chunk);
3302 		btrfs_release_path(path);
3303 
3304 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3305 			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3306 			if (ret == -ENOSPC)
3307 				failed++;
3308 			else
3309 				BUG_ON(ret);
3310 		}
3311 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3312 
3313 		if (found_key.offset == 0)
3314 			break;
3315 		key.offset = found_key.offset - 1;
3316 	}
3317 	ret = 0;
3318 	if (failed && !retried) {
3319 		failed = 0;
3320 		retried = true;
3321 		goto again;
3322 	} else if (WARN_ON(failed && retried)) {
3323 		ret = -ENOSPC;
3324 	}
3325 error:
3326 	btrfs_free_path(path);
3327 	return ret;
3328 }
3329 
3330 /*
3331  * return 1 : allocate a data chunk successfully,
3332  * return <0: errors during allocating a data chunk,
3333  * return 0 : no need to allocate a data chunk.
3334  */
3335 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3336 				      u64 chunk_offset)
3337 {
3338 	struct btrfs_block_group *cache;
3339 	u64 bytes_used;
3340 	u64 chunk_type;
3341 
3342 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3343 	ASSERT(cache);
3344 	chunk_type = cache->flags;
3345 	btrfs_put_block_group(cache);
3346 
3347 	if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3348 		return 0;
3349 
3350 	spin_lock(&fs_info->data_sinfo->lock);
3351 	bytes_used = fs_info->data_sinfo->bytes_used;
3352 	spin_unlock(&fs_info->data_sinfo->lock);
3353 
3354 	if (!bytes_used) {
3355 		struct btrfs_trans_handle *trans;
3356 		int ret;
3357 
3358 		trans =	btrfs_join_transaction(fs_info->tree_root);
3359 		if (IS_ERR(trans))
3360 			return PTR_ERR(trans);
3361 
3362 		ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3363 		btrfs_end_transaction(trans);
3364 		if (ret < 0)
3365 			return ret;
3366 		return 1;
3367 	}
3368 
3369 	return 0;
3370 }
3371 
3372 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3373 			       struct btrfs_balance_control *bctl)
3374 {
3375 	struct btrfs_root *root = fs_info->tree_root;
3376 	struct btrfs_trans_handle *trans;
3377 	struct btrfs_balance_item *item;
3378 	struct btrfs_disk_balance_args disk_bargs;
3379 	struct btrfs_path *path;
3380 	struct extent_buffer *leaf;
3381 	struct btrfs_key key;
3382 	int ret, err;
3383 
3384 	path = btrfs_alloc_path();
3385 	if (!path)
3386 		return -ENOMEM;
3387 
3388 	trans = btrfs_start_transaction(root, 0);
3389 	if (IS_ERR(trans)) {
3390 		btrfs_free_path(path);
3391 		return PTR_ERR(trans);
3392 	}
3393 
3394 	key.objectid = BTRFS_BALANCE_OBJECTID;
3395 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3396 	key.offset = 0;
3397 
3398 	ret = btrfs_insert_empty_item(trans, root, path, &key,
3399 				      sizeof(*item));
3400 	if (ret)
3401 		goto out;
3402 
3403 	leaf = path->nodes[0];
3404 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3405 
3406 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3407 
3408 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3409 	btrfs_set_balance_data(leaf, item, &disk_bargs);
3410 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3411 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
3412 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3413 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
3414 
3415 	btrfs_set_balance_flags(leaf, item, bctl->flags);
3416 
3417 	btrfs_mark_buffer_dirty(leaf);
3418 out:
3419 	btrfs_free_path(path);
3420 	err = btrfs_commit_transaction(trans);
3421 	if (err && !ret)
3422 		ret = err;
3423 	return ret;
3424 }
3425 
3426 static int del_balance_item(struct btrfs_fs_info *fs_info)
3427 {
3428 	struct btrfs_root *root = fs_info->tree_root;
3429 	struct btrfs_trans_handle *trans;
3430 	struct btrfs_path *path;
3431 	struct btrfs_key key;
3432 	int ret, err;
3433 
3434 	path = btrfs_alloc_path();
3435 	if (!path)
3436 		return -ENOMEM;
3437 
3438 	trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3439 	if (IS_ERR(trans)) {
3440 		btrfs_free_path(path);
3441 		return PTR_ERR(trans);
3442 	}
3443 
3444 	key.objectid = BTRFS_BALANCE_OBJECTID;
3445 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
3446 	key.offset = 0;
3447 
3448 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3449 	if (ret < 0)
3450 		goto out;
3451 	if (ret > 0) {
3452 		ret = -ENOENT;
3453 		goto out;
3454 	}
3455 
3456 	ret = btrfs_del_item(trans, root, path);
3457 out:
3458 	btrfs_free_path(path);
3459 	err = btrfs_commit_transaction(trans);
3460 	if (err && !ret)
3461 		ret = err;
3462 	return ret;
3463 }
3464 
3465 /*
3466  * This is a heuristic used to reduce the number of chunks balanced on
3467  * resume after balance was interrupted.
3468  */
3469 static void update_balance_args(struct btrfs_balance_control *bctl)
3470 {
3471 	/*
3472 	 * Turn on soft mode for chunk types that were being converted.
3473 	 */
3474 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3475 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3476 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3477 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3478 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3479 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3480 
3481 	/*
3482 	 * Turn on usage filter if is not already used.  The idea is
3483 	 * that chunks that we have already balanced should be
3484 	 * reasonably full.  Don't do it for chunks that are being
3485 	 * converted - that will keep us from relocating unconverted
3486 	 * (albeit full) chunks.
3487 	 */
3488 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3489 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3490 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3491 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3492 		bctl->data.usage = 90;
3493 	}
3494 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3495 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3496 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3497 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3498 		bctl->sys.usage = 90;
3499 	}
3500 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3501 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3502 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3503 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3504 		bctl->meta.usage = 90;
3505 	}
3506 }
3507 
3508 /*
3509  * Clear the balance status in fs_info and delete the balance item from disk.
3510  */
3511 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3512 {
3513 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3514 	int ret;
3515 
3516 	BUG_ON(!fs_info->balance_ctl);
3517 
3518 	spin_lock(&fs_info->balance_lock);
3519 	fs_info->balance_ctl = NULL;
3520 	spin_unlock(&fs_info->balance_lock);
3521 
3522 	kfree(bctl);
3523 	ret = del_balance_item(fs_info);
3524 	if (ret)
3525 		btrfs_handle_fs_error(fs_info, ret, NULL);
3526 }
3527 
3528 /*
3529  * Balance filters.  Return 1 if chunk should be filtered out
3530  * (should not be balanced).
3531  */
3532 static int chunk_profiles_filter(u64 chunk_type,
3533 				 struct btrfs_balance_args *bargs)
3534 {
3535 	chunk_type = chunk_to_extended(chunk_type) &
3536 				BTRFS_EXTENDED_PROFILE_MASK;
3537 
3538 	if (bargs->profiles & chunk_type)
3539 		return 0;
3540 
3541 	return 1;
3542 }
3543 
3544 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3545 			      struct btrfs_balance_args *bargs)
3546 {
3547 	struct btrfs_block_group *cache;
3548 	u64 chunk_used;
3549 	u64 user_thresh_min;
3550 	u64 user_thresh_max;
3551 	int ret = 1;
3552 
3553 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3554 	chunk_used = cache->used;
3555 
3556 	if (bargs->usage_min == 0)
3557 		user_thresh_min = 0;
3558 	else
3559 		user_thresh_min = div_factor_fine(cache->length,
3560 						  bargs->usage_min);
3561 
3562 	if (bargs->usage_max == 0)
3563 		user_thresh_max = 1;
3564 	else if (bargs->usage_max > 100)
3565 		user_thresh_max = cache->length;
3566 	else
3567 		user_thresh_max = div_factor_fine(cache->length,
3568 						  bargs->usage_max);
3569 
3570 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3571 		ret = 0;
3572 
3573 	btrfs_put_block_group(cache);
3574 	return ret;
3575 }
3576 
3577 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3578 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3579 {
3580 	struct btrfs_block_group *cache;
3581 	u64 chunk_used, user_thresh;
3582 	int ret = 1;
3583 
3584 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3585 	chunk_used = cache->used;
3586 
3587 	if (bargs->usage_min == 0)
3588 		user_thresh = 1;
3589 	else if (bargs->usage > 100)
3590 		user_thresh = cache->length;
3591 	else
3592 		user_thresh = div_factor_fine(cache->length, bargs->usage);
3593 
3594 	if (chunk_used < user_thresh)
3595 		ret = 0;
3596 
3597 	btrfs_put_block_group(cache);
3598 	return ret;
3599 }
3600 
3601 static int chunk_devid_filter(struct extent_buffer *leaf,
3602 			      struct btrfs_chunk *chunk,
3603 			      struct btrfs_balance_args *bargs)
3604 {
3605 	struct btrfs_stripe *stripe;
3606 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3607 	int i;
3608 
3609 	for (i = 0; i < num_stripes; i++) {
3610 		stripe = btrfs_stripe_nr(chunk, i);
3611 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3612 			return 0;
3613 	}
3614 
3615 	return 1;
3616 }
3617 
3618 static u64 calc_data_stripes(u64 type, int num_stripes)
3619 {
3620 	const int index = btrfs_bg_flags_to_raid_index(type);
3621 	const int ncopies = btrfs_raid_array[index].ncopies;
3622 	const int nparity = btrfs_raid_array[index].nparity;
3623 
3624 	if (nparity)
3625 		return num_stripes - nparity;
3626 	else
3627 		return num_stripes / ncopies;
3628 }
3629 
3630 /* [pstart, pend) */
3631 static int chunk_drange_filter(struct extent_buffer *leaf,
3632 			       struct btrfs_chunk *chunk,
3633 			       struct btrfs_balance_args *bargs)
3634 {
3635 	struct btrfs_stripe *stripe;
3636 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3637 	u64 stripe_offset;
3638 	u64 stripe_length;
3639 	u64 type;
3640 	int factor;
3641 	int i;
3642 
3643 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3644 		return 0;
3645 
3646 	type = btrfs_chunk_type(leaf, chunk);
3647 	factor = calc_data_stripes(type, num_stripes);
3648 
3649 	for (i = 0; i < num_stripes; i++) {
3650 		stripe = btrfs_stripe_nr(chunk, i);
3651 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3652 			continue;
3653 
3654 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3655 		stripe_length = btrfs_chunk_length(leaf, chunk);
3656 		stripe_length = div_u64(stripe_length, factor);
3657 
3658 		if (stripe_offset < bargs->pend &&
3659 		    stripe_offset + stripe_length > bargs->pstart)
3660 			return 0;
3661 	}
3662 
3663 	return 1;
3664 }
3665 
3666 /* [vstart, vend) */
3667 static int chunk_vrange_filter(struct extent_buffer *leaf,
3668 			       struct btrfs_chunk *chunk,
3669 			       u64 chunk_offset,
3670 			       struct btrfs_balance_args *bargs)
3671 {
3672 	if (chunk_offset < bargs->vend &&
3673 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3674 		/* at least part of the chunk is inside this vrange */
3675 		return 0;
3676 
3677 	return 1;
3678 }
3679 
3680 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3681 			       struct btrfs_chunk *chunk,
3682 			       struct btrfs_balance_args *bargs)
3683 {
3684 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3685 
3686 	if (bargs->stripes_min <= num_stripes
3687 			&& num_stripes <= bargs->stripes_max)
3688 		return 0;
3689 
3690 	return 1;
3691 }
3692 
3693 static int chunk_soft_convert_filter(u64 chunk_type,
3694 				     struct btrfs_balance_args *bargs)
3695 {
3696 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3697 		return 0;
3698 
3699 	chunk_type = chunk_to_extended(chunk_type) &
3700 				BTRFS_EXTENDED_PROFILE_MASK;
3701 
3702 	if (bargs->target == chunk_type)
3703 		return 1;
3704 
3705 	return 0;
3706 }
3707 
3708 static int should_balance_chunk(struct extent_buffer *leaf,
3709 				struct btrfs_chunk *chunk, u64 chunk_offset)
3710 {
3711 	struct btrfs_fs_info *fs_info = leaf->fs_info;
3712 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3713 	struct btrfs_balance_args *bargs = NULL;
3714 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3715 
3716 	/* type filter */
3717 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3718 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3719 		return 0;
3720 	}
3721 
3722 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3723 		bargs = &bctl->data;
3724 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3725 		bargs = &bctl->sys;
3726 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3727 		bargs = &bctl->meta;
3728 
3729 	/* profiles filter */
3730 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3731 	    chunk_profiles_filter(chunk_type, bargs)) {
3732 		return 0;
3733 	}
3734 
3735 	/* usage filter */
3736 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3737 	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3738 		return 0;
3739 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3740 	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3741 		return 0;
3742 	}
3743 
3744 	/* devid filter */
3745 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3746 	    chunk_devid_filter(leaf, chunk, bargs)) {
3747 		return 0;
3748 	}
3749 
3750 	/* drange filter, makes sense only with devid filter */
3751 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3752 	    chunk_drange_filter(leaf, chunk, bargs)) {
3753 		return 0;
3754 	}
3755 
3756 	/* vrange filter */
3757 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3758 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3759 		return 0;
3760 	}
3761 
3762 	/* stripes filter */
3763 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3764 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3765 		return 0;
3766 	}
3767 
3768 	/* soft profile changing mode */
3769 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3770 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3771 		return 0;
3772 	}
3773 
3774 	/*
3775 	 * limited by count, must be the last filter
3776 	 */
3777 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3778 		if (bargs->limit == 0)
3779 			return 0;
3780 		else
3781 			bargs->limit--;
3782 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3783 		/*
3784 		 * Same logic as the 'limit' filter; the minimum cannot be
3785 		 * determined here because we do not have the global information
3786 		 * about the count of all chunks that satisfy the filters.
3787 		 */
3788 		if (bargs->limit_max == 0)
3789 			return 0;
3790 		else
3791 			bargs->limit_max--;
3792 	}
3793 
3794 	return 1;
3795 }
3796 
3797 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3798 {
3799 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3800 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3801 	u64 chunk_type;
3802 	struct btrfs_chunk *chunk;
3803 	struct btrfs_path *path = NULL;
3804 	struct btrfs_key key;
3805 	struct btrfs_key found_key;
3806 	struct extent_buffer *leaf;
3807 	int slot;
3808 	int ret;
3809 	int enospc_errors = 0;
3810 	bool counting = true;
3811 	/* The single value limit and min/max limits use the same bytes in the */
3812 	u64 limit_data = bctl->data.limit;
3813 	u64 limit_meta = bctl->meta.limit;
3814 	u64 limit_sys = bctl->sys.limit;
3815 	u32 count_data = 0;
3816 	u32 count_meta = 0;
3817 	u32 count_sys = 0;
3818 	int chunk_reserved = 0;
3819 
3820 	path = btrfs_alloc_path();
3821 	if (!path) {
3822 		ret = -ENOMEM;
3823 		goto error;
3824 	}
3825 
3826 	/* zero out stat counters */
3827 	spin_lock(&fs_info->balance_lock);
3828 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3829 	spin_unlock(&fs_info->balance_lock);
3830 again:
3831 	if (!counting) {
3832 		/*
3833 		 * The single value limit and min/max limits use the same bytes
3834 		 * in the
3835 		 */
3836 		bctl->data.limit = limit_data;
3837 		bctl->meta.limit = limit_meta;
3838 		bctl->sys.limit = limit_sys;
3839 	}
3840 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3841 	key.offset = (u64)-1;
3842 	key.type = BTRFS_CHUNK_ITEM_KEY;
3843 
3844 	while (1) {
3845 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3846 		    atomic_read(&fs_info->balance_cancel_req)) {
3847 			ret = -ECANCELED;
3848 			goto error;
3849 		}
3850 
3851 		mutex_lock(&fs_info->reclaim_bgs_lock);
3852 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3853 		if (ret < 0) {
3854 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3855 			goto error;
3856 		}
3857 
3858 		/*
3859 		 * this shouldn't happen, it means the last relocate
3860 		 * failed
3861 		 */
3862 		if (ret == 0)
3863 			BUG(); /* FIXME break ? */
3864 
3865 		ret = btrfs_previous_item(chunk_root, path, 0,
3866 					  BTRFS_CHUNK_ITEM_KEY);
3867 		if (ret) {
3868 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3869 			ret = 0;
3870 			break;
3871 		}
3872 
3873 		leaf = path->nodes[0];
3874 		slot = path->slots[0];
3875 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3876 
3877 		if (found_key.objectid != key.objectid) {
3878 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3879 			break;
3880 		}
3881 
3882 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3883 		chunk_type = btrfs_chunk_type(leaf, chunk);
3884 
3885 		if (!counting) {
3886 			spin_lock(&fs_info->balance_lock);
3887 			bctl->stat.considered++;
3888 			spin_unlock(&fs_info->balance_lock);
3889 		}
3890 
3891 		ret = should_balance_chunk(leaf, chunk, found_key.offset);
3892 
3893 		btrfs_release_path(path);
3894 		if (!ret) {
3895 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3896 			goto loop;
3897 		}
3898 
3899 		if (counting) {
3900 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3901 			spin_lock(&fs_info->balance_lock);
3902 			bctl->stat.expected++;
3903 			spin_unlock(&fs_info->balance_lock);
3904 
3905 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3906 				count_data++;
3907 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3908 				count_sys++;
3909 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3910 				count_meta++;
3911 
3912 			goto loop;
3913 		}
3914 
3915 		/*
3916 		 * Apply limit_min filter, no need to check if the LIMITS
3917 		 * filter is used, limit_min is 0 by default
3918 		 */
3919 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3920 					count_data < bctl->data.limit_min)
3921 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3922 					count_meta < bctl->meta.limit_min)
3923 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3924 					count_sys < bctl->sys.limit_min)) {
3925 			mutex_unlock(&fs_info->reclaim_bgs_lock);
3926 			goto loop;
3927 		}
3928 
3929 		if (!chunk_reserved) {
3930 			/*
3931 			 * We may be relocating the only data chunk we have,
3932 			 * which could potentially end up with losing data's
3933 			 * raid profile, so lets allocate an empty one in
3934 			 * advance.
3935 			 */
3936 			ret = btrfs_may_alloc_data_chunk(fs_info,
3937 							 found_key.offset);
3938 			if (ret < 0) {
3939 				mutex_unlock(&fs_info->reclaim_bgs_lock);
3940 				goto error;
3941 			} else if (ret == 1) {
3942 				chunk_reserved = 1;
3943 			}
3944 		}
3945 
3946 		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3947 		mutex_unlock(&fs_info->reclaim_bgs_lock);
3948 		if (ret == -ENOSPC) {
3949 			enospc_errors++;
3950 		} else if (ret == -ETXTBSY) {
3951 			btrfs_info(fs_info,
3952 	   "skipping relocation of block group %llu due to active swapfile",
3953 				   found_key.offset);
3954 			ret = 0;
3955 		} else if (ret) {
3956 			goto error;
3957 		} else {
3958 			spin_lock(&fs_info->balance_lock);
3959 			bctl->stat.completed++;
3960 			spin_unlock(&fs_info->balance_lock);
3961 		}
3962 loop:
3963 		if (found_key.offset == 0)
3964 			break;
3965 		key.offset = found_key.offset - 1;
3966 	}
3967 
3968 	if (counting) {
3969 		btrfs_release_path(path);
3970 		counting = false;
3971 		goto again;
3972 	}
3973 error:
3974 	btrfs_free_path(path);
3975 	if (enospc_errors) {
3976 		btrfs_info(fs_info, "%d enospc errors during balance",
3977 			   enospc_errors);
3978 		if (!ret)
3979 			ret = -ENOSPC;
3980 	}
3981 
3982 	return ret;
3983 }
3984 
3985 /**
3986  * alloc_profile_is_valid - see if a given profile is valid and reduced
3987  * @flags: profile to validate
3988  * @extended: if true @flags is treated as an extended profile
3989  */
3990 static int alloc_profile_is_valid(u64 flags, int extended)
3991 {
3992 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3993 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3994 
3995 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3996 
3997 	/* 1) check that all other bits are zeroed */
3998 	if (flags & ~mask)
3999 		return 0;
4000 
4001 	/* 2) see if profile is reduced */
4002 	if (flags == 0)
4003 		return !extended; /* "0" is valid for usual profiles */
4004 
4005 	return has_single_bit_set(flags);
4006 }
4007 
4008 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4009 {
4010 	/* cancel requested || normal exit path */
4011 	return atomic_read(&fs_info->balance_cancel_req) ||
4012 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
4013 		 atomic_read(&fs_info->balance_cancel_req) == 0);
4014 }
4015 
4016 /*
4017  * Validate target profile against allowed profiles and return true if it's OK.
4018  * Otherwise print the error message and return false.
4019  */
4020 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4021 		const struct btrfs_balance_args *bargs,
4022 		u64 allowed, const char *type)
4023 {
4024 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4025 		return true;
4026 
4027 	/* Profile is valid and does not have bits outside of the allowed set */
4028 	if (alloc_profile_is_valid(bargs->target, 1) &&
4029 	    (bargs->target & ~allowed) == 0)
4030 		return true;
4031 
4032 	btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4033 			type, btrfs_bg_type_to_raid_name(bargs->target));
4034 	return false;
4035 }
4036 
4037 /*
4038  * Fill @buf with textual description of balance filter flags @bargs, up to
4039  * @size_buf including the terminating null. The output may be trimmed if it
4040  * does not fit into the provided buffer.
4041  */
4042 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4043 				 u32 size_buf)
4044 {
4045 	int ret;
4046 	u32 size_bp = size_buf;
4047 	char *bp = buf;
4048 	u64 flags = bargs->flags;
4049 	char tmp_buf[128] = {'\0'};
4050 
4051 	if (!flags)
4052 		return;
4053 
4054 #define CHECK_APPEND_NOARG(a)						\
4055 	do {								\
4056 		ret = snprintf(bp, size_bp, (a));			\
4057 		if (ret < 0 || ret >= size_bp)				\
4058 			goto out_overflow;				\
4059 		size_bp -= ret;						\
4060 		bp += ret;						\
4061 	} while (0)
4062 
4063 #define CHECK_APPEND_1ARG(a, v1)					\
4064 	do {								\
4065 		ret = snprintf(bp, size_bp, (a), (v1));			\
4066 		if (ret < 0 || ret >= size_bp)				\
4067 			goto out_overflow;				\
4068 		size_bp -= ret;						\
4069 		bp += ret;						\
4070 	} while (0)
4071 
4072 #define CHECK_APPEND_2ARG(a, v1, v2)					\
4073 	do {								\
4074 		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
4075 		if (ret < 0 || ret >= size_bp)				\
4076 			goto out_overflow;				\
4077 		size_bp -= ret;						\
4078 		bp += ret;						\
4079 	} while (0)
4080 
4081 	if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4082 		CHECK_APPEND_1ARG("convert=%s,",
4083 				  btrfs_bg_type_to_raid_name(bargs->target));
4084 
4085 	if (flags & BTRFS_BALANCE_ARGS_SOFT)
4086 		CHECK_APPEND_NOARG("soft,");
4087 
4088 	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4089 		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4090 					    sizeof(tmp_buf));
4091 		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4092 	}
4093 
4094 	if (flags & BTRFS_BALANCE_ARGS_USAGE)
4095 		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4096 
4097 	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4098 		CHECK_APPEND_2ARG("usage=%u..%u,",
4099 				  bargs->usage_min, bargs->usage_max);
4100 
4101 	if (flags & BTRFS_BALANCE_ARGS_DEVID)
4102 		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4103 
4104 	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4105 		CHECK_APPEND_2ARG("drange=%llu..%llu,",
4106 				  bargs->pstart, bargs->pend);
4107 
4108 	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4109 		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4110 				  bargs->vstart, bargs->vend);
4111 
4112 	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4113 		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4114 
4115 	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4116 		CHECK_APPEND_2ARG("limit=%u..%u,",
4117 				bargs->limit_min, bargs->limit_max);
4118 
4119 	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4120 		CHECK_APPEND_2ARG("stripes=%u..%u,",
4121 				  bargs->stripes_min, bargs->stripes_max);
4122 
4123 #undef CHECK_APPEND_2ARG
4124 #undef CHECK_APPEND_1ARG
4125 #undef CHECK_APPEND_NOARG
4126 
4127 out_overflow:
4128 
4129 	if (size_bp < size_buf)
4130 		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4131 	else
4132 		buf[0] = '\0';
4133 }
4134 
4135 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4136 {
4137 	u32 size_buf = 1024;
4138 	char tmp_buf[192] = {'\0'};
4139 	char *buf;
4140 	char *bp;
4141 	u32 size_bp = size_buf;
4142 	int ret;
4143 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4144 
4145 	buf = kzalloc(size_buf, GFP_KERNEL);
4146 	if (!buf)
4147 		return;
4148 
4149 	bp = buf;
4150 
4151 #define CHECK_APPEND_1ARG(a, v1)					\
4152 	do {								\
4153 		ret = snprintf(bp, size_bp, (a), (v1));			\
4154 		if (ret < 0 || ret >= size_bp)				\
4155 			goto out_overflow;				\
4156 		size_bp -= ret;						\
4157 		bp += ret;						\
4158 	} while (0)
4159 
4160 	if (bctl->flags & BTRFS_BALANCE_FORCE)
4161 		CHECK_APPEND_1ARG("%s", "-f ");
4162 
4163 	if (bctl->flags & BTRFS_BALANCE_DATA) {
4164 		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4165 		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4166 	}
4167 
4168 	if (bctl->flags & BTRFS_BALANCE_METADATA) {
4169 		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4170 		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4171 	}
4172 
4173 	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4174 		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4175 		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4176 	}
4177 
4178 #undef CHECK_APPEND_1ARG
4179 
4180 out_overflow:
4181 
4182 	if (size_bp < size_buf)
4183 		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4184 	btrfs_info(fs_info, "balance: %s %s",
4185 		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
4186 		   "resume" : "start", buf);
4187 
4188 	kfree(buf);
4189 }
4190 
4191 /*
4192  * Should be called with balance mutexe held
4193  */
4194 int btrfs_balance(struct btrfs_fs_info *fs_info,
4195 		  struct btrfs_balance_control *bctl,
4196 		  struct btrfs_ioctl_balance_args *bargs)
4197 {
4198 	u64 meta_target, data_target;
4199 	u64 allowed;
4200 	int mixed = 0;
4201 	int ret;
4202 	u64 num_devices;
4203 	unsigned seq;
4204 	bool reducing_redundancy;
4205 	int i;
4206 
4207 	if (btrfs_fs_closing(fs_info) ||
4208 	    atomic_read(&fs_info->balance_pause_req) ||
4209 	    btrfs_should_cancel_balance(fs_info)) {
4210 		ret = -EINVAL;
4211 		goto out;
4212 	}
4213 
4214 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4215 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4216 		mixed = 1;
4217 
4218 	/*
4219 	 * In case of mixed groups both data and meta should be picked,
4220 	 * and identical options should be given for both of them.
4221 	 */
4222 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4223 	if (mixed && (bctl->flags & allowed)) {
4224 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4225 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4226 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4227 			btrfs_err(fs_info,
4228 	  "balance: mixed groups data and metadata options must be the same");
4229 			ret = -EINVAL;
4230 			goto out;
4231 		}
4232 	}
4233 
4234 	/*
4235 	 * rw_devices will not change at the moment, device add/delete/replace
4236 	 * are exclusive
4237 	 */
4238 	num_devices = fs_info->fs_devices->rw_devices;
4239 
4240 	/*
4241 	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4242 	 * special bit for it, to make it easier to distinguish.  Thus we need
4243 	 * to set it manually, or balance would refuse the profile.
4244 	 */
4245 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4246 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4247 		if (num_devices >= btrfs_raid_array[i].devs_min)
4248 			allowed |= btrfs_raid_array[i].bg_flag;
4249 
4250 	if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4251 	    !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4252 	    !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4253 		ret = -EINVAL;
4254 		goto out;
4255 	}
4256 
4257 	/*
4258 	 * Allow to reduce metadata or system integrity only if force set for
4259 	 * profiles with redundancy (copies, parity)
4260 	 */
4261 	allowed = 0;
4262 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4263 		if (btrfs_raid_array[i].ncopies >= 2 ||
4264 		    btrfs_raid_array[i].tolerated_failures >= 1)
4265 			allowed |= btrfs_raid_array[i].bg_flag;
4266 	}
4267 	do {
4268 		seq = read_seqbegin(&fs_info->profiles_lock);
4269 
4270 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4271 		     (fs_info->avail_system_alloc_bits & allowed) &&
4272 		     !(bctl->sys.target & allowed)) ||
4273 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4274 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
4275 		     !(bctl->meta.target & allowed)))
4276 			reducing_redundancy = true;
4277 		else
4278 			reducing_redundancy = false;
4279 
4280 		/* if we're not converting, the target field is uninitialized */
4281 		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4282 			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4283 		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4284 			bctl->data.target : fs_info->avail_data_alloc_bits;
4285 	} while (read_seqretry(&fs_info->profiles_lock, seq));
4286 
4287 	if (reducing_redundancy) {
4288 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
4289 			btrfs_info(fs_info,
4290 			   "balance: force reducing metadata redundancy");
4291 		} else {
4292 			btrfs_err(fs_info,
4293 	"balance: reduces metadata redundancy, use --force if you want this");
4294 			ret = -EINVAL;
4295 			goto out;
4296 		}
4297 	}
4298 
4299 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4300 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4301 		btrfs_warn(fs_info,
4302 	"balance: metadata profile %s has lower redundancy than data profile %s",
4303 				btrfs_bg_type_to_raid_name(meta_target),
4304 				btrfs_bg_type_to_raid_name(data_target));
4305 	}
4306 
4307 	ret = insert_balance_item(fs_info, bctl);
4308 	if (ret && ret != -EEXIST)
4309 		goto out;
4310 
4311 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4312 		BUG_ON(ret == -EEXIST);
4313 		BUG_ON(fs_info->balance_ctl);
4314 		spin_lock(&fs_info->balance_lock);
4315 		fs_info->balance_ctl = bctl;
4316 		spin_unlock(&fs_info->balance_lock);
4317 	} else {
4318 		BUG_ON(ret != -EEXIST);
4319 		spin_lock(&fs_info->balance_lock);
4320 		update_balance_args(bctl);
4321 		spin_unlock(&fs_info->balance_lock);
4322 	}
4323 
4324 	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4325 	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4326 	describe_balance_start_or_resume(fs_info);
4327 	mutex_unlock(&fs_info->balance_mutex);
4328 
4329 	ret = __btrfs_balance(fs_info);
4330 
4331 	mutex_lock(&fs_info->balance_mutex);
4332 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4333 		btrfs_info(fs_info, "balance: paused");
4334 	/*
4335 	 * Balance can be canceled by:
4336 	 *
4337 	 * - Regular cancel request
4338 	 *   Then ret == -ECANCELED and balance_cancel_req > 0
4339 	 *
4340 	 * - Fatal signal to "btrfs" process
4341 	 *   Either the signal caught by wait_reserve_ticket() and callers
4342 	 *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4343 	 *   got -ECANCELED.
4344 	 *   Either way, in this case balance_cancel_req = 0, and
4345 	 *   ret == -EINTR or ret == -ECANCELED.
4346 	 *
4347 	 * So here we only check the return value to catch canceled balance.
4348 	 */
4349 	else if (ret == -ECANCELED || ret == -EINTR)
4350 		btrfs_info(fs_info, "balance: canceled");
4351 	else
4352 		btrfs_info(fs_info, "balance: ended with status: %d", ret);
4353 
4354 	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4355 
4356 	if (bargs) {
4357 		memset(bargs, 0, sizeof(*bargs));
4358 		btrfs_update_ioctl_balance_args(fs_info, bargs);
4359 	}
4360 
4361 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4362 	    balance_need_close(fs_info)) {
4363 		reset_balance_state(fs_info);
4364 		btrfs_exclop_finish(fs_info);
4365 	}
4366 
4367 	wake_up(&fs_info->balance_wait_q);
4368 
4369 	return ret;
4370 out:
4371 	if (bctl->flags & BTRFS_BALANCE_RESUME)
4372 		reset_balance_state(fs_info);
4373 	else
4374 		kfree(bctl);
4375 	btrfs_exclop_finish(fs_info);
4376 
4377 	return ret;
4378 }
4379 
4380 static int balance_kthread(void *data)
4381 {
4382 	struct btrfs_fs_info *fs_info = data;
4383 	int ret = 0;
4384 
4385 	mutex_lock(&fs_info->balance_mutex);
4386 	if (fs_info->balance_ctl)
4387 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4388 	mutex_unlock(&fs_info->balance_mutex);
4389 
4390 	return ret;
4391 }
4392 
4393 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4394 {
4395 	struct task_struct *tsk;
4396 
4397 	mutex_lock(&fs_info->balance_mutex);
4398 	if (!fs_info->balance_ctl) {
4399 		mutex_unlock(&fs_info->balance_mutex);
4400 		return 0;
4401 	}
4402 	mutex_unlock(&fs_info->balance_mutex);
4403 
4404 	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4405 		btrfs_info(fs_info, "balance: resume skipped");
4406 		return 0;
4407 	}
4408 
4409 	/*
4410 	 * A ro->rw remount sequence should continue with the paused balance
4411 	 * regardless of who pauses it, system or the user as of now, so set
4412 	 * the resume flag.
4413 	 */
4414 	spin_lock(&fs_info->balance_lock);
4415 	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4416 	spin_unlock(&fs_info->balance_lock);
4417 
4418 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4419 	return PTR_ERR_OR_ZERO(tsk);
4420 }
4421 
4422 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4423 {
4424 	struct btrfs_balance_control *bctl;
4425 	struct btrfs_balance_item *item;
4426 	struct btrfs_disk_balance_args disk_bargs;
4427 	struct btrfs_path *path;
4428 	struct extent_buffer *leaf;
4429 	struct btrfs_key key;
4430 	int ret;
4431 
4432 	path = btrfs_alloc_path();
4433 	if (!path)
4434 		return -ENOMEM;
4435 
4436 	key.objectid = BTRFS_BALANCE_OBJECTID;
4437 	key.type = BTRFS_TEMPORARY_ITEM_KEY;
4438 	key.offset = 0;
4439 
4440 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4441 	if (ret < 0)
4442 		goto out;
4443 	if (ret > 0) { /* ret = -ENOENT; */
4444 		ret = 0;
4445 		goto out;
4446 	}
4447 
4448 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4449 	if (!bctl) {
4450 		ret = -ENOMEM;
4451 		goto out;
4452 	}
4453 
4454 	leaf = path->nodes[0];
4455 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4456 
4457 	bctl->flags = btrfs_balance_flags(leaf, item);
4458 	bctl->flags |= BTRFS_BALANCE_RESUME;
4459 
4460 	btrfs_balance_data(leaf, item, &disk_bargs);
4461 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4462 	btrfs_balance_meta(leaf, item, &disk_bargs);
4463 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4464 	btrfs_balance_sys(leaf, item, &disk_bargs);
4465 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4466 
4467 	/*
4468 	 * This should never happen, as the paused balance state is recovered
4469 	 * during mount without any chance of other exclusive ops to collide.
4470 	 *
4471 	 * This gives the exclusive op status to balance and keeps in paused
4472 	 * state until user intervention (cancel or umount). If the ownership
4473 	 * cannot be assigned, show a message but do not fail. The balance
4474 	 * is in a paused state and must have fs_info::balance_ctl properly
4475 	 * set up.
4476 	 */
4477 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4478 		btrfs_warn(fs_info,
4479 	"balance: cannot set exclusive op status, resume manually");
4480 
4481 	btrfs_release_path(path);
4482 
4483 	mutex_lock(&fs_info->balance_mutex);
4484 	BUG_ON(fs_info->balance_ctl);
4485 	spin_lock(&fs_info->balance_lock);
4486 	fs_info->balance_ctl = bctl;
4487 	spin_unlock(&fs_info->balance_lock);
4488 	mutex_unlock(&fs_info->balance_mutex);
4489 out:
4490 	btrfs_free_path(path);
4491 	return ret;
4492 }
4493 
4494 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4495 {
4496 	int ret = 0;
4497 
4498 	mutex_lock(&fs_info->balance_mutex);
4499 	if (!fs_info->balance_ctl) {
4500 		mutex_unlock(&fs_info->balance_mutex);
4501 		return -ENOTCONN;
4502 	}
4503 
4504 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4505 		atomic_inc(&fs_info->balance_pause_req);
4506 		mutex_unlock(&fs_info->balance_mutex);
4507 
4508 		wait_event(fs_info->balance_wait_q,
4509 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4510 
4511 		mutex_lock(&fs_info->balance_mutex);
4512 		/* we are good with balance_ctl ripped off from under us */
4513 		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4514 		atomic_dec(&fs_info->balance_pause_req);
4515 	} else {
4516 		ret = -ENOTCONN;
4517 	}
4518 
4519 	mutex_unlock(&fs_info->balance_mutex);
4520 	return ret;
4521 }
4522 
4523 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4524 {
4525 	mutex_lock(&fs_info->balance_mutex);
4526 	if (!fs_info->balance_ctl) {
4527 		mutex_unlock(&fs_info->balance_mutex);
4528 		return -ENOTCONN;
4529 	}
4530 
4531 	/*
4532 	 * A paused balance with the item stored on disk can be resumed at
4533 	 * mount time if the mount is read-write. Otherwise it's still paused
4534 	 * and we must not allow cancelling as it deletes the item.
4535 	 */
4536 	if (sb_rdonly(fs_info->sb)) {
4537 		mutex_unlock(&fs_info->balance_mutex);
4538 		return -EROFS;
4539 	}
4540 
4541 	atomic_inc(&fs_info->balance_cancel_req);
4542 	/*
4543 	 * if we are running just wait and return, balance item is
4544 	 * deleted in btrfs_balance in this case
4545 	 */
4546 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4547 		mutex_unlock(&fs_info->balance_mutex);
4548 		wait_event(fs_info->balance_wait_q,
4549 			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4550 		mutex_lock(&fs_info->balance_mutex);
4551 	} else {
4552 		mutex_unlock(&fs_info->balance_mutex);
4553 		/*
4554 		 * Lock released to allow other waiters to continue, we'll
4555 		 * reexamine the status again.
4556 		 */
4557 		mutex_lock(&fs_info->balance_mutex);
4558 
4559 		if (fs_info->balance_ctl) {
4560 			reset_balance_state(fs_info);
4561 			btrfs_exclop_finish(fs_info);
4562 			btrfs_info(fs_info, "balance: canceled");
4563 		}
4564 	}
4565 
4566 	BUG_ON(fs_info->balance_ctl ||
4567 		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4568 	atomic_dec(&fs_info->balance_cancel_req);
4569 	mutex_unlock(&fs_info->balance_mutex);
4570 	return 0;
4571 }
4572 
4573 int btrfs_uuid_scan_kthread(void *data)
4574 {
4575 	struct btrfs_fs_info *fs_info = data;
4576 	struct btrfs_root *root = fs_info->tree_root;
4577 	struct btrfs_key key;
4578 	struct btrfs_path *path = NULL;
4579 	int ret = 0;
4580 	struct extent_buffer *eb;
4581 	int slot;
4582 	struct btrfs_root_item root_item;
4583 	u32 item_size;
4584 	struct btrfs_trans_handle *trans = NULL;
4585 	bool closing = false;
4586 
4587 	path = btrfs_alloc_path();
4588 	if (!path) {
4589 		ret = -ENOMEM;
4590 		goto out;
4591 	}
4592 
4593 	key.objectid = 0;
4594 	key.type = BTRFS_ROOT_ITEM_KEY;
4595 	key.offset = 0;
4596 
4597 	while (1) {
4598 		if (btrfs_fs_closing(fs_info)) {
4599 			closing = true;
4600 			break;
4601 		}
4602 		ret = btrfs_search_forward(root, &key, path,
4603 				BTRFS_OLDEST_GENERATION);
4604 		if (ret) {
4605 			if (ret > 0)
4606 				ret = 0;
4607 			break;
4608 		}
4609 
4610 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4611 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4612 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4613 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4614 			goto skip;
4615 
4616 		eb = path->nodes[0];
4617 		slot = path->slots[0];
4618 		item_size = btrfs_item_size_nr(eb, slot);
4619 		if (item_size < sizeof(root_item))
4620 			goto skip;
4621 
4622 		read_extent_buffer(eb, &root_item,
4623 				   btrfs_item_ptr_offset(eb, slot),
4624 				   (int)sizeof(root_item));
4625 		if (btrfs_root_refs(&root_item) == 0)
4626 			goto skip;
4627 
4628 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4629 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4630 			if (trans)
4631 				goto update_tree;
4632 
4633 			btrfs_release_path(path);
4634 			/*
4635 			 * 1 - subvol uuid item
4636 			 * 1 - received_subvol uuid item
4637 			 */
4638 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4639 			if (IS_ERR(trans)) {
4640 				ret = PTR_ERR(trans);
4641 				break;
4642 			}
4643 			continue;
4644 		} else {
4645 			goto skip;
4646 		}
4647 update_tree:
4648 		btrfs_release_path(path);
4649 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4650 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4651 						  BTRFS_UUID_KEY_SUBVOL,
4652 						  key.objectid);
4653 			if (ret < 0) {
4654 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4655 					ret);
4656 				break;
4657 			}
4658 		}
4659 
4660 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4661 			ret = btrfs_uuid_tree_add(trans,
4662 						  root_item.received_uuid,
4663 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4664 						  key.objectid);
4665 			if (ret < 0) {
4666 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4667 					ret);
4668 				break;
4669 			}
4670 		}
4671 
4672 skip:
4673 		btrfs_release_path(path);
4674 		if (trans) {
4675 			ret = btrfs_end_transaction(trans);
4676 			trans = NULL;
4677 			if (ret)
4678 				break;
4679 		}
4680 
4681 		if (key.offset < (u64)-1) {
4682 			key.offset++;
4683 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4684 			key.offset = 0;
4685 			key.type = BTRFS_ROOT_ITEM_KEY;
4686 		} else if (key.objectid < (u64)-1) {
4687 			key.offset = 0;
4688 			key.type = BTRFS_ROOT_ITEM_KEY;
4689 			key.objectid++;
4690 		} else {
4691 			break;
4692 		}
4693 		cond_resched();
4694 	}
4695 
4696 out:
4697 	btrfs_free_path(path);
4698 	if (trans && !IS_ERR(trans))
4699 		btrfs_end_transaction(trans);
4700 	if (ret)
4701 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4702 	else if (!closing)
4703 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4704 	up(&fs_info->uuid_tree_rescan_sem);
4705 	return 0;
4706 }
4707 
4708 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4709 {
4710 	struct btrfs_trans_handle *trans;
4711 	struct btrfs_root *tree_root = fs_info->tree_root;
4712 	struct btrfs_root *uuid_root;
4713 	struct task_struct *task;
4714 	int ret;
4715 
4716 	/*
4717 	 * 1 - root node
4718 	 * 1 - root item
4719 	 */
4720 	trans = btrfs_start_transaction(tree_root, 2);
4721 	if (IS_ERR(trans))
4722 		return PTR_ERR(trans);
4723 
4724 	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4725 	if (IS_ERR(uuid_root)) {
4726 		ret = PTR_ERR(uuid_root);
4727 		btrfs_abort_transaction(trans, ret);
4728 		btrfs_end_transaction(trans);
4729 		return ret;
4730 	}
4731 
4732 	fs_info->uuid_root = uuid_root;
4733 
4734 	ret = btrfs_commit_transaction(trans);
4735 	if (ret)
4736 		return ret;
4737 
4738 	down(&fs_info->uuid_tree_rescan_sem);
4739 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4740 	if (IS_ERR(task)) {
4741 		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
4742 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4743 		up(&fs_info->uuid_tree_rescan_sem);
4744 		return PTR_ERR(task);
4745 	}
4746 
4747 	return 0;
4748 }
4749 
4750 /*
4751  * shrinking a device means finding all of the device extents past
4752  * the new size, and then following the back refs to the chunks.
4753  * The chunk relocation code actually frees the device extent
4754  */
4755 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4756 {
4757 	struct btrfs_fs_info *fs_info = device->fs_info;
4758 	struct btrfs_root *root = fs_info->dev_root;
4759 	struct btrfs_trans_handle *trans;
4760 	struct btrfs_dev_extent *dev_extent = NULL;
4761 	struct btrfs_path *path;
4762 	u64 length;
4763 	u64 chunk_offset;
4764 	int ret;
4765 	int slot;
4766 	int failed = 0;
4767 	bool retried = false;
4768 	struct extent_buffer *l;
4769 	struct btrfs_key key;
4770 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4771 	u64 old_total = btrfs_super_total_bytes(super_copy);
4772 	u64 old_size = btrfs_device_get_total_bytes(device);
4773 	u64 diff;
4774 	u64 start;
4775 
4776 	new_size = round_down(new_size, fs_info->sectorsize);
4777 	start = new_size;
4778 	diff = round_down(old_size - new_size, fs_info->sectorsize);
4779 
4780 	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4781 		return -EINVAL;
4782 
4783 	path = btrfs_alloc_path();
4784 	if (!path)
4785 		return -ENOMEM;
4786 
4787 	path->reada = READA_BACK;
4788 
4789 	trans = btrfs_start_transaction(root, 0);
4790 	if (IS_ERR(trans)) {
4791 		btrfs_free_path(path);
4792 		return PTR_ERR(trans);
4793 	}
4794 
4795 	mutex_lock(&fs_info->chunk_mutex);
4796 
4797 	btrfs_device_set_total_bytes(device, new_size);
4798 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4799 		device->fs_devices->total_rw_bytes -= diff;
4800 		atomic64_sub(diff, &fs_info->free_chunk_space);
4801 	}
4802 
4803 	/*
4804 	 * Once the device's size has been set to the new size, ensure all
4805 	 * in-memory chunks are synced to disk so that the loop below sees them
4806 	 * and relocates them accordingly.
4807 	 */
4808 	if (contains_pending_extent(device, &start, diff)) {
4809 		mutex_unlock(&fs_info->chunk_mutex);
4810 		ret = btrfs_commit_transaction(trans);
4811 		if (ret)
4812 			goto done;
4813 	} else {
4814 		mutex_unlock(&fs_info->chunk_mutex);
4815 		btrfs_end_transaction(trans);
4816 	}
4817 
4818 again:
4819 	key.objectid = device->devid;
4820 	key.offset = (u64)-1;
4821 	key.type = BTRFS_DEV_EXTENT_KEY;
4822 
4823 	do {
4824 		mutex_lock(&fs_info->reclaim_bgs_lock);
4825 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4826 		if (ret < 0) {
4827 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4828 			goto done;
4829 		}
4830 
4831 		ret = btrfs_previous_item(root, path, 0, key.type);
4832 		if (ret) {
4833 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4834 			if (ret < 0)
4835 				goto done;
4836 			ret = 0;
4837 			btrfs_release_path(path);
4838 			break;
4839 		}
4840 
4841 		l = path->nodes[0];
4842 		slot = path->slots[0];
4843 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4844 
4845 		if (key.objectid != device->devid) {
4846 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4847 			btrfs_release_path(path);
4848 			break;
4849 		}
4850 
4851 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4852 		length = btrfs_dev_extent_length(l, dev_extent);
4853 
4854 		if (key.offset + length <= new_size) {
4855 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4856 			btrfs_release_path(path);
4857 			break;
4858 		}
4859 
4860 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4861 		btrfs_release_path(path);
4862 
4863 		/*
4864 		 * We may be relocating the only data chunk we have,
4865 		 * which could potentially end up with losing data's
4866 		 * raid profile, so lets allocate an empty one in
4867 		 * advance.
4868 		 */
4869 		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4870 		if (ret < 0) {
4871 			mutex_unlock(&fs_info->reclaim_bgs_lock);
4872 			goto done;
4873 		}
4874 
4875 		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4876 		mutex_unlock(&fs_info->reclaim_bgs_lock);
4877 		if (ret == -ENOSPC) {
4878 			failed++;
4879 		} else if (ret) {
4880 			if (ret == -ETXTBSY) {
4881 				btrfs_warn(fs_info,
4882 		   "could not shrink block group %llu due to active swapfile",
4883 					   chunk_offset);
4884 			}
4885 			goto done;
4886 		}
4887 	} while (key.offset-- > 0);
4888 
4889 	if (failed && !retried) {
4890 		failed = 0;
4891 		retried = true;
4892 		goto again;
4893 	} else if (failed && retried) {
4894 		ret = -ENOSPC;
4895 		goto done;
4896 	}
4897 
4898 	/* Shrinking succeeded, else we would be at "done". */
4899 	trans = btrfs_start_transaction(root, 0);
4900 	if (IS_ERR(trans)) {
4901 		ret = PTR_ERR(trans);
4902 		goto done;
4903 	}
4904 
4905 	mutex_lock(&fs_info->chunk_mutex);
4906 	/* Clear all state bits beyond the shrunk device size */
4907 	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4908 			  CHUNK_STATE_MASK);
4909 
4910 	btrfs_device_set_disk_total_bytes(device, new_size);
4911 	if (list_empty(&device->post_commit_list))
4912 		list_add_tail(&device->post_commit_list,
4913 			      &trans->transaction->dev_update_list);
4914 
4915 	WARN_ON(diff > old_total);
4916 	btrfs_set_super_total_bytes(super_copy,
4917 			round_down(old_total - diff, fs_info->sectorsize));
4918 	mutex_unlock(&fs_info->chunk_mutex);
4919 
4920 	/* Now btrfs_update_device() will change the on-disk size. */
4921 	ret = btrfs_update_device(trans, device);
4922 	if (ret < 0) {
4923 		btrfs_abort_transaction(trans, ret);
4924 		btrfs_end_transaction(trans);
4925 	} else {
4926 		ret = btrfs_commit_transaction(trans);
4927 	}
4928 done:
4929 	btrfs_free_path(path);
4930 	if (ret) {
4931 		mutex_lock(&fs_info->chunk_mutex);
4932 		btrfs_device_set_total_bytes(device, old_size);
4933 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4934 			device->fs_devices->total_rw_bytes += diff;
4935 		atomic64_add(diff, &fs_info->free_chunk_space);
4936 		mutex_unlock(&fs_info->chunk_mutex);
4937 	}
4938 	return ret;
4939 }
4940 
4941 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4942 			   struct btrfs_key *key,
4943 			   struct btrfs_chunk *chunk, int item_size)
4944 {
4945 	struct btrfs_super_block *super_copy = fs_info->super_copy;
4946 	struct btrfs_disk_key disk_key;
4947 	u32 array_size;
4948 	u8 *ptr;
4949 
4950 	lockdep_assert_held(&fs_info->chunk_mutex);
4951 
4952 	array_size = btrfs_super_sys_array_size(super_copy);
4953 	if (array_size + item_size + sizeof(disk_key)
4954 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
4955 		return -EFBIG;
4956 
4957 	ptr = super_copy->sys_chunk_array + array_size;
4958 	btrfs_cpu_key_to_disk(&disk_key, key);
4959 	memcpy(ptr, &disk_key, sizeof(disk_key));
4960 	ptr += sizeof(disk_key);
4961 	memcpy(ptr, chunk, item_size);
4962 	item_size += sizeof(disk_key);
4963 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4964 
4965 	return 0;
4966 }
4967 
4968 /*
4969  * sort the devices in descending order by max_avail, total_avail
4970  */
4971 static int btrfs_cmp_device_info(const void *a, const void *b)
4972 {
4973 	const struct btrfs_device_info *di_a = a;
4974 	const struct btrfs_device_info *di_b = b;
4975 
4976 	if (di_a->max_avail > di_b->max_avail)
4977 		return -1;
4978 	if (di_a->max_avail < di_b->max_avail)
4979 		return 1;
4980 	if (di_a->total_avail > di_b->total_avail)
4981 		return -1;
4982 	if (di_a->total_avail < di_b->total_avail)
4983 		return 1;
4984 	return 0;
4985 }
4986 
4987 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4988 {
4989 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4990 		return;
4991 
4992 	btrfs_set_fs_incompat(info, RAID56);
4993 }
4994 
4995 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4996 {
4997 	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4998 		return;
4999 
5000 	btrfs_set_fs_incompat(info, RAID1C34);
5001 }
5002 
5003 /*
5004  * Structure used internally for __btrfs_alloc_chunk() function.
5005  * Wraps needed parameters.
5006  */
5007 struct alloc_chunk_ctl {
5008 	u64 start;
5009 	u64 type;
5010 	/* Total number of stripes to allocate */
5011 	int num_stripes;
5012 	/* sub_stripes info for map */
5013 	int sub_stripes;
5014 	/* Stripes per device */
5015 	int dev_stripes;
5016 	/* Maximum number of devices to use */
5017 	int devs_max;
5018 	/* Minimum number of devices to use */
5019 	int devs_min;
5020 	/* ndevs has to be a multiple of this */
5021 	int devs_increment;
5022 	/* Number of copies */
5023 	int ncopies;
5024 	/* Number of stripes worth of bytes to store parity information */
5025 	int nparity;
5026 	u64 max_stripe_size;
5027 	u64 max_chunk_size;
5028 	u64 dev_extent_min;
5029 	u64 stripe_size;
5030 	u64 chunk_size;
5031 	int ndevs;
5032 };
5033 
5034 static void init_alloc_chunk_ctl_policy_regular(
5035 				struct btrfs_fs_devices *fs_devices,
5036 				struct alloc_chunk_ctl *ctl)
5037 {
5038 	u64 type = ctl->type;
5039 
5040 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5041 		ctl->max_stripe_size = SZ_1G;
5042 		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5043 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5044 		/* For larger filesystems, use larger metadata chunks */
5045 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5046 			ctl->max_stripe_size = SZ_1G;
5047 		else
5048 			ctl->max_stripe_size = SZ_256M;
5049 		ctl->max_chunk_size = ctl->max_stripe_size;
5050 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5051 		ctl->max_stripe_size = SZ_32M;
5052 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5053 		ctl->devs_max = min_t(int, ctl->devs_max,
5054 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5055 	} else {
5056 		BUG();
5057 	}
5058 
5059 	/* We don't want a chunk larger than 10% of writable space */
5060 	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5061 				  ctl->max_chunk_size);
5062 	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5063 }
5064 
5065 static void init_alloc_chunk_ctl_policy_zoned(
5066 				      struct btrfs_fs_devices *fs_devices,
5067 				      struct alloc_chunk_ctl *ctl)
5068 {
5069 	u64 zone_size = fs_devices->fs_info->zone_size;
5070 	u64 limit;
5071 	int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5072 	int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5073 	u64 min_chunk_size = min_data_stripes * zone_size;
5074 	u64 type = ctl->type;
5075 
5076 	ctl->max_stripe_size = zone_size;
5077 	if (type & BTRFS_BLOCK_GROUP_DATA) {
5078 		ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5079 						 zone_size);
5080 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5081 		ctl->max_chunk_size = ctl->max_stripe_size;
5082 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5083 		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5084 		ctl->devs_max = min_t(int, ctl->devs_max,
5085 				      BTRFS_MAX_DEVS_SYS_CHUNK);
5086 	} else {
5087 		BUG();
5088 	}
5089 
5090 	/* We don't want a chunk larger than 10% of writable space */
5091 	limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5092 			       zone_size),
5093 		    min_chunk_size);
5094 	ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5095 	ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5096 }
5097 
5098 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5099 				 struct alloc_chunk_ctl *ctl)
5100 {
5101 	int index = btrfs_bg_flags_to_raid_index(ctl->type);
5102 
5103 	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5104 	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5105 	ctl->devs_max = btrfs_raid_array[index].devs_max;
5106 	if (!ctl->devs_max)
5107 		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5108 	ctl->devs_min = btrfs_raid_array[index].devs_min;
5109 	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5110 	ctl->ncopies = btrfs_raid_array[index].ncopies;
5111 	ctl->nparity = btrfs_raid_array[index].nparity;
5112 	ctl->ndevs = 0;
5113 
5114 	switch (fs_devices->chunk_alloc_policy) {
5115 	case BTRFS_CHUNK_ALLOC_REGULAR:
5116 		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5117 		break;
5118 	case BTRFS_CHUNK_ALLOC_ZONED:
5119 		init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5120 		break;
5121 	default:
5122 		BUG();
5123 	}
5124 }
5125 
5126 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5127 			      struct alloc_chunk_ctl *ctl,
5128 			      struct btrfs_device_info *devices_info)
5129 {
5130 	struct btrfs_fs_info *info = fs_devices->fs_info;
5131 	struct btrfs_device *device;
5132 	u64 total_avail;
5133 	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5134 	int ret;
5135 	int ndevs = 0;
5136 	u64 max_avail;
5137 	u64 dev_offset;
5138 
5139 	/*
5140 	 * in the first pass through the devices list, we gather information
5141 	 * about the available holes on each device.
5142 	 */
5143 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5144 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5145 			WARN(1, KERN_ERR
5146 			       "BTRFS: read-only device in alloc_list\n");
5147 			continue;
5148 		}
5149 
5150 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5151 					&device->dev_state) ||
5152 		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5153 			continue;
5154 
5155 		if (device->total_bytes > device->bytes_used)
5156 			total_avail = device->total_bytes - device->bytes_used;
5157 		else
5158 			total_avail = 0;
5159 
5160 		/* If there is no space on this device, skip it. */
5161 		if (total_avail < ctl->dev_extent_min)
5162 			continue;
5163 
5164 		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5165 					   &max_avail);
5166 		if (ret && ret != -ENOSPC)
5167 			return ret;
5168 
5169 		if (ret == 0)
5170 			max_avail = dev_extent_want;
5171 
5172 		if (max_avail < ctl->dev_extent_min) {
5173 			if (btrfs_test_opt(info, ENOSPC_DEBUG))
5174 				btrfs_debug(info,
5175 			"%s: devid %llu has no free space, have=%llu want=%llu",
5176 					    __func__, device->devid, max_avail,
5177 					    ctl->dev_extent_min);
5178 			continue;
5179 		}
5180 
5181 		if (ndevs == fs_devices->rw_devices) {
5182 			WARN(1, "%s: found more than %llu devices\n",
5183 			     __func__, fs_devices->rw_devices);
5184 			break;
5185 		}
5186 		devices_info[ndevs].dev_offset = dev_offset;
5187 		devices_info[ndevs].max_avail = max_avail;
5188 		devices_info[ndevs].total_avail = total_avail;
5189 		devices_info[ndevs].dev = device;
5190 		++ndevs;
5191 	}
5192 	ctl->ndevs = ndevs;
5193 
5194 	/*
5195 	 * now sort the devices by hole size / available space
5196 	 */
5197 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5198 	     btrfs_cmp_device_info, NULL);
5199 
5200 	return 0;
5201 }
5202 
5203 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5204 				      struct btrfs_device_info *devices_info)
5205 {
5206 	/* Number of stripes that count for block group size */
5207 	int data_stripes;
5208 
5209 	/*
5210 	 * The primary goal is to maximize the number of stripes, so use as
5211 	 * many devices as possible, even if the stripes are not maximum sized.
5212 	 *
5213 	 * The DUP profile stores more than one stripe per device, the
5214 	 * max_avail is the total size so we have to adjust.
5215 	 */
5216 	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5217 				   ctl->dev_stripes);
5218 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5219 
5220 	/* This will have to be fixed for RAID1 and RAID10 over more drives */
5221 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5222 
5223 	/*
5224 	 * Use the number of data stripes to figure out how big this chunk is
5225 	 * really going to be in terms of logical address space, and compare
5226 	 * that answer with the max chunk size. If it's higher, we try to
5227 	 * reduce stripe_size.
5228 	 */
5229 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5230 		/*
5231 		 * Reduce stripe_size, round it up to a 16MB boundary again and
5232 		 * then use it, unless it ends up being even bigger than the
5233 		 * previous value we had already.
5234 		 */
5235 		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5236 							data_stripes), SZ_16M),
5237 				       ctl->stripe_size);
5238 	}
5239 
5240 	/* Align to BTRFS_STRIPE_LEN */
5241 	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5242 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5243 
5244 	return 0;
5245 }
5246 
5247 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5248 				    struct btrfs_device_info *devices_info)
5249 {
5250 	u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5251 	/* Number of stripes that count for block group size */
5252 	int data_stripes;
5253 
5254 	/*
5255 	 * It should hold because:
5256 	 *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5257 	 */
5258 	ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5259 
5260 	ctl->stripe_size = zone_size;
5261 	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5262 	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5263 
5264 	/* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
5265 	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5266 		ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5267 					     ctl->stripe_size) + ctl->nparity,
5268 				     ctl->dev_stripes);
5269 		ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5270 		data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5271 		ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5272 	}
5273 
5274 	ctl->chunk_size = ctl->stripe_size * data_stripes;
5275 
5276 	return 0;
5277 }
5278 
5279 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5280 			      struct alloc_chunk_ctl *ctl,
5281 			      struct btrfs_device_info *devices_info)
5282 {
5283 	struct btrfs_fs_info *info = fs_devices->fs_info;
5284 
5285 	/*
5286 	 * Round down to number of usable stripes, devs_increment can be any
5287 	 * number so we can't use round_down() that requires power of 2, while
5288 	 * rounddown is safe.
5289 	 */
5290 	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5291 
5292 	if (ctl->ndevs < ctl->devs_min) {
5293 		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5294 			btrfs_debug(info,
5295 	"%s: not enough devices with free space: have=%d minimum required=%d",
5296 				    __func__, ctl->ndevs, ctl->devs_min);
5297 		}
5298 		return -ENOSPC;
5299 	}
5300 
5301 	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5302 
5303 	switch (fs_devices->chunk_alloc_policy) {
5304 	case BTRFS_CHUNK_ALLOC_REGULAR:
5305 		return decide_stripe_size_regular(ctl, devices_info);
5306 	case BTRFS_CHUNK_ALLOC_ZONED:
5307 		return decide_stripe_size_zoned(ctl, devices_info);
5308 	default:
5309 		BUG();
5310 	}
5311 }
5312 
5313 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5314 			struct alloc_chunk_ctl *ctl,
5315 			struct btrfs_device_info *devices_info)
5316 {
5317 	struct btrfs_fs_info *info = trans->fs_info;
5318 	struct map_lookup *map = NULL;
5319 	struct extent_map_tree *em_tree;
5320 	struct btrfs_block_group *block_group;
5321 	struct extent_map *em;
5322 	u64 start = ctl->start;
5323 	u64 type = ctl->type;
5324 	int ret;
5325 	int i;
5326 	int j;
5327 
5328 	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5329 	if (!map)
5330 		return ERR_PTR(-ENOMEM);
5331 	map->num_stripes = ctl->num_stripes;
5332 
5333 	for (i = 0; i < ctl->ndevs; ++i) {
5334 		for (j = 0; j < ctl->dev_stripes; ++j) {
5335 			int s = i * ctl->dev_stripes + j;
5336 			map->stripes[s].dev = devices_info[i].dev;
5337 			map->stripes[s].physical = devices_info[i].dev_offset +
5338 						   j * ctl->stripe_size;
5339 		}
5340 	}
5341 	map->stripe_len = BTRFS_STRIPE_LEN;
5342 	map->io_align = BTRFS_STRIPE_LEN;
5343 	map->io_width = BTRFS_STRIPE_LEN;
5344 	map->type = type;
5345 	map->sub_stripes = ctl->sub_stripes;
5346 
5347 	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5348 
5349 	em = alloc_extent_map();
5350 	if (!em) {
5351 		kfree(map);
5352 		return ERR_PTR(-ENOMEM);
5353 	}
5354 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5355 	em->map_lookup = map;
5356 	em->start = start;
5357 	em->len = ctl->chunk_size;
5358 	em->block_start = 0;
5359 	em->block_len = em->len;
5360 	em->orig_block_len = ctl->stripe_size;
5361 
5362 	em_tree = &info->mapping_tree;
5363 	write_lock(&em_tree->lock);
5364 	ret = add_extent_mapping(em_tree, em, 0);
5365 	if (ret) {
5366 		write_unlock(&em_tree->lock);
5367 		free_extent_map(em);
5368 		return ERR_PTR(ret);
5369 	}
5370 	write_unlock(&em_tree->lock);
5371 
5372 	block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5373 	if (IS_ERR(block_group))
5374 		goto error_del_extent;
5375 
5376 	for (i = 0; i < map->num_stripes; i++) {
5377 		struct btrfs_device *dev = map->stripes[i].dev;
5378 
5379 		btrfs_device_set_bytes_used(dev,
5380 					    dev->bytes_used + ctl->stripe_size);
5381 		if (list_empty(&dev->post_commit_list))
5382 			list_add_tail(&dev->post_commit_list,
5383 				      &trans->transaction->dev_update_list);
5384 	}
5385 
5386 	atomic64_sub(ctl->stripe_size * map->num_stripes,
5387 		     &info->free_chunk_space);
5388 
5389 	free_extent_map(em);
5390 	check_raid56_incompat_flag(info, type);
5391 	check_raid1c34_incompat_flag(info, type);
5392 
5393 	return block_group;
5394 
5395 error_del_extent:
5396 	write_lock(&em_tree->lock);
5397 	remove_extent_mapping(em_tree, em);
5398 	write_unlock(&em_tree->lock);
5399 
5400 	/* One for our allocation */
5401 	free_extent_map(em);
5402 	/* One for the tree reference */
5403 	free_extent_map(em);
5404 
5405 	return block_group;
5406 }
5407 
5408 struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5409 					    u64 type)
5410 {
5411 	struct btrfs_fs_info *info = trans->fs_info;
5412 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
5413 	struct btrfs_device_info *devices_info = NULL;
5414 	struct alloc_chunk_ctl ctl;
5415 	struct btrfs_block_group *block_group;
5416 	int ret;
5417 
5418 	lockdep_assert_held(&info->chunk_mutex);
5419 
5420 	if (!alloc_profile_is_valid(type, 0)) {
5421 		ASSERT(0);
5422 		return ERR_PTR(-EINVAL);
5423 	}
5424 
5425 	if (list_empty(&fs_devices->alloc_list)) {
5426 		if (btrfs_test_opt(info, ENOSPC_DEBUG))
5427 			btrfs_debug(info, "%s: no writable device", __func__);
5428 		return ERR_PTR(-ENOSPC);
5429 	}
5430 
5431 	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5432 		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5433 		ASSERT(0);
5434 		return ERR_PTR(-EINVAL);
5435 	}
5436 
5437 	ctl.start = find_next_chunk(info);
5438 	ctl.type = type;
5439 	init_alloc_chunk_ctl(fs_devices, &ctl);
5440 
5441 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5442 			       GFP_NOFS);
5443 	if (!devices_info)
5444 		return ERR_PTR(-ENOMEM);
5445 
5446 	ret = gather_device_info(fs_devices, &ctl, devices_info);
5447 	if (ret < 0) {
5448 		block_group = ERR_PTR(ret);
5449 		goto out;
5450 	}
5451 
5452 	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5453 	if (ret < 0) {
5454 		block_group = ERR_PTR(ret);
5455 		goto out;
5456 	}
5457 
5458 	block_group = create_chunk(trans, &ctl, devices_info);
5459 
5460 out:
5461 	kfree(devices_info);
5462 	return block_group;
5463 }
5464 
5465 /*
5466  * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
5467  *
5468  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5469  * phases.
5470  */
5471 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5472 			     u64 chunk_offset, u64 chunk_size)
5473 {
5474 	struct btrfs_fs_info *fs_info = trans->fs_info;
5475 	struct btrfs_device *device;
5476 	struct extent_map *em;
5477 	struct map_lookup *map;
5478 	u64 dev_offset;
5479 	u64 stripe_size;
5480 	int i;
5481 	int ret = 0;
5482 
5483 	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5484 	if (IS_ERR(em))
5485 		return PTR_ERR(em);
5486 
5487 	map = em->map_lookup;
5488 	stripe_size = em->orig_block_len;
5489 
5490 	/*
5491 	 * Take the device list mutex to prevent races with the final phase of
5492 	 * a device replace operation that replaces the device object associated
5493 	 * with the map's stripes, because the device object's id can change
5494 	 * at any time during that final phase of the device replace operation
5495 	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5496 	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5497 	 * resulting in persisting a device extent item with such ID.
5498 	 */
5499 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
5500 	for (i = 0; i < map->num_stripes; i++) {
5501 		device = map->stripes[i].dev;
5502 		dev_offset = map->stripes[i].physical;
5503 
5504 		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5505 					     dev_offset, stripe_size);
5506 		if (ret)
5507 			break;
5508 	}
5509 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5510 
5511 	free_extent_map(em);
5512 	return ret;
5513 }
5514 
5515 /*
5516  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to the
5517  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5518  * chunks.
5519  *
5520  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5521  * phases.
5522  */
5523 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5524 				     struct btrfs_block_group *bg)
5525 {
5526 	struct btrfs_fs_info *fs_info = trans->fs_info;
5527 	struct btrfs_root *extent_root = fs_info->extent_root;
5528 	struct btrfs_root *chunk_root = fs_info->chunk_root;
5529 	struct btrfs_key key;
5530 	struct btrfs_chunk *chunk;
5531 	struct btrfs_stripe *stripe;
5532 	struct extent_map *em;
5533 	struct map_lookup *map;
5534 	size_t item_size;
5535 	int i;
5536 	int ret;
5537 
5538 	/*
5539 	 * We take the chunk_mutex for 2 reasons:
5540 	 *
5541 	 * 1) Updates and insertions in the chunk btree must be done while holding
5542 	 *    the chunk_mutex, as well as updating the system chunk array in the
5543 	 *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5544 	 *    details;
5545 	 *
5546 	 * 2) To prevent races with the final phase of a device replace operation
5547 	 *    that replaces the device object associated with the map's stripes,
5548 	 *    because the device object's id can change at any time during that
5549 	 *    final phase of the device replace operation
5550 	 *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5551 	 *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5552 	 *    which would cause a failure when updating the device item, which does
5553 	 *    not exists, or persisting a stripe of the chunk item with such ID.
5554 	 *    Here we can't use the device_list_mutex because our caller already
5555 	 *    has locked the chunk_mutex, and the final phase of device replace
5556 	 *    acquires both mutexes - first the device_list_mutex and then the
5557 	 *    chunk_mutex. Using any of those two mutexes protects us from a
5558 	 *    concurrent device replace.
5559 	 */
5560 	lockdep_assert_held(&fs_info->chunk_mutex);
5561 
5562 	em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5563 	if (IS_ERR(em)) {
5564 		ret = PTR_ERR(em);
5565 		btrfs_abort_transaction(trans, ret);
5566 		return ret;
5567 	}
5568 
5569 	map = em->map_lookup;
5570 	item_size = btrfs_chunk_item_size(map->num_stripes);
5571 
5572 	chunk = kzalloc(item_size, GFP_NOFS);
5573 	if (!chunk) {
5574 		ret = -ENOMEM;
5575 		btrfs_abort_transaction(trans, ret);
5576 		goto out;
5577 	}
5578 
5579 	for (i = 0; i < map->num_stripes; i++) {
5580 		struct btrfs_device *device = map->stripes[i].dev;
5581 
5582 		ret = btrfs_update_device(trans, device);
5583 		if (ret)
5584 			goto out;
5585 	}
5586 
5587 	stripe = &chunk->stripe;
5588 	for (i = 0; i < map->num_stripes; i++) {
5589 		struct btrfs_device *device = map->stripes[i].dev;
5590 		const u64 dev_offset = map->stripes[i].physical;
5591 
5592 		btrfs_set_stack_stripe_devid(stripe, device->devid);
5593 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
5594 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5595 		stripe++;
5596 	}
5597 
5598 	btrfs_set_stack_chunk_length(chunk, bg->length);
5599 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5600 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5601 	btrfs_set_stack_chunk_type(chunk, map->type);
5602 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5603 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5604 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5605 	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5606 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5607 
5608 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5609 	key.type = BTRFS_CHUNK_ITEM_KEY;
5610 	key.offset = bg->start;
5611 
5612 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5613 	if (ret)
5614 		goto out;
5615 
5616 	bg->chunk_item_inserted = 1;
5617 
5618 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5619 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5620 		if (ret)
5621 			goto out;
5622 	}
5623 
5624 out:
5625 	kfree(chunk);
5626 	free_extent_map(em);
5627 	return ret;
5628 }
5629 
5630 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5631 {
5632 	struct btrfs_fs_info *fs_info = trans->fs_info;
5633 	u64 alloc_profile;
5634 	struct btrfs_block_group *meta_bg;
5635 	struct btrfs_block_group *sys_bg;
5636 
5637 	/*
5638 	 * When adding a new device for sprouting, the seed device is read-only
5639 	 * so we must first allocate a metadata and a system chunk. But before
5640 	 * adding the block group items to the extent, device and chunk btrees,
5641 	 * we must first:
5642 	 *
5643 	 * 1) Create both chunks without doing any changes to the btrees, as
5644 	 *    otherwise we would get -ENOSPC since the block groups from the
5645 	 *    seed device are read-only;
5646 	 *
5647 	 * 2) Add the device item for the new sprout device - finishing the setup
5648 	 *    of a new block group requires updating the device item in the chunk
5649 	 *    btree, so it must exist when we attempt to do it. The previous step
5650 	 *    ensures this does not fail with -ENOSPC.
5651 	 *
5652 	 * After that we can add the block group items to their btrees:
5653 	 * update existing device item in the chunk btree, add a new block group
5654 	 * item to the extent btree, add a new chunk item to the chunk btree and
5655 	 * finally add the new device extent items to the devices btree.
5656 	 */
5657 
5658 	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5659 	meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
5660 	if (IS_ERR(meta_bg))
5661 		return PTR_ERR(meta_bg);
5662 
5663 	alloc_profile = btrfs_system_alloc_profile(fs_info);
5664 	sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
5665 	if (IS_ERR(sys_bg))
5666 		return PTR_ERR(sys_bg);
5667 
5668 	return 0;
5669 }
5670 
5671 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5672 {
5673 	const int index = btrfs_bg_flags_to_raid_index(map->type);
5674 
5675 	return btrfs_raid_array[index].tolerated_failures;
5676 }
5677 
5678 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5679 {
5680 	struct extent_map *em;
5681 	struct map_lookup *map;
5682 	int readonly = 0;
5683 	int miss_ndevs = 0;
5684 	int i;
5685 
5686 	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5687 	if (IS_ERR(em))
5688 		return 1;
5689 
5690 	map = em->map_lookup;
5691 	for (i = 0; i < map->num_stripes; i++) {
5692 		if (test_bit(BTRFS_DEV_STATE_MISSING,
5693 					&map->stripes[i].dev->dev_state)) {
5694 			miss_ndevs++;
5695 			continue;
5696 		}
5697 		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5698 					&map->stripes[i].dev->dev_state)) {
5699 			readonly = 1;
5700 			goto end;
5701 		}
5702 	}
5703 
5704 	/*
5705 	 * If the number of missing devices is larger than max errors,
5706 	 * we can not write the data into that chunk successfully, so
5707 	 * set it readonly.
5708 	 */
5709 	if (miss_ndevs > btrfs_chunk_max_errors(map))
5710 		readonly = 1;
5711 end:
5712 	free_extent_map(em);
5713 	return readonly;
5714 }
5715 
5716 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5717 {
5718 	struct extent_map *em;
5719 
5720 	while (1) {
5721 		write_lock(&tree->lock);
5722 		em = lookup_extent_mapping(tree, 0, (u64)-1);
5723 		if (em)
5724 			remove_extent_mapping(tree, em);
5725 		write_unlock(&tree->lock);
5726 		if (!em)
5727 			break;
5728 		/* once for us */
5729 		free_extent_map(em);
5730 		/* once for the tree */
5731 		free_extent_map(em);
5732 	}
5733 }
5734 
5735 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5736 {
5737 	struct extent_map *em;
5738 	struct map_lookup *map;
5739 	int ret;
5740 
5741 	em = btrfs_get_chunk_map(fs_info, logical, len);
5742 	if (IS_ERR(em))
5743 		/*
5744 		 * We could return errors for these cases, but that could get
5745 		 * ugly and we'd probably do the same thing which is just not do
5746 		 * anything else and exit, so return 1 so the callers don't try
5747 		 * to use other copies.
5748 		 */
5749 		return 1;
5750 
5751 	map = em->map_lookup;
5752 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5753 		ret = map->num_stripes;
5754 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5755 		ret = map->sub_stripes;
5756 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5757 		ret = 2;
5758 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5759 		/*
5760 		 * There could be two corrupted data stripes, we need
5761 		 * to loop retry in order to rebuild the correct data.
5762 		 *
5763 		 * Fail a stripe at a time on every retry except the
5764 		 * stripe under reconstruction.
5765 		 */
5766 		ret = map->num_stripes;
5767 	else
5768 		ret = 1;
5769 	free_extent_map(em);
5770 
5771 	down_read(&fs_info->dev_replace.rwsem);
5772 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5773 	    fs_info->dev_replace.tgtdev)
5774 		ret++;
5775 	up_read(&fs_info->dev_replace.rwsem);
5776 
5777 	return ret;
5778 }
5779 
5780 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5781 				    u64 logical)
5782 {
5783 	struct extent_map *em;
5784 	struct map_lookup *map;
5785 	unsigned long len = fs_info->sectorsize;
5786 
5787 	em = btrfs_get_chunk_map(fs_info, logical, len);
5788 
5789 	if (!WARN_ON(IS_ERR(em))) {
5790 		map = em->map_lookup;
5791 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5792 			len = map->stripe_len * nr_data_stripes(map);
5793 		free_extent_map(em);
5794 	}
5795 	return len;
5796 }
5797 
5798 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5799 {
5800 	struct extent_map *em;
5801 	struct map_lookup *map;
5802 	int ret = 0;
5803 
5804 	em = btrfs_get_chunk_map(fs_info, logical, len);
5805 
5806 	if(!WARN_ON(IS_ERR(em))) {
5807 		map = em->map_lookup;
5808 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5809 			ret = 1;
5810 		free_extent_map(em);
5811 	}
5812 	return ret;
5813 }
5814 
5815 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5816 			    struct map_lookup *map, int first,
5817 			    int dev_replace_is_ongoing)
5818 {
5819 	int i;
5820 	int num_stripes;
5821 	int preferred_mirror;
5822 	int tolerance;
5823 	struct btrfs_device *srcdev;
5824 
5825 	ASSERT((map->type &
5826 		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5827 
5828 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5829 		num_stripes = map->sub_stripes;
5830 	else
5831 		num_stripes = map->num_stripes;
5832 
5833 	switch (fs_info->fs_devices->read_policy) {
5834 	default:
5835 		/* Shouldn't happen, just warn and use pid instead of failing */
5836 		btrfs_warn_rl(fs_info,
5837 			      "unknown read_policy type %u, reset to pid",
5838 			      fs_info->fs_devices->read_policy);
5839 		fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5840 		fallthrough;
5841 	case BTRFS_READ_POLICY_PID:
5842 		preferred_mirror = first + (current->pid % num_stripes);
5843 		break;
5844 	}
5845 
5846 	if (dev_replace_is_ongoing &&
5847 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5848 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5849 		srcdev = fs_info->dev_replace.srcdev;
5850 	else
5851 		srcdev = NULL;
5852 
5853 	/*
5854 	 * try to avoid the drive that is the source drive for a
5855 	 * dev-replace procedure, only choose it if no other non-missing
5856 	 * mirror is available
5857 	 */
5858 	for (tolerance = 0; tolerance < 2; tolerance++) {
5859 		if (map->stripes[preferred_mirror].dev->bdev &&
5860 		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5861 			return preferred_mirror;
5862 		for (i = first; i < first + num_stripes; i++) {
5863 			if (map->stripes[i].dev->bdev &&
5864 			    (tolerance || map->stripes[i].dev != srcdev))
5865 				return i;
5866 		}
5867 	}
5868 
5869 	/* we couldn't find one that doesn't fail.  Just return something
5870 	 * and the io error handling code will clean up eventually
5871 	 */
5872 	return preferred_mirror;
5873 }
5874 
5875 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5876 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5877 {
5878 	int i;
5879 	int again = 1;
5880 
5881 	while (again) {
5882 		again = 0;
5883 		for (i = 0; i < num_stripes - 1; i++) {
5884 			/* Swap if parity is on a smaller index */
5885 			if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5886 				swap(bbio->stripes[i], bbio->stripes[i + 1]);
5887 				swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5888 				again = 1;
5889 			}
5890 		}
5891 	}
5892 }
5893 
5894 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5895 {
5896 	struct btrfs_bio *bbio = kzalloc(
5897 		 /* the size of the btrfs_bio */
5898 		sizeof(struct btrfs_bio) +
5899 		/* plus the variable array for the stripes */
5900 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5901 		/* plus the variable array for the tgt dev */
5902 		sizeof(int) * (real_stripes) +
5903 		/*
5904 		 * plus the raid_map, which includes both the tgt dev
5905 		 * and the stripes
5906 		 */
5907 		sizeof(u64) * (total_stripes),
5908 		GFP_NOFS|__GFP_NOFAIL);
5909 
5910 	atomic_set(&bbio->error, 0);
5911 	refcount_set(&bbio->refs, 1);
5912 
5913 	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5914 	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5915 
5916 	return bbio;
5917 }
5918 
5919 void btrfs_get_bbio(struct btrfs_bio *bbio)
5920 {
5921 	WARN_ON(!refcount_read(&bbio->refs));
5922 	refcount_inc(&bbio->refs);
5923 }
5924 
5925 void btrfs_put_bbio(struct btrfs_bio *bbio)
5926 {
5927 	if (!bbio)
5928 		return;
5929 	if (refcount_dec_and_test(&bbio->refs))
5930 		kfree(bbio);
5931 }
5932 
5933 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5934 /*
5935  * Please note that, discard won't be sent to target device of device
5936  * replace.
5937  */
5938 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5939 					 u64 logical, u64 *length_ret,
5940 					 struct btrfs_bio **bbio_ret)
5941 {
5942 	struct extent_map *em;
5943 	struct map_lookup *map;
5944 	struct btrfs_bio *bbio;
5945 	u64 length = *length_ret;
5946 	u64 offset;
5947 	u64 stripe_nr;
5948 	u64 stripe_nr_end;
5949 	u64 stripe_end_offset;
5950 	u64 stripe_cnt;
5951 	u64 stripe_len;
5952 	u64 stripe_offset;
5953 	u64 num_stripes;
5954 	u32 stripe_index;
5955 	u32 factor = 0;
5956 	u32 sub_stripes = 0;
5957 	u64 stripes_per_dev = 0;
5958 	u32 remaining_stripes = 0;
5959 	u32 last_stripe = 0;
5960 	int ret = 0;
5961 	int i;
5962 
5963 	/* discard always return a bbio */
5964 	ASSERT(bbio_ret);
5965 
5966 	em = btrfs_get_chunk_map(fs_info, logical, length);
5967 	if (IS_ERR(em))
5968 		return PTR_ERR(em);
5969 
5970 	map = em->map_lookup;
5971 	/* we don't discard raid56 yet */
5972 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5973 		ret = -EOPNOTSUPP;
5974 		goto out;
5975 	}
5976 
5977 	offset = logical - em->start;
5978 	length = min_t(u64, em->start + em->len - logical, length);
5979 	*length_ret = length;
5980 
5981 	stripe_len = map->stripe_len;
5982 	/*
5983 	 * stripe_nr counts the total number of stripes we have to stride
5984 	 * to get to this block
5985 	 */
5986 	stripe_nr = div64_u64(offset, stripe_len);
5987 
5988 	/* stripe_offset is the offset of this block in its stripe */
5989 	stripe_offset = offset - stripe_nr * stripe_len;
5990 
5991 	stripe_nr_end = round_up(offset + length, map->stripe_len);
5992 	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5993 	stripe_cnt = stripe_nr_end - stripe_nr;
5994 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5995 			    (offset + length);
5996 	/*
5997 	 * after this, stripe_nr is the number of stripes on this
5998 	 * device we have to walk to find the data, and stripe_index is
5999 	 * the number of our device in the stripe array
6000 	 */
6001 	num_stripes = 1;
6002 	stripe_index = 0;
6003 	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6004 			 BTRFS_BLOCK_GROUP_RAID10)) {
6005 		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
6006 			sub_stripes = 1;
6007 		else
6008 			sub_stripes = map->sub_stripes;
6009 
6010 		factor = map->num_stripes / sub_stripes;
6011 		num_stripes = min_t(u64, map->num_stripes,
6012 				    sub_stripes * stripe_cnt);
6013 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6014 		stripe_index *= sub_stripes;
6015 		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
6016 					      &remaining_stripes);
6017 		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6018 		last_stripe *= sub_stripes;
6019 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6020 				BTRFS_BLOCK_GROUP_DUP)) {
6021 		num_stripes = map->num_stripes;
6022 	} else {
6023 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6024 					&stripe_index);
6025 	}
6026 
6027 	bbio = alloc_btrfs_bio(num_stripes, 0);
6028 	if (!bbio) {
6029 		ret = -ENOMEM;
6030 		goto out;
6031 	}
6032 
6033 	for (i = 0; i < num_stripes; i++) {
6034 		bbio->stripes[i].physical =
6035 			map->stripes[stripe_index].physical +
6036 			stripe_offset + stripe_nr * map->stripe_len;
6037 		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6038 
6039 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6040 				 BTRFS_BLOCK_GROUP_RAID10)) {
6041 			bbio->stripes[i].length = stripes_per_dev *
6042 				map->stripe_len;
6043 
6044 			if (i / sub_stripes < remaining_stripes)
6045 				bbio->stripes[i].length +=
6046 					map->stripe_len;
6047 
6048 			/*
6049 			 * Special for the first stripe and
6050 			 * the last stripe:
6051 			 *
6052 			 * |-------|...|-------|
6053 			 *     |----------|
6054 			 *    off     end_off
6055 			 */
6056 			if (i < sub_stripes)
6057 				bbio->stripes[i].length -=
6058 					stripe_offset;
6059 
6060 			if (stripe_index >= last_stripe &&
6061 			    stripe_index <= (last_stripe +
6062 					     sub_stripes - 1))
6063 				bbio->stripes[i].length -=
6064 					stripe_end_offset;
6065 
6066 			if (i == sub_stripes - 1)
6067 				stripe_offset = 0;
6068 		} else {
6069 			bbio->stripes[i].length = length;
6070 		}
6071 
6072 		stripe_index++;
6073 		if (stripe_index == map->num_stripes) {
6074 			stripe_index = 0;
6075 			stripe_nr++;
6076 		}
6077 	}
6078 
6079 	*bbio_ret = bbio;
6080 	bbio->map_type = map->type;
6081 	bbio->num_stripes = num_stripes;
6082 out:
6083 	free_extent_map(em);
6084 	return ret;
6085 }
6086 
6087 /*
6088  * In dev-replace case, for repair case (that's the only case where the mirror
6089  * is selected explicitly when calling btrfs_map_block), blocks left of the
6090  * left cursor can also be read from the target drive.
6091  *
6092  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6093  * array of stripes.
6094  * For READ, it also needs to be supported using the same mirror number.
6095  *
6096  * If the requested block is not left of the left cursor, EIO is returned. This
6097  * can happen because btrfs_num_copies() returns one more in the dev-replace
6098  * case.
6099  */
6100 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6101 					 u64 logical, u64 length,
6102 					 u64 srcdev_devid, int *mirror_num,
6103 					 u64 *physical)
6104 {
6105 	struct btrfs_bio *bbio = NULL;
6106 	int num_stripes;
6107 	int index_srcdev = 0;
6108 	int found = 0;
6109 	u64 physical_of_found = 0;
6110 	int i;
6111 	int ret = 0;
6112 
6113 	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6114 				logical, &length, &bbio, 0, 0);
6115 	if (ret) {
6116 		ASSERT(bbio == NULL);
6117 		return ret;
6118 	}
6119 
6120 	num_stripes = bbio->num_stripes;
6121 	if (*mirror_num > num_stripes) {
6122 		/*
6123 		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6124 		 * that means that the requested area is not left of the left
6125 		 * cursor
6126 		 */
6127 		btrfs_put_bbio(bbio);
6128 		return -EIO;
6129 	}
6130 
6131 	/*
6132 	 * process the rest of the function using the mirror_num of the source
6133 	 * drive. Therefore look it up first.  At the end, patch the device
6134 	 * pointer to the one of the target drive.
6135 	 */
6136 	for (i = 0; i < num_stripes; i++) {
6137 		if (bbio->stripes[i].dev->devid != srcdev_devid)
6138 			continue;
6139 
6140 		/*
6141 		 * In case of DUP, in order to keep it simple, only add the
6142 		 * mirror with the lowest physical address
6143 		 */
6144 		if (found &&
6145 		    physical_of_found <= bbio->stripes[i].physical)
6146 			continue;
6147 
6148 		index_srcdev = i;
6149 		found = 1;
6150 		physical_of_found = bbio->stripes[i].physical;
6151 	}
6152 
6153 	btrfs_put_bbio(bbio);
6154 
6155 	ASSERT(found);
6156 	if (!found)
6157 		return -EIO;
6158 
6159 	*mirror_num = index_srcdev + 1;
6160 	*physical = physical_of_found;
6161 	return ret;
6162 }
6163 
6164 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6165 {
6166 	struct btrfs_block_group *cache;
6167 	bool ret;
6168 
6169 	/* Non zoned filesystem does not use "to_copy" flag */
6170 	if (!btrfs_is_zoned(fs_info))
6171 		return false;
6172 
6173 	cache = btrfs_lookup_block_group(fs_info, logical);
6174 
6175 	spin_lock(&cache->lock);
6176 	ret = cache->to_copy;
6177 	spin_unlock(&cache->lock);
6178 
6179 	btrfs_put_block_group(cache);
6180 	return ret;
6181 }
6182 
6183 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6184 				      struct btrfs_bio **bbio_ret,
6185 				      struct btrfs_dev_replace *dev_replace,
6186 				      u64 logical,
6187 				      int *num_stripes_ret, int *max_errors_ret)
6188 {
6189 	struct btrfs_bio *bbio = *bbio_ret;
6190 	u64 srcdev_devid = dev_replace->srcdev->devid;
6191 	int tgtdev_indexes = 0;
6192 	int num_stripes = *num_stripes_ret;
6193 	int max_errors = *max_errors_ret;
6194 	int i;
6195 
6196 	if (op == BTRFS_MAP_WRITE) {
6197 		int index_where_to_add;
6198 
6199 		/*
6200 		 * A block group which have "to_copy" set will eventually
6201 		 * copied by dev-replace process. We can avoid cloning IO here.
6202 		 */
6203 		if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6204 			return;
6205 
6206 		/*
6207 		 * duplicate the write operations while the dev replace
6208 		 * procedure is running. Since the copying of the old disk to
6209 		 * the new disk takes place at run time while the filesystem is
6210 		 * mounted writable, the regular write operations to the old
6211 		 * disk have to be duplicated to go to the new disk as well.
6212 		 *
6213 		 * Note that device->missing is handled by the caller, and that
6214 		 * the write to the old disk is already set up in the stripes
6215 		 * array.
6216 		 */
6217 		index_where_to_add = num_stripes;
6218 		for (i = 0; i < num_stripes; i++) {
6219 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
6220 				/* write to new disk, too */
6221 				struct btrfs_bio_stripe *new =
6222 					bbio->stripes + index_where_to_add;
6223 				struct btrfs_bio_stripe *old =
6224 					bbio->stripes + i;
6225 
6226 				new->physical = old->physical;
6227 				new->length = old->length;
6228 				new->dev = dev_replace->tgtdev;
6229 				bbio->tgtdev_map[i] = index_where_to_add;
6230 				index_where_to_add++;
6231 				max_errors++;
6232 				tgtdev_indexes++;
6233 			}
6234 		}
6235 		num_stripes = index_where_to_add;
6236 	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6237 		int index_srcdev = 0;
6238 		int found = 0;
6239 		u64 physical_of_found = 0;
6240 
6241 		/*
6242 		 * During the dev-replace procedure, the target drive can also
6243 		 * be used to read data in case it is needed to repair a corrupt
6244 		 * block elsewhere. This is possible if the requested area is
6245 		 * left of the left cursor. In this area, the target drive is a
6246 		 * full copy of the source drive.
6247 		 */
6248 		for (i = 0; i < num_stripes; i++) {
6249 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
6250 				/*
6251 				 * In case of DUP, in order to keep it simple,
6252 				 * only add the mirror with the lowest physical
6253 				 * address
6254 				 */
6255 				if (found &&
6256 				    physical_of_found <=
6257 				     bbio->stripes[i].physical)
6258 					continue;
6259 				index_srcdev = i;
6260 				found = 1;
6261 				physical_of_found = bbio->stripes[i].physical;
6262 			}
6263 		}
6264 		if (found) {
6265 			struct btrfs_bio_stripe *tgtdev_stripe =
6266 				bbio->stripes + num_stripes;
6267 
6268 			tgtdev_stripe->physical = physical_of_found;
6269 			tgtdev_stripe->length =
6270 				bbio->stripes[index_srcdev].length;
6271 			tgtdev_stripe->dev = dev_replace->tgtdev;
6272 			bbio->tgtdev_map[index_srcdev] = num_stripes;
6273 
6274 			tgtdev_indexes++;
6275 			num_stripes++;
6276 		}
6277 	}
6278 
6279 	*num_stripes_ret = num_stripes;
6280 	*max_errors_ret = max_errors;
6281 	bbio->num_tgtdevs = tgtdev_indexes;
6282 	*bbio_ret = bbio;
6283 }
6284 
6285 static bool need_full_stripe(enum btrfs_map_op op)
6286 {
6287 	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6288 }
6289 
6290 /*
6291  * Calculate the geometry of a particular (address, len) tuple. This
6292  * information is used to calculate how big a particular bio can get before it
6293  * straddles a stripe.
6294  *
6295  * @fs_info: the filesystem
6296  * @em:      mapping containing the logical extent
6297  * @op:      type of operation - write or read
6298  * @logical: address that we want to figure out the geometry of
6299  * @io_geom: pointer used to return values
6300  *
6301  * Returns < 0 in case a chunk for the given logical address cannot be found,
6302  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6303  */
6304 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6305 			  enum btrfs_map_op op, u64 logical,
6306 			  struct btrfs_io_geometry *io_geom)
6307 {
6308 	struct map_lookup *map;
6309 	u64 len;
6310 	u64 offset;
6311 	u64 stripe_offset;
6312 	u64 stripe_nr;
6313 	u64 stripe_len;
6314 	u64 raid56_full_stripe_start = (u64)-1;
6315 	int data_stripes;
6316 
6317 	ASSERT(op != BTRFS_MAP_DISCARD);
6318 
6319 	map = em->map_lookup;
6320 	/* Offset of this logical address in the chunk */
6321 	offset = logical - em->start;
6322 	/* Len of a stripe in a chunk */
6323 	stripe_len = map->stripe_len;
6324 	/* Stripe where this block falls in */
6325 	stripe_nr = div64_u64(offset, stripe_len);
6326 	/* Offset of stripe in the chunk */
6327 	stripe_offset = stripe_nr * stripe_len;
6328 	if (offset < stripe_offset) {
6329 		btrfs_crit(fs_info,
6330 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6331 			stripe_offset, offset, em->start, logical, stripe_len);
6332 		return -EINVAL;
6333 	}
6334 
6335 	/* stripe_offset is the offset of this block in its stripe */
6336 	stripe_offset = offset - stripe_offset;
6337 	data_stripes = nr_data_stripes(map);
6338 
6339 	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6340 		u64 max_len = stripe_len - stripe_offset;
6341 
6342 		/*
6343 		 * In case of raid56, we need to know the stripe aligned start
6344 		 */
6345 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6346 			unsigned long full_stripe_len = stripe_len * data_stripes;
6347 			raid56_full_stripe_start = offset;
6348 
6349 			/*
6350 			 * Allow a write of a full stripe, but make sure we
6351 			 * don't allow straddling of stripes
6352 			 */
6353 			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6354 					full_stripe_len);
6355 			raid56_full_stripe_start *= full_stripe_len;
6356 
6357 			/*
6358 			 * For writes to RAID[56], allow a full stripeset across
6359 			 * all disks. For other RAID types and for RAID[56]
6360 			 * reads, just allow a single stripe (on a single disk).
6361 			 */
6362 			if (op == BTRFS_MAP_WRITE) {
6363 				max_len = stripe_len * data_stripes -
6364 					  (offset - raid56_full_stripe_start);
6365 			}
6366 		}
6367 		len = min_t(u64, em->len - offset, max_len);
6368 	} else {
6369 		len = em->len - offset;
6370 	}
6371 
6372 	io_geom->len = len;
6373 	io_geom->offset = offset;
6374 	io_geom->stripe_len = stripe_len;
6375 	io_geom->stripe_nr = stripe_nr;
6376 	io_geom->stripe_offset = stripe_offset;
6377 	io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6378 
6379 	return 0;
6380 }
6381 
6382 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6383 			     enum btrfs_map_op op,
6384 			     u64 logical, u64 *length,
6385 			     struct btrfs_bio **bbio_ret,
6386 			     int mirror_num, int need_raid_map)
6387 {
6388 	struct extent_map *em;
6389 	struct map_lookup *map;
6390 	u64 stripe_offset;
6391 	u64 stripe_nr;
6392 	u64 stripe_len;
6393 	u32 stripe_index;
6394 	int data_stripes;
6395 	int i;
6396 	int ret = 0;
6397 	int num_stripes;
6398 	int max_errors = 0;
6399 	int tgtdev_indexes = 0;
6400 	struct btrfs_bio *bbio = NULL;
6401 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6402 	int dev_replace_is_ongoing = 0;
6403 	int num_alloc_stripes;
6404 	int patch_the_first_stripe_for_dev_replace = 0;
6405 	u64 physical_to_patch_in_first_stripe = 0;
6406 	u64 raid56_full_stripe_start = (u64)-1;
6407 	struct btrfs_io_geometry geom;
6408 
6409 	ASSERT(bbio_ret);
6410 	ASSERT(op != BTRFS_MAP_DISCARD);
6411 
6412 	em = btrfs_get_chunk_map(fs_info, logical, *length);
6413 	ASSERT(!IS_ERR(em));
6414 
6415 	ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6416 	if (ret < 0)
6417 		return ret;
6418 
6419 	map = em->map_lookup;
6420 
6421 	*length = geom.len;
6422 	stripe_len = geom.stripe_len;
6423 	stripe_nr = geom.stripe_nr;
6424 	stripe_offset = geom.stripe_offset;
6425 	raid56_full_stripe_start = geom.raid56_stripe_offset;
6426 	data_stripes = nr_data_stripes(map);
6427 
6428 	down_read(&dev_replace->rwsem);
6429 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6430 	/*
6431 	 * Hold the semaphore for read during the whole operation, write is
6432 	 * requested at commit time but must wait.
6433 	 */
6434 	if (!dev_replace_is_ongoing)
6435 		up_read(&dev_replace->rwsem);
6436 
6437 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6438 	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6439 		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6440 						    dev_replace->srcdev->devid,
6441 						    &mirror_num,
6442 					    &physical_to_patch_in_first_stripe);
6443 		if (ret)
6444 			goto out;
6445 		else
6446 			patch_the_first_stripe_for_dev_replace = 1;
6447 	} else if (mirror_num > map->num_stripes) {
6448 		mirror_num = 0;
6449 	}
6450 
6451 	num_stripes = 1;
6452 	stripe_index = 0;
6453 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6454 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6455 				&stripe_index);
6456 		if (!need_full_stripe(op))
6457 			mirror_num = 1;
6458 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6459 		if (need_full_stripe(op))
6460 			num_stripes = map->num_stripes;
6461 		else if (mirror_num)
6462 			stripe_index = mirror_num - 1;
6463 		else {
6464 			stripe_index = find_live_mirror(fs_info, map, 0,
6465 					    dev_replace_is_ongoing);
6466 			mirror_num = stripe_index + 1;
6467 		}
6468 
6469 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6470 		if (need_full_stripe(op)) {
6471 			num_stripes = map->num_stripes;
6472 		} else if (mirror_num) {
6473 			stripe_index = mirror_num - 1;
6474 		} else {
6475 			mirror_num = 1;
6476 		}
6477 
6478 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6479 		u32 factor = map->num_stripes / map->sub_stripes;
6480 
6481 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6482 		stripe_index *= map->sub_stripes;
6483 
6484 		if (need_full_stripe(op))
6485 			num_stripes = map->sub_stripes;
6486 		else if (mirror_num)
6487 			stripe_index += mirror_num - 1;
6488 		else {
6489 			int old_stripe_index = stripe_index;
6490 			stripe_index = find_live_mirror(fs_info, map,
6491 					      stripe_index,
6492 					      dev_replace_is_ongoing);
6493 			mirror_num = stripe_index - old_stripe_index + 1;
6494 		}
6495 
6496 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6497 		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6498 			/* push stripe_nr back to the start of the full stripe */
6499 			stripe_nr = div64_u64(raid56_full_stripe_start,
6500 					stripe_len * data_stripes);
6501 
6502 			/* RAID[56] write or recovery. Return all stripes */
6503 			num_stripes = map->num_stripes;
6504 			max_errors = nr_parity_stripes(map);
6505 
6506 			*length = map->stripe_len;
6507 			stripe_index = 0;
6508 			stripe_offset = 0;
6509 		} else {
6510 			/*
6511 			 * Mirror #0 or #1 means the original data block.
6512 			 * Mirror #2 is RAID5 parity block.
6513 			 * Mirror #3 is RAID6 Q block.
6514 			 */
6515 			stripe_nr = div_u64_rem(stripe_nr,
6516 					data_stripes, &stripe_index);
6517 			if (mirror_num > 1)
6518 				stripe_index = data_stripes + mirror_num - 2;
6519 
6520 			/* We distribute the parity blocks across stripes */
6521 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6522 					&stripe_index);
6523 			if (!need_full_stripe(op) && mirror_num <= 1)
6524 				mirror_num = 1;
6525 		}
6526 	} else {
6527 		/*
6528 		 * after this, stripe_nr is the number of stripes on this
6529 		 * device we have to walk to find the data, and stripe_index is
6530 		 * the number of our device in the stripe array
6531 		 */
6532 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6533 				&stripe_index);
6534 		mirror_num = stripe_index + 1;
6535 	}
6536 	if (stripe_index >= map->num_stripes) {
6537 		btrfs_crit(fs_info,
6538 			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6539 			   stripe_index, map->num_stripes);
6540 		ret = -EINVAL;
6541 		goto out;
6542 	}
6543 
6544 	num_alloc_stripes = num_stripes;
6545 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6546 		if (op == BTRFS_MAP_WRITE)
6547 			num_alloc_stripes <<= 1;
6548 		if (op == BTRFS_MAP_GET_READ_MIRRORS)
6549 			num_alloc_stripes++;
6550 		tgtdev_indexes = num_stripes;
6551 	}
6552 
6553 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6554 	if (!bbio) {
6555 		ret = -ENOMEM;
6556 		goto out;
6557 	}
6558 
6559 	for (i = 0; i < num_stripes; i++) {
6560 		bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6561 			stripe_offset + stripe_nr * map->stripe_len;
6562 		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6563 		stripe_index++;
6564 	}
6565 
6566 	/* build raid_map */
6567 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6568 	    (need_full_stripe(op) || mirror_num > 1)) {
6569 		u64 tmp;
6570 		unsigned rot;
6571 
6572 		/* Work out the disk rotation on this stripe-set */
6573 		div_u64_rem(stripe_nr, num_stripes, &rot);
6574 
6575 		/* Fill in the logical address of each stripe */
6576 		tmp = stripe_nr * data_stripes;
6577 		for (i = 0; i < data_stripes; i++)
6578 			bbio->raid_map[(i+rot) % num_stripes] =
6579 				em->start + (tmp + i) * map->stripe_len;
6580 
6581 		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6582 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6583 			bbio->raid_map[(i+rot+1) % num_stripes] =
6584 				RAID6_Q_STRIPE;
6585 
6586 		sort_parity_stripes(bbio, num_stripes);
6587 	}
6588 
6589 	if (need_full_stripe(op))
6590 		max_errors = btrfs_chunk_max_errors(map);
6591 
6592 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6593 	    need_full_stripe(op)) {
6594 		handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
6595 					  &num_stripes, &max_errors);
6596 	}
6597 
6598 	*bbio_ret = bbio;
6599 	bbio->map_type = map->type;
6600 	bbio->num_stripes = num_stripes;
6601 	bbio->max_errors = max_errors;
6602 	bbio->mirror_num = mirror_num;
6603 
6604 	/*
6605 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6606 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
6607 	 * available as a mirror
6608 	 */
6609 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6610 		WARN_ON(num_stripes > 1);
6611 		bbio->stripes[0].dev = dev_replace->tgtdev;
6612 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6613 		bbio->mirror_num = map->num_stripes + 1;
6614 	}
6615 out:
6616 	if (dev_replace_is_ongoing) {
6617 		lockdep_assert_held(&dev_replace->rwsem);
6618 		/* Unlock and let waiting writers proceed */
6619 		up_read(&dev_replace->rwsem);
6620 	}
6621 	free_extent_map(em);
6622 	return ret;
6623 }
6624 
6625 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6626 		      u64 logical, u64 *length,
6627 		      struct btrfs_bio **bbio_ret, int mirror_num)
6628 {
6629 	if (op == BTRFS_MAP_DISCARD)
6630 		return __btrfs_map_block_for_discard(fs_info, logical,
6631 						     length, bbio_ret);
6632 
6633 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6634 				 mirror_num, 0);
6635 }
6636 
6637 /* For Scrub/replace */
6638 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6639 		     u64 logical, u64 *length,
6640 		     struct btrfs_bio **bbio_ret)
6641 {
6642 	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6643 }
6644 
6645 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6646 {
6647 	bio->bi_private = bbio->private;
6648 	bio->bi_end_io = bbio->end_io;
6649 	bio_endio(bio);
6650 
6651 	btrfs_put_bbio(bbio);
6652 }
6653 
6654 static void btrfs_end_bio(struct bio *bio)
6655 {
6656 	struct btrfs_bio *bbio = bio->bi_private;
6657 	int is_orig_bio = 0;
6658 
6659 	if (bio->bi_status) {
6660 		atomic_inc(&bbio->error);
6661 		if (bio->bi_status == BLK_STS_IOERR ||
6662 		    bio->bi_status == BLK_STS_TARGET) {
6663 			struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6664 
6665 			ASSERT(dev->bdev);
6666 			if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6667 				btrfs_dev_stat_inc_and_print(dev,
6668 						BTRFS_DEV_STAT_WRITE_ERRS);
6669 			else if (!(bio->bi_opf & REQ_RAHEAD))
6670 				btrfs_dev_stat_inc_and_print(dev,
6671 						BTRFS_DEV_STAT_READ_ERRS);
6672 			if (bio->bi_opf & REQ_PREFLUSH)
6673 				btrfs_dev_stat_inc_and_print(dev,
6674 						BTRFS_DEV_STAT_FLUSH_ERRS);
6675 		}
6676 	}
6677 
6678 	if (bio == bbio->orig_bio)
6679 		is_orig_bio = 1;
6680 
6681 	btrfs_bio_counter_dec(bbio->fs_info);
6682 
6683 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6684 		if (!is_orig_bio) {
6685 			bio_put(bio);
6686 			bio = bbio->orig_bio;
6687 		}
6688 
6689 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6690 		/* only send an error to the higher layers if it is
6691 		 * beyond the tolerance of the btrfs bio
6692 		 */
6693 		if (atomic_read(&bbio->error) > bbio->max_errors) {
6694 			bio->bi_status = BLK_STS_IOERR;
6695 		} else {
6696 			/*
6697 			 * this bio is actually up to date, we didn't
6698 			 * go over the max number of errors
6699 			 */
6700 			bio->bi_status = BLK_STS_OK;
6701 		}
6702 
6703 		btrfs_end_bbio(bbio, bio);
6704 	} else if (!is_orig_bio) {
6705 		bio_put(bio);
6706 	}
6707 }
6708 
6709 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6710 			      u64 physical, struct btrfs_device *dev)
6711 {
6712 	struct btrfs_fs_info *fs_info = bbio->fs_info;
6713 
6714 	bio->bi_private = bbio;
6715 	btrfs_io_bio(bio)->device = dev;
6716 	bio->bi_end_io = btrfs_end_bio;
6717 	bio->bi_iter.bi_sector = physical >> 9;
6718 	/*
6719 	 * For zone append writing, bi_sector must point the beginning of the
6720 	 * zone
6721 	 */
6722 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6723 		if (btrfs_dev_is_sequential(dev, physical)) {
6724 			u64 zone_start = round_down(physical, fs_info->zone_size);
6725 
6726 			bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6727 		} else {
6728 			bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6729 			bio->bi_opf |= REQ_OP_WRITE;
6730 		}
6731 	}
6732 	btrfs_debug_in_rcu(fs_info,
6733 	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6734 		bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6735 		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6736 		dev->devid, bio->bi_iter.bi_size);
6737 	bio_set_dev(bio, dev->bdev);
6738 
6739 	btrfs_bio_counter_inc_noblocked(fs_info);
6740 
6741 	btrfsic_submit_bio(bio);
6742 }
6743 
6744 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6745 {
6746 	atomic_inc(&bbio->error);
6747 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6748 		/* Should be the original bio. */
6749 		WARN_ON(bio != bbio->orig_bio);
6750 
6751 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6752 		bio->bi_iter.bi_sector = logical >> 9;
6753 		if (atomic_read(&bbio->error) > bbio->max_errors)
6754 			bio->bi_status = BLK_STS_IOERR;
6755 		else
6756 			bio->bi_status = BLK_STS_OK;
6757 		btrfs_end_bbio(bbio, bio);
6758 	}
6759 }
6760 
6761 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6762 			   int mirror_num)
6763 {
6764 	struct btrfs_device *dev;
6765 	struct bio *first_bio = bio;
6766 	u64 logical = bio->bi_iter.bi_sector << 9;
6767 	u64 length = 0;
6768 	u64 map_length;
6769 	int ret;
6770 	int dev_nr;
6771 	int total_devs;
6772 	struct btrfs_bio *bbio = NULL;
6773 
6774 	length = bio->bi_iter.bi_size;
6775 	map_length = length;
6776 
6777 	btrfs_bio_counter_inc_blocked(fs_info);
6778 	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6779 				&map_length, &bbio, mirror_num, 1);
6780 	if (ret) {
6781 		btrfs_bio_counter_dec(fs_info);
6782 		return errno_to_blk_status(ret);
6783 	}
6784 
6785 	total_devs = bbio->num_stripes;
6786 	bbio->orig_bio = first_bio;
6787 	bbio->private = first_bio->bi_private;
6788 	bbio->end_io = first_bio->bi_end_io;
6789 	bbio->fs_info = fs_info;
6790 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6791 
6792 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6793 	    ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6794 		/* In this case, map_length has been set to the length of
6795 		   a single stripe; not the whole write */
6796 		if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6797 			ret = raid56_parity_write(fs_info, bio, bbio,
6798 						  map_length);
6799 		} else {
6800 			ret = raid56_parity_recover(fs_info, bio, bbio,
6801 						    map_length, mirror_num, 1);
6802 		}
6803 
6804 		btrfs_bio_counter_dec(fs_info);
6805 		return errno_to_blk_status(ret);
6806 	}
6807 
6808 	if (map_length < length) {
6809 		btrfs_crit(fs_info,
6810 			   "mapping failed logical %llu bio len %llu len %llu",
6811 			   logical, length, map_length);
6812 		BUG();
6813 	}
6814 
6815 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6816 		dev = bbio->stripes[dev_nr].dev;
6817 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6818 						   &dev->dev_state) ||
6819 		    (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6820 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6821 			bbio_error(bbio, first_bio, logical);
6822 			continue;
6823 		}
6824 
6825 		if (dev_nr < total_devs - 1)
6826 			bio = btrfs_bio_clone(first_bio);
6827 		else
6828 			bio = first_bio;
6829 
6830 		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6831 	}
6832 	btrfs_bio_counter_dec(fs_info);
6833 	return BLK_STS_OK;
6834 }
6835 
6836 /*
6837  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6838  * return NULL.
6839  *
6840  * If devid and uuid are both specified, the match must be exact, otherwise
6841  * only devid is used.
6842  */
6843 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6844 				       u64 devid, u8 *uuid, u8 *fsid)
6845 {
6846 	struct btrfs_device *device;
6847 	struct btrfs_fs_devices *seed_devs;
6848 
6849 	if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6850 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
6851 			if (device->devid == devid &&
6852 			    (!uuid || memcmp(device->uuid, uuid,
6853 					     BTRFS_UUID_SIZE) == 0))
6854 				return device;
6855 		}
6856 	}
6857 
6858 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6859 		if (!fsid ||
6860 		    !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6861 			list_for_each_entry(device, &seed_devs->devices,
6862 					    dev_list) {
6863 				if (device->devid == devid &&
6864 				    (!uuid || memcmp(device->uuid, uuid,
6865 						     BTRFS_UUID_SIZE) == 0))
6866 					return device;
6867 			}
6868 		}
6869 	}
6870 
6871 	return NULL;
6872 }
6873 
6874 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6875 					    u64 devid, u8 *dev_uuid)
6876 {
6877 	struct btrfs_device *device;
6878 	unsigned int nofs_flag;
6879 
6880 	/*
6881 	 * We call this under the chunk_mutex, so we want to use NOFS for this
6882 	 * allocation, however we don't want to change btrfs_alloc_device() to
6883 	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6884 	 * places.
6885 	 */
6886 	nofs_flag = memalloc_nofs_save();
6887 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6888 	memalloc_nofs_restore(nofs_flag);
6889 	if (IS_ERR(device))
6890 		return device;
6891 
6892 	list_add(&device->dev_list, &fs_devices->devices);
6893 	device->fs_devices = fs_devices;
6894 	fs_devices->num_devices++;
6895 
6896 	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6897 	fs_devices->missing_devices++;
6898 
6899 	return device;
6900 }
6901 
6902 /**
6903  * btrfs_alloc_device - allocate struct btrfs_device
6904  * @fs_info:	used only for generating a new devid, can be NULL if
6905  *		devid is provided (i.e. @devid != NULL).
6906  * @devid:	a pointer to devid for this device.  If NULL a new devid
6907  *		is generated.
6908  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6909  *		is generated.
6910  *
6911  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6912  * on error.  Returned struct is not linked onto any lists and must be
6913  * destroyed with btrfs_free_device.
6914  */
6915 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6916 					const u64 *devid,
6917 					const u8 *uuid)
6918 {
6919 	struct btrfs_device *dev;
6920 	u64 tmp;
6921 
6922 	if (WARN_ON(!devid && !fs_info))
6923 		return ERR_PTR(-EINVAL);
6924 
6925 	dev = __alloc_device(fs_info);
6926 	if (IS_ERR(dev))
6927 		return dev;
6928 
6929 	if (devid)
6930 		tmp = *devid;
6931 	else {
6932 		int ret;
6933 
6934 		ret = find_next_devid(fs_info, &tmp);
6935 		if (ret) {
6936 			btrfs_free_device(dev);
6937 			return ERR_PTR(ret);
6938 		}
6939 	}
6940 	dev->devid = tmp;
6941 
6942 	if (uuid)
6943 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6944 	else
6945 		generate_random_uuid(dev->uuid);
6946 
6947 	return dev;
6948 }
6949 
6950 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6951 					u64 devid, u8 *uuid, bool error)
6952 {
6953 	if (error)
6954 		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6955 			      devid, uuid);
6956 	else
6957 		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6958 			      devid, uuid);
6959 }
6960 
6961 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6962 {
6963 	int index = btrfs_bg_flags_to_raid_index(type);
6964 	int ncopies = btrfs_raid_array[index].ncopies;
6965 	const int nparity = btrfs_raid_array[index].nparity;
6966 	int data_stripes;
6967 
6968 	if (nparity)
6969 		data_stripes = num_stripes - nparity;
6970 	else
6971 		data_stripes = num_stripes / ncopies;
6972 
6973 	return div_u64(chunk_len, data_stripes);
6974 }
6975 
6976 #if BITS_PER_LONG == 32
6977 /*
6978  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6979  * can't be accessed on 32bit systems.
6980  *
6981  * This function do mount time check to reject the fs if it already has
6982  * metadata chunk beyond that limit.
6983  */
6984 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6985 				  u64 logical, u64 length, u64 type)
6986 {
6987 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6988 		return 0;
6989 
6990 	if (logical + length < MAX_LFS_FILESIZE)
6991 		return 0;
6992 
6993 	btrfs_err_32bit_limit(fs_info);
6994 	return -EOVERFLOW;
6995 }
6996 
6997 /*
6998  * This is to give early warning for any metadata chunk reaching
6999  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
7000  * Although we can still access the metadata, it's not going to be possible
7001  * once the limit is reached.
7002  */
7003 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7004 				  u64 logical, u64 length, u64 type)
7005 {
7006 	if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7007 		return;
7008 
7009 	if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7010 		return;
7011 
7012 	btrfs_warn_32bit_limit(fs_info);
7013 }
7014 #endif
7015 
7016 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7017 			  struct btrfs_chunk *chunk)
7018 {
7019 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7020 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7021 	struct map_lookup *map;
7022 	struct extent_map *em;
7023 	u64 logical;
7024 	u64 length;
7025 	u64 devid;
7026 	u64 type;
7027 	u8 uuid[BTRFS_UUID_SIZE];
7028 	int num_stripes;
7029 	int ret;
7030 	int i;
7031 
7032 	logical = key->offset;
7033 	length = btrfs_chunk_length(leaf, chunk);
7034 	type = btrfs_chunk_type(leaf, chunk);
7035 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7036 
7037 #if BITS_PER_LONG == 32
7038 	ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7039 	if (ret < 0)
7040 		return ret;
7041 	warn_32bit_meta_chunk(fs_info, logical, length, type);
7042 #endif
7043 
7044 	/*
7045 	 * Only need to verify chunk item if we're reading from sys chunk array,
7046 	 * as chunk item in tree block is already verified by tree-checker.
7047 	 */
7048 	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7049 		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7050 		if (ret)
7051 			return ret;
7052 	}
7053 
7054 	read_lock(&map_tree->lock);
7055 	em = lookup_extent_mapping(map_tree, logical, 1);
7056 	read_unlock(&map_tree->lock);
7057 
7058 	/* already mapped? */
7059 	if (em && em->start <= logical && em->start + em->len > logical) {
7060 		free_extent_map(em);
7061 		return 0;
7062 	} else if (em) {
7063 		free_extent_map(em);
7064 	}
7065 
7066 	em = alloc_extent_map();
7067 	if (!em)
7068 		return -ENOMEM;
7069 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7070 	if (!map) {
7071 		free_extent_map(em);
7072 		return -ENOMEM;
7073 	}
7074 
7075 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7076 	em->map_lookup = map;
7077 	em->start = logical;
7078 	em->len = length;
7079 	em->orig_start = 0;
7080 	em->block_start = 0;
7081 	em->block_len = em->len;
7082 
7083 	map->num_stripes = num_stripes;
7084 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
7085 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
7086 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7087 	map->type = type;
7088 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7089 	map->verified_stripes = 0;
7090 	em->orig_block_len = calc_stripe_length(type, em->len,
7091 						map->num_stripes);
7092 	for (i = 0; i < num_stripes; i++) {
7093 		map->stripes[i].physical =
7094 			btrfs_stripe_offset_nr(leaf, chunk, i);
7095 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7096 		read_extent_buffer(leaf, uuid, (unsigned long)
7097 				   btrfs_stripe_dev_uuid_nr(chunk, i),
7098 				   BTRFS_UUID_SIZE);
7099 		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
7100 							devid, uuid, NULL);
7101 		if (!map->stripes[i].dev &&
7102 		    !btrfs_test_opt(fs_info, DEGRADED)) {
7103 			free_extent_map(em);
7104 			btrfs_report_missing_device(fs_info, devid, uuid, true);
7105 			return -ENOENT;
7106 		}
7107 		if (!map->stripes[i].dev) {
7108 			map->stripes[i].dev =
7109 				add_missing_dev(fs_info->fs_devices, devid,
7110 						uuid);
7111 			if (IS_ERR(map->stripes[i].dev)) {
7112 				free_extent_map(em);
7113 				btrfs_err(fs_info,
7114 					"failed to init missing dev %llu: %ld",
7115 					devid, PTR_ERR(map->stripes[i].dev));
7116 				return PTR_ERR(map->stripes[i].dev);
7117 			}
7118 			btrfs_report_missing_device(fs_info, devid, uuid, false);
7119 		}
7120 		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7121 				&(map->stripes[i].dev->dev_state));
7122 
7123 	}
7124 
7125 	write_lock(&map_tree->lock);
7126 	ret = add_extent_mapping(map_tree, em, 0);
7127 	write_unlock(&map_tree->lock);
7128 	if (ret < 0) {
7129 		btrfs_err(fs_info,
7130 			  "failed to add chunk map, start=%llu len=%llu: %d",
7131 			  em->start, em->len, ret);
7132 	}
7133 	free_extent_map(em);
7134 
7135 	return ret;
7136 }
7137 
7138 static void fill_device_from_item(struct extent_buffer *leaf,
7139 				 struct btrfs_dev_item *dev_item,
7140 				 struct btrfs_device *device)
7141 {
7142 	unsigned long ptr;
7143 
7144 	device->devid = btrfs_device_id(leaf, dev_item);
7145 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7146 	device->total_bytes = device->disk_total_bytes;
7147 	device->commit_total_bytes = device->disk_total_bytes;
7148 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7149 	device->commit_bytes_used = device->bytes_used;
7150 	device->type = btrfs_device_type(leaf, dev_item);
7151 	device->io_align = btrfs_device_io_align(leaf, dev_item);
7152 	device->io_width = btrfs_device_io_width(leaf, dev_item);
7153 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7154 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7155 	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7156 
7157 	ptr = btrfs_device_uuid(dev_item);
7158 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7159 }
7160 
7161 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7162 						  u8 *fsid)
7163 {
7164 	struct btrfs_fs_devices *fs_devices;
7165 	int ret;
7166 
7167 	lockdep_assert_held(&uuid_mutex);
7168 	ASSERT(fsid);
7169 
7170 	/* This will match only for multi-device seed fs */
7171 	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7172 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7173 			return fs_devices;
7174 
7175 
7176 	fs_devices = find_fsid(fsid, NULL);
7177 	if (!fs_devices) {
7178 		if (!btrfs_test_opt(fs_info, DEGRADED))
7179 			return ERR_PTR(-ENOENT);
7180 
7181 		fs_devices = alloc_fs_devices(fsid, NULL);
7182 		if (IS_ERR(fs_devices))
7183 			return fs_devices;
7184 
7185 		fs_devices->seeding = true;
7186 		fs_devices->opened = 1;
7187 		return fs_devices;
7188 	}
7189 
7190 	/*
7191 	 * Upon first call for a seed fs fsid, just create a private copy of the
7192 	 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7193 	 */
7194 	fs_devices = clone_fs_devices(fs_devices);
7195 	if (IS_ERR(fs_devices))
7196 		return fs_devices;
7197 
7198 	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7199 	if (ret) {
7200 		free_fs_devices(fs_devices);
7201 		return ERR_PTR(ret);
7202 	}
7203 
7204 	if (!fs_devices->seeding) {
7205 		close_fs_devices(fs_devices);
7206 		free_fs_devices(fs_devices);
7207 		return ERR_PTR(-EINVAL);
7208 	}
7209 
7210 	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7211 
7212 	return fs_devices;
7213 }
7214 
7215 static int read_one_dev(struct extent_buffer *leaf,
7216 			struct btrfs_dev_item *dev_item)
7217 {
7218 	struct btrfs_fs_info *fs_info = leaf->fs_info;
7219 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7220 	struct btrfs_device *device;
7221 	u64 devid;
7222 	int ret;
7223 	u8 fs_uuid[BTRFS_FSID_SIZE];
7224 	u8 dev_uuid[BTRFS_UUID_SIZE];
7225 
7226 	devid = btrfs_device_id(leaf, dev_item);
7227 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7228 			   BTRFS_UUID_SIZE);
7229 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7230 			   BTRFS_FSID_SIZE);
7231 
7232 	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7233 		fs_devices = open_seed_devices(fs_info, fs_uuid);
7234 		if (IS_ERR(fs_devices))
7235 			return PTR_ERR(fs_devices);
7236 	}
7237 
7238 	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
7239 				   fs_uuid);
7240 	if (!device) {
7241 		if (!btrfs_test_opt(fs_info, DEGRADED)) {
7242 			btrfs_report_missing_device(fs_info, devid,
7243 							dev_uuid, true);
7244 			return -ENOENT;
7245 		}
7246 
7247 		device = add_missing_dev(fs_devices, devid, dev_uuid);
7248 		if (IS_ERR(device)) {
7249 			btrfs_err(fs_info,
7250 				"failed to add missing dev %llu: %ld",
7251 				devid, PTR_ERR(device));
7252 			return PTR_ERR(device);
7253 		}
7254 		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7255 	} else {
7256 		if (!device->bdev) {
7257 			if (!btrfs_test_opt(fs_info, DEGRADED)) {
7258 				btrfs_report_missing_device(fs_info,
7259 						devid, dev_uuid, true);
7260 				return -ENOENT;
7261 			}
7262 			btrfs_report_missing_device(fs_info, devid,
7263 							dev_uuid, false);
7264 		}
7265 
7266 		if (!device->bdev &&
7267 		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7268 			/*
7269 			 * this happens when a device that was properly setup
7270 			 * in the device info lists suddenly goes bad.
7271 			 * device->bdev is NULL, and so we have to set
7272 			 * device->missing to one here
7273 			 */
7274 			device->fs_devices->missing_devices++;
7275 			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7276 		}
7277 
7278 		/* Move the device to its own fs_devices */
7279 		if (device->fs_devices != fs_devices) {
7280 			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7281 							&device->dev_state));
7282 
7283 			list_move(&device->dev_list, &fs_devices->devices);
7284 			device->fs_devices->num_devices--;
7285 			fs_devices->num_devices++;
7286 
7287 			device->fs_devices->missing_devices--;
7288 			fs_devices->missing_devices++;
7289 
7290 			device->fs_devices = fs_devices;
7291 		}
7292 	}
7293 
7294 	if (device->fs_devices != fs_info->fs_devices) {
7295 		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7296 		if (device->generation !=
7297 		    btrfs_device_generation(leaf, dev_item))
7298 			return -EINVAL;
7299 	}
7300 
7301 	fill_device_from_item(leaf, dev_item, device);
7302 	if (device->bdev) {
7303 		u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7304 
7305 		if (device->total_bytes > max_total_bytes) {
7306 			btrfs_err(fs_info,
7307 			"device total_bytes should be at most %llu but found %llu",
7308 				  max_total_bytes, device->total_bytes);
7309 			return -EINVAL;
7310 		}
7311 	}
7312 	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7313 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7314 	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7315 		device->fs_devices->total_rw_bytes += device->total_bytes;
7316 		atomic64_add(device->total_bytes - device->bytes_used,
7317 				&fs_info->free_chunk_space);
7318 	}
7319 	ret = 0;
7320 	return ret;
7321 }
7322 
7323 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7324 {
7325 	struct btrfs_root *root = fs_info->tree_root;
7326 	struct btrfs_super_block *super_copy = fs_info->super_copy;
7327 	struct extent_buffer *sb;
7328 	struct btrfs_disk_key *disk_key;
7329 	struct btrfs_chunk *chunk;
7330 	u8 *array_ptr;
7331 	unsigned long sb_array_offset;
7332 	int ret = 0;
7333 	u32 num_stripes;
7334 	u32 array_size;
7335 	u32 len = 0;
7336 	u32 cur_offset;
7337 	u64 type;
7338 	struct btrfs_key key;
7339 
7340 	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7341 	/*
7342 	 * This will create extent buffer of nodesize, superblock size is
7343 	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7344 	 * overallocate but we can keep it as-is, only the first page is used.
7345 	 */
7346 	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7347 					  root->root_key.objectid, 0);
7348 	if (IS_ERR(sb))
7349 		return PTR_ERR(sb);
7350 	set_extent_buffer_uptodate(sb);
7351 	/*
7352 	 * The sb extent buffer is artificial and just used to read the system array.
7353 	 * set_extent_buffer_uptodate() call does not properly mark all it's
7354 	 * pages up-to-date when the page is larger: extent does not cover the
7355 	 * whole page and consequently check_page_uptodate does not find all
7356 	 * the page's extents up-to-date (the hole beyond sb),
7357 	 * write_extent_buffer then triggers a WARN_ON.
7358 	 *
7359 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7360 	 * but sb spans only this function. Add an explicit SetPageUptodate call
7361 	 * to silence the warning eg. on PowerPC 64.
7362 	 */
7363 	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7364 		SetPageUptodate(sb->pages[0]);
7365 
7366 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7367 	array_size = btrfs_super_sys_array_size(super_copy);
7368 
7369 	array_ptr = super_copy->sys_chunk_array;
7370 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7371 	cur_offset = 0;
7372 
7373 	while (cur_offset < array_size) {
7374 		disk_key = (struct btrfs_disk_key *)array_ptr;
7375 		len = sizeof(*disk_key);
7376 		if (cur_offset + len > array_size)
7377 			goto out_short_read;
7378 
7379 		btrfs_disk_key_to_cpu(&key, disk_key);
7380 
7381 		array_ptr += len;
7382 		sb_array_offset += len;
7383 		cur_offset += len;
7384 
7385 		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7386 			btrfs_err(fs_info,
7387 			    "unexpected item type %u in sys_array at offset %u",
7388 				  (u32)key.type, cur_offset);
7389 			ret = -EIO;
7390 			break;
7391 		}
7392 
7393 		chunk = (struct btrfs_chunk *)sb_array_offset;
7394 		/*
7395 		 * At least one btrfs_chunk with one stripe must be present,
7396 		 * exact stripe count check comes afterwards
7397 		 */
7398 		len = btrfs_chunk_item_size(1);
7399 		if (cur_offset + len > array_size)
7400 			goto out_short_read;
7401 
7402 		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7403 		if (!num_stripes) {
7404 			btrfs_err(fs_info,
7405 			"invalid number of stripes %u in sys_array at offset %u",
7406 				  num_stripes, cur_offset);
7407 			ret = -EIO;
7408 			break;
7409 		}
7410 
7411 		type = btrfs_chunk_type(sb, chunk);
7412 		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7413 			btrfs_err(fs_info,
7414 			"invalid chunk type %llu in sys_array at offset %u",
7415 				  type, cur_offset);
7416 			ret = -EIO;
7417 			break;
7418 		}
7419 
7420 		len = btrfs_chunk_item_size(num_stripes);
7421 		if (cur_offset + len > array_size)
7422 			goto out_short_read;
7423 
7424 		ret = read_one_chunk(&key, sb, chunk);
7425 		if (ret)
7426 			break;
7427 
7428 		array_ptr += len;
7429 		sb_array_offset += len;
7430 		cur_offset += len;
7431 	}
7432 	clear_extent_buffer_uptodate(sb);
7433 	free_extent_buffer_stale(sb);
7434 	return ret;
7435 
7436 out_short_read:
7437 	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7438 			len, cur_offset);
7439 	clear_extent_buffer_uptodate(sb);
7440 	free_extent_buffer_stale(sb);
7441 	return -EIO;
7442 }
7443 
7444 /*
7445  * Check if all chunks in the fs are OK for read-write degraded mount
7446  *
7447  * If the @failing_dev is specified, it's accounted as missing.
7448  *
7449  * Return true if all chunks meet the minimal RW mount requirements.
7450  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7451  */
7452 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7453 					struct btrfs_device *failing_dev)
7454 {
7455 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7456 	struct extent_map *em;
7457 	u64 next_start = 0;
7458 	bool ret = true;
7459 
7460 	read_lock(&map_tree->lock);
7461 	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7462 	read_unlock(&map_tree->lock);
7463 	/* No chunk at all? Return false anyway */
7464 	if (!em) {
7465 		ret = false;
7466 		goto out;
7467 	}
7468 	while (em) {
7469 		struct map_lookup *map;
7470 		int missing = 0;
7471 		int max_tolerated;
7472 		int i;
7473 
7474 		map = em->map_lookup;
7475 		max_tolerated =
7476 			btrfs_get_num_tolerated_disk_barrier_failures(
7477 					map->type);
7478 		for (i = 0; i < map->num_stripes; i++) {
7479 			struct btrfs_device *dev = map->stripes[i].dev;
7480 
7481 			if (!dev || !dev->bdev ||
7482 			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7483 			    dev->last_flush_error)
7484 				missing++;
7485 			else if (failing_dev && failing_dev == dev)
7486 				missing++;
7487 		}
7488 		if (missing > max_tolerated) {
7489 			if (!failing_dev)
7490 				btrfs_warn(fs_info,
7491 	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
7492 				   em->start, missing, max_tolerated);
7493 			free_extent_map(em);
7494 			ret = false;
7495 			goto out;
7496 		}
7497 		next_start = extent_map_end(em);
7498 		free_extent_map(em);
7499 
7500 		read_lock(&map_tree->lock);
7501 		em = lookup_extent_mapping(map_tree, next_start,
7502 					   (u64)(-1) - next_start);
7503 		read_unlock(&map_tree->lock);
7504 	}
7505 out:
7506 	return ret;
7507 }
7508 
7509 static void readahead_tree_node_children(struct extent_buffer *node)
7510 {
7511 	int i;
7512 	const int nr_items = btrfs_header_nritems(node);
7513 
7514 	for (i = 0; i < nr_items; i++)
7515 		btrfs_readahead_node_child(node, i);
7516 }
7517 
7518 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7519 {
7520 	struct btrfs_root *root = fs_info->chunk_root;
7521 	struct btrfs_path *path;
7522 	struct extent_buffer *leaf;
7523 	struct btrfs_key key;
7524 	struct btrfs_key found_key;
7525 	int ret;
7526 	int slot;
7527 	u64 total_dev = 0;
7528 	u64 last_ra_node = 0;
7529 
7530 	path = btrfs_alloc_path();
7531 	if (!path)
7532 		return -ENOMEM;
7533 
7534 	/*
7535 	 * uuid_mutex is needed only if we are mounting a sprout FS
7536 	 * otherwise we don't need it.
7537 	 */
7538 	mutex_lock(&uuid_mutex);
7539 
7540 	/*
7541 	 * It is possible for mount and umount to race in such a way that
7542 	 * we execute this code path, but open_fs_devices failed to clear
7543 	 * total_rw_bytes. We certainly want it cleared before reading the
7544 	 * device items, so clear it here.
7545 	 */
7546 	fs_info->fs_devices->total_rw_bytes = 0;
7547 
7548 	/*
7549 	 * Read all device items, and then all the chunk items. All
7550 	 * device items are found before any chunk item (their object id
7551 	 * is smaller than the lowest possible object id for a chunk
7552 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7553 	 */
7554 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7555 	key.offset = 0;
7556 	key.type = 0;
7557 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7558 	if (ret < 0)
7559 		goto error;
7560 	while (1) {
7561 		struct extent_buffer *node;
7562 
7563 		leaf = path->nodes[0];
7564 		slot = path->slots[0];
7565 		if (slot >= btrfs_header_nritems(leaf)) {
7566 			ret = btrfs_next_leaf(root, path);
7567 			if (ret == 0)
7568 				continue;
7569 			if (ret < 0)
7570 				goto error;
7571 			break;
7572 		}
7573 		/*
7574 		 * The nodes on level 1 are not locked but we don't need to do
7575 		 * that during mount time as nothing else can access the tree
7576 		 */
7577 		node = path->nodes[1];
7578 		if (node) {
7579 			if (last_ra_node != node->start) {
7580 				readahead_tree_node_children(node);
7581 				last_ra_node = node->start;
7582 			}
7583 		}
7584 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7585 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7586 			struct btrfs_dev_item *dev_item;
7587 			dev_item = btrfs_item_ptr(leaf, slot,
7588 						  struct btrfs_dev_item);
7589 			ret = read_one_dev(leaf, dev_item);
7590 			if (ret)
7591 				goto error;
7592 			total_dev++;
7593 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7594 			struct btrfs_chunk *chunk;
7595 
7596 			/*
7597 			 * We are only called at mount time, so no need to take
7598 			 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7599 			 * we always lock first fs_info->chunk_mutex before
7600 			 * acquiring any locks on the chunk tree. This is a
7601 			 * requirement for chunk allocation, see the comment on
7602 			 * top of btrfs_chunk_alloc() for details.
7603 			 */
7604 			ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7605 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7606 			ret = read_one_chunk(&found_key, leaf, chunk);
7607 			if (ret)
7608 				goto error;
7609 		}
7610 		path->slots[0]++;
7611 	}
7612 
7613 	/*
7614 	 * After loading chunk tree, we've got all device information,
7615 	 * do another round of validation checks.
7616 	 */
7617 	if (total_dev != fs_info->fs_devices->total_devices) {
7618 		btrfs_err(fs_info,
7619 	   "super_num_devices %llu mismatch with num_devices %llu found here",
7620 			  btrfs_super_num_devices(fs_info->super_copy),
7621 			  total_dev);
7622 		ret = -EINVAL;
7623 		goto error;
7624 	}
7625 	if (btrfs_super_total_bytes(fs_info->super_copy) <
7626 	    fs_info->fs_devices->total_rw_bytes) {
7627 		btrfs_err(fs_info,
7628 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7629 			  btrfs_super_total_bytes(fs_info->super_copy),
7630 			  fs_info->fs_devices->total_rw_bytes);
7631 		ret = -EINVAL;
7632 		goto error;
7633 	}
7634 	ret = 0;
7635 error:
7636 	mutex_unlock(&uuid_mutex);
7637 
7638 	btrfs_free_path(path);
7639 	return ret;
7640 }
7641 
7642 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7643 {
7644 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7645 	struct btrfs_device *device;
7646 
7647 	fs_devices->fs_info = fs_info;
7648 
7649 	mutex_lock(&fs_devices->device_list_mutex);
7650 	list_for_each_entry(device, &fs_devices->devices, dev_list)
7651 		device->fs_info = fs_info;
7652 
7653 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7654 		list_for_each_entry(device, &seed_devs->devices, dev_list)
7655 			device->fs_info = fs_info;
7656 
7657 		seed_devs->fs_info = fs_info;
7658 	}
7659 	mutex_unlock(&fs_devices->device_list_mutex);
7660 }
7661 
7662 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7663 				 const struct btrfs_dev_stats_item *ptr,
7664 				 int index)
7665 {
7666 	u64 val;
7667 
7668 	read_extent_buffer(eb, &val,
7669 			   offsetof(struct btrfs_dev_stats_item, values) +
7670 			    ((unsigned long)ptr) + (index * sizeof(u64)),
7671 			   sizeof(val));
7672 	return val;
7673 }
7674 
7675 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7676 				      struct btrfs_dev_stats_item *ptr,
7677 				      int index, u64 val)
7678 {
7679 	write_extent_buffer(eb, &val,
7680 			    offsetof(struct btrfs_dev_stats_item, values) +
7681 			     ((unsigned long)ptr) + (index * sizeof(u64)),
7682 			    sizeof(val));
7683 }
7684 
7685 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7686 				       struct btrfs_path *path)
7687 {
7688 	struct btrfs_dev_stats_item *ptr;
7689 	struct extent_buffer *eb;
7690 	struct btrfs_key key;
7691 	int item_size;
7692 	int i, ret, slot;
7693 
7694 	if (!device->fs_info->dev_root)
7695 		return 0;
7696 
7697 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7698 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7699 	key.offset = device->devid;
7700 	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7701 	if (ret) {
7702 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7703 			btrfs_dev_stat_set(device, i, 0);
7704 		device->dev_stats_valid = 1;
7705 		btrfs_release_path(path);
7706 		return ret < 0 ? ret : 0;
7707 	}
7708 	slot = path->slots[0];
7709 	eb = path->nodes[0];
7710 	item_size = btrfs_item_size_nr(eb, slot);
7711 
7712 	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7713 
7714 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7715 		if (item_size >= (1 + i) * sizeof(__le64))
7716 			btrfs_dev_stat_set(device, i,
7717 					   btrfs_dev_stats_value(eb, ptr, i));
7718 		else
7719 			btrfs_dev_stat_set(device, i, 0);
7720 	}
7721 
7722 	device->dev_stats_valid = 1;
7723 	btrfs_dev_stat_print_on_load(device);
7724 	btrfs_release_path(path);
7725 
7726 	return 0;
7727 }
7728 
7729 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7730 {
7731 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7732 	struct btrfs_device *device;
7733 	struct btrfs_path *path = NULL;
7734 	int ret = 0;
7735 
7736 	path = btrfs_alloc_path();
7737 	if (!path)
7738 		return -ENOMEM;
7739 
7740 	mutex_lock(&fs_devices->device_list_mutex);
7741 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7742 		ret = btrfs_device_init_dev_stats(device, path);
7743 		if (ret)
7744 			goto out;
7745 	}
7746 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7747 		list_for_each_entry(device, &seed_devs->devices, dev_list) {
7748 			ret = btrfs_device_init_dev_stats(device, path);
7749 			if (ret)
7750 				goto out;
7751 		}
7752 	}
7753 out:
7754 	mutex_unlock(&fs_devices->device_list_mutex);
7755 
7756 	btrfs_free_path(path);
7757 	return ret;
7758 }
7759 
7760 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7761 				struct btrfs_device *device)
7762 {
7763 	struct btrfs_fs_info *fs_info = trans->fs_info;
7764 	struct btrfs_root *dev_root = fs_info->dev_root;
7765 	struct btrfs_path *path;
7766 	struct btrfs_key key;
7767 	struct extent_buffer *eb;
7768 	struct btrfs_dev_stats_item *ptr;
7769 	int ret;
7770 	int i;
7771 
7772 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
7773 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
7774 	key.offset = device->devid;
7775 
7776 	path = btrfs_alloc_path();
7777 	if (!path)
7778 		return -ENOMEM;
7779 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7780 	if (ret < 0) {
7781 		btrfs_warn_in_rcu(fs_info,
7782 			"error %d while searching for dev_stats item for device %s",
7783 			      ret, rcu_str_deref(device->name));
7784 		goto out;
7785 	}
7786 
7787 	if (ret == 0 &&
7788 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7789 		/* need to delete old one and insert a new one */
7790 		ret = btrfs_del_item(trans, dev_root, path);
7791 		if (ret != 0) {
7792 			btrfs_warn_in_rcu(fs_info,
7793 				"delete too small dev_stats item for device %s failed %d",
7794 				      rcu_str_deref(device->name), ret);
7795 			goto out;
7796 		}
7797 		ret = 1;
7798 	}
7799 
7800 	if (ret == 1) {
7801 		/* need to insert a new item */
7802 		btrfs_release_path(path);
7803 		ret = btrfs_insert_empty_item(trans, dev_root, path,
7804 					      &key, sizeof(*ptr));
7805 		if (ret < 0) {
7806 			btrfs_warn_in_rcu(fs_info,
7807 				"insert dev_stats item for device %s failed %d",
7808 				rcu_str_deref(device->name), ret);
7809 			goto out;
7810 		}
7811 	}
7812 
7813 	eb = path->nodes[0];
7814 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7815 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7816 		btrfs_set_dev_stats_value(eb, ptr, i,
7817 					  btrfs_dev_stat_read(device, i));
7818 	btrfs_mark_buffer_dirty(eb);
7819 
7820 out:
7821 	btrfs_free_path(path);
7822 	return ret;
7823 }
7824 
7825 /*
7826  * called from commit_transaction. Writes all changed device stats to disk.
7827  */
7828 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7829 {
7830 	struct btrfs_fs_info *fs_info = trans->fs_info;
7831 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7832 	struct btrfs_device *device;
7833 	int stats_cnt;
7834 	int ret = 0;
7835 
7836 	mutex_lock(&fs_devices->device_list_mutex);
7837 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
7838 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
7839 		if (!device->dev_stats_valid || stats_cnt == 0)
7840 			continue;
7841 
7842 
7843 		/*
7844 		 * There is a LOAD-LOAD control dependency between the value of
7845 		 * dev_stats_ccnt and updating the on-disk values which requires
7846 		 * reading the in-memory counters. Such control dependencies
7847 		 * require explicit read memory barriers.
7848 		 *
7849 		 * This memory barriers pairs with smp_mb__before_atomic in
7850 		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7851 		 * barrier implied by atomic_xchg in
7852 		 * btrfs_dev_stats_read_and_reset
7853 		 */
7854 		smp_rmb();
7855 
7856 		ret = update_dev_stat_item(trans, device);
7857 		if (!ret)
7858 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7859 	}
7860 	mutex_unlock(&fs_devices->device_list_mutex);
7861 
7862 	return ret;
7863 }
7864 
7865 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7866 {
7867 	btrfs_dev_stat_inc(dev, index);
7868 	btrfs_dev_stat_print_on_error(dev);
7869 }
7870 
7871 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7872 {
7873 	if (!dev->dev_stats_valid)
7874 		return;
7875 	btrfs_err_rl_in_rcu(dev->fs_info,
7876 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7877 			   rcu_str_deref(dev->name),
7878 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7879 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7880 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7881 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7882 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7883 }
7884 
7885 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7886 {
7887 	int i;
7888 
7889 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7890 		if (btrfs_dev_stat_read(dev, i) != 0)
7891 			break;
7892 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
7893 		return; /* all values == 0, suppress message */
7894 
7895 	btrfs_info_in_rcu(dev->fs_info,
7896 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7897 	       rcu_str_deref(dev->name),
7898 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7899 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7900 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7901 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7902 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7903 }
7904 
7905 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7906 			struct btrfs_ioctl_get_dev_stats *stats)
7907 {
7908 	struct btrfs_device *dev;
7909 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7910 	int i;
7911 
7912 	mutex_lock(&fs_devices->device_list_mutex);
7913 	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
7914 	mutex_unlock(&fs_devices->device_list_mutex);
7915 
7916 	if (!dev) {
7917 		btrfs_warn(fs_info, "get dev_stats failed, device not found");
7918 		return -ENODEV;
7919 	} else if (!dev->dev_stats_valid) {
7920 		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7921 		return -ENODEV;
7922 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7923 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7924 			if (stats->nr_items > i)
7925 				stats->values[i] =
7926 					btrfs_dev_stat_read_and_reset(dev, i);
7927 			else
7928 				btrfs_dev_stat_set(dev, i, 0);
7929 		}
7930 		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7931 			   current->comm, task_pid_nr(current));
7932 	} else {
7933 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7934 			if (stats->nr_items > i)
7935 				stats->values[i] = btrfs_dev_stat_read(dev, i);
7936 	}
7937 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7938 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7939 	return 0;
7940 }
7941 
7942 /*
7943  * Update the size and bytes used for each device where it changed.  This is
7944  * delayed since we would otherwise get errors while writing out the
7945  * superblocks.
7946  *
7947  * Must be invoked during transaction commit.
7948  */
7949 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7950 {
7951 	struct btrfs_device *curr, *next;
7952 
7953 	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7954 
7955 	if (list_empty(&trans->dev_update_list))
7956 		return;
7957 
7958 	/*
7959 	 * We don't need the device_list_mutex here.  This list is owned by the
7960 	 * transaction and the transaction must complete before the device is
7961 	 * released.
7962 	 */
7963 	mutex_lock(&trans->fs_info->chunk_mutex);
7964 	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7965 				 post_commit_list) {
7966 		list_del_init(&curr->post_commit_list);
7967 		curr->commit_total_bytes = curr->disk_total_bytes;
7968 		curr->commit_bytes_used = curr->bytes_used;
7969 	}
7970 	mutex_unlock(&trans->fs_info->chunk_mutex);
7971 }
7972 
7973 /*
7974  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7975  */
7976 int btrfs_bg_type_to_factor(u64 flags)
7977 {
7978 	const int index = btrfs_bg_flags_to_raid_index(flags);
7979 
7980 	return btrfs_raid_array[index].ncopies;
7981 }
7982 
7983 
7984 
7985 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7986 				 u64 chunk_offset, u64 devid,
7987 				 u64 physical_offset, u64 physical_len)
7988 {
7989 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7990 	struct extent_map *em;
7991 	struct map_lookup *map;
7992 	struct btrfs_device *dev;
7993 	u64 stripe_len;
7994 	bool found = false;
7995 	int ret = 0;
7996 	int i;
7997 
7998 	read_lock(&em_tree->lock);
7999 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
8000 	read_unlock(&em_tree->lock);
8001 
8002 	if (!em) {
8003 		btrfs_err(fs_info,
8004 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8005 			  physical_offset, devid);
8006 		ret = -EUCLEAN;
8007 		goto out;
8008 	}
8009 
8010 	map = em->map_lookup;
8011 	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
8012 	if (physical_len != stripe_len) {
8013 		btrfs_err(fs_info,
8014 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8015 			  physical_offset, devid, em->start, physical_len,
8016 			  stripe_len);
8017 		ret = -EUCLEAN;
8018 		goto out;
8019 	}
8020 
8021 	for (i = 0; i < map->num_stripes; i++) {
8022 		if (map->stripes[i].dev->devid == devid &&
8023 		    map->stripes[i].physical == physical_offset) {
8024 			found = true;
8025 			if (map->verified_stripes >= map->num_stripes) {
8026 				btrfs_err(fs_info,
8027 				"too many dev extents for chunk %llu found",
8028 					  em->start);
8029 				ret = -EUCLEAN;
8030 				goto out;
8031 			}
8032 			map->verified_stripes++;
8033 			break;
8034 		}
8035 	}
8036 	if (!found) {
8037 		btrfs_err(fs_info,
8038 	"dev extent physical offset %llu devid %llu has no corresponding chunk",
8039 			physical_offset, devid);
8040 		ret = -EUCLEAN;
8041 	}
8042 
8043 	/* Make sure no dev extent is beyond device boundary */
8044 	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
8045 	if (!dev) {
8046 		btrfs_err(fs_info, "failed to find devid %llu", devid);
8047 		ret = -EUCLEAN;
8048 		goto out;
8049 	}
8050 
8051 	if (physical_offset + physical_len > dev->disk_total_bytes) {
8052 		btrfs_err(fs_info,
8053 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8054 			  devid, physical_offset, physical_len,
8055 			  dev->disk_total_bytes);
8056 		ret = -EUCLEAN;
8057 		goto out;
8058 	}
8059 
8060 	if (dev->zone_info) {
8061 		u64 zone_size = dev->zone_info->zone_size;
8062 
8063 		if (!IS_ALIGNED(physical_offset, zone_size) ||
8064 		    !IS_ALIGNED(physical_len, zone_size)) {
8065 			btrfs_err(fs_info,
8066 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8067 				  devid, physical_offset, physical_len);
8068 			ret = -EUCLEAN;
8069 			goto out;
8070 		}
8071 	}
8072 
8073 out:
8074 	free_extent_map(em);
8075 	return ret;
8076 }
8077 
8078 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8079 {
8080 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8081 	struct extent_map *em;
8082 	struct rb_node *node;
8083 	int ret = 0;
8084 
8085 	read_lock(&em_tree->lock);
8086 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8087 		em = rb_entry(node, struct extent_map, rb_node);
8088 		if (em->map_lookup->num_stripes !=
8089 		    em->map_lookup->verified_stripes) {
8090 			btrfs_err(fs_info,
8091 			"chunk %llu has missing dev extent, have %d expect %d",
8092 				  em->start, em->map_lookup->verified_stripes,
8093 				  em->map_lookup->num_stripes);
8094 			ret = -EUCLEAN;
8095 			goto out;
8096 		}
8097 	}
8098 out:
8099 	read_unlock(&em_tree->lock);
8100 	return ret;
8101 }
8102 
8103 /*
8104  * Ensure that all dev extents are mapped to correct chunk, otherwise
8105  * later chunk allocation/free would cause unexpected behavior.
8106  *
8107  * NOTE: This will iterate through the whole device tree, which should be of
8108  * the same size level as the chunk tree.  This slightly increases mount time.
8109  */
8110 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8111 {
8112 	struct btrfs_path *path;
8113 	struct btrfs_root *root = fs_info->dev_root;
8114 	struct btrfs_key key;
8115 	u64 prev_devid = 0;
8116 	u64 prev_dev_ext_end = 0;
8117 	int ret = 0;
8118 
8119 	/*
8120 	 * We don't have a dev_root because we mounted with ignorebadroots and
8121 	 * failed to load the root, so we want to skip the verification in this
8122 	 * case for sure.
8123 	 *
8124 	 * However if the dev root is fine, but the tree itself is corrupted
8125 	 * we'd still fail to mount.  This verification is only to make sure
8126 	 * writes can happen safely, so instead just bypass this check
8127 	 * completely in the case of IGNOREBADROOTS.
8128 	 */
8129 	if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8130 		return 0;
8131 
8132 	key.objectid = 1;
8133 	key.type = BTRFS_DEV_EXTENT_KEY;
8134 	key.offset = 0;
8135 
8136 	path = btrfs_alloc_path();
8137 	if (!path)
8138 		return -ENOMEM;
8139 
8140 	path->reada = READA_FORWARD;
8141 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8142 	if (ret < 0)
8143 		goto out;
8144 
8145 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8146 		ret = btrfs_next_item(root, path);
8147 		if (ret < 0)
8148 			goto out;
8149 		/* No dev extents at all? Not good */
8150 		if (ret > 0) {
8151 			ret = -EUCLEAN;
8152 			goto out;
8153 		}
8154 	}
8155 	while (1) {
8156 		struct extent_buffer *leaf = path->nodes[0];
8157 		struct btrfs_dev_extent *dext;
8158 		int slot = path->slots[0];
8159 		u64 chunk_offset;
8160 		u64 physical_offset;
8161 		u64 physical_len;
8162 		u64 devid;
8163 
8164 		btrfs_item_key_to_cpu(leaf, &key, slot);
8165 		if (key.type != BTRFS_DEV_EXTENT_KEY)
8166 			break;
8167 		devid = key.objectid;
8168 		physical_offset = key.offset;
8169 
8170 		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8171 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8172 		physical_len = btrfs_dev_extent_length(leaf, dext);
8173 
8174 		/* Check if this dev extent overlaps with the previous one */
8175 		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8176 			btrfs_err(fs_info,
8177 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8178 				  devid, physical_offset, prev_dev_ext_end);
8179 			ret = -EUCLEAN;
8180 			goto out;
8181 		}
8182 
8183 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8184 					    physical_offset, physical_len);
8185 		if (ret < 0)
8186 			goto out;
8187 		prev_devid = devid;
8188 		prev_dev_ext_end = physical_offset + physical_len;
8189 
8190 		ret = btrfs_next_item(root, path);
8191 		if (ret < 0)
8192 			goto out;
8193 		if (ret > 0) {
8194 			ret = 0;
8195 			break;
8196 		}
8197 	}
8198 
8199 	/* Ensure all chunks have corresponding dev extents */
8200 	ret = verify_chunk_dev_extent_mapping(fs_info);
8201 out:
8202 	btrfs_free_path(path);
8203 	return ret;
8204 }
8205 
8206 /*
8207  * Check whether the given block group or device is pinned by any inode being
8208  * used as a swapfile.
8209  */
8210 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8211 {
8212 	struct btrfs_swapfile_pin *sp;
8213 	struct rb_node *node;
8214 
8215 	spin_lock(&fs_info->swapfile_pins_lock);
8216 	node = fs_info->swapfile_pins.rb_node;
8217 	while (node) {
8218 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8219 		if (ptr < sp->ptr)
8220 			node = node->rb_left;
8221 		else if (ptr > sp->ptr)
8222 			node = node->rb_right;
8223 		else
8224 			break;
8225 	}
8226 	spin_unlock(&fs_info->swapfile_pins_lock);
8227 	return node != NULL;
8228 }
8229 
8230 static int relocating_repair_kthread(void *data)
8231 {
8232 	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8233 	struct btrfs_fs_info *fs_info = cache->fs_info;
8234 	u64 target;
8235 	int ret = 0;
8236 
8237 	target = cache->start;
8238 	btrfs_put_block_group(cache);
8239 
8240 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8241 		btrfs_info(fs_info,
8242 			   "zoned: skip relocating block group %llu to repair: EBUSY",
8243 			   target);
8244 		return -EBUSY;
8245 	}
8246 
8247 	mutex_lock(&fs_info->reclaim_bgs_lock);
8248 
8249 	/* Ensure block group still exists */
8250 	cache = btrfs_lookup_block_group(fs_info, target);
8251 	if (!cache)
8252 		goto out;
8253 
8254 	if (!cache->relocating_repair)
8255 		goto out;
8256 
8257 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
8258 	if (ret < 0)
8259 		goto out;
8260 
8261 	btrfs_info(fs_info,
8262 		   "zoned: relocating block group %llu to repair IO failure",
8263 		   target);
8264 	ret = btrfs_relocate_chunk(fs_info, target);
8265 
8266 out:
8267 	if (cache)
8268 		btrfs_put_block_group(cache);
8269 	mutex_unlock(&fs_info->reclaim_bgs_lock);
8270 	btrfs_exclop_finish(fs_info);
8271 
8272 	return ret;
8273 }
8274 
8275 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8276 {
8277 	struct btrfs_block_group *cache;
8278 
8279 	/* Do not attempt to repair in degraded state */
8280 	if (btrfs_test_opt(fs_info, DEGRADED))
8281 		return 0;
8282 
8283 	cache = btrfs_lookup_block_group(fs_info, logical);
8284 	if (!cache)
8285 		return 0;
8286 
8287 	spin_lock(&cache->lock);
8288 	if (cache->relocating_repair) {
8289 		spin_unlock(&cache->lock);
8290 		btrfs_put_block_group(cache);
8291 		return 0;
8292 	}
8293 	cache->relocating_repair = 1;
8294 	spin_unlock(&cache->lock);
8295 
8296 	kthread_run(relocating_repair_kthread, cache,
8297 		    "btrfs-relocating-repair");
8298 
8299 	return 0;
8300 }
8301