xref: /openbmc/linux/fs/btrfs/volumes.c (revision cc8bbe1a)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "ctree.h"
32 #include "extent_map.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "print-tree.h"
36 #include "volumes.h"
37 #include "raid56.h"
38 #include "async-thread.h"
39 #include "check-integrity.h"
40 #include "rcu-string.h"
41 #include "math.h"
42 #include "dev-replace.h"
43 #include "sysfs.h"
44 
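/*
 * Per-profile allocation attributes, indexed by BTRFS_RAID_*: stripe
 * layout (sub_stripes/dev_stripes), device count limits (a devs_max of
 * 0 means "as many as possible"), how many device failures the profile
 * tolerates, the multiple in which devices are consumed
 * (devs_increment), and how many copies of each byte are stored.
 */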
45 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
46 	[BTRFS_RAID_RAID10] = {
47 		.sub_stripes	= 2,
48 		.dev_stripes	= 1,
49 		.devs_max	= 0,	/* 0 == as many as possible */
50 		.devs_min	= 4,
51 		.tolerated_failures = 1,
52 		.devs_increment	= 2,
53 		.ncopies	= 2,
54 	},
55 	[BTRFS_RAID_RAID1] = {
56 		.sub_stripes	= 1,
57 		.dev_stripes	= 1,
58 		.devs_max	= 2,
59 		.devs_min	= 2,
60 		.tolerated_failures = 1,
61 		.devs_increment	= 2,
62 		.ncopies	= 2,
63 	},
64 	[BTRFS_RAID_DUP] = {
65 		.sub_stripes	= 1,
66 		.dev_stripes	= 2,
67 		.devs_max	= 1,
68 		.devs_min	= 1,
69 		.tolerated_failures = 0,
70 		.devs_increment	= 1,
71 		.ncopies	= 2,
72 	},
73 	[BTRFS_RAID_RAID0] = {
74 		.sub_stripes	= 1,
75 		.dev_stripes	= 1,
76 		.devs_max	= 0,
77 		.devs_min	= 2,
78 		.tolerated_failures = 0,
79 		.devs_increment	= 1,
80 		.ncopies	= 1,
81 	},
82 	[BTRFS_RAID_SINGLE] = {
83 		.sub_stripes	= 1,
84 		.dev_stripes	= 1,
85 		.devs_max	= 1,
86 		.devs_min	= 1,
87 		.tolerated_failures = 0,
88 		.devs_increment	= 1,
89 		.ncopies	= 1,
90 	},
91 	[BTRFS_RAID_RAID5] = {
92 		.sub_stripes	= 1,
93 		.dev_stripes	= 1,
94 		.devs_max	= 0,
95 		.devs_min	= 2,
96 		.tolerated_failures = 1,
97 		.devs_increment	= 1,
98 		.ncopies	= 2,
99 	},
100 	[BTRFS_RAID_RAID6] = {
101 		.sub_stripes	= 1,
102 		.dev_stripes	= 1,
103 		.devs_max	= 0,
104 		.devs_min	= 3,
105 		.tolerated_failures = 2,
106 		.devs_increment	= 1,
107 		.ncopies	= 3,
108 	},
109 };
110 
111 const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
112 	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
113 	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
114 	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
115 	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
116 	[BTRFS_RAID_SINGLE] = 0,
117 	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
118 	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
119 };
120 
121 static int init_first_rw_device(struct btrfs_trans_handle *trans,
122 				struct btrfs_root *root,
123 				struct btrfs_device *device);
124 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
125 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
126 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
127 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
128 static void btrfs_close_one_device(struct btrfs_device *device);
129 
130 DEFINE_MUTEX(uuid_mutex);
131 static LIST_HEAD(fs_uuids);
132 struct list_head *btrfs_get_fs_uuids(void)
133 {
134 	return &fs_uuids;
135 }
136 
137 static struct btrfs_fs_devices *__alloc_fs_devices(void)
138 {
139 	struct btrfs_fs_devices *fs_devs;
140 
141 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
142 	if (!fs_devs)
143 		return ERR_PTR(-ENOMEM);
144 
145 	mutex_init(&fs_devs->device_list_mutex);
146 
147 	INIT_LIST_HEAD(&fs_devs->devices);
148 	INIT_LIST_HEAD(&fs_devs->resized_devices);
149 	INIT_LIST_HEAD(&fs_devs->alloc_list);
150 	INIT_LIST_HEAD(&fs_devs->list);
151 
152 	return fs_devs;
153 }
154 
155 /**
156  * alloc_fs_devices - allocate struct btrfs_fs_devices
157  * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
158  *		generated.
159  *
160  * Return: a pointer to a new &struct btrfs_fs_devices on success;
161  * ERR_PTR() on error.  Returned struct is not linked onto any lists and
162  * can be destroyed with kfree() right away.
163  */
164 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
165 {
166 	struct btrfs_fs_devices *fs_devs;
167 
168 	fs_devs = __alloc_fs_devices();
169 	if (IS_ERR(fs_devs))
170 		return fs_devs;
171 
172 	if (fsid)
173 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
174 	else
175 		generate_random_uuid(fs_devs->fsid);
176 
177 	return fs_devs;
178 }
179 
180 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
181 {
182 	struct btrfs_device *device;
183 	WARN_ON(fs_devices->opened);
184 	while (!list_empty(&fs_devices->devices)) {
185 		device = list_entry(fs_devices->devices.next,
186 				    struct btrfs_device, dev_list);
187 		list_del(&device->dev_list);
188 		rcu_string_free(device->name);
189 		kfree(device);
190 	}
191 	kfree(fs_devices);
192 }
193 
194 static void btrfs_kobject_uevent(struct block_device *bdev,
195 				 enum kobject_action action)
196 {
197 	int ret;
198 
199 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
200 	if (ret)
201 		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
202 			action,
203 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
204 			&disk_to_dev(bdev->bd_disk)->kobj);
205 }
206 
207 void btrfs_cleanup_fs_uuids(void)
208 {
209 	struct btrfs_fs_devices *fs_devices;
210 
211 	while (!list_empty(&fs_uuids)) {
212 		fs_devices = list_entry(fs_uuids.next,
213 					struct btrfs_fs_devices, list);
214 		list_del(&fs_devices->list);
215 		free_fs_devices(fs_devices);
216 	}
217 }
218 
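/*
 * Allocate a zeroed btrfs_device and initialize its list heads, locks
 * and readahead radix trees.  Callers such as btrfs_alloc_device()
 * are expected to fill in devid, uuid and name afterwards.
 */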
219 static struct btrfs_device *__alloc_device(void)
220 {
221 	struct btrfs_device *dev;
222 
223 	dev = kzalloc(sizeof(*dev), GFP_NOFS);
224 	if (!dev)
225 		return ERR_PTR(-ENOMEM);
226 
227 	INIT_LIST_HEAD(&dev->dev_list);
228 	INIT_LIST_HEAD(&dev->dev_alloc_list);
229 	INIT_LIST_HEAD(&dev->resized_list);
230 
231 	spin_lock_init(&dev->io_lock);
232 
233 	spin_lock_init(&dev->reada_lock);
234 	atomic_set(&dev->reada_in_flight, 0);
235 	atomic_set(&dev->dev_stats_ccnt, 0);
236 	btrfs_device_data_ordered_init(dev);
237 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
238 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
239 
240 	return dev;
241 }
242 
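/*
 * Find a device on @head by devid; if @uuid is non-NULL the device
 * uuid must match as well.  Returns NULL when no device matches.
 */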
243 static noinline struct btrfs_device *__find_device(struct list_head *head,
244 						   u64 devid, u8 *uuid)
245 {
246 	struct btrfs_device *dev;
247 
248 	list_for_each_entry(dev, head, dev_list) {
249 		if (dev->devid == devid &&
250 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
251 			return dev;
252 		}
253 	}
254 	return NULL;
255 }
256 
257 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
258 {
259 	struct btrfs_fs_devices *fs_devices;
260 
261 	list_for_each_entry(fs_devices, &fs_uuids, list) {
262 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
263 			return fs_devices;
264 	}
265 	return NULL;
266 }
267 
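/*
 * Open @device_path exclusively, optionally flush its page cache, set
 * the block size and read the primary superblock into @bh.  On any
 * failure *bdev and *bh are reset to NULL and an errno is returned.
 */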
268 static int
269 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
270 		      int flush, struct block_device **bdev,
271 		      struct buffer_head **bh)
272 {
273 	int ret;
274 
275 	*bdev = blkdev_get_by_path(device_path, flags, holder);
276 
277 	if (IS_ERR(*bdev)) {
278 		ret = PTR_ERR(*bdev);
279 		goto error;
280 	}
281 
282 	if (flush)
283 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
284 	ret = set_blocksize(*bdev, 4096);
285 	if (ret) {
286 		blkdev_put(*bdev, flags);
287 		goto error;
288 	}
289 	invalidate_bdev(*bdev);
290 	*bh = btrfs_read_dev_super(*bdev);
291 	if (IS_ERR(*bh)) {
292 		ret = PTR_ERR(*bh);
293 		blkdev_put(*bdev, flags);
294 		goto error;
295 	}
296 
297 	return 0;
298 
299 error:
300 	*bdev = NULL;
301 	*bh = NULL;
302 	return ret;
303 }
304 
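/*
 * Splice the not-yet-submitted bios (head..tail) back onto the front
 * of @pending_bios so they keep their position ahead of anything that
 * was queued while we were running.
 */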
305 static void requeue_list(struct btrfs_pending_bios *pending_bios,
306 			struct bio *head, struct bio *tail)
307 {
308 
309 	struct bio *old_head;
310 
311 	old_head = pending_bios->head;
312 	pending_bios->head = head;
313 	if (pending_bios->tail)
314 		tail->bi_next = old_head;
315 	else
316 		pending_bios->tail = tail;
317 }
318 
319 /*
320  * we try to collect pending bios for a device so we don't get a large
321  * number of procs sending bios down to the same device.  This greatly
322  * improves the scheduler's ability to collect and merge the bios.
323  *
324  * But, it also turns into a long list of bios to process and that is sure
325  * to eventually make the worker thread block.  The solution here is to
326  * make some progress and then put this work struct back at the end of
327  * the list if the block device is congested.  This way, multiple devices
328  * can make progress from a single worker thread.
329  */
330 static noinline void run_scheduled_bios(struct btrfs_device *device)
331 {
332 	struct bio *pending;
333 	struct backing_dev_info *bdi;
334 	struct btrfs_fs_info *fs_info;
335 	struct btrfs_pending_bios *pending_bios;
336 	struct bio *tail;
337 	struct bio *cur;
338 	int again = 0;
339 	unsigned long num_run;
340 	unsigned long batch_run = 0;
341 	unsigned long limit;
342 	unsigned long last_waited = 0;
343 	int force_reg = 0;
344 	int sync_pending = 0;
345 	struct blk_plug plug;
346 
347 	/*
348 	 * this function runs all the bios we've collected for
349 	 * a particular device.  We don't want to wander off to
350 	 * another device without first sending all of these down.
351 	 * So, set up a plug here and finish it off before we return.
352 	 */
353 	blk_start_plug(&plug);
354 
355 	bdi = blk_get_backing_dev_info(device->bdev);
356 	fs_info = device->dev_root->fs_info;
357 	limit = btrfs_async_submit_limit(fs_info);
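	/* wake up async submitters once we drop below roughly 2/3 of the limit */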
358 	limit = limit * 2 / 3;
359 
360 loop:
361 	spin_lock(&device->io_lock);
362 
363 loop_lock:
364 	num_run = 0;
365 
366 	/* take all the bios off the list at once and process them
367 	 * later on (without the lock held).  But, remember the
368 	 * tail and other pointers so the bios can be properly reinserted
369 	 * into the list if we hit congestion
370 	 */
371 	if (!force_reg && device->pending_sync_bios.head) {
372 		pending_bios = &device->pending_sync_bios;
373 		force_reg = 1;
374 	} else {
375 		pending_bios = &device->pending_bios;
376 		force_reg = 0;
377 	}
378 
379 	pending = pending_bios->head;
380 	tail = pending_bios->tail;
381 	WARN_ON(pending && !tail);
382 
383 	/*
384 	 * if pending was null this time around, no bios need processing
385 	 * at all and we can stop.  Otherwise it'll loop back up again
386 	 * and do an additional check so no bios are missed.
387 	 *
388 	 * device->running_pending is used to synchronize with the
389 	 * schedule_bio code.
390 	 */
391 	if (device->pending_sync_bios.head == NULL &&
392 	    device->pending_bios.head == NULL) {
393 		again = 0;
394 		device->running_pending = 0;
395 	} else {
396 		again = 1;
397 		device->running_pending = 1;
398 	}
399 
400 	pending_bios->head = NULL;
401 	pending_bios->tail = NULL;
402 
403 	spin_unlock(&device->io_lock);
404 
405 	while (pending) {
406 
407 		rmb();
408 		/* we want to work on both lists, but do more bios on the
409 		 * sync list than the regular list
410 		 */
411 		if ((num_run > 32 &&
412 		    pending_bios != &device->pending_sync_bios &&
413 		    device->pending_sync_bios.head) ||
414 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
415 		    device->pending_bios.head)) {
416 			spin_lock(&device->io_lock);
417 			requeue_list(pending_bios, pending, tail);
418 			goto loop_lock;
419 		}
420 
421 		cur = pending;
422 		pending = pending->bi_next;
423 		cur->bi_next = NULL;
424 
425 		/*
426 		 * atomic_dec_return implies a barrier for waitqueue_active
427 		 */
428 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
429 		    waitqueue_active(&fs_info->async_submit_wait))
430 			wake_up(&fs_info->async_submit_wait);
431 
432 		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
433 
434 		/*
435 		 * if we're doing the sync list, record that our
436 		 * plug has some sync requests on it
437 		 *
438 		 * If we're doing the regular list and there are
439 		 * sync requests sitting around, unplug before
440 		 * we add more
441 		 */
442 		if (pending_bios == &device->pending_sync_bios) {
443 			sync_pending = 1;
444 		} else if (sync_pending) {
445 			blk_finish_plug(&plug);
446 			blk_start_plug(&plug);
447 			sync_pending = 0;
448 		}
449 
450 		btrfsic_submit_bio(cur->bi_rw, cur);
451 		num_run++;
452 		batch_run++;
453 
454 		cond_resched();
455 
456 		/*
457 		 * we made progress, there is more work to do, and the bdi
458 		 * is now congested.  Back off and let other work structs
459 		 * run instead
460 		 */
461 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
462 		    fs_info->fs_devices->open_devices > 1) {
463 			struct io_context *ioc;
464 
465 			ioc = current->io_context;
466 
467 			/*
468 			 * the main goal here is that we don't want to
469 			 * block if we're going to be able to submit
470 			 * more requests without blocking.
471 			 *
472 			 * This code does two great things, it pokes into
473 			 * the elevator code from a filesystem _and_
474 			 * it makes assumptions about how batching works.
475 			 */
476 			if (ioc && ioc->nr_batch_requests > 0 &&
477 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
478 			    (last_waited == 0 ||
479 			     ioc->last_waited == last_waited)) {
480 				/*
481 				 * we want to go through our batch of
482 				 * requests and stop.  So, we copy out
483 				 * the ioc->last_waited time and test
484 				 * against it before looping
485 				 */
486 				last_waited = ioc->last_waited;
487 				cond_resched();
488 				continue;
489 			}
490 			spin_lock(&device->io_lock);
491 			requeue_list(pending_bios, pending, tail);
492 			device->running_pending = 1;
493 
494 			spin_unlock(&device->io_lock);
495 			btrfs_queue_work(fs_info->submit_workers,
496 					 &device->work);
497 			goto done;
498 		}
499 		/* unplug every 64 requests just for good measure */
500 		if (batch_run % 64 == 0) {
501 			blk_finish_plug(&plug);
502 			blk_start_plug(&plug);
503 			sync_pending = 0;
504 		}
505 	}
506 
507 	cond_resched();
508 	if (again)
509 		goto loop;
510 
511 	spin_lock(&device->io_lock);
512 	if (device->pending_bios.head || device->pending_sync_bios.head)
513 		goto loop_lock;
514 	spin_unlock(&device->io_lock);
515 
516 done:
517 	blk_finish_plug(&plug);
518 }
519 
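/* work item callback: drain the bios queued on this device */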
520 static void pending_bios_fn(struct btrfs_work *work)
521 {
522 	struct btrfs_device *device;
523 
524 	device = container_of(work, struct btrfs_device, work);
525 	run_scheduled_bios(device);
526 }
527 
528 
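/*
 * Drop a stale entry for @cur_dev's path: walk every unmounted,
 * non-seed fs_devices and remove any other device that was registered
 * under the same path (i.e. a new filesystem was created on a device
 * that is already known under another fsid).
 */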
529 void btrfs_free_stale_device(struct btrfs_device *cur_dev)
530 {
531 	struct btrfs_fs_devices *fs_devs;
532 	struct btrfs_device *dev;
533 
534 	if (!cur_dev->name)
535 		return;
536 
537 	list_for_each_entry(fs_devs, &fs_uuids, list) {
538 		int del = 1;
539 
540 		if (fs_devs->opened)
541 			continue;
542 		if (fs_devs->seeding)
543 			continue;
544 
545 		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
546 
547 			if (dev == cur_dev)
548 				continue;
549 			if (!dev->name)
550 				continue;
551 
552 			/*
553 			 * TODO: This won't be enough. What if the same device
554 			 * comes back with a new uuid and with its mapper path?
555 			 * But for now this does help, as mostly an admin will
556 			 * use either the mapper or the non-mapper path throughout.
557 			 */
558 			rcu_read_lock();
559 			del = strcmp(rcu_str_deref(dev->name),
560 						rcu_str_deref(cur_dev->name));
561 			rcu_read_unlock();
562 			if (!del)
563 				break;
564 		}
565 
566 		if (!del) {
567 			/* delete the stale device */
568 			if (fs_devs->num_devices == 1) {
569 				btrfs_sysfs_remove_fsid(fs_devs);
570 				list_del(&fs_devs->list);
571 				free_fs_devices(fs_devs);
572 			} else {
573 				fs_devs->num_devices--;
574 				list_del(&dev->dev_list);
575 				rcu_string_free(dev->name);
576 				kfree(dev);
577 			}
578 			break;
579 		}
580 	}
581 }
582 
583 /*
584  * Add new device to list of registered devices
585  *
586  * Returns:
587  * 1   - first time device is seen
588  * 0   - device already known
589  * < 0 - error
590  */
591 static noinline int device_list_add(const char *path,
592 			   struct btrfs_super_block *disk_super,
593 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
594 {
595 	struct btrfs_device *device;
596 	struct btrfs_fs_devices *fs_devices;
597 	struct rcu_string *name;
598 	int ret = 0;
599 	u64 found_transid = btrfs_super_generation(disk_super);
600 
601 	fs_devices = find_fsid(disk_super->fsid);
602 	if (!fs_devices) {
603 		fs_devices = alloc_fs_devices(disk_super->fsid);
604 		if (IS_ERR(fs_devices))
605 			return PTR_ERR(fs_devices);
606 
607 		list_add(&fs_devices->list, &fs_uuids);
608 
609 		device = NULL;
610 	} else {
611 		device = __find_device(&fs_devices->devices, devid,
612 				       disk_super->dev_item.uuid);
613 	}
614 
615 	if (!device) {
616 		if (fs_devices->opened)
617 			return -EBUSY;
618 
619 		device = btrfs_alloc_device(NULL, &devid,
620 					    disk_super->dev_item.uuid);
621 		if (IS_ERR(device)) {
622 			/* we can safely leave the fs_devices entry around */
623 			return PTR_ERR(device);
624 		}
625 
626 		name = rcu_string_strdup(path, GFP_NOFS);
627 		if (!name) {
628 			kfree(device);
629 			return -ENOMEM;
630 		}
631 		rcu_assign_pointer(device->name, name);
632 
633 		mutex_lock(&fs_devices->device_list_mutex);
634 		list_add_rcu(&device->dev_list, &fs_devices->devices);
635 		fs_devices->num_devices++;
636 		mutex_unlock(&fs_devices->device_list_mutex);
637 
638 		ret = 1;
639 		device->fs_devices = fs_devices;
640 	} else if (!device->name || strcmp(device->name->str, path)) {
641 		/*
642 		 * When the FS is already mounted:
643 		 * 1. If you are here and the device->name is NULL, that
644 		 *    means this device was missing at the time of FS mount.
645 		 * 2. If you are here and the device->name is different
646 		 *    from 'path', that means either
647 		 *      a. The same device disappeared and reappeared with a
648 		 *         different name, or
649 		 *      b. The missing-disk-which-was-replaced has
650 		 *         reappeared now.
651 		 *
652 		 * We must allow 1 and 2a above, but 2b would be spurious
653 		 * and unintentional.
654 		 *
655 		 * Further, in cases 1 and 2a above, the disk at 'path'
656 		 * would have missed some transactions while it was away, and
657 		 * in case 2a the stale bdev has to be updated as well.
658 		 * 2b must not be allowed at any time.
659 		 */
660 
661 		/*
662 		 * For now, we do allow update to btrfs_fs_device through the
663 		 * btrfs dev scan cli after FS has been mounted.  We're still
664 		 * tracking a problem where systems fail mount by subvolume id
665 		 * when we reject replacement on a mounted FS.
666 		 */
667 		if (!fs_devices->opened && found_transid < device->generation) {
668 			/*
669 			 * That is, if the FS is _not_ mounted and you are
670 			 * here, there is more than one disk with the same
671 			 * uuid and devid. We keep the one with the larger
672 			 * generation number, or the last-in if the
673 			 * generations are equal.
674 			 */
675 			return -EEXIST;
676 		}
677 
678 		name = rcu_string_strdup(path, GFP_NOFS);
679 		if (!name)
680 			return -ENOMEM;
681 		rcu_string_free(device->name);
682 		rcu_assign_pointer(device->name, name);
683 		if (device->missing) {
684 			fs_devices->missing_devices--;
685 			device->missing = 0;
686 		}
687 	}
688 
689 	/*
690 	 * Unmount does not free the btrfs_device struct but would zero
691 	 * generation along with most of the other members. So just update
692 	 * it back. We need it to pick the disk with the largest generation
693 	 * (as above).
694 	 */
695 	if (!fs_devices->opened)
696 		device->generation = found_transid;
697 
698 	/*
699 	 * if there is new btrfs on an already registered device,
700 	 * then remove the stale device entry.
701 	 */
702 	btrfs_free_stale_device(device);
703 
704 	*fs_devices_ret = fs_devices;
705 
706 	return ret;
707 }
708 
709 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
710 {
711 	struct btrfs_fs_devices *fs_devices;
712 	struct btrfs_device *device;
713 	struct btrfs_device *orig_dev;
714 
715 	fs_devices = alloc_fs_devices(orig->fsid);
716 	if (IS_ERR(fs_devices))
717 		return fs_devices;
718 
719 	mutex_lock(&orig->device_list_mutex);
720 	fs_devices->total_devices = orig->total_devices;
721 
722 	/* We hold the volume lock; it is safe to get the devices. */
723 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
724 		struct rcu_string *name;
725 
726 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
727 					    orig_dev->uuid);
728 		if (IS_ERR(device))
729 			goto error;
730 
731 		/*
732 		 * This is ok to do without the RCU read lock because we hold
733 		 * the uuid mutex, so nothing we touch here is going to disappear.
734 		 */
735 		if (orig_dev->name) {
736 			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
737 			if (!name) {
738 				kfree(device);
739 				goto error;
740 			}
741 			rcu_assign_pointer(device->name, name);
742 		}
743 
744 		list_add(&device->dev_list, &fs_devices->devices);
745 		device->fs_devices = fs_devices;
746 		fs_devices->num_devices++;
747 	}
748 	mutex_unlock(&orig->device_list_mutex);
749 	return fs_devices;
750 error:
751 	mutex_unlock(&orig->device_list_mutex);
752 	free_fs_devices(fs_devices);
753 	return ERR_PTR(-ENOMEM);
754 }
755 
756 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
757 {
758 	struct btrfs_device *device, *next;
759 	struct btrfs_device *latest_dev = NULL;
760 
761 	mutex_lock(&uuid_mutex);
762 again:
763 	/* This is the initialized path; it is safe to release the devices. */
764 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
765 		if (device->in_fs_metadata) {
766 			if (!device->is_tgtdev_for_dev_replace &&
767 			    (!latest_dev ||
768 			     device->generation > latest_dev->generation)) {
769 				latest_dev = device;
770 			}
771 			continue;
772 		}
773 
774 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
775 			/*
776 			 * In the first step, keep the device which has
777 			 * the correct fsid and the devid that is used
778 			 * for the dev_replace procedure.
779 			 * In the second step, the dev_replace state is
780 			 * read from the device tree and it is known
781 			 * whether the procedure is really active or
782 			 * not, which means whether this device is
783 			 * used or whether it should be removed.
784 			 */
785 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
786 				continue;
787 			}
788 		}
789 		if (device->bdev) {
790 			blkdev_put(device->bdev, device->mode);
791 			device->bdev = NULL;
792 			fs_devices->open_devices--;
793 		}
794 		if (device->writeable) {
795 			list_del_init(&device->dev_alloc_list);
796 			device->writeable = 0;
797 			if (!device->is_tgtdev_for_dev_replace)
798 				fs_devices->rw_devices--;
799 		}
800 		list_del_init(&device->dev_list);
801 		fs_devices->num_devices--;
802 		rcu_string_free(device->name);
803 		kfree(device);
804 	}
805 
806 	if (fs_devices->seed) {
807 		fs_devices = fs_devices->seed;
808 		goto again;
809 	}
810 
811 	fs_devices->latest_bdev = latest_dev->bdev;
812 
813 	mutex_unlock(&uuid_mutex);
814 }
815 
816 static void __free_device(struct work_struct *work)
817 {
818 	struct btrfs_device *device;
819 
820 	device = container_of(work, struct btrfs_device, rcu_work);
821 
822 	if (device->bdev)
823 		blkdev_put(device->bdev, device->mode);
824 
825 	rcu_string_free(device->name);
826 	kfree(device);
827 }
828 
829 static void free_device(struct rcu_head *head)
830 {
831 	struct btrfs_device *device;
832 
833 	device = container_of(head, struct btrfs_device, rcu);
834 
835 	INIT_WORK(&device->rcu_work, __free_device);
836 	schedule_work(&device->rcu_work);
837 }
838 
839 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
840 {
841 	struct btrfs_device *device, *tmp;
842 
843 	if (--fs_devices->opened > 0)
844 		return 0;
845 
846 	mutex_lock(&fs_devices->device_list_mutex);
847 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
848 		btrfs_close_one_device(device);
849 	}
850 	mutex_unlock(&fs_devices->device_list_mutex);
851 
852 	WARN_ON(fs_devices->open_devices);
853 	WARN_ON(fs_devices->rw_devices);
854 	fs_devices->opened = 0;
855 	fs_devices->seeding = 0;
856 
857 	return 0;
858 }
859 
860 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
861 {
862 	struct btrfs_fs_devices *seed_devices = NULL;
863 	int ret;
864 
865 	mutex_lock(&uuid_mutex);
866 	ret = __btrfs_close_devices(fs_devices);
867 	if (!fs_devices->opened) {
868 		seed_devices = fs_devices->seed;
869 		fs_devices->seed = NULL;
870 	}
871 	mutex_unlock(&uuid_mutex);
872 
873 	while (seed_devices) {
874 		fs_devices = seed_devices;
875 		seed_devices = fs_devices->seed;
876 		__btrfs_close_devices(fs_devices);
877 		free_fs_devices(fs_devices);
878 	}
879 	/*
880 	 * Wait for rcu kworkers under __btrfs_close_devices
881 	 * to finish all blkdev_puts so the device is really
882 	 * free when umount is done.
883 	 */
884 	rcu_barrier();
885 	return ret;
886 }
887 
888 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
889 				fmode_t flags, void *holder)
890 {
891 	struct request_queue *q;
892 	struct block_device *bdev;
893 	struct list_head *head = &fs_devices->devices;
894 	struct btrfs_device *device;
895 	struct btrfs_device *latest_dev = NULL;
896 	struct buffer_head *bh;
897 	struct btrfs_super_block *disk_super;
898 	u64 devid;
899 	int seeding = 1;
900 	int ret = 0;
901 
902 	flags |= FMODE_EXCL;
903 
904 	list_for_each_entry(device, head, dev_list) {
905 		if (device->bdev)
906 			continue;
907 		if (!device->name)
908 			continue;
909 
910 		/* Just open everything we can; ignore failures here */
911 		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
912 					    &bdev, &bh))
913 			continue;
914 
915 		disk_super = (struct btrfs_super_block *)bh->b_data;
916 		devid = btrfs_stack_device_id(&disk_super->dev_item);
917 		if (devid != device->devid)
918 			goto error_brelse;
919 
920 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
921 			   BTRFS_UUID_SIZE))
922 			goto error_brelse;
923 
924 		device->generation = btrfs_super_generation(disk_super);
925 		if (!latest_dev ||
926 		    device->generation > latest_dev->generation)
927 			latest_dev = device;
928 
929 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
930 			device->writeable = 0;
931 		} else {
932 			device->writeable = !bdev_read_only(bdev);
933 			seeding = 0;
934 		}
935 
936 		q = bdev_get_queue(bdev);
937 		if (blk_queue_discard(q))
938 			device->can_discard = 1;
939 
940 		device->bdev = bdev;
941 		device->in_fs_metadata = 0;
942 		device->mode = flags;
943 
944 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
945 			fs_devices->rotating = 1;
946 
947 		fs_devices->open_devices++;
948 		if (device->writeable &&
949 		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
950 			fs_devices->rw_devices++;
951 			list_add(&device->dev_alloc_list,
952 				 &fs_devices->alloc_list);
953 		}
954 		brelse(bh);
955 		continue;
956 
957 error_brelse:
958 		brelse(bh);
959 		blkdev_put(bdev, flags);
960 		continue;
961 	}
962 	if (fs_devices->open_devices == 0) {
963 		ret = -EINVAL;
964 		goto out;
965 	}
966 	fs_devices->seeding = seeding;
967 	fs_devices->opened = 1;
968 	fs_devices->latest_bdev = latest_dev->bdev;
969 	fs_devices->total_rw_bytes = 0;
970 out:
971 	return ret;
972 }
973 
974 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
975 		       fmode_t flags, void *holder)
976 {
977 	int ret;
978 
979 	mutex_lock(&uuid_mutex);
980 	if (fs_devices->opened) {
981 		fs_devices->opened++;
982 		ret = 0;
983 	} else {
984 		ret = __btrfs_open_devices(fs_devices, flags, holder);
985 	}
986 	mutex_unlock(&uuid_mutex);
987 	return ret;
988 }
989 
990 /*
991  * Look for a btrfs signature on a device. This may be called out of the mount path
992  * and we are not allowed to call set_blocksize during the scan. The superblock
993  * is read via the pagecache.
994  */
995 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
996 			  struct btrfs_fs_devices **fs_devices_ret)
997 {
998 	struct btrfs_super_block *disk_super;
999 	struct block_device *bdev;
1000 	struct page *page;
1001 	void *p;
1002 	int ret = -EINVAL;
1003 	u64 devid;
1004 	u64 transid;
1005 	u64 total_devices;
1006 	u64 bytenr;
1007 	pgoff_t index;
1008 
1009 	/*
1010 	 * we would like to check all the supers, but that would make
1011 	 * a btrfs mount succeed after a mkfs from a different FS.
1012 	 * So, we need to add a special mount option to scan for
1013  * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
1014 	 */
1015 	bytenr = btrfs_sb_offset(0);
1016 	flags |= FMODE_EXCL;
1017 	mutex_lock(&uuid_mutex);
1018 
1019 	bdev = blkdev_get_by_path(path, flags, holder);
1020 
1021 	if (IS_ERR(bdev)) {
1022 		ret = PTR_ERR(bdev);
1023 		goto error;
1024 	}
1025 
1026 	/* make sure our super fits in the device */
1027 	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
1028 		goto error_bdev_put;
1029 
1030 	/* make sure our super fits in the page */
1031 	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
1032 		goto error_bdev_put;
1033 
1034 	/* make sure our super doesn't straddle pages on disk */
1035 	index = bytenr >> PAGE_CACHE_SHIFT;
1036 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
1037 		goto error_bdev_put;
1038 
1039 	/* pull in the page with our super */
1040 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1041 				   index, GFP_NOFS);
1042 
1043 	if (IS_ERR_OR_NULL(page))
1044 		goto error_bdev_put;
1045 
1046 	p = kmap(page);
1047 
1048 	/* align our pointer to the offset of the super block */
1049 	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
1050 
1051 	if (btrfs_super_bytenr(disk_super) != bytenr ||
1052 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
1053 		goto error_unmap;
1054 
1055 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1056 	transid = btrfs_super_generation(disk_super);
1057 	total_devices = btrfs_super_num_devices(disk_super);
1058 
1059 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1060 	if (ret > 0) {
1061 		if (disk_super->label[0]) {
1062 			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
1063 				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
1064 			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1065 		} else {
1066 			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1067 		}
1068 
1069 		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1070 		ret = 0;
1071 	}
1072 	if (!ret && fs_devices_ret)
1073 		(*fs_devices_ret)->total_devices = total_devices;
1074 
1075 error_unmap:
1076 	kunmap(page);
1077 	page_cache_release(page);
1078 
1079 error_bdev_put:
1080 	blkdev_put(bdev, flags);
1081 error:
1082 	mutex_unlock(&uuid_mutex);
1083 	return ret;
1084 }
1085 
1086 /* helper to account the used device space in the range */
1087 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
1088 				   u64 end, u64 *length)
1089 {
1090 	struct btrfs_key key;
1091 	struct btrfs_root *root = device->dev_root;
1092 	struct btrfs_dev_extent *dev_extent;
1093 	struct btrfs_path *path;
1094 	u64 extent_end;
1095 	int ret;
1096 	int slot;
1097 	struct extent_buffer *l;
1098 
1099 	*length = 0;
1100 
1101 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1102 		return 0;
1103 
1104 	path = btrfs_alloc_path();
1105 	if (!path)
1106 		return -ENOMEM;
1107 	path->reada = READA_FORWARD;
1108 
1109 	key.objectid = device->devid;
1110 	key.offset = start;
1111 	key.type = BTRFS_DEV_EXTENT_KEY;
1112 
1113 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1114 	if (ret < 0)
1115 		goto out;
1116 	if (ret > 0) {
1117 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1118 		if (ret < 0)
1119 			goto out;
1120 	}
1121 
1122 	while (1) {
1123 		l = path->nodes[0];
1124 		slot = path->slots[0];
1125 		if (slot >= btrfs_header_nritems(l)) {
1126 			ret = btrfs_next_leaf(root, path);
1127 			if (ret == 0)
1128 				continue;
1129 			if (ret < 0)
1130 				goto out;
1131 
1132 			break;
1133 		}
1134 		btrfs_item_key_to_cpu(l, &key, slot);
1135 
1136 		if (key.objectid < device->devid)
1137 			goto next;
1138 
1139 		if (key.objectid > device->devid)
1140 			break;
1141 
1142 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1143 			goto next;
1144 
1145 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1146 		extent_end = key.offset + btrfs_dev_extent_length(l,
1147 								  dev_extent);
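		/*
		 * Account the part of this dev extent that overlaps
		 * [start, end]: it may cover the whole range, hang over
		 * one end of it, or sit entirely inside it.
		 */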
1148 		if (key.offset <= start && extent_end > end) {
1149 			*length = end - start + 1;
1150 			break;
1151 		} else if (key.offset <= start && extent_end > start)
1152 			*length += extent_end - start;
1153 		else if (key.offset > start && extent_end <= end)
1154 			*length += extent_end - key.offset;
1155 		else if (key.offset > start && key.offset <= end) {
1156 			*length += end - key.offset + 1;
1157 			break;
1158 		} else if (key.offset > end)
1159 			break;
1160 
1161 next:
1162 		path->slots[0]++;
1163 	}
1164 	ret = 0;
1165 out:
1166 	btrfs_free_path(path);
1167 	return ret;
1168 }
1169 
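/*
 * Check whether [*start, *start + len) on @device overlaps a chunk
 * that is still pending in @transaction or pinned in the fs_info.  If
 * it does, advance *start past the conflicting stripe and return 1 so
 * the caller can retry with the new offset.
 */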
1170 static int contains_pending_extent(struct btrfs_transaction *transaction,
1171 				   struct btrfs_device *device,
1172 				   u64 *start, u64 len)
1173 {
1174 	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1175 	struct extent_map *em;
1176 	struct list_head *search_list = &fs_info->pinned_chunks;
1177 	int ret = 0;
1178 	u64 physical_start = *start;
1179 
1180 	if (transaction)
1181 		search_list = &transaction->pending_chunks;
1182 again:
1183 	list_for_each_entry(em, search_list, list) {
1184 		struct map_lookup *map;
1185 		int i;
1186 
1187 		map = em->map_lookup;
1188 		for (i = 0; i < map->num_stripes; i++) {
1189 			u64 end;
1190 
1191 			if (map->stripes[i].dev != device)
1192 				continue;
1193 			if (map->stripes[i].physical >= physical_start + len ||
1194 			    map->stripes[i].physical + em->orig_block_len <=
1195 			    physical_start)
1196 				continue;
1197 			/*
1198 			 * Make sure that while processing the pinned list we do
1199 			 * not override our *start with a lower value, because
1200 			 * we can have pinned chunks that fall within this
1201 			 * device hole and that have lower physical addresses
1202 			 * than the pending chunks we processed before. If we
1203 			 * do not take this special care we can end up getting
1204 			 * 2 pending chunks that start at the same physical
1205 			 * device offsets because the end offset of a pinned
1206 			 * chunk can be equal to the start offset of some
1207 			 * pending chunk.
1208 			 */
1209 			end = map->stripes[i].physical + em->orig_block_len;
1210 			if (end > *start) {
1211 				*start = end;
1212 				ret = 1;
1213 			}
1214 		}
1215 	}
1216 	if (search_list != &fs_info->pinned_chunks) {
1217 		search_list = &fs_info->pinned_chunks;
1218 		goto again;
1219 	}
1220 
1221 	return ret;
1222 }
1223 
1224 
1225 /*
1226  * find_free_dev_extent_start - find free space in the specified device
1227  * @device:	  the device which we search the free space in
1228  * @num_bytes:	  the size of the free space that we need
1229  * @search_start: the position from which to begin the search
1230  * @start:	  store the start of the free space.
1231  * @len:	  the size of the free space that we find, or the size
1232  *		  of the max free space if we don't find suitable free space
1233  *
1234  * this uses a pretty simple search, the expectation is that it is
1235  * called very infrequently and that a given device has a small number
1236  * of extents
1237  *
1238  * @start is used to store the start of the free space if we find it. But if we
1239  * don't find suitable free space, it will be used to store the start position
1240  * of the max free space.
1241  *
1242  * @len is used to store the size of the free space that we find.
1243  * But if we don't find suitable free space, it is used to store the size of
1244  * the max free space.
1245  */
1246 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1247 			       struct btrfs_device *device, u64 num_bytes,
1248 			       u64 search_start, u64 *start, u64 *len)
1249 {
1250 	struct btrfs_key key;
1251 	struct btrfs_root *root = device->dev_root;
1252 	struct btrfs_dev_extent *dev_extent;
1253 	struct btrfs_path *path;
1254 	u64 hole_size;
1255 	u64 max_hole_start;
1256 	u64 max_hole_size;
1257 	u64 extent_end;
1258 	u64 search_end = device->total_bytes;
1259 	int ret;
1260 	int slot;
1261 	struct extent_buffer *l;
1262 	u64 min_search_start;
1263 
1264 	/*
1265 	 * We don't want to overwrite the superblock on the drive nor any area
1266 	 * used by the boot loader (grub for example), so we make sure to start
1267 	 * at an offset of at least 1MB.
1268 	 */
1269 	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1270 	search_start = max(search_start, min_search_start);
1271 
1272 	path = btrfs_alloc_path();
1273 	if (!path)
1274 		return -ENOMEM;
1275 
1276 	max_hole_start = search_start;
1277 	max_hole_size = 0;
1278 
1279 again:
1280 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1281 		ret = -ENOSPC;
1282 		goto out;
1283 	}
1284 
1285 	path->reada = READA_FORWARD;
1286 	path->search_commit_root = 1;
1287 	path->skip_locking = 1;
1288 
1289 	key.objectid = device->devid;
1290 	key.offset = search_start;
1291 	key.type = BTRFS_DEV_EXTENT_KEY;
1292 
1293 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1294 	if (ret < 0)
1295 		goto out;
1296 	if (ret > 0) {
1297 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1298 		if (ret < 0)
1299 			goto out;
1300 	}
1301 
1302 	while (1) {
1303 		l = path->nodes[0];
1304 		slot = path->slots[0];
1305 		if (slot >= btrfs_header_nritems(l)) {
1306 			ret = btrfs_next_leaf(root, path);
1307 			if (ret == 0)
1308 				continue;
1309 			if (ret < 0)
1310 				goto out;
1311 
1312 			break;
1313 		}
1314 		btrfs_item_key_to_cpu(l, &key, slot);
1315 
1316 		if (key.objectid < device->devid)
1317 			goto next;
1318 
1319 		if (key.objectid > device->devid)
1320 			break;
1321 
1322 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1323 			goto next;
1324 
1325 		if (key.offset > search_start) {
1326 			hole_size = key.offset - search_start;
1327 
1328 			/*
1329 			 * Have to check before we set max_hole_start, otherwise
1330 			 * we could end up sending back this offset anyway.
1331 			 */
1332 			if (contains_pending_extent(transaction, device,
1333 						    &search_start,
1334 						    hole_size)) {
1335 				if (key.offset >= search_start) {
1336 					hole_size = key.offset - search_start;
1337 				} else {
1338 					WARN_ON_ONCE(1);
1339 					hole_size = 0;
1340 				}
1341 			}
1342 
1343 			if (hole_size > max_hole_size) {
1344 				max_hole_start = search_start;
1345 				max_hole_size = hole_size;
1346 			}
1347 
1348 			/*
1349 			 * If this free space is greater than what we need,
1350 			 * it must be the max free space that we have found
1351 			 * until now, so max_hole_start must point to the start
1352 			 * of this free space and the length of this free space
1353 			 * is stored in max_hole_size. Thus, we return
1354 			 * max_hole_start and max_hole_size and go back to the
1355 			 * caller.
1356 			 */
1357 			if (hole_size >= num_bytes) {
1358 				ret = 0;
1359 				goto out;
1360 			}
1361 		}
1362 
1363 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1364 		extent_end = key.offset + btrfs_dev_extent_length(l,
1365 								  dev_extent);
1366 		if (extent_end > search_start)
1367 			search_start = extent_end;
1368 next:
1369 		path->slots[0]++;
1370 		cond_resched();
1371 	}
1372 
1373 	/*
1374 	 * At this point, search_start should be the end of
1375 	 * allocated dev extents, and when shrinking the device,
1376 	 * search_end may be smaller than search_start.
1377 	 */
1378 	if (search_end > search_start) {
1379 		hole_size = search_end - search_start;
1380 
1381 		if (contains_pending_extent(transaction, device, &search_start,
1382 					    hole_size)) {
1383 			btrfs_release_path(path);
1384 			goto again;
1385 		}
1386 
1387 		if (hole_size > max_hole_size) {
1388 			max_hole_start = search_start;
1389 			max_hole_size = hole_size;
1390 		}
1391 	}
1392 
1393 	/* See above. */
1394 	if (max_hole_size < num_bytes)
1395 		ret = -ENOSPC;
1396 	else
1397 		ret = 0;
1398 
1399 out:
1400 	btrfs_free_path(path);
1401 	*start = max_hole_start;
1402 	if (len)
1403 		*len = max_hole_size;
1404 	return ret;
1405 }
1406 
1407 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1408 			 struct btrfs_device *device, u64 num_bytes,
1409 			 u64 *start, u64 *len)
1410 {
1411 	/* FIXME use last free of some kind */
1412 	return find_free_dev_extent_start(trans->transaction, device,
1413 					  num_bytes, 0, start, len);
1414 }
1415 
1416 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1417 			  struct btrfs_device *device,
1418 			  u64 start, u64 *dev_extent_len)
1419 {
1420 	int ret;
1421 	struct btrfs_path *path;
1422 	struct btrfs_root *root = device->dev_root;
1423 	struct btrfs_key key;
1424 	struct btrfs_key found_key;
1425 	struct extent_buffer *leaf = NULL;
1426 	struct btrfs_dev_extent *extent = NULL;
1427 
1428 	path = btrfs_alloc_path();
1429 	if (!path)
1430 		return -ENOMEM;
1431 
1432 	key.objectid = device->devid;
1433 	key.offset = start;
1434 	key.type = BTRFS_DEV_EXTENT_KEY;
1435 again:
1436 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1437 	if (ret > 0) {
1438 		ret = btrfs_previous_item(root, path, key.objectid,
1439 					  BTRFS_DEV_EXTENT_KEY);
1440 		if (ret)
1441 			goto out;
1442 		leaf = path->nodes[0];
1443 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1444 		extent = btrfs_item_ptr(leaf, path->slots[0],
1445 					struct btrfs_dev_extent);
1446 		BUG_ON(found_key.offset > start || found_key.offset +
1447 		       btrfs_dev_extent_length(leaf, extent) < start);
1448 		key = found_key;
1449 		btrfs_release_path(path);
1450 		goto again;
1451 	} else if (ret == 0) {
1452 		leaf = path->nodes[0];
1453 		extent = btrfs_item_ptr(leaf, path->slots[0],
1454 					struct btrfs_dev_extent);
1455 	} else {
1456 		btrfs_std_error(root->fs_info, ret, "Slot search failed");
1457 		goto out;
1458 	}
1459 
1460 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1461 
1462 	ret = btrfs_del_item(trans, root, path);
1463 	if (ret) {
1464 		btrfs_std_error(root->fs_info, ret,
1465 			    "Failed to remove dev extent item");
1466 	} else {
1467 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1468 	}
1469 out:
1470 	btrfs_free_path(path);
1471 	return ret;
1472 }
1473 
1474 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1475 				  struct btrfs_device *device,
1476 				  u64 chunk_tree, u64 chunk_objectid,
1477 				  u64 chunk_offset, u64 start, u64 num_bytes)
1478 {
1479 	int ret;
1480 	struct btrfs_path *path;
1481 	struct btrfs_root *root = device->dev_root;
1482 	struct btrfs_dev_extent *extent;
1483 	struct extent_buffer *leaf;
1484 	struct btrfs_key key;
1485 
1486 	WARN_ON(!device->in_fs_metadata);
1487 	WARN_ON(device->is_tgtdev_for_dev_replace);
1488 	path = btrfs_alloc_path();
1489 	if (!path)
1490 		return -ENOMEM;
1491 
1492 	key.objectid = device->devid;
1493 	key.offset = start;
1494 	key.type = BTRFS_DEV_EXTENT_KEY;
1495 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1496 				      sizeof(*extent));
1497 	if (ret)
1498 		goto out;
1499 
1500 	leaf = path->nodes[0];
1501 	extent = btrfs_item_ptr(leaf, path->slots[0],
1502 				struct btrfs_dev_extent);
1503 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1504 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1505 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1506 
1507 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1508 		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1509 
1510 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1511 	btrfs_mark_buffer_dirty(leaf);
1512 out:
1513 	btrfs_free_path(path);
1514 	return ret;
1515 }
1516 
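/*
 * Return the logical offset right after the highest mapped chunk,
 * i.e. the start for the next chunk, or 0 if no chunks are mapped yet.
 */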
1517 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1518 {
1519 	struct extent_map_tree *em_tree;
1520 	struct extent_map *em;
1521 	struct rb_node *n;
1522 	u64 ret = 0;
1523 
1524 	em_tree = &fs_info->mapping_tree.map_tree;
1525 	read_lock(&em_tree->lock);
1526 	n = rb_last(&em_tree->map);
1527 	if (n) {
1528 		em = rb_entry(n, struct extent_map, rb_node);
1529 		ret = em->start + em->len;
1530 	}
1531 	read_unlock(&em_tree->lock);
1532 
1533 	return ret;
1534 }
1535 
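/*
 * Find the highest devid stored in the chunk tree and return it plus
 * one in @devid_ret; an empty tree yields devid 1.
 */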
1536 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1537 				    u64 *devid_ret)
1538 {
1539 	int ret;
1540 	struct btrfs_key key;
1541 	struct btrfs_key found_key;
1542 	struct btrfs_path *path;
1543 
1544 	path = btrfs_alloc_path();
1545 	if (!path)
1546 		return -ENOMEM;
1547 
1548 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1549 	key.type = BTRFS_DEV_ITEM_KEY;
1550 	key.offset = (u64)-1;
1551 
1552 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1553 	if (ret < 0)
1554 		goto error;
1555 
1556 	BUG_ON(ret == 0); /* Corruption */
1557 
1558 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1559 				  BTRFS_DEV_ITEMS_OBJECTID,
1560 				  BTRFS_DEV_ITEM_KEY);
1561 	if (ret) {
1562 		*devid_ret = 1;
1563 	} else {
1564 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1565 				      path->slots[0]);
1566 		*devid_ret = found_key.offset + 1;
1567 	}
1568 	ret = 0;
1569 error:
1570 	btrfs_free_path(path);
1571 	return ret;
1572 }
1573 
1574 /*
1575  * the device information is stored in the chunk root;
1576  * the btrfs_device struct should be fully filled in
1577  */
1578 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1579 			    struct btrfs_root *root,
1580 			    struct btrfs_device *device)
1581 {
1582 	int ret;
1583 	struct btrfs_path *path;
1584 	struct btrfs_dev_item *dev_item;
1585 	struct extent_buffer *leaf;
1586 	struct btrfs_key key;
1587 	unsigned long ptr;
1588 
1589 	root = root->fs_info->chunk_root;
1590 
1591 	path = btrfs_alloc_path();
1592 	if (!path)
1593 		return -ENOMEM;
1594 
1595 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1596 	key.type = BTRFS_DEV_ITEM_KEY;
1597 	key.offset = device->devid;
1598 
1599 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1600 				      sizeof(*dev_item));
1601 	if (ret)
1602 		goto out;
1603 
1604 	leaf = path->nodes[0];
1605 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1606 
1607 	btrfs_set_device_id(leaf, dev_item, device->devid);
1608 	btrfs_set_device_generation(leaf, dev_item, 0);
1609 	btrfs_set_device_type(leaf, dev_item, device->type);
1610 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1611 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1612 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1613 	btrfs_set_device_total_bytes(leaf, dev_item,
1614 				     btrfs_device_get_disk_total_bytes(device));
1615 	btrfs_set_device_bytes_used(leaf, dev_item,
1616 				    btrfs_device_get_bytes_used(device));
1617 	btrfs_set_device_group(leaf, dev_item, 0);
1618 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1619 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1620 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1621 
1622 	ptr = btrfs_device_uuid(dev_item);
1623 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1624 	ptr = btrfs_device_fsid(dev_item);
1625 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1626 	btrfs_mark_buffer_dirty(leaf);
1627 
1628 	ret = 0;
1629 out:
1630 	btrfs_free_path(path);
1631 	return ret;
1632 }
1633 
1634 /*
1635  * Function to update ctime/mtime for a given device path.
1636  * Mainly used for ctime/mtime based probes like libblkid.
1637  */
1638 static void update_dev_time(char *path_name)
1639 {
1640 	struct file *filp;
1641 
1642 	filp = filp_open(path_name, O_RDWR, 0);
1643 	if (IS_ERR(filp))
1644 		return;
1645 	file_update_time(filp);
1646 	filp_close(filp, NULL);
1647 }
1648 
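/*
 * Delete the dev item for @device from the chunk tree, in its own
 * transaction that is committed before returning.
 */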
1649 static int btrfs_rm_dev_item(struct btrfs_root *root,
1650 			     struct btrfs_device *device)
1651 {
1652 	int ret;
1653 	struct btrfs_path *path;
1654 	struct btrfs_key key;
1655 	struct btrfs_trans_handle *trans;
1656 
1657 	root = root->fs_info->chunk_root;
1658 
1659 	path = btrfs_alloc_path();
1660 	if (!path)
1661 		return -ENOMEM;
1662 
1663 	trans = btrfs_start_transaction(root, 0);
1664 	if (IS_ERR(trans)) {
1665 		btrfs_free_path(path);
1666 		return PTR_ERR(trans);
1667 	}
1668 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1669 	key.type = BTRFS_DEV_ITEM_KEY;
1670 	key.offset = device->devid;
1671 
1672 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1673 	if (ret < 0)
1674 		goto out;
1675 
1676 	if (ret > 0) {
1677 		ret = -ENOENT;
1678 		goto out;
1679 	}
1680 
1681 	ret = btrfs_del_item(trans, root, path);
1682 	if (ret)
1683 		goto out;
1684 out:
1685 	btrfs_free_path(path);
1686 	btrfs_commit_transaction(trans, root);
1687 	return ret;
1688 }
1689 
1690 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1691 {
1692 	struct btrfs_device *device;
1693 	struct btrfs_device *next_device;
1694 	struct block_device *bdev;
1695 	struct buffer_head *bh = NULL;
1696 	struct btrfs_super_block *disk_super;
1697 	struct btrfs_fs_devices *cur_devices;
1698 	u64 all_avail;
1699 	u64 devid;
1700 	u64 num_devices;
1701 	u8 *dev_uuid;
1702 	unsigned seq;
1703 	int ret = 0;
1704 	bool clear_super = false;
1705 
1706 	mutex_lock(&uuid_mutex);
1707 
1708 	do {
1709 		seq = read_seqbegin(&root->fs_info->profiles_lock);
1710 
1711 		all_avail = root->fs_info->avail_data_alloc_bits |
1712 			    root->fs_info->avail_system_alloc_bits |
1713 			    root->fs_info->avail_metadata_alloc_bits;
1714 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1715 
1716 	num_devices = root->fs_info->fs_devices->num_devices;
1717 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1718 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1719 		WARN_ON(num_devices < 1);
1720 		num_devices--;
1721 	}
1722 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1723 
1724 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1725 		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1726 		goto out;
1727 	}
1728 
1729 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1730 		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1731 		goto out;
1732 	}
1733 
1734 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1735 	    root->fs_info->fs_devices->rw_devices <= 2) {
1736 		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1737 		goto out;
1738 	}
1739 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1740 	    root->fs_info->fs_devices->rw_devices <= 3) {
1741 		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1742 		goto out;
1743 	}
1744 
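	/*
	 * "missing" asks us to remove the first device that is recorded in
	 * the metadata but has no bdev attached; otherwise look the device
	 * up via the superblock found at @device_path.
	 */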
1745 	if (strcmp(device_path, "missing") == 0) {
1746 		struct list_head *devices;
1747 		struct btrfs_device *tmp;
1748 
1749 		device = NULL;
1750 		devices = &root->fs_info->fs_devices->devices;
1751 		/*
1752 		 * It is safe to read the devices since the volume_mutex
1753 		 * is held.
1754 		 */
1755 		list_for_each_entry(tmp, devices, dev_list) {
1756 			if (tmp->in_fs_metadata &&
1757 			    !tmp->is_tgtdev_for_dev_replace &&
1758 			    !tmp->bdev) {
1759 				device = tmp;
1760 				break;
1761 			}
1762 		}
1763 		bdev = NULL;
1764 		bh = NULL;
1765 		disk_super = NULL;
1766 		if (!device) {
1767 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1768 			goto out;
1769 		}
1770 	} else {
1771 		ret = btrfs_get_bdev_and_sb(device_path,
1772 					    FMODE_WRITE | FMODE_EXCL,
1773 					    root->fs_info->bdev_holder, 0,
1774 					    &bdev, &bh);
1775 		if (ret)
1776 			goto out;
1777 		disk_super = (struct btrfs_super_block *)bh->b_data;
1778 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1779 		dev_uuid = disk_super->dev_item.uuid;
1780 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1781 					   disk_super->fsid);
1782 		if (!device) {
1783 			ret = -ENOENT;
1784 			goto error_brelse;
1785 		}
1786 	}
1787 
1788 	if (device->is_tgtdev_for_dev_replace) {
1789 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1790 		goto error_brelse;
1791 	}
1792 
1793 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1794 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1795 		goto error_brelse;
1796 	}
1797 
1798 	if (device->writeable) {
1799 		lock_chunks(root);
1800 		list_del_init(&device->dev_alloc_list);
1801 		device->fs_devices->rw_devices--;
1802 		unlock_chunks(root);
1803 		clear_super = true;
1804 	}
1805 
1806 	mutex_unlock(&uuid_mutex);
1807 	ret = btrfs_shrink_device(device, 0);
1808 	mutex_lock(&uuid_mutex);
1809 	if (ret)
1810 		goto error_undo;
1811 
1812 	/*
1813 	 * TODO: the superblock still includes this device in its num_devices
1814 	 * counter although write_all_supers() is not locked out. This
1815 	 * could give a filesystem state which requires a degraded mount.
1816 	 */
1817 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1818 	if (ret)
1819 		goto error_undo;
1820 
1821 	device->in_fs_metadata = 0;
1822 	btrfs_scrub_cancel_dev(root->fs_info, device);
1823 
1824 	/*
1825 	 * the device list mutex makes sure that we don't change
1826 	 * the device list while someone else is writing out all
1827 	 * the device supers. Whoever is writing all supers, should
1828 	 * lock the device list mutex before getting the number of
1829 	 * devices in the super block (super_copy). Conversely,
1830 	 * whoever updates the number of devices in the super block
1831 	 * (super_copy) should hold the device list mutex.
1832 	 */
1833 
1834 	cur_devices = device->fs_devices;
1835 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1836 	list_del_rcu(&device->dev_list);
1837 
1838 	device->fs_devices->num_devices--;
1839 	device->fs_devices->total_devices--;
1840 
1841 	if (device->missing)
1842 		device->fs_devices->missing_devices--;
1843 
1844 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1845 				 struct btrfs_device, dev_list);
1846 	if (device->bdev == root->fs_info->sb->s_bdev)
1847 		root->fs_info->sb->s_bdev = next_device->bdev;
1848 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1849 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1850 
1851 	if (device->bdev) {
1852 		device->fs_devices->open_devices--;
1853 		/* remove sysfs entry */
1854 		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1855 	}
1856 
1857 	call_rcu(&device->rcu, free_device);
1858 
1859 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1860 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1861 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1862 
1863 	if (cur_devices->open_devices == 0) {
1864 		struct btrfs_fs_devices *fs_devices;
1865 		fs_devices = root->fs_info->fs_devices;
1866 		while (fs_devices) {
1867 			if (fs_devices->seed == cur_devices) {
1868 				fs_devices->seed = cur_devices->seed;
1869 				break;
1870 			}
1871 			fs_devices = fs_devices->seed;
1872 		}
1873 		cur_devices->seed = NULL;
1874 		__btrfs_close_devices(cur_devices);
1875 		free_fs_devices(cur_devices);
1876 	}
1877 
1878 	root->fs_info->num_tolerated_disk_barrier_failures =
1879 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1880 
1881 	/*
1882 	 * at this point, the device is zero sized.  We want to
1883 	 * remove it from the devices list and zero out the old super
1884 	 */
1885 	if (clear_super && disk_super) {
1886 		u64 bytenr;
1887 		int i;
1888 
1889 		/* make sure this device isn't detected as part of
1890 		 * the FS anymore
1891 		 */
1892 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1893 		set_buffer_dirty(bh);
1894 		sync_dirty_buffer(bh);
1895 
1896 		/* clear the mirror copies of the super block on the disk
1897 		 * being removed; the 0th copy has been taken care of above
1898 		 * and the loop below takes care of the rest
1899 		 */
1900 		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1901 			bytenr = btrfs_sb_offset(i);
1902 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1903 					i_size_read(bdev->bd_inode))
1904 				break;
1905 
1906 			brelse(bh);
1907 			bh = __bread(bdev, bytenr / 4096,
1908 					BTRFS_SUPER_INFO_SIZE);
1909 			if (!bh)
1910 				continue;
1911 
1912 			disk_super = (struct btrfs_super_block *)bh->b_data;
1913 
1914 			if (btrfs_super_bytenr(disk_super) != bytenr ||
1915 				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1916 				continue;
1917 			}
1918 			memset(&disk_super->magic, 0,
1919 						sizeof(disk_super->magic));
1920 			set_buffer_dirty(bh);
1921 			sync_dirty_buffer(bh);
1922 		}
1923 	}
1924 
1925 	ret = 0;
1926 
1927 	if (bdev) {
1928 		/* Notify udev that device has changed */
1929 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1930 
1931 		/* Update ctime/mtime for device path for libblkid */
1932 		update_dev_time(device_path);
1933 	}
1934 
1935 error_brelse:
1936 	brelse(bh);
1937 	if (bdev)
1938 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1939 out:
1940 	mutex_unlock(&uuid_mutex);
1941 	return ret;
1942 error_undo:
1943 	if (device->writeable) {
1944 		lock_chunks(root);
1945 		list_add(&device->dev_alloc_list,
1946 			 &root->fs_info->fs_devices->alloc_list);
1947 		device->fs_devices->rw_devices++;
1948 		unlock_chunks(root);
1949 	}
1950 	goto error_brelse;
1951 }
1952 
1953 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1954 					struct btrfs_device *srcdev)
1955 {
1956 	struct btrfs_fs_devices *fs_devices;
1957 
1958 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1959 
1960 	/*
1961 	 * In the case of an fs with no seed, srcdev->fs_devices will point
1962 	 * to the fs_devices of fs_info. However, when the dev being replaced
1963 	 * is a seed dev it will point to the seed's local fs_devices. In
1964 	 * short, srcdev will have its correct fs_devices in both cases.
1965 	 */
1966 	fs_devices = srcdev->fs_devices;
1967 
1968 	list_del_rcu(&srcdev->dev_list);
1969 	list_del_rcu(&srcdev->dev_alloc_list);
1970 	fs_devices->num_devices--;
1971 	if (srcdev->missing)
1972 		fs_devices->missing_devices--;
1973 
1974 	if (srcdev->writeable) {
1975 		fs_devices->rw_devices--;
1976 		/* zero out the old super if it is writable */
1977 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1978 	}
1979 
1980 	if (srcdev->bdev)
1981 		fs_devices->open_devices--;
1982 }
1983 
1984 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1985 				      struct btrfs_device *srcdev)
1986 {
1987 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1988 
1989 	call_rcu(&srcdev->rcu, free_device);
1990 
1991 	/*
1992 	 * unless fs_devices belongs to a seed fs, num_devices shouldn't
1993 	 * go to zero
1994 	 */
1995 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1996 
1997 	/* if there are no devs left we'd rather delete the fs_devices */
1998 	if (!fs_devices->num_devices) {
1999 		struct btrfs_fs_devices *tmp_fs_devices;
2000 
2001 		tmp_fs_devices = fs_info->fs_devices;
2002 		while (tmp_fs_devices) {
2003 			if (tmp_fs_devices->seed == fs_devices) {
2004 				tmp_fs_devices->seed = fs_devices->seed;
2005 				break;
2006 			}
2007 			tmp_fs_devices = tmp_fs_devices->seed;
2008 		}
2009 		fs_devices->seed = NULL;
2010 		__btrfs_close_devices(fs_devices);
2011 		free_fs_devices(fs_devices);
2012 	}
2013 }
2014 
2015 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2016 				      struct btrfs_device *tgtdev)
2017 {
2018 	struct btrfs_device *next_device;
2019 
2020 	mutex_lock(&uuid_mutex);
2021 	WARN_ON(!tgtdev);
2022 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2023 
2024 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2025 
2026 	if (tgtdev->bdev) {
2027 		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2028 		fs_info->fs_devices->open_devices--;
2029 	}
2030 	fs_info->fs_devices->num_devices--;
2031 
2032 	next_device = list_entry(fs_info->fs_devices->devices.next,
2033 				 struct btrfs_device, dev_list);
2034 	if (tgtdev->bdev == fs_info->sb->s_bdev)
2035 		fs_info->sb->s_bdev = next_device->bdev;
2036 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
2037 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2038 	list_del_rcu(&tgtdev->dev_list);
2039 
2040 	call_rcu(&tgtdev->rcu, free_device);
2041 
2042 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2043 	mutex_unlock(&uuid_mutex);
2044 }
2045 
2046 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2047 				     struct btrfs_device **device)
2048 {
2049 	int ret = 0;
2050 	struct btrfs_super_block *disk_super;
2051 	u64 devid;
2052 	u8 *dev_uuid;
2053 	struct block_device *bdev;
2054 	struct buffer_head *bh;
2055 
2056 	*device = NULL;
2057 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2058 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
2059 	if (ret)
2060 		return ret;
2061 	disk_super = (struct btrfs_super_block *)bh->b_data;
2062 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2063 	dev_uuid = disk_super->dev_item.uuid;
2064 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2065 				    disk_super->fsid);
2066 	brelse(bh);
2067 	if (!*device)
2068 		ret = -ENOENT;
2069 	blkdev_put(bdev, FMODE_READ);
2070 	return ret;
2071 }
2072 
2073 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2074 					 char *device_path,
2075 					 struct btrfs_device **device)
2076 {
2077 	*device = NULL;
2078 	if (strcmp(device_path, "missing") == 0) {
2079 		struct list_head *devices;
2080 		struct btrfs_device *tmp;
2081 
2082 		devices = &root->fs_info->fs_devices->devices;
2083 		/*
2084 		 * It is safe to read the devices since the volume_mutex
2085 		 * is held by the caller.
2086 		 */
2087 		list_for_each_entry(tmp, devices, dev_list) {
2088 			if (tmp->in_fs_metadata && !tmp->bdev) {
2089 				*device = tmp;
2090 				break;
2091 			}
2092 		}
2093 
2094 		if (!*device)
2095 			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2096 
2097 		return 0;
2098 	} else {
2099 		return btrfs_find_device_by_path(root, device_path, device);
2100 	}
2101 }
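/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * callers live in the device removal and replace paths):
 *
 *	struct btrfs_device *device;
 *	int err;
 *
 *	err = btrfs_find_device_missing_or_by_path(root, "missing", &device);
 *	if (err)
 *		return err;
 *
 * err is BTRFS_ERROR_DEV_MISSING_NOT_FOUND when no missing device
 * exists; as the comment in the body notes, the caller must hold the
 * volume_mutex while the device list is walked.
 */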
2102 
2103 /*
2104  * does all the dirty work required for changing the file system's UUID.
2105  */
2106 static int btrfs_prepare_sprout(struct btrfs_root *root)
2107 {
2108 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2109 	struct btrfs_fs_devices *old_devices;
2110 	struct btrfs_fs_devices *seed_devices;
2111 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2112 	struct btrfs_device *device;
2113 	u64 super_flags;
2114 
2115 	BUG_ON(!mutex_is_locked(&uuid_mutex));
2116 	if (!fs_devices->seeding)
2117 		return -EINVAL;
2118 
2119 	seed_devices = __alloc_fs_devices();
2120 	if (IS_ERR(seed_devices))
2121 		return PTR_ERR(seed_devices);
2122 
2123 	old_devices = clone_fs_devices(fs_devices);
2124 	if (IS_ERR(old_devices)) {
2125 		kfree(seed_devices);
2126 		return PTR_ERR(old_devices);
2127 	}
2128 
2129 	list_add(&old_devices->list, &fs_uuids);
2130 
2131 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2132 	seed_devices->opened = 1;
2133 	INIT_LIST_HEAD(&seed_devices->devices);
2134 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2135 	mutex_init(&seed_devices->device_list_mutex);
2136 
2137 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2138 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2139 			      synchronize_rcu);
2140 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2141 		device->fs_devices = seed_devices;
2142 
2143 	lock_chunks(root);
2144 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2145 	unlock_chunks(root);
2146 
2147 	fs_devices->seeding = 0;
2148 	fs_devices->num_devices = 0;
2149 	fs_devices->open_devices = 0;
2150 	fs_devices->missing_devices = 0;
2151 	fs_devices->rotating = 0;
2152 	fs_devices->seed = seed_devices;
2153 
2154 	generate_random_uuid(fs_devices->fsid);
2155 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2156 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2157 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2158 
2159 	super_flags = btrfs_super_flags(disk_super) &
2160 		      ~BTRFS_SUPER_FLAG_SEEDING;
2161 	btrfs_set_super_flags(disk_super, super_flags);
2162 
2163 	return 0;
2164 }
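/*
 * Rough picture (illustrative, not from the original source) of the
 * result of a successful btrfs_prepare_sprout():
 *
 *	root->fs_info->fs_devices     fresh random fsid, counters zeroed,
 *	        |                     empty device/alloc lists
 *	        +-> ->seed = seed_devices   the old devices and alloc list,
 *	                                    still carrying the seed fsid
 *
 * old_devices, a clone of the original fs_devices, stays on fs_uuids so
 * the seed filesystem remains findable by its original fsid.
 */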
2165 
2166 /*
2167  * store the expected generation for seed devices in device items.
2168  */
2169 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2170 			       struct btrfs_root *root)
2171 {
2172 	struct btrfs_path *path;
2173 	struct extent_buffer *leaf;
2174 	struct btrfs_dev_item *dev_item;
2175 	struct btrfs_device *device;
2176 	struct btrfs_key key;
2177 	u8 fs_uuid[BTRFS_UUID_SIZE];
2178 	u8 dev_uuid[BTRFS_UUID_SIZE];
2179 	u64 devid;
2180 	int ret;
2181 
2182 	path = btrfs_alloc_path();
2183 	if (!path)
2184 		return -ENOMEM;
2185 
2186 	root = root->fs_info->chunk_root;
2187 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2188 	key.offset = 0;
2189 	key.type = BTRFS_DEV_ITEM_KEY;
2190 
2191 	while (1) {
2192 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2193 		if (ret < 0)
2194 			goto error;
2195 
2196 		leaf = path->nodes[0];
2197 next_slot:
2198 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2199 			ret = btrfs_next_leaf(root, path);
2200 			if (ret > 0)
2201 				break;
2202 			if (ret < 0)
2203 				goto error;
2204 			leaf = path->nodes[0];
2205 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2206 			btrfs_release_path(path);
2207 			continue;
2208 		}
2209 
2210 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2211 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2212 		    key.type != BTRFS_DEV_ITEM_KEY)
2213 			break;
2214 
2215 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2216 					  struct btrfs_dev_item);
2217 		devid = btrfs_device_id(leaf, dev_item);
2218 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2219 				   BTRFS_UUID_SIZE);
2220 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2221 				   BTRFS_UUID_SIZE);
2222 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2223 					   fs_uuid);
2224 		BUG_ON(!device); /* Logic error */
2225 
2226 		if (device->fs_devices->seeding) {
2227 			btrfs_set_device_generation(leaf, dev_item,
2228 						    device->generation);
2229 			btrfs_mark_buffer_dirty(leaf);
2230 		}
2231 
2232 		path->slots[0]++;
2233 		goto next_slot;
2234 	}
2235 	ret = 0;
2236 error:
2237 	btrfs_free_path(path);
2238 	return ret;
2239 }
2240 
2241 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2242 {
2243 	struct request_queue *q;
2244 	struct btrfs_trans_handle *trans;
2245 	struct btrfs_device *device;
2246 	struct block_device *bdev;
2247 	struct list_head *devices;
2248 	struct super_block *sb = root->fs_info->sb;
2249 	struct rcu_string *name;
2250 	u64 tmp;
2251 	int seeding_dev = 0;
2252 	int ret = 0;
2253 
2254 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2255 		return -EROFS;
2256 
2257 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2258 				  root->fs_info->bdev_holder);
2259 	if (IS_ERR(bdev))
2260 		return PTR_ERR(bdev);
2261 
2262 	if (root->fs_info->fs_devices->seeding) {
2263 		seeding_dev = 1;
2264 		down_write(&sb->s_umount);
2265 		mutex_lock(&uuid_mutex);
2266 	}
2267 
2268 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2269 
2270 	devices = &root->fs_info->fs_devices->devices;
2271 
2272 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2273 	list_for_each_entry(device, devices, dev_list) {
2274 		if (device->bdev == bdev) {
2275 			ret = -EEXIST;
2276 			mutex_unlock(
2277 				&root->fs_info->fs_devices->device_list_mutex);
2278 			goto error;
2279 		}
2280 	}
2281 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2282 
2283 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2284 	if (IS_ERR(device)) {
2285 		/* we can safely leave the fs_devices entry around */
2286 		ret = PTR_ERR(device);
2287 		goto error;
2288 	}
2289 
2290 	name = rcu_string_strdup(device_path, GFP_NOFS);
2291 	if (!name) {
2292 		kfree(device);
2293 		ret = -ENOMEM;
2294 		goto error;
2295 	}
2296 	rcu_assign_pointer(device->name, name);
2297 
2298 	trans = btrfs_start_transaction(root, 0);
2299 	if (IS_ERR(trans)) {
2300 		rcu_string_free(device->name);
2301 		kfree(device);
2302 		ret = PTR_ERR(trans);
2303 		goto error;
2304 	}
2305 
2306 	q = bdev_get_queue(bdev);
2307 	if (blk_queue_discard(q))
2308 		device->can_discard = 1;
2309 	device->writeable = 1;
2310 	device->generation = trans->transid;
2311 	device->io_width = root->sectorsize;
2312 	device->io_align = root->sectorsize;
2313 	device->sector_size = root->sectorsize;
2314 	device->total_bytes = i_size_read(bdev->bd_inode);
2315 	device->disk_total_bytes = device->total_bytes;
2316 	device->commit_total_bytes = device->total_bytes;
2317 	device->dev_root = root->fs_info->dev_root;
2318 	device->bdev = bdev;
2319 	device->in_fs_metadata = 1;
2320 	device->is_tgtdev_for_dev_replace = 0;
2321 	device->mode = FMODE_EXCL;
2322 	device->dev_stats_valid = 1;
2323 	set_blocksize(device->bdev, 4096);
2324 
2325 	if (seeding_dev) {
2326 		sb->s_flags &= ~MS_RDONLY;
2327 		ret = btrfs_prepare_sprout(root);
2328 		BUG_ON(ret); /* -ENOMEM */
2329 	}
2330 
2331 	device->fs_devices = root->fs_info->fs_devices;
2332 
2333 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2334 	lock_chunks(root);
2335 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2336 	list_add(&device->dev_alloc_list,
2337 		 &root->fs_info->fs_devices->alloc_list);
2338 	root->fs_info->fs_devices->num_devices++;
2339 	root->fs_info->fs_devices->open_devices++;
2340 	root->fs_info->fs_devices->rw_devices++;
2341 	root->fs_info->fs_devices->total_devices++;
2342 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2343 
2344 	spin_lock(&root->fs_info->free_chunk_lock);
2345 	root->fs_info->free_chunk_space += device->total_bytes;
2346 	spin_unlock(&root->fs_info->free_chunk_lock);
2347 
2348 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2349 		root->fs_info->fs_devices->rotating = 1;
2350 
2351 	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2352 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2353 				    tmp + device->total_bytes);
2354 
2355 	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2356 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2357 				    tmp + 1);
2358 
2359 	/* add sysfs device entry */
2360 	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2361 
2362 	/*
2363 	 * we've got more storage, clear any full flags on the space
2364 	 * infos
2365 	 */
2366 	btrfs_clear_space_info_full(root->fs_info);
2367 
2368 	unlock_chunks(root);
2369 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2370 
2371 	if (seeding_dev) {
2372 		lock_chunks(root);
2373 		ret = init_first_rw_device(trans, root, device);
2374 		unlock_chunks(root);
2375 		if (ret) {
2376 			btrfs_abort_transaction(trans, root, ret);
2377 			goto error_trans;
2378 		}
2379 	}
2380 
2381 	ret = btrfs_add_device(trans, root, device);
2382 	if (ret) {
2383 		btrfs_abort_transaction(trans, root, ret);
2384 		goto error_trans;
2385 	}
2386 
2387 	if (seeding_dev) {
2388 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2389 
2390 		ret = btrfs_finish_sprout(trans, root);
2391 		if (ret) {
2392 			btrfs_abort_transaction(trans, root, ret);
2393 			goto error_trans;
2394 		}
2395 
2396 		/* Sprouting changes the fsid of the mounted root,
2397 		 * so rename the fsid in sysfs
2398 		 */
2399 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2400 						root->fs_info->fsid);
2401 		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2402 								fsid_buf))
2403 			btrfs_warn(root->fs_info,
2404 				"sysfs: failed to create fsid for sprout");
2405 	}
2406 
2407 	root->fs_info->num_tolerated_disk_barrier_failures =
2408 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2409 	ret = btrfs_commit_transaction(trans, root);
2410 
2411 	if (seeding_dev) {
2412 		mutex_unlock(&uuid_mutex);
2413 		up_write(&sb->s_umount);
2414 
2415 		if (ret) /* transaction commit */
2416 			return ret;
2417 
2418 		ret = btrfs_relocate_sys_chunks(root);
2419 		if (ret < 0)
2420 			btrfs_std_error(root->fs_info, ret,
2421 				    "Failed to relocate sys chunks after "
2422 				    "device initialization. This can be fixed "
2423 				    "using the \"btrfs balance\" command.");
2424 		trans = btrfs_attach_transaction(root);
2425 		if (IS_ERR(trans)) {
2426 			if (PTR_ERR(trans) == -ENOENT)
2427 				return 0;
2428 			return PTR_ERR(trans);
2429 		}
2430 		ret = btrfs_commit_transaction(trans, root);
2431 	}
2432 
2433 	/* Update ctime/mtime for libblkid */
2434 	update_dev_time(device_path);
2435 	return ret;
2436 
2437 error_trans:
2438 	btrfs_end_transaction(trans, root);
2439 	rcu_string_free(device->name);
2440 	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2441 	kfree(device);
2442 error:
2443 	blkdev_put(bdev, FMODE_EXCL);
2444 	if (seeding_dev) {
2445 		mutex_unlock(&uuid_mutex);
2446 		up_write(&sb->s_umount);
2447 	}
2448 	return ret;
2449 }
2450 
2451 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2452 				  struct btrfs_device *srcdev,
2453 				  struct btrfs_device **device_out)
2454 {
2455 	struct request_queue *q;
2456 	struct btrfs_device *device;
2457 	struct block_device *bdev;
2458 	struct btrfs_fs_info *fs_info = root->fs_info;
2459 	struct list_head *devices;
2460 	struct rcu_string *name;
2461 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2462 	int ret = 0;
2463 
2464 	*device_out = NULL;
2465 	if (fs_info->fs_devices->seeding) {
2466 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2467 		return -EINVAL;
2468 	}
2469 
2470 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2471 				  fs_info->bdev_holder);
2472 	if (IS_ERR(bdev)) {
2473 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2474 		return PTR_ERR(bdev);
2475 	}
2476 
2477 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2478 
2479 	devices = &fs_info->fs_devices->devices;
2480 	list_for_each_entry(device, devices, dev_list) {
2481 		if (device->bdev == bdev) {
2482 			btrfs_err(fs_info, "target device is in the filesystem!");
2483 			ret = -EEXIST;
2484 			goto error;
2485 		}
2486 	}
2487 
2489 	if (i_size_read(bdev->bd_inode) <
2490 	    btrfs_device_get_total_bytes(srcdev)) {
2491 		btrfs_err(fs_info, "target device is smaller than source device!");
2492 		ret = -EINVAL;
2493 		goto error;
2494 	}
2495 
2497 	device = btrfs_alloc_device(NULL, &devid, NULL);
2498 	if (IS_ERR(device)) {
2499 		ret = PTR_ERR(device);
2500 		goto error;
2501 	}
2502 
2503 	name = rcu_string_strdup(device_path, GFP_NOFS);
2504 	if (!name) {
2505 		kfree(device);
2506 		ret = -ENOMEM;
2507 		goto error;
2508 	}
2509 	rcu_assign_pointer(device->name, name);
2510 
2511 	q = bdev_get_queue(bdev);
2512 	if (blk_queue_discard(q))
2513 		device->can_discard = 1;
2514 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2515 	device->writeable = 1;
2516 	device->generation = 0;
2517 	device->io_width = root->sectorsize;
2518 	device->io_align = root->sectorsize;
2519 	device->sector_size = root->sectorsize;
2520 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2521 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2522 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2523 	ASSERT(list_empty(&srcdev->resized_list));
2524 	device->commit_total_bytes = srcdev->commit_total_bytes;
2525 	device->commit_bytes_used = device->bytes_used;
2526 	device->dev_root = fs_info->dev_root;
2527 	device->bdev = bdev;
2528 	device->in_fs_metadata = 1;
2529 	device->is_tgtdev_for_dev_replace = 1;
2530 	device->mode = FMODE_EXCL;
2531 	device->dev_stats_valid = 1;
2532 	set_blocksize(device->bdev, 4096);
2533 	device->fs_devices = fs_info->fs_devices;
2534 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2535 	fs_info->fs_devices->num_devices++;
2536 	fs_info->fs_devices->open_devices++;
2537 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2538 
2539 	*device_out = device;
2540 	return ret;
2541 
2542 error:
2543 	blkdev_put(bdev, FMODE_EXCL);
2544 	return ret;
2545 }
2546 
2547 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2548 					      struct btrfs_device *tgtdev)
2549 {
2550 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2551 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2552 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2553 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2554 	tgtdev->dev_root = fs_info->dev_root;
2555 	tgtdev->in_fs_metadata = 1;
2556 }
2557 
2558 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2559 					struct btrfs_device *device)
2560 {
2561 	int ret;
2562 	struct btrfs_path *path;
2563 	struct btrfs_root *root;
2564 	struct btrfs_dev_item *dev_item;
2565 	struct extent_buffer *leaf;
2566 	struct btrfs_key key;
2567 
2568 	root = device->dev_root->fs_info->chunk_root;
2569 
2570 	path = btrfs_alloc_path();
2571 	if (!path)
2572 		return -ENOMEM;
2573 
2574 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2575 	key.type = BTRFS_DEV_ITEM_KEY;
2576 	key.offset = device->devid;
2577 
2578 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2579 	if (ret < 0)
2580 		goto out;
2581 
2582 	if (ret > 0) {
2583 		ret = -ENOENT;
2584 		goto out;
2585 	}
2586 
2587 	leaf = path->nodes[0];
2588 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2589 
2590 	btrfs_set_device_id(leaf, dev_item, device->devid);
2591 	btrfs_set_device_type(leaf, dev_item, device->type);
2592 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2593 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2594 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2595 	btrfs_set_device_total_bytes(leaf, dev_item,
2596 				     btrfs_device_get_disk_total_bytes(device));
2597 	btrfs_set_device_bytes_used(leaf, dev_item,
2598 				    btrfs_device_get_bytes_used(device));
2599 	btrfs_mark_buffer_dirty(leaf);
2600 
2601 out:
2602 	btrfs_free_path(path);
2603 	return ret;
2604 }
2605 
2606 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2607 		      struct btrfs_device *device, u64 new_size)
2608 {
2609 	struct btrfs_super_block *super_copy =
2610 		device->dev_root->fs_info->super_copy;
2611 	struct btrfs_fs_devices *fs_devices;
2612 	u64 old_total;
2613 	u64 diff;
2614 
2615 	if (!device->writeable)
2616 		return -EACCES;
2617 
2618 	lock_chunks(device->dev_root);
2619 	old_total = btrfs_super_total_bytes(super_copy);
2620 	diff = new_size - device->total_bytes;
2621 
2622 	if (new_size <= device->total_bytes ||
2623 	    device->is_tgtdev_for_dev_replace) {
2624 		unlock_chunks(device->dev_root);
2625 		return -EINVAL;
2626 	}
2627 
2628 	fs_devices = device->dev_root->fs_info->fs_devices;
2629 
2630 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2631 	device->fs_devices->total_rw_bytes += diff;
2632 
2633 	btrfs_device_set_total_bytes(device, new_size);
2634 	btrfs_device_set_disk_total_bytes(device, new_size);
2635 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2636 	if (list_empty(&device->resized_list))
2637 		list_add_tail(&device->resized_list,
2638 			      &fs_devices->resized_devices);
2639 	unlock_chunks(device->dev_root);
2640 
2641 	return btrfs_update_device(trans, device);
2642 }
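/*
 * Worked example (illustrative): growing a device from 100GiB to 110GiB
 * gives diff = 10GiB, so both the super block total_bytes and
 * total_rw_bytes grow by 10GiB and the device is queued on
 * fs_devices->resized_devices for the transaction commit to pick up.
 * A hypothetical caller, mirroring __btrfs_balance() below:
 *
 *	trans = btrfs_start_transaction(device->dev_root, 0);
 *	if (!IS_ERR(trans)) {
 *		ret = btrfs_grow_device(trans, device, new_size);
 *		btrfs_end_transaction(trans, device->dev_root);
 *	}
 */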
2643 
2644 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2645 			    struct btrfs_root *root, u64 chunk_objectid,
2646 			    u64 chunk_offset)
2647 {
2648 	int ret;
2649 	struct btrfs_path *path;
2650 	struct btrfs_key key;
2651 
2652 	root = root->fs_info->chunk_root;
2653 	path = btrfs_alloc_path();
2654 	if (!path)
2655 		return -ENOMEM;
2656 
2657 	key.objectid = chunk_objectid;
2658 	key.offset = chunk_offset;
2659 	key.type = BTRFS_CHUNK_ITEM_KEY;
2660 
2661 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2662 	if (ret < 0)
2663 		goto out;
2664 	else if (ret > 0) { /* Logic error or corruption */
2665 		btrfs_std_error(root->fs_info, -ENOENT,
2666 			    "Failed lookup while freeing chunk.");
2667 		ret = -ENOENT;
2668 		goto out;
2669 	}
2670 
2671 	ret = btrfs_del_item(trans, root, path);
2672 	if (ret < 0)
2673 		btrfs_std_error(root->fs_info, ret,
2674 			    "Failed to delete chunk item.");
2675 out:
2676 	btrfs_free_path(path);
2677 	return ret;
2678 }
2679 
2680 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2681 			chunk_offset)
2682 {
2683 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2684 	struct btrfs_disk_key *disk_key;
2685 	struct btrfs_chunk *chunk;
2686 	u8 *ptr;
2687 	int ret = 0;
2688 	u32 num_stripes;
2689 	u32 array_size;
2690 	u32 len = 0;
2691 	u32 cur;
2692 	struct btrfs_key key;
2693 
2694 	lock_chunks(root);
2695 	array_size = btrfs_super_sys_array_size(super_copy);
2696 
2697 	ptr = super_copy->sys_chunk_array;
2698 	cur = 0;
2699 
2700 	while (cur < array_size) {
2701 		disk_key = (struct btrfs_disk_key *)ptr;
2702 		btrfs_disk_key_to_cpu(&key, disk_key);
2703 
2704 		len = sizeof(*disk_key);
2705 
2706 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2707 			chunk = (struct btrfs_chunk *)(ptr + len);
2708 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2709 			len += btrfs_chunk_item_size(num_stripes);
2710 		} else {
2711 			ret = -EIO;
2712 			break;
2713 		}
2714 		if (key.objectid == chunk_objectid &&
2715 		    key.offset == chunk_offset) {
2716 			memmove(ptr, ptr + len, array_size - (cur + len));
2717 			array_size -= len;
2718 			btrfs_set_super_sys_array_size(super_copy, array_size);
2719 		} else {
2720 			ptr += len;
2721 			cur += len;
2722 		}
2723 	}
2724 	unlock_chunks(root);
2725 	return ret;
2726 }
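/*
 * Illustrative layout (not in the original source) of sys_chunk_array
 * as walked above -- variable-sized entries stored back to back:
 *
 *	+----------+--------------------+----------+--------------------+
 *	| disk_key | chunk, num_stripes | disk_key | chunk, num_stripes | ...
 *	+----------+--------------------+----------+--------------------+
 *
 * One entry spans len = sizeof(*disk_key) + btrfs_chunk_item_size(n)
 * bytes, and deleting the entry at ptr is a single memmove() of
 * everything after it plus a matching shrink of array_size.
 */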
2727 
2728 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2729 		       struct btrfs_root *root, u64 chunk_offset)
2730 {
2731 	struct extent_map_tree *em_tree;
2732 	struct extent_map *em;
2733 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2734 	struct map_lookup *map;
2735 	u64 dev_extent_len = 0;
2736 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2737 	int i, ret = 0;
2738 
2739 	/* Just in case */
2740 	root = root->fs_info->chunk_root;
2741 	em_tree = &root->fs_info->mapping_tree.map_tree;
2742 
2743 	read_lock(&em_tree->lock);
2744 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2745 	read_unlock(&em_tree->lock);
2746 
2747 	if (!em || em->start > chunk_offset ||
2748 	    em->start + em->len < chunk_offset) {
2749 		/*
2750 		 * This is a logic error, but we don't want to just rely on the
2751 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2752 		 * do anything we still error out.
2753 		 */
2754 		ASSERT(0);
2755 		if (em)
2756 			free_extent_map(em);
2757 		return -EINVAL;
2758 	}
2759 	map = em->map_lookup;
2760 	lock_chunks(root->fs_info->chunk_root);
2761 	check_system_chunk(trans, extent_root, map->type);
2762 	unlock_chunks(root->fs_info->chunk_root);
2763 
2764 	for (i = 0; i < map->num_stripes; i++) {
2765 		struct btrfs_device *device = map->stripes[i].dev;
2766 		ret = btrfs_free_dev_extent(trans, device,
2767 					    map->stripes[i].physical,
2768 					    &dev_extent_len);
2769 		if (ret) {
2770 			btrfs_abort_transaction(trans, root, ret);
2771 			goto out;
2772 		}
2773 
2774 		if (device->bytes_used > 0) {
2775 			lock_chunks(root);
2776 			btrfs_device_set_bytes_used(device,
2777 					device->bytes_used - dev_extent_len);
2778 			spin_lock(&root->fs_info->free_chunk_lock);
2779 			root->fs_info->free_chunk_space += dev_extent_len;
2780 			spin_unlock(&root->fs_info->free_chunk_lock);
2781 			btrfs_clear_space_info_full(root->fs_info);
2782 			unlock_chunks(root);
2783 		}
2784 
2785 		if (map->stripes[i].dev) {
2786 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2787 			if (ret) {
2788 				btrfs_abort_transaction(trans, root, ret);
2789 				goto out;
2790 			}
2791 		}
2792 	}
2793 	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2794 	if (ret) {
2795 		btrfs_abort_transaction(trans, root, ret);
2796 		goto out;
2797 	}
2798 
2799 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2800 
2801 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2802 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2803 		if (ret) {
2804 			btrfs_abort_transaction(trans, root, ret);
2805 			goto out;
2806 		}
2807 	}
2808 
2809 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2810 	if (ret) {
2811 		btrfs_abort_transaction(trans, extent_root, ret);
2812 		goto out;
2813 	}
2814 
2815 out:
2816 	/* once for us */
2817 	free_extent_map(em);
2818 	return ret;
2819 }
2820 
2821 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2822 {
2823 	struct btrfs_root *extent_root;
2824 	struct btrfs_trans_handle *trans;
2825 	int ret;
2826 
2827 	root = root->fs_info->chunk_root;
2828 	extent_root = root->fs_info->extent_root;
2829 
2830 	/*
2831 	 * Prevent races with automatic removal of unused block groups.
2832 	 * After we relocate and before we remove the chunk with offset
2833 	 * chunk_offset, automatic removal of the block group can kick in,
2834 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2835 	 *
2836 	 * Make sure to acquire this mutex before doing a tree search (dev
2837 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2838 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2839 	 * we release the path used to search the chunk/dev tree and before
2840 	 * the current task acquires this mutex and calls us.
2841 	 */
2842 	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2843 
2844 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2845 	if (ret)
2846 		return -ENOSPC;
2847 
2848 	/* step one, relocate all the extents inside this chunk */
2849 	btrfs_scrub_pause(root);
2850 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2851 	btrfs_scrub_continue(root);
2852 	if (ret)
2853 		return ret;
2854 
2855 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2856 						     chunk_offset);
2857 	if (IS_ERR(trans)) {
2858 		ret = PTR_ERR(trans);
2859 		btrfs_std_error(root->fs_info, ret, NULL);
2860 		return ret;
2861 	}
2862 
2863 	/*
2864 	 * step two, delete the device extents and the
2865 	 * chunk tree entries
2866 	 */
2867 	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2868 	btrfs_end_transaction(trans, root);
2869 	return ret;
2870 }
2871 
2872 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2873 {
2874 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2875 	struct btrfs_path *path;
2876 	struct extent_buffer *leaf;
2877 	struct btrfs_chunk *chunk;
2878 	struct btrfs_key key;
2879 	struct btrfs_key found_key;
2880 	u64 chunk_type;
2881 	bool retried = false;
2882 	int failed = 0;
2883 	int ret;
2884 
2885 	path = btrfs_alloc_path();
2886 	if (!path)
2887 		return -ENOMEM;
2888 
2889 again:
2890 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2891 	key.offset = (u64)-1;
2892 	key.type = BTRFS_CHUNK_ITEM_KEY;
2893 
2894 	while (1) {
2895 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2896 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2897 		if (ret < 0) {
2898 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2899 			goto error;
2900 		}
2901 		BUG_ON(ret == 0); /* Corruption */
2902 
2903 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2904 					  key.type);
2905 		if (ret)
2906 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2907 		if (ret < 0)
2908 			goto error;
2909 		if (ret > 0)
2910 			break;
2911 
2912 		leaf = path->nodes[0];
2913 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2914 
2915 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2916 				       struct btrfs_chunk);
2917 		chunk_type = btrfs_chunk_type(leaf, chunk);
2918 		btrfs_release_path(path);
2919 
2920 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2921 			ret = btrfs_relocate_chunk(chunk_root,
2922 						   found_key.offset);
2923 			if (ret == -ENOSPC)
2924 				failed++;
2925 			else
2926 				BUG_ON(ret);
2927 		}
2928 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2929 
2930 		if (found_key.offset == 0)
2931 			break;
2932 		key.offset = found_key.offset - 1;
2933 	}
2934 	ret = 0;
2935 	if (failed && !retried) {
2936 		failed = 0;
2937 		retried = true;
2938 		goto again;
2939 	} else if (WARN_ON(failed && retried)) {
2940 		ret = -ENOSPC;
2941 	}
2942 error:
2943 	btrfs_free_path(path);
2944 	return ret;
2945 }
2946 
2947 static int insert_balance_item(struct btrfs_root *root,
2948 			       struct btrfs_balance_control *bctl)
2949 {
2950 	struct btrfs_trans_handle *trans;
2951 	struct btrfs_balance_item *item;
2952 	struct btrfs_disk_balance_args disk_bargs;
2953 	struct btrfs_path *path;
2954 	struct extent_buffer *leaf;
2955 	struct btrfs_key key;
2956 	int ret, err;
2957 
2958 	path = btrfs_alloc_path();
2959 	if (!path)
2960 		return -ENOMEM;
2961 
2962 	trans = btrfs_start_transaction(root, 0);
2963 	if (IS_ERR(trans)) {
2964 		btrfs_free_path(path);
2965 		return PTR_ERR(trans);
2966 	}
2967 
2968 	key.objectid = BTRFS_BALANCE_OBJECTID;
2969 	key.type = BTRFS_BALANCE_ITEM_KEY;
2970 	key.offset = 0;
2971 
2972 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2973 				      sizeof(*item));
2974 	if (ret)
2975 		goto out;
2976 
2977 	leaf = path->nodes[0];
2978 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2979 
2980 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2981 
2982 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2983 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2984 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2985 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2986 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2987 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2988 
2989 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2990 
2991 	btrfs_mark_buffer_dirty(leaf);
2992 out:
2993 	btrfs_free_path(path);
2994 	err = btrfs_commit_transaction(trans, root);
2995 	if (err && !ret)
2996 		ret = err;
2997 	return ret;
2998 }
2999 
3000 static int del_balance_item(struct btrfs_root *root)
3001 {
3002 	struct btrfs_trans_handle *trans;
3003 	struct btrfs_path *path;
3004 	struct btrfs_key key;
3005 	int ret, err;
3006 
3007 	path = btrfs_alloc_path();
3008 	if (!path)
3009 		return -ENOMEM;
3010 
3011 	trans = btrfs_start_transaction(root, 0);
3012 	if (IS_ERR(trans)) {
3013 		btrfs_free_path(path);
3014 		return PTR_ERR(trans);
3015 	}
3016 
3017 	key.objectid = BTRFS_BALANCE_OBJECTID;
3018 	key.type = BTRFS_BALANCE_ITEM_KEY;
3019 	key.offset = 0;
3020 
3021 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3022 	if (ret < 0)
3023 		goto out;
3024 	if (ret > 0) {
3025 		ret = -ENOENT;
3026 		goto out;
3027 	}
3028 
3029 	ret = btrfs_del_item(trans, root, path);
3030 out:
3031 	btrfs_free_path(path);
3032 	err = btrfs_commit_transaction(trans, root);
3033 	if (err && !ret)
3034 		ret = err;
3035 	return ret;
3036 }
3037 
3038 /*
3039  * This is a heuristic used to reduce the number of chunks balanced on
3040  * resume after balance was interrupted.
3041  */
3042 static void update_balance_args(struct btrfs_balance_control *bctl)
3043 {
3044 	/*
3045 	 * Turn on soft mode for chunk types that were being converted.
3046 	 */
3047 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3048 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3049 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3050 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3051 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3052 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3053 
3054 	/*
3055 	 * Turn on the usage filter if it is not already used.  The idea is
3056 	 * that chunks that we have already balanced should be
3057 	 * reasonably full.  Don't do it for chunks that are being
3058 	 * converted - that will keep us from relocating unconverted
3059 	 * (albeit full) chunks.
3060 	 */
3061 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3062 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3063 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3064 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3065 		bctl->data.usage = 90;
3066 	}
3067 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3068 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3069 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3070 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3071 		bctl->sys.usage = 90;
3072 	}
3073 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3074 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3075 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3076 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3077 		bctl->meta.usage = 90;
3078 	}
3079 }
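/*
 * Example of the heuristic (illustrative): a balance started with
 * "-dconvert=raid1" that is interrupted and resumed continues with
 * data.flags |= BTRFS_BALANCE_ARGS_SOFT, skipping already-converted
 * raid1 chunks, while the untouched meta and sys args gain
 * BTRFS_BALANCE_ARGS_USAGE with usage = 90, skipping chunks that are
 * already reasonably full.
 */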
3080 
3081 /*
3082  * Should be called with both balance and volume mutexes held to
3083  * serialize other volume operations (add_dev/rm_dev/resize) with
3084  * restriper.  Same goes for unset_balance_control.
3085  */
3086 static void set_balance_control(struct btrfs_balance_control *bctl)
3087 {
3088 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3089 
3090 	BUG_ON(fs_info->balance_ctl);
3091 
3092 	spin_lock(&fs_info->balance_lock);
3093 	fs_info->balance_ctl = bctl;
3094 	spin_unlock(&fs_info->balance_lock);
3095 }
3096 
3097 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3098 {
3099 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3100 
3101 	BUG_ON(!fs_info->balance_ctl);
3102 
3103 	spin_lock(&fs_info->balance_lock);
3104 	fs_info->balance_ctl = NULL;
3105 	spin_unlock(&fs_info->balance_lock);
3106 
3107 	kfree(bctl);
3108 }
3109 
3110 /*
3111  * Balance filters.  Return 1 if chunk should be filtered out
3112  * (should not be balanced).
3113  */
3114 static int chunk_profiles_filter(u64 chunk_type,
3115 				 struct btrfs_balance_args *bargs)
3116 {
3117 	chunk_type = chunk_to_extended(chunk_type) &
3118 				BTRFS_EXTENDED_PROFILE_MASK;
3119 
3120 	if (bargs->profiles & chunk_type)
3121 		return 0;
3122 
3123 	return 1;
3124 }
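/*
 * Example (illustrative): for a raid1 chunk, chunk_to_extended() keeps
 * the BTRFS_BLOCK_GROUP_RAID1 bit, so with
 * bargs->profiles = BTRFS_BLOCK_GROUP_RAID1 the filter returns 0 and
 * the chunk is balanced; a raid0 chunk would return 1 and be skipped.
 */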
3125 
3126 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3127 			      struct btrfs_balance_args *bargs)
3128 {
3129 	struct btrfs_block_group_cache *cache;
3130 	u64 chunk_used;
3131 	u64 user_thresh_min;
3132 	u64 user_thresh_max;
3133 	int ret = 1;
3134 
3135 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3136 	chunk_used = btrfs_block_group_used(&cache->item);
3137 
3138 	if (bargs->usage_min == 0)
3139 		user_thresh_min = 0;
3140 	else
3141 		user_thresh_min = div_factor_fine(cache->key.offset,
3142 					bargs->usage_min);
3143 
3144 	if (bargs->usage_max == 0)
3145 		user_thresh_max = 1;
3146 	else if (bargs->usage_max > 100)
3147 		user_thresh_max = cache->key.offset;
3148 	else
3149 		user_thresh_max = div_factor_fine(cache->key.offset,
3150 					bargs->usage_max);
3151 
3152 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3153 		ret = 0;
3154 
3155 	btrfs_put_block_group(cache);
3156 	return ret;
3157 }
3158 
3159 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3160 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3161 {
3162 	struct btrfs_block_group_cache *cache;
3163 	u64 chunk_used, user_thresh;
3164 	int ret = 1;
3165 
3166 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3167 	chunk_used = btrfs_block_group_used(&cache->item);
3168 
3169 	if (bargs->usage_min == 0)
3170 		user_thresh = 1;
3171 	else if (bargs->usage > 100)
3172 		user_thresh = cache->key.offset;
3173 	else
3174 		user_thresh = div_factor_fine(cache->key.offset,
3175 					      bargs->usage);
3176 
3177 	if (chunk_used < user_thresh)
3178 		ret = 0;
3179 
3180 	btrfs_put_block_group(cache);
3181 	return ret;
3182 }
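/*
 * Worked example (illustrative): div_factor_fine(x, f) computes
 * x * f / 100, so a 1GiB chunk with bargs->usage = 90 gets a threshold
 * of ~921.6MiB.  A chunk with 500MiB used is below that, the filter
 * returns 0 and the chunk is balanced; one with 950MiB used returns 1
 * and is filtered out.
 */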
3183 
3184 static int chunk_devid_filter(struct extent_buffer *leaf,
3185 			      struct btrfs_chunk *chunk,
3186 			      struct btrfs_balance_args *bargs)
3187 {
3188 	struct btrfs_stripe *stripe;
3189 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3190 	int i;
3191 
3192 	for (i = 0; i < num_stripes; i++) {
3193 		stripe = btrfs_stripe_nr(chunk, i);
3194 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3195 			return 0;
3196 	}
3197 
3198 	return 1;
3199 }
3200 
3201 /* [pstart, pend) */
3202 static int chunk_drange_filter(struct extent_buffer *leaf,
3203 			       struct btrfs_chunk *chunk,
3204 			       u64 chunk_offset,
3205 			       struct btrfs_balance_args *bargs)
3206 {
3207 	struct btrfs_stripe *stripe;
3208 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3209 	u64 stripe_offset;
3210 	u64 stripe_length;
3211 	int factor;
3212 	int i;
3213 
3214 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3215 		return 0;
3216 
3217 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3218 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3219 		factor = num_stripes / 2;
3220 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3221 		factor = num_stripes - 1;
3222 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3223 		factor = num_stripes - 2;
3224 	} else {
3225 		factor = num_stripes;
3226 	}
3227 
3228 	for (i = 0; i < num_stripes; i++) {
3229 		stripe = btrfs_stripe_nr(chunk, i);
3230 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3231 			continue;
3232 
3233 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3234 		stripe_length = btrfs_chunk_length(leaf, chunk);
3235 		stripe_length = div_u64(stripe_length, factor);
3236 
3237 		if (stripe_offset < bargs->pend &&
3238 		    stripe_offset + stripe_length > bargs->pstart)
3239 			return 0;
3240 	}
3241 
3242 	return 1;
3243 }
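/*
 * Worked example (illustrative): a raid10 chunk with 4 stripes has
 * factor = num_stripes / 2 = 2, so a 2GiB chunk occupies
 * stripe_length = 2GiB / 2 = 1GiB on each device.  The [pstart, pend)
 * window is then intersected with each matching stripe's physical
 * extent [stripe_offset, stripe_offset + stripe_length).
 */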
3244 
3245 /* [vstart, vend) */
3246 static int chunk_vrange_filter(struct extent_buffer *leaf,
3247 			       struct btrfs_chunk *chunk,
3248 			       u64 chunk_offset,
3249 			       struct btrfs_balance_args *bargs)
3250 {
3251 	if (chunk_offset < bargs->vend &&
3252 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3253 		/* at least part of the chunk is inside this vrange */
3254 		return 0;
3255 
3256 	return 1;
3257 }
3258 
3259 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3260 			       struct btrfs_chunk *chunk,
3261 			       struct btrfs_balance_args *bargs)
3262 {
3263 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3264 
3265 	if (bargs->stripes_min <= num_stripes
3266 			&& num_stripes <= bargs->stripes_max)
3267 		return 0;
3268 
3269 	return 1;
3270 }
3271 
3272 static int chunk_soft_convert_filter(u64 chunk_type,
3273 				     struct btrfs_balance_args *bargs)
3274 {
3275 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3276 		return 0;
3277 
3278 	chunk_type = chunk_to_extended(chunk_type) &
3279 				BTRFS_EXTENDED_PROFILE_MASK;
3280 
3281 	if (bargs->target == chunk_type)
3282 		return 1;
3283 
3284 	return 0;
3285 }
3286 
3287 static int should_balance_chunk(struct btrfs_root *root,
3288 				struct extent_buffer *leaf,
3289 				struct btrfs_chunk *chunk, u64 chunk_offset)
3290 {
3291 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3292 	struct btrfs_balance_args *bargs = NULL;
3293 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3294 
3295 	/* type filter */
3296 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3297 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3298 		return 0;
3299 	}
3300 
3301 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3302 		bargs = &bctl->data;
3303 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3304 		bargs = &bctl->sys;
3305 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3306 		bargs = &bctl->meta;
3307 
3308 	/* profiles filter */
3309 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3310 	    chunk_profiles_filter(chunk_type, bargs)) {
3311 		return 0;
3312 	}
3313 
3314 	/* usage filter */
3315 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3316 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3317 		return 0;
3318 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3319 	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3320 		return 0;
3321 	}
3322 
3323 	/* devid filter */
3324 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3325 	    chunk_devid_filter(leaf, chunk, bargs)) {
3326 		return 0;
3327 	}
3328 
3329 	/* drange filter, makes sense only with devid filter */
3330 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3331 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3332 		return 0;
3333 	}
3334 
3335 	/* vrange filter */
3336 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3337 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3338 		return 0;
3339 	}
3340 
3341 	/* stripes filter */
3342 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3343 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3344 		return 0;
3345 	}
3346 
3347 	/* soft profile changing mode */
3348 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3349 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3350 		return 0;
3351 	}
3352 
3353 	/*
3354 	 * limited by count, must be the last filter
3355 	 */
3356 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3357 		if (bargs->limit == 0)
3358 			return 0;
3359 		else
3360 			bargs->limit--;
3361 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3362 		/*
3363 		 * Same logic as the 'limit' filter; the minimum cannot be
3364 		 * determined here because we do not have the global information
3365 		 * about the count of all chunks that satisfy the filters.
3366 		 */
3367 		if (bargs->limit_max == 0)
3368 			return 0;
3369 		else
3370 			bargs->limit_max--;
3371 	}
3372 
3373 	return 1;
3374 }
3375 
3376 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3377 {
3378 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3379 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3380 	struct btrfs_root *dev_root = fs_info->dev_root;
3381 	struct list_head *devices;
3382 	struct btrfs_device *device;
3383 	u64 old_size;
3384 	u64 size_to_free;
3385 	u64 chunk_type;
3386 	struct btrfs_chunk *chunk;
3387 	struct btrfs_path *path;
3388 	struct btrfs_key key;
3389 	struct btrfs_key found_key;
3390 	struct btrfs_trans_handle *trans;
3391 	struct extent_buffer *leaf;
3392 	int slot;
3393 	int ret;
3394 	int enospc_errors = 0;
3395 	bool counting = true;
3396 	/* The single value limit and min/max limits use the same bytes in the
	   balance args union; save them so they can be restored after the
	   counting pass */
3397 	u64 limit_data = bctl->data.limit;
3398 	u64 limit_meta = bctl->meta.limit;
3399 	u64 limit_sys = bctl->sys.limit;
3400 	u32 count_data = 0;
3401 	u32 count_meta = 0;
3402 	u32 count_sys = 0;
3403 	int chunk_reserved = 0;
3404 
3405 	/* step one, make some room on all the devices */
3406 	devices = &fs_info->fs_devices->devices;
3407 	list_for_each_entry(device, devices, dev_list) {
3408 		old_size = btrfs_device_get_total_bytes(device);
3409 		size_to_free = div_factor(old_size, 1);
3410 		size_to_free = min_t(u64, size_to_free, SZ_1M);
3411 		if (!device->writeable ||
3412 		    btrfs_device_get_total_bytes(device) -
3413 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3414 		    device->is_tgtdev_for_dev_replace)
3415 			continue;
3416 
3417 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3418 		if (ret == -ENOSPC)
3419 			break;
3420 		BUG_ON(ret);
3421 
3422 		trans = btrfs_start_transaction(dev_root, 0);
3423 		BUG_ON(IS_ERR(trans));
3424 
3425 		ret = btrfs_grow_device(trans, device, old_size);
3426 		BUG_ON(ret);
3427 
3428 		btrfs_end_transaction(trans, dev_root);
3429 	}
3430 
3431 	/* step two, relocate all the chunks */
3432 	path = btrfs_alloc_path();
3433 	if (!path) {
3434 		ret = -ENOMEM;
3435 		goto error;
3436 	}
3437 
3438 	/* zero out stat counters */
3439 	spin_lock(&fs_info->balance_lock);
3440 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3441 	spin_unlock(&fs_info->balance_lock);
3442 again:
3443 	if (!counting) {
3444 		/*
3445 		 * The single value limit and min/max limits use the same bytes
3446 		 * in the balance args union, so restore the values saved above
3447 		 */
3448 		bctl->data.limit = limit_data;
3449 		bctl->meta.limit = limit_meta;
3450 		bctl->sys.limit = limit_sys;
3451 	}
3452 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3453 	key.offset = (u64)-1;
3454 	key.type = BTRFS_CHUNK_ITEM_KEY;
3455 
3456 	while (1) {
3457 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3458 		    atomic_read(&fs_info->balance_cancel_req)) {
3459 			ret = -ECANCELED;
3460 			goto error;
3461 		}
3462 
3463 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3464 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3465 		if (ret < 0) {
3466 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3467 			goto error;
3468 		}
3469 
3470 		/*
3471 		 * this shouldn't happen, it means the last relocate
3472 		 * failed
3473 		 */
3474 		if (ret == 0)
3475 			BUG(); /* FIXME break ? */
3476 
3477 		ret = btrfs_previous_item(chunk_root, path, 0,
3478 					  BTRFS_CHUNK_ITEM_KEY);
3479 		if (ret) {
3480 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3481 			ret = 0;
3482 			break;
3483 		}
3484 
3485 		leaf = path->nodes[0];
3486 		slot = path->slots[0];
3487 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3488 
3489 		if (found_key.objectid != key.objectid) {
3490 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3491 			break;
3492 		}
3493 
3494 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3495 		chunk_type = btrfs_chunk_type(leaf, chunk);
3496 
3497 		if (!counting) {
3498 			spin_lock(&fs_info->balance_lock);
3499 			bctl->stat.considered++;
3500 			spin_unlock(&fs_info->balance_lock);
3501 		}
3502 
3503 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3504 					   found_key.offset);
3505 
3506 		btrfs_release_path(path);
3507 		if (!ret) {
3508 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3509 			goto loop;
3510 		}
3511 
3512 		if (counting) {
3513 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3514 			spin_lock(&fs_info->balance_lock);
3515 			bctl->stat.expected++;
3516 			spin_unlock(&fs_info->balance_lock);
3517 
3518 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3519 				count_data++;
3520 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3521 				count_sys++;
3522 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3523 				count_meta++;
3524 
3525 			goto loop;
3526 		}
3527 
3528 		/*
3529 		 * Apply the limit_min filter; no need to check whether the
3530 		 * LIMITS filter is used, since limit_min is 0 by default
3531 		 */
3532 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3533 					count_data < bctl->data.limit_min)
3534 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3535 					count_meta < bctl->meta.limit_min)
3536 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3537 					count_sys < bctl->sys.limit_min)) {
3538 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3539 			goto loop;
3540 		}
3541 
3542 		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
3543 			trans = btrfs_start_transaction(chunk_root, 0);
3544 			if (IS_ERR(trans)) {
3545 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3546 				ret = PTR_ERR(trans);
3547 				goto error;
3548 			}
3549 
3550 			ret = btrfs_force_chunk_alloc(trans, chunk_root,
3551 						      BTRFS_BLOCK_GROUP_DATA);
3552 			btrfs_end_transaction(trans, chunk_root);
3553 			if (ret < 0) {
3554 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3555 				goto error;
3556 			}
3557 			chunk_reserved = 1;
3558 		}
3559 
3560 		ret = btrfs_relocate_chunk(chunk_root,
3561 					   found_key.offset);
3562 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3563 		if (ret && ret != -ENOSPC)
3564 			goto error;
3565 		if (ret == -ENOSPC) {
3566 			enospc_errors++;
3567 		} else {
3568 			spin_lock(&fs_info->balance_lock);
3569 			bctl->stat.completed++;
3570 			spin_unlock(&fs_info->balance_lock);
3571 		}
3572 loop:
3573 		if (found_key.offset == 0)
3574 			break;
3575 		key.offset = found_key.offset - 1;
3576 	}
3577 
3578 	if (counting) {
3579 		btrfs_release_path(path);
3580 		counting = false;
3581 		goto again;
3582 	}
3583 error:
3584 	btrfs_free_path(path);
3585 	if (enospc_errors) {
3586 		btrfs_info(fs_info, "%d enospc errors during balance",
3587 		       enospc_errors);
3588 		if (!ret)
3589 			ret = -ENOSPC;
3590 	}
3591 
3592 	return ret;
3593 }
3594 
3595 /**
3596  * alloc_profile_is_valid - see if a given profile is valid and reduced
3597  * @flags: profile to validate
3598  * @extended: if true @flags is treated as an extended profile
3599  */
3600 static int alloc_profile_is_valid(u64 flags, int extended)
3601 {
3602 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3603 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3604 
3605 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3606 
3607 	/* 1) check that all other bits are zeroed */
3608 	if (flags & ~mask)
3609 		return 0;
3610 
3611 	/* 2) see if profile is reduced */
3612 	if (flags == 0)
3613 		return !extended; /* "0" is valid for usual profiles */
3614 
3615 	/* true if exactly one bit set */
3616 	return (flags & (flags - 1)) == 0;
3617 }
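/*
 * Examples (illustrative):
 *
 *	alloc_profile_is_valid(BTRFS_BLOCK_GROUP_DATA |
 *			       BTRFS_BLOCK_GROUP_RAID1, 0) == 1  (one bit)
 *	alloc_profile_is_valid(BTRFS_BLOCK_GROUP_RAID0 |
 *			       BTRFS_BLOCK_GROUP_RAID1, 0) == 0  (not reduced)
 *	alloc_profile_is_valid(0, 0) == 1  (0 is the plain single profile)
 *	alloc_profile_is_valid(0, 1) == 0  (extended single must carry
 *					    BTRFS_AVAIL_ALLOC_BIT_SINGLE)
 */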
3618 
3619 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3620 {
3621 	/* cancel requested || normal exit path */
3622 	return atomic_read(&fs_info->balance_cancel_req) ||
3623 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3624 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3625 }
3626 
3627 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3628 {
3629 	int ret;
3630 
3631 	unset_balance_control(fs_info);
3632 	ret = del_balance_item(fs_info->tree_root);
3633 	if (ret)
3634 		btrfs_std_error(fs_info, ret, NULL);
3635 
3636 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3637 }
3638 
3639 /* Non-zero return value signifies invalidity */
3640 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3641 		u64 allowed)
3642 {
3643 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3644 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3645 		 (bctl_arg->target & ~allowed)));
3646 }
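/*
 * Example (illustrative): with two devices, btrfs_balance() below builds
 * allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_RAID0 |
 * BTRFS_BLOCK_GROUP_RAID1.  A convert target of raid10 has bits outside
 * that mask, so validate_convert_profile() returns non-zero and the
 * balance fails with -EINVAL before any chunk is touched.
 */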
3647 
3648 /*
3649  * Should be called with both balance and volume mutexes held
3650  */
3651 int btrfs_balance(struct btrfs_balance_control *bctl,
3652 		  struct btrfs_ioctl_balance_args *bargs)
3653 {
3654 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3655 	u64 allowed;
3656 	int mixed = 0;
3657 	int ret;
3658 	u64 num_devices;
3659 	unsigned seq;
3660 
3661 	if (btrfs_fs_closing(fs_info) ||
3662 	    atomic_read(&fs_info->balance_pause_req) ||
3663 	    atomic_read(&fs_info->balance_cancel_req)) {
3664 		ret = -EINVAL;
3665 		goto out;
3666 	}
3667 
3668 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3669 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3670 		mixed = 1;
3671 
3672 	/*
3673 	 * In case of mixed groups both data and meta should be picked,
3674 	 * and identical options should be given for both of them.
3675 	 */
3676 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3677 	if (mixed && (bctl->flags & allowed)) {
3678 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3679 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3680 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3681 			btrfs_err(fs_info, "with mixed groups data and "
3682 				   "metadata balance options must be the same");
3683 			ret = -EINVAL;
3684 			goto out;
3685 		}
3686 	}
3687 
3688 	num_devices = fs_info->fs_devices->num_devices;
3689 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3690 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3691 		BUG_ON(num_devices < 1);
3692 		num_devices--;
3693 	}
3694 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3695 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3696 	if (num_devices == 1)
3697 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3698 	else if (num_devices > 1)
3699 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3700 	if (num_devices > 2)
3701 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3702 	if (num_devices > 3)
3703 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3704 			    BTRFS_BLOCK_GROUP_RAID6);
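	/*
	 * Worked example (illustrative, not from the original source): with
	 * three devices and no replace running, num_devices == 3 and allowed
	 * ends up as SINGLE | RAID0 | RAID1 | RAID5.  A requested conversion
	 * to RAID10 would then fail the (target & ~allowed) test inside
	 * validate_convert_profile() below and abort with -EINVAL.
	 */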
3705 	if (validate_convert_profile(&bctl->data, allowed)) {
3706 		btrfs_err(fs_info,
3707 			  "unable to start balance with target data profile %llu",
3708 			  bctl->data.target);
3709 		ret = -EINVAL;
3710 		goto out;
3711 	}
3712 	if (validate_convert_profile(&bctl->meta, allowed)) {
3713 		btrfs_err(fs_info,
3714 			   "unable to start balance with target metadata profile %llu",
3715 		       bctl->meta.target);
3716 		ret = -EINVAL;
3717 		goto out;
3718 	}
3719 	if (validate_convert_profile(&bctl->sys, allowed)) {
3720 		btrfs_err(fs_info,
3721 			   "unable to start balance with target system profile %llu",
3722 		       bctl->sys.target);
3723 		ret = -EINVAL;
3724 		goto out;
3725 	}
3726 
3727 	/* allow reducing metadata or system integrity only if force is set */
3728 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3729 			BTRFS_BLOCK_GROUP_RAID10 |
3730 			BTRFS_BLOCK_GROUP_RAID5 |
3731 			BTRFS_BLOCK_GROUP_RAID6;
3732 	do {
3733 		seq = read_seqbegin(&fs_info->profiles_lock);
3734 
3735 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3736 		     (fs_info->avail_system_alloc_bits & allowed) &&
3737 		     !(bctl->sys.target & allowed)) ||
3738 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3739 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3740 		     !(bctl->meta.target & allowed))) {
3741 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3742 				btrfs_info(fs_info, "force reducing metadata integrity");
3743 			} else {
3744 				btrfs_err(fs_info,
3745 	"balance will reduce metadata integrity, use force if you want this");
3746 				ret = -EINVAL;
3747 				goto out;
3748 			}
3749 		}
3750 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3751 
3752 	if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
3753 		btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
3754 		btrfs_warn(fs_info,
3755 	"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
3756 			bctl->meta.target, bctl->data.target);
3757 	}
3758 
3759 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3760 		fs_info->num_tolerated_disk_barrier_failures = min(
3761 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3762 			btrfs_get_num_tolerated_disk_barrier_failures(
3763 				bctl->sys.target));
3764 	}
3765 
3766 	ret = insert_balance_item(fs_info->tree_root, bctl);
3767 	if (ret && ret != -EEXIST)
3768 		goto out;
3769 
3770 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3771 		BUG_ON(ret == -EEXIST);
3772 		set_balance_control(bctl);
3773 	} else {
3774 		BUG_ON(ret != -EEXIST);
3775 		spin_lock(&fs_info->balance_lock);
3776 		update_balance_args(bctl);
3777 		spin_unlock(&fs_info->balance_lock);
3778 	}
3779 
3780 	atomic_inc(&fs_info->balance_running);
3781 	mutex_unlock(&fs_info->balance_mutex);
3782 
3783 	ret = __btrfs_balance(fs_info);
3784 
3785 	mutex_lock(&fs_info->balance_mutex);
3786 	atomic_dec(&fs_info->balance_running);
3787 
3788 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3789 		fs_info->num_tolerated_disk_barrier_failures =
3790 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3791 	}
3792 
3793 	if (bargs) {
3794 		memset(bargs, 0, sizeof(*bargs));
3795 		update_ioctl_balance_args(fs_info, 0, bargs);
3796 	}
3797 
3798 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3799 	    balance_need_close(fs_info)) {
3800 		__cancel_balance(fs_info);
3801 	}
3802 
3803 	wake_up(&fs_info->balance_wait_q);
3804 
3805 	return ret;
3806 out:
3807 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3808 		__cancel_balance(fs_info);
3809 	else {
3810 		kfree(bctl);
3811 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3812 	}
3813 	return ret;
3814 }
3815 
3816 static int balance_kthread(void *data)
3817 {
3818 	struct btrfs_fs_info *fs_info = data;
3819 	int ret = 0;
3820 
3821 	mutex_lock(&fs_info->volume_mutex);
3822 	mutex_lock(&fs_info->balance_mutex);
3823 
3824 	if (fs_info->balance_ctl) {
3825 		btrfs_info(fs_info, "continuing balance");
3826 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3827 	}
3828 
3829 	mutex_unlock(&fs_info->balance_mutex);
3830 	mutex_unlock(&fs_info->volume_mutex);
3831 
3832 	return ret;
3833 }
3834 
3835 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3836 {
3837 	struct task_struct *tsk;
3838 
3839 	spin_lock(&fs_info->balance_lock);
3840 	if (!fs_info->balance_ctl) {
3841 		spin_unlock(&fs_info->balance_lock);
3842 		return 0;
3843 	}
3844 	spin_unlock(&fs_info->balance_lock);
3845 
3846 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3847 		btrfs_info(fs_info, "force skipping balance");
3848 		return 0;
3849 	}
3850 
3851 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3852 	return PTR_ERR_OR_ZERO(tsk);
3853 }
3854 
3855 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3856 {
3857 	struct btrfs_balance_control *bctl;
3858 	struct btrfs_balance_item *item;
3859 	struct btrfs_disk_balance_args disk_bargs;
3860 	struct btrfs_path *path;
3861 	struct extent_buffer *leaf;
3862 	struct btrfs_key key;
3863 	int ret;
3864 
3865 	path = btrfs_alloc_path();
3866 	if (!path)
3867 		return -ENOMEM;
3868 
3869 	key.objectid = BTRFS_BALANCE_OBJECTID;
3870 	key.type = BTRFS_BALANCE_ITEM_KEY;
3871 	key.offset = 0;
3872 
3873 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3874 	if (ret < 0)
3875 		goto out;
3876 	if (ret > 0) { /* ret = -ENOENT; */
3877 		ret = 0;
3878 		goto out;
3879 	}
3880 
3881 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3882 	if (!bctl) {
3883 		ret = -ENOMEM;
3884 		goto out;
3885 	}
3886 
3887 	leaf = path->nodes[0];
3888 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3889 
3890 	bctl->fs_info = fs_info;
3891 	bctl->flags = btrfs_balance_flags(leaf, item);
3892 	bctl->flags |= BTRFS_BALANCE_RESUME;
3893 
3894 	btrfs_balance_data(leaf, item, &disk_bargs);
3895 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3896 	btrfs_balance_meta(leaf, item, &disk_bargs);
3897 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3898 	btrfs_balance_sys(leaf, item, &disk_bargs);
3899 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3900 
3901 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3902 
3903 	mutex_lock(&fs_info->volume_mutex);
3904 	mutex_lock(&fs_info->balance_mutex);
3905 
3906 	set_balance_control(bctl);
3907 
3908 	mutex_unlock(&fs_info->balance_mutex);
3909 	mutex_unlock(&fs_info->volume_mutex);
3910 out:
3911 	btrfs_free_path(path);
3912 	return ret;
3913 }
3914 
3915 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3916 {
3917 	int ret = 0;
3918 
3919 	mutex_lock(&fs_info->balance_mutex);
3920 	if (!fs_info->balance_ctl) {
3921 		mutex_unlock(&fs_info->balance_mutex);
3922 		return -ENOTCONN;
3923 	}
3924 
3925 	if (atomic_read(&fs_info->balance_running)) {
3926 		atomic_inc(&fs_info->balance_pause_req);
3927 		mutex_unlock(&fs_info->balance_mutex);
3928 
3929 		wait_event(fs_info->balance_wait_q,
3930 			   atomic_read(&fs_info->balance_running) == 0);
3931 
3932 		mutex_lock(&fs_info->balance_mutex);
3933 		/* we are good with balance_ctl ripped off from under us */
3934 		BUG_ON(atomic_read(&fs_info->balance_running));
3935 		atomic_dec(&fs_info->balance_pause_req);
3936 	} else {
3937 		ret = -ENOTCONN;
3938 	}
3939 
3940 	mutex_unlock(&fs_info->balance_mutex);
3941 	return ret;
3942 }
3943 
3944 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3945 {
3946 	if (fs_info->sb->s_flags & MS_RDONLY)
3947 		return -EROFS;
3948 
3949 	mutex_lock(&fs_info->balance_mutex);
3950 	if (!fs_info->balance_ctl) {
3951 		mutex_unlock(&fs_info->balance_mutex);
3952 		return -ENOTCONN;
3953 	}
3954 
3955 	atomic_inc(&fs_info->balance_cancel_req);
3956 	/*
3957 	 * if balance is running, just wait and return; the balance item
3958 	 * is deleted in btrfs_balance() in this case
3959 	 */
3960 	if (atomic_read(&fs_info->balance_running)) {
3961 		mutex_unlock(&fs_info->balance_mutex);
3962 		wait_event(fs_info->balance_wait_q,
3963 			   atomic_read(&fs_info->balance_running) == 0);
3964 		mutex_lock(&fs_info->balance_mutex);
3965 	} else {
3966 		/* __cancel_balance needs volume_mutex */
3967 		mutex_unlock(&fs_info->balance_mutex);
3968 		mutex_lock(&fs_info->volume_mutex);
3969 		mutex_lock(&fs_info->balance_mutex);
3970 
3971 		if (fs_info->balance_ctl)
3972 			__cancel_balance(fs_info);
3973 
3974 		mutex_unlock(&fs_info->volume_mutex);
3975 	}
3976 
3977 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3978 	atomic_dec(&fs_info->balance_cancel_req);
3979 	mutex_unlock(&fs_info->balance_mutex);
3980 	return 0;
3981 }
3982 
3983 static int btrfs_uuid_scan_kthread(void *data)
3984 {
3985 	struct btrfs_fs_info *fs_info = data;
3986 	struct btrfs_root *root = fs_info->tree_root;
3987 	struct btrfs_key key;
3988 	struct btrfs_key max_key;
3989 	struct btrfs_path *path = NULL;
3990 	int ret = 0;
3991 	struct extent_buffer *eb;
3992 	int slot;
3993 	struct btrfs_root_item root_item;
3994 	u32 item_size;
3995 	struct btrfs_trans_handle *trans = NULL;
3996 
3997 	path = btrfs_alloc_path();
3998 	if (!path) {
3999 		ret = -ENOMEM;
4000 		goto out;
4001 	}
4002 
4003 	key.objectid = 0;
4004 	key.type = BTRFS_ROOT_ITEM_KEY;
4005 	key.offset = 0;
4006 
4007 	max_key.objectid = (u64)-1;
4008 	max_key.type = BTRFS_ROOT_ITEM_KEY;
4009 	max_key.offset = (u64)-1;
4010 
4011 	while (1) {
4012 		ret = btrfs_search_forward(root, &key, path, 0);
4013 		if (ret) {
4014 			if (ret > 0)
4015 				ret = 0;
4016 			break;
4017 		}
4018 
4019 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4020 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4021 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4022 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4023 			goto skip;
4024 
4025 		eb = path->nodes[0];
4026 		slot = path->slots[0];
4027 		item_size = btrfs_item_size_nr(eb, slot);
4028 		if (item_size < sizeof(root_item))
4029 			goto skip;
4030 
4031 		read_extent_buffer(eb, &root_item,
4032 				   btrfs_item_ptr_offset(eb, slot),
4033 				   (int)sizeof(root_item));
4034 		if (btrfs_root_refs(&root_item) == 0)
4035 			goto skip;
4036 
4037 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4038 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4039 			if (trans)
4040 				goto update_tree;
4041 
4042 			btrfs_release_path(path);
4043 			/*
4044 			 * 1 - subvol uuid item
4045 			 * 1 - received_subvol uuid item
4046 			 */
4047 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4048 			if (IS_ERR(trans)) {
4049 				ret = PTR_ERR(trans);
4050 				break;
4051 			}
4052 			continue;
4053 		} else {
4054 			goto skip;
4055 		}
4056 update_tree:
4057 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4058 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4059 						  root_item.uuid,
4060 						  BTRFS_UUID_KEY_SUBVOL,
4061 						  key.objectid);
4062 			if (ret < 0) {
4063 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4064 					ret);
4065 				break;
4066 			}
4067 		}
4068 
4069 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4070 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4071 						  root_item.received_uuid,
4072 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4073 						  key.objectid);
4074 			if (ret < 0) {
4075 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4076 					ret);
4077 				break;
4078 			}
4079 		}
4080 
4081 skip:
4082 		if (trans) {
4083 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4084 			trans = NULL;
4085 			if (ret)
4086 				break;
4087 		}
4088 
4089 		btrfs_release_path(path);
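		/*
		 * Descriptive note (added, with illustrative values): advance
		 * to the next possible key in (objectid, type, offset) order,
		 * e.g. (5, ROOT_ITEM, 0) steps to (5, ROOT_ITEM, 1), and the
		 * objectid is only bumped once offset and type are exhausted.
		 */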
4090 		if (key.offset < (u64)-1) {
4091 			key.offset++;
4092 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4093 			key.offset = 0;
4094 			key.type = BTRFS_ROOT_ITEM_KEY;
4095 		} else if (key.objectid < (u64)-1) {
4096 			key.offset = 0;
4097 			key.type = BTRFS_ROOT_ITEM_KEY;
4098 			key.objectid++;
4099 		} else {
4100 			break;
4101 		}
4102 		cond_resched();
4103 	}
4104 
4105 out:
4106 	btrfs_free_path(path);
4107 	if (trans && !IS_ERR(trans))
4108 		btrfs_end_transaction(trans, fs_info->uuid_root);
4109 	if (ret)
4110 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4111 	else
4112 		fs_info->update_uuid_tree_gen = 1;
4113 	up(&fs_info->uuid_tree_rescan_sem);
4114 	return 0;
4115 }
4116 
4117 /*
4118  * Callback for btrfs_uuid_tree_iterate().
4119  * returns:
4120  * 0	check succeeded, the entry is not outdated.
4121  * < 0	if an error occurred.
4122  * > 0	if the check failed, which means the caller shall remove the entry.
4123  */
4124 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4125 				       u8 *uuid, u8 type, u64 subid)
4126 {
4127 	struct btrfs_key key;
4128 	int ret = 0;
4129 	struct btrfs_root *subvol_root;
4130 
4131 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4132 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4133 		goto out;
4134 
4135 	key.objectid = subid;
4136 	key.type = BTRFS_ROOT_ITEM_KEY;
4137 	key.offset = (u64)-1;
4138 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4139 	if (IS_ERR(subvol_root)) {
4140 		ret = PTR_ERR(subvol_root);
4141 		if (ret == -ENOENT)
4142 			ret = 1;
4143 		goto out;
4144 	}
4145 
4146 	switch (type) {
4147 	case BTRFS_UUID_KEY_SUBVOL:
4148 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4149 			ret = 1;
4150 		break;
4151 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4152 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4153 			   BTRFS_UUID_SIZE))
4154 			ret = 1;
4155 		break;
4156 	}
4157 
4158 out:
4159 	return ret;
4160 }
4161 
4162 static int btrfs_uuid_rescan_kthread(void *data)
4163 {
4164 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4165 	int ret;
4166 
4167 	/*
4168 	 * 1st step is to iterate through the existing UUID tree and
4169 	 * to delete all entries that contain outdated data.
4170 	 * 2nd step is to add all missing entries to the UUID tree.
4171 	 */
4172 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4173 	if (ret < 0) {
4174 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4175 		up(&fs_info->uuid_tree_rescan_sem);
4176 		return ret;
4177 	}
4178 	return btrfs_uuid_scan_kthread(data);
4179 }
4180 
4181 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4182 {
4183 	struct btrfs_trans_handle *trans;
4184 	struct btrfs_root *tree_root = fs_info->tree_root;
4185 	struct btrfs_root *uuid_root;
4186 	struct task_struct *task;
4187 	int ret;
4188 
4189 	/*
4190 	 * 1 - root node
4191 	 * 1 - root item
4192 	 */
4193 	trans = btrfs_start_transaction(tree_root, 2);
4194 	if (IS_ERR(trans))
4195 		return PTR_ERR(trans);
4196 
4197 	uuid_root = btrfs_create_tree(trans, fs_info,
4198 				      BTRFS_UUID_TREE_OBJECTID);
4199 	if (IS_ERR(uuid_root)) {
4200 		ret = PTR_ERR(uuid_root);
4201 		btrfs_abort_transaction(trans, tree_root, ret);
4202 		return ret;
4203 	}
4204 
4205 	fs_info->uuid_root = uuid_root;
4206 
4207 	ret = btrfs_commit_transaction(trans, tree_root);
4208 	if (ret)
4209 		return ret;
4210 
4211 	down(&fs_info->uuid_tree_rescan_sem);
4212 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4213 	if (IS_ERR(task)) {
4214 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4215 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4216 		up(&fs_info->uuid_tree_rescan_sem);
4217 		return PTR_ERR(task);
4218 	}
4219 
4220 	return 0;
4221 }
4222 
4223 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4224 {
4225 	struct task_struct *task;
4226 
4227 	down(&fs_info->uuid_tree_rescan_sem);
4228 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4229 	if (IS_ERR(task)) {
4230 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4231 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4232 		up(&fs_info->uuid_tree_rescan_sem);
4233 		return PTR_ERR(task);
4234 	}
4235 
4236 	return 0;
4237 }
4238 
4239 /*
4240  * shrinking a device means finding all of the device extents past
4241  * the new size, and then following the back refs to the chunks.
4242  * The chunk relocation code actually frees the device extents.
4243  */
4244 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4245 {
4246 	struct btrfs_trans_handle *trans;
4247 	struct btrfs_root *root = device->dev_root;
4248 	struct btrfs_dev_extent *dev_extent = NULL;
4249 	struct btrfs_path *path;
4250 	u64 length;
4251 	u64 chunk_offset;
4252 	int ret;
4253 	int slot;
4254 	int failed = 0;
4255 	bool retried = false;
4256 	bool checked_pending_chunks = false;
4257 	struct extent_buffer *l;
4258 	struct btrfs_key key;
4259 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4260 	u64 old_total = btrfs_super_total_bytes(super_copy);
4261 	u64 old_size = btrfs_device_get_total_bytes(device);
4262 	u64 diff = old_size - new_size;
4263 
4264 	if (device->is_tgtdev_for_dev_replace)
4265 		return -EINVAL;
4266 
4267 	path = btrfs_alloc_path();
4268 	if (!path)
4269 		return -ENOMEM;
4270 
4271 	path->reada = READA_FORWARD;
4272 
4273 	lock_chunks(root);
4274 
4275 	btrfs_device_set_total_bytes(device, new_size);
4276 	if (device->writeable) {
4277 		device->fs_devices->total_rw_bytes -= diff;
4278 		spin_lock(&root->fs_info->free_chunk_lock);
4279 		root->fs_info->free_chunk_space -= diff;
4280 		spin_unlock(&root->fs_info->free_chunk_lock);
4281 	}
4282 	unlock_chunks(root);
4283 
4284 again:
4285 	key.objectid = device->devid;
4286 	key.offset = (u64)-1;
4287 	key.type = BTRFS_DEV_EXTENT_KEY;
4288 
4289 	do {
4290 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4291 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4292 		if (ret < 0) {
4293 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4294 			goto done;
4295 		}
4296 
4297 		ret = btrfs_previous_item(root, path, 0, key.type);
4298 		if (ret)
4299 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4300 		if (ret < 0)
4301 			goto done;
4302 		if (ret) {
4303 			ret = 0;
4304 			btrfs_release_path(path);
4305 			break;
4306 		}
4307 
4308 		l = path->nodes[0];
4309 		slot = path->slots[0];
4310 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4311 
4312 		if (key.objectid != device->devid) {
4313 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4314 			btrfs_release_path(path);
4315 			break;
4316 		}
4317 
4318 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4319 		length = btrfs_dev_extent_length(l, dev_extent);
4320 
4321 		if (key.offset + length <= new_size) {
4322 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4323 			btrfs_release_path(path);
4324 			break;
4325 		}
4326 
4327 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4328 		btrfs_release_path(path);
4329 
4330 		ret = btrfs_relocate_chunk(root, chunk_offset);
4331 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4332 		if (ret && ret != -ENOSPC)
4333 			goto done;
4334 		if (ret == -ENOSPC)
4335 			failed++;
4336 	} while (key.offset-- > 0);
4337 
4338 	if (failed && !retried) {
4339 		failed = 0;
4340 		retried = true;
4341 		goto again;
4342 	} else if (failed && retried) {
4343 		ret = -ENOSPC;
4344 		goto done;
4345 	}
4346 
4347 	/* Shrinking succeeded, else we would be at "done". */
4348 	trans = btrfs_start_transaction(root, 0);
4349 	if (IS_ERR(trans)) {
4350 		ret = PTR_ERR(trans);
4351 		goto done;
4352 	}
4353 
4354 	lock_chunks(root);
4355 
4356 	/*
4357 	 * We checked in the above loop all device extents that were already in
4358 	 * the device tree. However before we have updated the device's
4359 	 * total_bytes to the new size, we might have had chunk allocations that
4360 	 * have not completed yet (new block groups attached to transaction
4361 	 * handles), and therefore their device extents were not yet in the
4362 	 * device tree and we missed them in the loop above. So if we have any
4363 	 * pending chunk using a device extent that overlaps the device range
4364 	 * that we cannot use anymore, commit the current transaction and
4365 	 * repeat the search on the device tree - this way we guarantee we will
4366 	 * not have chunks using device extents that end beyond 'new_size'.
4367 	 */
4368 	if (!checked_pending_chunks) {
4369 		u64 start = new_size;
4370 		u64 len = old_size - new_size;
4371 
4372 		if (contains_pending_extent(trans->transaction, device,
4373 					    &start, len)) {
4374 			unlock_chunks(root);
4375 			checked_pending_chunks = true;
4376 			failed = 0;
4377 			retried = false;
4378 			ret = btrfs_commit_transaction(trans, root);
4379 			if (ret)
4380 				goto done;
4381 			goto again;
4382 		}
4383 	}
4384 
4385 	btrfs_device_set_disk_total_bytes(device, new_size);
4386 	if (list_empty(&device->resized_list))
4387 		list_add_tail(&device->resized_list,
4388 			      &root->fs_info->fs_devices->resized_devices);
4389 
4390 	WARN_ON(diff > old_total);
4391 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4392 	unlock_chunks(root);
4393 
4394 	/* Now btrfs_update_device() will change the on-disk size. */
4395 	ret = btrfs_update_device(trans, device);
4396 	btrfs_end_transaction(trans, root);
4397 done:
4398 	btrfs_free_path(path);
4399 	if (ret) {
4400 		lock_chunks(root);
4401 		btrfs_device_set_total_bytes(device, old_size);
4402 		if (device->writeable)
4403 			device->fs_devices->total_rw_bytes += diff;
4404 		spin_lock(&root->fs_info->free_chunk_lock);
4405 		root->fs_info->free_chunk_space += diff;
4406 		spin_unlock(&root->fs_info->free_chunk_lock);
4407 		unlock_chunks(root);
4408 	}
4409 	return ret;
4410 }
4411 
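/*
 * Usage sketch (illustrative; the exact caller context is an assumption):
 * the resize ioctl computes the new size and invokes this helper with the
 * volume mutex held, roughly:
 *
 *	mutex_lock(&fs_info->volume_mutex);
 *	ret = btrfs_shrink_device(device, new_size);
 *	mutex_unlock(&fs_info->volume_mutex);
 */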
4412 static int btrfs_add_system_chunk(struct btrfs_root *root,
4413 			   struct btrfs_key *key,
4414 			   struct btrfs_chunk *chunk, int item_size)
4415 {
4416 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4417 	struct btrfs_disk_key disk_key;
4418 	u32 array_size;
4419 	u8 *ptr;
4420 
4421 	lock_chunks(root);
4422 	array_size = btrfs_super_sys_array_size(super_copy);
4423 	if (array_size + item_size + sizeof(disk_key)
4424 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4425 		unlock_chunks(root);
4426 		return -EFBIG;
4427 	}
4428 
4429 	ptr = super_copy->sys_chunk_array + array_size;
4430 	btrfs_cpu_key_to_disk(&disk_key, key);
4431 	memcpy(ptr, &disk_key, sizeof(disk_key));
4432 	ptr += sizeof(disk_key);
4433 	memcpy(ptr, chunk, item_size);
4434 	item_size += sizeof(disk_key);
4435 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4436 	unlock_chunks(root);
4437 
4438 	return 0;
4439 }
4440 
4441 /*
4442  * sort the devices in descending order by max_avail, total_avail
4443  */
4444 static int btrfs_cmp_device_info(const void *a, const void *b)
4445 {
4446 	const struct btrfs_device_info *di_a = a;
4447 	const struct btrfs_device_info *di_b = b;
4448 
4449 	if (di_a->max_avail > di_b->max_avail)
4450 		return -1;
4451 	if (di_a->max_avail < di_b->max_avail)
4452 		return 1;
4453 	if (di_a->total_avail > di_b->total_avail)
4454 		return -1;
4455 	if (di_a->total_avail < di_b->total_avail)
4456 		return 1;
4457 	return 0;
4458 }
4459 
4460 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4461 {
4462 	/* TODO allow them to set a preferred stripe size */
4463 	return SZ_64K;
4464 }
4465 
4466 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4467 {
4468 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4469 		return;
4470 
4471 	btrfs_set_fs_incompat(info, RAID56);
4472 }
4473 
4474 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4475 			- sizeof(struct btrfs_item)		\
4476 			- sizeof(struct btrfs_chunk))		\
4477 			/ sizeof(struct btrfs_stripe) + 1)
4478 
4479 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4480 				- 2 * sizeof(struct btrfs_disk_key)	\
4481 				- 2 * sizeof(struct btrfs_chunk))	\
4482 				/ sizeof(struct btrfs_stripe) + 1)
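/*
 * Worked example for the second macro above (illustrative; the on-disk
 * sizes are assumptions based on the usual format): with a 2048-byte system
 * chunk array, 17-byte disk keys, 80-byte chunk items (each embedding its
 * first stripe) and 32-byte stripes:
 *
 *	BTRFS_MAX_DEVS_SYS_CHUNK = (2048 - 2*17 - 2*80) / 32 + 1 = 58
 *
 * i.e. a system chunk can stripe across at most 58 devices.
 */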
4483 
4484 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4485 			       struct btrfs_root *extent_root, u64 start,
4486 			       u64 type)
4487 {
4488 	struct btrfs_fs_info *info = extent_root->fs_info;
4489 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4490 	struct list_head *cur;
4491 	struct map_lookup *map = NULL;
4492 	struct extent_map_tree *em_tree;
4493 	struct extent_map *em;
4494 	struct btrfs_device_info *devices_info = NULL;
4495 	u64 total_avail;
4496 	int num_stripes;	/* total number of stripes to allocate */
4497 	int data_stripes;	/* number of stripes that count for
4498 				   block group size */
4499 	int sub_stripes;	/* sub_stripes info for map */
4500 	int dev_stripes;	/* stripes per dev */
4501 	int devs_max;		/* max devs to use */
4502 	int devs_min;		/* min devs needed */
4503 	int devs_increment;	/* ndevs has to be a multiple of this */
4504 	int ncopies;		/* how many copies the data has */
4505 	int ret;
4506 	u64 max_stripe_size;
4507 	u64 max_chunk_size;
4508 	u64 stripe_size;
4509 	u64 num_bytes;
4510 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4511 	int ndevs;
4512 	int i;
4513 	int j;
4514 	int index;
4515 
4516 	BUG_ON(!alloc_profile_is_valid(type, 0));
4517 
4518 	if (list_empty(&fs_devices->alloc_list))
4519 		return -ENOSPC;
4520 
4521 	index = __get_raid_index(type);
4522 
4523 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4524 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4525 	devs_max = btrfs_raid_array[index].devs_max;
4526 	devs_min = btrfs_raid_array[index].devs_min;
4527 	devs_increment = btrfs_raid_array[index].devs_increment;
4528 	ncopies = btrfs_raid_array[index].ncopies;
4529 
4530 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4531 		max_stripe_size = SZ_1G;
4532 		max_chunk_size = 10 * max_stripe_size;
4533 		if (!devs_max)
4534 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4535 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4536 		/* for larger filesystems, use larger metadata chunks */
4537 		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4538 			max_stripe_size = SZ_1G;
4539 		else
4540 			max_stripe_size = SZ_256M;
4541 		max_chunk_size = max_stripe_size;
4542 		if (!devs_max)
4543 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4544 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4545 		max_stripe_size = SZ_32M;
4546 		max_chunk_size = 2 * max_stripe_size;
4547 		if (!devs_max)
4548 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4549 	} else {
4550 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4551 		       type);
4552 		BUG_ON(1);
4553 	}
4554 
4555 	/* we don't want a chunk larger than 10% of writeable space */
4556 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4557 			     max_chunk_size);
4558 
4559 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4560 			       GFP_NOFS);
4561 	if (!devices_info)
4562 		return -ENOMEM;
4563 
4564 	cur = fs_devices->alloc_list.next;
4565 
4566 	/*
4567 	 * in the first pass through the devices list, we gather information
4568 	 * about the available holes on each device.
4569 	 */
4570 	ndevs = 0;
4571 	while (cur != &fs_devices->alloc_list) {
4572 		struct btrfs_device *device;
4573 		u64 max_avail;
4574 		u64 dev_offset;
4575 
4576 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4577 
4578 		cur = cur->next;
4579 
4580 		if (!device->writeable) {
4581 			WARN(1, KERN_ERR
4582 			       "BTRFS: read-only device in alloc_list\n");
4583 			continue;
4584 		}
4585 
4586 		if (!device->in_fs_metadata ||
4587 		    device->is_tgtdev_for_dev_replace)
4588 			continue;
4589 
4590 		if (device->total_bytes > device->bytes_used)
4591 			total_avail = device->total_bytes - device->bytes_used;
4592 		else
4593 			total_avail = 0;
4594 
4595 		/* If there is no space on this device, skip it. */
4596 		if (total_avail == 0)
4597 			continue;
4598 
4599 		ret = find_free_dev_extent(trans, device,
4600 					   max_stripe_size * dev_stripes,
4601 					   &dev_offset, &max_avail);
4602 		if (ret && ret != -ENOSPC)
4603 			goto error;
4604 
4605 		if (ret == 0)
4606 			max_avail = max_stripe_size * dev_stripes;
4607 
4608 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4609 			continue;
4610 
4611 		if (ndevs == fs_devices->rw_devices) {
4612 			WARN(1, "%s: found more than %llu devices\n",
4613 			     __func__, fs_devices->rw_devices);
4614 			break;
4615 		}
4616 		devices_info[ndevs].dev_offset = dev_offset;
4617 		devices_info[ndevs].max_avail = max_avail;
4618 		devices_info[ndevs].total_avail = total_avail;
4619 		devices_info[ndevs].dev = device;
4620 		++ndevs;
4621 	}
4622 
4623 	/*
4624 	 * now sort the devices by hole size / available space
4625 	 */
4626 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4627 	     btrfs_cmp_device_info, NULL);
4628 
4629 	/* round down to number of usable stripes */
4630 	ndevs -= ndevs % devs_increment;
4631 
4632 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4633 		ret = -ENOSPC;
4634 		goto error;
4635 	}
4636 
4637 	if (devs_max && ndevs > devs_max)
4638 		ndevs = devs_max;
4639 	/*
4640 	 * the primary goal is to maximize the number of stripes, so use as many
4641 	 * devices as possible, even if the stripes are not maximum sized.
4642 	 */
4643 	stripe_size = devices_info[ndevs-1].max_avail;
4644 	num_stripes = ndevs * dev_stripes;
4645 
4646 	/*
4647 	 * this will have to be fixed for RAID1 and RAID10 over
4648 	 * more drives
4649 	 */
4650 	data_stripes = num_stripes / ncopies;
4651 
4652 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4653 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4654 				 btrfs_super_stripesize(info->super_copy));
4655 		data_stripes = num_stripes - 1;
4656 	}
4657 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4658 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4659 				 btrfs_super_stripesize(info->super_copy));
4660 		data_stripes = num_stripes - 2;
4661 	}
4662 
4663 	/*
4664 	 * Use the number of data stripes to figure out how big this chunk
4665 	 * is really going to be in terms of logical address space,
4666 	 * and compare that answer with the max chunk size
4667 	 */
4668 	if (stripe_size * data_stripes > max_chunk_size) {
4669 		u64 mask = (1ULL << 24) - 1;
4670 
4671 		stripe_size = div_u64(max_chunk_size, data_stripes);
4672 
4673 		/* bump the answer up to a 16MB boundary */
4674 		stripe_size = (stripe_size + mask) & ~mask;
4675 
4676 		/* but don't go higher than the limits we found
4677 		 * while searching for free extents
4678 		 */
4679 		if (stripe_size > devices_info[ndevs-1].max_avail)
4680 			stripe_size = devices_info[ndevs-1].max_avail;
4681 	}
4682 
4683 	stripe_size = div_u64(stripe_size, dev_stripes);
4684 
4685 	/* align to BTRFS_STRIPE_LEN */
4686 	stripe_size = div_u64(stripe_size, raid_stripe_len);
4687 	stripe_size *= raid_stripe_len;
4688 
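	/*
	 * Worked example (illustrative numbers): a data chunk over four
	 * equal devices with RAID10 gives num_stripes = 4 and ncopies = 2,
	 * so data_stripes = 2.  If every device offers a 1GiB hole (and the
	 * 10%-of-writeable-space clamp does not bite), stripe_size stays at
	 * SZ_1G and the chunk spans stripe_size * data_stripes = 2GiB of
	 * logical address space.
	 */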
4689 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4690 	if (!map) {
4691 		ret = -ENOMEM;
4692 		goto error;
4693 	}
4694 	map->num_stripes = num_stripes;
4695 
4696 	for (i = 0; i < ndevs; ++i) {
4697 		for (j = 0; j < dev_stripes; ++j) {
4698 			int s = i * dev_stripes + j;
4699 			map->stripes[s].dev = devices_info[i].dev;
4700 			map->stripes[s].physical = devices_info[i].dev_offset +
4701 						   j * stripe_size;
4702 		}
4703 	}
4704 	map->sector_size = extent_root->sectorsize;
4705 	map->stripe_len = raid_stripe_len;
4706 	map->io_align = raid_stripe_len;
4707 	map->io_width = raid_stripe_len;
4708 	map->type = type;
4709 	map->sub_stripes = sub_stripes;
4710 
4711 	num_bytes = stripe_size * data_stripes;
4712 
4713 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4714 
4715 	em = alloc_extent_map();
4716 	if (!em) {
4717 		kfree(map);
4718 		ret = -ENOMEM;
4719 		goto error;
4720 	}
4721 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4722 	em->map_lookup = map;
4723 	em->start = start;
4724 	em->len = num_bytes;
4725 	em->block_start = 0;
4726 	em->block_len = em->len;
4727 	em->orig_block_len = stripe_size;
4728 
4729 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4730 	write_lock(&em_tree->lock);
4731 	ret = add_extent_mapping(em_tree, em, 0);
4732 	if (!ret) {
4733 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4734 		atomic_inc(&em->refs);
4735 	}
4736 	write_unlock(&em_tree->lock);
4737 	if (ret) {
4738 		free_extent_map(em);
4739 		goto error;
4740 	}
4741 
4742 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4743 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4744 				     start, num_bytes);
4745 	if (ret)
4746 		goto error_del_extent;
4747 
4748 	for (i = 0; i < map->num_stripes; i++) {
4749 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4750 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4751 	}
4752 
4753 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4754 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4755 						   map->num_stripes);
4756 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4757 
4758 	free_extent_map(em);
4759 	check_raid56_incompat_flag(extent_root->fs_info, type);
4760 
4761 	kfree(devices_info);
4762 	return 0;
4763 
4764 error_del_extent:
4765 	write_lock(&em_tree->lock);
4766 	remove_extent_mapping(em_tree, em);
4767 	write_unlock(&em_tree->lock);
4768 
4769 	/* One for our allocation */
4770 	free_extent_map(em);
4771 	/* One for the tree reference */
4772 	free_extent_map(em);
4773 	/* One for the pending_chunks list reference */
4774 	free_extent_map(em);
4775 error:
4776 	kfree(devices_info);
4777 	return ret;
4778 }
4779 
4780 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4781 				struct btrfs_root *extent_root,
4782 				u64 chunk_offset, u64 chunk_size)
4783 {
4784 	struct btrfs_key key;
4785 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4786 	struct btrfs_device *device;
4787 	struct btrfs_chunk *chunk;
4788 	struct btrfs_stripe *stripe;
4789 	struct extent_map_tree *em_tree;
4790 	struct extent_map *em;
4791 	struct map_lookup *map;
4792 	size_t item_size;
4793 	u64 dev_offset;
4794 	u64 stripe_size;
4795 	int i = 0;
4796 	int ret = 0;
4797 
4798 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4799 	read_lock(&em_tree->lock);
4800 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4801 	read_unlock(&em_tree->lock);
4802 
4803 	if (!em) {
4804 		btrfs_crit(extent_root->fs_info,
4805 			   "unable to find logical %Lu len %Lu", chunk_offset, chunk_size);
4806 		return -EINVAL;
4807 	}
4808 
4809 	if (em->start != chunk_offset || em->len != chunk_size) {
4810 		btrfs_crit(extent_root->fs_info,
4811 			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4812 			   chunk_offset, chunk_size, em->start, em->len);
4813 		free_extent_map(em);
4814 		return -EINVAL;
4815 	}
4816 
4817 	map = em->map_lookup;
4818 	item_size = btrfs_chunk_item_size(map->num_stripes);
4819 	stripe_size = em->orig_block_len;
4820 
4821 	chunk = kzalloc(item_size, GFP_NOFS);
4822 	if (!chunk) {
4823 		ret = -ENOMEM;
4824 		goto out;
4825 	}
4826 
4827 	/*
4828 	 * Take the device list mutex to prevent races with the final phase of
4829 	 * a device replace operation that replaces the device object associated
4830 	 * with the map's stripes, because the device object's id can change
4831 	 * at any time during that final phase of the device replace operation
4832 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
4833 	 */
4834 	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4835 	for (i = 0; i < map->num_stripes; i++) {
4836 		device = map->stripes[i].dev;
4837 		dev_offset = map->stripes[i].physical;
4838 
4839 		ret = btrfs_update_device(trans, device);
4840 		if (ret)
4841 			break;
4842 		ret = btrfs_alloc_dev_extent(trans, device,
4843 					     chunk_root->root_key.objectid,
4844 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4845 					     chunk_offset, dev_offset,
4846 					     stripe_size);
4847 		if (ret)
4848 			break;
4849 	}
4850 	if (ret) {
4851 		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4852 		goto out;
4853 	}
4854 
4855 	stripe = &chunk->stripe;
4856 	for (i = 0; i < map->num_stripes; i++) {
4857 		device = map->stripes[i].dev;
4858 		dev_offset = map->stripes[i].physical;
4859 
4860 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4861 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4862 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4863 		stripe++;
4864 	}
4865 	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
4866 
4867 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4868 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4869 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4870 	btrfs_set_stack_chunk_type(chunk, map->type);
4871 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4872 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4873 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4874 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4875 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4876 
4877 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4878 	key.type = BTRFS_CHUNK_ITEM_KEY;
4879 	key.offset = chunk_offset;
4880 
4881 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4882 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4883 		/*
4884 		 * TODO: Cleanup of inserted chunk root in case of
4885 		 * failure.
4886 		 */
4887 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4888 					     item_size);
4889 	}
4890 
4891 out:
4892 	kfree(chunk);
4893 	free_extent_map(em);
4894 	return ret;
4895 }
4896 
4897 /*
4898  * Chunk allocation falls into two parts. The first part does the work
4899  * that makes the newly allocated chunk usable, but does not do any
4900  * operation that modifies the chunk tree. The second part does the work
4901  * that requires modifying the chunk tree. This division is important for
4902  * the bootstrap process of adding storage to a seed btrfs.
4903  */
4904 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4905 		      struct btrfs_root *extent_root, u64 type)
4906 {
4907 	u64 chunk_offset;
4908 
4909 	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4910 	chunk_offset = find_next_chunk(extent_root->fs_info);
4911 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4912 }
4913 
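/*
 * Usage sketch (illustrative; the caller shown is an assumption): the chunk
 * reservation path picks an allocation profile and calls in under the chunk
 * mutex, roughly:
 *
 *	mutex_lock(&extent_root->fs_info->chunk_mutex);
 *	ret = btrfs_alloc_chunk(trans, extent_root,
 *				btrfs_get_alloc_profile(extent_root, 1));
 *	mutex_unlock(&extent_root->fs_info->chunk_mutex);
 */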
4914 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4915 					 struct btrfs_root *root,
4916 					 struct btrfs_device *device)
4917 {
4918 	u64 chunk_offset;
4919 	u64 sys_chunk_offset;
4920 	u64 alloc_profile;
4921 	struct btrfs_fs_info *fs_info = root->fs_info;
4922 	struct btrfs_root *extent_root = fs_info->extent_root;
4923 	int ret;
4924 
4925 	chunk_offset = find_next_chunk(fs_info);
4926 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4927 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4928 				  alloc_profile);
4929 	if (ret)
4930 		return ret;
4931 
4932 	sys_chunk_offset = find_next_chunk(root->fs_info);
4933 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4934 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4935 				  alloc_profile);
4936 	return ret;
4937 }
4938 
4939 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4940 {
4941 	int max_errors;
4942 
4943 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4944 			 BTRFS_BLOCK_GROUP_RAID10 |
4945 			 BTRFS_BLOCK_GROUP_RAID5 |
4946 			 BTRFS_BLOCK_GROUP_DUP)) {
4947 		max_errors = 1;
4948 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4949 		max_errors = 2;
4950 	} else {
4951 		max_errors = 0;
4952 	}
4953 
4954 	return max_errors;
4955 }
4956 
4957 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4958 {
4959 	struct extent_map *em;
4960 	struct map_lookup *map;
4961 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4962 	int readonly = 0;
4963 	int miss_ndevs = 0;
4964 	int i;
4965 
4966 	read_lock(&map_tree->map_tree.lock);
4967 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4968 	read_unlock(&map_tree->map_tree.lock);
4969 	if (!em)
4970 		return 1;
4971 
4972 	map = em->map_lookup;
4973 	for (i = 0; i < map->num_stripes; i++) {
4974 		if (map->stripes[i].dev->missing) {
4975 			miss_ndevs++;
4976 			continue;
4977 		}
4978 
4979 		if (!map->stripes[i].dev->writeable) {
4980 			readonly = 1;
4981 			goto end;
4982 		}
4983 	}
4984 
4985 	/*
4986 	 * If the number of missing devices is larger than max errors,
4987 	 * we cannot write the data into that chunk successfully, so
4988 	 * set it readonly.
4989 	 */
4990 	if (miss_ndevs > btrfs_chunk_max_errors(map))
4991 		readonly = 1;
4992 end:
4993 	free_extent_map(em);
4994 	return readonly;
4995 }
4996 
4997 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4998 {
4999 	extent_map_tree_init(&tree->map_tree);
5000 }
5001 
5002 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5003 {
5004 	struct extent_map *em;
5005 
5006 	while (1) {
5007 		write_lock(&tree->map_tree.lock);
5008 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5009 		if (em)
5010 			remove_extent_mapping(&tree->map_tree, em);
5011 		write_unlock(&tree->map_tree.lock);
5012 		if (!em)
5013 			break;
5014 		/* once for us */
5015 		free_extent_map(em);
5016 		/* once for the tree */
5017 		free_extent_map(em);
5018 	}
5019 }
5020 
5021 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5022 {
5023 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5024 	struct extent_map *em;
5025 	struct map_lookup *map;
5026 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5027 	int ret;
5028 
5029 	read_lock(&em_tree->lock);
5030 	em = lookup_extent_mapping(em_tree, logical, len);
5031 	read_unlock(&em_tree->lock);
5032 
5033 	/*
5034 	 * We could return errors for these cases, but that could get ugly and
5035 	 * we'd probably end up doing the same thing anyway: nothing else but
5036 	 * exit, so return 1 so the callers don't try to use other copies.
5037 	 */
5038 	if (!em) {
5039 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5040 			    logical+len);
5041 		return 1;
5042 	}
5043 
5044 	if (em->start > logical || em->start + em->len < logical) {
5045 		btrfs_crit(fs_info,
5046 			   "Invalid mapping for %Lu-%Lu, got %Lu-%Lu", logical,
5047 			   logical+len, em->start, em->start + em->len);
5048 		free_extent_map(em);
5049 		return 1;
5050 	}
5051 
5052 	map = em->map_lookup;
5053 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5054 		ret = map->num_stripes;
5055 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5056 		ret = map->sub_stripes;
5057 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5058 		ret = 2;
5059 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5060 		ret = 3;
5061 	else
5062 		ret = 1;
5063 	free_extent_map(em);
5064 
5065 	btrfs_dev_replace_lock(&fs_info->dev_replace);
5066 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5067 		ret++;
5068 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
5069 
5070 	return ret;
5071 }
5072 
5073 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5074 				    struct btrfs_mapping_tree *map_tree,
5075 				    u64 logical)
5076 {
5077 	struct extent_map *em;
5078 	struct map_lookup *map;
5079 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5080 	unsigned long len = root->sectorsize;
5081 
5082 	read_lock(&em_tree->lock);
5083 	em = lookup_extent_mapping(em_tree, logical, len);
5084 	read_unlock(&em_tree->lock);
5085 	BUG_ON(!em);
5086 
5087 	BUG_ON(em->start > logical || em->start + em->len < logical);
5088 	map = em->map_lookup;
5089 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5090 		len = map->stripe_len * nr_data_stripes(map);
5091 	free_extent_map(em);
5092 	return len;
5093 }
5094 
5095 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5096 			   u64 logical, u64 len, int mirror_num)
5097 {
5098 	struct extent_map *em;
5099 	struct map_lookup *map;
5100 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5101 	int ret = 0;
5102 
5103 	read_lock(&em_tree->lock);
5104 	em = lookup_extent_mapping(em_tree, logical, len);
5105 	read_unlock(&em_tree->lock);
5106 	BUG_ON(!em);
5107 
5108 	BUG_ON(em->start > logical || em->start + em->len < logical);
5109 	map = em->map_lookup;
5110 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5111 		ret = 1;
5112 	free_extent_map(em);
5113 	return ret;
5114 }
5115 
5116 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5117 			    struct map_lookup *map, int first, int num,
5118 			    int optimal, int dev_replace_is_ongoing)
5119 {
5120 	int i;
5121 	int tolerance;
5122 	struct btrfs_device *srcdev;
5123 
5124 	if (dev_replace_is_ongoing &&
5125 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5126 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5127 		srcdev = fs_info->dev_replace.srcdev;
5128 	else
5129 		srcdev = NULL;
5130 
5131 	/*
5132 	 * try to avoid the drive that is the source drive for a
5133 	 * dev-replace procedure; only choose it if no other non-missing
5134 	 * mirror is available
5135 	 */
5136 	for (tolerance = 0; tolerance < 2; tolerance++) {
5137 		if (map->stripes[optimal].dev->bdev &&
5138 		    (tolerance || map->stripes[optimal].dev != srcdev))
5139 			return optimal;
5140 		for (i = first; i < first + num; i++) {
5141 			if (map->stripes[i].dev->bdev &&
5142 			    (tolerance || map->stripes[i].dev != srcdev))
5143 				return i;
5144 		}
5145 	}
5146 
5147 	/* we couldn't find one that doesn't fail.  Just return something
5148 	 * and the io error handling code will clean up eventually
5149 	 */
5150 	return optimal;
5151 }
5152 
5153 static inline int parity_smaller(u64 a, u64 b)
5154 {
5155 	return a > b;
5156 }
5157 
5158 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5159 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5160 {
5161 	struct btrfs_bio_stripe s;
5162 	int i;
5163 	u64 l;
5164 	int again = 1;
5165 
5166 	while (again) {
5167 		again = 0;
5168 		for (i = 0; i < num_stripes - 1; i++) {
5169 			if (parity_smaller(bbio->raid_map[i],
5170 					   bbio->raid_map[i+1])) {
5171 				s = bbio->stripes[i];
5172 				l = bbio->raid_map[i];
5173 				bbio->stripes[i] = bbio->stripes[i+1];
5174 				bbio->raid_map[i] = bbio->raid_map[i+1];
5175 				bbio->stripes[i+1] = s;
5176 				bbio->raid_map[i+1] = l;
5177 
5178 				again = 1;
5179 			}
5180 		}
5181 	}
5182 }
5183 
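/*
 * Example of the resulting order (illustrative; assumes the usual marker
 * values, where RAID5_P_STRIPE and RAID6_Q_STRIPE are the two largest u64
 * constants): a three-stripe RAID5 set with raid_map
 * { RAID5_P_STRIPE, 0, 65536 } sorts to { 0, 65536, RAID5_P_STRIPE },
 * i.e. data stripes in ascending logical order with parity last.
 */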
5184 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5185 {
5186 	struct btrfs_bio *bbio = kzalloc(
5187 		 /* the size of the btrfs_bio */
5188 		sizeof(struct btrfs_bio) +
5189 		/* plus the variable array for the stripes */
5190 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5191 		/* plus the variable array for the tgt dev */
5192 		sizeof(int) * (real_stripes) +
5193 		/*
5194 		 * plus the raid_map, which includes both the tgt dev
5195 		 * and the stripes
5196 		 */
5197 		sizeof(u64) * (total_stripes),
5198 		GFP_NOFS|__GFP_NOFAIL);
5199 
5200 	atomic_set(&bbio->error, 0);
5201 	atomic_set(&bbio->refs, 1);
5202 
5203 	return bbio;
5204 }
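
/*
 * Descriptive note on the allocation above (added, derived from the kzalloc
 * arithmetic): everything lives in one block, so a single kfree() in
 * btrfs_put_bbio() releases it all:
 *
 *	struct btrfs_bio	header (ends in the flexible stripes array)
 *	struct btrfs_bio_stripe	stripes[total_stripes]
 *	int			tgtdev_map[real_stripes]
 *	u64			raid_map[total_stripes]
 *
 * __btrfs_map_block() later points bbio->tgtdev_map and bbio->raid_map
 * into this trailing area.
 */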
5205 
5206 void btrfs_get_bbio(struct btrfs_bio *bbio)
5207 {
5208 	WARN_ON(!atomic_read(&bbio->refs));
5209 	atomic_inc(&bbio->refs);
5210 }
5211 
5212 void btrfs_put_bbio(struct btrfs_bio *bbio)
5213 {
5214 	if (!bbio)
5215 		return;
5216 	if (atomic_dec_and_test(&bbio->refs))
5217 		kfree(bbio);
5218 }
5219 
5220 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5221 			     u64 logical, u64 *length,
5222 			     struct btrfs_bio **bbio_ret,
5223 			     int mirror_num, int need_raid_map)
5224 {
5225 	struct extent_map *em;
5226 	struct map_lookup *map;
5227 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5228 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5229 	u64 offset;
5230 	u64 stripe_offset;
5231 	u64 stripe_end_offset;
5232 	u64 stripe_nr;
5233 	u64 stripe_nr_orig;
5234 	u64 stripe_nr_end;
5235 	u64 stripe_len;
5236 	u32 stripe_index;
5237 	int i;
5238 	int ret = 0;
5239 	int num_stripes;
5240 	int max_errors = 0;
5241 	int tgtdev_indexes = 0;
5242 	struct btrfs_bio *bbio = NULL;
5243 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5244 	int dev_replace_is_ongoing = 0;
5245 	int num_alloc_stripes;
5246 	int patch_the_first_stripe_for_dev_replace = 0;
5247 	u64 physical_to_patch_in_first_stripe = 0;
5248 	u64 raid56_full_stripe_start = (u64)-1;
5249 
5250 	read_lock(&em_tree->lock);
5251 	em = lookup_extent_mapping(em_tree, logical, *length);
5252 	read_unlock(&em_tree->lock);
5253 
5254 	if (!em) {
5255 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5256 			logical, *length);
5257 		return -EINVAL;
5258 	}
5259 
5260 	if (em->start > logical || em->start + em->len < logical) {
5261 		btrfs_crit(fs_info,
5262 			   "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5263 			   logical, em->start, em->start + em->len);
5264 		free_extent_map(em);
5265 		return -EINVAL;
5266 	}
5267 
5268 	map = em->map_lookup;
5269 	offset = logical - em->start;
5270 
5271 	stripe_len = map->stripe_len;
5272 	stripe_nr = offset;
5273 	/*
5274 	 * stripe_nr counts the total number of stripes we have to stride
5275 	 * to get to this block
5276 	 */
5277 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5278 
5279 	stripe_offset = stripe_nr * stripe_len;
5280 	BUG_ON(offset < stripe_offset);
5281 
5282 	/* stripe_offset is the offset of this block in its stripe*/
5283 	/* stripe_offset is the offset of this block in its stripe */
5284 
5285 	/* if we're here for raid56, we need to know the stripe aligned start */
5286 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5287 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5288 		raid56_full_stripe_start = offset;
5289 
5290 		/* allow a write of a full stripe, but make sure we don't
5291 		 * allow straddling of stripes
5292 		 */
5293 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5294 				full_stripe_len);
5295 		raid56_full_stripe_start *= full_stripe_len;
5296 	}
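	/*
	 * Worked example (illustrative numbers): with a 64K stripe_len and
	 * three data stripes, full_stripe_len is 192K, so an offset of 200K
	 * rounds down to a raid56_full_stripe_start of 192K.
	 */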
5297 
5298 	if (rw & REQ_DISCARD) {
5299 		/* we don't discard raid56 yet */
5300 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5301 			ret = -EOPNOTSUPP;
5302 			goto out;
5303 		}
5304 		*length = min_t(u64, em->len - offset, *length);
5305 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5306 		u64 max_len;
5307 		/* For writes to RAID[56], allow a full stripeset across all disks.
5308 		   For other RAID types and for RAID[56] reads, just allow a single
5309 		   stripe (on a single disk). */
5310 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5311 		    (rw & REQ_WRITE)) {
5312 			max_len = stripe_len * nr_data_stripes(map) -
5313 				(offset - raid56_full_stripe_start);
5314 		} else {
5315 			/* we limit the length of each bio to what fits in a stripe */
5316 			max_len = stripe_len - stripe_offset;
5317 		}
5318 		*length = min_t(u64, em->len - offset, max_len);
5319 	} else {
5320 		*length = em->len - offset;
5321 	}
5322 
5323 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5324 	   it cares about is the length */
5325 	if (!bbio_ret)
5326 		goto out;
5327 
5328 	btrfs_dev_replace_lock(dev_replace);
5329 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5330 	if (!dev_replace_is_ongoing)
5331 		btrfs_dev_replace_unlock(dev_replace);
5332 
5333 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5334 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5335 	    dev_replace->tgtdev != NULL) {
5336 		/*
5337 		 * in dev-replace case, for repair case (that's the only
5338 		 * case where the mirror is selected explicitly when
5339 		 * calling btrfs_map_block), blocks left of the left cursor
5340 		 * can also be read from the target drive.
5341 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5342 		 * the last one to the array of stripes. For READ, it also
5343 		 * needs to be supported using the same mirror number.
5344 		 * If the requested block is not left of the left cursor,
5345 		 * EIO is returned. This can happen because btrfs_num_copies()
5346 		 * returns one more in the dev-replace case.
5347 		 */
5348 		u64 tmp_length = *length;
5349 		struct btrfs_bio *tmp_bbio = NULL;
5350 		int tmp_num_stripes;
5351 		u64 srcdev_devid = dev_replace->srcdev->devid;
5352 		int index_srcdev = 0;
5353 		int found = 0;
5354 		u64 physical_of_found = 0;
5355 
5356 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5357 			     logical, &tmp_length, &tmp_bbio, 0, 0);
5358 		if (ret) {
5359 			WARN_ON(tmp_bbio != NULL);
5360 			goto out;
5361 		}
5362 
5363 		tmp_num_stripes = tmp_bbio->num_stripes;
5364 		if (mirror_num > tmp_num_stripes) {
5365 			/*
5366 			 * REQ_GET_READ_MIRRORS does not contain this
5367 			 * mirror, that means that the requested area
5368 			 * is not left of the left cursor
5369 			 */
5370 			ret = -EIO;
5371 			btrfs_put_bbio(tmp_bbio);
5372 			goto out;
5373 		}
5374 
5375 		/*
5376 		 * process the rest of the function using the mirror_num
5377 		 * of the source drive. Therefore look it up first.
5378 		 * At the end, patch the device pointer to that of the
5379 		 * target drive.
5380 		 */
5381 		for (i = 0; i < tmp_num_stripes; i++) {
5382 			if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
5383 				continue;
5384 
5385 			/*
5386 			 * In case of DUP, in order to keep it simple, only add
5387 			 * the mirror with the lowest physical address
5388 			 */
5389 			if (found &&
5390 			    physical_of_found <= tmp_bbio->stripes[i].physical)
5391 				continue;
5392 
5393 			index_srcdev = i;
5394 			found = 1;
5395 			physical_of_found = tmp_bbio->stripes[i].physical;
5396 		}
5397 
5398 		btrfs_put_bbio(tmp_bbio);
5399 
5400 		if (!found) {
5401 			WARN_ON(1);
5402 			ret = -EIO;
5403 			goto out;
5404 		}
5405 
5406 		mirror_num = index_srcdev + 1;
5407 		patch_the_first_stripe_for_dev_replace = 1;
5408 		physical_to_patch_in_first_stripe = physical_of_found;
5409 	} else if (mirror_num > map->num_stripes) {
5410 		mirror_num = 0;
5411 	}
5412 
5413 	num_stripes = 1;
5414 	stripe_index = 0;
5415 	stripe_nr_orig = stripe_nr;
5416 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5417 	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5418 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5419 			    (offset + *length);
5420 
5421 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5422 		if (rw & REQ_DISCARD)
5423 			num_stripes = min_t(u64, map->num_stripes,
5424 					    stripe_nr_end - stripe_nr_orig);
5425 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5426 				&stripe_index);
5427 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5428 			mirror_num = 1;
5429 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5430 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5431 			num_stripes = map->num_stripes;
5432 		else if (mirror_num)
5433 			stripe_index = mirror_num - 1;
5434 		else {
5435 			stripe_index = find_live_mirror(fs_info, map, 0,
5436 					    map->num_stripes,
5437 					    current->pid % map->num_stripes,
5438 					    dev_replace_is_ongoing);
5439 			mirror_num = stripe_index + 1;
5440 		}
5441 
5442 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5443 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5444 			num_stripes = map->num_stripes;
5445 		} else if (mirror_num) {
5446 			stripe_index = mirror_num - 1;
5447 		} else {
5448 			mirror_num = 1;
5449 		}
5450 
5451 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5452 		u32 factor = map->num_stripes / map->sub_stripes;
5453 
5454 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5455 		stripe_index *= map->sub_stripes;
5456 
5457 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5458 			num_stripes = map->sub_stripes;
5459 		else if (rw & REQ_DISCARD)
5460 			num_stripes = min_t(u64, map->sub_stripes *
5461 					    (stripe_nr_end - stripe_nr_orig),
5462 					    map->num_stripes);
5463 		else if (mirror_num)
5464 			stripe_index += mirror_num - 1;
5465 		else {
5466 			int old_stripe_index = stripe_index;
5467 			stripe_index = find_live_mirror(fs_info, map,
5468 					      stripe_index,
5469 					      map->sub_stripes, stripe_index +
5470 					      current->pid % map->sub_stripes,
5471 					      dev_replace_is_ongoing);
5472 			mirror_num = stripe_index - old_stripe_index + 1;
5473 		}
5474 
5475 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5476 		if (need_raid_map &&
5477 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5478 		     mirror_num > 1)) {
5479 			/* push stripe_nr back to the start of the full stripe */
5480 			stripe_nr = div_u64(raid56_full_stripe_start,
5481 					stripe_len * nr_data_stripes(map));
5482 
5483 			/* RAID[56] write or recovery. Return all stripes */
5484 			num_stripes = map->num_stripes;
5485 			max_errors = nr_parity_stripes(map);
5486 
5487 			*length = map->stripe_len;
5488 			stripe_index = 0;
5489 			stripe_offset = 0;
5490 		} else {
5491 			/*
5492 			 * Mirror #0 or #1 means the original data block.
5493 			 * Mirror #2 is RAID5 parity block.
5494 			 * Mirror #3 is RAID6 Q block.
5495 			 */
5496 			stripe_nr = div_u64_rem(stripe_nr,
5497 					nr_data_stripes(map), &stripe_index);
5498 			if (mirror_num > 1)
5499 				stripe_index = nr_data_stripes(map) +
5500 						mirror_num - 2;
5501 
5502 			/* We distribute the parity blocks across stripes */
5503 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5504 					&stripe_index);
5505 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5506 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5507 				mirror_num = 1;
5508 		}
5509 	} else {
5510 		/*
5511 		 * after this, stripe_nr is the number of stripes on this
5512 		 * device we have to walk to find the data, and stripe_index is
5513 		 * the number of our device in the stripe array
5514 		 */
5515 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5516 				&stripe_index);
5517 		mirror_num = stripe_index + 1;
5518 	}
5519 	BUG_ON(stripe_index >= map->num_stripes);
5520 
5521 	num_alloc_stripes = num_stripes;
5522 	if (dev_replace_is_ongoing) {
5523 		if (rw & (REQ_WRITE | REQ_DISCARD))
5524 			num_alloc_stripes <<= 1;
5525 		if (rw & REQ_GET_READ_MIRRORS)
5526 			num_alloc_stripes++;
5527 		tgtdev_indexes = num_stripes;
5528 	}
5529 
5530 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5531 	if (!bbio) {
5532 		ret = -ENOMEM;
5533 		goto out;
5534 	}
5535 	if (dev_replace_is_ongoing)
5536 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5537 
5538 	/* build raid_map */
5539 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5540 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5541 	    mirror_num > 1)) {
5542 		u64 tmp;
5543 		unsigned rot;
5544 
5545 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5546 				 sizeof(struct btrfs_bio_stripe) *
5547 				 num_alloc_stripes +
5548 				 sizeof(int) * tgtdev_indexes);
5549 
5550 		/* Work out the disk rotation on this stripe-set */
5551 		div_u64_rem(stripe_nr, num_stripes, &rot);
5552 
5553 		/* Fill in the logical address of each stripe */
5554 		tmp = stripe_nr * nr_data_stripes(map);
5555 		for (i = 0; i < nr_data_stripes(map); i++)
5556 			bbio->raid_map[(i+rot) % num_stripes] =
5557 				em->start + (tmp + i) * map->stripe_len;
5558 
5559 		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
5560 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5561 			bbio->raid_map[(i+rot+1) % num_stripes] =
5562 				RAID6_Q_STRIPE;
5563 	}
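	/*
	 * Editor's worked example (assumed values): RAID5 over three
	 * devices has nr_data_stripes = 2.  For full-stripe number
	 * stripe_nr = 1, rot = 1, so the loop fills raid_map[1] and
	 * raid_map[2] with the data stripe logical addresses, and
	 * RAID5_P_STRIPE lands at raid_map[(2 + 1) % 3] = raid_map[0]:
	 * parity rotates by one slot per full stripe.
	 */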
5564 
5565 	if (rw & REQ_DISCARD) {
5566 		u32 factor = 0;
5567 		u32 sub_stripes = 0;
5568 		u64 stripes_per_dev = 0;
5569 		u32 remaining_stripes = 0;
5570 		u32 last_stripe = 0;
5571 
5572 		if (map->type &
5573 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5574 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5575 				sub_stripes = 1;
5576 			else
5577 				sub_stripes = map->sub_stripes;
5578 
5579 			factor = map->num_stripes / sub_stripes;
5580 			stripes_per_dev = div_u64_rem(stripe_nr_end -
5581 						      stripe_nr_orig,
5582 						      factor,
5583 						      &remaining_stripes);
5584 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5585 			last_stripe *= sub_stripes;
5586 		}
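		/*
		 * Editor's worked example (assumed values): discarding
		 * stripes 2..8 (stripe_nr_orig = 2, stripe_nr_end = 9) on a
		 * 3-device RAID0 gives factor = 3, stripes_per_dev =
		 * (9 - 2) / 3 = 2 with remaining_stripes = 1, and
		 * last_stripe = (9 - 1) % 3 = 2, so device 2 holds the
		 * final, possibly shortened stripe.
		 */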
5587 
5588 		for (i = 0; i < num_stripes; i++) {
5589 			bbio->stripes[i].physical =
5590 				map->stripes[stripe_index].physical +
5591 				stripe_offset + stripe_nr * map->stripe_len;
5592 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5593 
5594 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5595 					 BTRFS_BLOCK_GROUP_RAID10)) {
5596 				bbio->stripes[i].length = stripes_per_dev *
5597 							  map->stripe_len;
5598 
5599 				if (i / sub_stripes < remaining_stripes)
5600 					bbio->stripes[i].length +=
5601 						map->stripe_len;
5602 
5603 				/*
5604 				 * Special for the first stripe and
5605 				 * the last stripe:
5606 				 *
5607 				 * |-------|...|-------|
5608 				 *     |----------|
5609 				 *    off     end_off
5610 				 */
5611 				if (i < sub_stripes)
5612 					bbio->stripes[i].length -=
5613 						stripe_offset;
5614 
5615 				if (stripe_index >= last_stripe &&
5616 				    stripe_index <= (last_stripe +
5617 						     sub_stripes - 1))
5618 					bbio->stripes[i].length -=
5619 						stripe_end_offset;
5620 
5621 				if (i == sub_stripes - 1)
5622 					stripe_offset = 0;
5623 			} else
5624 				bbio->stripes[i].length = *length;
5625 
5626 			stripe_index++;
5627 			if (stripe_index == map->num_stripes) {
5628 				/* This could only happen for RAID0/10 */
5629 				stripe_index = 0;
5630 				stripe_nr++;
5631 			}
5632 		}
5633 	} else {
5634 		for (i = 0; i < num_stripes; i++) {
5635 			bbio->stripes[i].physical =
5636 				map->stripes[stripe_index].physical +
5637 				stripe_offset +
5638 				stripe_nr * map->stripe_len;
5639 			bbio->stripes[i].dev =
5640 				map->stripes[stripe_index].dev;
5641 			stripe_index++;
5642 		}
5643 	}
5644 
5645 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5646 		max_errors = btrfs_chunk_max_errors(map);
5647 
5648 	if (bbio->raid_map)
5649 		sort_parity_stripes(bbio, num_stripes);
5650 
5651 	tgtdev_indexes = 0;
5652 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5653 	    dev_replace->tgtdev != NULL) {
5654 		int index_where_to_add;
5655 		u64 srcdev_devid = dev_replace->srcdev->devid;
5656 
5657 		/*
5658 		 * duplicate the write operations while the dev replace
5659 		 * procedure is running. Since the copying of the old disk
5660 		 * to the new disk takes place at run time while the
5661 		 * filesystem is mounted writable, the regular write
5662 		 * operations to the old disk have to be duplicated to go
5663 		 * to the new disk as well.
5664 		 * Note that device->missing is handled by the caller, and
5665 		 * that the write to the old disk is already set up in the
5666 		 * stripes array.
5667 		 */
5668 		index_where_to_add = num_stripes;
5669 		for (i = 0; i < num_stripes; i++) {
5670 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5671 				/* write to new disk, too */
5672 				struct btrfs_bio_stripe *new =
5673 					bbio->stripes + index_where_to_add;
5674 				struct btrfs_bio_stripe *old =
5675 					bbio->stripes + i;
5676 
5677 				new->physical = old->physical;
5678 				new->length = old->length;
5679 				new->dev = dev_replace->tgtdev;
5680 				bbio->tgtdev_map[i] = index_where_to_add;
5681 				index_where_to_add++;
5682 				max_errors++;
5683 				tgtdev_indexes++;
5684 			}
5685 		}
5686 		num_stripes = index_where_to_add;
5687 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5688 		   dev_replace->tgtdev != NULL) {
5689 		u64 srcdev_devid = dev_replace->srcdev->devid;
5690 		int index_srcdev = 0;
5691 		int found = 0;
5692 		u64 physical_of_found = 0;
5693 
5694 		/*
5695 		 * During the dev-replace procedure, the target drive can
5696 		 * also be used to read data in case it is needed to repair
5697 		 * a corrupt block elsewhere. This is possible if the
5698 		 * requested area is left of the left cursor. In this area,
5699 		 * the target drive is a full copy of the source drive.
5700 		 */
5701 		for (i = 0; i < num_stripes; i++) {
5702 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5703 				/*
5704 				 * In case of DUP, in order to keep it
5705 				 * simple, only add the mirror with the
5706 				 * lowest physical address
5707 				 */
5708 				if (found &&
5709 				    physical_of_found <=
5710 				     bbio->stripes[i].physical)
5711 					continue;
5712 				index_srcdev = i;
5713 				found = 1;
5714 				physical_of_found = bbio->stripes[i].physical;
5715 			}
5716 		}
5717 		if (found) {
5718 			if (physical_of_found + map->stripe_len <=
5719 			    dev_replace->cursor_left) {
5720 				struct btrfs_bio_stripe *tgtdev_stripe =
5721 					bbio->stripes + num_stripes;
5722 
5723 				tgtdev_stripe->physical = physical_of_found;
5724 				tgtdev_stripe->length =
5725 					bbio->stripes[index_srcdev].length;
5726 				tgtdev_stripe->dev = dev_replace->tgtdev;
5727 				bbio->tgtdev_map[index_srcdev] = num_stripes;
5728 
5729 				tgtdev_indexes++;
5730 				num_stripes++;
5731 			}
5732 		}
5733 	}
5734 
5735 	*bbio_ret = bbio;
5736 	bbio->map_type = map->type;
5737 	bbio->num_stripes = num_stripes;
5738 	bbio->max_errors = max_errors;
5739 	bbio->mirror_num = mirror_num;
5740 	bbio->num_tgtdevs = tgtdev_indexes;
5741 
5742 	/*
5743 	 * This is the case where REQ_READ && dev_replace_is_ongoing &&
5744 	 * mirror_num == num_stripes + 1 && the dev_replace target drive
5745 	 * is available as a mirror.
5746 	 */
5747 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5748 		WARN_ON(num_stripes > 1);
5749 		bbio->stripes[0].dev = dev_replace->tgtdev;
5750 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5751 		bbio->mirror_num = map->num_stripes + 1;
5752 	}
5753 out:
5754 	if (dev_replace_is_ongoing)
5755 		btrfs_dev_replace_unlock(dev_replace);
5756 	free_extent_map(em);
5757 	return ret;
5758 }
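
/*
 * Editor's sketch (hypothetical helper, not part of the original file):
 * the usual call/put pattern around btrfs_map_block().  With
 * REQ_GET_READ_MIRRORS, the returned bbio->num_stripes counts the
 * mirrors that can serve the range (see the RAID1 branch above).
 */
static inline int demo_count_mirrors(struct btrfs_fs_info *fs_info,
				     u64 logical, u64 len)
{
	struct btrfs_bio *bbio = NULL;
	int ret;

	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
			      &len, &bbio, 0);
	if (ret)
		return ret;

	ret = bbio->num_stripes;
	btrfs_put_bbio(bbio);
	return ret;
}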
5759 
5760 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5761 		      u64 logical, u64 *length,
5762 		      struct btrfs_bio **bbio_ret, int mirror_num)
5763 {
5764 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5765 				 mirror_num, 0);
5766 }
5767 
5768 /* For Scrub/replace */
5769 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5770 		     u64 logical, u64 *length,
5771 		     struct btrfs_bio **bbio_ret, int mirror_num,
5772 		     int need_raid_map)
5773 {
5774 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5775 				 mirror_num, need_raid_map);
5776 }
5777 
5778 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5779 		     u64 chunk_start, u64 physical, u64 devid,
5780 		     u64 **logical, int *naddrs, int *stripe_len)
5781 {
5782 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5783 	struct extent_map *em;
5784 	struct map_lookup *map;
5785 	u64 *buf;
5786 	u64 bytenr;
5787 	u64 length;
5788 	u64 stripe_nr;
5789 	u64 rmap_len;
5790 	int i, j, nr = 0;
5791 
5792 	read_lock(&em_tree->lock);
5793 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5794 	read_unlock(&em_tree->lock);
5795 
5796 	if (!em) {
5797 		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5798 		       chunk_start);
5799 		return -EIO;
5800 	}
5801 
5802 	if (em->start != chunk_start) {
5803 		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5804 		       em->start, chunk_start);
5805 		free_extent_map(em);
5806 		return -EIO;
5807 	}
5808 	map = em->map_lookup;
5809 
5810 	length = em->len;
5811 	rmap_len = map->stripe_len;
5812 
5813 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5814 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5815 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5816 		length = div_u64(length, map->num_stripes);
5817 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5818 		length = div_u64(length, nr_data_stripes(map));
5819 		rmap_len = map->stripe_len * nr_data_stripes(map);
5820 	}
5821 
5822 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5823 	BUG_ON(!buf); /* -ENOMEM */
5824 
5825 	for (i = 0; i < map->num_stripes; i++) {
5826 		if (devid && map->stripes[i].dev->devid != devid)
5827 			continue;
5828 		if (map->stripes[i].physical > physical ||
5829 		    map->stripes[i].physical + length <= physical)
5830 			continue;
5831 
5832 		stripe_nr = physical - map->stripes[i].physical;
5833 		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5834 
5835 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5836 			stripe_nr = stripe_nr * map->num_stripes + i;
5837 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5838 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5839 			stripe_nr = stripe_nr * map->num_stripes + i;
5840 		} /* else if RAID[56], multiply by nr_data_stripes().
5841 		   * Alternatively, just use rmap_len below instead of
5842 		   * map->stripe_len */
5843 
5844 		bytenr = chunk_start + stripe_nr * rmap_len;
5845 		WARN_ON(nr >= map->num_stripes);
5846 		for (j = 0; j < nr; j++) {
5847 			if (buf[j] == bytenr)
5848 				break;
5849 		}
5850 		if (j == nr) {
5851 			WARN_ON(nr >= map->num_stripes);
5852 			buf[nr++] = bytenr;
5853 		}
5854 	}
5855 
5856 	*logical = buf;
5857 	*naddrs = nr;
5858 	*stripe_len = rmap_len;
5859 
5860 	free_extent_map(em);
5861 	return 0;
5862 }
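
/*
 * Editor's worked example for btrfs_rmap_block() (assumed values): on a
 * 2-device RAID0 chunk with chunk_start = 1M and stripe_len = 64K, a
 * physical address three stripes into device 1 gives stripe_nr = 3,
 * then stripe_nr = 3 * 2 + 1 = 7, so the reported logical address is
 * bytenr = 1M + 7 * 64K.
 */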
5863 
5864 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5865 {
5866 	bio->bi_private = bbio->private;
5867 	bio->bi_end_io = bbio->end_io;
5868 	bio_endio(bio);
5869 
5870 	btrfs_put_bbio(bbio);
5871 }
5872 
5873 static void btrfs_end_bio(struct bio *bio)
5874 {
5875 	struct btrfs_bio *bbio = bio->bi_private;
5876 	int is_orig_bio = 0;
5877 
5878 	if (bio->bi_error) {
5879 		atomic_inc(&bbio->error);
5880 		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5881 			unsigned int stripe_index =
5882 				btrfs_io_bio(bio)->stripe_index;
5883 			struct btrfs_device *dev;
5884 
5885 			BUG_ON(stripe_index >= bbio->num_stripes);
5886 			dev = bbio->stripes[stripe_index].dev;
5887 			if (dev->bdev) {
5888 				if (bio->bi_rw & WRITE)
5889 					btrfs_dev_stat_inc(dev,
5890 						BTRFS_DEV_STAT_WRITE_ERRS);
5891 				else
5892 					btrfs_dev_stat_inc(dev,
5893 						BTRFS_DEV_STAT_READ_ERRS);
5894 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5895 					btrfs_dev_stat_inc(dev,
5896 						BTRFS_DEV_STAT_FLUSH_ERRS);
5897 				btrfs_dev_stat_print_on_error(dev);
5898 			}
5899 		}
5900 	}
5901 
5902 	if (bio == bbio->orig_bio)
5903 		is_orig_bio = 1;
5904 
5905 	btrfs_bio_counter_dec(bbio->fs_info);
5906 
5907 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5908 		if (!is_orig_bio) {
5909 			bio_put(bio);
5910 			bio = bbio->orig_bio;
5911 		}
5912 
5913 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5914 		/* only send an error to the higher layers if it is
5915 		 * beyond the tolerance of the btrfs bio
5916 		 */
5917 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5918 			bio->bi_error = -EIO;
5919 		} else {
5920 			/*
5921 			 * this bio is actually up to date, we didn't
5922 			 * go over the max number of errors
5923 			 */
5924 			bio->bi_error = 0;
5925 		}
5926 
5927 		btrfs_end_bbio(bbio, bio);
5928 	} else if (!is_orig_bio) {
5929 		bio_put(bio);
5930 	}
5931 }
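
/*
 * Editor's note (assumed example): for a RAID1 write, max_errors is 1,
 * so if exactly one of the two stripe bios fails, the completed bio is
 * still reported as up to date; two failures exceed bbio->max_errors
 * and -EIO is propagated to the higher layers.
 */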
5932 
5933 /*
5934  * see run_scheduled_bios for a description of why bios are collected for
5935  * async submit.
5936  *
5937  * This will add one bio to the pending list for a device and make sure
5938  * the work struct is scheduled.
5939  */
5940 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5941 					struct btrfs_device *device,
5942 					int rw, struct bio *bio)
5943 {
5944 	int should_queue = 1;
5945 	struct btrfs_pending_bios *pending_bios;
5946 
5947 	if (device->missing || !device->bdev) {
5948 		bio_io_error(bio);
5949 		return;
5950 	}
5951 
5952 	/* don't bother with additional async steps for reads, right now */
5953 	if (!(rw & REQ_WRITE)) {
5954 		bio_get(bio);
5955 		btrfsic_submit_bio(rw, bio);
5956 		bio_put(bio);
5957 		return;
5958 	}
5959 
5960 	/*
5961 	 * nr_async_bios allows us to reliably return congestion to the
5962 	 * higher layers.  Otherwise, the async bio makes it appear we have
5963 	 * made progress against dirty pages when we've really just put it
5964 	 * on a queue for later
5965 	 */
5966 	atomic_inc(&root->fs_info->nr_async_bios);
5967 	WARN_ON(bio->bi_next);
5968 	bio->bi_next = NULL;
5969 	bio->bi_rw |= rw;
5970 
5971 	spin_lock(&device->io_lock);
5972 	if (bio->bi_rw & REQ_SYNC)
5973 		pending_bios = &device->pending_sync_bios;
5974 	else
5975 		pending_bios = &device->pending_bios;
5976 
5977 	if (pending_bios->tail)
5978 		pending_bios->tail->bi_next = bio;
5979 
5980 	pending_bios->tail = bio;
5981 	if (!pending_bios->head)
5982 		pending_bios->head = bio;
5983 	if (device->running_pending)
5984 		should_queue = 0;
5985 
5986 	spin_unlock(&device->io_lock);
5987 
5988 	if (should_queue)
5989 		btrfs_queue_work(root->fs_info->submit_workers,
5990 				 &device->work);
5991 }
5992 
5993 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5994 			      struct bio *bio, u64 physical, int dev_nr,
5995 			      int rw, int async)
5996 {
5997 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5998 
5999 	bio->bi_private = bbio;
6000 	btrfs_io_bio(bio)->stripe_index = dev_nr;
6001 	bio->bi_end_io = btrfs_end_bio;
6002 	bio->bi_iter.bi_sector = physical >> 9;
6003 #ifdef DEBUG
6004 	{
6005 		struct rcu_string *name;
6006 
6007 		rcu_read_lock();
6008 		name = rcu_dereference(dev->name);
6009 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
6010 			 "(%s id %llu), size=%u\n", rw,
6011 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6012 			 name->str, dev->devid, bio->bi_iter.bi_size);
6013 		rcu_read_unlock();
6014 	}
6015 #endif
6016 	bio->bi_bdev = dev->bdev;
6017 
6018 	btrfs_bio_counter_inc_noblocked(root->fs_info);
6019 
6020 	if (async)
6021 		btrfs_schedule_bio(root, dev, rw, bio);
6022 	else
6023 		btrfsic_submit_bio(rw, bio);
6024 }
6025 
6026 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6027 {
6028 	atomic_inc(&bbio->error);
6029 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6030 		/* Should be the original bio. */
6031 		WARN_ON(bio != bbio->orig_bio);
6032 
6033 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6034 		bio->bi_iter.bi_sector = logical >> 9;
6035 		bio->bi_error = -EIO;
6036 		btrfs_end_bbio(bbio, bio);
6037 	}
6038 }
6039 
6040 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6041 		  int mirror_num, int async_submit)
6042 {
6043 	struct btrfs_device *dev;
6044 	struct bio *first_bio = bio;
6045 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6046 	u64 length = 0;
6047 	u64 map_length;
6048 	int ret;
6049 	int dev_nr;
6050 	int total_devs;
6051 	struct btrfs_bio *bbio = NULL;
6052 
6053 	length = bio->bi_iter.bi_size;
6054 	map_length = length;
6055 
6056 	btrfs_bio_counter_inc_blocked(root->fs_info);
6057 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6058 			      mirror_num, 1);
6059 	if (ret) {
6060 		btrfs_bio_counter_dec(root->fs_info);
6061 		return ret;
6062 	}
6063 
6064 	total_devs = bbio->num_stripes;
6065 	bbio->orig_bio = first_bio;
6066 	bbio->private = first_bio->bi_private;
6067 	bbio->end_io = first_bio->bi_end_io;
6068 	bbio->fs_info = root->fs_info;
6069 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6070 
6071 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6072 	    ((rw & WRITE) || (mirror_num > 1))) {
6073 		/* In this case, map_length has been set to the length of
6074 		   a single stripe, not the whole write */
6075 		if (rw & WRITE) {
6076 			ret = raid56_parity_write(root, bio, bbio, map_length);
6077 		} else {
6078 			ret = raid56_parity_recover(root, bio, bbio, map_length,
6079 						    mirror_num, 1);
6080 		}
6081 
6082 		btrfs_bio_counter_dec(root->fs_info);
6083 		return ret;
6084 	}
6085 
6086 	if (map_length < length) {
6087 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6088 			logical, length, map_length);
6089 		BUG();
6090 	}
6091 
6092 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6093 		dev = bbio->stripes[dev_nr].dev;
6094 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6095 			bbio_error(bbio, first_bio, logical);
6096 			continue;
6097 		}
6098 
6099 		if (dev_nr < total_devs - 1) {
6100 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6101 			BUG_ON(!bio); /* -ENOMEM */
6102 		} else
6103 			bio = first_bio;
6104 
6105 		submit_stripe_bio(root, bbio, bio,
6106 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
6107 				  async_submit);
6108 	}
6109 	btrfs_bio_counter_dec(root->fs_info);
6110 	return 0;
6111 }
6112 
6113 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6114 				       u8 *uuid, u8 *fsid)
6115 {
6116 	struct btrfs_device *device;
6117 	struct btrfs_fs_devices *cur_devices;
6118 
6119 	cur_devices = fs_info->fs_devices;
6120 	while (cur_devices) {
6121 		if (!fsid ||
6122 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6123 			device = __find_device(&cur_devices->devices,
6124 					       devid, uuid);
6125 			if (device)
6126 				return device;
6127 		}
6128 		cur_devices = cur_devices->seed;
6129 	}
6130 	return NULL;
6131 }
6132 
6133 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6134 					    struct btrfs_fs_devices *fs_devices,
6135 					    u64 devid, u8 *dev_uuid)
6136 {
6137 	struct btrfs_device *device;
6138 
6139 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6140 	if (IS_ERR(device))
6141 		return NULL;
6142 
6143 	list_add(&device->dev_list, &fs_devices->devices);
6144 	device->fs_devices = fs_devices;
6145 	fs_devices->num_devices++;
6146 
6147 	device->missing = 1;
6148 	fs_devices->missing_devices++;
6149 
6150 	return device;
6151 }
6152 
6153 /**
6154  * btrfs_alloc_device - allocate struct btrfs_device
6155  * @fs_info:	used only for generating a new devid, can be NULL if
6156  *		devid is provided (i.e. @devid != NULL).
6157  * @devid:	a pointer to devid for this device.  If NULL a new devid
6158  *		is generated.
6159  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6160  *		is generated.
6161  *
6162  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6163  * on error.  Returned struct is not linked onto any lists and can be
6164  * destroyed with kfree() right away.
6165  */
6166 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6167 					const u64 *devid,
6168 					const u8 *uuid)
6169 {
6170 	struct btrfs_device *dev;
6171 	u64 tmp;
6172 
6173 	if (WARN_ON(!devid && !fs_info))
6174 		return ERR_PTR(-EINVAL);
6175 
6176 	dev = __alloc_device();
6177 	if (IS_ERR(dev))
6178 		return dev;
6179 
6180 	if (devid)
6181 		tmp = *devid;
6182 	else {
6183 		int ret;
6184 
6185 		ret = find_next_devid(fs_info, &tmp);
6186 		if (ret) {
6187 			kfree(dev);
6188 			return ERR_PTR(ret);
6189 		}
6190 	}
6191 	dev->devid = tmp;
6192 
6193 	if (uuid)
6194 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6195 	else
6196 		generate_random_uuid(dev->uuid);
6197 
6198 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6199 			pending_bios_fn, NULL, NULL);
6200 
6201 	return dev;
6202 }
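
/*
 * Editor's sketch (hypothetical helper, not part of the original file):
 * minimal use of btrfs_alloc_device() per the kernel-doc above.  The
 * devid value is an assumption for illustration.
 */
static inline struct btrfs_device *demo_alloc_device(const u8 *uuid)
{
	u64 devid = 1;
	struct btrfs_device *dev;

	dev = btrfs_alloc_device(NULL, &devid, uuid);
	if (IS_ERR(dev))
		return NULL;

	/* Not linked onto any list yet, so kfree(dev) is a valid teardown. */
	return dev;
}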
6203 
6204 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6205 			  struct extent_buffer *leaf,
6206 			  struct btrfs_chunk *chunk)
6207 {
6208 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6209 	struct map_lookup *map;
6210 	struct extent_map *em;
6211 	u64 logical;
6212 	u64 length;
6213 	u64 stripe_len;
6214 	u64 devid;
6215 	u8 uuid[BTRFS_UUID_SIZE];
6216 	int num_stripes;
6217 	int ret;
6218 	int i;
6219 
6220 	logical = key->offset;
6221 	length = btrfs_chunk_length(leaf, chunk);
6222 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6223 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6224 	/* Validation check */
6225 	if (!num_stripes) {
6226 		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6227 			  num_stripes);
6228 		return -EIO;
6229 	}
6230 	if (!IS_ALIGNED(logical, root->sectorsize)) {
6231 		btrfs_err(root->fs_info,
6232 			  "invalid chunk logical %llu", logical);
6233 		return -EIO;
6234 	}
6235 	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6236 		btrfs_err(root->fs_info,
6237 			"invalid chunk length %llu", length);
6238 		return -EIO;
6239 	}
6240 	if (!is_power_of_2(stripe_len)) {
6241 		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
6242 			  stripe_len);
6243 		return -EIO;
6244 	}
6245 	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6246 	    btrfs_chunk_type(leaf, chunk)) {
6247 		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6248 			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6249 			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6250 			  btrfs_chunk_type(leaf, chunk));
6251 		return -EIO;
6252 	}
6253 
6254 	read_lock(&map_tree->map_tree.lock);
6255 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6256 	read_unlock(&map_tree->map_tree.lock);
6257 
6258 	/* already mapped? */
6259 	if (em && em->start <= logical && em->start + em->len > logical) {
6260 		free_extent_map(em);
6261 		return 0;
6262 	} else if (em) {
6263 		free_extent_map(em);
6264 	}
6265 
6266 	em = alloc_extent_map();
6267 	if (!em)
6268 		return -ENOMEM;
6269 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6270 	if (!map) {
6271 		free_extent_map(em);
6272 		return -ENOMEM;
6273 	}
6274 
6275 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6276 	em->map_lookup = map;
6277 	em->start = logical;
6278 	em->len = length;
6279 	em->orig_start = 0;
6280 	em->block_start = 0;
6281 	em->block_len = em->len;
6282 
6283 	map->num_stripes = num_stripes;
6284 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6285 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6286 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6287 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6288 	map->type = btrfs_chunk_type(leaf, chunk);
6289 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6290 	for (i = 0; i < num_stripes; i++) {
6291 		map->stripes[i].physical =
6292 			btrfs_stripe_offset_nr(leaf, chunk, i);
6293 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6294 		read_extent_buffer(leaf, uuid, (unsigned long)
6295 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6296 				   BTRFS_UUID_SIZE);
6297 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6298 							uuid, NULL);
6299 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6300 			free_extent_map(em);
6301 			return -EIO;
6302 		}
6303 		if (!map->stripes[i].dev) {
6304 			map->stripes[i].dev =
6305 				add_missing_dev(root, root->fs_info->fs_devices,
6306 						devid, uuid);
6307 			if (!map->stripes[i].dev) {
6308 				free_extent_map(em);
6309 				return -EIO;
6310 			}
6311 			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6312 						devid, uuid);
6313 		}
6314 		map->stripes[i].dev->in_fs_metadata = 1;
6315 	}
6316 
6317 	write_lock(&map_tree->map_tree.lock);
6318 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6319 	write_unlock(&map_tree->map_tree.lock);
6320 	BUG_ON(ret); /* Tree corruption */
6321 	free_extent_map(em);
6322 
6323 	return 0;
6324 }
6325 
6326 static void fill_device_from_item(struct extent_buffer *leaf,
6327 				 struct btrfs_dev_item *dev_item,
6328 				 struct btrfs_device *device)
6329 {
6330 	unsigned long ptr;
6331 
6332 	device->devid = btrfs_device_id(leaf, dev_item);
6333 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6334 	device->total_bytes = device->disk_total_bytes;
6335 	device->commit_total_bytes = device->disk_total_bytes;
6336 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6337 	device->commit_bytes_used = device->bytes_used;
6338 	device->type = btrfs_device_type(leaf, dev_item);
6339 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6340 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6341 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6342 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6343 	device->is_tgtdev_for_dev_replace = 0;
6344 
6345 	ptr = btrfs_device_uuid(dev_item);
6346 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6347 }
6348 
6349 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6350 						  u8 *fsid)
6351 {
6352 	struct btrfs_fs_devices *fs_devices;
6353 	int ret;
6354 
6355 	BUG_ON(!mutex_is_locked(&uuid_mutex));
6356 
6357 	fs_devices = root->fs_info->fs_devices->seed;
6358 	while (fs_devices) {
6359 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6360 			return fs_devices;
6361 
6362 		fs_devices = fs_devices->seed;
6363 	}
6364 
6365 	fs_devices = find_fsid(fsid);
6366 	if (!fs_devices) {
6367 		if (!btrfs_test_opt(root, DEGRADED))
6368 			return ERR_PTR(-ENOENT);
6369 
6370 		fs_devices = alloc_fs_devices(fsid);
6371 		if (IS_ERR(fs_devices))
6372 			return fs_devices;
6373 
6374 		fs_devices->seeding = 1;
6375 		fs_devices->opened = 1;
6376 		return fs_devices;
6377 	}
6378 
6379 	fs_devices = clone_fs_devices(fs_devices);
6380 	if (IS_ERR(fs_devices))
6381 		return fs_devices;
6382 
6383 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6384 				   root->fs_info->bdev_holder);
6385 	if (ret) {
6386 		free_fs_devices(fs_devices);
6387 		fs_devices = ERR_PTR(ret);
6388 		goto out;
6389 	}
6390 
6391 	if (!fs_devices->seeding) {
6392 		__btrfs_close_devices(fs_devices);
6393 		free_fs_devices(fs_devices);
6394 		fs_devices = ERR_PTR(-EINVAL);
6395 		goto out;
6396 	}
6397 
6398 	fs_devices->seed = root->fs_info->fs_devices->seed;
6399 	root->fs_info->fs_devices->seed = fs_devices;
6400 out:
6401 	return fs_devices;
6402 }
6403 
6404 static int read_one_dev(struct btrfs_root *root,
6405 			struct extent_buffer *leaf,
6406 			struct btrfs_dev_item *dev_item)
6407 {
6408 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6409 	struct btrfs_device *device;
6410 	u64 devid;
6411 	int ret;
6412 	u8 fs_uuid[BTRFS_UUID_SIZE];
6413 	u8 dev_uuid[BTRFS_UUID_SIZE];
6414 
6415 	devid = btrfs_device_id(leaf, dev_item);
6416 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6417 			   BTRFS_UUID_SIZE);
6418 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6419 			   BTRFS_UUID_SIZE);
6420 
6421 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6422 		fs_devices = open_seed_devices(root, fs_uuid);
6423 		if (IS_ERR(fs_devices))
6424 			return PTR_ERR(fs_devices);
6425 	}
6426 
6427 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6428 	if (!device) {
6429 		if (!btrfs_test_opt(root, DEGRADED))
6430 			return -EIO;
6431 
6432 		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6433 		if (!device)
6434 			return -ENOMEM;
6435 		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6436 				devid, dev_uuid);
6437 	} else {
6438 		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6439 			return -EIO;
6440 
6441 		if (!device->bdev && !device->missing) {
6442 			/*
6443 			 * This happens when a device that was properly set up
6444 			 * in the device info lists suddenly goes bad.
6445 			 * device->bdev is NULL, so we have to set
6446 			 * device->missing to one here.
6447 			 */
6448 			device->fs_devices->missing_devices++;
6449 			device->missing = 1;
6450 		}
6451 
6452 		/* Move the device to its own fs_devices */
6453 		if (device->fs_devices != fs_devices) {
6454 			ASSERT(device->missing);
6455 
6456 			list_move(&device->dev_list, &fs_devices->devices);
6457 			device->fs_devices->num_devices--;
6458 			fs_devices->num_devices++;
6459 
6460 			device->fs_devices->missing_devices--;
6461 			fs_devices->missing_devices++;
6462 
6463 			device->fs_devices = fs_devices;
6464 		}
6465 	}
6466 
6467 	if (device->fs_devices != root->fs_info->fs_devices) {
6468 		BUG_ON(device->writeable);
6469 		if (device->generation !=
6470 		    btrfs_device_generation(leaf, dev_item))
6471 			return -EINVAL;
6472 	}
6473 
6474 	fill_device_from_item(leaf, dev_item, device);
6475 	device->in_fs_metadata = 1;
6476 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6477 		device->fs_devices->total_rw_bytes += device->total_bytes;
6478 		spin_lock(&root->fs_info->free_chunk_lock);
6479 		root->fs_info->free_chunk_space += device->total_bytes -
6480 			device->bytes_used;
6481 		spin_unlock(&root->fs_info->free_chunk_lock);
6482 	}
6483 	ret = 0;
6484 	return ret;
6485 }
6486 
6487 int btrfs_read_sys_array(struct btrfs_root *root)
6488 {
6489 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6490 	struct extent_buffer *sb;
6491 	struct btrfs_disk_key *disk_key;
6492 	struct btrfs_chunk *chunk;
6493 	u8 *array_ptr;
6494 	unsigned long sb_array_offset;
6495 	int ret = 0;
6496 	u32 num_stripes;
6497 	u32 array_size;
6498 	u32 len = 0;
6499 	u32 cur_offset;
6500 	struct btrfs_key key;
6501 
6502 	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6503 	/*
6504 	 * This will create an extent buffer of nodesize, while the superblock
6505 	 * size is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this
6506 	 * will overallocate, but we can keep it as-is; only the first page is used.
6507 	 */
6508 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6509 	if (!sb)
6510 		return -ENOMEM;
6511 	set_extent_buffer_uptodate(sb);
6512 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6513 	/*
6514 	 * The sb extent buffer is artificial and just used to read the system array.
6515 	 * The set_extent_buffer_uptodate() call does not properly mark all its
6516 	 * pages up-to-date when the page is larger: extent does not cover the
6517 	 * whole page and consequently check_page_uptodate does not find all
6518 	 * the page's extents up-to-date (the hole beyond sb),
6519 	 * write_extent_buffer then triggers a WARN_ON.
6520 	 *
6521 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6522 	 * but sb spans only this function. Add an explicit SetPageUptodate call
6523 	 * to silence the warning, e.g. on PowerPC 64.
6524 	 */
6525 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
6526 		SetPageUptodate(sb->pages[0]);
6527 
6528 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6529 	array_size = btrfs_super_sys_array_size(super_copy);
6530 
6531 	array_ptr = super_copy->sys_chunk_array;
6532 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6533 	cur_offset = 0;
6534 
6535 	while (cur_offset < array_size) {
6536 		disk_key = (struct btrfs_disk_key *)array_ptr;
6537 		len = sizeof(*disk_key);
6538 		if (cur_offset + len > array_size)
6539 			goto out_short_read;
6540 
6541 		btrfs_disk_key_to_cpu(&key, disk_key);
6542 
6543 		array_ptr += len;
6544 		sb_array_offset += len;
6545 		cur_offset += len;
6546 
6547 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6548 			chunk = (struct btrfs_chunk *)sb_array_offset;
6549 			/*
6550 			 * At least one btrfs_chunk with one stripe must be
6551 			 * present; the exact stripe count check comes afterwards.
6552 			 */
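			/*
			 * Editor's note (an assumption about the on-disk
			 * layout): struct btrfs_chunk embeds its first
			 * stripe, so btrfs_chunk_item_size(1) equals
			 * sizeof(struct btrfs_chunk), and each array entry
			 * occupies sizeof(struct btrfs_disk_key) plus
			 * btrfs_chunk_item_size(num_stripes) bytes.
			 */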
6553 			len = btrfs_chunk_item_size(1);
6554 			if (cur_offset + len > array_size)
6555 				goto out_short_read;
6556 
6557 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6558 			if (!num_stripes) {
6559 				printk(KERN_ERR
6560 	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
6561 					num_stripes, cur_offset);
6562 				ret = -EIO;
6563 				break;
6564 			}
6565 
6566 			len = btrfs_chunk_item_size(num_stripes);
6567 			if (cur_offset + len > array_size)
6568 				goto out_short_read;
6569 
6570 			ret = read_one_chunk(root, &key, sb, chunk);
6571 			if (ret)
6572 				break;
6573 		} else {
6574 			printk(KERN_ERR
6575 		"BTRFS: unexpected item type %u in sys_array at offset %u\n",
6576 				(u32)key.type, cur_offset);
6577 			ret = -EIO;
6578 			break;
6579 		}
6580 		array_ptr += len;
6581 		sb_array_offset += len;
6582 		cur_offset += len;
6583 	}
6584 	free_extent_buffer(sb);
6585 	return ret;
6586 
6587 out_short_read:
6588 	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6589 			len, cur_offset);
6590 	free_extent_buffer(sb);
6591 	return -EIO;
6592 }
6593 
6594 int btrfs_read_chunk_tree(struct btrfs_root *root)
6595 {
6596 	struct btrfs_path *path;
6597 	struct extent_buffer *leaf;
6598 	struct btrfs_key key;
6599 	struct btrfs_key found_key;
6600 	int ret;
6601 	int slot;
6602 
6603 	root = root->fs_info->chunk_root;
6604 
6605 	path = btrfs_alloc_path();
6606 	if (!path)
6607 		return -ENOMEM;
6608 
6609 	mutex_lock(&uuid_mutex);
6610 	lock_chunks(root);
6611 
6612 	/*
6613 	 * Read all device items, and then all the chunk items. All
6614 	 * device items are found before any chunk item (their object id
6615 	 * is smaller than the lowest possible object id for a chunk
6616 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6617 	 */
6618 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6619 	key.offset = 0;
6620 	key.type = 0;
6621 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6622 	if (ret < 0)
6623 		goto error;
6624 	while (1) {
6625 		leaf = path->nodes[0];
6626 		slot = path->slots[0];
6627 		if (slot >= btrfs_header_nritems(leaf)) {
6628 			ret = btrfs_next_leaf(root, path);
6629 			if (ret == 0)
6630 				continue;
6631 			if (ret < 0)
6632 				goto error;
6633 			break;
6634 		}
6635 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6636 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6637 			struct btrfs_dev_item *dev_item;
6638 			dev_item = btrfs_item_ptr(leaf, slot,
6639 						  struct btrfs_dev_item);
6640 			ret = read_one_dev(root, leaf, dev_item);
6641 			if (ret)
6642 				goto error;
6643 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6644 			struct btrfs_chunk *chunk;
6645 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6646 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6647 			if (ret)
6648 				goto error;
6649 		}
6650 		path->slots[0]++;
6651 	}
6652 	ret = 0;
6653 error:
6654 	unlock_chunks(root);
6655 	mutex_unlock(&uuid_mutex);
6656 
6657 	btrfs_free_path(path);
6658 	return ret;
6659 }
6660 
6661 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6662 {
6663 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6664 	struct btrfs_device *device;
6665 
6666 	while (fs_devices) {
6667 		mutex_lock(&fs_devices->device_list_mutex);
6668 		list_for_each_entry(device, &fs_devices->devices, dev_list)
6669 			device->dev_root = fs_info->dev_root;
6670 		mutex_unlock(&fs_devices->device_list_mutex);
6671 
6672 		fs_devices = fs_devices->seed;
6673 	}
6674 }
6675 
6676 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6677 {
6678 	int i;
6679 
6680 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6681 		btrfs_dev_stat_reset(dev, i);
6682 }
6683 
6684 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6685 {
6686 	struct btrfs_key key;
6687 	struct btrfs_key found_key;
6688 	struct btrfs_root *dev_root = fs_info->dev_root;
6689 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6690 	struct extent_buffer *eb;
6691 	int slot;
6692 	int ret = 0;
6693 	struct btrfs_device *device;
6694 	struct btrfs_path *path = NULL;
6695 	int i;
6696 
6697 	path = btrfs_alloc_path();
6698 	if (!path) {
6699 		ret = -ENOMEM;
6700 		goto out;
6701 	}
6702 
6703 	mutex_lock(&fs_devices->device_list_mutex);
6704 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6705 		int item_size;
6706 		struct btrfs_dev_stats_item *ptr;
6707 
6708 		key.objectid = 0;
6709 		key.type = BTRFS_DEV_STATS_KEY;
6710 		key.offset = device->devid;
6711 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6712 		if (ret) {
6713 			__btrfs_reset_dev_stats(device);
6714 			device->dev_stats_valid = 1;
6715 			btrfs_release_path(path);
6716 			continue;
6717 		}
6718 		slot = path->slots[0];
6719 		eb = path->nodes[0];
6720 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6721 		item_size = btrfs_item_size_nr(eb, slot);
6722 
6723 		ptr = btrfs_item_ptr(eb, slot,
6724 				     struct btrfs_dev_stats_item);
6725 
6726 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6727 			if (item_size >= (1 + i) * sizeof(__le64))
6728 				btrfs_dev_stat_set(device, i,
6729 					btrfs_dev_stats_value(eb, ptr, i));
6730 			else
6731 				btrfs_dev_stat_reset(device, i);
6732 		}
6733 
6734 		device->dev_stats_valid = 1;
6735 		btrfs_dev_stat_print_on_load(device);
6736 		btrfs_release_path(path);
6737 	}
6738 	mutex_unlock(&fs_devices->device_list_mutex);
6739 
6740 out:
6741 	btrfs_free_path(path);
6742 	return ret < 0 ? ret : 0;
6743 }
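
/*
 * Editor's note (assumed example): the item_size check above makes the
 * on-disk dev_stats item forward and backward compatible.  If an older
 * filesystem stored only three __le64 counters (item_size == 24), stats
 * 0..2 are read and the remaining counters are reset to zero.
 */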
6744 
6745 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6746 				struct btrfs_root *dev_root,
6747 				struct btrfs_device *device)
6748 {
6749 	struct btrfs_path *path;
6750 	struct btrfs_key key;
6751 	struct extent_buffer *eb;
6752 	struct btrfs_dev_stats_item *ptr;
6753 	int ret;
6754 	int i;
6755 
6756 	key.objectid = 0;
6757 	key.type = BTRFS_DEV_STATS_KEY;
6758 	key.offset = device->devid;
6759 
6760 	path = btrfs_alloc_path();
6761 	BUG_ON(!path);
6762 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6763 	if (ret < 0) {
6764 		btrfs_warn_in_rcu(dev_root->fs_info,
6765 			"error %d while searching for dev_stats item for device %s",
6766 			      ret, rcu_str_deref(device->name));
6767 		goto out;
6768 	}
6769 
6770 	if (ret == 0 &&
6771 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6772 		/* need to delete old one and insert a new one */
6773 		ret = btrfs_del_item(trans, dev_root, path);
6774 		if (ret != 0) {
6775 			btrfs_warn_in_rcu(dev_root->fs_info,
6776 				"delete too small dev_stats item for device %s failed %d",
6777 				      rcu_str_deref(device->name), ret);
6778 			goto out;
6779 		}
6780 		ret = 1;
6781 	}
6782 
6783 	if (ret == 1) {
6784 		/* need to insert a new item */
6785 		btrfs_release_path(path);
6786 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6787 					      &key, sizeof(*ptr));
6788 		if (ret < 0) {
6789 			btrfs_warn_in_rcu(dev_root->fs_info,
6790 				"insert dev_stats item for device %s failed %d",
6791 				rcu_str_deref(device->name), ret);
6792 			goto out;
6793 		}
6794 	}
6795 
6796 	eb = path->nodes[0];
6797 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6798 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6799 		btrfs_set_dev_stats_value(eb, ptr, i,
6800 					  btrfs_dev_stat_read(device, i));
6801 	btrfs_mark_buffer_dirty(eb);
6802 
6803 out:
6804 	btrfs_free_path(path);
6805 	return ret;
6806 }
6807 
6808 /*
6809  * called from commit_transaction. Writes all changed device stats to disk.
6810  */
6811 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6812 			struct btrfs_fs_info *fs_info)
6813 {
6814 	struct btrfs_root *dev_root = fs_info->dev_root;
6815 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6816 	struct btrfs_device *device;
6817 	int stats_cnt;
6818 	int ret = 0;
6819 
6820 	mutex_lock(&fs_devices->device_list_mutex);
6821 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6822 		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6823 			continue;
6824 
6825 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6826 		ret = update_dev_stat_item(trans, dev_root, device);
6827 		if (!ret)
6828 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6829 	}
6830 	mutex_unlock(&fs_devices->device_list_mutex);
6831 
6832 	return ret;
6833 }
6834 
6835 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6836 {
6837 	btrfs_dev_stat_inc(dev, index);
6838 	btrfs_dev_stat_print_on_error(dev);
6839 }
6840 
6841 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6842 {
6843 	if (!dev->dev_stats_valid)
6844 		return;
6845 	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
6846 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6847 			   rcu_str_deref(dev->name),
6848 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6849 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6850 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6851 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6852 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6853 }
6854 
6855 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6856 {
6857 	int i;
6858 
6859 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6860 		if (btrfs_dev_stat_read(dev, i) != 0)
6861 			break;
6862 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6863 		return; /* all values == 0, suppress message */
6864 
6865 	btrfs_info_in_rcu(dev->dev_root->fs_info,
6866 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6867 	       rcu_str_deref(dev->name),
6868 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6869 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6870 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6871 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6872 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6873 }
6874 
6875 int btrfs_get_dev_stats(struct btrfs_root *root,
6876 			struct btrfs_ioctl_get_dev_stats *stats)
6877 {
6878 	struct btrfs_device *dev;
6879 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6880 	int i;
6881 
6882 	mutex_lock(&fs_devices->device_list_mutex);
6883 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6884 	mutex_unlock(&fs_devices->device_list_mutex);
6885 
6886 	if (!dev) {
6887 		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6888 		return -ENODEV;
6889 	} else if (!dev->dev_stats_valid) {
6890 		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6891 		return -ENODEV;
6892 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6893 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6894 			if (stats->nr_items > i)
6895 				stats->values[i] =
6896 					btrfs_dev_stat_read_and_reset(dev, i);
6897 			else
6898 				btrfs_dev_stat_reset(dev, i);
6899 		}
6900 	} else {
6901 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6902 			if (stats->nr_items > i)
6903 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6904 	}
6905 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6906 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6907 	return 0;
6908 }
6909 
6910 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
6911 {
6912 	struct buffer_head *bh;
6913 	struct btrfs_super_block *disk_super;
6914 	int copy_num;
6915 
6916 	if (!bdev)
6917 		return;
6918 
6919 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
6920 		copy_num++) {
6921 
6922 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
6923 			continue;
6924 
6925 		disk_super = (struct btrfs_super_block *)bh->b_data;
6926 
6927 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6928 		set_buffer_dirty(bh);
6929 		sync_dirty_buffer(bh);
6930 		brelse(bh);
6931 	}
6932 
6933 	/* Notify udev that the device has changed */
6934 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
6935 
6936 	/* Update ctime/mtime for device path for libblkid */
6937 	update_dev_time(device_path);
6938 }
6939 
6940 /*
6941  * Update the size of all devices, which is used for writing out the
6942  * super blocks.
6943  */
6944 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6945 {
6946 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6947 	struct btrfs_device *curr, *next;
6948 
6949 	if (list_empty(&fs_devices->resized_devices))
6950 		return;
6951 
6952 	mutex_lock(&fs_devices->device_list_mutex);
6953 	lock_chunks(fs_info->dev_root);
6954 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6955 				 resized_list) {
6956 		list_del_init(&curr->resized_list);
6957 		curr->commit_total_bytes = curr->disk_total_bytes;
6958 	}
6959 	unlock_chunks(fs_info->dev_root);
6960 	mutex_unlock(&fs_devices->device_list_mutex);
6961 }
6962 
6963 /* Must be invoked during the transaction commit */
6964 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6965 					struct btrfs_transaction *transaction)
6966 {
6967 	struct extent_map *em;
6968 	struct map_lookup *map;
6969 	struct btrfs_device *dev;
6970 	int i;
6971 
6972 	if (list_empty(&transaction->pending_chunks))
6973 		return;
6974 
6975 	/* In order to kick the device replace finish process */
6976 	lock_chunks(root);
6977 	list_for_each_entry(em, &transaction->pending_chunks, list) {
6978 		map = em->map_lookup;
6979 
6980 		for (i = 0; i < map->num_stripes; i++) {
6981 			dev = map->stripes[i].dev;
6982 			dev->commit_bytes_used = dev->bytes_used;
6983 		}
6984 	}
6985 	unlock_chunks(root);
6986 }
6987 
6988 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
6989 {
6990 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6991 	while (fs_devices) {
6992 		fs_devices->fs_info = fs_info;
6993 		fs_devices = fs_devices->seed;
6994 	}
6995 }
6996 
6997 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
6998 {
6999 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7000 	while (fs_devices) {
7001 		fs_devices->fs_info = NULL;
7002 		fs_devices = fs_devices->seed;
7003 	}
7004 }
7005 
7006 static void btrfs_close_one_device(struct btrfs_device *device)
7007 {
7008 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
7009 	struct btrfs_device *new_device;
7010 	struct rcu_string *name;
7011 
7012 	if (device->bdev)
7013 		fs_devices->open_devices--;
7014 
7015 	if (device->writeable &&
7016 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
7017 		list_del_init(&device->dev_alloc_list);
7018 		fs_devices->rw_devices--;
7019 	}
7020 
7021 	if (device->missing)
7022 		fs_devices->missing_devices--;
7023 
7024 	new_device = btrfs_alloc_device(NULL, &device->devid,
7025 					device->uuid);
7026 	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
7027 
7028 	/* Safe because we are under uuid_mutex */
7029 	if (device->name) {
7030 		name = rcu_string_strdup(device->name->str, GFP_NOFS);
7031 		BUG_ON(!name); /* -ENOMEM */
7032 		rcu_assign_pointer(new_device->name, name);
7033 	}
7034 
7035 	list_replace_rcu(&device->dev_list, &new_device->dev_list);
7036 	new_device->fs_devices = device->fs_devices;
7037 
7038 	call_rcu(&device->rcu, free_device);
7039 }
7040