xref: /openbmc/linux/fs/btrfs/volumes.c (revision abfbd895)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <linux/semaphore.h>
30 #include <asm/div64.h>
31 #include "ctree.h"
32 #include "extent_map.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "print-tree.h"
36 #include "volumes.h"
37 #include "raid56.h"
38 #include "async-thread.h"
39 #include "check-integrity.h"
40 #include "rcu-string.h"
41 #include "math.h"
42 #include "dev-replace.h"
43 #include "sysfs.h"
44 
45 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
46 	[BTRFS_RAID_RAID10] = {
47 		.sub_stripes	= 2,
48 		.dev_stripes	= 1,
49 		.devs_max	= 0,	/* 0 == as many as possible */
50 		.devs_min	= 4,
51 		.tolerated_failures = 1,
52 		.devs_increment	= 2,
53 		.ncopies	= 2,
54 	},
55 	[BTRFS_RAID_RAID1] = {
56 		.sub_stripes	= 1,
57 		.dev_stripes	= 1,
58 		.devs_max	= 2,
59 		.devs_min	= 2,
60 		.tolerated_failures = 1,
61 		.devs_increment	= 2,
62 		.ncopies	= 2,
63 	},
64 	[BTRFS_RAID_DUP] = {
65 		.sub_stripes	= 1,
66 		.dev_stripes	= 2,
67 		.devs_max	= 1,
68 		.devs_min	= 1,
69 		.tolerated_failures = 0,
70 		.devs_increment	= 1,
71 		.ncopies	= 2,
72 	},
73 	[BTRFS_RAID_RAID0] = {
74 		.sub_stripes	= 1,
75 		.dev_stripes	= 1,
76 		.devs_max	= 0,
77 		.devs_min	= 2,
78 		.tolerated_failures = 0,
79 		.devs_increment	= 1,
80 		.ncopies	= 1,
81 	},
82 	[BTRFS_RAID_SINGLE] = {
83 		.sub_stripes	= 1,
84 		.dev_stripes	= 1,
85 		.devs_max	= 1,
86 		.devs_min	= 1,
87 		.tolerated_failures = 0,
88 		.devs_increment	= 1,
89 		.ncopies	= 1,
90 	},
91 	[BTRFS_RAID_RAID5] = {
92 		.sub_stripes	= 1,
93 		.dev_stripes	= 1,
94 		.devs_max	= 0,
95 		.devs_min	= 2,
96 		.tolerated_failures = 1,
97 		.devs_increment	= 1,
98 		.ncopies	= 2,
99 	},
100 	[BTRFS_RAID_RAID6] = {
101 		.sub_stripes	= 1,
102 		.dev_stripes	= 1,
103 		.devs_max	= 0,
104 		.devs_min	= 3,
105 		.tolerated_failures = 2,
106 		.devs_increment	= 1,
107 		.ncopies	= 3,
108 	},
109 };
110 
111 const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
112 	[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
113 	[BTRFS_RAID_RAID1]  = BTRFS_BLOCK_GROUP_RAID1,
114 	[BTRFS_RAID_DUP]    = BTRFS_BLOCK_GROUP_DUP,
115 	[BTRFS_RAID_RAID0]  = BTRFS_BLOCK_GROUP_RAID0,
116 	[BTRFS_RAID_SINGLE] = 0,
117 	[BTRFS_RAID_RAID5]  = BTRFS_BLOCK_GROUP_RAID5,
118 	[BTRFS_RAID_RAID6]  = BTRFS_BLOCK_GROUP_RAID6,
119 };
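
/*
 * A minimal sketch of how the two tables above are typically consumed:
 * map a block-group profile bit to its BTRFS_RAID_* index, then read
 * the attributes, e.g. btrfs_raid_array[idx].ncopies.  The helper below
 * is illustrative only and is not used elsewhere in this file
 * (__maybe_unused keeps the sketch warning-free).
 */
static int __maybe_unused example_bg_flags_to_raid_index(u64 flags)
{
	int i;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		/* the SINGLE entry is 0, matching "no profile bit set" */
		if (btrfs_raid_group[i] ==
		    (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK))
			return i;
	}
	return BTRFS_RAID_SINGLE;
}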
120 
121 static int init_first_rw_device(struct btrfs_trans_handle *trans,
122 				struct btrfs_root *root,
123 				struct btrfs_device *device);
124 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
125 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
126 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
127 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
128 
129 DEFINE_MUTEX(uuid_mutex);
130 static LIST_HEAD(fs_uuids);
131 struct list_head *btrfs_get_fs_uuids(void)
132 {
133 	return &fs_uuids;
134 }
135 
136 static struct btrfs_fs_devices *__alloc_fs_devices(void)
137 {
138 	struct btrfs_fs_devices *fs_devs;
139 
140 	fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
141 	if (!fs_devs)
142 		return ERR_PTR(-ENOMEM);
143 
144 	mutex_init(&fs_devs->device_list_mutex);
145 
146 	INIT_LIST_HEAD(&fs_devs->devices);
147 	INIT_LIST_HEAD(&fs_devs->resized_devices);
148 	INIT_LIST_HEAD(&fs_devs->alloc_list);
149 	INIT_LIST_HEAD(&fs_devs->list);
150 
151 	return fs_devs;
152 }
153 
154 /**
155  * alloc_fs_devices - allocate struct btrfs_fs_devices
156  * @fsid:	a pointer to UUID for this FS.  If NULL a new UUID is
157  *		generated.
158  *
159  * Return: a pointer to a new &struct btrfs_fs_devices on success;
160  * ERR_PTR() on error.  Returned struct is not linked onto any lists and
161  * can be destroyed with kfree() right away.
162  */
163 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
164 {
165 	struct btrfs_fs_devices *fs_devs;
166 
167 	fs_devs = __alloc_fs_devices();
168 	if (IS_ERR(fs_devs))
169 		return fs_devs;
170 
171 	if (fsid)
172 		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
173 	else
174 		generate_random_uuid(fs_devs->fsid);
175 
176 	return fs_devs;
177 }
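
/*
 * Usage sketch for the allocator above (illustrative; the real callers
 * are device_list_add() and clone_fs_devices() below).  The ERR_PTR
 * convention means the result must be checked with IS_ERR(), never
 * compared against NULL, and linking onto fs_uuids normally happens
 * under uuid_mutex.
 */
static struct btrfs_fs_devices * __maybe_unused
example_alloc_and_link(const u8 *fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = alloc_fs_devices(fsid);	/* NULL fsid => random uuid */
	if (IS_ERR(fs_devs))
		return fs_devs;			/* ERR_PTR(-ENOMEM) */

	/* the new struct is unlinked; callers add it to fs_uuids themselves */
	list_add(&fs_devs->list, &fs_uuids);
	return fs_devs;
}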
178 
179 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
180 {
181 	struct btrfs_device *device;
182 	WARN_ON(fs_devices->opened);
183 	while (!list_empty(&fs_devices->devices)) {
184 		device = list_entry(fs_devices->devices.next,
185 				    struct btrfs_device, dev_list);
186 		list_del(&device->dev_list);
187 		rcu_string_free(device->name);
188 		kfree(device);
189 	}
190 	kfree(fs_devices);
191 }
192 
193 static void btrfs_kobject_uevent(struct block_device *bdev,
194 				 enum kobject_action action)
195 {
196 	int ret;
197 
198 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
199 	if (ret)
200 		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
201 			action,
202 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
203 			&disk_to_dev(bdev->bd_disk)->kobj);
204 }
205 
206 void btrfs_cleanup_fs_uuids(void)
207 {
208 	struct btrfs_fs_devices *fs_devices;
209 
210 	while (!list_empty(&fs_uuids)) {
211 		fs_devices = list_entry(fs_uuids.next,
212 					struct btrfs_fs_devices, list);
213 		list_del(&fs_devices->list);
214 		free_fs_devices(fs_devices);
215 	}
216 }
217 
218 static struct btrfs_device *__alloc_device(void)
219 {
220 	struct btrfs_device *dev;
221 
222 	dev = kzalloc(sizeof(*dev), GFP_NOFS);
223 	if (!dev)
224 		return ERR_PTR(-ENOMEM);
225 
226 	INIT_LIST_HEAD(&dev->dev_list);
227 	INIT_LIST_HEAD(&dev->dev_alloc_list);
228 	INIT_LIST_HEAD(&dev->resized_list);
229 
230 	spin_lock_init(&dev->io_lock);
231 
232 	spin_lock_init(&dev->reada_lock);
233 	atomic_set(&dev->reada_in_flight, 0);
234 	atomic_set(&dev->dev_stats_ccnt, 0);
235 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
236 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
237 
238 	return dev;
239 }
240 
241 static noinline struct btrfs_device *__find_device(struct list_head *head,
242 						   u64 devid, u8 *uuid)
243 {
244 	struct btrfs_device *dev;
245 
246 	list_for_each_entry(dev, head, dev_list) {
247 		if (dev->devid == devid &&
248 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
249 			return dev;
250 		}
251 	}
252 	return NULL;
253 }
254 
255 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
256 {
257 	struct btrfs_fs_devices *fs_devices;
258 
259 	list_for_each_entry(fs_devices, &fs_uuids, list) {
260 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
261 			return fs_devices;
262 	}
263 	return NULL;
264 }
265 
266 static int
267 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
268 		      int flush, struct block_device **bdev,
269 		      struct buffer_head **bh)
270 {
271 	int ret;
272 
273 	*bdev = blkdev_get_by_path(device_path, flags, holder);
274 
275 	if (IS_ERR(*bdev)) {
276 		ret = PTR_ERR(*bdev);
277 		goto error;
278 	}
279 
280 	if (flush)
281 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
282 	ret = set_blocksize(*bdev, 4096);
283 	if (ret) {
284 		blkdev_put(*bdev, flags);
285 		goto error;
286 	}
287 	invalidate_bdev(*bdev);
288 	*bh = btrfs_read_dev_super(*bdev);
289 	if (IS_ERR(*bh)) {
290 		ret = PTR_ERR(*bh);
291 		blkdev_put(*bdev, flags);
292 		goto error;
293 	}
294 
295 	return 0;
296 
297 error:
298 	*bdev = NULL;
299 	*bh = NULL;
300 	return ret;
301 }
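
/*
 * Usage sketch for the helper above, mirroring how btrfs_rm_device()
 * calls it: on success the caller owns both the buffer_head and the
 * exclusively opened block device and must release them, in this order.
 * The function name here is illustrative only.
 */
static int __maybe_unused example_peek_super(const char *path, void *holder)
{
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int ret;

	ret = btrfs_get_bdev_and_sb(path, FMODE_READ | FMODE_EXCL, holder,
				    0 /* no flush */, &bdev, &bh);
	if (ret)
		return ret;	/* *bdev and *bh are NULL on failure */

	disk_super = (struct btrfs_super_block *)bh->b_data;
	/* ... inspect disk_super ... */

	brelse(bh);
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}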
302 
303 static void requeue_list(struct btrfs_pending_bios *pending_bios,
304 			struct bio *head, struct bio *tail)
305 {
306 
307 	struct bio *old_head;
308 
309 	old_head = pending_bios->head;
310 	pending_bios->head = head;
311 	if (pending_bios->tail)
312 		tail->bi_next = old_head;
313 	else
314 		pending_bios->tail = tail;
315 }
316 
317 /*
318  * we try to collect pending bios for a device so we don't get a large
319  * number of procs sending bios down to the same device.  This greatly
320  * improves the scheduler's ability to collect and merge the bios.
321  *
322  * But, it also turns into a long list of bios to process and that is sure
323  * to eventually make the worker thread block.  The solution here is to
324  * make some progress and then put this work struct back at the end of
325  * the list if the block device is congested.  This way, multiple devices
326  * can make progress from a single worker thread.
327  */
328 static noinline void run_scheduled_bios(struct btrfs_device *device)
329 {
330 	struct bio *pending;
331 	struct backing_dev_info *bdi;
332 	struct btrfs_fs_info *fs_info;
333 	struct btrfs_pending_bios *pending_bios;
334 	struct bio *tail;
335 	struct bio *cur;
336 	int again = 0;
337 	unsigned long num_run;
338 	unsigned long batch_run = 0;
339 	unsigned long limit;
340 	unsigned long last_waited = 0;
341 	int force_reg = 0;
342 	int sync_pending = 0;
343 	struct blk_plug plug;
344 
345 	/*
346 	 * this function runs all the bios we've collected for
347 	 * a particular device.  We don't want to wander off to
348 	 * another device without first sending all of these down.
349 	 * So, set up a plug here and finish it off before we return
350 	 */
351 	blk_start_plug(&plug);
352 
353 	bdi = blk_get_backing_dev_info(device->bdev);
354 	fs_info = device->dev_root->fs_info;
355 	limit = btrfs_async_submit_limit(fs_info);
356 	limit = limit * 2 / 3;
357 
358 loop:
359 	spin_lock(&device->io_lock);
360 
361 loop_lock:
362 	num_run = 0;
363 
364 	/* take all the bios off the list at once and process them
365 	 * later on (without the lock held).  But, remember the
366 	 * tail and other pointers so the bios can be properly reinserted
367 	 * into the list if we hit congestion
368 	 */
369 	if (!force_reg && device->pending_sync_bios.head) {
370 		pending_bios = &device->pending_sync_bios;
371 		force_reg = 1;
372 	} else {
373 		pending_bios = &device->pending_bios;
374 		force_reg = 0;
375 	}
376 
377 	pending = pending_bios->head;
378 	tail = pending_bios->tail;
379 	WARN_ON(pending && !tail);
380 
381 	/*
382 	 * if pending was null this time around, no bios need processing
383 	 * at all and we can stop.  Otherwise it'll loop back up again
384 	 * and do an additional check so no bios are missed.
385 	 *
386 	 * device->running_pending is used to synchronize with the
387 	 * schedule_bio code.
388 	 */
389 	if (device->pending_sync_bios.head == NULL &&
390 	    device->pending_bios.head == NULL) {
391 		again = 0;
392 		device->running_pending = 0;
393 	} else {
394 		again = 1;
395 		device->running_pending = 1;
396 	}
397 
398 	pending_bios->head = NULL;
399 	pending_bios->tail = NULL;
400 
401 	spin_unlock(&device->io_lock);
402 
403 	while (pending) {
404 
405 		rmb();
406 		/* we want to work on both lists, but do more bios on the
407 		 * sync list than the regular list
408 		 */
409 		if ((num_run > 32 &&
410 		    pending_bios != &device->pending_sync_bios &&
411 		    device->pending_sync_bios.head) ||
412 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
413 		    device->pending_bios.head)) {
414 			spin_lock(&device->io_lock);
415 			requeue_list(pending_bios, pending, tail);
416 			goto loop_lock;
417 		}
418 
419 		cur = pending;
420 		pending = pending->bi_next;
421 		cur->bi_next = NULL;
422 
423 		/*
424 		 * atomic_dec_return implies a barrier for waitqueue_active
425 		 */
426 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
427 		    waitqueue_active(&fs_info->async_submit_wait))
428 			wake_up(&fs_info->async_submit_wait);
429 
430 		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
431 
432 		/*
433 		 * if we're doing the sync list, record that our
434 		 * plug has some sync requests on it
435 		 *
436 		 * If we're doing the regular list and there are
437 		 * sync requests sitting around, unplug before
438 		 * we add more
439 		 */
440 		if (pending_bios == &device->pending_sync_bios) {
441 			sync_pending = 1;
442 		} else if (sync_pending) {
443 			blk_finish_plug(&plug);
444 			blk_start_plug(&plug);
445 			sync_pending = 0;
446 		}
447 
448 		btrfsic_submit_bio(cur->bi_rw, cur);
449 		num_run++;
450 		batch_run++;
451 
452 		cond_resched();
453 
454 		/*
455 		 * we made progress, there is more work to do and the bdi
456 		 * is now congested.  Back off and let other work structs
457 		 * run instead
458 		 */
459 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
460 		    fs_info->fs_devices->open_devices > 1) {
461 			struct io_context *ioc;
462 
463 			ioc = current->io_context;
464 
465 			/*
466 			 * the main goal here is that we don't want to
467 			 * block if we're going to be able to submit
468 			 * more requests without blocking.
469 			 *
470 			 * This code does two great things, it pokes into
471 			 * the elevator code from a filesystem _and_
472 			 * it makes assumptions about how batching works.
473 			 */
474 			if (ioc && ioc->nr_batch_requests > 0 &&
475 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
476 			    (last_waited == 0 ||
477 			     ioc->last_waited == last_waited)) {
478 				/*
479 				 * we want to go through our batch of
480 				 * requests and stop.  So, we copy out
481 				 * the ioc->last_waited time and test
482 				 * against it before looping
483 				 */
484 				last_waited = ioc->last_waited;
485 				cond_resched();
486 				continue;
487 			}
488 			spin_lock(&device->io_lock);
489 			requeue_list(pending_bios, pending, tail);
490 			device->running_pending = 1;
491 
492 			spin_unlock(&device->io_lock);
493 			btrfs_queue_work(fs_info->submit_workers,
494 					 &device->work);
495 			goto done;
496 		}
497 		/* unplug every 64 requests just for good measure */
498 		if (batch_run % 64 == 0) {
499 			blk_finish_plug(&plug);
500 			blk_start_plug(&plug);
501 			sync_pending = 0;
502 		}
503 	}
504 
505 	cond_resched();
506 	if (again)
507 		goto loop;
508 
509 	spin_lock(&device->io_lock);
510 	if (device->pending_bios.head || device->pending_sync_bios.head)
511 		goto loop_lock;
512 	spin_unlock(&device->io_lock);
513 
514 done:
515 	blk_finish_plug(&plug);
516 }
517 
518 static void pending_bios_fn(struct btrfs_work *work)
519 {
520 	struct btrfs_device *device;
521 
522 	device = container_of(work, struct btrfs_device, work);
523 	run_scheduled_bios(device);
524 }
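
/*
 * The back-off described above run_scheduled_bios() works because a
 * congested device simply requeues its own work item and returns; the
 * sketch below shows, in isolation, the same btrfs_queue_work() call
 * that the congested path makes.
 */
static void __maybe_unused example_requeue_device(struct btrfs_fs_info *fs_info,
						  struct btrfs_device *device)
{
	/* let other devices' work items run before this one is retried */
	btrfs_queue_work(fs_info->submit_workers, &device->work);
}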
525 
526 
527 void btrfs_free_stale_device(struct btrfs_device *cur_dev)
528 {
529 	struct btrfs_fs_devices *fs_devs;
530 	struct btrfs_device *dev;
531 
532 	if (!cur_dev->name)
533 		return;
534 
535 	list_for_each_entry(fs_devs, &fs_uuids, list) {
536 		int del = 1;
537 
538 		if (fs_devs->opened)
539 			continue;
540 		if (fs_devs->seeding)
541 			continue;
542 
543 		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
544 
545 			if (dev == cur_dev)
546 				continue;
547 			if (!dev->name)
548 				continue;
549 
550 			/*
551 			 * Todo: this won't be enough. What if the same device
552 			 * comes back (with a new uuid) via its mapper path?
553 			 * For now this helps, since an admin will typically
554 			 * use either the mapper or the non-mapper path throughout.
555 			 */
556 			rcu_read_lock();
557 			del = strcmp(rcu_str_deref(dev->name),
558 						rcu_str_deref(cur_dev->name));
559 			rcu_read_unlock();
560 			if (!del)
561 				break;
562 		}
563 
564 		if (!del) {
565 			/* delete the stale device */
566 			if (fs_devs->num_devices == 1) {
567 				btrfs_sysfs_remove_fsid(fs_devs);
568 				list_del(&fs_devs->list);
569 				free_fs_devices(fs_devs);
570 			} else {
571 				fs_devs->num_devices--;
572 				list_del(&dev->dev_list);
573 				rcu_string_free(dev->name);
574 				kfree(dev);
575 			}
576 			break;
577 		}
578 	}
579 }
580 
581 /*
582  * Add new device to list of registered devices
583  *
584  * Returns:
585  * 1   - first time device is seen
586  * 0   - device already known
587  * < 0 - error
588  */
589 static noinline int device_list_add(const char *path,
590 			   struct btrfs_super_block *disk_super,
591 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
592 {
593 	struct btrfs_device *device;
594 	struct btrfs_fs_devices *fs_devices;
595 	struct rcu_string *name;
596 	int ret = 0;
597 	u64 found_transid = btrfs_super_generation(disk_super);
598 
599 	fs_devices = find_fsid(disk_super->fsid);
600 	if (!fs_devices) {
601 		fs_devices = alloc_fs_devices(disk_super->fsid);
602 		if (IS_ERR(fs_devices))
603 			return PTR_ERR(fs_devices);
604 
605 		list_add(&fs_devices->list, &fs_uuids);
606 
607 		device = NULL;
608 	} else {
609 		device = __find_device(&fs_devices->devices, devid,
610 				       disk_super->dev_item.uuid);
611 	}
612 
613 	if (!device) {
614 		if (fs_devices->opened)
615 			return -EBUSY;
616 
617 		device = btrfs_alloc_device(NULL, &devid,
618 					    disk_super->dev_item.uuid);
619 		if (IS_ERR(device)) {
620 			/* we can safely leave the fs_devices entry around */
621 			return PTR_ERR(device);
622 		}
623 
624 		name = rcu_string_strdup(path, GFP_NOFS);
625 		if (!name) {
626 			kfree(device);
627 			return -ENOMEM;
628 		}
629 		rcu_assign_pointer(device->name, name);
630 
631 		mutex_lock(&fs_devices->device_list_mutex);
632 		list_add_rcu(&device->dev_list, &fs_devices->devices);
633 		fs_devices->num_devices++;
634 		mutex_unlock(&fs_devices->device_list_mutex);
635 
636 		ret = 1;
637 		device->fs_devices = fs_devices;
638 	} else if (!device->name || strcmp(device->name->str, path)) {
639 		/*
640 		 * When FS is already mounted.
641 		 * 1. If you are here and if the device->name is NULL that
642 		 *    means this device was missing at time of FS mount.
643 		 * 2. If you are here and if the device->name is different
644 		 *    from 'path' that means either
645 		 *      a. The same device disappeared and reappeared with
646 		 *         different name. or
647 		 *      b. The missing-disk-which-was-replaced, has
648 		 *         reappeared now.
649 		 *
650 		 * We must allow 1 and 2a above. But 2b would be spurious
651 		 * and unintentional.
652 		 *
653 		 * Further, in case of 1 and 2a above, the disk at 'path'
654 		 * would have missed some transactions while it was away, and
655 		 * in case of 2a the stale bdev has to be updated as well.
656 		 * 2b must not be allowed at any time.
657 		 */
658 
659 		/*
660 		 * For now, we do allow update to btrfs_fs_device through the
661 		 * btrfs dev scan cli after FS has been mounted.  We're still
662 		 * tracking a problem where systems fail mount by subvolume id
663 		 * when we reject replacement on a mounted FS.
664 		 */
665 		if (!fs_devices->opened && found_transid < device->generation) {
666 			/*
667 			 * That is if the FS is _not_ mounted and if you
668 			 * are here, that means there is more than one
669 			 * disk with the same uuid and devid. We keep the one
670 			 * with the larger generation number, or the last-in
671 			 * if generations are equal.
672 			 */
673 			return -EEXIST;
674 		}
675 
676 		name = rcu_string_strdup(path, GFP_NOFS);
677 		if (!name)
678 			return -ENOMEM;
679 		rcu_string_free(device->name);
680 		rcu_assign_pointer(device->name, name);
681 		if (device->missing) {
682 			fs_devices->missing_devices--;
683 			device->missing = 0;
684 		}
685 	}
686 
687 	/*
688 	 * Unmount does not free the btrfs_device struct but would zero
689 	 * generation along with most of the other members. So just update
690 	 * it back. We need it to pick the disk with the largest generation
691 	 * (as above).
692 	 */
693 	if (!fs_devices->opened)
694 		device->generation = found_transid;
695 
696 	/*
697 	 * if there is a new btrfs on an already registered device,
698 	 * then remove the stale device entry.
699 	 */
700 	btrfs_free_stale_device(device);
701 
702 	*fs_devices_ret = fs_devices;
703 
704 	return ret;
705 }
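
/*
 * A condensed view of how a caller consumes the three-way return of
 * device_list_add() (btrfs_scan_one_device() below is the real caller;
 * this helper is illustrative and assumes uuid_mutex is held):
 */
static int __maybe_unused example_register_device(const char *path,
					struct btrfs_super_block *disk_super,
					u64 devid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = device_list_add(path, disk_super, devid, &fs_devices);
	if (ret < 0)
		return ret;		/* -EBUSY, -EEXIST, -ENOMEM, ... */
	if (ret > 0)
		pr_info("BTRFS: first sighting of devid %llu\n", devid);
	return 0;			/* 0 == device already known */
}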
706 
707 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
708 {
709 	struct btrfs_fs_devices *fs_devices;
710 	struct btrfs_device *device;
711 	struct btrfs_device *orig_dev;
712 
713 	fs_devices = alloc_fs_devices(orig->fsid);
714 	if (IS_ERR(fs_devices))
715 		return fs_devices;
716 
717 	mutex_lock(&orig->device_list_mutex);
718 	fs_devices->total_devices = orig->total_devices;
719 
720 	/* We hold the volume lock, so it is safe to get the devices. */
721 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
722 		struct rcu_string *name;
723 
724 		device = btrfs_alloc_device(NULL, &orig_dev->devid,
725 					    orig_dev->uuid);
726 		if (IS_ERR(device))
727 			goto error;
728 
729 		/*
730 		 * This is ok to do without rcu read locked because we hold the
731 		 * uuid mutex so nothing we touch in here is going to disappear.
732 		 */
733 		if (orig_dev->name) {
734 			name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
735 			if (!name) {
736 				kfree(device);
737 				goto error;
738 			}
739 			rcu_assign_pointer(device->name, name);
740 		}
741 
742 		list_add(&device->dev_list, &fs_devices->devices);
743 		device->fs_devices = fs_devices;
744 		fs_devices->num_devices++;
745 	}
746 	mutex_unlock(&orig->device_list_mutex);
747 	return fs_devices;
748 error:
749 	mutex_unlock(&orig->device_list_mutex);
750 	free_fs_devices(fs_devices);
751 	return ERR_PTR(-ENOMEM);
752 }
753 
754 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
755 {
756 	struct btrfs_device *device, *next;
757 	struct btrfs_device *latest_dev = NULL;
758 
759 	mutex_lock(&uuid_mutex);
760 again:
761 	/* This is the initialized path, so it is safe to release the devices. */
762 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
763 		if (device->in_fs_metadata) {
764 			if (!device->is_tgtdev_for_dev_replace &&
765 			    (!latest_dev ||
766 			     device->generation > latest_dev->generation)) {
767 				latest_dev = device;
768 			}
769 			continue;
770 		}
771 
772 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
773 			/*
774 			 * In the first step, keep the device which has
775 			 * the correct fsid and the devid that is used
776 			 * for the dev_replace procedure.
777 			 * In the second step, the dev_replace state is
778 			 * read from the device tree and it is known
779 			 * whether the procedure is really active or
780 			 * not, which means whether this device is
781 			 * used or whether it should be removed.
782 			 */
783 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
784 				continue;
785 			}
786 		}
787 		if (device->bdev) {
788 			blkdev_put(device->bdev, device->mode);
789 			device->bdev = NULL;
790 			fs_devices->open_devices--;
791 		}
792 		if (device->writeable) {
793 			list_del_init(&device->dev_alloc_list);
794 			device->writeable = 0;
795 			if (!device->is_tgtdev_for_dev_replace)
796 				fs_devices->rw_devices--;
797 		}
798 		list_del_init(&device->dev_list);
799 		fs_devices->num_devices--;
800 		rcu_string_free(device->name);
801 		kfree(device);
802 	}
803 
804 	if (fs_devices->seed) {
805 		fs_devices = fs_devices->seed;
806 		goto again;
807 	}
808 
809 	fs_devices->latest_bdev = latest_dev->bdev;
810 
811 	mutex_unlock(&uuid_mutex);
812 }
813 
814 static void __free_device(struct work_struct *work)
815 {
816 	struct btrfs_device *device;
817 
818 	device = container_of(work, struct btrfs_device, rcu_work);
819 
820 	if (device->bdev)
821 		blkdev_put(device->bdev, device->mode);
822 
823 	rcu_string_free(device->name);
824 	kfree(device);
825 }
826 
827 static void free_device(struct rcu_head *head)
828 {
829 	struct btrfs_device *device;
830 
831 	device = container_of(head, struct btrfs_device, rcu);
832 
833 	INIT_WORK(&device->rcu_work, __free_device);
834 	schedule_work(&device->rcu_work);
835 }
836 
837 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
838 {
839 	struct btrfs_device *device, *tmp;
840 
841 	if (--fs_devices->opened > 0)
842 		return 0;
843 
844 	mutex_lock(&fs_devices->device_list_mutex);
845 	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
846 		btrfs_close_one_device(device);
847 	}
848 	mutex_unlock(&fs_devices->device_list_mutex);
849 
850 	WARN_ON(fs_devices->open_devices);
851 	WARN_ON(fs_devices->rw_devices);
852 	fs_devices->opened = 0;
853 	fs_devices->seeding = 0;
854 
855 	return 0;
856 }
857 
858 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
859 {
860 	struct btrfs_fs_devices *seed_devices = NULL;
861 	int ret;
862 
863 	mutex_lock(&uuid_mutex);
864 	ret = __btrfs_close_devices(fs_devices);
865 	if (!fs_devices->opened) {
866 		seed_devices = fs_devices->seed;
867 		fs_devices->seed = NULL;
868 	}
869 	mutex_unlock(&uuid_mutex);
870 
871 	while (seed_devices) {
872 		fs_devices = seed_devices;
873 		seed_devices = fs_devices->seed;
874 		__btrfs_close_devices(fs_devices);
875 		free_fs_devices(fs_devices);
876 	}
877 	/*
878 	 * Wait for rcu kworkers under __btrfs_close_devices
879 	 * to finish all blkdev_puts so the device is really
880 	 * freed when umount is done.
881 	 */
882 	rcu_barrier();
883 	return ret;
884 }
885 
886 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
887 				fmode_t flags, void *holder)
888 {
889 	struct request_queue *q;
890 	struct block_device *bdev;
891 	struct list_head *head = &fs_devices->devices;
892 	struct btrfs_device *device;
893 	struct btrfs_device *latest_dev = NULL;
894 	struct buffer_head *bh;
895 	struct btrfs_super_block *disk_super;
896 	u64 devid;
897 	int seeding = 1;
898 	int ret = 0;
899 
900 	flags |= FMODE_EXCL;
901 
902 	list_for_each_entry(device, head, dev_list) {
903 		if (device->bdev)
904 			continue;
905 		if (!device->name)
906 			continue;
907 
908 		/* Just open everything we can; ignore failures here */
909 		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
910 					    &bdev, &bh))
911 			continue;
912 
913 		disk_super = (struct btrfs_super_block *)bh->b_data;
914 		devid = btrfs_stack_device_id(&disk_super->dev_item);
915 		if (devid != device->devid)
916 			goto error_brelse;
917 
918 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
919 			   BTRFS_UUID_SIZE))
920 			goto error_brelse;
921 
922 		device->generation = btrfs_super_generation(disk_super);
923 		if (!latest_dev ||
924 		    device->generation > latest_dev->generation)
925 			latest_dev = device;
926 
927 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
928 			device->writeable = 0;
929 		} else {
930 			device->writeable = !bdev_read_only(bdev);
931 			seeding = 0;
932 		}
933 
934 		q = bdev_get_queue(bdev);
935 		if (blk_queue_discard(q))
936 			device->can_discard = 1;
937 
938 		device->bdev = bdev;
939 		device->in_fs_metadata = 0;
940 		device->mode = flags;
941 
942 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
943 			fs_devices->rotating = 1;
944 
945 		fs_devices->open_devices++;
946 		if (device->writeable &&
947 		    device->devid != BTRFS_DEV_REPLACE_DEVID) {
948 			fs_devices->rw_devices++;
949 			list_add(&device->dev_alloc_list,
950 				 &fs_devices->alloc_list);
951 		}
952 		brelse(bh);
953 		continue;
954 
955 error_brelse:
956 		brelse(bh);
957 		blkdev_put(bdev, flags);
958 		continue;
959 	}
960 	if (fs_devices->open_devices == 0) {
961 		ret = -EINVAL;
962 		goto out;
963 	}
964 	fs_devices->seeding = seeding;
965 	fs_devices->opened = 1;
966 	fs_devices->latest_bdev = latest_dev->bdev;
967 	fs_devices->total_rw_bytes = 0;
968 out:
969 	return ret;
970 }
971 
972 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
973 		       fmode_t flags, void *holder)
974 {
975 	int ret;
976 
977 	mutex_lock(&uuid_mutex);
978 	if (fs_devices->opened) {
979 		fs_devices->opened++;
980 		ret = 0;
981 	} else {
982 		ret = __btrfs_open_devices(fs_devices, flags, holder);
983 	}
984 	mutex_unlock(&uuid_mutex);
985 	return ret;
986 }
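
/*
 * The opened count makes open/close strictly reference counted; a
 * sketch of the expected pairing (illustrative helper, not used in
 * this file):
 */
static int __maybe_unused example_open_use_close(
		struct btrfs_fs_devices *fs_devices,
		fmode_t flags, void *holder)
{
	int ret;

	ret = btrfs_open_devices(fs_devices, flags, holder);
	if (ret)
		return ret;

	/* ... use fs_devices->latest_bdev while the count is held ... */

	return btrfs_close_devices(fs_devices);	/* drops one opened ref */
}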
987 
988 /*
989  * Look for a btrfs signature on a device. This may be called out of the mount path
990  * and we are not allowed to call set_blocksize during the scan. The superblock
991  * is read via the pagecache.
992  */
993 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
994 			  struct btrfs_fs_devices **fs_devices_ret)
995 {
996 	struct btrfs_super_block *disk_super;
997 	struct block_device *bdev;
998 	struct page *page;
999 	void *p;
1000 	int ret = -EINVAL;
1001 	u64 devid;
1002 	u64 transid;
1003 	u64 total_devices;
1004 	u64 bytenr;
1005 	pgoff_t index;
1006 
1007 	/*
1008 	 * we would like to check all the supers, but that would make
1009 	 * a btrfs mount succeed after a mkfs from a different FS.
1010 	 * So, we need to add a special mount option to scan for
1011 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1012 	 */
1013 	bytenr = btrfs_sb_offset(0);
1014 	flags |= FMODE_EXCL;
1015 	mutex_lock(&uuid_mutex);
1016 
1017 	bdev = blkdev_get_by_path(path, flags, holder);
1018 
1019 	if (IS_ERR(bdev)) {
1020 		ret = PTR_ERR(bdev);
1021 		goto error;
1022 	}
1023 
1024 	/* make sure our super fits in the device */
1025 	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
1026 		goto error_bdev_put;
1027 
1028 	/* make sure our super fits in the page */
1029 	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
1030 		goto error_bdev_put;
1031 
1032 	/* make sure our super doesn't straddle pages on disk */
1033 	index = bytenr >> PAGE_CACHE_SHIFT;
1034 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
1035 		goto error_bdev_put;
1036 
1037 	/* pull in the page with our super */
1038 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1039 				   index, GFP_NOFS);
1040 
1041 	if (IS_ERR_OR_NULL(page))
1042 		goto error_bdev_put;
1043 
1044 	p = kmap(page);
1045 
1046 	/* align our pointer to the offset of the super block */
1047 	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
1048 
1049 	if (btrfs_super_bytenr(disk_super) != bytenr ||
1050 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
1051 		goto error_unmap;
1052 
1053 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1054 	transid = btrfs_super_generation(disk_super);
1055 	total_devices = btrfs_super_num_devices(disk_super);
1056 
1057 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
1058 	if (ret > 0) {
1059 		if (disk_super->label[0]) {
1060 			if (disk_super->label[BTRFS_LABEL_SIZE - 1])
1061 				disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
1062 			printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
1063 		} else {
1064 			printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
1065 		}
1066 
1067 		printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
1068 		ret = 0;
1069 	}
1070 	if (!ret && fs_devices_ret)
1071 		(*fs_devices_ret)->total_devices = total_devices;
1072 
1073 error_unmap:
1074 	kunmap(page);
1075 	page_cache_release(page);
1076 
1077 error_bdev_put:
1078 	blkdev_put(bdev, flags);
1079 error:
1080 	mutex_unlock(&uuid_mutex);
1081 	return ret;
1082 }
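
/*
 * Typical scan-time usage of btrfs_scan_one_device() (the mount path
 * and the "btrfs device scan" ioctl do essentially this; the helper
 * name is illustrative):
 */
static int __maybe_unused example_scan_path(const char *path, void *holder)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	ret = btrfs_scan_one_device(path, FMODE_READ, holder, &fs_devices);
	if (ret)
		return ret;	/* no btrfs signature, or an I/O error */

	/* fs_devices is now registered on the global fs_uuids list */
	return 0;
}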
1083 
1084 /* helper to account the used device space in the range */
1085 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
1086 				   u64 end, u64 *length)
1087 {
1088 	struct btrfs_key key;
1089 	struct btrfs_root *root = device->dev_root;
1090 	struct btrfs_dev_extent *dev_extent;
1091 	struct btrfs_path *path;
1092 	u64 extent_end;
1093 	int ret;
1094 	int slot;
1095 	struct extent_buffer *l;
1096 
1097 	*length = 0;
1098 
1099 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
1100 		return 0;
1101 
1102 	path = btrfs_alloc_path();
1103 	if (!path)
1104 		return -ENOMEM;
1105 	path->reada = 2;
1106 
1107 	key.objectid = device->devid;
1108 	key.offset = start;
1109 	key.type = BTRFS_DEV_EXTENT_KEY;
1110 
1111 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1112 	if (ret < 0)
1113 		goto out;
1114 	if (ret > 0) {
1115 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1116 		if (ret < 0)
1117 			goto out;
1118 	}
1119 
1120 	while (1) {
1121 		l = path->nodes[0];
1122 		slot = path->slots[0];
1123 		if (slot >= btrfs_header_nritems(l)) {
1124 			ret = btrfs_next_leaf(root, path);
1125 			if (ret == 0)
1126 				continue;
1127 			if (ret < 0)
1128 				goto out;
1129 
1130 			break;
1131 		}
1132 		btrfs_item_key_to_cpu(l, &key, slot);
1133 
1134 		if (key.objectid < device->devid)
1135 			goto next;
1136 
1137 		if (key.objectid > device->devid)
1138 			break;
1139 
1140 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1141 			goto next;
1142 
1143 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1144 		extent_end = key.offset + btrfs_dev_extent_length(l,
1145 								  dev_extent);
1146 		if (key.offset <= start && extent_end > end) {
1147 			*length = end - start + 1;
1148 			break;
1149 		} else if (key.offset <= start && extent_end > start)
1150 			*length += extent_end - start;
1151 		else if (key.offset > start && extent_end <= end)
1152 			*length += extent_end - key.offset;
1153 		else if (key.offset > start && key.offset <= end) {
1154 			*length += end - key.offset + 1;
1155 			break;
1156 		} else if (key.offset > end)
1157 			break;
1158 
1159 next:
1160 		path->slots[0]++;
1161 	}
1162 	ret = 0;
1163 out:
1164 	btrfs_free_path(path);
1165 	return ret;
1166 }
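
/*
 * The four overlap cases in the loop above amount to a clamped
 * intersection of the inclusive range [start, end] with the half-open
 * extent [key.offset, extent_end).  A worked sketch of the same
 * arithmetic (hypothetical helper, not used by the code above):
 */
static u64 __maybe_unused example_overlap_len(u64 start, u64 end,
					      u64 ext_start, u64 ext_end)
{
	u64 lo = max(start, ext_start);
	u64 hi = min(end + 1, ext_end); /* end is inclusive, ext_end is not */

	return hi > lo ? hi - lo : 0;
}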
1167 
1168 static int contains_pending_extent(struct btrfs_transaction *transaction,
1169 				   struct btrfs_device *device,
1170 				   u64 *start, u64 len)
1171 {
1172 	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
1173 	struct extent_map *em;
1174 	struct list_head *search_list = &fs_info->pinned_chunks;
1175 	int ret = 0;
1176 	u64 physical_start = *start;
1177 
1178 	if (transaction)
1179 		search_list = &transaction->pending_chunks;
1180 again:
1181 	list_for_each_entry(em, search_list, list) {
1182 		struct map_lookup *map;
1183 		int i;
1184 
1185 		map = (struct map_lookup *)em->bdev;
1186 		for (i = 0; i < map->num_stripes; i++) {
1187 			u64 end;
1188 
1189 			if (map->stripes[i].dev != device)
1190 				continue;
1191 			if (map->stripes[i].physical >= physical_start + len ||
1192 			    map->stripes[i].physical + em->orig_block_len <=
1193 			    physical_start)
1194 				continue;
1195 			/*
1196 			 * Make sure that while processing the pinned list we do
1197 			 * not override our *start with a lower value, because
1198 			 * we can have pinned chunks that fall within this
1199 			 * device hole and that have lower physical addresses
1200 			 * than the pending chunks we processed before. If we
1201 			 * do not take this special care we can end up getting
1202 			 * 2 pending chunks that start at the same physical
1203 			 * device offsets because the end offset of a pinned
1204 			 * chunk can be equal to the start offset of some
1205 			 * pending chunk.
1206 			 */
1207 			end = map->stripes[i].physical + em->orig_block_len;
1208 			if (end > *start) {
1209 				*start = end;
1210 				ret = 1;
1211 			}
1212 		}
1213 	}
1214 	if (search_list != &fs_info->pinned_chunks) {
1215 		search_list = &fs_info->pinned_chunks;
1216 		goto again;
1217 	}
1218 
1219 	return ret;
1220 }
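
/*
 * Callers treat the *start adjustment as a retry cursor.  A sketch of
 * the pattern (find_free_dev_extent_start() below does the same thing,
 * but with a full re-search of the dev extent tree between retries):
 */
static u64 __maybe_unused example_skip_pending(struct btrfs_transaction *trans,
					       struct btrfs_device *device,
					       u64 start, u64 len)
{
	/*
	 * each hit pushes start past a conflicting chunk; re-check until
	 * the candidate hole [start, start + len) is clear
	 */
	while (contains_pending_extent(trans, device, &start, len))
		;
	return start;
}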
1221 
1222 
1223 /*
1224  * find_free_dev_extent_start - find free space in the specified device
1225  * @device:	  the device which we search the free space in
1226  * @num_bytes:	  the size of the free space that we need
1227  * @search_start: the position from which to begin the search
1228  * @start:	  store the start of the free space.
1229  * @len:	  the size of the free space that we find, or the size
1230  *		  of the max free space if we don't find suitable free space
1231  *
1232  * this uses a pretty simple search; the expectation is that it is
1233  * called very infrequently and that a given device has a small number
1234  * of extents
1235  *
1236  * @start is used to store the start of the free space if we find it. But if we
1237  * don't find suitable free space, it will be used to store the start position
1238  * of the max free space.
1239  *
1240  * @len is used to store the size of the free space that we find.
1241  * But if we don't find suitable free space, it is used to store the size of
1242  * the max free space.
1243  */
1244 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1245 			       struct btrfs_device *device, u64 num_bytes,
1246 			       u64 search_start, u64 *start, u64 *len)
1247 {
1248 	struct btrfs_key key;
1249 	struct btrfs_root *root = device->dev_root;
1250 	struct btrfs_dev_extent *dev_extent;
1251 	struct btrfs_path *path;
1252 	u64 hole_size;
1253 	u64 max_hole_start;
1254 	u64 max_hole_size;
1255 	u64 extent_end;
1256 	u64 search_end = device->total_bytes;
1257 	int ret;
1258 	int slot;
1259 	struct extent_buffer *l;
1260 
1261 	path = btrfs_alloc_path();
1262 	if (!path)
1263 		return -ENOMEM;
1264 
1265 	max_hole_start = search_start;
1266 	max_hole_size = 0;
1267 
1268 again:
1269 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1270 		ret = -ENOSPC;
1271 		goto out;
1272 	}
1273 
1274 	path->reada = 2;
1275 	path->search_commit_root = 1;
1276 	path->skip_locking = 1;
1277 
1278 	key.objectid = device->devid;
1279 	key.offset = search_start;
1280 	key.type = BTRFS_DEV_EXTENT_KEY;
1281 
1282 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1283 	if (ret < 0)
1284 		goto out;
1285 	if (ret > 0) {
1286 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1287 		if (ret < 0)
1288 			goto out;
1289 	}
1290 
1291 	while (1) {
1292 		l = path->nodes[0];
1293 		slot = path->slots[0];
1294 		if (slot >= btrfs_header_nritems(l)) {
1295 			ret = btrfs_next_leaf(root, path);
1296 			if (ret == 0)
1297 				continue;
1298 			if (ret < 0)
1299 				goto out;
1300 
1301 			break;
1302 		}
1303 		btrfs_item_key_to_cpu(l, &key, slot);
1304 
1305 		if (key.objectid < device->devid)
1306 			goto next;
1307 
1308 		if (key.objectid > device->devid)
1309 			break;
1310 
1311 		if (key.type != BTRFS_DEV_EXTENT_KEY)
1312 			goto next;
1313 
1314 		if (key.offset > search_start) {
1315 			hole_size = key.offset - search_start;
1316 
1317 			/*
1318 			 * Have to check before we set max_hole_start, otherwise
1319 			 * we could end up sending back this offset anyway.
1320 			 */
1321 			if (contains_pending_extent(transaction, device,
1322 						    &search_start,
1323 						    hole_size)) {
1324 				if (key.offset >= search_start) {
1325 					hole_size = key.offset - search_start;
1326 				} else {
1327 					WARN_ON_ONCE(1);
1328 					hole_size = 0;
1329 				}
1330 			}
1331 
1332 			if (hole_size > max_hole_size) {
1333 				max_hole_start = search_start;
1334 				max_hole_size = hole_size;
1335 			}
1336 
1337 			/*
1338 			 * If this free space is greater than what we need,
1339 			 * it must be the max free space that we have found
1340 			 * until now, so max_hole_start must point to the start
1341 			 * of this free space and the length of this free space
1342 			 * is stored in max_hole_size. Thus, we return
1343 			 * max_hole_start and max_hole_size and go back to the
1344 			 * caller.
1345 			 */
1346 			if (hole_size >= num_bytes) {
1347 				ret = 0;
1348 				goto out;
1349 			}
1350 		}
1351 
1352 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1353 		extent_end = key.offset + btrfs_dev_extent_length(l,
1354 								  dev_extent);
1355 		if (extent_end > search_start)
1356 			search_start = extent_end;
1357 next:
1358 		path->slots[0]++;
1359 		cond_resched();
1360 	}
1361 
1362 	/*
1363 	 * At this point, search_start should be the end of
1364 	 * allocated dev extents, and when shrinking the device,
1365 	 * search_end may be smaller than search_start.
1366 	 */
1367 	if (search_end > search_start) {
1368 		hole_size = search_end - search_start;
1369 
1370 		if (contains_pending_extent(transaction, device, &search_start,
1371 					    hole_size)) {
1372 			btrfs_release_path(path);
1373 			goto again;
1374 		}
1375 
1376 		if (hole_size > max_hole_size) {
1377 			max_hole_start = search_start;
1378 			max_hole_size = hole_size;
1379 		}
1380 	}
1381 
1382 	/* See above. */
1383 	if (max_hole_size < num_bytes)
1384 		ret = -ENOSPC;
1385 	else
1386 		ret = 0;
1387 
1388 out:
1389 	btrfs_free_path(path);
1390 	*start = max_hole_start;
1391 	if (len)
1392 		*len = max_hole_size;
1393 	return ret;
1394 }
1395 
1396 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1397 			 struct btrfs_device *device, u64 num_bytes,
1398 			 u64 *start, u64 *len)
1399 {
1400 	struct btrfs_root *root = device->dev_root;
1401 	u64 search_start;
1402 
1403 	/* FIXME use last free of some kind */
1404 
1405 	/*
1406 	 * we don't want to overwrite the superblock on the drive,
1407 	 * so we make sure to start at an offset of at least 1MB
1408 	 */
1409 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1410 	return find_free_dev_extent_start(trans->transaction, device,
1411 					  num_bytes, search_start, start, len);
1412 }
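
/*
 * What the doc-comment on find_free_dev_extent_start() promises in
 * practice: even on -ENOSPC the out parameters describe the largest
 * hole found, which lets a caller size a best-effort allocation.
 * Illustrative sketch:
 */
static int __maybe_unused example_find_hole(struct btrfs_trans_handle *trans,
					    struct btrfs_device *device,
					    u64 num_bytes)
{
	u64 start = 0;
	u64 max_avail = 0;
	int ret;

	ret = find_free_dev_extent(trans, device, num_bytes,
				   &start, &max_avail);
	if (ret == -ENOSPC)
		return ret;	/* max_avail is the biggest hole's size */
	if (ret)
		return ret;	/* search error */

	/* [start, start + num_bytes) is free on this device */
	return 0;
}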
1413 
1414 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1415 			  struct btrfs_device *device,
1416 			  u64 start, u64 *dev_extent_len)
1417 {
1418 	int ret;
1419 	struct btrfs_path *path;
1420 	struct btrfs_root *root = device->dev_root;
1421 	struct btrfs_key key;
1422 	struct btrfs_key found_key;
1423 	struct extent_buffer *leaf = NULL;
1424 	struct btrfs_dev_extent *extent = NULL;
1425 
1426 	path = btrfs_alloc_path();
1427 	if (!path)
1428 		return -ENOMEM;
1429 
1430 	key.objectid = device->devid;
1431 	key.offset = start;
1432 	key.type = BTRFS_DEV_EXTENT_KEY;
1433 again:
1434 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1435 	if (ret > 0) {
1436 		ret = btrfs_previous_item(root, path, key.objectid,
1437 					  BTRFS_DEV_EXTENT_KEY);
1438 		if (ret)
1439 			goto out;
1440 		leaf = path->nodes[0];
1441 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1442 		extent = btrfs_item_ptr(leaf, path->slots[0],
1443 					struct btrfs_dev_extent);
1444 		BUG_ON(found_key.offset > start || found_key.offset +
1445 		       btrfs_dev_extent_length(leaf, extent) < start);
1446 		key = found_key;
1447 		btrfs_release_path(path);
1448 		goto again;
1449 	} else if (ret == 0) {
1450 		leaf = path->nodes[0];
1451 		extent = btrfs_item_ptr(leaf, path->slots[0],
1452 					struct btrfs_dev_extent);
1453 	} else {
1454 		btrfs_std_error(root->fs_info, ret, "Slot search failed");
1455 		goto out;
1456 	}
1457 
1458 	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1459 
1460 	ret = btrfs_del_item(trans, root, path);
1461 	if (ret) {
1462 		btrfs_std_error(root->fs_info, ret,
1463 			    "Failed to remove dev extent item");
1464 	} else {
1465 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1466 	}
1467 out:
1468 	btrfs_free_path(path);
1469 	return ret;
1470 }
1471 
1472 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1473 				  struct btrfs_device *device,
1474 				  u64 chunk_tree, u64 chunk_objectid,
1475 				  u64 chunk_offset, u64 start, u64 num_bytes)
1476 {
1477 	int ret;
1478 	struct btrfs_path *path;
1479 	struct btrfs_root *root = device->dev_root;
1480 	struct btrfs_dev_extent *extent;
1481 	struct extent_buffer *leaf;
1482 	struct btrfs_key key;
1483 
1484 	WARN_ON(!device->in_fs_metadata);
1485 	WARN_ON(device->is_tgtdev_for_dev_replace);
1486 	path = btrfs_alloc_path();
1487 	if (!path)
1488 		return -ENOMEM;
1489 
1490 	key.objectid = device->devid;
1491 	key.offset = start;
1492 	key.type = BTRFS_DEV_EXTENT_KEY;
1493 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1494 				      sizeof(*extent));
1495 	if (ret)
1496 		goto out;
1497 
1498 	leaf = path->nodes[0];
1499 	extent = btrfs_item_ptr(leaf, path->slots[0],
1500 				struct btrfs_dev_extent);
1501 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1502 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1503 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1504 
1505 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1506 		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
1507 
1508 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1509 	btrfs_mark_buffer_dirty(leaf);
1510 out:
1511 	btrfs_free_path(path);
1512 	return ret;
1513 }
1514 
1515 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1516 {
1517 	struct extent_map_tree *em_tree;
1518 	struct extent_map *em;
1519 	struct rb_node *n;
1520 	u64 ret = 0;
1521 
1522 	em_tree = &fs_info->mapping_tree.map_tree;
1523 	read_lock(&em_tree->lock);
1524 	n = rb_last(&em_tree->map);
1525 	if (n) {
1526 		em = rb_entry(n, struct extent_map, rb_node);
1527 		ret = em->start + em->len;
1528 	}
1529 	read_unlock(&em_tree->lock);
1530 
1531 	return ret;
1532 }
1533 
1534 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1535 				    u64 *devid_ret)
1536 {
1537 	int ret;
1538 	struct btrfs_key key;
1539 	struct btrfs_key found_key;
1540 	struct btrfs_path *path;
1541 
1542 	path = btrfs_alloc_path();
1543 	if (!path)
1544 		return -ENOMEM;
1545 
1546 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1547 	key.type = BTRFS_DEV_ITEM_KEY;
1548 	key.offset = (u64)-1;
1549 
1550 	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1551 	if (ret < 0)
1552 		goto error;
1553 
1554 	BUG_ON(ret == 0); /* Corruption */
1555 
1556 	ret = btrfs_previous_item(fs_info->chunk_root, path,
1557 				  BTRFS_DEV_ITEMS_OBJECTID,
1558 				  BTRFS_DEV_ITEM_KEY);
1559 	if (ret) {
1560 		*devid_ret = 1;
1561 	} else {
1562 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1563 				      path->slots[0]);
1564 		*devid_ret = found_key.offset + 1;
1565 	}
1566 	ret = 0;
1567 error:
1568 	btrfs_free_path(path);
1569 	return ret;
1570 }
1571 
1572 /*
1573  * the device information is stored in the chunk root;
1574  * the btrfs_device struct should be fully filled in
1575  */
1576 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1577 			    struct btrfs_root *root,
1578 			    struct btrfs_device *device)
1579 {
1580 	int ret;
1581 	struct btrfs_path *path;
1582 	struct btrfs_dev_item *dev_item;
1583 	struct extent_buffer *leaf;
1584 	struct btrfs_key key;
1585 	unsigned long ptr;
1586 
1587 	root = root->fs_info->chunk_root;
1588 
1589 	path = btrfs_alloc_path();
1590 	if (!path)
1591 		return -ENOMEM;
1592 
1593 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1594 	key.type = BTRFS_DEV_ITEM_KEY;
1595 	key.offset = device->devid;
1596 
1597 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1598 				      sizeof(*dev_item));
1599 	if (ret)
1600 		goto out;
1601 
1602 	leaf = path->nodes[0];
1603 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1604 
1605 	btrfs_set_device_id(leaf, dev_item, device->devid);
1606 	btrfs_set_device_generation(leaf, dev_item, 0);
1607 	btrfs_set_device_type(leaf, dev_item, device->type);
1608 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1609 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1610 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1611 	btrfs_set_device_total_bytes(leaf, dev_item,
1612 				     btrfs_device_get_disk_total_bytes(device));
1613 	btrfs_set_device_bytes_used(leaf, dev_item,
1614 				    btrfs_device_get_bytes_used(device));
1615 	btrfs_set_device_group(leaf, dev_item, 0);
1616 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1617 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1618 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1619 
1620 	ptr = btrfs_device_uuid(dev_item);
1621 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1622 	ptr = btrfs_device_fsid(dev_item);
1623 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1624 	btrfs_mark_buffer_dirty(leaf);
1625 
1626 	ret = 0;
1627 out:
1628 	btrfs_free_path(path);
1629 	return ret;
1630 }
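
/*
 * Every dev item written above lives at the same fixed key shape in
 * the chunk root; btrfs_rm_dev_item() below builds the identical key
 * to delete it.  Sketch (hypothetical helper):
 */
static void __maybe_unused example_dev_item_key(u64 devid,
						struct btrfs_key *key)
{
	key->objectid = BTRFS_DEV_ITEMS_OBJECTID;	/* shared by all dev items */
	key->type = BTRFS_DEV_ITEM_KEY;
	key->offset = devid;				/* which device */
}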
1631 
1632 /*
1633  * Function to update ctime/mtime for a given device path.
1634  * Mainly used for ctime/mtime based probes like libblkid.
1635  */
1636 static void update_dev_time(char *path_name)
1637 {
1638 	struct file *filp;
1639 
1640 	filp = filp_open(path_name, O_RDWR, 0);
1641 	if (IS_ERR(filp))
1642 		return;
1643 	file_update_time(filp);
1644 	filp_close(filp, NULL);
1645 	return;
1646 }
1647 
1648 static int btrfs_rm_dev_item(struct btrfs_root *root,
1649 			     struct btrfs_device *device)
1650 {
1651 	int ret;
1652 	struct btrfs_path *path;
1653 	struct btrfs_key key;
1654 	struct btrfs_trans_handle *trans;
1655 
1656 	root = root->fs_info->chunk_root;
1657 
1658 	path = btrfs_alloc_path();
1659 	if (!path)
1660 		return -ENOMEM;
1661 
1662 	trans = btrfs_start_transaction(root, 0);
1663 	if (IS_ERR(trans)) {
1664 		btrfs_free_path(path);
1665 		return PTR_ERR(trans);
1666 	}
1667 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1668 	key.type = BTRFS_DEV_ITEM_KEY;
1669 	key.offset = device->devid;
1670 
1671 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1672 	if (ret < 0)
1673 		goto out;
1674 
1675 	if (ret > 0) {
1676 		ret = -ENOENT;
1677 		goto out;
1678 	}
1679 
1680 	ret = btrfs_del_item(trans, root, path);
1681 	if (ret)
1682 		goto out;
1683 out:
1684 	btrfs_free_path(path);
1685 	btrfs_commit_transaction(trans, root);
1686 	return ret;
1687 }
1688 
1689 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1690 {
1691 	struct btrfs_device *device;
1692 	struct btrfs_device *next_device;
1693 	struct block_device *bdev;
1694 	struct buffer_head *bh = NULL;
1695 	struct btrfs_super_block *disk_super;
1696 	struct btrfs_fs_devices *cur_devices;
1697 	u64 all_avail;
1698 	u64 devid;
1699 	u64 num_devices;
1700 	u8 *dev_uuid;
1701 	unsigned seq;
1702 	int ret = 0;
1703 	bool clear_super = false;
1704 
1705 	mutex_lock(&uuid_mutex);
1706 
1707 	do {
1708 		seq = read_seqbegin(&root->fs_info->profiles_lock);
1709 
1710 		all_avail = root->fs_info->avail_data_alloc_bits |
1711 			    root->fs_info->avail_system_alloc_bits |
1712 			    root->fs_info->avail_metadata_alloc_bits;
1713 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1714 
1715 	num_devices = root->fs_info->fs_devices->num_devices;
1716 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1717 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1718 		WARN_ON(num_devices < 1);
1719 		num_devices--;
1720 	}
1721 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1722 
1723 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1724 		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1725 		goto out;
1726 	}
1727 
1728 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1729 		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1730 		goto out;
1731 	}
1732 
1733 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1734 	    root->fs_info->fs_devices->rw_devices <= 2) {
1735 		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1736 		goto out;
1737 	}
1738 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1739 	    root->fs_info->fs_devices->rw_devices <= 3) {
1740 		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1741 		goto out;
1742 	}
1743 
1744 	if (strcmp(device_path, "missing") == 0) {
1745 		struct list_head *devices;
1746 		struct btrfs_device *tmp;
1747 
1748 		device = NULL;
1749 		devices = &root->fs_info->fs_devices->devices;
1750 		/*
1751 		 * It is safe to read the devices since the volume_mutex
1752 		 * is held.
1753 		 */
1754 		list_for_each_entry(tmp, devices, dev_list) {
1755 			if (tmp->in_fs_metadata &&
1756 			    !tmp->is_tgtdev_for_dev_replace &&
1757 			    !tmp->bdev) {
1758 				device = tmp;
1759 				break;
1760 			}
1761 		}
1762 		bdev = NULL;
1763 		bh = NULL;
1764 		disk_super = NULL;
1765 		if (!device) {
1766 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1767 			goto out;
1768 		}
1769 	} else {
1770 		ret = btrfs_get_bdev_and_sb(device_path,
1771 					    FMODE_WRITE | FMODE_EXCL,
1772 					    root->fs_info->bdev_holder, 0,
1773 					    &bdev, &bh);
1774 		if (ret)
1775 			goto out;
1776 		disk_super = (struct btrfs_super_block *)bh->b_data;
1777 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1778 		dev_uuid = disk_super->dev_item.uuid;
1779 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1780 					   disk_super->fsid);
1781 		if (!device) {
1782 			ret = -ENOENT;
1783 			goto error_brelse;
1784 		}
1785 	}
1786 
1787 	if (device->is_tgtdev_for_dev_replace) {
1788 		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1789 		goto error_brelse;
1790 	}
1791 
1792 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1793 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1794 		goto error_brelse;
1795 	}
1796 
1797 	if (device->writeable) {
1798 		lock_chunks(root);
1799 		list_del_init(&device->dev_alloc_list);
1800 		device->fs_devices->rw_devices--;
1801 		unlock_chunks(root);
1802 		clear_super = true;
1803 	}
1804 
1805 	mutex_unlock(&uuid_mutex);
1806 	ret = btrfs_shrink_device(device, 0);
1807 	mutex_lock(&uuid_mutex);
1808 	if (ret)
1809 		goto error_undo;
1810 
1811 	/*
1812 	 * TODO: the superblock still includes this device in its num_devices
1813 	 * counter although write_all_supers() is not locked out. This
1814 	 * could give a filesystem state which requires a degraded mount.
1815 	 */
1816 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1817 	if (ret)
1818 		goto error_undo;
1819 
1820 	device->in_fs_metadata = 0;
1821 	btrfs_scrub_cancel_dev(root->fs_info, device);
1822 
1823 	/*
1824 	 * the device list mutex makes sure that we don't change
1825 	 * the device list while someone else is writing out all
1826 	 * the device supers. Whoever is writing all supers, should
1827 	 * lock the device list mutex before getting the number of
1828 	 * devices in the super block (super_copy). Conversely,
1829 	 * whoever updates the number of devices in the super block
1830 	 * (super_copy) should hold the device list mutex.
1831 	 */
1832 
1833 	cur_devices = device->fs_devices;
1834 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1835 	list_del_rcu(&device->dev_list);
1836 
1837 	device->fs_devices->num_devices--;
1838 	device->fs_devices->total_devices--;
1839 
1840 	if (device->missing)
1841 		device->fs_devices->missing_devices--;
1842 
1843 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1844 				 struct btrfs_device, dev_list);
1845 	if (device->bdev == root->fs_info->sb->s_bdev)
1846 		root->fs_info->sb->s_bdev = next_device->bdev;
1847 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1848 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1849 
1850 	if (device->bdev) {
1851 		device->fs_devices->open_devices--;
1852 		/* remove sysfs entry */
1853 		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1854 	}
1855 
1856 	call_rcu(&device->rcu, free_device);
1857 
1858 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1859 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1860 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1861 
1862 	if (cur_devices->open_devices == 0) {
1863 		struct btrfs_fs_devices *fs_devices;
1864 		fs_devices = root->fs_info->fs_devices;
1865 		while (fs_devices) {
1866 			if (fs_devices->seed == cur_devices) {
1867 				fs_devices->seed = cur_devices->seed;
1868 				break;
1869 			}
1870 			fs_devices = fs_devices->seed;
1871 		}
1872 		cur_devices->seed = NULL;
1873 		__btrfs_close_devices(cur_devices);
1874 		free_fs_devices(cur_devices);
1875 	}
1876 
1877 	root->fs_info->num_tolerated_disk_barrier_failures =
1878 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1879 
1880 	/*
1881 	 * at this point, the device is zero sized.  We want to
1882 	 * remove it from the devices list and zero out the old super
1883 	 */
1884 	if (clear_super && disk_super) {
1885 		u64 bytenr;
1886 		int i;
1887 
1888 		/* make sure this device isn't detected as part of
1889 		 * the FS anymore
1890 		 */
1891 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1892 		set_buffer_dirty(bh);
1893 		sync_dirty_buffer(bh);
1894 
1895 		/* clear the mirror copies of the super block on the disk
1896 		 * being removed; the 0th copy was taken care of above and
1897 		 * the loop below handles the rest
1898 		 */
1899 		for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1900 			bytenr = btrfs_sb_offset(i);
1901 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
1902 					i_size_read(bdev->bd_inode))
1903 				break;
1904 
1905 			brelse(bh);
1906 			bh = __bread(bdev, bytenr / 4096,
1907 					BTRFS_SUPER_INFO_SIZE);
1908 			if (!bh)
1909 				continue;
1910 
1911 			disk_super = (struct btrfs_super_block *)bh->b_data;
1912 
1913 			if (btrfs_super_bytenr(disk_super) != bytenr ||
1914 				btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1915 				continue;
1916 			}
1917 			memset(&disk_super->magic, 0,
1918 						sizeof(disk_super->magic));
1919 			set_buffer_dirty(bh);
1920 			sync_dirty_buffer(bh);
1921 		}
1922 	}
1923 
1924 	ret = 0;
1925 
1926 	if (bdev) {
1927 		/* Notify udev that device has changed */
1928 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1929 
1930 		/* Update ctime/mtime for device path for libblkid */
1931 		update_dev_time(device_path);
1932 	}
1933 
1934 error_brelse:
1935 	brelse(bh);
1936 	if (bdev)
1937 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1938 out:
1939 	mutex_unlock(&uuid_mutex);
1940 	return ret;
1941 error_undo:
1942 	if (device->writeable) {
1943 		lock_chunks(root);
1944 		list_add(&device->dev_alloc_list,
1945 			 &root->fs_info->fs_devices->alloc_list);
1946 		device->fs_devices->rw_devices++;
1947 		unlock_chunks(root);
1948 	}
1949 	goto error_brelse;
1950 }
1951 
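/*
 * Drop the source device of a dev-replace from the in-memory device
 * lists and adjust the fs_devices counters.  The caller must hold
 * device_list_mutex (asserted below); the device itself is freed later
 * by btrfs_rm_dev_replace_free_srcdev().
 */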
1952 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1953 					struct btrfs_device *srcdev)
1954 {
1955 	struct btrfs_fs_devices *fs_devices;
1956 
1957 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1958 
1959 	/*
1960 	 * in case of a fs with no seed, srcdev->fs_devices will point
1961 	 * to the fs_devices of fs_info. However, when the dev being replaced
1962 	 * is a seed dev it will point to the seed's local fs_devices. In
1963 	 * short, srcdev will have its correct fs_devices in both cases.
1964 	 */
1965 	fs_devices = srcdev->fs_devices;
1966 
1967 	list_del_rcu(&srcdev->dev_list);
1968 	list_del_rcu(&srcdev->dev_alloc_list);
1969 	fs_devices->num_devices--;
1970 	if (srcdev->missing)
1971 		fs_devices->missing_devices--;
1972 
1973 	if (srcdev->writeable) {
1974 		fs_devices->rw_devices--;
1975 		/* zero out the old super if it is writable */
1976 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1977 	}
1978 
1979 	if (srcdev->bdev)
1980 		fs_devices->open_devices--;
1981 }
1982 
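/*
 * Free the dev-replace source device after it has been unlinked by
 * btrfs_rm_dev_replace_remove_srcdev().  If this was the last device of
 * a seed fs_devices, that fs_devices is removed from the seed chain and
 * freed as well.
 */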
1983 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1984 				      struct btrfs_device *srcdev)
1985 {
1986 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1987 
1988 	call_rcu(&srcdev->rcu, free_device);
1989 
1990 	/*
1991 	 * unless fs_devices is a seed fs, num_devices shouldn't
1992 	 * drop to zero
1993 	 */
1994 	BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
1995 
1996 	/* if there are no devs left, delete the fs_devices */
1997 	if (!fs_devices->num_devices) {
1998 		struct btrfs_fs_devices *tmp_fs_devices;
1999 
2000 		tmp_fs_devices = fs_info->fs_devices;
2001 		while (tmp_fs_devices) {
2002 			if (tmp_fs_devices->seed == fs_devices) {
2003 				tmp_fs_devices->seed = fs_devices->seed;
2004 				break;
2005 			}
2006 			tmp_fs_devices = tmp_fs_devices->seed;
2007 		}
2008 		fs_devices->seed = NULL;
2009 		__btrfs_close_devices(fs_devices);
2010 		free_fs_devices(fs_devices);
2011 	}
2012 }
2013 
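/*
 * Tear down the target device of a dev-replace (e.g. when the replace
 * is canceled): scratch its superblocks, unlink it from the device list
 * and sysfs, and free it.
 */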
2014 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2015 				      struct btrfs_device *tgtdev)
2016 {
2017 	struct btrfs_device *next_device;
2018 
2019 	mutex_lock(&uuid_mutex);
2020 	WARN_ON(!tgtdev);
2021 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2022 
2023 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2024 
2025 	if (tgtdev->bdev) {
2026 		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2027 		fs_info->fs_devices->open_devices--;
2028 	}
2029 	fs_info->fs_devices->num_devices--;
2030 
2031 	next_device = list_entry(fs_info->fs_devices->devices.next,
2032 				 struct btrfs_device, dev_list);
2033 	if (tgtdev->bdev == fs_info->sb->s_bdev)
2034 		fs_info->sb->s_bdev = next_device->bdev;
2035 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
2036 		fs_info->fs_devices->latest_bdev = next_device->bdev;
2037 	list_del_rcu(&tgtdev->dev_list);
2038 
2039 	call_rcu(&tgtdev->rcu, free_device);
2040 
2041 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2042 	mutex_unlock(&uuid_mutex);
2043 }
2044 
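/*
 * Read the superblock at @device_path and look up the corresponding
 * btrfs_device by devid, device uuid and fsid.  Returns -ENOENT if the
 * device does not belong to this filesystem.
 */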
2045 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
2046 				     struct btrfs_device **device)
2047 {
2048 	int ret = 0;
2049 	struct btrfs_super_block *disk_super;
2050 	u64 devid;
2051 	u8 *dev_uuid;
2052 	struct block_device *bdev;
2053 	struct buffer_head *bh;
2054 
2055 	*device = NULL;
2056 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2057 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
2058 	if (ret)
2059 		return ret;
2060 	disk_super = (struct btrfs_super_block *)bh->b_data;
2061 	devid = btrfs_stack_device_id(&disk_super->dev_item);
2062 	dev_uuid = disk_super->dev_item.uuid;
2063 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2064 				    disk_super->fsid);
2065 	brelse(bh);
2066 	if (!*device)
2067 		ret = -ENOENT;
2068 	blkdev_put(bdev, FMODE_READ);
2069 	return ret;
2070 }
2071 
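/*
 * Like btrfs_find_device_by_path(), except that the special path
 * "missing" selects the first device that is present in the metadata
 * but has no backing bdev.
 */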
2072 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
2073 					 char *device_path,
2074 					 struct btrfs_device **device)
2075 {
2076 	*device = NULL;
2077 	if (strcmp(device_path, "missing") == 0) {
2078 		struct list_head *devices;
2079 		struct btrfs_device *tmp;
2080 
2081 		devices = &root->fs_info->fs_devices->devices;
2082 		/*
2083 		 * It is safe to read the devices since the volume_mutex
2084 		 * is held by the caller.
2085 		 */
2086 		list_for_each_entry(tmp, devices, dev_list) {
2087 			if (tmp->in_fs_metadata && !tmp->bdev) {
2088 				*device = tmp;
2089 				break;
2090 			}
2091 		}
2092 
2093 		if (!*device)
2094 			return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2095 
2096 		return 0;
2097 	} else {
2098 		return btrfs_find_device_by_path(root, device_path, device);
2099 	}
2100 }
2101 
2102 /*
2103  * does all the dirty work required for changing the file system's UUID.
2104  */
2105 static int btrfs_prepare_sprout(struct btrfs_root *root)
2106 {
2107 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2108 	struct btrfs_fs_devices *old_devices;
2109 	struct btrfs_fs_devices *seed_devices;
2110 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
2111 	struct btrfs_device *device;
2112 	u64 super_flags;
2113 
2114 	BUG_ON(!mutex_is_locked(&uuid_mutex));
2115 	if (!fs_devices->seeding)
2116 		return -EINVAL;
2117 
2118 	seed_devices = __alloc_fs_devices();
2119 	if (IS_ERR(seed_devices))
2120 		return PTR_ERR(seed_devices);
2121 
2122 	old_devices = clone_fs_devices(fs_devices);
2123 	if (IS_ERR(old_devices)) {
2124 		kfree(seed_devices);
2125 		return PTR_ERR(old_devices);
2126 	}
2127 
2128 	list_add(&old_devices->list, &fs_uuids);
2129 
2130 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2131 	seed_devices->opened = 1;
2132 	INIT_LIST_HEAD(&seed_devices->devices);
2133 	INIT_LIST_HEAD(&seed_devices->alloc_list);
2134 	mutex_init(&seed_devices->device_list_mutex);
2135 
2136 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2137 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2138 			      synchronize_rcu);
2139 	list_for_each_entry(device, &seed_devices->devices, dev_list)
2140 		device->fs_devices = seed_devices;
2141 
2142 	lock_chunks(root);
2143 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2144 	unlock_chunks(root);
2145 
2146 	fs_devices->seeding = 0;
2147 	fs_devices->num_devices = 0;
2148 	fs_devices->open_devices = 0;
2149 	fs_devices->missing_devices = 0;
2150 	fs_devices->rotating = 0;
2151 	fs_devices->seed = seed_devices;
2152 
2153 	generate_random_uuid(fs_devices->fsid);
2154 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2155 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2156 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2157 
2158 	super_flags = btrfs_super_flags(disk_super) &
2159 		      ~BTRFS_SUPER_FLAG_SEEDING;
2160 	btrfs_set_super_flags(disk_super, super_flags);
2161 
2162 	return 0;
2163 }
2164 
2165 /*
2166  * store the expected generation for seed devices in device items.
2167  */
2168 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2169 			       struct btrfs_root *root)
2170 {
2171 	struct btrfs_path *path;
2172 	struct extent_buffer *leaf;
2173 	struct btrfs_dev_item *dev_item;
2174 	struct btrfs_device *device;
2175 	struct btrfs_key key;
2176 	u8 fs_uuid[BTRFS_UUID_SIZE];
2177 	u8 dev_uuid[BTRFS_UUID_SIZE];
2178 	u64 devid;
2179 	int ret;
2180 
2181 	path = btrfs_alloc_path();
2182 	if (!path)
2183 		return -ENOMEM;
2184 
2185 	root = root->fs_info->chunk_root;
2186 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2187 	key.offset = 0;
2188 	key.type = BTRFS_DEV_ITEM_KEY;
2189 
2190 	while (1) {
2191 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2192 		if (ret < 0)
2193 			goto error;
2194 
2195 		leaf = path->nodes[0];
2196 next_slot:
2197 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2198 			ret = btrfs_next_leaf(root, path);
2199 			if (ret > 0)
2200 				break;
2201 			if (ret < 0)
2202 				goto error;
2203 			leaf = path->nodes[0];
2204 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2205 			btrfs_release_path(path);
2206 			continue;
2207 		}
2208 
2209 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2210 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2211 		    key.type != BTRFS_DEV_ITEM_KEY)
2212 			break;
2213 
2214 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
2215 					  struct btrfs_dev_item);
2216 		devid = btrfs_device_id(leaf, dev_item);
2217 		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2218 				   BTRFS_UUID_SIZE);
2219 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2220 				   BTRFS_UUID_SIZE);
2221 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
2222 					   fs_uuid);
2223 		BUG_ON(!device); /* Logic error */
2224 
2225 		if (device->fs_devices->seeding) {
2226 			btrfs_set_device_generation(leaf, dev_item,
2227 						    device->generation);
2228 			btrfs_mark_buffer_dirty(leaf);
2229 		}
2230 
2231 		path->slots[0]++;
2232 		goto next_slot;
2233 	}
2234 	ret = 0;
2235 error:
2236 	btrfs_free_path(path);
2237 	return ret;
2238 }
2239 
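/*
 * Handle adding a new device to a mounted filesystem.  If the fs is
 * currently a seed filesystem, this also sprouts it: the existing
 * devices are moved to a separate seed fs_devices and a fresh fsid is
 * generated for the now-writable filesystem.
 */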
2240 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2241 {
2242 	struct request_queue *q;
2243 	struct btrfs_trans_handle *trans;
2244 	struct btrfs_device *device;
2245 	struct block_device *bdev;
2246 	struct list_head *devices;
2247 	struct super_block *sb = root->fs_info->sb;
2248 	struct rcu_string *name;
2249 	u64 tmp;
2250 	int seeding_dev = 0;
2251 	int ret = 0;
2252 
2253 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
2254 		return -EROFS;
2255 
2256 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2257 				  root->fs_info->bdev_holder);
2258 	if (IS_ERR(bdev))
2259 		return PTR_ERR(bdev);
2260 
2261 	if (root->fs_info->fs_devices->seeding) {
2262 		seeding_dev = 1;
2263 		down_write(&sb->s_umount);
2264 		mutex_lock(&uuid_mutex);
2265 	}
2266 
2267 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2268 
2269 	devices = &root->fs_info->fs_devices->devices;
2270 
2271 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2272 	list_for_each_entry(device, devices, dev_list) {
2273 		if (device->bdev == bdev) {
2274 			ret = -EEXIST;
2275 			mutex_unlock(
2276 				&root->fs_info->fs_devices->device_list_mutex);
2277 			goto error;
2278 		}
2279 	}
2280 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2281 
2282 	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
2283 	if (IS_ERR(device)) {
2284 		/* we can safely leave the fs_devices entry around */
2285 		ret = PTR_ERR(device);
2286 		goto error;
2287 	}
2288 
2289 	name = rcu_string_strdup(device_path, GFP_NOFS);
2290 	if (!name) {
2291 		kfree(device);
2292 		ret = -ENOMEM;
2293 		goto error;
2294 	}
2295 	rcu_assign_pointer(device->name, name);
2296 
2297 	trans = btrfs_start_transaction(root, 0);
2298 	if (IS_ERR(trans)) {
2299 		rcu_string_free(device->name);
2300 		kfree(device);
2301 		ret = PTR_ERR(trans);
2302 		goto error;
2303 	}
2304 
2305 	q = bdev_get_queue(bdev);
2306 	if (blk_queue_discard(q))
2307 		device->can_discard = 1;
2308 	device->writeable = 1;
2309 	device->generation = trans->transid;
2310 	device->io_width = root->sectorsize;
2311 	device->io_align = root->sectorsize;
2312 	device->sector_size = root->sectorsize;
2313 	device->total_bytes = i_size_read(bdev->bd_inode);
2314 	device->disk_total_bytes = device->total_bytes;
2315 	device->commit_total_bytes = device->total_bytes;
2316 	device->dev_root = root->fs_info->dev_root;
2317 	device->bdev = bdev;
2318 	device->in_fs_metadata = 1;
2319 	device->is_tgtdev_for_dev_replace = 0;
2320 	device->mode = FMODE_EXCL;
2321 	device->dev_stats_valid = 1;
2322 	set_blocksize(device->bdev, 4096);
2323 
2324 	if (seeding_dev) {
2325 		sb->s_flags &= ~MS_RDONLY;
2326 		ret = btrfs_prepare_sprout(root);
2327 		BUG_ON(ret); /* -ENOMEM */
2328 	}
2329 
2330 	device->fs_devices = root->fs_info->fs_devices;
2331 
2332 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2333 	lock_chunks(root);
2334 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2335 	list_add(&device->dev_alloc_list,
2336 		 &root->fs_info->fs_devices->alloc_list);
2337 	root->fs_info->fs_devices->num_devices++;
2338 	root->fs_info->fs_devices->open_devices++;
2339 	root->fs_info->fs_devices->rw_devices++;
2340 	root->fs_info->fs_devices->total_devices++;
2341 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2342 
2343 	spin_lock(&root->fs_info->free_chunk_lock);
2344 	root->fs_info->free_chunk_space += device->total_bytes;
2345 	spin_unlock(&root->fs_info->free_chunk_lock);
2346 
2347 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2348 		root->fs_info->fs_devices->rotating = 1;
2349 
2350 	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
2351 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2352 				    tmp + device->total_bytes);
2353 
2354 	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
2355 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2356 				    tmp + 1);
2357 
2358 	/* add sysfs device entry */
2359 	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
2360 
2361 	/*
2362 	 * we've got more storage, clear any full flags on the space
2363 	 * infos
2364 	 */
2365 	btrfs_clear_space_info_full(root->fs_info);
2366 
2367 	unlock_chunks(root);
2368 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2369 
2370 	if (seeding_dev) {
2371 		lock_chunks(root);
2372 		ret = init_first_rw_device(trans, root, device);
2373 		unlock_chunks(root);
2374 		if (ret) {
2375 			btrfs_abort_transaction(trans, root, ret);
2376 			goto error_trans;
2377 		}
2378 	}
2379 
2380 	ret = btrfs_add_device(trans, root, device);
2381 	if (ret) {
2382 		btrfs_abort_transaction(trans, root, ret);
2383 		goto error_trans;
2384 	}
2385 
2386 	if (seeding_dev) {
2387 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2388 
2389 		ret = btrfs_finish_sprout(trans, root);
2390 		if (ret) {
2391 			btrfs_abort_transaction(trans, root, ret);
2392 			goto error_trans;
2393 		}
2394 
2395 		/* Sprouting would change the fsid of the mounted root,
2396 		 * so rename the fsid directory in sysfs
2397 		 */
2398 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2399 						root->fs_info->fsid);
2400 		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
2401 								fsid_buf))
2402 			btrfs_warn(root->fs_info,
2403 				"sysfs: failed to create fsid for sprout");
2404 	}
2405 
2406 	root->fs_info->num_tolerated_disk_barrier_failures =
2407 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2408 	ret = btrfs_commit_transaction(trans, root);
2409 
2410 	if (seeding_dev) {
2411 		mutex_unlock(&uuid_mutex);
2412 		up_write(&sb->s_umount);
2413 
2414 		if (ret) /* transaction commit */
2415 			return ret;
2416 
2417 		ret = btrfs_relocate_sys_chunks(root);
2418 		if (ret < 0)
2419 			btrfs_std_error(root->fs_info, ret,
2420 				    "Failed to relocate sys chunks after "
2421 				    "device initialization. This can be fixed "
2422 				    "using the \"btrfs balance\" command.");
2423 		trans = btrfs_attach_transaction(root);
2424 		if (IS_ERR(trans)) {
2425 			if (PTR_ERR(trans) == -ENOENT)
2426 				return 0;
2427 			return PTR_ERR(trans);
2428 		}
2429 		ret = btrfs_commit_transaction(trans, root);
2430 	}
2431 
2432 	/* Update ctime/mtime for libblkid */
2433 	update_dev_time(device_path);
2434 	return ret;
2435 
2436 error_trans:
2437 	btrfs_end_transaction(trans, root);
2438 	rcu_string_free(device->name);
2439 	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
2440 	kfree(device);
2441 error:
2442 	blkdev_put(bdev, FMODE_EXCL);
2443 	if (seeding_dev) {
2444 		mutex_unlock(&uuid_mutex);
2445 		up_write(&sb->s_umount);
2446 	}
2447 	return ret;
2448 }
2449 
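/*
 * Open and initialize the target device of a dev-replace.  The target
 * must not be part of a seed fs or already belong to the filesystem,
 * and must be at least as large as the source device; the sizes are
 * copied from @srcdev.
 */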
2450 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2451 				  struct btrfs_device *srcdev,
2452 				  struct btrfs_device **device_out)
2453 {
2454 	struct request_queue *q;
2455 	struct btrfs_device *device;
2456 	struct block_device *bdev;
2457 	struct btrfs_fs_info *fs_info = root->fs_info;
2458 	struct list_head *devices;
2459 	struct rcu_string *name;
2460 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
2461 	int ret = 0;
2462 
2463 	*device_out = NULL;
2464 	if (fs_info->fs_devices->seeding) {
2465 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
2466 		return -EINVAL;
2467 	}
2468 
2469 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2470 				  fs_info->bdev_holder);
2471 	if (IS_ERR(bdev)) {
2472 		btrfs_err(fs_info, "target device %s is invalid!", device_path);
2473 		return PTR_ERR(bdev);
2474 	}
2475 
2476 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2477 
2478 	devices = &fs_info->fs_devices->devices;
2479 	list_for_each_entry(device, devices, dev_list) {
2480 		if (device->bdev == bdev) {
2481 			btrfs_err(fs_info, "target device is in the filesystem!");
2482 			ret = -EEXIST;
2483 			goto error;
2484 		}
2485 	}
2486 
2488 	if (i_size_read(bdev->bd_inode) <
2489 	    btrfs_device_get_total_bytes(srcdev)) {
2490 		btrfs_err(fs_info, "target device is smaller than source device!");
2491 		ret = -EINVAL;
2492 		goto error;
2493 	}
2494 
2496 	device = btrfs_alloc_device(NULL, &devid, NULL);
2497 	if (IS_ERR(device)) {
2498 		ret = PTR_ERR(device);
2499 		goto error;
2500 	}
2501 
2502 	name = rcu_string_strdup(device_path, GFP_NOFS);
2503 	if (!name) {
2504 		kfree(device);
2505 		ret = -ENOMEM;
2506 		goto error;
2507 	}
2508 	rcu_assign_pointer(device->name, name);
2509 
2510 	q = bdev_get_queue(bdev);
2511 	if (blk_queue_discard(q))
2512 		device->can_discard = 1;
2513 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2514 	device->writeable = 1;
2515 	device->generation = 0;
2516 	device->io_width = root->sectorsize;
2517 	device->io_align = root->sectorsize;
2518 	device->sector_size = root->sectorsize;
2519 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
2520 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
2521 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
2522 	ASSERT(list_empty(&srcdev->resized_list));
2523 	device->commit_total_bytes = srcdev->commit_total_bytes;
2524 	device->commit_bytes_used = device->bytes_used;
2525 	device->dev_root = fs_info->dev_root;
2526 	device->bdev = bdev;
2527 	device->in_fs_metadata = 1;
2528 	device->is_tgtdev_for_dev_replace = 1;
2529 	device->mode = FMODE_EXCL;
2530 	device->dev_stats_valid = 1;
2531 	set_blocksize(device->bdev, 4096);
2532 	device->fs_devices = fs_info->fs_devices;
2533 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2534 	fs_info->fs_devices->num_devices++;
2535 	fs_info->fs_devices->open_devices++;
2536 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2537 
2538 	*device_out = device;
2539 	return ret;
2540 
2541 error:
2542 	blkdev_put(bdev, FMODE_EXCL);
2543 	return ret;
2544 }
2545 
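/*
 * Re-initialize the runtime fields of an already known replace target
 * device, used when a previously started dev-replace is resumed.
 */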
2546 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2547 					      struct btrfs_device *tgtdev)
2548 {
2549 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2550 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2551 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2552 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2553 	tgtdev->dev_root = fs_info->dev_root;
2554 	tgtdev->in_fs_metadata = 1;
2555 }
2556 
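/*
 * Write the in-memory state of @device (io geometry, sizes) back to its
 * dev item in the chunk tree.
 */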
2557 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2558 					struct btrfs_device *device)
2559 {
2560 	int ret;
2561 	struct btrfs_path *path;
2562 	struct btrfs_root *root;
2563 	struct btrfs_dev_item *dev_item;
2564 	struct extent_buffer *leaf;
2565 	struct btrfs_key key;
2566 
2567 	root = device->dev_root->fs_info->chunk_root;
2568 
2569 	path = btrfs_alloc_path();
2570 	if (!path)
2571 		return -ENOMEM;
2572 
2573 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2574 	key.type = BTRFS_DEV_ITEM_KEY;
2575 	key.offset = device->devid;
2576 
2577 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2578 	if (ret < 0)
2579 		goto out;
2580 
2581 	if (ret > 0) {
2582 		ret = -ENOENT;
2583 		goto out;
2584 	}
2585 
2586 	leaf = path->nodes[0];
2587 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2588 
2589 	btrfs_set_device_id(leaf, dev_item, device->devid);
2590 	btrfs_set_device_type(leaf, dev_item, device->type);
2591 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2592 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2593 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2594 	btrfs_set_device_total_bytes(leaf, dev_item,
2595 				     btrfs_device_get_disk_total_bytes(device));
2596 	btrfs_set_device_bytes_used(leaf, dev_item,
2597 				    btrfs_device_get_bytes_used(device));
2598 	btrfs_mark_buffer_dirty(leaf);
2599 
2600 out:
2601 	btrfs_free_path(path);
2602 	return ret;
2603 }
2604 
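/*
 * Grow @device to @new_size: update the in-memory sizes, the total_bytes
 * in the superblock and, via btrfs_update_device(), the dev item on
 * disk.  Shrinking is rejected here; that is btrfs_shrink_device()'s job.
 */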
2605 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2606 		      struct btrfs_device *device, u64 new_size)
2607 {
2608 	struct btrfs_super_block *super_copy =
2609 		device->dev_root->fs_info->super_copy;
2610 	struct btrfs_fs_devices *fs_devices;
2611 	u64 old_total;
2612 	u64 diff;
2613 
2614 	if (!device->writeable)
2615 		return -EACCES;
2616 
2617 	lock_chunks(device->dev_root);
2618 	old_total = btrfs_super_total_bytes(super_copy);
2619 	diff = new_size - device->total_bytes;
2620 
2621 	if (new_size <= device->total_bytes ||
2622 	    device->is_tgtdev_for_dev_replace) {
2623 		unlock_chunks(device->dev_root);
2624 		return -EINVAL;
2625 	}
2626 
2627 	fs_devices = device->dev_root->fs_info->fs_devices;
2628 
2629 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2630 	device->fs_devices->total_rw_bytes += diff;
2631 
2632 	btrfs_device_set_total_bytes(device, new_size);
2633 	btrfs_device_set_disk_total_bytes(device, new_size);
2634 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2635 	if (list_empty(&device->resized_list))
2636 		list_add_tail(&device->resized_list,
2637 			      &fs_devices->resized_devices);
2638 	unlock_chunks(device->dev_root);
2639 
2640 	return btrfs_update_device(trans, device);
2641 }
2642 
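/*
 * Delete the chunk item for (@chunk_objectid, @chunk_offset) from the
 * chunk tree.
 */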
2643 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2644 			    struct btrfs_root *root, u64 chunk_objectid,
2645 			    u64 chunk_offset)
2646 {
2647 	int ret;
2648 	struct btrfs_path *path;
2649 	struct btrfs_key key;
2650 
2651 	root = root->fs_info->chunk_root;
2652 	path = btrfs_alloc_path();
2653 	if (!path)
2654 		return -ENOMEM;
2655 
2656 	key.objectid = chunk_objectid;
2657 	key.offset = chunk_offset;
2658 	key.type = BTRFS_CHUNK_ITEM_KEY;
2659 
2660 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2661 	if (ret < 0)
2662 		goto out;
2663 	else if (ret > 0) { /* Logic error or corruption */
2664 		btrfs_std_error(root->fs_info, -ENOENT,
2665 			    "Failed lookup while freeing chunk.");
2666 		ret = -ENOENT;
2667 		goto out;
2668 	}
2669 
2670 	ret = btrfs_del_item(trans, root, path);
2671 	if (ret < 0)
2672 		btrfs_std_error(root->fs_info, ret,
2673 			    "Failed to delete chunk item.");
2674 out:
2675 	btrfs_free_path(path);
2676 	return ret;
2677 }
2678 
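/*
 * Remove the given chunk from the sys_chunk_array embedded in the
 * superblock copy, shrinking the array in place.
 */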
2679 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2680 			       u64 chunk_offset)
2681 {
2682 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2683 	struct btrfs_disk_key *disk_key;
2684 	struct btrfs_chunk *chunk;
2685 	u8 *ptr;
2686 	int ret = 0;
2687 	u32 num_stripes;
2688 	u32 array_size;
2689 	u32 len = 0;
2690 	u32 cur;
2691 	struct btrfs_key key;
2692 
2693 	lock_chunks(root);
2694 	array_size = btrfs_super_sys_array_size(super_copy);
2695 
2696 	ptr = super_copy->sys_chunk_array;
2697 	cur = 0;
2698 
2699 	while (cur < array_size) {
2700 		disk_key = (struct btrfs_disk_key *)ptr;
2701 		btrfs_disk_key_to_cpu(&key, disk_key);
2702 
2703 		len = sizeof(*disk_key);
2704 
2705 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2706 			chunk = (struct btrfs_chunk *)(ptr + len);
2707 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2708 			len += btrfs_chunk_item_size(num_stripes);
2709 		} else {
2710 			ret = -EIO;
2711 			break;
2712 		}
2713 		if (key.objectid == chunk_objectid &&
2714 		    key.offset == chunk_offset) {
2715 			memmove(ptr, ptr + len, array_size - (cur + len));
2716 			array_size -= len;
2717 			btrfs_set_super_sys_array_size(super_copy, array_size);
2718 		} else {
2719 			ptr += len;
2720 			cur += len;
2721 		}
2722 	}
2723 	unlock_chunks(root);
2724 	return ret;
2725 }
2726 
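/*
 * Remove a fully relocated chunk: free its dev extents, update the
 * affected devices, delete the chunk item (plus the sys_chunk_array
 * entry for system chunks) and finally remove the block group.
 */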
2727 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2728 		       struct btrfs_root *root, u64 chunk_offset)
2729 {
2730 	struct extent_map_tree *em_tree;
2731 	struct extent_map *em;
2732 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2733 	struct map_lookup *map;
2734 	u64 dev_extent_len = 0;
2735 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2736 	int i, ret = 0;
2737 
2738 	/* Just in case */
2739 	root = root->fs_info->chunk_root;
2740 	em_tree = &root->fs_info->mapping_tree.map_tree;
2741 
2742 	read_lock(&em_tree->lock);
2743 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2744 	read_unlock(&em_tree->lock);
2745 
2746 	if (!em || em->start > chunk_offset ||
2747 	    em->start + em->len < chunk_offset) {
2748 		/*
2749 		 * This is a logic error, but we don't want to just rely on the
2750 		 * user having built with ASSERT enabled, so if ASSERT doesn't
2751 		 * do anything we still error out.
2752 		 */
2753 		ASSERT(0);
2754 		if (em)
2755 			free_extent_map(em);
2756 		return -EINVAL;
2757 	}
2758 	map = (struct map_lookup *)em->bdev;
2759 	lock_chunks(root->fs_info->chunk_root);
2760 	check_system_chunk(trans, extent_root, map->type);
2761 	unlock_chunks(root->fs_info->chunk_root);
2762 
2763 	for (i = 0; i < map->num_stripes; i++) {
2764 		struct btrfs_device *device = map->stripes[i].dev;
2765 		ret = btrfs_free_dev_extent(trans, device,
2766 					    map->stripes[i].physical,
2767 					    &dev_extent_len);
2768 		if (ret) {
2769 			btrfs_abort_transaction(trans, root, ret);
2770 			goto out;
2771 		}
2772 
2773 		if (device->bytes_used > 0) {
2774 			lock_chunks(root);
2775 			btrfs_device_set_bytes_used(device,
2776 					device->bytes_used - dev_extent_len);
2777 			spin_lock(&root->fs_info->free_chunk_lock);
2778 			root->fs_info->free_chunk_space += dev_extent_len;
2779 			spin_unlock(&root->fs_info->free_chunk_lock);
2780 			btrfs_clear_space_info_full(root->fs_info);
2781 			unlock_chunks(root);
2782 		}
2783 
2784 		if (map->stripes[i].dev) {
2785 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2786 			if (ret) {
2787 				btrfs_abort_transaction(trans, root, ret);
2788 				goto out;
2789 			}
2790 		}
2791 	}
2792 	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
2793 	if (ret) {
2794 		btrfs_abort_transaction(trans, root, ret);
2795 		goto out;
2796 	}
2797 
2798 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2799 
2800 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2801 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2802 		if (ret) {
2803 			btrfs_abort_transaction(trans, root, ret);
2804 			goto out;
2805 		}
2806 	}
2807 
2808 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2809 	if (ret) {
2810 		btrfs_abort_transaction(trans, extent_root, ret);
2811 		goto out;
2812 	}
2813 
2814 out:
2815 	/* once for us */
2816 	free_extent_map(em);
2817 	return ret;
2818 }
2819 
2820 static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2821 {
2822 	struct btrfs_root *extent_root;
2823 	struct btrfs_trans_handle *trans;
2824 	int ret;
2825 
2826 	root = root->fs_info->chunk_root;
2827 	extent_root = root->fs_info->extent_root;
2828 
2829 	/*
2830 	 * Prevent races with automatic removal of unused block groups.
2831 	 * After we relocate and before we remove the chunk with offset
2832 	 * chunk_offset, automatic removal of the block group can kick in,
2833 	 * resulting in a failure when calling btrfs_remove_chunk() below.
2834 	 *
2835 	 * Make sure to acquire this mutex before doing a tree search (dev
2836 	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2837 	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2838 	 * we release the path used to search the chunk/dev tree and before
2839 	 * the current task acquires this mutex and calls us.
2840 	 */
2841 	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2842 
2843 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2844 	if (ret)
2845 		return -ENOSPC;
2846 
2847 	/* step one, relocate all the extents inside this chunk */
2848 	btrfs_scrub_pause(root);
2849 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2850 	btrfs_scrub_continue(root);
2851 	if (ret)
2852 		return ret;
2853 
2854 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
2855 						     chunk_offset);
2856 	if (IS_ERR(trans)) {
2857 		ret = PTR_ERR(trans);
2858 		btrfs_std_error(root->fs_info, ret, NULL);
2859 		return ret;
2860 	}
2861 
2862 	/*
2863 	 * step two, delete the device extents and the
2864 	 * chunk tree entries
2865 	 */
2866 	ret = btrfs_remove_chunk(trans, root, chunk_offset);
2867 	btrfs_end_transaction(trans, root);
2868 	return ret;
2869 }
2870 
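/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk, e.g.
 * after sprouting a seed filesystem.  Chunks that fail with -ENOSPC are
 * retried once before giving up.
 */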
2871 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2872 {
2873 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2874 	struct btrfs_path *path;
2875 	struct extent_buffer *leaf;
2876 	struct btrfs_chunk *chunk;
2877 	struct btrfs_key key;
2878 	struct btrfs_key found_key;
2879 	u64 chunk_type;
2880 	bool retried = false;
2881 	int failed = 0;
2882 	int ret;
2883 
2884 	path = btrfs_alloc_path();
2885 	if (!path)
2886 		return -ENOMEM;
2887 
2888 again:
2889 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2890 	key.offset = (u64)-1;
2891 	key.type = BTRFS_CHUNK_ITEM_KEY;
2892 
2893 	while (1) {
2894 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2895 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2896 		if (ret < 0) {
2897 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2898 			goto error;
2899 		}
2900 		BUG_ON(ret == 0); /* Corruption */
2901 
2902 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2903 					  key.type);
2904 		if (ret)
2905 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2906 		if (ret < 0)
2907 			goto error;
2908 		if (ret > 0)
2909 			break;
2910 
2911 		leaf = path->nodes[0];
2912 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2913 
2914 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2915 				       struct btrfs_chunk);
2916 		chunk_type = btrfs_chunk_type(leaf, chunk);
2917 		btrfs_release_path(path);
2918 
2919 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2920 			ret = btrfs_relocate_chunk(chunk_root,
2921 						   found_key.offset);
2922 			if (ret == -ENOSPC)
2923 				failed++;
2924 			else
2925 				BUG_ON(ret);
2926 		}
2927 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2928 
2929 		if (found_key.offset == 0)
2930 			break;
2931 		key.offset = found_key.offset - 1;
2932 	}
2933 	ret = 0;
2934 	if (failed && !retried) {
2935 		failed = 0;
2936 		retried = true;
2937 		goto again;
2938 	} else if (WARN_ON(failed && retried)) {
2939 		ret = -ENOSPC;
2940 	}
2941 error:
2942 	btrfs_free_path(path);
2943 	return ret;
2944 }
2945 
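/*
 * Persist the balance control as the balance item in the tree root so
 * that an interrupted balance can be resumed later.
 */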
2946 static int insert_balance_item(struct btrfs_root *root,
2947 			       struct btrfs_balance_control *bctl)
2948 {
2949 	struct btrfs_trans_handle *trans;
2950 	struct btrfs_balance_item *item;
2951 	struct btrfs_disk_balance_args disk_bargs;
2952 	struct btrfs_path *path;
2953 	struct extent_buffer *leaf;
2954 	struct btrfs_key key;
2955 	int ret, err;
2956 
2957 	path = btrfs_alloc_path();
2958 	if (!path)
2959 		return -ENOMEM;
2960 
2961 	trans = btrfs_start_transaction(root, 0);
2962 	if (IS_ERR(trans)) {
2963 		btrfs_free_path(path);
2964 		return PTR_ERR(trans);
2965 	}
2966 
2967 	key.objectid = BTRFS_BALANCE_OBJECTID;
2968 	key.type = BTRFS_BALANCE_ITEM_KEY;
2969 	key.offset = 0;
2970 
2971 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2972 				      sizeof(*item));
2973 	if (ret)
2974 		goto out;
2975 
2976 	leaf = path->nodes[0];
2977 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2978 
2979 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2980 
2981 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2982 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2983 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2984 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2985 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2986 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2987 
2988 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2989 
2990 	btrfs_mark_buffer_dirty(leaf);
2991 out:
2992 	btrfs_free_path(path);
2993 	err = btrfs_commit_transaction(trans, root);
2994 	if (err && !ret)
2995 		ret = err;
2996 	return ret;
2997 }
2998 
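/*
 * Delete the persistent balance item, e.g. when a balance completes or
 * is canceled.
 */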
2999 static int del_balance_item(struct btrfs_root *root)
3000 {
3001 	struct btrfs_trans_handle *trans;
3002 	struct btrfs_path *path;
3003 	struct btrfs_key key;
3004 	int ret, err;
3005 
3006 	path = btrfs_alloc_path();
3007 	if (!path)
3008 		return -ENOMEM;
3009 
3010 	trans = btrfs_start_transaction(root, 0);
3011 	if (IS_ERR(trans)) {
3012 		btrfs_free_path(path);
3013 		return PTR_ERR(trans);
3014 	}
3015 
3016 	key.objectid = BTRFS_BALANCE_OBJECTID;
3017 	key.type = BTRFS_BALANCE_ITEM_KEY;
3018 	key.offset = 0;
3019 
3020 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3021 	if (ret < 0)
3022 		goto out;
3023 	if (ret > 0) {
3024 		ret = -ENOENT;
3025 		goto out;
3026 	}
3027 
3028 	ret = btrfs_del_item(trans, root, path);
3029 out:
3030 	btrfs_free_path(path);
3031 	err = btrfs_commit_transaction(trans, root);
3032 	if (err && !ret)
3033 		ret = err;
3034 	return ret;
3035 }
3036 
3037 /*
3038  * This is a heuristic used to reduce the number of chunks balanced on
3039  * resume after balance was interrupted.
3040  */
3041 static void update_balance_args(struct btrfs_balance_control *bctl)
3042 {
3043 	/*
3044 	 * Turn on soft mode for chunk types that were being converted.
3045 	 */
3046 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3047 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3048 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3049 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3050 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3051 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3052 
3053 	/*
3054 	 * Turn on the usage filter if it is not already used.  The idea is
3055 	 * that chunks that we have already balanced should be
3056 	 * reasonably full.  Don't do it for chunks that are being
3057 	 * converted - that will keep us from relocating unconverted
3058 	 * (albeit full) chunks.
3059 	 */
3060 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3061 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3062 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3063 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3064 		bctl->data.usage = 90;
3065 	}
3066 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3067 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3068 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3069 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3070 		bctl->sys.usage = 90;
3071 	}
3072 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3073 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3074 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3075 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3076 		bctl->meta.usage = 90;
3077 	}
3078 }
3079 
3080 /*
3081  * Should be called with both balance and volume mutexes held to
3082  * serialize other volume operations (add_dev/rm_dev/resize) with
3083  * restriper.  Same goes for unset_balance_control.
3084  */
3085 static void set_balance_control(struct btrfs_balance_control *bctl)
3086 {
3087 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3088 
3089 	BUG_ON(fs_info->balance_ctl);
3090 
3091 	spin_lock(&fs_info->balance_lock);
3092 	fs_info->balance_ctl = bctl;
3093 	spin_unlock(&fs_info->balance_lock);
3094 }
3095 
3096 static void unset_balance_control(struct btrfs_fs_info *fs_info)
3097 {
3098 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3099 
3100 	BUG_ON(!fs_info->balance_ctl);
3101 
3102 	spin_lock(&fs_info->balance_lock);
3103 	fs_info->balance_ctl = NULL;
3104 	spin_unlock(&fs_info->balance_lock);
3105 
3106 	kfree(bctl);
3107 }
3108 
3109 /*
3110  * Balance filters.  Return 1 if chunk should be filtered out
3111  * (should not be balanced).
3112  */
3113 static int chunk_profiles_filter(u64 chunk_type,
3114 				 struct btrfs_balance_args *bargs)
3115 {
3116 	chunk_type = chunk_to_extended(chunk_type) &
3117 				BTRFS_EXTENDED_PROFILE_MASK;
3118 
3119 	if (bargs->profiles & chunk_type)
3120 		return 0;
3121 
3122 	return 1;
3123 }
3124 
3125 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3126 			      struct btrfs_balance_args *bargs)
3127 {
3128 	struct btrfs_block_group_cache *cache;
3129 	u64 chunk_used;
3130 	u64 user_thresh_min;
3131 	u64 user_thresh_max;
3132 	int ret = 1;
3133 
3134 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3135 	chunk_used = btrfs_block_group_used(&cache->item);
3136 
3137 	if (bargs->usage_min == 0)
3138 		user_thresh_min = 0;
3139 	else
3140 		user_thresh_min = div_factor_fine(cache->key.offset,
3141 					bargs->usage_min);
3142 
3143 	if (bargs->usage_max == 0)
3144 		user_thresh_max = 1;
3145 	else if (bargs->usage_max > 100)
3146 		user_thresh_max = cache->key.offset;
3147 	else
3148 		user_thresh_max = div_factor_fine(cache->key.offset,
3149 					bargs->usage_max);
3150 
3151 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3152 		ret = 0;
3153 
3154 	btrfs_put_block_group(cache);
3155 	return ret;
3156 }
3157 
3158 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3159 		u64 chunk_offset, struct btrfs_balance_args *bargs)
3160 {
3161 	struct btrfs_block_group_cache *cache;
3162 	u64 chunk_used, user_thresh;
3163 	int ret = 1;
3164 
3165 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3166 	chunk_used = btrfs_block_group_used(&cache->item);
3167 
3168 	if (bargs->usage_min == 0)
3169 		user_thresh = 1;
3170 	else if (bargs->usage > 100)
3171 		user_thresh = cache->key.offset;
3172 	else
3173 		user_thresh = div_factor_fine(cache->key.offset,
3174 					      bargs->usage);
3175 
3176 	if (chunk_used < user_thresh)
3177 		ret = 0;
3178 
3179 	btrfs_put_block_group(cache);
3180 	return ret;
3181 }
3182 
3183 static int chunk_devid_filter(struct extent_buffer *leaf,
3184 			      struct btrfs_chunk *chunk,
3185 			      struct btrfs_balance_args *bargs)
3186 {
3187 	struct btrfs_stripe *stripe;
3188 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3189 	int i;
3190 
3191 	for (i = 0; i < num_stripes; i++) {
3192 		stripe = btrfs_stripe_nr(chunk, i);
3193 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3194 			return 0;
3195 	}
3196 
3197 	return 1;
3198 }
3199 
3200 /* [pstart, pend) */
3201 static int chunk_drange_filter(struct extent_buffer *leaf,
3202 			       struct btrfs_chunk *chunk,
3203 			       u64 chunk_offset,
3204 			       struct btrfs_balance_args *bargs)
3205 {
3206 	struct btrfs_stripe *stripe;
3207 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3208 	u64 stripe_offset;
3209 	u64 stripe_length;
3210 	int factor;
3211 	int i;
3212 
3213 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3214 		return 0;
3215 
3216 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3217 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3218 		factor = num_stripes / 2;
3219 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3220 		factor = num_stripes - 1;
3221 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3222 		factor = num_stripes - 2;
3223 	} else {
3224 		factor = num_stripes;
3225 	}
3226 
3227 	for (i = 0; i < num_stripes; i++) {
3228 		stripe = btrfs_stripe_nr(chunk, i);
3229 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3230 			continue;
3231 
3232 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
3233 		stripe_length = btrfs_chunk_length(leaf, chunk);
3234 		stripe_length = div_u64(stripe_length, factor);
3235 
3236 		if (stripe_offset < bargs->pend &&
3237 		    stripe_offset + stripe_length > bargs->pstart)
3238 			return 0;
3239 	}
3240 
3241 	return 1;
3242 }
3243 
3244 /* [vstart, vend) */
3245 static int chunk_vrange_filter(struct extent_buffer *leaf,
3246 			       struct btrfs_chunk *chunk,
3247 			       u64 chunk_offset,
3248 			       struct btrfs_balance_args *bargs)
3249 {
3250 	if (chunk_offset < bargs->vend &&
3251 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3252 		/* at least part of the chunk is inside this vrange */
3253 		return 0;
3254 
3255 	return 1;
3256 }
3257 
3258 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3259 			       struct btrfs_chunk *chunk,
3260 			       struct btrfs_balance_args *bargs)
3261 {
3262 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3263 
3264 	if (bargs->stripes_min <= num_stripes &&
3265 	    num_stripes <= bargs->stripes_max)
3266 		return 0;
3267 
3268 	return 1;
3269 }
3270 
3271 static int chunk_soft_convert_filter(u64 chunk_type,
3272 				     struct btrfs_balance_args *bargs)
3273 {
3274 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3275 		return 0;
3276 
3277 	chunk_type = chunk_to_extended(chunk_type) &
3278 				BTRFS_EXTENDED_PROFILE_MASK;
3279 
3280 	if (bargs->target == chunk_type)
3281 		return 1;
3282 
3283 	return 0;
3284 }
3285 
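/*
 * Apply the configured balance filters to a single chunk.  Returns 1 if
 * the chunk should be relocated and 0 if it is filtered out.
 */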
3286 static int should_balance_chunk(struct btrfs_root *root,
3287 				struct extent_buffer *leaf,
3288 				struct btrfs_chunk *chunk, u64 chunk_offset)
3289 {
3290 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3291 	struct btrfs_balance_args *bargs = NULL;
3292 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3293 
3294 	/* type filter */
3295 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3296 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3297 		return 0;
3298 	}
3299 
3300 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3301 		bargs = &bctl->data;
3302 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3303 		bargs = &bctl->sys;
3304 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3305 		bargs = &bctl->meta;
3306 
3307 	/* profiles filter */
3308 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3309 	    chunk_profiles_filter(chunk_type, bargs)) {
3310 		return 0;
3311 	}
3312 
3313 	/* usage filter */
3314 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3315 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
3316 		return 0;
3317 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3318 	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
3319 		return 0;
3320 	}
3321 
3322 	/* devid filter */
3323 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3324 	    chunk_devid_filter(leaf, chunk, bargs)) {
3325 		return 0;
3326 	}
3327 
3328 	/* drange filter, makes sense only with devid filter */
3329 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3330 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
3331 		return 0;
3332 	}
3333 
3334 	/* vrange filter */
3335 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3336 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3337 		return 0;
3338 	}
3339 
3340 	/* stripes filter */
3341 	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3342 	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
3343 		return 0;
3344 	}
3345 
3346 	/* soft profile changing mode */
3347 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3348 	    chunk_soft_convert_filter(chunk_type, bargs)) {
3349 		return 0;
3350 	}
3351 
3352 	/*
3353 	 * limited by count, must be the last filter
3354 	 */
3355 	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3356 		if (bargs->limit == 0)
3357 			return 0;
3358 		else
3359 			bargs->limit--;
3360 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3361 		/*
3362 		 * Same logic as the 'limit' filter; the minimum cannot be
3363 		 * determined here because we do not have the global information
3364 		 * about the count of all chunks that satisfy the filters.
3365 		 */
3366 		if (bargs->limit_max == 0)
3367 			return 0;
3368 		else
3369 			bargs->limit_max--;
3370 	}
3371 
3372 	return 1;
3373 }
3374 
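/*
 * The main balance worker: after making some room on the devices, the
 * chunk tree is walked twice, first a counting pass that feeds the stat
 * counters and the limit_min heuristic, then a second pass that
 * relocates every chunk accepted by should_balance_chunk().
 */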
3375 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3376 {
3377 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3378 	struct btrfs_root *chunk_root = fs_info->chunk_root;
3379 	struct btrfs_root *dev_root = fs_info->dev_root;
3380 	struct list_head *devices;
3381 	struct btrfs_device *device;
3382 	u64 old_size;
3383 	u64 size_to_free;
3384 	u64 chunk_type;
3385 	struct btrfs_chunk *chunk;
3386 	struct btrfs_path *path;
3387 	struct btrfs_key key;
3388 	struct btrfs_key found_key;
3389 	struct btrfs_trans_handle *trans;
3390 	struct extent_buffer *leaf;
3391 	int slot;
3392 	int ret;
3393 	int enospc_errors = 0;
3394 	bool counting = true;
3395 	/* The single value limit and min/max limits share bytes in the args union */
3396 	u64 limit_data = bctl->data.limit;
3397 	u64 limit_meta = bctl->meta.limit;
3398 	u64 limit_sys = bctl->sys.limit;
3399 	u32 count_data = 0;
3400 	u32 count_meta = 0;
3401 	u32 count_sys = 0;
3402 	int chunk_reserved = 0;
3403 
3404 	/* step one, make some room on all the devices */
3405 	devices = &fs_info->fs_devices->devices;
3406 	list_for_each_entry(device, devices, dev_list) {
3407 		old_size = btrfs_device_get_total_bytes(device);
3408 		size_to_free = div_factor(old_size, 1);
3409 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
3410 		if (!device->writeable ||
3411 		    btrfs_device_get_total_bytes(device) -
3412 		    btrfs_device_get_bytes_used(device) > size_to_free ||
3413 		    device->is_tgtdev_for_dev_replace)
3414 			continue;
3415 
3416 		ret = btrfs_shrink_device(device, old_size - size_to_free);
3417 		if (ret == -ENOSPC)
3418 			break;
3419 		BUG_ON(ret);
3420 
3421 		trans = btrfs_start_transaction(dev_root, 0);
3422 		BUG_ON(IS_ERR(trans));
3423 
3424 		ret = btrfs_grow_device(trans, device, old_size);
3425 		BUG_ON(ret);
3426 
3427 		btrfs_end_transaction(trans, dev_root);
3428 	}
3429 
3430 	/* step two, relocate all the chunks */
3431 	path = btrfs_alloc_path();
3432 	if (!path) {
3433 		ret = -ENOMEM;
3434 		goto error;
3435 	}
3436 
3437 	/* zero out stat counters */
3438 	spin_lock(&fs_info->balance_lock);
3439 	memset(&bctl->stat, 0, sizeof(bctl->stat));
3440 	spin_unlock(&fs_info->balance_lock);
3441 again:
3442 	if (!counting) {
3443 		/*
3444 		 * The single value limit and min/max limits share bytes in
3445 		 * the args union; restore the saved single-value limits here
3446 		 */
3447 		bctl->data.limit = limit_data;
3448 		bctl->meta.limit = limit_meta;
3449 		bctl->sys.limit = limit_sys;
3450 	}
3451 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3452 	key.offset = (u64)-1;
3453 	key.type = BTRFS_CHUNK_ITEM_KEY;
3454 
3455 	while (1) {
3456 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3457 		    atomic_read(&fs_info->balance_cancel_req)) {
3458 			ret = -ECANCELED;
3459 			goto error;
3460 		}
3461 
3462 		mutex_lock(&fs_info->delete_unused_bgs_mutex);
3463 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3464 		if (ret < 0) {
3465 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3466 			goto error;
3467 		}
3468 
3469 		/*
3470 		 * this shouldn't happen; it means the last relocate
3471 		 * failed
3472 		 */
3473 		if (ret == 0)
3474 			BUG(); /* FIXME break ? */
3475 
3476 		ret = btrfs_previous_item(chunk_root, path, 0,
3477 					  BTRFS_CHUNK_ITEM_KEY);
3478 		if (ret) {
3479 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3480 			ret = 0;
3481 			break;
3482 		}
3483 
3484 		leaf = path->nodes[0];
3485 		slot = path->slots[0];
3486 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3487 
3488 		if (found_key.objectid != key.objectid) {
3489 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3490 			break;
3491 		}
3492 
3493 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3494 		chunk_type = btrfs_chunk_type(leaf, chunk);
3495 
3496 		if (!counting) {
3497 			spin_lock(&fs_info->balance_lock);
3498 			bctl->stat.considered++;
3499 			spin_unlock(&fs_info->balance_lock);
3500 		}
3501 
3502 		ret = should_balance_chunk(chunk_root, leaf, chunk,
3503 					   found_key.offset);
3504 
3505 		btrfs_release_path(path);
3506 		if (!ret) {
3507 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3508 			goto loop;
3509 		}
3510 
3511 		if (counting) {
3512 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3513 			spin_lock(&fs_info->balance_lock);
3514 			bctl->stat.expected++;
3515 			spin_unlock(&fs_info->balance_lock);
3516 
3517 			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3518 				count_data++;
3519 			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3520 				count_sys++;
3521 			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3522 				count_meta++;
3523 
3524 			goto loop;
3525 		}
3526 
3527 		/*
3528 		 * Apply limit_min filter, no need to check if the LIMITS
3529 		 * filter is used, limit_min is 0 by default
3530 		 */
3531 		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3532 					count_data < bctl->data.limit_min)
3533 				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3534 					count_meta < bctl->meta.limit_min)
3535 				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3536 					count_sys < bctl->sys.limit_min)) {
3537 			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3538 			goto loop;
3539 		}
3540 
3541 		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
3542 			trans = btrfs_start_transaction(chunk_root, 0);
3543 			if (IS_ERR(trans)) {
3544 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3545 				ret = PTR_ERR(trans);
3546 				goto error;
3547 			}
3548 
3549 			ret = btrfs_force_chunk_alloc(trans, chunk_root,
3550 						      BTRFS_BLOCK_GROUP_DATA);
3551 			btrfs_end_transaction(trans, chunk_root);
3552 			if (ret < 0) {
3553 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3554 				goto error;
3555 			}
3556 			chunk_reserved = 1;
3557 		}
3558 
3559 		ret = btrfs_relocate_chunk(chunk_root,
3560 					   found_key.offset);
3561 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3562 		if (ret && ret != -ENOSPC)
3563 			goto error;
3564 		if (ret == -ENOSPC) {
3565 			enospc_errors++;
3566 		} else {
3567 			spin_lock(&fs_info->balance_lock);
3568 			bctl->stat.completed++;
3569 			spin_unlock(&fs_info->balance_lock);
3570 		}
3571 loop:
3572 		if (found_key.offset == 0)
3573 			break;
3574 		key.offset = found_key.offset - 1;
3575 	}
3576 
3577 	if (counting) {
3578 		btrfs_release_path(path);
3579 		counting = false;
3580 		goto again;
3581 	}
3582 error:
3583 	btrfs_free_path(path);
3584 	if (enospc_errors) {
3585 		btrfs_info(fs_info, "%d enospc errors during balance",
3586 		       enospc_errors);
3587 		if (!ret)
3588 			ret = -ENOSPC;
3589 	}
3590 
3591 	return ret;
3592 }
3593 
3594 /**
3595  * alloc_profile_is_valid - see if a given profile is valid and reduced
3596  * @flags: profile to validate
3597  * @extended: if true @flags is treated as an extended profile
3598  */
3599 static int alloc_profile_is_valid(u64 flags, int extended)
3600 {
3601 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3602 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3603 
3604 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3605 
3606 	/* 1) check that all other bits are zeroed */
3607 	if (flags & ~mask)
3608 		return 0;
3609 
3610 	/* 2) see if profile is reduced */
3611 	if (flags == 0)
3612 		return !extended; /* "0" is valid for usual profiles */
3613 
3614 	/* true if exactly one bit set */
3615 	return (flags & (flags - 1)) == 0;
3616 }
3617 
3618 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3619 {
3620 	/* cancel requested || normal exit path */
3621 	return atomic_read(&fs_info->balance_cancel_req) ||
3622 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3623 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3624 }
3625 
3626 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3627 {
3628 	int ret;
3629 
3630 	unset_balance_control(fs_info);
3631 	ret = del_balance_item(fs_info->tree_root);
3632 	if (ret)
3633 		btrfs_std_error(fs_info, ret, NULL);
3634 
3635 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3636 }
3637 
3638 /* Non-zero return value signifies invalidity */
3639 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3640 		u64 allowed)
3641 {
3642 	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3643 		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
3644 		 (bctl_arg->target & ~allowed)));
3645 }
3646 
3647 /*
3648  * Should be called with both balance and volume mutexes held
3649  */
3650 int btrfs_balance(struct btrfs_balance_control *bctl,
3651 		  struct btrfs_ioctl_balance_args *bargs)
3652 {
3653 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3654 	u64 allowed;
3655 	int mixed = 0;
3656 	int ret;
3657 	u64 num_devices;
3658 	unsigned seq;
3659 
3660 	if (btrfs_fs_closing(fs_info) ||
3661 	    atomic_read(&fs_info->balance_pause_req) ||
3662 	    atomic_read(&fs_info->balance_cancel_req)) {
3663 		ret = -EINVAL;
3664 		goto out;
3665 	}
3666 
3667 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3668 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3669 		mixed = 1;
3670 
3671 	/*
3672 	 * In case of mixed groups both data and meta should be picked,
3673 	 * and identical options should be given for both of them.
3674 	 */
3675 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3676 	if (mixed && (bctl->flags & allowed)) {
3677 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3678 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3679 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3680 			btrfs_err(fs_info, "with mixed groups data and "
3681 				   "metadata balance options must be the same");
3682 			ret = -EINVAL;
3683 			goto out;
3684 		}
3685 	}
3686 
3687 	num_devices = fs_info->fs_devices->num_devices;
3688 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3689 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3690 		BUG_ON(num_devices < 1);
3691 		num_devices--;
3692 	}
3693 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3694 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3695 	if (num_devices == 1)
3696 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3697 	else if (num_devices > 1)
3698 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3699 	if (num_devices > 2)
3700 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
3701 	if (num_devices > 3)
3702 		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3703 			    BTRFS_BLOCK_GROUP_RAID6);
3704 	if (validate_convert_profile(&bctl->data, allowed)) {
3705 		btrfs_err(fs_info,
3706 			   "unable to start balance with target data profile %llu",
3707 		       bctl->data.target);
3708 		ret = -EINVAL;
3709 		goto out;
3710 	}
3711 	if (validate_convert_profile(&bctl->meta, allowed)) {
3712 		btrfs_err(fs_info,
3713 			   "unable to start balance with target metadata profile %llu",
3714 		       bctl->meta.target);
3715 		ret = -EINVAL;
3716 		goto out;
3717 	}
3718 	if (validate_convert_profile(&bctl->sys, allowed)) {
3719 		btrfs_err(fs_info,
3720 			   "unable to start balance with target system profile %llu",
3721 		       bctl->sys.target);
3722 		ret = -EINVAL;
3723 		goto out;
3724 	}
3725 
3726 	/* allow dup'ed data chunks only in mixed mode */
3727 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3728 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3729 		btrfs_err(fs_info, "dup for data is not allowed");
3730 		ret = -EINVAL;
3731 		goto out;
3732 	}
3733 
3734 	/* allow to reduce meta or sys integrity only if force set */
3735 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3736 			BTRFS_BLOCK_GROUP_RAID10 |
3737 			BTRFS_BLOCK_GROUP_RAID5 |
3738 			BTRFS_BLOCK_GROUP_RAID6;
3739 	do {
3740 		seq = read_seqbegin(&fs_info->profiles_lock);
3741 
3742 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3743 		     (fs_info->avail_system_alloc_bits & allowed) &&
3744 		     !(bctl->sys.target & allowed)) ||
3745 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3746 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3747 		     !(bctl->meta.target & allowed))) {
3748 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3749 				btrfs_info(fs_info, "force reducing metadata integrity");
3750 			} else {
3751 				btrfs_err(fs_info,
3752 					  "balance will reduce metadata integrity, use force if you want this");
3753 				ret = -EINVAL;
3754 				goto out;
3755 			}
3756 		}
3757 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3758 
3759 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3760 		fs_info->num_tolerated_disk_barrier_failures = min(
3761 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
3762 			btrfs_get_num_tolerated_disk_barrier_failures(
3763 				bctl->sys.target));
3764 	}
3765 
3766 	ret = insert_balance_item(fs_info->tree_root, bctl);
3767 	if (ret && ret != -EEXIST)
3768 		goto out;
3769 
3770 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3771 		BUG_ON(ret == -EEXIST);
3772 		set_balance_control(bctl);
3773 	} else {
3774 		BUG_ON(ret != -EEXIST);
3775 		spin_lock(&fs_info->balance_lock);
3776 		update_balance_args(bctl);
3777 		spin_unlock(&fs_info->balance_lock);
3778 	}
3779 
3780 	atomic_inc(&fs_info->balance_running);
3781 	mutex_unlock(&fs_info->balance_mutex);
3782 
3783 	ret = __btrfs_balance(fs_info);
3784 
3785 	mutex_lock(&fs_info->balance_mutex);
3786 	atomic_dec(&fs_info->balance_running);
3787 
3788 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3789 		fs_info->num_tolerated_disk_barrier_failures =
3790 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3791 	}
3792 
3793 	if (bargs) {
3794 		memset(bargs, 0, sizeof(*bargs));
3795 		update_ioctl_balance_args(fs_info, 0, bargs);
3796 	}
3797 
3798 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3799 	    balance_need_close(fs_info)) {
3800 		__cancel_balance(fs_info);
3801 	}
3802 
3803 	wake_up(&fs_info->balance_wait_q);
3804 
3805 	return ret;
3806 out:
3807 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3808 		__cancel_balance(fs_info);
3809 	else {
3810 		kfree(bctl);
3811 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3812 	}
3813 	return ret;
3814 }
3815 
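/*
 * Worker for btrfs_resume_balance_async(): retakes the volume and balance
 * mutexes and continues the balance if a balance control is present.
 */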
3816 static int balance_kthread(void *data)
3817 {
3818 	struct btrfs_fs_info *fs_info = data;
3819 	int ret = 0;
3820 
3821 	mutex_lock(&fs_info->volume_mutex);
3822 	mutex_lock(&fs_info->balance_mutex);
3823 
3824 	if (fs_info->balance_ctl) {
3825 		btrfs_info(fs_info, "continuing balance");
3826 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3827 	}
3828 
3829 	mutex_unlock(&fs_info->balance_mutex);
3830 	mutex_unlock(&fs_info->volume_mutex);
3831 
3832 	return ret;
3833 }
3834 
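/*
 * Spawn a kthread to continue a balance recovered from disk, unless the
 * skip_balance mount option was given.
 */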
3835 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3836 {
3837 	struct task_struct *tsk;
3838 
3839 	spin_lock(&fs_info->balance_lock);
3840 	if (!fs_info->balance_ctl) {
3841 		spin_unlock(&fs_info->balance_lock);
3842 		return 0;
3843 	}
3844 	spin_unlock(&fs_info->balance_lock);
3845 
3846 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3847 		btrfs_info(fs_info, "force skipping balance");
3848 		return 0;
3849 	}
3850 
3851 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3852 	return PTR_ERR_OR_ZERO(tsk);
3853 }
3854 
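/*
 * Read the balance item left behind by an interrupted balance from the tree
 * root and re-create the in-memory balance control with the RESUME flag set.
 * The balance itself is restarted later via btrfs_resume_balance_async().
 */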
3855 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3856 {
3857 	struct btrfs_balance_control *bctl;
3858 	struct btrfs_balance_item *item;
3859 	struct btrfs_disk_balance_args disk_bargs;
3860 	struct btrfs_path *path;
3861 	struct extent_buffer *leaf;
3862 	struct btrfs_key key;
3863 	int ret;
3864 
3865 	path = btrfs_alloc_path();
3866 	if (!path)
3867 		return -ENOMEM;
3868 
3869 	key.objectid = BTRFS_BALANCE_OBJECTID;
3870 	key.type = BTRFS_BALANCE_ITEM_KEY;
3871 	key.offset = 0;
3872 
3873 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3874 	if (ret < 0)
3875 		goto out;
3876 	if (ret > 0) { /* ret = -ENOENT; */
3877 		ret = 0;
3878 		goto out;
3879 	}
3880 
3881 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3882 	if (!bctl) {
3883 		ret = -ENOMEM;
3884 		goto out;
3885 	}
3886 
3887 	leaf = path->nodes[0];
3888 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3889 
3890 	bctl->fs_info = fs_info;
3891 	bctl->flags = btrfs_balance_flags(leaf, item);
3892 	bctl->flags |= BTRFS_BALANCE_RESUME;
3893 
3894 	btrfs_balance_data(leaf, item, &disk_bargs);
3895 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3896 	btrfs_balance_meta(leaf, item, &disk_bargs);
3897 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3898 	btrfs_balance_sys(leaf, item, &disk_bargs);
3899 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3900 
3901 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3902 
3903 	mutex_lock(&fs_info->volume_mutex);
3904 	mutex_lock(&fs_info->balance_mutex);
3905 
3906 	set_balance_control(bctl);
3907 
3908 	mutex_unlock(&fs_info->balance_mutex);
3909 	mutex_unlock(&fs_info->volume_mutex);
3910 out:
3911 	btrfs_free_path(path);
3912 	return ret;
3913 }
3914 
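/*
 * Pause a running balance: raise balance_pause_req and wait until the
 * balance thread drops balance_running. Returns -ENOTCONN if no balance
 * is in progress.
 */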
3915 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3916 {
3917 	int ret = 0;
3918 
3919 	mutex_lock(&fs_info->balance_mutex);
3920 	if (!fs_info->balance_ctl) {
3921 		mutex_unlock(&fs_info->balance_mutex);
3922 		return -ENOTCONN;
3923 	}
3924 
3925 	if (atomic_read(&fs_info->balance_running)) {
3926 		atomic_inc(&fs_info->balance_pause_req);
3927 		mutex_unlock(&fs_info->balance_mutex);
3928 
3929 		wait_event(fs_info->balance_wait_q,
3930 			   atomic_read(&fs_info->balance_running) == 0);
3931 
3932 		mutex_lock(&fs_info->balance_mutex);
3933 		/* it is fine if balance_ctl was torn down from under us while waiting */
3934 		BUG_ON(atomic_read(&fs_info->balance_running));
3935 		atomic_dec(&fs_info->balance_pause_req);
3936 	} else {
3937 		ret = -ENOTCONN;
3938 	}
3939 
3940 	mutex_unlock(&fs_info->balance_mutex);
3941 	return ret;
3942 }
3943 
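/*
 * Cancel a balance. If one is running, wait for it to notice the cancel
 * request and finish; otherwise tear down the balance state directly.
 */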
3944 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3945 {
3946 	if (fs_info->sb->s_flags & MS_RDONLY)
3947 		return -EROFS;
3948 
3949 	mutex_lock(&fs_info->balance_mutex);
3950 	if (!fs_info->balance_ctl) {
3951 		mutex_unlock(&fs_info->balance_mutex);
3952 		return -ENOTCONN;
3953 	}
3954 
3955 	atomic_inc(&fs_info->balance_cancel_req);
3956 	/*
3957 	 * If we are running, just wait and return; the balance item is
3958 	 * deleted in btrfs_balance() in this case.
3959 	 */
3960 	if (atomic_read(&fs_info->balance_running)) {
3961 		mutex_unlock(&fs_info->balance_mutex);
3962 		wait_event(fs_info->balance_wait_q,
3963 			   atomic_read(&fs_info->balance_running) == 0);
3964 		mutex_lock(&fs_info->balance_mutex);
3965 	} else {
3966 		/* __cancel_balance needs volume_mutex */
3967 		mutex_unlock(&fs_info->balance_mutex);
3968 		mutex_lock(&fs_info->volume_mutex);
3969 		mutex_lock(&fs_info->balance_mutex);
3970 
3971 		if (fs_info->balance_ctl)
3972 			__cancel_balance(fs_info);
3973 
3974 		mutex_unlock(&fs_info->volume_mutex);
3975 	}
3976 
3977 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3978 	atomic_dec(&fs_info->balance_cancel_req);
3979 	mutex_unlock(&fs_info->balance_mutex);
3980 	return 0;
3981 }
3982 
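/*
 * Walk all root items in the tree root and add UUID tree entries for every
 * subvolume with a non-empty uuid or received_uuid. Releases
 * uuid_tree_rescan_sem when done.
 */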
3983 static int btrfs_uuid_scan_kthread(void *data)
3984 {
3985 	struct btrfs_fs_info *fs_info = data;
3986 	struct btrfs_root *root = fs_info->tree_root;
3987 	struct btrfs_key key;
3988 	struct btrfs_key max_key;
3989 	struct btrfs_path *path = NULL;
3990 	int ret = 0;
3991 	struct extent_buffer *eb;
3992 	int slot;
3993 	struct btrfs_root_item root_item;
3994 	u32 item_size;
3995 	struct btrfs_trans_handle *trans = NULL;
3996 
3997 	path = btrfs_alloc_path();
3998 	if (!path) {
3999 		ret = -ENOMEM;
4000 		goto out;
4001 	}
4002 
4003 	key.objectid = 0;
4004 	key.type = BTRFS_ROOT_ITEM_KEY;
4005 	key.offset = 0;
4006 
4007 	max_key.objectid = (u64)-1;
4008 	max_key.type = BTRFS_ROOT_ITEM_KEY;
4009 	max_key.offset = (u64)-1;
4010 
4011 	while (1) {
4012 		ret = btrfs_search_forward(root, &key, path, 0);
4013 		if (ret) {
4014 			if (ret > 0)
4015 				ret = 0;
4016 			break;
4017 		}
4018 
4019 		if (key.type != BTRFS_ROOT_ITEM_KEY ||
4020 		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4021 		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4022 		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
4023 			goto skip;
4024 
4025 		eb = path->nodes[0];
4026 		slot = path->slots[0];
4027 		item_size = btrfs_item_size_nr(eb, slot);
4028 		if (item_size < sizeof(root_item))
4029 			goto skip;
4030 
4031 		read_extent_buffer(eb, &root_item,
4032 				   btrfs_item_ptr_offset(eb, slot),
4033 				   (int)sizeof(root_item));
4034 		if (btrfs_root_refs(&root_item) == 0)
4035 			goto skip;
4036 
4037 		if (!btrfs_is_empty_uuid(root_item.uuid) ||
4038 		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
4039 			if (trans)
4040 				goto update_tree;
4041 
4042 			btrfs_release_path(path);
4043 			/*
4044 			 * 1 - subvol uuid item
4045 			 * 1 - received_subvol uuid item
4046 			 */
4047 			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4048 			if (IS_ERR(trans)) {
4049 				ret = PTR_ERR(trans);
4050 				break;
4051 			}
4052 			continue;
4053 		} else {
4054 			goto skip;
4055 		}
4056 update_tree:
4057 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
4058 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4059 						  root_item.uuid,
4060 						  BTRFS_UUID_KEY_SUBVOL,
4061 						  key.objectid);
4062 			if (ret < 0) {
4063 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4064 					ret);
4065 				break;
4066 			}
4067 		}
4068 
4069 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4070 			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
4071 						  root_item.received_uuid,
4072 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4073 						  key.objectid);
4074 			if (ret < 0) {
4075 				btrfs_warn(fs_info, "uuid_tree_add failed %d",
4076 					ret);
4077 				break;
4078 			}
4079 		}
4080 
4081 skip:
4082 		if (trans) {
4083 			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
4084 			trans = NULL;
4085 			if (ret)
4086 				break;
4087 		}
4088 
4089 		btrfs_release_path(path);
4090 		if (key.offset < (u64)-1) {
4091 			key.offset++;
4092 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4093 			key.offset = 0;
4094 			key.type = BTRFS_ROOT_ITEM_KEY;
4095 		} else if (key.objectid < (u64)-1) {
4096 			key.offset = 0;
4097 			key.type = BTRFS_ROOT_ITEM_KEY;
4098 			key.objectid++;
4099 		} else {
4100 			break;
4101 		}
4102 		cond_resched();
4103 	}
4104 
4105 out:
4106 	btrfs_free_path(path);
4107 	if (trans && !IS_ERR(trans))
4108 		btrfs_end_transaction(trans, fs_info->uuid_root);
4109 	if (ret)
4110 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4111 	else
4112 		fs_info->update_uuid_tree_gen = 1;
4113 	up(&fs_info->uuid_tree_rescan_sem);
4114 	return 0;
4115 }
4116 
4117 /*
4118  * Callback for btrfs_uuid_tree_iterate().
4119  * returns:
4120  * 0	check succeeded, the entry is not outdated.
4121  * < 0	if an error occurred.
4122  * > 0	if the check failed, which means the caller shall remove the entry.
4123  */
4124 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4125 				       u8 *uuid, u8 type, u64 subid)
4126 {
4127 	struct btrfs_key key;
4128 	int ret = 0;
4129 	struct btrfs_root *subvol_root;
4130 
4131 	if (type != BTRFS_UUID_KEY_SUBVOL &&
4132 	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4133 		goto out;
4134 
4135 	key.objectid = subid;
4136 	key.type = BTRFS_ROOT_ITEM_KEY;
4137 	key.offset = (u64)-1;
4138 	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4139 	if (IS_ERR(subvol_root)) {
4140 		ret = PTR_ERR(subvol_root);
4141 		if (ret == -ENOENT)
4142 			ret = 1;
4143 		goto out;
4144 	}
4145 
4146 	switch (type) {
4147 	case BTRFS_UUID_KEY_SUBVOL:
4148 		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4149 			ret = 1;
4150 		break;
4151 	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4152 		if (memcmp(uuid, subvol_root->root_item.received_uuid,
4153 			   BTRFS_UUID_SIZE))
4154 			ret = 1;
4155 		break;
4156 	}
4157 
4158 out:
4159 	return ret;
4160 }
4161 
4162 static int btrfs_uuid_rescan_kthread(void *data)
4163 {
4164 	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4165 	int ret;
4166 
4167 	/*
4168 	 * 1st step is to iterate through the existing UUID tree and
4169 	 * to delete all entries that contain outdated data.
4170 	 * 2nd step is to add all missing entries to the UUID tree.
4171 	 */
4172 	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4173 	if (ret < 0) {
4174 		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4175 		up(&fs_info->uuid_tree_rescan_sem);
4176 		return ret;
4177 	}
4178 	return btrfs_uuid_scan_kthread(data);
4179 }
4180 
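/*
 * Create the UUID tree and start a kthread that populates it by scanning
 * all existing subvolumes.
 */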
4181 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4182 {
4183 	struct btrfs_trans_handle *trans;
4184 	struct btrfs_root *tree_root = fs_info->tree_root;
4185 	struct btrfs_root *uuid_root;
4186 	struct task_struct *task;
4187 	int ret;
4188 
4189 	/*
4190 	 * 1 - root node
4191 	 * 1 - root item
4192 	 */
4193 	trans = btrfs_start_transaction(tree_root, 2);
4194 	if (IS_ERR(trans))
4195 		return PTR_ERR(trans);
4196 
4197 	uuid_root = btrfs_create_tree(trans, fs_info,
4198 				      BTRFS_UUID_TREE_OBJECTID);
4199 	if (IS_ERR(uuid_root)) {
4200 		ret = PTR_ERR(uuid_root);
4201 		btrfs_abort_transaction(trans, tree_root, ret);
4202 		return ret;
4203 	}
4204 
4205 	fs_info->uuid_root = uuid_root;
4206 
4207 	ret = btrfs_commit_transaction(trans, tree_root);
4208 	if (ret)
4209 		return ret;
4210 
4211 	down(&fs_info->uuid_tree_rescan_sem);
4212 	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4213 	if (IS_ERR(task)) {
4214 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4215 		btrfs_warn(fs_info, "failed to start uuid_scan task");
4216 		up(&fs_info->uuid_tree_rescan_sem);
4217 		return PTR_ERR(task);
4218 	}
4219 
4220 	return 0;
4221 }
4222 
4223 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4224 {
4225 	struct task_struct *task;
4226 
4227 	down(&fs_info->uuid_tree_rescan_sem);
4228 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4229 	if (IS_ERR(task)) {
4230 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4231 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
4232 		up(&fs_info->uuid_tree_rescan_sem);
4233 		return PTR_ERR(task);
4234 	}
4235 
4236 	return 0;
4237 }
4238 
4239 /*
4240  * Shrinking a device means finding all of the device extents past
4241  * the new size and then following the back refs to the chunks.
4242  * The chunk relocation code actually frees the device extents.
4243  */
4244 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4245 {
4246 	struct btrfs_trans_handle *trans;
4247 	struct btrfs_root *root = device->dev_root;
4248 	struct btrfs_dev_extent *dev_extent = NULL;
4249 	struct btrfs_path *path;
4250 	u64 length;
4251 	u64 chunk_offset;
4252 	int ret;
4253 	int slot;
4254 	int failed = 0;
4255 	bool retried = false;
4256 	bool checked_pending_chunks = false;
4257 	struct extent_buffer *l;
4258 	struct btrfs_key key;
4259 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4260 	u64 old_total = btrfs_super_total_bytes(super_copy);
4261 	u64 old_size = btrfs_device_get_total_bytes(device);
4262 	u64 diff = old_size - new_size;
4263 
4264 	if (device->is_tgtdev_for_dev_replace)
4265 		return -EINVAL;
4266 
4267 	path = btrfs_alloc_path();
4268 	if (!path)
4269 		return -ENOMEM;
4270 
4271 	path->reada = 2;
4272 
4273 	lock_chunks(root);
4274 
4275 	btrfs_device_set_total_bytes(device, new_size);
4276 	if (device->writeable) {
4277 		device->fs_devices->total_rw_bytes -= diff;
4278 		spin_lock(&root->fs_info->free_chunk_lock);
4279 		root->fs_info->free_chunk_space -= diff;
4280 		spin_unlock(&root->fs_info->free_chunk_lock);
4281 	}
4282 	unlock_chunks(root);
4283 
4284 again:
4285 	key.objectid = device->devid;
4286 	key.offset = (u64)-1;
4287 	key.type = BTRFS_DEV_EXTENT_KEY;
4288 
4289 	do {
4290 		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4291 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4292 		if (ret < 0) {
4293 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4294 			goto done;
4295 		}
4296 
4297 		ret = btrfs_previous_item(root, path, 0, key.type);
4298 		if (ret)
4299 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4300 		if (ret < 0)
4301 			goto done;
4302 		if (ret) {
4303 			ret = 0;
4304 			btrfs_release_path(path);
4305 			break;
4306 		}
4307 
4308 		l = path->nodes[0];
4309 		slot = path->slots[0];
4310 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4311 
4312 		if (key.objectid != device->devid) {
4313 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4314 			btrfs_release_path(path);
4315 			break;
4316 		}
4317 
4318 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4319 		length = btrfs_dev_extent_length(l, dev_extent);
4320 
4321 		if (key.offset + length <= new_size) {
4322 			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4323 			btrfs_release_path(path);
4324 			break;
4325 		}
4326 
4327 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4328 		btrfs_release_path(path);
4329 
4330 		ret = btrfs_relocate_chunk(root, chunk_offset);
4331 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4332 		if (ret && ret != -ENOSPC)
4333 			goto done;
4334 		if (ret == -ENOSPC)
4335 			failed++;
4336 	} while (key.offset-- > 0);
4337 
4338 	if (failed && !retried) {
4339 		failed = 0;
4340 		retried = true;
4341 		goto again;
4342 	} else if (failed && retried) {
4343 		ret = -ENOSPC;
4344 		goto done;
4345 	}
4346 
4347 	/* Shrinking succeeded, else we would be at "done". */
4348 	trans = btrfs_start_transaction(root, 0);
4349 	if (IS_ERR(trans)) {
4350 		ret = PTR_ERR(trans);
4351 		goto done;
4352 	}
4353 
4354 	lock_chunks(root);
4355 
4356 	/*
4357 	 * In the loop above we checked all device extents that were already
4358 	 * in the device tree. However, before we updated the device's
4359 	 * total_bytes to the new size, we might have had chunk allocations
4360 	 * that had not completed yet (new block groups attached to
4361 	 * transaction handles), so their device extents were not yet in
4362 	 * the device tree and we missed them above. Therefore, if any
4363 	 * pending chunk uses a device extent that overlaps the device range
4364 	 * we can no longer use, commit the current transaction and repeat
4365 	 * the search on the device tree - this way we guarantee we will not
4366 	 * have chunks using device extents that end beyond 'new_size'.
4367 	 */
4368 	if (!checked_pending_chunks) {
4369 		u64 start = new_size;
4370 		u64 len = old_size - new_size;
4371 
4372 		if (contains_pending_extent(trans->transaction, device,
4373 					    &start, len)) {
4374 			unlock_chunks(root);
4375 			checked_pending_chunks = true;
4376 			failed = 0;
4377 			retried = false;
4378 			ret = btrfs_commit_transaction(trans, root);
4379 			if (ret)
4380 				goto done;
4381 			goto again;
4382 		}
4383 	}
4384 
4385 	btrfs_device_set_disk_total_bytes(device, new_size);
4386 	if (list_empty(&device->resized_list))
4387 		list_add_tail(&device->resized_list,
4388 			      &root->fs_info->fs_devices->resized_devices);
4389 
4390 	WARN_ON(diff > old_total);
4391 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
4392 	unlock_chunks(root);
4393 
4394 	/* Now btrfs_update_device() will change the on-disk size. */
4395 	ret = btrfs_update_device(trans, device);
4396 	btrfs_end_transaction(trans, root);
4397 done:
4398 	btrfs_free_path(path);
4399 	if (ret) {
4400 		lock_chunks(root);
4401 		btrfs_device_set_total_bytes(device, old_size);
4402 		if (device->writeable)
4403 			device->fs_devices->total_rw_bytes += diff;
4404 		spin_lock(&root->fs_info->free_chunk_lock);
4405 		root->fs_info->free_chunk_space += diff;
4406 		spin_unlock(&root->fs_info->free_chunk_lock);
4407 		unlock_chunks(root);
4408 	}
4409 	return ret;
4410 }
4411 
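/*
 * Append a key/chunk pair to the superblock's sys_chunk_array so that
 * system chunks can be found at mount time, before the chunk tree is
 * readable. Returns -EFBIG if the array is full.
 */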
4412 static int btrfs_add_system_chunk(struct btrfs_root *root,
4413 			   struct btrfs_key *key,
4414 			   struct btrfs_chunk *chunk, int item_size)
4415 {
4416 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4417 	struct btrfs_disk_key disk_key;
4418 	u32 array_size;
4419 	u8 *ptr;
4420 
4421 	lock_chunks(root);
4422 	array_size = btrfs_super_sys_array_size(super_copy);
4423 	if (array_size + item_size + sizeof(disk_key)
4424 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4425 		unlock_chunks(root);
4426 		return -EFBIG;
4427 	}
4428 
4429 	ptr = super_copy->sys_chunk_array + array_size;
4430 	btrfs_cpu_key_to_disk(&disk_key, key);
4431 	memcpy(ptr, &disk_key, sizeof(disk_key));
4432 	ptr += sizeof(disk_key);
4433 	memcpy(ptr, chunk, item_size);
4434 	item_size += sizeof(disk_key);
4435 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4436 	unlock_chunks(root);
4437 
4438 	return 0;
4439 }
4440 
4441 /*
4442  * sort the devices in descending order by max_avail, total_avail
4443  */
4444 static int btrfs_cmp_device_info(const void *a, const void *b)
4445 {
4446 	const struct btrfs_device_info *di_a = a;
4447 	const struct btrfs_device_info *di_b = b;
4448 
4449 	if (di_a->max_avail > di_b->max_avail)
4450 		return -1;
4451 	if (di_a->max_avail < di_b->max_avail)
4452 		return 1;
4453 	if (di_a->total_avail > di_b->total_avail)
4454 		return -1;
4455 	if (di_a->total_avail < di_b->total_avail)
4456 		return 1;
4457 	return 0;
4458 }
4459 
4460 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
4461 {
4462 	/* TODO: allow the user to set a preferred stripe size */
4463 	return 64 * 1024;
4464 }
4465 
4466 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4467 {
4468 	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4469 		return;
4470 
4471 	btrfs_set_fs_incompat(info, RAID56);
4472 }
4473 
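/*
 * Upper bounds on the number of stripes in a chunk: limited by how many
 * btrfs_stripe entries fit in one leaf (regular chunks) or in the
 * superblock's system chunk array (system chunks).
 */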
4474 #define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r)		\
4475 			- sizeof(struct btrfs_item)		\
4476 			- sizeof(struct btrfs_chunk))		\
4477 			/ sizeof(struct btrfs_stripe) + 1)
4478 
4479 #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
4480 				- 2 * sizeof(struct btrfs_disk_key)	\
4481 				- 2 * sizeof(struct btrfs_chunk))	\
4482 				/ sizeof(struct btrfs_stripe) + 1)
4483 
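/*
 * Phase one of chunk allocation: pick the devices with the largest free
 * holes, compute the stripe layout for the requested profile, insert the
 * mapping into the in-memory chunk mapping tree and create the block group.
 * The chunk item itself is written to the chunk tree later, in
 * btrfs_finish_chunk_alloc().
 */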
4484 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4485 			       struct btrfs_root *extent_root, u64 start,
4486 			       u64 type)
4487 {
4488 	struct btrfs_fs_info *info = extent_root->fs_info;
4489 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
4490 	struct list_head *cur;
4491 	struct map_lookup *map = NULL;
4492 	struct extent_map_tree *em_tree;
4493 	struct extent_map *em;
4494 	struct btrfs_device_info *devices_info = NULL;
4495 	u64 total_avail;
4496 	int num_stripes;	/* total number of stripes to allocate */
4497 	int data_stripes;	/* number of stripes that count for
4498 				   block group size */
4499 	int sub_stripes;	/* sub_stripes info for map */
4500 	int dev_stripes;	/* stripes per dev */
4501 	int devs_max;		/* max devs to use */
4502 	int devs_min;		/* min devs needed */
4503 	int devs_increment;	/* ndevs has to be a multiple of this */
4504 	int ncopies;		/* how many copies of the data */
4505 	int ret;
4506 	u64 max_stripe_size;
4507 	u64 max_chunk_size;
4508 	u64 stripe_size;
4509 	u64 num_bytes;
4510 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
4511 	int ndevs;
4512 	int i;
4513 	int j;
4514 	int index;
4515 
4516 	BUG_ON(!alloc_profile_is_valid(type, 0));
4517 
4518 	if (list_empty(&fs_devices->alloc_list))
4519 		return -ENOSPC;
4520 
4521 	index = __get_raid_index(type);
4522 
4523 	sub_stripes = btrfs_raid_array[index].sub_stripes;
4524 	dev_stripes = btrfs_raid_array[index].dev_stripes;
4525 	devs_max = btrfs_raid_array[index].devs_max;
4526 	devs_min = btrfs_raid_array[index].devs_min;
4527 	devs_increment = btrfs_raid_array[index].devs_increment;
4528 	ncopies = btrfs_raid_array[index].ncopies;
4529 
4530 	if (type & BTRFS_BLOCK_GROUP_DATA) {
4531 		max_stripe_size = 1024 * 1024 * 1024;
4532 		max_chunk_size = 10 * max_stripe_size;
4533 		if (!devs_max)
4534 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4535 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4536 		/* for larger filesystems, use larger metadata chunks */
4537 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4538 			max_stripe_size = 1024 * 1024 * 1024;
4539 		else
4540 			max_stripe_size = 256 * 1024 * 1024;
4541 		max_chunk_size = max_stripe_size;
4542 		if (!devs_max)
4543 			devs_max = BTRFS_MAX_DEVS(info->chunk_root);
4544 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4545 		max_stripe_size = 32 * 1024 * 1024;
4546 		max_chunk_size = 2 * max_stripe_size;
4547 		if (!devs_max)
4548 			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4549 	} else {
4550 		btrfs_err(info, "invalid chunk type 0x%llx requested",
4551 		       type);
4552 		BUG_ON(1);
4553 	}
4554 
4555 	/* we don't want a chunk larger than 10% of writeable space */
4556 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4557 			     max_chunk_size);
4558 
4559 	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4560 			       GFP_NOFS);
4561 	if (!devices_info)
4562 		return -ENOMEM;
4563 
4564 	cur = fs_devices->alloc_list.next;
4565 
4566 	/*
4567 	 * in the first pass through the devices list, we gather information
4568 	 * about the available holes on each device.
4569 	 */
4570 	ndevs = 0;
4571 	while (cur != &fs_devices->alloc_list) {
4572 		struct btrfs_device *device;
4573 		u64 max_avail;
4574 		u64 dev_offset;
4575 
4576 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4577 
4578 		cur = cur->next;
4579 
4580 		if (!device->writeable) {
4581 			WARN(1, KERN_ERR
4582 			       "BTRFS: read-only device in alloc_list\n");
4583 			continue;
4584 		}
4585 
4586 		if (!device->in_fs_metadata ||
4587 		    device->is_tgtdev_for_dev_replace)
4588 			continue;
4589 
4590 		if (device->total_bytes > device->bytes_used)
4591 			total_avail = device->total_bytes - device->bytes_used;
4592 		else
4593 			total_avail = 0;
4594 
4595 		/* If there is no space on this device, skip it. */
4596 		if (total_avail == 0)
4597 			continue;
4598 
4599 		ret = find_free_dev_extent(trans, device,
4600 					   max_stripe_size * dev_stripes,
4601 					   &dev_offset, &max_avail);
4602 		if (ret && ret != -ENOSPC)
4603 			goto error;
4604 
4605 		if (ret == 0)
4606 			max_avail = max_stripe_size * dev_stripes;
4607 
4608 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4609 			continue;
4610 
4611 		if (ndevs == fs_devices->rw_devices) {
4612 			WARN(1, "%s: found more than %llu devices\n",
4613 			     __func__, fs_devices->rw_devices);
4614 			break;
4615 		}
4616 		devices_info[ndevs].dev_offset = dev_offset;
4617 		devices_info[ndevs].max_avail = max_avail;
4618 		devices_info[ndevs].total_avail = total_avail;
4619 		devices_info[ndevs].dev = device;
4620 		++ndevs;
4621 	}
4622 
4623 	/*
4624 	 * now sort the devices by hole size / available space
4625 	 */
4626 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4627 	     btrfs_cmp_device_info, NULL);
4628 
4629 	/* round down to number of usable stripes */
4630 	ndevs -= ndevs % devs_increment;
4631 
4632 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4633 		ret = -ENOSPC;
4634 		goto error;
4635 	}
4636 
4637 	if (devs_max && ndevs > devs_max)
4638 		ndevs = devs_max;
4639 	/*
4640 	 * the primary goal is to maximize the number of stripes, so use as many
4641 	 * devices as possible, even if the stripes are not maximum sized.
4642 	 */
4643 	stripe_size = devices_info[ndevs-1].max_avail;
4644 	num_stripes = ndevs * dev_stripes;
4645 
4646 	/*
4647 	 * this will have to be fixed for RAID1 and RAID10 over
4648 	 * more drives
4649 	 */
4650 	data_stripes = num_stripes / ncopies;
4651 
4652 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
4653 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4654 				 btrfs_super_stripesize(info->super_copy));
4655 		data_stripes = num_stripes - 1;
4656 	}
4657 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
4658 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4659 				 btrfs_super_stripesize(info->super_copy));
4660 		data_stripes = num_stripes - 2;
4661 	}
4662 
4663 	/*
4664 	 * Use the number of data stripes to figure out how big this chunk
4665 	 * is really going to be in terms of logical address space,
4666 	 * and compare that answer with the max chunk size
4667 	 */
4668 	if (stripe_size * data_stripes > max_chunk_size) {
4669 		u64 mask = (1ULL << 24) - 1;
4670 
4671 		stripe_size = div_u64(max_chunk_size, data_stripes);
4672 
4673 		/* bump the answer up to a 16MB boundary */
4674 		stripe_size = (stripe_size + mask) & ~mask;
4675 
4676 		/* but don't go higher than the limits we found
4677 		 * while searching for free extents
4678 		 */
4679 		if (stripe_size > devices_info[ndevs-1].max_avail)
4680 			stripe_size = devices_info[ndevs-1].max_avail;
4681 	}
4682 
4683 	stripe_size = div_u64(stripe_size, dev_stripes);
4684 
4685 	/* align to BTRFS_STRIPE_LEN */
4686 	stripe_size = div_u64(stripe_size, raid_stripe_len);
4687 	stripe_size *= raid_stripe_len;
4688 
4689 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4690 	if (!map) {
4691 		ret = -ENOMEM;
4692 		goto error;
4693 	}
4694 	map->num_stripes = num_stripes;
4695 
4696 	for (i = 0; i < ndevs; ++i) {
4697 		for (j = 0; j < dev_stripes; ++j) {
4698 			int s = i * dev_stripes + j;
4699 			map->stripes[s].dev = devices_info[i].dev;
4700 			map->stripes[s].physical = devices_info[i].dev_offset +
4701 						   j * stripe_size;
4702 		}
4703 	}
4704 	map->sector_size = extent_root->sectorsize;
4705 	map->stripe_len = raid_stripe_len;
4706 	map->io_align = raid_stripe_len;
4707 	map->io_width = raid_stripe_len;
4708 	map->type = type;
4709 	map->sub_stripes = sub_stripes;
4710 
4711 	num_bytes = stripe_size * data_stripes;
4712 
4713 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4714 
4715 	em = alloc_extent_map();
4716 	if (!em) {
4717 		kfree(map);
4718 		ret = -ENOMEM;
4719 		goto error;
4720 	}
4721 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4722 	em->bdev = (struct block_device *)map;
4723 	em->start = start;
4724 	em->len = num_bytes;
4725 	em->block_start = 0;
4726 	em->block_len = em->len;
4727 	em->orig_block_len = stripe_size;
4728 
4729 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4730 	write_lock(&em_tree->lock);
4731 	ret = add_extent_mapping(em_tree, em, 0);
4732 	if (!ret) {
4733 		list_add_tail(&em->list, &trans->transaction->pending_chunks);
4734 		atomic_inc(&em->refs);
4735 	}
4736 	write_unlock(&em_tree->lock);
4737 	if (ret) {
4738 		free_extent_map(em);
4739 		goto error;
4740 	}
4741 
4742 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
4743 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4744 				     start, num_bytes);
4745 	if (ret)
4746 		goto error_del_extent;
4747 
4748 	for (i = 0; i < map->num_stripes; i++) {
4749 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4750 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4751 	}
4752 
4753 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4754 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4755 						   map->num_stripes);
4756 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4757 
4758 	free_extent_map(em);
4759 	check_raid56_incompat_flag(extent_root->fs_info, type);
4760 
4761 	kfree(devices_info);
4762 	return 0;
4763 
4764 error_del_extent:
4765 	write_lock(&em_tree->lock);
4766 	remove_extent_mapping(em_tree, em);
4767 	write_unlock(&em_tree->lock);
4768 
4769 	/* One for our allocation */
4770 	free_extent_map(em);
4771 	/* One for the tree reference */
4772 	free_extent_map(em);
4773 	/* One for the pending_chunks list reference */
4774 	free_extent_map(em);
4775 error:
4776 	kfree(devices_info);
4777 	return ret;
4778 }
4779 
4780 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4781 				struct btrfs_root *extent_root,
4782 				u64 chunk_offset, u64 chunk_size)
4783 {
4784 	struct btrfs_key key;
4785 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4786 	struct btrfs_device *device;
4787 	struct btrfs_chunk *chunk;
4788 	struct btrfs_stripe *stripe;
4789 	struct extent_map_tree *em_tree;
4790 	struct extent_map *em;
4791 	struct map_lookup *map;
4792 	size_t item_size;
4793 	u64 dev_offset;
4794 	u64 stripe_size;
4795 	int i = 0;
4796 	int ret;
4797 
4798 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4799 	read_lock(&em_tree->lock);
4800 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4801 	read_unlock(&em_tree->lock);
4802 
4803 	if (!em) {
4804 		btrfs_crit(extent_root->fs_info,
4805 			   "unable to find logical %Lu len %Lu", chunk_offset, chunk_size);
4806 		return -EINVAL;
4807 	}
4808 
4809 	if (em->start != chunk_offset || em->len != chunk_size) {
4810 		btrfs_crit(extent_root->fs_info,
4811 			  "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
4812 			  chunk_offset, chunk_size, em->start, em->len);
4813 		free_extent_map(em);
4814 		return -EINVAL;
4815 	}
4816 
4817 	map = (struct map_lookup *)em->bdev;
4818 	item_size = btrfs_chunk_item_size(map->num_stripes);
4819 	stripe_size = em->orig_block_len;
4820 
4821 	chunk = kzalloc(item_size, GFP_NOFS);
4822 	if (!chunk) {
4823 		ret = -ENOMEM;
4824 		goto out;
4825 	}
4826 
4827 	for (i = 0; i < map->num_stripes; i++) {
4828 		device = map->stripes[i].dev;
4829 		dev_offset = map->stripes[i].physical;
4830 
4831 		ret = btrfs_update_device(trans, device);
4832 		if (ret)
4833 			goto out;
4834 		ret = btrfs_alloc_dev_extent(trans, device,
4835 					     chunk_root->root_key.objectid,
4836 					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4837 					     chunk_offset, dev_offset,
4838 					     stripe_size);
4839 		if (ret)
4840 			goto out;
4841 	}
4842 
4843 	stripe = &chunk->stripe;
4844 	for (i = 0; i < map->num_stripes; i++) {
4845 		device = map->stripes[i].dev;
4846 		dev_offset = map->stripes[i].physical;
4847 
4848 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4849 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4850 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4851 		stripe++;
4852 	}
4853 
4854 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4855 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4856 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4857 	btrfs_set_stack_chunk_type(chunk, map->type);
4858 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4859 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4860 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4861 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4862 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4863 
4864 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4865 	key.type = BTRFS_CHUNK_ITEM_KEY;
4866 	key.offset = chunk_offset;
4867 
4868 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4869 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4870 		/*
4871 		 * TODO: Cleanup of inserted chunk root in case of
4872 		 * failure.
4873 		 */
4874 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4875 					     item_size);
4876 	}
4877 
4878 out:
4879 	kfree(chunk);
4880 	free_extent_map(em);
4881 	return ret;
4882 }
4883 
4884 /*
4885  * Chunk allocation falls into two parts. The first part does the work
4886  * that makes the newly allocated chunk usable but does not modify the
4887  * chunk tree. The second part does the work that requires modifying
4888  * the chunk tree. This division is important for the bootstrap
4889  * process of adding storage to a seed btrfs.
4890  */
4891 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4892 		      struct btrfs_root *extent_root, u64 type)
4893 {
4894 	u64 chunk_offset;
4895 
4896 	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
4897 	chunk_offset = find_next_chunk(extent_root->fs_info);
4898 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4899 }
4900 
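/*
 * Allocate the first metadata and system chunks, used when the first
 * writable device is added to a seed filesystem.
 */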
4901 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4902 					 struct btrfs_root *root,
4903 					 struct btrfs_device *device)
4904 {
4905 	u64 chunk_offset;
4906 	u64 sys_chunk_offset;
4907 	u64 alloc_profile;
4908 	struct btrfs_fs_info *fs_info = root->fs_info;
4909 	struct btrfs_root *extent_root = fs_info->extent_root;
4910 	int ret;
4911 
4912 	chunk_offset = find_next_chunk(fs_info);
4913 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4914 	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4915 				  alloc_profile);
4916 	if (ret)
4917 		return ret;
4918 
4919 	sys_chunk_offset = find_next_chunk(root->fs_info);
4920 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4921 	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4922 				  alloc_profile);
4923 	return ret;
4924 }
4925 
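/*
 * Number of device failures a chunk of the given raid type can tolerate:
 * 1 for DUP/RAID1/RAID10/RAID5, 2 for RAID6, 0 otherwise.
 */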
4926 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
4927 {
4928 	int max_errors;
4929 
4930 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4931 			 BTRFS_BLOCK_GROUP_RAID10 |
4932 			 BTRFS_BLOCK_GROUP_RAID5 |
4933 			 BTRFS_BLOCK_GROUP_DUP)) {
4934 		max_errors = 1;
4935 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4936 		max_errors = 2;
4937 	} else {
4938 		max_errors = 0;
4939 	}
4940 
4941 	return max_errors;
4942 }
4943 
4944 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4945 {
4946 	struct extent_map *em;
4947 	struct map_lookup *map;
4948 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4949 	int readonly = 0;
4950 	int miss_ndevs = 0;
4951 	int i;
4952 
4953 	read_lock(&map_tree->map_tree.lock);
4954 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4955 	read_unlock(&map_tree->map_tree.lock);
4956 	if (!em)
4957 		return 1;
4958 
4959 	map = (struct map_lookup *)em->bdev;
4960 	for (i = 0; i < map->num_stripes; i++) {
4961 		if (map->stripes[i].dev->missing) {
4962 			miss_ndevs++;
4963 			continue;
4964 		}
4965 
4966 		if (!map->stripes[i].dev->writeable) {
4967 			readonly = 1;
4968 			goto end;
4969 		}
4970 	}
4971 
4972 	/*
4973 	 * If the number of missing devices is larger than max errors,
4974 	 * we cannot write the data into that chunk successfully, so
4975 	 * set it readonly.
4976 	 */
4977 	if (miss_ndevs > btrfs_chunk_max_errors(map))
4978 		readonly = 1;
4979 end:
4980 	free_extent_map(em);
4981 	return readonly;
4982 }
4983 
4984 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4985 {
4986 	extent_map_tree_init(&tree->map_tree);
4987 }
4988 
4989 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4990 {
4991 	struct extent_map *em;
4992 
4993 	while (1) {
4994 		write_lock(&tree->map_tree.lock);
4995 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4996 		if (em)
4997 			remove_extent_mapping(&tree->map_tree, em);
4998 		write_unlock(&tree->map_tree.lock);
4999 		if (!em)
5000 			break;
5001 		/* once for us */
5002 		free_extent_map(em);
5003 		/* once for the tree */
5004 		free_extent_map(em);
5005 	}
5006 }
5007 
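/*
 * Return how many copies of the data in the given logical range exist,
 * plus one while a device replace is ongoing.
 */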
5008 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5009 {
5010 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5011 	struct extent_map *em;
5012 	struct map_lookup *map;
5013 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5014 	int ret;
5015 
5016 	read_lock(&em_tree->lock);
5017 	em = lookup_extent_mapping(em_tree, logical, len);
5018 	read_unlock(&em_tree->lock);
5019 
5020 	/*
5021 	 * We could return errors for these cases, but that could get ugly
5022 	 * and we'd probably end up doing the same thing anyway, which is to
5023 	 * do nothing and exit, so return 1 so the callers don't try to use other copies.
5024 	 */
5025 	if (!em) {
5026 		btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
5027 			    logical+len);
5028 		return 1;
5029 	}
5030 
5031 	if (em->start > logical || em->start + em->len < logical) {
5032 		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
5033 			    logical, logical+len, em->start,
5034 			    em->start + em->len);
5035 		free_extent_map(em);
5036 		return 1;
5037 	}
5038 
5039 	map = (struct map_lookup *)em->bdev;
5040 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5041 		ret = map->num_stripes;
5042 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5043 		ret = map->sub_stripes;
5044 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5045 		ret = 2;
5046 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5047 		ret = 3;
5048 	else
5049 		ret = 1;
5050 	free_extent_map(em);
5051 
5052 	btrfs_dev_replace_lock(&fs_info->dev_replace);
5053 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
5054 		ret++;
5055 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
5056 
5057 	return ret;
5058 }
5059 
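/*
 * Length of a full stripe at the given logical address: stripe_len times
 * the number of data stripes for RAID5/6 chunks, sectorsize otherwise.
 */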
5060 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
5061 				    struct btrfs_mapping_tree *map_tree,
5062 				    u64 logical)
5063 {
5064 	struct extent_map *em;
5065 	struct map_lookup *map;
5066 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5067 	unsigned long len = root->sectorsize;
5068 
5069 	read_lock(&em_tree->lock);
5070 	em = lookup_extent_mapping(em_tree, logical, len);
5071 	read_unlock(&em_tree->lock);
5072 	BUG_ON(!em);
5073 
5074 	BUG_ON(em->start > logical || em->start + em->len < logical);
5075 	map = (struct map_lookup *)em->bdev;
5076 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5077 		len = map->stripe_len * nr_data_stripes(map);
5078 	free_extent_map(em);
5079 	return len;
5080 }
5081 
5082 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
5083 			   u64 logical, u64 len, int mirror_num)
5084 {
5085 	struct extent_map *em;
5086 	struct map_lookup *map;
5087 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5088 	int ret = 0;
5089 
5090 	read_lock(&em_tree->lock);
5091 	em = lookup_extent_mapping(em_tree, logical, len);
5092 	read_unlock(&em_tree->lock);
5093 	BUG_ON(!em);
5094 
5095 	BUG_ON(em->start > logical || em->start + em->len < logical);
5096 	map = (struct map_lookup *)em->bdev;
5097 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5098 		ret = 1;
5099 	free_extent_map(em);
5100 	return ret;
5101 }
5102 
5103 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5104 			    struct map_lookup *map, int first, int num,
5105 			    int optimal, int dev_replace_is_ongoing)
5106 {
5107 	int i;
5108 	int tolerance;
5109 	struct btrfs_device *srcdev;
5110 
5111 	if (dev_replace_is_ongoing &&
5112 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5113 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5114 		srcdev = fs_info->dev_replace.srcdev;
5115 	else
5116 		srcdev = NULL;
5117 
5118 	/*
5119 	 * try to avoid the drive that is the source drive for a
5120 	 * dev-replace procedure, only choose it if no other non-missing
5121 	 * mirror is available
5122 	 */
5123 	for (tolerance = 0; tolerance < 2; tolerance++) {
5124 		if (map->stripes[optimal].dev->bdev &&
5125 		    (tolerance || map->stripes[optimal].dev != srcdev))
5126 			return optimal;
5127 		for (i = first; i < first + num; i++) {
5128 			if (map->stripes[i].dev->bdev &&
5129 			    (tolerance || map->stripes[i].dev != srcdev))
5130 				return i;
5131 		}
5132 	}
5133 
5134 	/* We couldn't find one that doesn't fail. Just return something
5135 	 * and the I/O error handling code will clean up eventually.
5136 	 */
5137 	return optimal;
5138 }
5139 
5140 static inline int parity_smaller(u64 a, u64 b)
5141 {
5142 	return a > b;
5143 }
5144 
5145 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5146 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5147 {
5148 	struct btrfs_bio_stripe s;
5149 	int i;
5150 	u64 l;
5151 	int again = 1;
5152 
5153 	while (again) {
5154 		again = 0;
5155 		for (i = 0; i < num_stripes - 1; i++) {
5156 			if (parity_smaller(bbio->raid_map[i],
5157 					   bbio->raid_map[i+1])) {
5158 				s = bbio->stripes[i];
5159 				l = bbio->raid_map[i];
5160 				bbio->stripes[i] = bbio->stripes[i+1];
5161 				bbio->raid_map[i] = bbio->raid_map[i+1];
5162 				bbio->stripes[i+1] = s;
5163 				bbio->raid_map[i+1] = l;
5164 
5165 				again = 1;
5166 			}
5167 		}
5168 	}
5169 }
5170 
5171 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5172 {
5173 	struct btrfs_bio *bbio = kzalloc(
5174 		 /* the size of the btrfs_bio */
5175 		sizeof(struct btrfs_bio) +
5176 		/* plus the variable array for the stripes */
5177 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5178 		/* plus the variable array for the tgt dev */
5179 		sizeof(int) * (real_stripes) +
5180 		/*
5181 		 * plus the raid_map, which includes both the tgt dev
5182 		 * and the stripes
5183 		 */
5184 		sizeof(u64) * (total_stripes),
5185 		GFP_NOFS|__GFP_NOFAIL);
5186 
5187 	atomic_set(&bbio->error, 0);
5188 	atomic_set(&bbio->refs, 1);
5189 
5190 	return bbio;
5191 }
5192 
5193 void btrfs_get_bbio(struct btrfs_bio *bbio)
5194 {
5195 	WARN_ON(!atomic_read(&bbio->refs));
5196 	atomic_inc(&bbio->refs);
5197 }
5198 
5199 void btrfs_put_bbio(struct btrfs_bio *bbio)
5200 {
5201 	if (!bbio)
5202 		return;
5203 	if (atomic_dec_and_test(&bbio->refs))
5204 		kfree(bbio);
5205 }
5206 
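/*
 * Map a logical range to the physical stripes that hold it. Fills in a
 * btrfs_bio describing the devices and physical offsets involved, trims
 * *length to what the mapping covers, selects mirrors for reads and
 * handles the discard, RAID5/6 and device replace special cases.
 */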
5207 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5208 			     u64 logical, u64 *length,
5209 			     struct btrfs_bio **bbio_ret,
5210 			     int mirror_num, int need_raid_map)
5211 {
5212 	struct extent_map *em;
5213 	struct map_lookup *map;
5214 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
5215 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5216 	u64 offset;
5217 	u64 stripe_offset;
5218 	u64 stripe_end_offset;
5219 	u64 stripe_nr;
5220 	u64 stripe_nr_orig;
5221 	u64 stripe_nr_end;
5222 	u64 stripe_len;
5223 	u32 stripe_index;
5224 	int i;
5225 	int ret = 0;
5226 	int num_stripes;
5227 	int max_errors = 0;
5228 	int tgtdev_indexes = 0;
5229 	struct btrfs_bio *bbio = NULL;
5230 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5231 	int dev_replace_is_ongoing = 0;
5232 	int num_alloc_stripes;
5233 	int patch_the_first_stripe_for_dev_replace = 0;
5234 	u64 physical_to_patch_in_first_stripe = 0;
5235 	u64 raid56_full_stripe_start = (u64)-1;
5236 
5237 	read_lock(&em_tree->lock);
5238 	em = lookup_extent_mapping(em_tree, logical, *length);
5239 	read_unlock(&em_tree->lock);
5240 
5241 	if (!em) {
5242 		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
5243 			logical, *length);
5244 		return -EINVAL;
5245 	}
5246 
5247 	if (em->start > logical || em->start + em->len < logical) {
5248 		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, found %Lu-%Lu",
5249 			   logical, em->start,
5250 			   em->start + em->len);
5251 		free_extent_map(em);
5252 		return -EINVAL;
5253 	}
5254 
5255 	map = (struct map_lookup *)em->bdev;
5256 	offset = logical - em->start;
5257 
5258 	stripe_len = map->stripe_len;
5259 	stripe_nr = offset;
5260 	/*
5261 	 * stripe_nr counts the total number of stripes we have to stride
5262 	 * to get to this block
5263 	 */
5264 	stripe_nr = div64_u64(stripe_nr, stripe_len);
5265 
5266 	stripe_offset = stripe_nr * stripe_len;
5267 	BUG_ON(offset < stripe_offset);
5268 
5269 	/* stripe_offset is the offset of this block in its stripe */
5270 	stripe_offset = offset - stripe_offset;
5271 
5272 	/* if we're here for raid56, we need to know the stripe aligned start */
5273 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5274 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5275 		raid56_full_stripe_start = offset;
5276 
5277 		/* allow a write of a full stripe, but make sure we don't
5278 		 * allow straddling of stripes
5279 		 */
5280 		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5281 				full_stripe_len);
5282 		raid56_full_stripe_start *= full_stripe_len;
5283 	}
5284 
5285 	if (rw & REQ_DISCARD) {
5286 		/* we don't discard raid56 yet */
5287 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5288 			ret = -EOPNOTSUPP;
5289 			goto out;
5290 		}
5291 		*length = min_t(u64, em->len - offset, *length);
5292 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5293 		u64 max_len;
5294 		/* For writes to RAID[56], allow a full stripeset across all disks.
5295 		   For other RAID types and for RAID[56] reads, just allow a single
5296 		   stripe (on a single disk). */
5297 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5298 		    (rw & REQ_WRITE)) {
5299 			max_len = stripe_len * nr_data_stripes(map) -
5300 				(offset - raid56_full_stripe_start);
5301 		} else {
5302 			/* we limit the length of each bio to what fits in a stripe */
5303 			max_len = stripe_len - stripe_offset;
5304 		}
5305 		*length = min_t(u64, em->len - offset, max_len);
5306 	} else {
5307 		*length = em->len - offset;
5308 	}
5309 
5310 	/* This is for when we're called from btrfs_merge_bio_hook() and all
5311 	   it cares about is the length */
5312 	if (!bbio_ret)
5313 		goto out;
5314 
5315 	btrfs_dev_replace_lock(dev_replace);
5316 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5317 	if (!dev_replace_is_ongoing)
5318 		btrfs_dev_replace_unlock(dev_replace);
5319 
5320 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5321 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
5322 	    dev_replace->tgtdev != NULL) {
5323 		/*
5324 		 * In the dev-replace case, for the repair case (that's the
5325 		 * only case where the mirror is selected explicitly when
5326 		 * calling btrfs_map_block), blocks left of the left cursor
5327 		 * can also be read from the target drive.
5328 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
5329 		 * the last one to the array of stripes. For READ, it also
5330 		 * needs to be supported using the same mirror number.
5331 		 * If the requested block is not left of the left cursor,
5332 		 * EIO is returned. This can happen because btrfs_num_copies()
5333 		 * returns one more in the dev-replace case.
5334 		 */
5335 		u64 tmp_length = *length;
5336 		struct btrfs_bio *tmp_bbio = NULL;
5337 		int tmp_num_stripes;
5338 		u64 srcdev_devid = dev_replace->srcdev->devid;
5339 		int index_srcdev = 0;
5340 		int found = 0;
5341 		u64 physical_of_found = 0;
5342 
5343 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
5344 			     logical, &tmp_length, &tmp_bbio, 0, 0);
5345 		if (ret) {
5346 			WARN_ON(tmp_bbio != NULL);
5347 			goto out;
5348 		}
5349 
5350 		tmp_num_stripes = tmp_bbio->num_stripes;
5351 		if (mirror_num > tmp_num_stripes) {
5352 			/*
5353 			 * REQ_GET_READ_MIRRORS does not contain this
5354 			 * mirror, that means that the requested area
5355 			 * is not left of the left cursor
5356 			 */
5357 			ret = -EIO;
5358 			btrfs_put_bbio(tmp_bbio);
5359 			goto out;
5360 		}
5361 
5362 		/*
5363 		 * process the rest of the function using the mirror_num
5364 		 * of the source drive. Therefore look it up first.
5365 		 * At the end, patch the device pointer to the one of the
5366 		 * target drive.
5367 		 */
5368 		for (i = 0; i < tmp_num_stripes; i++) {
5369 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
5370 				/*
5371 				 * In case of DUP, in order to keep it
5372 				 * simple, only add the mirror with the
5373 				 * lowest physical address
5374 				 */
5375 				if (found &&
5376 				    physical_of_found <=
5377 				     tmp_bbio->stripes[i].physical)
5378 					continue;
5379 				index_srcdev = i;
5380 				found = 1;
5381 				physical_of_found =
5382 					tmp_bbio->stripes[i].physical;
5383 			}
5384 		}
5385 
5386 		if (found) {
5387 			mirror_num = index_srcdev + 1;
5388 			patch_the_first_stripe_for_dev_replace = 1;
5389 			physical_to_patch_in_first_stripe = physical_of_found;
5390 		} else {
5391 			WARN_ON(1);
5392 			ret = -EIO;
5393 			btrfs_put_bbio(tmp_bbio);
5394 			goto out;
5395 		}
5396 
5397 		btrfs_put_bbio(tmp_bbio);
5398 	} else if (mirror_num > map->num_stripes) {
5399 		mirror_num = 0;
5400 	}
5401 
5402 	num_stripes = 1;
5403 	stripe_index = 0;
5404 	stripe_nr_orig = stripe_nr;
5405 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
5406 	stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
5407 	stripe_end_offset = stripe_nr_end * map->stripe_len -
5408 			    (offset + *length);
5409 
5410 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5411 		if (rw & REQ_DISCARD)
5412 			num_stripes = min_t(u64, map->num_stripes,
5413 					    stripe_nr_end - stripe_nr_orig);
5414 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5415 				&stripe_index);
5416 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
5417 			mirror_num = 1;
5418 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5419 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
5420 			num_stripes = map->num_stripes;
5421 		else if (mirror_num)
5422 			stripe_index = mirror_num - 1;
5423 		else {
5424 			stripe_index = find_live_mirror(fs_info, map, 0,
5425 					    map->num_stripes,
5426 					    current->pid % map->num_stripes,
5427 					    dev_replace_is_ongoing);
5428 			mirror_num = stripe_index + 1;
5429 		}
5430 
5431 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5432 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
5433 			num_stripes = map->num_stripes;
5434 		} else if (mirror_num) {
5435 			stripe_index = mirror_num - 1;
5436 		} else {
5437 			mirror_num = 1;
5438 		}
5439 
5440 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5441 		u32 factor = map->num_stripes / map->sub_stripes;
5442 
5443 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5444 		stripe_index *= map->sub_stripes;
5445 
5446 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5447 			num_stripes = map->sub_stripes;
5448 		else if (rw & REQ_DISCARD)
5449 			num_stripes = min_t(u64, map->sub_stripes *
5450 					    (stripe_nr_end - stripe_nr_orig),
5451 					    map->num_stripes);
5452 		else if (mirror_num)
5453 			stripe_index += mirror_num - 1;
5454 		else {
5455 			int old_stripe_index = stripe_index;
5456 			stripe_index = find_live_mirror(fs_info, map,
5457 					      stripe_index,
5458 					      map->sub_stripes, stripe_index +
5459 					      current->pid % map->sub_stripes,
5460 					      dev_replace_is_ongoing);
5461 			mirror_num = stripe_index - old_stripe_index + 1;
5462 		}
5463 
5464 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5465 		if (need_raid_map &&
5466 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5467 		     mirror_num > 1)) {
5468 			/* push stripe_nr back to the start of the full stripe */
5469 			stripe_nr = div_u64(raid56_full_stripe_start,
5470 					stripe_len * nr_data_stripes(map));
5471 
5472 			/* RAID[56] write or recovery. Return all stripes */
5473 			num_stripes = map->num_stripes;
5474 			max_errors = nr_parity_stripes(map);
5475 
5476 			*length = map->stripe_len;
5477 			stripe_index = 0;
5478 			stripe_offset = 0;
5479 		} else {
5480 			/*
5481 			 * Mirror #0 or #1 means the original data block.
5482 			 * Mirror #2 is the RAID5 parity block.
5483 			 * Mirror #3 is the RAID6 Q block.
5484 			 */
5485 			stripe_nr = div_u64_rem(stripe_nr,
5486 					nr_data_stripes(map), &stripe_index);
5487 			if (mirror_num > 1)
5488 				stripe_index = nr_data_stripes(map) +
5489 						mirror_num - 2;
5490 
5491 			/* We distribute the parity blocks across stripes */
5492 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5493 					&stripe_index);
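			/*
			 * A worked example with hypothetical numbers: RAID5
			 * over 3 devices has nr_data_stripes of 2. For a
			 * plain read of logical stripe 5, the division above
			 * yields stripe_nr 2 and stripe_index 1, and the
			 * rotation div_u64_rem(2 + 1, 3, ...) remaps
			 * stripe_index to 0, reflecting that parity shifts
			 * by one device per full stripe.
			 */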
5494 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
5495 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
5496 				mirror_num = 1;
5497 		}
5498 	} else {
5499 		/*
5500 		 * after this, stripe_nr is the number of stripes on this
5501 		 * device we have to walk to find the data, and stripe_index is
5502 		 * the number of our device in the stripe array
5503 		 */
5504 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5505 				&stripe_index);
5506 		mirror_num = stripe_index + 1;
5507 	}
5508 	BUG_ON(stripe_index >= map->num_stripes);
5509 
5510 	num_alloc_stripes = num_stripes;
5511 	if (dev_replace_is_ongoing) {
5512 		if (rw & (REQ_WRITE | REQ_DISCARD))
5513 			num_alloc_stripes <<= 1;
5514 		if (rw & REQ_GET_READ_MIRRORS)
5515 			num_alloc_stripes++;
5516 		tgtdev_indexes = num_stripes;
5517 	}
5518 
5519 	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5520 	if (!bbio) {
5521 		ret = -ENOMEM;
5522 		goto out;
5523 	}
5524 	if (dev_replace_is_ongoing)
5525 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5526 
5527 	/* build raid_map */
5528 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
5529 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5530 	    mirror_num > 1)) {
5531 		u64 tmp;
5532 		unsigned rot;
5533 
5534 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
5535 				 sizeof(struct btrfs_bio_stripe) *
5536 				 num_alloc_stripes +
5537 				 sizeof(int) * tgtdev_indexes);
5538 
5539 		/* Work out the disk rotation on this stripe-set */
5540 		div_u64_rem(stripe_nr, num_stripes, &rot);
5541 
5542 		/* Fill in the logical address of each stripe */
5543 		tmp = stripe_nr * nr_data_stripes(map);
5544 		for (i = 0; i < nr_data_stripes(map); i++)
5545 			bbio->raid_map[(i+rot) % num_stripes] =
5546 				em->start + (tmp + i) * map->stripe_len;
5547 
5548 		bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
5549 		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5550 			bbio->raid_map[(i+rot+1) % num_stripes] =
5551 				RAID6_Q_STRIPE;
5552 	}
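	/*
	 * A worked example with hypothetical numbers: RAID5 over 3 devices
	 * at full-stripe number 2 gives rot of 2 and tmp of 4, so the loop
	 * above stores the two data addresses at raid_map[2] and
	 * raid_map[0], and RAID5_P_STRIPE lands in raid_map[1];
	 * sort_parity_stripes() below then reorders the stripe array with
	 * raid_map so that the parity entries sort last.
	 */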
5553 
5554 	if (rw & REQ_DISCARD) {
5555 		u32 factor = 0;
5556 		u32 sub_stripes = 0;
5557 		u64 stripes_per_dev = 0;
5558 		u32 remaining_stripes = 0;
5559 		u32 last_stripe = 0;
5560 
5561 		if (map->type &
5562 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
5563 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5564 				sub_stripes = 1;
5565 			else
5566 				sub_stripes = map->sub_stripes;
5567 
5568 			factor = map->num_stripes / sub_stripes;
5569 			stripes_per_dev = div_u64_rem(stripe_nr_end -
5570 						      stripe_nr_orig,
5571 						      factor,
5572 						      &remaining_stripes);
5573 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5574 			last_stripe *= sub_stripes;
5575 		}
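		/*
		 * A worked example with hypothetical numbers: RAID0 over 3
		 * devices (factor 3) discarding stripes [4, 9) gives
		 * stripes_per_dev of 1 with remaining_stripes of 2, and
		 * last_stripe of (9 - 1) % 3 = 2, so two of the devices
		 * get one extra stripe_len and the stripe on device 2 is
		 * the one trimmed by stripe_end_offset below.
		 */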
5576 
5577 		for (i = 0; i < num_stripes; i++) {
5578 			bbio->stripes[i].physical =
5579 				map->stripes[stripe_index].physical +
5580 				stripe_offset + stripe_nr * map->stripe_len;
5581 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5582 
5583 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5584 					 BTRFS_BLOCK_GROUP_RAID10)) {
5585 				bbio->stripes[i].length = stripes_per_dev *
5586 							  map->stripe_len;
5587 
5588 				if (i / sub_stripes < remaining_stripes)
5589 					bbio->stripes[i].length +=
5590 						map->stripe_len;
5591 
5592 				/*
5593 				 * The first and the last stripes may be
5594 				 * only partially covered by the range:
5595 				 *
5596 				 * |-------|...|-------|
5597 				 *     |----------|
5598 				 *    off     end_off
5599 				 */
5600 				if (i < sub_stripes)
5601 					bbio->stripes[i].length -=
5602 						stripe_offset;
5603 
5604 				if (stripe_index >= last_stripe &&
5605 				    stripe_index <= (last_stripe +
5606 						     sub_stripes - 1))
5607 					bbio->stripes[i].length -=
5608 						stripe_end_offset;
5609 
5610 				if (i == sub_stripes - 1)
5611 					stripe_offset = 0;
5612 			} else
5613 				bbio->stripes[i].length = *length;
5614 
5615 			stripe_index++;
5616 			if (stripe_index == map->num_stripes) {
5617 				/* This could only happen for RAID0/10 */
5618 				stripe_index = 0;
5619 				stripe_nr++;
5620 			}
5621 		}
5622 	} else {
5623 		for (i = 0; i < num_stripes; i++) {
5624 			bbio->stripes[i].physical =
5625 				map->stripes[stripe_index].physical +
5626 				stripe_offset +
5627 				stripe_nr * map->stripe_len;
5628 			bbio->stripes[i].dev =
5629 				map->stripes[stripe_index].dev;
5630 			stripe_index++;
5631 		}
5632 	}
5633 
5634 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5635 		max_errors = btrfs_chunk_max_errors(map);
5636 
5637 	if (bbio->raid_map)
5638 		sort_parity_stripes(bbio, num_stripes);
5639 
5640 	tgtdev_indexes = 0;
5641 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5642 	    dev_replace->tgtdev != NULL) {
5643 		int index_where_to_add;
5644 		u64 srcdev_devid = dev_replace->srcdev->devid;
5645 
5646 		/*
5647 		 * duplicate the write operations while the dev replace
5648 		 * procedure is running. Since the copying of the old disk
5649 		 * to the new disk takes place at run time while the
5650 		 * filesystem is mounted writable, the regular write
5651 		 * operations to the old disk have to be duplicated to go
5652 		 * to the new disk as well.
5653 		 * Note that device->missing is handled by the caller, and
5654 		 * that the write to the old disk is already set up in the
5655 		 * stripes array.
5656 		 */
5657 		index_where_to_add = num_stripes;
5658 		for (i = 0; i < num_stripes; i++) {
5659 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5660 				/* write to new disk, too */
5661 				struct btrfs_bio_stripe *new =
5662 					bbio->stripes + index_where_to_add;
5663 				struct btrfs_bio_stripe *old =
5664 					bbio->stripes + i;
5665 
5666 				new->physical = old->physical;
5667 				new->length = old->length;
5668 				new->dev = dev_replace->tgtdev;
5669 				bbio->tgtdev_map[i] = index_where_to_add;
5670 				index_where_to_add++;
5671 				max_errors++;
5672 				tgtdev_indexes++;
5673 			}
5674 		}
5675 		num_stripes = index_where_to_add;
5676 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5677 		   dev_replace->tgtdev != NULL) {
5678 		u64 srcdev_devid = dev_replace->srcdev->devid;
5679 		int index_srcdev = 0;
5680 		int found = 0;
5681 		u64 physical_of_found = 0;
5682 
5683 		/*
5684 		 * During the dev-replace procedure, the target drive can
5685 		 * also be used to read data in case it is needed to repair
5686 		 * a corrupt block elsewhere. This is possible if the
5687 		 * requested area lies to the left of the left cursor; in
5688 		 * that area, the target drive is a full copy of the source drive.
5689 		 */
5690 		for (i = 0; i < num_stripes; i++) {
5691 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
5692 				/*
5693 				 * In case of DUP, in order to keep it
5694 				 * simple, only add the mirror with the
5695 				 * lowest physical address
5696 				 */
5697 				if (found &&
5698 				    physical_of_found <=
5699 				     bbio->stripes[i].physical)
5700 					continue;
5701 				index_srcdev = i;
5702 				found = 1;
5703 				physical_of_found = bbio->stripes[i].physical;
5704 			}
5705 		}
5706 		if (found) {
5707 			if (physical_of_found + map->stripe_len <=
5708 			    dev_replace->cursor_left) {
5709 				struct btrfs_bio_stripe *tgtdev_stripe =
5710 					bbio->stripes + num_stripes;
5711 
5712 				tgtdev_stripe->physical = physical_of_found;
5713 				tgtdev_stripe->length =
5714 					bbio->stripes[index_srcdev].length;
5715 				tgtdev_stripe->dev = dev_replace->tgtdev;
5716 				bbio->tgtdev_map[index_srcdev] = num_stripes;
5717 
5718 				tgtdev_indexes++;
5719 				num_stripes++;
5720 			}
5721 		}
5722 	}
5723 
5724 	*bbio_ret = bbio;
5725 	bbio->map_type = map->type;
5726 	bbio->num_stripes = num_stripes;
5727 	bbio->max_errors = max_errors;
5728 	bbio->mirror_num = mirror_num;
5729 	bbio->num_tgtdevs = tgtdev_indexes;
5730 
5731 	/*
5732 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
5733 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
5734 	 * available as a mirror
5735 	 */
5736 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5737 		WARN_ON(num_stripes > 1);
5738 		bbio->stripes[0].dev = dev_replace->tgtdev;
5739 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5740 		bbio->mirror_num = map->num_stripes + 1;
5741 	}
5742 out:
5743 	if (dev_replace_is_ongoing)
5744 		btrfs_dev_replace_unlock(dev_replace);
5745 	free_extent_map(em);
5746 	return ret;
5747 }
5748 
5749 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5750 		      u64 logical, u64 *length,
5751 		      struct btrfs_bio **bbio_ret, int mirror_num)
5752 {
5753 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5754 				 mirror_num, 0);
5755 }
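
/*
 * A usage sketch (illustrative only, not a caller in this file): a reader
 * that needs the physical location of one mirror of a logical address
 * might do
 *
 *	u64 len = blocksize;
 *	struct btrfs_bio *bbio = NULL;
 *
 *	ret = btrfs_map_block(fs_info, READ, logical, &len, &bbio, 0);
 *	if (!ret) {
 *		use bbio->stripes[0].dev and bbio->stripes[0].physical;
 *		btrfs_put_bbio(bbio);
 *	}
 *
 * On return *length is clamped to the mapped extent, and the bbio
 * reference must be dropped with btrfs_put_bbio().
 */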
5756 
5757 /* For Scrub/replace */
5758 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5759 		     u64 logical, u64 *length,
5760 		     struct btrfs_bio **bbio_ret, int mirror_num,
5761 		     int need_raid_map)
5762 {
5763 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5764 				 mirror_num, need_raid_map);
5765 }
5766 
5767 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5768 		     u64 chunk_start, u64 physical, u64 devid,
5769 		     u64 **logical, int *naddrs, int *stripe_len)
5770 {
5771 	struct extent_map_tree *em_tree = &map_tree->map_tree;
5772 	struct extent_map *em;
5773 	struct map_lookup *map;
5774 	u64 *buf;
5775 	u64 bytenr;
5776 	u64 length;
5777 	u64 stripe_nr;
5778 	u64 rmap_len;
5779 	int i, j, nr = 0;
5780 
5781 	read_lock(&em_tree->lock);
5782 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
5783 	read_unlock(&em_tree->lock);
5784 
5785 	if (!em) {
5786 		printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
5787 		       chunk_start);
5788 		return -EIO;
5789 	}
5790 
5791 	if (em->start != chunk_start) {
5792 		printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
5793 		       em->start, chunk_start);
5794 		free_extent_map(em);
5795 		return -EIO;
5796 	}
5797 	map = (struct map_lookup *)em->bdev;
5798 
5799 	length = em->len;
5800 	rmap_len = map->stripe_len;
5801 
5802 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5803 		length = div_u64(length, map->num_stripes / map->sub_stripes);
5804 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5805 		length = div_u64(length, map->num_stripes);
5806 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5807 		length = div_u64(length, nr_data_stripes(map));
5808 		rmap_len = map->stripe_len * nr_data_stripes(map);
5809 	}
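	/*
	 * A worked example with hypothetical numbers: a 3GB RAID0 chunk
	 * over 3 devices stores 1GB per device, so each stripe below is
	 * matched against a 1GB physical window; for RAID5/6, rmap_len
	 * grows to a full row of data stripes so the logical addresses
	 * returned land on full-stripe boundaries.
	 */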
5810 
5811 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5812 	BUG_ON(!buf); /* -ENOMEM */
5813 
5814 	for (i = 0; i < map->num_stripes; i++) {
5815 		if (devid && map->stripes[i].dev->devid != devid)
5816 			continue;
5817 		if (map->stripes[i].physical > physical ||
5818 		    map->stripes[i].physical + length <= physical)
5819 			continue;
5820 
5821 		stripe_nr = physical - map->stripes[i].physical;
5822 		stripe_nr = div_u64(stripe_nr, map->stripe_len);
5823 
5824 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5825 			stripe_nr = stripe_nr * map->num_stripes + i;
5826 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5827 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5828 			stripe_nr = stripe_nr * map->num_stripes + i;
5829 		} /* else if RAID[56], multiply by nr_data_stripes().
5830 		   * Alternatively, just use rmap_len below instead of
5831 		   * map->stripe_len */
5832 
5833 		bytenr = chunk_start + stripe_nr * rmap_len;
5834 		WARN_ON(nr >= map->num_stripes);
5835 		for (j = 0; j < nr; j++) {
5836 			if (buf[j] == bytenr)
5837 				break;
5838 		}
5839 		if (j == nr) {
5840 			WARN_ON(nr >= map->num_stripes);
5841 			buf[nr++] = bytenr;
5842 		}
5843 	}
5844 
5845 	*logical = buf;
5846 	*naddrs = nr;
5847 	*stripe_len = rmap_len;
5848 
5849 	free_extent_map(em);
5850 	return 0;
5851 }
5852 
5853 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
5854 {
5855 	bio->bi_private = bbio->private;
5856 	bio->bi_end_io = bbio->end_io;
5857 	bio_endio(bio);
5858 
5859 	btrfs_put_bbio(bbio);
5860 }
5861 
5862 static void btrfs_end_bio(struct bio *bio)
5863 {
5864 	struct btrfs_bio *bbio = bio->bi_private;
5865 	int is_orig_bio = 0;
5866 
5867 	if (bio->bi_error) {
5868 		atomic_inc(&bbio->error);
5869 		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
5870 			unsigned int stripe_index =
5871 				btrfs_io_bio(bio)->stripe_index;
5872 			struct btrfs_device *dev;
5873 
5874 			BUG_ON(stripe_index >= bbio->num_stripes);
5875 			dev = bbio->stripes[stripe_index].dev;
5876 			if (dev->bdev) {
5877 				if (bio->bi_rw & WRITE)
5878 					btrfs_dev_stat_inc(dev,
5879 						BTRFS_DEV_STAT_WRITE_ERRS);
5880 				else
5881 					btrfs_dev_stat_inc(dev,
5882 						BTRFS_DEV_STAT_READ_ERRS);
5883 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5884 					btrfs_dev_stat_inc(dev,
5885 						BTRFS_DEV_STAT_FLUSH_ERRS);
5886 				btrfs_dev_stat_print_on_error(dev);
5887 			}
5888 		}
5889 	}
5890 
5891 	if (bio == bbio->orig_bio)
5892 		is_orig_bio = 1;
5893 
5894 	btrfs_bio_counter_dec(bbio->fs_info);
5895 
5896 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5897 		if (!is_orig_bio) {
5898 			bio_put(bio);
5899 			bio = bbio->orig_bio;
5900 		}
5901 
5902 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5903 		/* only send an error to the higher layers if it is
5904 		 * beyond the tolerance of the btrfs bio
5905 		 */
5906 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5907 			bio->bi_error = -EIO;
5908 		} else {
5909 			/*
5910 			 * this bio is actually up to date; we didn't
5911 			 * go over the max number of errors
5912 			 */
5913 			bio->bi_error = 0;
5914 		}
5915 
5916 		btrfs_end_bbio(bbio, bio);
5917 	} else if (!is_orig_bio) {
5918 		bio_put(bio);
5919 	}
5920 }
5921 
5922 /*
5923  * see run_scheduled_bios for a description of why bios are collected for
5924  * async submit.
5925  *
5926  * This will add one bio to the pending list for a device and make sure
5927  * the work struct is scheduled.
5928  */
5929 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5930 					struct btrfs_device *device,
5931 					int rw, struct bio *bio)
5932 {
5933 	int should_queue = 1;
5934 	struct btrfs_pending_bios *pending_bios;
5935 
5936 	if (device->missing || !device->bdev) {
5937 		bio_io_error(bio);
5938 		return;
5939 	}
5940 
5941 	/* don't bother with additional async steps for reads right now */
5942 	if (!(rw & REQ_WRITE)) {
5943 		bio_get(bio);
5944 		btrfsic_submit_bio(rw, bio);
5945 		bio_put(bio);
5946 		return;
5947 	}
5948 
5949 	/*
5950 	 * nr_async_bios allows us to reliably return congestion to the
5951 	 * higher layers.  Otherwise, the async bio makes it appear we have
5952 	 * made progress against dirty pages when we've really just put it
5953 	 * on a queue for later.
5954 	 */
5955 	atomic_inc(&root->fs_info->nr_async_bios);
5956 	WARN_ON(bio->bi_next);
5957 	bio->bi_next = NULL;
5958 	bio->bi_rw |= rw;
5959 
5960 	spin_lock(&device->io_lock);
5961 	if (bio->bi_rw & REQ_SYNC)
5962 		pending_bios = &device->pending_sync_bios;
5963 	else
5964 		pending_bios = &device->pending_bios;
5965 
5966 	if (pending_bios->tail)
5967 		pending_bios->tail->bi_next = bio;
5968 
5969 	pending_bios->tail = bio;
5970 	if (!pending_bios->head)
5971 		pending_bios->head = bio;
5972 	if (device->running_pending)
5973 		should_queue = 0;
5974 
5975 	spin_unlock(&device->io_lock);
5976 
5977 	if (should_queue)
5978 		btrfs_queue_work(root->fs_info->submit_workers,
5979 				 &device->work);
5980 }
5981 
5982 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5983 			      struct bio *bio, u64 physical, int dev_nr,
5984 			      int rw, int async)
5985 {
5986 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5987 
5988 	bio->bi_private = bbio;
5989 	btrfs_io_bio(bio)->stripe_index = dev_nr;
5990 	bio->bi_end_io = btrfs_end_bio;
5991 	bio->bi_iter.bi_sector = physical >> 9;
5992 #ifdef DEBUG
5993 	{
5994 		struct rcu_string *name;
5995 
5996 		rcu_read_lock();
5997 		name = rcu_dereference(dev->name);
5998 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5999 			 "(%s id %llu), size=%u\n", rw,
6000 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6001 			 name->str, dev->devid, bio->bi_iter.bi_size);
6002 		rcu_read_unlock();
6003 	}
6004 #endif
6005 	bio->bi_bdev = dev->bdev;
6006 
6007 	btrfs_bio_counter_inc_noblocked(root->fs_info);
6008 
6009 	if (async)
6010 		btrfs_schedule_bio(root, dev, rw, bio);
6011 	else
6012 		btrfsic_submit_bio(rw, bio);
6013 }
6014 
6015 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6016 {
6017 	atomic_inc(&bbio->error);
6018 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
6019 		/* Should be the original bio. */
6020 		WARN_ON(bio != bbio->orig_bio);
6021 
6022 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6023 		bio->bi_iter.bi_sector = logical >> 9;
6024 		bio->bi_error = -EIO;
6025 		btrfs_end_bbio(bbio, bio);
6026 	}
6027 }
6028 
6029 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
6030 		  int mirror_num, int async_submit)
6031 {
6032 	struct btrfs_device *dev;
6033 	struct bio *first_bio = bio;
6034 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6035 	u64 length = 0;
6036 	u64 map_length;
6037 	int ret;
6038 	int dev_nr;
6039 	int total_devs;
6040 	struct btrfs_bio *bbio = NULL;
6041 
6042 	length = bio->bi_iter.bi_size;
6043 	map_length = length;
6044 
6045 	btrfs_bio_counter_inc_blocked(root->fs_info);
6046 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
6047 			      mirror_num, 1);
6048 	if (ret) {
6049 		btrfs_bio_counter_dec(root->fs_info);
6050 		return ret;
6051 	}
6052 
6053 	total_devs = bbio->num_stripes;
6054 	bbio->orig_bio = first_bio;
6055 	bbio->private = first_bio->bi_private;
6056 	bbio->end_io = first_bio->bi_end_io;
6057 	bbio->fs_info = root->fs_info;
6058 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6059 
6060 	if (bbio->raid_map) {
6061 		/* In this case, map_length has been set to the length of
6062 		   a single stripe, not the whole write */
6063 		if (rw & WRITE) {
6064 			ret = raid56_parity_write(root, bio, bbio, map_length);
6065 		} else {
6066 			ret = raid56_parity_recover(root, bio, bbio, map_length,
6067 						    mirror_num, 1);
6068 		}
6069 
6070 		btrfs_bio_counter_dec(root->fs_info);
6071 		return ret;
6072 	}
6073 
6074 	if (map_length < length) {
6075 		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
6076 			logical, length, map_length);
6077 		BUG();
6078 	}
6079 
6080 	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6081 		dev = bbio->stripes[dev_nr].dev;
6082 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
6083 			bbio_error(bbio, first_bio, logical);
6084 			continue;
6085 		}
6086 
6087 		if (dev_nr < total_devs - 1) {
6088 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
6089 			BUG_ON(!bio); /* -ENOMEM */
6090 		} else
6091 			bio = first_bio;
6092 
6093 		submit_stripe_bio(root, bbio, bio,
6094 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
6095 				  async_submit);
6096 	}
6097 	btrfs_bio_counter_dec(root->fs_info);
6098 	return 0;
6099 }
6100 
6101 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
6102 				       u8 *uuid, u8 *fsid)
6103 {
6104 	struct btrfs_device *device;
6105 	struct btrfs_fs_devices *cur_devices;
6106 
6107 	cur_devices = fs_info->fs_devices;
6108 	while (cur_devices) {
6109 		if (!fsid ||
6110 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
6111 			device = __find_device(&cur_devices->devices,
6112 					       devid, uuid);
6113 			if (device)
6114 				return device;
6115 		}
6116 		cur_devices = cur_devices->seed;
6117 	}
6118 	return NULL;
6119 }
6120 
6121 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
6122 					    struct btrfs_fs_devices *fs_devices,
6123 					    u64 devid, u8 *dev_uuid)
6124 {
6125 	struct btrfs_device *device;
6126 
6127 	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6128 	if (IS_ERR(device))
6129 		return NULL;
6130 
6131 	list_add(&device->dev_list, &fs_devices->devices);
6132 	device->fs_devices = fs_devices;
6133 	fs_devices->num_devices++;
6134 
6135 	device->missing = 1;
6136 	fs_devices->missing_devices++;
6137 
6138 	return device;
6139 }
6140 
6141 /**
6142  * btrfs_alloc_device - allocate struct btrfs_device
6143  * @fs_info:	used only for generating a new devid, can be NULL if
6144  *		devid is provided (i.e. @devid != NULL).
6145  * @devid:	a pointer to devid for this device.  If NULL a new devid
6146  *		is generated.
6147  * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
6148  *		is generated.
6149  *
6150  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6151  * on error.  Returned struct is not linked onto any lists and can be
6152  * destroyed with kfree() right away.
6153  */
6154 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6155 					const u64 *devid,
6156 					const u8 *uuid)
6157 {
6158 	struct btrfs_device *dev;
6159 	u64 tmp;
6160 
6161 	if (WARN_ON(!devid && !fs_info))
6162 		return ERR_PTR(-EINVAL);
6163 
6164 	dev = __alloc_device();
6165 	if (IS_ERR(dev))
6166 		return dev;
6167 
6168 	if (devid)
6169 		tmp = *devid;
6170 	else {
6171 		int ret;
6172 
6173 		ret = find_next_devid(fs_info, &tmp);
6174 		if (ret) {
6175 			kfree(dev);
6176 			return ERR_PTR(ret);
6177 		}
6178 	}
6179 	dev->devid = tmp;
6180 
6181 	if (uuid)
6182 		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6183 	else
6184 		generate_random_uuid(dev->uuid);
6185 
6186 	btrfs_init_work(&dev->work, btrfs_submit_helper,
6187 			pending_bios_fn, NULL, NULL);
6188 
6189 	return dev;
6190 }
6191 
6192 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6193 			  struct extent_buffer *leaf,
6194 			  struct btrfs_chunk *chunk)
6195 {
6196 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6197 	struct map_lookup *map;
6198 	struct extent_map *em;
6199 	u64 logical;
6200 	u64 length;
6201 	u64 devid;
6202 	u8 uuid[BTRFS_UUID_SIZE];
6203 	int num_stripes;
6204 	int ret;
6205 	int i;
6206 
6207 	logical = key->offset;
6208 	length = btrfs_chunk_length(leaf, chunk);
6209 
6210 	read_lock(&map_tree->map_tree.lock);
6211 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6212 	read_unlock(&map_tree->map_tree.lock);
6213 
6214 	/* already mapped? */
6215 	if (em && em->start <= logical && em->start + em->len > logical) {
6216 		free_extent_map(em);
6217 		return 0;
6218 	} else if (em) {
6219 		free_extent_map(em);
6220 	}
6221 
6222 	em = alloc_extent_map();
6223 	if (!em)
6224 		return -ENOMEM;
6225 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6226 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6227 	if (!map) {
6228 		free_extent_map(em);
6229 		return -ENOMEM;
6230 	}
6231 
6232 	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6233 	em->bdev = (struct block_device *)map;
6234 	em->start = logical;
6235 	em->len = length;
6236 	em->orig_start = 0;
6237 	em->block_start = 0;
6238 	em->block_len = em->len;
6239 
6240 	map->num_stripes = num_stripes;
6241 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
6242 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
6243 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
6244 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6245 	map->type = btrfs_chunk_type(leaf, chunk);
6246 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6247 	for (i = 0; i < num_stripes; i++) {
6248 		map->stripes[i].physical =
6249 			btrfs_stripe_offset_nr(leaf, chunk, i);
6250 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6251 		read_extent_buffer(leaf, uuid, (unsigned long)
6252 				   btrfs_stripe_dev_uuid_nr(chunk, i),
6253 				   BTRFS_UUID_SIZE);
6254 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
6255 							uuid, NULL);
6256 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
6257 			free_extent_map(em);
6258 			return -EIO;
6259 		}
6260 		if (!map->stripes[i].dev) {
6261 			map->stripes[i].dev =
6262 				add_missing_dev(root, root->fs_info->fs_devices,
6263 						devid, uuid);
6264 			if (!map->stripes[i].dev) {
6265 				free_extent_map(em);
6266 				return -EIO;
6267 			}
6268 			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
6269 						devid, uuid);
6270 		}
6271 		map->stripes[i].dev->in_fs_metadata = 1;
6272 	}
6273 
6274 	write_lock(&map_tree->map_tree.lock);
6275 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6276 	write_unlock(&map_tree->map_tree.lock);
6277 	BUG_ON(ret); /* Tree corruption */
6278 	free_extent_map(em);
6279 
6280 	return 0;
6281 }
6282 
6283 static void fill_device_from_item(struct extent_buffer *leaf,
6284 				 struct btrfs_dev_item *dev_item,
6285 				 struct btrfs_device *device)
6286 {
6287 	unsigned long ptr;
6288 
6289 	device->devid = btrfs_device_id(leaf, dev_item);
6290 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6291 	device->total_bytes = device->disk_total_bytes;
6292 	device->commit_total_bytes = device->disk_total_bytes;
6293 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6294 	device->commit_bytes_used = device->bytes_used;
6295 	device->type = btrfs_device_type(leaf, dev_item);
6296 	device->io_align = btrfs_device_io_align(leaf, dev_item);
6297 	device->io_width = btrfs_device_io_width(leaf, dev_item);
6298 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6299 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6300 	device->is_tgtdev_for_dev_replace = 0;
6301 
6302 	ptr = btrfs_device_uuid(dev_item);
6303 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6304 }
6305 
6306 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
6307 						  u8 *fsid)
6308 {
6309 	struct btrfs_fs_devices *fs_devices;
6310 	int ret;
6311 
6312 	BUG_ON(!mutex_is_locked(&uuid_mutex));
6313 
6314 	fs_devices = root->fs_info->fs_devices->seed;
6315 	while (fs_devices) {
6316 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
6317 			return fs_devices;
6318 
6319 		fs_devices = fs_devices->seed;
6320 	}
6321 
6322 	fs_devices = find_fsid(fsid);
6323 	if (!fs_devices) {
6324 		if (!btrfs_test_opt(root, DEGRADED))
6325 			return ERR_PTR(-ENOENT);
6326 
6327 		fs_devices = alloc_fs_devices(fsid);
6328 		if (IS_ERR(fs_devices))
6329 			return fs_devices;
6330 
6331 		fs_devices->seeding = 1;
6332 		fs_devices->opened = 1;
6333 		return fs_devices;
6334 	}
6335 
6336 	fs_devices = clone_fs_devices(fs_devices);
6337 	if (IS_ERR(fs_devices))
6338 		return fs_devices;
6339 
6340 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
6341 				   root->fs_info->bdev_holder);
6342 	if (ret) {
6343 		free_fs_devices(fs_devices);
6344 		fs_devices = ERR_PTR(ret);
6345 		goto out;
6346 	}
6347 
6348 	if (!fs_devices->seeding) {
6349 		__btrfs_close_devices(fs_devices);
6350 		free_fs_devices(fs_devices);
6351 		fs_devices = ERR_PTR(-EINVAL);
6352 		goto out;
6353 	}
6354 
6355 	fs_devices->seed = root->fs_info->fs_devices->seed;
6356 	root->fs_info->fs_devices->seed = fs_devices;
6357 out:
6358 	return fs_devices;
6359 }
6360 
6361 static int read_one_dev(struct btrfs_root *root,
6362 			struct extent_buffer *leaf,
6363 			struct btrfs_dev_item *dev_item)
6364 {
6365 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6366 	struct btrfs_device *device;
6367 	u64 devid;
6368 	int ret;
6369 	u8 fs_uuid[BTRFS_UUID_SIZE];
6370 	u8 dev_uuid[BTRFS_UUID_SIZE];
6371 
6372 	devid = btrfs_device_id(leaf, dev_item);
6373 	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6374 			   BTRFS_UUID_SIZE);
6375 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6376 			   BTRFS_UUID_SIZE);
6377 
6378 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
6379 		fs_devices = open_seed_devices(root, fs_uuid);
6380 		if (IS_ERR(fs_devices))
6381 			return PTR_ERR(fs_devices);
6382 	}
6383 
6384 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
6385 	if (!device) {
6386 		if (!btrfs_test_opt(root, DEGRADED))
6387 			return -EIO;
6388 
6389 		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
6390 		if (!device)
6391 			return -ENOMEM;
6392 		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
6393 				devid, dev_uuid);
6394 	} else {
6395 		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
6396 			return -EIO;
6397 
6398 		if (!device->bdev && !device->missing) {
6399 			/*
6400 			 * this happens when a device that was properly set
6401 			 * up in the device info lists suddenly goes bad.
6402 			 * device->bdev is NULL, so we have to set
6403 			 * device->missing to one here
6404 			 */
6405 			device->fs_devices->missing_devices++;
6406 			device->missing = 1;
6407 		}
6408 
6409 		/* Move the device to its own fs_devices */
6410 		if (device->fs_devices != fs_devices) {
6411 			ASSERT(device->missing);
6412 
6413 			list_move(&device->dev_list, &fs_devices->devices);
6414 			device->fs_devices->num_devices--;
6415 			fs_devices->num_devices++;
6416 
6417 			device->fs_devices->missing_devices--;
6418 			fs_devices->missing_devices++;
6419 
6420 			device->fs_devices = fs_devices;
6421 		}
6422 	}
6423 
6424 	if (device->fs_devices != root->fs_info->fs_devices) {
6425 		BUG_ON(device->writeable);
6426 		if (device->generation !=
6427 		    btrfs_device_generation(leaf, dev_item))
6428 			return -EINVAL;
6429 	}
6430 
6431 	fill_device_from_item(leaf, dev_item, device);
6432 	device->in_fs_metadata = 1;
6433 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
6434 		device->fs_devices->total_rw_bytes += device->total_bytes;
6435 		spin_lock(&root->fs_info->free_chunk_lock);
6436 		root->fs_info->free_chunk_space += device->total_bytes -
6437 			device->bytes_used;
6438 		spin_unlock(&root->fs_info->free_chunk_lock);
6439 	}
6440 	ret = 0;
6441 	return ret;
6442 }
6443 
6444 int btrfs_read_sys_array(struct btrfs_root *root)
6445 {
6446 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
6447 	struct extent_buffer *sb;
6448 	struct btrfs_disk_key *disk_key;
6449 	struct btrfs_chunk *chunk;
6450 	u8 *array_ptr;
6451 	unsigned long sb_array_offset;
6452 	int ret = 0;
6453 	u32 num_stripes;
6454 	u32 array_size;
6455 	u32 len = 0;
6456 	u32 cur_offset;
6457 	struct btrfs_key key;
6458 
6459 	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
6460 	/*
6461 	 * This will create an extent buffer of nodesize; the superblock size
6462 	 * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6463 	 * overallocate, but we can keep it as-is since only the first page is used.
6464 	 */
6465 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6466 	if (!sb)
6467 		return -ENOMEM;
6468 	btrfs_set_buffer_uptodate(sb);
6469 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6470 	/*
6471 	 * The sb extent buffer is artificial and just used to read the system array.
6472 	 * btrfs_set_buffer_uptodate() call does not properly mark all its
6473 	 * pages up-to-date when the page is larger: extent does not cover the
6474 	 * whole page and consequently check_page_uptodate does not find all
6475 	 * the page's extents up-to-date (the hole beyond sb),
6476 	 * write_extent_buffer then triggers a WARN_ON.
6477 	 *
6478 	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6479 	 * but sb spans only this function. Add an explicit SetPageUptodate call
6480 	 * to silence the warning e.g. on PowerPC 64.
6481 	 */
6482 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
6483 		SetPageUptodate(sb->pages[0]);
6484 
6485 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6486 	array_size = btrfs_super_sys_array_size(super_copy);
6487 
6488 	array_ptr = super_copy->sys_chunk_array;
6489 	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6490 	cur_offset = 0;
6491 
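	/*
	 * sys_chunk_array is a packed sequence of pairs: a btrfs_disk_key
	 * (which must be a CHUNK_ITEM key) immediately followed by the
	 * matching btrfs_chunk with its inline stripe array. The loop
	 * below walks these pairs, checking each declared length against
	 * array_size before reading it.
	 */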
6492 	while (cur_offset < array_size) {
6493 		disk_key = (struct btrfs_disk_key *)array_ptr;
6494 		len = sizeof(*disk_key);
6495 		if (cur_offset + len > array_size)
6496 			goto out_short_read;
6497 
6498 		btrfs_disk_key_to_cpu(&key, disk_key);
6499 
6500 		array_ptr += len;
6501 		sb_array_offset += len;
6502 		cur_offset += len;
6503 
6504 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6505 			chunk = (struct btrfs_chunk *)sb_array_offset;
6506 			/*
6507 			 * At least one btrfs_chunk with one stripe must be
6508 			 * present; the exact stripe count check comes afterwards
6509 			 */
6510 			len = btrfs_chunk_item_size(1);
6511 			if (cur_offset + len > array_size)
6512 				goto out_short_read;
6513 
6514 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6515 			len = btrfs_chunk_item_size(num_stripes);
6516 			if (cur_offset + len > array_size)
6517 				goto out_short_read;
6518 
6519 			ret = read_one_chunk(root, &key, sb, chunk);
6520 			if (ret)
6521 				break;
6522 		} else {
6523 			ret = -EIO;
6524 			break;
6525 		}
6526 		array_ptr += len;
6527 		sb_array_offset += len;
6528 		cur_offset += len;
6529 	}
6530 	free_extent_buffer(sb);
6531 	return ret;
6532 
6533 out_short_read:
6534 	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6535 			len, cur_offset);
6536 	free_extent_buffer(sb);
6537 	return -EIO;
6538 }
6539 
6540 int btrfs_read_chunk_tree(struct btrfs_root *root)
6541 {
6542 	struct btrfs_path *path;
6543 	struct extent_buffer *leaf;
6544 	struct btrfs_key key;
6545 	struct btrfs_key found_key;
6546 	int ret;
6547 	int slot;
6548 
6549 	root = root->fs_info->chunk_root;
6550 
6551 	path = btrfs_alloc_path();
6552 	if (!path)
6553 		return -ENOMEM;
6554 
6555 	mutex_lock(&uuid_mutex);
6556 	lock_chunks(root);
6557 
6558 	/*
6559 	 * Read all device items, and then all the chunk items. All
6560 	 * device items are found before any chunk item (their object id
6561 	 * is smaller than the lowest possible object id for a chunk
6562 	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6563 	 */
6564 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6565 	key.offset = 0;
6566 	key.type = 0;
6567 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6568 	if (ret < 0)
6569 		goto error;
6570 	while (1) {
6571 		leaf = path->nodes[0];
6572 		slot = path->slots[0];
6573 		if (slot >= btrfs_header_nritems(leaf)) {
6574 			ret = btrfs_next_leaf(root, path);
6575 			if (ret == 0)
6576 				continue;
6577 			if (ret < 0)
6578 				goto error;
6579 			break;
6580 		}
6581 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
6582 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6583 			struct btrfs_dev_item *dev_item;
6584 			dev_item = btrfs_item_ptr(leaf, slot,
6585 						  struct btrfs_dev_item);
6586 			ret = read_one_dev(root, leaf, dev_item);
6587 			if (ret)
6588 				goto error;
6589 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6590 			struct btrfs_chunk *chunk;
6591 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6592 			ret = read_one_chunk(root, &found_key, leaf, chunk);
6593 			if (ret)
6594 				goto error;
6595 		}
6596 		path->slots[0]++;
6597 	}
6598 	ret = 0;
6599 error:
6600 	unlock_chunks(root);
6601 	mutex_unlock(&uuid_mutex);
6602 
6603 	btrfs_free_path(path);
6604 	return ret;
6605 }
6606 
6607 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6608 {
6609 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6610 	struct btrfs_device *device;
6611 
6612 	while (fs_devices) {
6613 		mutex_lock(&fs_devices->device_list_mutex);
6614 		list_for_each_entry(device, &fs_devices->devices, dev_list)
6615 			device->dev_root = fs_info->dev_root;
6616 		mutex_unlock(&fs_devices->device_list_mutex);
6617 
6618 		fs_devices = fs_devices->seed;
6619 	}
6620 }
6621 
6622 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6623 {
6624 	int i;
6625 
6626 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6627 		btrfs_dev_stat_reset(dev, i);
6628 }
6629 
6630 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6631 {
6632 	struct btrfs_key key;
6633 	struct btrfs_key found_key;
6634 	struct btrfs_root *dev_root = fs_info->dev_root;
6635 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6636 	struct extent_buffer *eb;
6637 	int slot;
6638 	int ret = 0;
6639 	struct btrfs_device *device;
6640 	struct btrfs_path *path = NULL;
6641 	int i;
6642 
6643 	path = btrfs_alloc_path();
6644 	if (!path) {
6645 		ret = -ENOMEM;
6646 		goto out;
6647 	}
6648 
6649 	mutex_lock(&fs_devices->device_list_mutex);
6650 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6651 		int item_size;
6652 		struct btrfs_dev_stats_item *ptr;
6653 
6654 		key.objectid = 0;
6655 		key.type = BTRFS_DEV_STATS_KEY;
6656 		key.offset = device->devid;
6657 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
6658 		if (ret) {
6659 			__btrfs_reset_dev_stats(device);
6660 			device->dev_stats_valid = 1;
6661 			btrfs_release_path(path);
6662 			continue;
6663 		}
6664 		slot = path->slots[0];
6665 		eb = path->nodes[0];
6666 		btrfs_item_key_to_cpu(eb, &found_key, slot);
6667 		item_size = btrfs_item_size_nr(eb, slot);
6668 
6669 		ptr = btrfs_item_ptr(eb, slot,
6670 				     struct btrfs_dev_stats_item);
6671 
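		/*
		 * Filesystems written by older kernels may carry a
		 * dev_stats item with fewer than BTRFS_DEV_STAT_VALUES_MAX
		 * counters; any counter beyond item_size is reset to zero
		 * instead of being treated as an error.
		 */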
6672 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6673 			if (item_size >= (1 + i) * sizeof(__le64))
6674 				btrfs_dev_stat_set(device, i,
6675 					btrfs_dev_stats_value(eb, ptr, i));
6676 			else
6677 				btrfs_dev_stat_reset(device, i);
6678 		}
6679 
6680 		device->dev_stats_valid = 1;
6681 		btrfs_dev_stat_print_on_load(device);
6682 		btrfs_release_path(path);
6683 	}
6684 	mutex_unlock(&fs_devices->device_list_mutex);
6685 
6686 out:
6687 	btrfs_free_path(path);
6688 	return ret < 0 ? ret : 0;
6689 }
6690 
6691 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
6692 				struct btrfs_root *dev_root,
6693 				struct btrfs_device *device)
6694 {
6695 	struct btrfs_path *path;
6696 	struct btrfs_key key;
6697 	struct extent_buffer *eb;
6698 	struct btrfs_dev_stats_item *ptr;
6699 	int ret;
6700 	int i;
6701 
6702 	key.objectid = 0;
6703 	key.type = BTRFS_DEV_STATS_KEY;
6704 	key.offset = device->devid;
6705 
6706 	path = btrfs_alloc_path();
6707 	BUG_ON(!path);
6708 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
6709 	if (ret < 0) {
6710 		btrfs_warn_in_rcu(dev_root->fs_info,
6711 			"error %d while searching for dev_stats item for device %s",
6712 			      ret, rcu_str_deref(device->name));
6713 		goto out;
6714 	}
6715 
6716 	if (ret == 0 &&
6717 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
6718 		/* need to delete old one and insert a new one */
6719 		ret = btrfs_del_item(trans, dev_root, path);
6720 		if (ret != 0) {
6721 			btrfs_warn_in_rcu(dev_root->fs_info,
6722 				"delete too small dev_stats item for device %s failed %d",
6723 				      rcu_str_deref(device->name), ret);
6724 			goto out;
6725 		}
6726 		ret = 1;
6727 	}
6728 
6729 	if (ret == 1) {
6730 		/* need to insert a new item */
6731 		btrfs_release_path(path);
6732 		ret = btrfs_insert_empty_item(trans, dev_root, path,
6733 					      &key, sizeof(*ptr));
6734 		if (ret < 0) {
6735 			btrfs_warn_in_rcu(dev_root->fs_info,
6736 				"insert dev_stats item for device %s failed %d",
6737 				rcu_str_deref(device->name), ret);
6738 			goto out;
6739 		}
6740 	}
6741 
6742 	eb = path->nodes[0];
6743 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
6744 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6745 		btrfs_set_dev_stats_value(eb, ptr, i,
6746 					  btrfs_dev_stat_read(device, i));
6747 	btrfs_mark_buffer_dirty(eb);
6748 
6749 out:
6750 	btrfs_free_path(path);
6751 	return ret;
6752 }
6753 
6754 /*
6755  * called from commit_transaction. Writes all changed device stats to disk.
6756  */
6757 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
6758 			struct btrfs_fs_info *fs_info)
6759 {
6760 	struct btrfs_root *dev_root = fs_info->dev_root;
6761 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6762 	struct btrfs_device *device;
6763 	int stats_cnt;
6764 	int ret = 0;
6765 
6766 	mutex_lock(&fs_devices->device_list_mutex);
6767 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6768 		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
6769 			continue;
6770 
6771 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
6772 		ret = update_dev_stat_item(trans, dev_root, device);
6773 		if (!ret)
6774 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
6775 	}
6776 	mutex_unlock(&fs_devices->device_list_mutex);
6777 
6778 	return ret;
6779 }
6780 
6781 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
6782 {
6783 	btrfs_dev_stat_inc(dev, index);
6784 	btrfs_dev_stat_print_on_error(dev);
6785 }
6786 
6787 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
6788 {
6789 	if (!dev->dev_stats_valid)
6790 		return;
6791 	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
6792 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6793 			   rcu_str_deref(dev->name),
6794 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6795 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6796 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6797 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6798 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6799 }
6800 
6801 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
6802 {
6803 	int i;
6804 
6805 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6806 		if (btrfs_dev_stat_read(dev, i) != 0)
6807 			break;
6808 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
6809 		return; /* all values == 0, suppress message */
6810 
6811 	btrfs_info_in_rcu(dev->dev_root->fs_info,
6812 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
6813 	       rcu_str_deref(dev->name),
6814 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
6815 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
6816 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
6817 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
6818 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
6819 }
6820 
6821 int btrfs_get_dev_stats(struct btrfs_root *root,
6822 			struct btrfs_ioctl_get_dev_stats *stats)
6823 {
6824 	struct btrfs_device *dev;
6825 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6826 	int i;
6827 
6828 	mutex_lock(&fs_devices->device_list_mutex);
6829 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
6830 	mutex_unlock(&fs_devices->device_list_mutex);
6831 
6832 	if (!dev) {
6833 		btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
6834 		return -ENODEV;
6835 	} else if (!dev->dev_stats_valid) {
6836 		btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
6837 		return -ENODEV;
6838 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
6839 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
6840 			if (stats->nr_items > i)
6841 				stats->values[i] =
6842 					btrfs_dev_stat_read_and_reset(dev, i);
6843 			else
6844 				btrfs_dev_stat_reset(dev, i);
6845 		}
6846 	} else {
6847 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6848 			if (stats->nr_items > i)
6849 				stats->values[i] = btrfs_dev_stat_read(dev, i);
6850 	}
6851 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
6852 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
6853 	return 0;
6854 }
6855 
6856 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
6857 {
6858 	struct buffer_head *bh;
6859 	struct btrfs_super_block *disk_super;
6860 	int copy_num;
6861 
6862 	if (!bdev)
6863 		return;
6864 
6865 	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
6866 		copy_num++) {
6867 
6868 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
6869 			continue;
6870 
6871 		disk_super = (struct btrfs_super_block *)bh->b_data;
6872 
6873 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
6874 		set_buffer_dirty(bh);
6875 		sync_dirty_buffer(bh);
6876 		brelse(bh);
6877 	}
6878 
6879 	/* Notify udev that device has changed */
6880 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
6881 
6882 	/* Update ctime/mtime for device path for libblkid */
6883 	update_dev_time(device_path);
6884 }
6885 
6886 /*
6887  * Update the commit size of all resized devices; this value is used
6888  * when writing out the super blocks.
6889  */
6890 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
6891 {
6892 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6893 	struct btrfs_device *curr, *next;
6894 
6895 	if (list_empty(&fs_devices->resized_devices))
6896 		return;
6897 
6898 	mutex_lock(&fs_devices->device_list_mutex);
6899 	lock_chunks(fs_info->dev_root);
6900 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
6901 				 resized_list) {
6902 		list_del_init(&curr->resized_list);
6903 		curr->commit_total_bytes = curr->disk_total_bytes;
6904 	}
6905 	unlock_chunks(fs_info->dev_root);
6906 	mutex_unlock(&fs_devices->device_list_mutex);
6907 }
6908 
6909 /* Must be invoked during the transaction commit */
6910 void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
6911 					struct btrfs_transaction *transaction)
6912 {
6913 	struct extent_map *em;
6914 	struct map_lookup *map;
6915 	struct btrfs_device *dev;
6916 	int i;
6917 
6918 	if (list_empty(&transaction->pending_chunks))
6919 		return;
6920 
6921 	/* Serialize with the device replace finish process */
6922 	lock_chunks(root);
6923 	list_for_each_entry(em, &transaction->pending_chunks, list) {
6924 		map = (struct map_lookup *)em->bdev;
6925 
6926 		for (i = 0; i < map->num_stripes; i++) {
6927 			dev = map->stripes[i].dev;
6928 			dev->commit_bytes_used = dev->bytes_used;
6929 		}
6930 	}
6931 	unlock_chunks(root);
6932 }
6933 
6934 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
6935 {
6936 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6937 	while (fs_devices) {
6938 		fs_devices->fs_info = fs_info;
6939 		fs_devices = fs_devices->seed;
6940 	}
6941 }
6942 
6943 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
6944 {
6945 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6946 	while (fs_devices) {
6947 		fs_devices->fs_info = NULL;
6948 		fs_devices = fs_devices->seed;
6949 	}
6950 }
6951 
6952 void btrfs_close_one_device(struct btrfs_device *device)
6953 {
6954 	struct btrfs_fs_devices *fs_devices = device->fs_devices;
6955 	struct btrfs_device *new_device;
6956 	struct rcu_string *name;
6957 
6958 	if (device->bdev)
6959 		fs_devices->open_devices--;
6960 
6961 	if (device->writeable &&
6962 	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
6963 		list_del_init(&device->dev_alloc_list);
6964 		fs_devices->rw_devices--;
6965 	}
6966 
6967 	if (device->missing)
6968 		fs_devices->missing_devices--;
6969 
6970 	new_device = btrfs_alloc_device(NULL, &device->devid,
6971 					device->uuid);
6972 	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
6973 
6974 	/* Safe because we are under uuid_mutex */
6975 	if (device->name) {
6976 		name = rcu_string_strdup(device->name->str, GFP_NOFS);
6977 		BUG_ON(!name); /* -ENOMEM */
6978 		rcu_assign_pointer(new_device->name, name);
6979 	}
6980 
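	/*
	 * Swap the freshly allocated placeholder into dev_list so that RCU
	 * readers walking the list keep seeing a valid entry; the old
	 * device is freed only after a grace period via call_rcu() below.
	 */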
6981 	list_replace_rcu(&device->dev_list, &new_device->dev_list);
6982 	new_device->fs_devices = device->fs_devices;
6983 
6984 	call_rcu(&device->rcu, free_device);
6985 }
6986