xref: /openbmc/linux/fs/btrfs/volumes.c (revision 9d749629)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "compat.h"
29 #include "ctree.h"
30 #include "extent_map.h"
31 #include "disk-io.h"
32 #include "transaction.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "async-thread.h"
36 #include "check-integrity.h"
37 #include "rcu-string.h"
38 #include "math.h"
39 #include "dev-replace.h"
40 
41 static int init_first_rw_device(struct btrfs_trans_handle *trans,
42 				struct btrfs_root *root,
43 				struct btrfs_device *device);
44 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
45 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
46 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
47 
48 static DEFINE_MUTEX(uuid_mutex);
49 static LIST_HEAD(fs_uuids);
50 
51 static void lock_chunks(struct btrfs_root *root)
52 {
53 	mutex_lock(&root->fs_info->chunk_mutex);
54 }
55 
56 static void unlock_chunks(struct btrfs_root *root)
57 {
58 	mutex_unlock(&root->fs_info->chunk_mutex);
59 }
60 
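/*
 * Drop every btrfs_device hanging off @fs_devices and then the
 * fs_devices struct itself.  Only legal once nobody holds the set open
 * any more, hence the WARN_ON(fs_devices->opened) below.
 */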
61 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
62 {
63 	struct btrfs_device *device;
64 	WARN_ON(fs_devices->opened);
65 	while (!list_empty(&fs_devices->devices)) {
66 		device = list_entry(fs_devices->devices.next,
67 				    struct btrfs_device, dev_list);
68 		list_del(&device->dev_list);
69 		rcu_string_free(device->name);
70 		kfree(device);
71 	}
72 	kfree(fs_devices);
73 }
74 
75 static void btrfs_kobject_uevent(struct block_device *bdev,
76 				 enum kobject_action action)
77 {
78 	int ret;
79 
80 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
81 	if (ret)
82 		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
83 			action,
84 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
85 			&disk_to_dev(bdev->bd_disk)->kobj);
86 }
87 
88 void btrfs_cleanup_fs_uuids(void)
89 {
90 	struct btrfs_fs_devices *fs_devices;
91 
92 	while (!list_empty(&fs_uuids)) {
93 		fs_devices = list_entry(fs_uuids.next,
94 					struct btrfs_fs_devices, list);
95 		list_del(&fs_devices->list);
96 		free_fs_devices(fs_devices);
97 	}
98 }
99 
100 static noinline struct btrfs_device *__find_device(struct list_head *head,
101 						   u64 devid, u8 *uuid)
102 {
103 	struct btrfs_device *dev;
104 
105 	list_for_each_entry(dev, head, dev_list) {
106 		if (dev->devid == devid &&
107 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
108 			return dev;
109 		}
110 	}
111 	return NULL;
112 }
113 
114 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
115 {
116 	struct btrfs_fs_devices *fs_devices;
117 
118 	list_for_each_entry(fs_devices, &fs_uuids, list) {
119 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
120 			return fs_devices;
121 	}
122 	return NULL;
123 }
124 
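/*
 * Open the block device at @device_path and read its btrfs super block.
 * When @flush is set, dirty pagecache for the device is written back
 * first.  On success *bdev and *bh belong to the caller; on failure both
 * are set to NULL and an error is returned.
 */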
125 static int
126 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
127 		      int flush, struct block_device **bdev,
128 		      struct buffer_head **bh)
129 {
130 	int ret;
131 
132 	*bdev = blkdev_get_by_path(device_path, flags, holder);
133 
134 	if (IS_ERR(*bdev)) {
135 		ret = PTR_ERR(*bdev);
136 		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
137 		goto error;
138 	}
139 
140 	if (flush)
141 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
142 	ret = set_blocksize(*bdev, 4096);
143 	if (ret) {
144 		blkdev_put(*bdev, flags);
145 		goto error;
146 	}
147 	invalidate_bdev(*bdev);
148 	*bh = btrfs_read_dev_super(*bdev);
149 	if (!*bh) {
150 		ret = -EINVAL;
151 		blkdev_put(*bdev, flags);
152 		goto error;
153 	}
154 
155 	return 0;
156 
157 error:
158 	*bdev = NULL;
159 	*bh = NULL;
160 	return ret;
161 }
162 
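/*
 * Splice a partially processed bio chain (@head .. @tail) back onto the
 * front of @pending_bios so the remaining bios are retried in order.
 */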
163 static void requeue_list(struct btrfs_pending_bios *pending_bios,
164 			struct bio *head, struct bio *tail)
165 {
166 
167 	struct bio *old_head;
168 
169 	old_head = pending_bios->head;
170 	pending_bios->head = head;
171 	if (pending_bios->tail)
172 		tail->bi_next = old_head;
173 	else
174 		pending_bios->tail = tail;
175 }
176 
177 /*
178  * we try to collect pending bios for a device so we don't get a large
179  * number of procs sending bios down to the same device.  This greatly
180  * improves the scheduler's ability to collect and merge the bios.
181  *
182  * But, it also turns into a long list of bios to process and that is sure
183  * to eventually make the worker thread block.  The solution here is to
184  * make some progress and then put this work struct back at the end of
185  * the list if the block device is congested.  This way, multiple devices
186  * can make progress from a single worker thread.
187  */
188 static noinline void run_scheduled_bios(struct btrfs_device *device)
189 {
190 	struct bio *pending;
191 	struct backing_dev_info *bdi;
192 	struct btrfs_fs_info *fs_info;
193 	struct btrfs_pending_bios *pending_bios;
194 	struct bio *tail;
195 	struct bio *cur;
196 	int again = 0;
197 	unsigned long num_run;
198 	unsigned long batch_run = 0;
199 	unsigned long limit;
200 	unsigned long last_waited = 0;
201 	int force_reg = 0;
202 	int sync_pending = 0;
203 	struct blk_plug plug;
204 
205 	/*
206 	 * this function runs all the bios we've collected for
207 	 * a particular device.  We don't want to wander off to
208 	 * another device without first sending all of these down.
209 	 * So, set up a plug here and finish it off before we return
210 	 */
211 	blk_start_plug(&plug);
212 
213 	bdi = blk_get_backing_dev_info(device->bdev);
214 	fs_info = device->dev_root->fs_info;
215 	limit = btrfs_async_submit_limit(fs_info);
216 	limit = limit * 2 / 3;
217 
218 loop:
219 	spin_lock(&device->io_lock);
220 
221 loop_lock:
222 	num_run = 0;
223 
224 	/* take all the bios off the list at once and process them
225 	 * later on (without the lock held).  But, remember the
226 	 * tail and other pointers so the bios can be properly reinserted
227 	 * into the list if we hit congestion
228 	 */
229 	if (!force_reg && device->pending_sync_bios.head) {
230 		pending_bios = &device->pending_sync_bios;
231 		force_reg = 1;
232 	} else {
233 		pending_bios = &device->pending_bios;
234 		force_reg = 0;
235 	}
236 
237 	pending = pending_bios->head;
238 	tail = pending_bios->tail;
239 	WARN_ON(pending && !tail);
240 
241 	/*
242 	 * if pending was null this time around, no bios need processing
243 	 * at all and we can stop.  Otherwise it'll loop back up again
244 	 * and do an additional check so no bios are missed.
245 	 *
246 	 * device->running_pending is used to synchronize with the
247 	 * schedule_bio code.
248 	 */
249 	if (device->pending_sync_bios.head == NULL &&
250 	    device->pending_bios.head == NULL) {
251 		again = 0;
252 		device->running_pending = 0;
253 	} else {
254 		again = 1;
255 		device->running_pending = 1;
256 	}
257 
258 	pending_bios->head = NULL;
259 	pending_bios->tail = NULL;
260 
261 	spin_unlock(&device->io_lock);
262 
263 	while (pending) {
264 
265 		rmb();
266 		/* we want to work on both lists, but do more bios on the
267 		 * sync list than the regular list
268 		 */
269 		if ((num_run > 32 &&
270 		    pending_bios != &device->pending_sync_bios &&
271 		    device->pending_sync_bios.head) ||
272 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
273 		    device->pending_bios.head)) {
274 			spin_lock(&device->io_lock);
275 			requeue_list(pending_bios, pending, tail);
276 			goto loop_lock;
277 		}
278 
279 		cur = pending;
280 		pending = pending->bi_next;
281 		cur->bi_next = NULL;
282 
283 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
284 		    waitqueue_active(&fs_info->async_submit_wait))
285 			wake_up(&fs_info->async_submit_wait);
286 
287 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
288 
289 		/*
290 		 * if we're doing the sync list, record that our
291 		 * plug has some sync requests on it
292 		 *
293 		 * If we're doing the regular list and there are
294 		 * sync requests sitting around, unplug before
295 		 * we add more
296 		 */
297 		if (pending_bios == &device->pending_sync_bios) {
298 			sync_pending = 1;
299 		} else if (sync_pending) {
300 			blk_finish_plug(&plug);
301 			blk_start_plug(&plug);
302 			sync_pending = 0;
303 		}
304 
305 		btrfsic_submit_bio(cur->bi_rw, cur);
306 		num_run++;
307 		batch_run++;
308 		if (need_resched())
309 			cond_resched();
310 
311 		/*
312 		 * we made progress, there is more work to do and the bdi
313 		 * is now congested.  Back off and let other work structs
314 		 * run instead
315 		 */
316 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
317 		    fs_info->fs_devices->open_devices > 1) {
318 			struct io_context *ioc;
319 
320 			ioc = current->io_context;
321 
322 			/*
323 			 * the main goal here is that we don't want to
324 			 * block if we're going to be able to submit
325 			 * more requests without blocking.
326 			 *
327 			 * This code does two great things: it pokes into
328 			 * the elevator code from a filesystem _and_
329 			 * it makes assumptions about how batching works.
330 			 */
331 			if (ioc && ioc->nr_batch_requests > 0 &&
332 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
333 			    (last_waited == 0 ||
334 			     ioc->last_waited == last_waited)) {
335 				/*
336 				 * we want to go through our batch of
337 				 * requests and stop.  So, we copy out
338 				 * the ioc->last_waited time and test
339 				 * against it before looping
340 				 */
341 				last_waited = ioc->last_waited;
342 				if (need_resched())
343 					cond_resched();
344 				continue;
345 			}
346 			spin_lock(&device->io_lock);
347 			requeue_list(pending_bios, pending, tail);
348 			device->running_pending = 1;
349 
350 			spin_unlock(&device->io_lock);
351 			btrfs_requeue_work(&device->work);
352 			goto done;
353 		}
354 		/* unplug every 64 requests just for good measure */
355 		if (batch_run % 64 == 0) {
356 			blk_finish_plug(&plug);
357 			blk_start_plug(&plug);
358 			sync_pending = 0;
359 		}
360 	}
361 
362 	cond_resched();
363 	if (again)
364 		goto loop;
365 
366 	spin_lock(&device->io_lock);
367 	if (device->pending_bios.head || device->pending_sync_bios.head)
368 		goto loop_lock;
369 	spin_unlock(&device->io_lock);
370 
371 done:
372 	blk_finish_plug(&plug);
373 }
374 
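/*
 * Work-queue entry point: device->work.func is pointed here (see
 * device_list_add() below), so a queued btrfs_work drains that device's
 * pending bio lists via run_scheduled_bios().
 */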
375 static void pending_bios_fn(struct btrfs_work *work)
376 {
377 	struct btrfs_device *device;
378 
379 	device = container_of(work, struct btrfs_device, work);
380 	run_scheduled_bios(device);
381 }
382 
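/*
 * Record a scanned device in the in-memory list for its fsid, creating
 * the btrfs_fs_devices entry on first sight of that fsid.  A new devid
 * on an already opened filesystem fails with -EBUSY; rescanning a known
 * device merely refreshes its path and missing state.
 */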
383 static noinline int device_list_add(const char *path,
384 			   struct btrfs_super_block *disk_super,
385 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
386 {
387 	struct btrfs_device *device;
388 	struct btrfs_fs_devices *fs_devices;
389 	struct rcu_string *name;
390 	u64 found_transid = btrfs_super_generation(disk_super);
391 
392 	fs_devices = find_fsid(disk_super->fsid);
393 	if (!fs_devices) {
394 		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
395 		if (!fs_devices)
396 			return -ENOMEM;
397 		INIT_LIST_HEAD(&fs_devices->devices);
398 		INIT_LIST_HEAD(&fs_devices->alloc_list);
399 		list_add(&fs_devices->list, &fs_uuids);
400 		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
401 		fs_devices->latest_devid = devid;
402 		fs_devices->latest_trans = found_transid;
403 		mutex_init(&fs_devices->device_list_mutex);
404 		device = NULL;
405 	} else {
406 		device = __find_device(&fs_devices->devices, devid,
407 				       disk_super->dev_item.uuid);
408 	}
409 	if (!device) {
410 		if (fs_devices->opened)
411 			return -EBUSY;
412 
413 		device = kzalloc(sizeof(*device), GFP_NOFS);
414 		if (!device) {
415 			/* we can safely leave the fs_devices entry around */
416 			return -ENOMEM;
417 		}
418 		device->devid = devid;
419 		device->dev_stats_valid = 0;
420 		device->work.func = pending_bios_fn;
421 		memcpy(device->uuid, disk_super->dev_item.uuid,
422 		       BTRFS_UUID_SIZE);
423 		spin_lock_init(&device->io_lock);
424 
425 		name = rcu_string_strdup(path, GFP_NOFS);
426 		if (!name) {
427 			kfree(device);
428 			return -ENOMEM;
429 		}
430 		rcu_assign_pointer(device->name, name);
431 		INIT_LIST_HEAD(&device->dev_alloc_list);
432 
433 		/* init readahead state */
434 		spin_lock_init(&device->reada_lock);
435 		device->reada_curr_zone = NULL;
436 		atomic_set(&device->reada_in_flight, 0);
437 		device->reada_next = 0;
438 		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
439 		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
440 
441 		mutex_lock(&fs_devices->device_list_mutex);
442 		list_add_rcu(&device->dev_list, &fs_devices->devices);
443 		mutex_unlock(&fs_devices->device_list_mutex);
444 
445 		device->fs_devices = fs_devices;
446 		fs_devices->num_devices++;
447 	} else if (!device->name || strcmp(device->name->str, path)) {
448 		name = rcu_string_strdup(path, GFP_NOFS);
449 		if (!name)
450 			return -ENOMEM;
451 		rcu_string_free(device->name);
452 		rcu_assign_pointer(device->name, name);
453 		if (device->missing) {
454 			fs_devices->missing_devices--;
455 			device->missing = 0;
456 		}
457 	}
458 
459 	if (found_transid > fs_devices->latest_trans) {
460 		fs_devices->latest_devid = devid;
461 		fs_devices->latest_trans = found_transid;
462 	}
463 	*fs_devices_ret = fs_devices;
464 	return 0;
465 }
466 
467 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
468 {
469 	struct btrfs_fs_devices *fs_devices;
470 	struct btrfs_device *device;
471 	struct btrfs_device *orig_dev;
472 
473 	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
474 	if (!fs_devices)
475 		return ERR_PTR(-ENOMEM);
476 
477 	INIT_LIST_HEAD(&fs_devices->devices);
478 	INIT_LIST_HEAD(&fs_devices->alloc_list);
479 	INIT_LIST_HEAD(&fs_devices->list);
480 	mutex_init(&fs_devices->device_list_mutex);
481 	fs_devices->latest_devid = orig->latest_devid;
482 	fs_devices->latest_trans = orig->latest_trans;
483 	fs_devices->total_devices = orig->total_devices;
484 	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
485 
486 	/* We hold the volume lock, so it is safe to get the devices. */
487 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
488 		struct rcu_string *name;
489 
490 		device = kzalloc(sizeof(*device), GFP_NOFS);
491 		if (!device)
492 			goto error;
493 
494 		/*
495 		 * This is ok to do without the rcu read lock because we hold
496 		 * the uuid mutex, so nothing we touch in here is going to disappear.
497 		 */
498 		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
499 		if (!name) {
500 			kfree(device);
501 			goto error;
502 		}
503 		rcu_assign_pointer(device->name, name);
504 
505 		device->devid = orig_dev->devid;
506 		device->work.func = pending_bios_fn;
507 		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
508 		spin_lock_init(&device->io_lock);
509 		INIT_LIST_HEAD(&device->dev_list);
510 		INIT_LIST_HEAD(&device->dev_alloc_list);
511 
512 		list_add(&device->dev_list, &fs_devices->devices);
513 		device->fs_devices = fs_devices;
514 		fs_devices->num_devices++;
515 	}
516 	return fs_devices;
517 error:
518 	free_fs_devices(fs_devices);
519 	return ERR_PTR(-ENOMEM);
520 }
521 
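/*
 * After a mount has decided which devices are actually part of the FS,
 * close and free the scanned devices that are not (!in_fs_metadata),
 * keeping dev_replace candidates according to @step, and remember the
 * device with the highest generation as latest_bdev.  The walk repeats
 * for every fs_devices in the seed chain.
 */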
522 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
523 			       struct btrfs_fs_devices *fs_devices, int step)
524 {
525 	struct btrfs_device *device, *next;
526 
527 	struct block_device *latest_bdev = NULL;
528 	u64 latest_devid = 0;
529 	u64 latest_transid = 0;
530 
531 	mutex_lock(&uuid_mutex);
532 again:
533 	/* This is the initialized path; it is safe to release the devices. */
534 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
535 		if (device->in_fs_metadata) {
536 			if (!device->is_tgtdev_for_dev_replace &&
537 			    (!latest_transid ||
538 			     device->generation > latest_transid)) {
539 				latest_devid = device->devid;
540 				latest_transid = device->generation;
541 				latest_bdev = device->bdev;
542 			}
543 			continue;
544 		}
545 
546 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
547 			/*
548 			 * In the first step, keep the device which has
549 			 * the correct fsid and the devid that is used
550 			 * for the dev_replace procedure.
551 			 * In the second step, the dev_replace state is
552 			 * read from the device tree and it is known
553 			 * whether the procedure is really active or
554 			 * not, which means whether this device is
555 			 * used or whether it should be removed.
556 			 */
557 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
558 				continue;
559 			}
560 		}
561 		if (device->bdev) {
562 			blkdev_put(device->bdev, device->mode);
563 			device->bdev = NULL;
564 			fs_devices->open_devices--;
565 		}
566 		if (device->writeable) {
567 			list_del_init(&device->dev_alloc_list);
568 			device->writeable = 0;
569 			if (!device->is_tgtdev_for_dev_replace)
570 				fs_devices->rw_devices--;
571 		}
572 		list_del_init(&device->dev_list);
573 		fs_devices->num_devices--;
574 		rcu_string_free(device->name);
575 		kfree(device);
576 	}
577 
578 	if (fs_devices->seed) {
579 		fs_devices = fs_devices->seed;
580 		goto again;
581 	}
582 
583 	fs_devices->latest_bdev = latest_bdev;
584 	fs_devices->latest_devid = latest_devid;
585 	fs_devices->latest_trans = latest_transid;
586 
587 	mutex_unlock(&uuid_mutex);
588 }
589 
590 static void __free_device(struct work_struct *work)
591 {
592 	struct btrfs_device *device;
593 
594 	device = container_of(work, struct btrfs_device, rcu_work);
595 
596 	if (device->bdev)
597 		blkdev_put(device->bdev, device->mode);
598 
599 	rcu_string_free(device->name);
600 	kfree(device);
601 }
602 
603 static void free_device(struct rcu_head *head)
604 {
605 	struct btrfs_device *device;
606 
607 	device = container_of(head, struct btrfs_device, rcu);
608 
609 	INIT_WORK(&device->rcu_work, __free_device);
610 	schedule_work(&device->rcu_work);
611 }
612 
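/*
 * Readers walk the device list under RCU, so closing must not free a
 * btrfs_device in place: each device is swapped (list_replace_rcu()) for
 * a copy with no open bdev and the old struct is freed after a grace
 * period via call_rcu() -> free_device(), which in turn bounces to a
 * work item because blkdev_put() may sleep.
 */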
613 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
614 {
615 	struct btrfs_device *device;
616 
617 	if (--fs_devices->opened > 0)
618 		return 0;
619 
620 	mutex_lock(&fs_devices->device_list_mutex);
621 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
622 		struct btrfs_device *new_device;
623 		struct rcu_string *name;
624 
625 		if (device->bdev)
626 			fs_devices->open_devices--;
627 
628 		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
629 			list_del_init(&device->dev_alloc_list);
630 			fs_devices->rw_devices--;
631 		}
632 
633 		if (device->can_discard)
634 			fs_devices->num_can_discard--;
635 
636 		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
637 		BUG_ON(!new_device); /* -ENOMEM */
638 		memcpy(new_device, device, sizeof(*new_device));
639 
640 		/* Safe because we are under uuid_mutex */
641 		if (device->name) {
642 			name = rcu_string_strdup(device->name->str, GFP_NOFS);
643 			BUG_ON(device->name && !name); /* -ENOMEM */
644 			rcu_assign_pointer(new_device->name, name);
645 		}
646 		new_device->bdev = NULL;
647 		new_device->writeable = 0;
648 		new_device->in_fs_metadata = 0;
649 		new_device->can_discard = 0;
650 		list_replace_rcu(&device->dev_list, &new_device->dev_list);
651 
652 		call_rcu(&device->rcu, free_device);
653 	}
654 	mutex_unlock(&fs_devices->device_list_mutex);
655 
656 	WARN_ON(fs_devices->open_devices);
657 	WARN_ON(fs_devices->rw_devices);
658 	fs_devices->opened = 0;
659 	fs_devices->seeding = 0;
660 
661 	return 0;
662 }
663 
664 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
665 {
666 	struct btrfs_fs_devices *seed_devices = NULL;
667 	int ret;
668 
669 	mutex_lock(&uuid_mutex);
670 	ret = __btrfs_close_devices(fs_devices);
671 	if (!fs_devices->opened) {
672 		seed_devices = fs_devices->seed;
673 		fs_devices->seed = NULL;
674 	}
675 	mutex_unlock(&uuid_mutex);
676 
677 	while (seed_devices) {
678 		fs_devices = seed_devices;
679 		seed_devices = fs_devices->seed;
680 		__btrfs_close_devices(fs_devices);
681 		free_fs_devices(fs_devices);
682 	}
683 	return ret;
684 }
685 
686 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
687 				fmode_t flags, void *holder)
688 {
689 	struct request_queue *q;
690 	struct block_device *bdev;
691 	struct list_head *head = &fs_devices->devices;
692 	struct btrfs_device *device;
693 	struct block_device *latest_bdev = NULL;
694 	struct buffer_head *bh;
695 	struct btrfs_super_block *disk_super;
696 	u64 latest_devid = 0;
697 	u64 latest_transid = 0;
698 	u64 devid;
699 	int seeding = 1;
700 	int ret = 0;
701 
702 	flags |= FMODE_EXCL;
703 
704 	list_for_each_entry(device, head, dev_list) {
705 		if (device->bdev)
706 			continue;
707 		if (!device->name)
708 			continue;
709 
710 		ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
711 					    &bdev, &bh);
712 		if (ret)
713 			continue;
714 
715 		disk_super = (struct btrfs_super_block *)bh->b_data;
716 		devid = btrfs_stack_device_id(&disk_super->dev_item);
717 		if (devid != device->devid)
718 			goto error_brelse;
719 
720 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
721 			   BTRFS_UUID_SIZE))
722 			goto error_brelse;
723 
724 		device->generation = btrfs_super_generation(disk_super);
725 		if (!latest_transid || device->generation > latest_transid) {
726 			latest_devid = devid;
727 			latest_transid = device->generation;
728 			latest_bdev = bdev;
729 		}
730 
731 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
732 			device->writeable = 0;
733 		} else {
734 			device->writeable = !bdev_read_only(bdev);
735 			seeding = 0;
736 		}
737 
738 		q = bdev_get_queue(bdev);
739 		if (blk_queue_discard(q)) {
740 			device->can_discard = 1;
741 			fs_devices->num_can_discard++;
742 		}
743 
744 		device->bdev = bdev;
745 		device->in_fs_metadata = 0;
746 		device->mode = flags;
747 
748 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
749 			fs_devices->rotating = 1;
750 
751 		fs_devices->open_devices++;
752 		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
753 			fs_devices->rw_devices++;
754 			list_add(&device->dev_alloc_list,
755 				 &fs_devices->alloc_list);
756 		}
757 		brelse(bh);
758 		continue;
759 
760 error_brelse:
761 		brelse(bh);
762 		blkdev_put(bdev, flags);
763 		continue;
764 	}
765 	if (fs_devices->open_devices == 0) {
766 		ret = -EINVAL;
767 		goto out;
768 	}
769 	fs_devices->seeding = seeding;
770 	fs_devices->opened = 1;
771 	fs_devices->latest_bdev = latest_bdev;
772 	fs_devices->latest_devid = latest_devid;
773 	fs_devices->latest_trans = latest_transid;
774 	fs_devices->total_rw_bytes = 0;
775 out:
776 	return ret;
777 }
778 
779 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
780 		       fmode_t flags, void *holder)
781 {
782 	int ret;
783 
784 	mutex_lock(&uuid_mutex);
785 	if (fs_devices->opened) {
786 		fs_devices->opened++;
787 		ret = 0;
788 	} else {
789 		ret = __btrfs_open_devices(fs_devices, flags, holder);
790 	}
791 	mutex_unlock(&uuid_mutex);
792 	return ret;
793 }
794 
795 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
796 			  struct btrfs_fs_devices **fs_devices_ret)
797 {
798 	struct btrfs_super_block *disk_super;
799 	struct block_device *bdev;
800 	struct buffer_head *bh;
801 	int ret;
802 	u64 devid;
803 	u64 transid;
804 	u64 total_devices;
805 
806 	flags |= FMODE_EXCL;
807 	mutex_lock(&uuid_mutex);
808 	ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
809 	if (ret)
810 		goto error;
811 	disk_super = (struct btrfs_super_block *)bh->b_data;
812 	devid = btrfs_stack_device_id(&disk_super->dev_item);
813 	transid = btrfs_super_generation(disk_super);
814 	total_devices = btrfs_super_num_devices(disk_super);
815 	if (disk_super->label[0]) {
816 		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
817 			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
818 		printk(KERN_INFO "device label %s ", disk_super->label);
819 	} else {
820 		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
821 	}
822 	printk(KERN_CONT "devid %llu transid %llu %s\n",
823 	       (unsigned long long)devid, (unsigned long long)transid, path);
824 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
825 	if (!ret && fs_devices_ret)
826 		(*fs_devices_ret)->total_devices = total_devices;
827 	brelse(bh);
828 	blkdev_put(bdev, flags);
829 error:
830 	mutex_unlock(&uuid_mutex);
831 	return ret;
832 }
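
/*
 * Illustrative (hypothetical) caller, roughly what the mount path does:
 * register a device by scanning it, then open the whole set.  "holder"
 * stands for whatever opaque pointer identifies the opener to
 * blkdev_get_by_path().
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int err;
 *
 *	err = btrfs_scan_one_device("/dev/sdb", FMODE_READ, holder,
 *				    &fs_devices);
 *	if (!err)
 *		err = btrfs_open_devices(fs_devices, FMODE_READ, holder);
 */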
833 
834 /* helper to account for the used device space in the range [start, end] */
835 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
836 				   u64 end, u64 *length)
837 {
838 	struct btrfs_key key;
839 	struct btrfs_root *root = device->dev_root;
840 	struct btrfs_dev_extent *dev_extent;
841 	struct btrfs_path *path;
842 	u64 extent_end;
843 	int ret;
844 	int slot;
845 	struct extent_buffer *l;
846 
847 	*length = 0;
848 
849 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
850 		return 0;
851 
852 	path = btrfs_alloc_path();
853 	if (!path)
854 		return -ENOMEM;
855 	path->reada = 2;
856 
857 	key.objectid = device->devid;
858 	key.offset = start;
859 	key.type = BTRFS_DEV_EXTENT_KEY;
860 
861 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
862 	if (ret < 0)
863 		goto out;
864 	if (ret > 0) {
865 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
866 		if (ret < 0)
867 			goto out;
868 	}
869 
870 	while (1) {
871 		l = path->nodes[0];
872 		slot = path->slots[0];
873 		if (slot >= btrfs_header_nritems(l)) {
874 			ret = btrfs_next_leaf(root, path);
875 			if (ret == 0)
876 				continue;
877 			if (ret < 0)
878 				goto out;
879 
880 			break;
881 		}
882 		btrfs_item_key_to_cpu(l, &key, slot);
883 
884 		if (key.objectid < device->devid)
885 			goto next;
886 
887 		if (key.objectid > device->devid)
888 			break;
889 
890 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
891 			goto next;
892 
893 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
894 		extent_end = key.offset + btrfs_dev_extent_length(l,
895 								  dev_extent);
896 		if (key.offset <= start && extent_end > end) {
897 			*length = end - start + 1;
898 			break;
899 		} else if (key.offset <= start && extent_end > start)
900 			*length += extent_end - start;
901 		else if (key.offset > start && extent_end <= end)
902 			*length += extent_end - key.offset;
903 		else if (key.offset > start && key.offset <= end) {
904 			*length += end - key.offset + 1;
905 			break;
906 		} else if (key.offset > end)
907 			break;
908 
909 next:
910 		path->slots[0]++;
911 	}
912 	ret = 0;
913 out:
914 	btrfs_free_path(path);
915 	return ret;
916 }
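
/*
 * Worked example for the accounting above (illustrative numbers): with
 * dev extents [0, 100) and [150, 250) on the device and a query of
 * start=50, end=199 (@end is inclusive), the loop adds 100 - 50 = 50
 * for the first extent and 199 - 150 + 1 = 50 for the second, so
 * *length comes back as 100.
 */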
917 
918 /*
919  * find_free_dev_extent - find free space in the specified device
920  * @device:	the device in which we search for the free space
921  * @num_bytes:	the size of the free space that we need
922  * @start:	store the start of the free space
923  * @len:	the size of the free space that we find, or the size of the
924  * 		max free space if we don't find suitable free space
925  *
926  * this uses a pretty simple search; the expectation is that it is
927  * called very infrequently and that a given device has a small number
928  * of extents
929  *
930  * @start is used to store the start of the free space we find. But if we
931  * don't find suitable free space, it will be used to store the start position
932  * of the max free space.
933  *
934  * @len is used to store the size of the free space that we find.
935  * But if we don't find suitable free space, it is used to store the size of
936  * the max free space.
937  */
938 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
939 			 u64 *start, u64 *len)
940 {
941 	struct btrfs_key key;
942 	struct btrfs_root *root = device->dev_root;
943 	struct btrfs_dev_extent *dev_extent;
944 	struct btrfs_path *path;
945 	u64 hole_size;
946 	u64 max_hole_start;
947 	u64 max_hole_size;
948 	u64 extent_end;
949 	u64 search_start;
950 	u64 search_end = device->total_bytes;
951 	int ret;
952 	int slot;
953 	struct extent_buffer *l;
954 
955 	/* FIXME use last free of some kind */
956 
957 	/* we don't want to overwrite the superblock on the drive,
958 	 * so we make sure to start at an offset of at least 1MB
959 	 */
960 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
961 
962 	max_hole_start = search_start;
963 	max_hole_size = 0;
964 	hole_size = 0;
965 
966 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
967 		ret = -ENOSPC;
968 		goto error;
969 	}
970 
971 	path = btrfs_alloc_path();
972 	if (!path) {
973 		ret = -ENOMEM;
974 		goto error;
975 	}
976 	path->reada = 2;
977 
978 	key.objectid = device->devid;
979 	key.offset = search_start;
980 	key.type = BTRFS_DEV_EXTENT_KEY;
981 
982 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
983 	if (ret < 0)
984 		goto out;
985 	if (ret > 0) {
986 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
987 		if (ret < 0)
988 			goto out;
989 	}
990 
991 	while (1) {
992 		l = path->nodes[0];
993 		slot = path->slots[0];
994 		if (slot >= btrfs_header_nritems(l)) {
995 			ret = btrfs_next_leaf(root, path);
996 			if (ret == 0)
997 				continue;
998 			if (ret < 0)
999 				goto out;
1000 
1001 			break;
1002 		}
1003 		btrfs_item_key_to_cpu(l, &key, slot);
1004 
1005 		if (key.objectid < device->devid)
1006 			goto next;
1007 
1008 		if (key.objectid > device->devid)
1009 			break;
1010 
1011 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1012 			goto next;
1013 
1014 		if (key.offset > search_start) {
1015 			hole_size = key.offset - search_start;
1016 
1017 			if (hole_size > max_hole_size) {
1018 				max_hole_start = search_start;
1019 				max_hole_size = hole_size;
1020 			}
1021 
1022 			/*
1023 			 * If this free space is greater than what we need,
1024 			 * it must be the max free space that we have found
1025 			 * until now, so max_hole_start must point to the start
1026 			 * of this free space and the length of this free space
1027 			 * is stored in max_hole_size. Thus, we return
1028 			 * max_hole_start and max_hole_size and go back to the
1029 			 * caller.
1030 			 */
1031 			if (hole_size >= num_bytes) {
1032 				ret = 0;
1033 				goto out;
1034 			}
1035 		}
1036 
1037 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1038 		extent_end = key.offset + btrfs_dev_extent_length(l,
1039 								  dev_extent);
1040 		if (extent_end > search_start)
1041 			search_start = extent_end;
1042 next:
1043 		path->slots[0]++;
1044 		cond_resched();
1045 	}
1046 
1047 	/*
1048 	 * At this point, search_start should be the end of
1049 	 * allocated dev extents, and when shrinking the device,
1050 	 * search_end may be smaller than search_start.
1051 	 */
1052 	if (search_end > search_start)
1053 		hole_size = search_end - search_start;
1054 
1055 	if (hole_size > max_hole_size) {
1056 		max_hole_start = search_start;
1057 		max_hole_size = hole_size;
1058 	}
1059 
1060 	/* See above. */
1061 	if (hole_size < num_bytes)
1062 		ret = -ENOSPC;
1063 	else
1064 		ret = 0;
1065 
1066 out:
1067 	btrfs_free_path(path);
1068 error:
1069 	*start = max_hole_start;
1070 	if (len)
1071 		*len = max_hole_size;
1072 	return ret;
1073 }
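
/*
 * Illustrative (hypothetical) use: ask for a 1GiB hole; even on -ENOSPC,
 * (start, len) describe the largest hole that was found.
 *
 *	u64 start, len;
 *	int err;
 *
 *	err = find_free_dev_extent(device, 1024ULL * 1024 * 1024,
 *				   &start, &len);
 *	if (err == -ENOSPC)
 *		printk(KERN_INFO "largest hole: %llu bytes at %llu\n",
 *		       (unsigned long long)len, (unsigned long long)start);
 */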
1074 
1075 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1076 			  struct btrfs_device *device,
1077 			  u64 start)
1078 {
1079 	int ret;
1080 	struct btrfs_path *path;
1081 	struct btrfs_root *root = device->dev_root;
1082 	struct btrfs_key key;
1083 	struct btrfs_key found_key;
1084 	struct extent_buffer *leaf = NULL;
1085 	struct btrfs_dev_extent *extent = NULL;
1086 
1087 	path = btrfs_alloc_path();
1088 	if (!path)
1089 		return -ENOMEM;
1090 
1091 	key.objectid = device->devid;
1092 	key.offset = start;
1093 	key.type = BTRFS_DEV_EXTENT_KEY;
1094 again:
1095 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1096 	if (ret > 0) {
1097 		ret = btrfs_previous_item(root, path, key.objectid,
1098 					  BTRFS_DEV_EXTENT_KEY);
1099 		if (ret)
1100 			goto out;
1101 		leaf = path->nodes[0];
1102 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1103 		extent = btrfs_item_ptr(leaf, path->slots[0],
1104 					struct btrfs_dev_extent);
1105 		BUG_ON(found_key.offset > start || found_key.offset +
1106 		       btrfs_dev_extent_length(leaf, extent) < start);
1107 		key = found_key;
1108 		btrfs_release_path(path);
1109 		goto again;
1110 	} else if (ret == 0) {
1111 		leaf = path->nodes[0];
1112 		extent = btrfs_item_ptr(leaf, path->slots[0],
1113 					struct btrfs_dev_extent);
1114 	} else {
1115 		btrfs_error(root->fs_info, ret, "Slot search failed");
1116 		goto out;
1117 	}
1118 
1119 	if (device->bytes_used > 0) {
1120 		u64 len = btrfs_dev_extent_length(leaf, extent);
1121 		device->bytes_used -= len;
1122 		spin_lock(&root->fs_info->free_chunk_lock);
1123 		root->fs_info->free_chunk_space += len;
1124 		spin_unlock(&root->fs_info->free_chunk_lock);
1125 	}
1126 	ret = btrfs_del_item(trans, root, path);
1127 	if (ret) {
1128 		btrfs_error(root->fs_info, ret,
1129 			    "Failed to remove dev extent item");
1130 	}
1131 out:
1132 	btrfs_free_path(path);
1133 	return ret;
1134 }
1135 
1136 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1137 			   struct btrfs_device *device,
1138 			   u64 chunk_tree, u64 chunk_objectid,
1139 			   u64 chunk_offset, u64 start, u64 num_bytes)
1140 {
1141 	int ret;
1142 	struct btrfs_path *path;
1143 	struct btrfs_root *root = device->dev_root;
1144 	struct btrfs_dev_extent *extent;
1145 	struct extent_buffer *leaf;
1146 	struct btrfs_key key;
1147 
1148 	WARN_ON(!device->in_fs_metadata);
1149 	WARN_ON(device->is_tgtdev_for_dev_replace);
1150 	path = btrfs_alloc_path();
1151 	if (!path)
1152 		return -ENOMEM;
1153 
1154 	key.objectid = device->devid;
1155 	key.offset = start;
1156 	key.type = BTRFS_DEV_EXTENT_KEY;
1157 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1158 				      sizeof(*extent));
1159 	if (ret)
1160 		goto out;
1161 
1162 	leaf = path->nodes[0];
1163 	extent = btrfs_item_ptr(leaf, path->slots[0],
1164 				struct btrfs_dev_extent);
1165 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1166 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1167 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1168 
1169 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1170 		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1171 		    BTRFS_UUID_SIZE);
1172 
1173 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1174 	btrfs_mark_buffer_dirty(leaf);
1175 out:
1176 	btrfs_free_path(path);
1177 	return ret;
1178 }
1179 
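/*
 * Store in *offset the logical address just past the highest existing
 * chunk for @objectid, i.e. where the next chunk may be placed, or 0
 * when no chunk exists yet.
 */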
1180 static noinline int find_next_chunk(struct btrfs_root *root,
1181 				    u64 objectid, u64 *offset)
1182 {
1183 	struct btrfs_path *path;
1184 	int ret;
1185 	struct btrfs_key key;
1186 	struct btrfs_chunk *chunk;
1187 	struct btrfs_key found_key;
1188 
1189 	path = btrfs_alloc_path();
1190 	if (!path)
1191 		return -ENOMEM;
1192 
1193 	key.objectid = objectid;
1194 	key.offset = (u64)-1;
1195 	key.type = BTRFS_CHUNK_ITEM_KEY;
1196 
1197 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1198 	if (ret < 0)
1199 		goto error;
1200 
1201 	BUG_ON(ret == 0); /* Corruption */
1202 
1203 	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1204 	if (ret) {
1205 		*offset = 0;
1206 	} else {
1207 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1208 				      path->slots[0]);
1209 		if (found_key.objectid != objectid)
1210 			*offset = 0;
1211 		else {
1212 			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1213 					       struct btrfs_chunk);
1214 			*offset = found_key.offset +
1215 				btrfs_chunk_length(path->nodes[0], chunk);
1216 		}
1217 	}
1218 	ret = 0;
1219 error:
1220 	btrfs_free_path(path);
1221 	return ret;
1222 }
1223 
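/*
 * Pick the next unused devid: one past the highest DEV_ITEM key offset
 * in the chunk root, or 1 for a filesystem without device items yet.
 */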
1224 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1225 {
1226 	int ret;
1227 	struct btrfs_key key;
1228 	struct btrfs_key found_key;
1229 	struct btrfs_path *path;
1230 
1231 	root = root->fs_info->chunk_root;
1232 
1233 	path = btrfs_alloc_path();
1234 	if (!path)
1235 		return -ENOMEM;
1236 
1237 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1238 	key.type = BTRFS_DEV_ITEM_KEY;
1239 	key.offset = (u64)-1;
1240 
1241 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1242 	if (ret < 0)
1243 		goto error;
1244 
1245 	BUG_ON(ret == 0); /* Corruption */
1246 
1247 	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1248 				  BTRFS_DEV_ITEM_KEY);
1249 	if (ret) {
1250 		*objectid = 1;
1251 	} else {
1252 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1253 				      path->slots[0]);
1254 		*objectid = found_key.offset + 1;
1255 	}
1256 	ret = 0;
1257 error:
1258 	btrfs_free_path(path);
1259 	return ret;
1260 }
1261 
1262 /*
1263  * the device information is stored in the chunk root
1264  * the btrfs_device struct should be fully filled in
1265  */
1266 int btrfs_add_device(struct btrfs_trans_handle *trans,
1267 		     struct btrfs_root *root,
1268 		     struct btrfs_device *device)
1269 {
1270 	int ret;
1271 	struct btrfs_path *path;
1272 	struct btrfs_dev_item *dev_item;
1273 	struct extent_buffer *leaf;
1274 	struct btrfs_key key;
1275 	unsigned long ptr;
1276 
1277 	root = root->fs_info->chunk_root;
1278 
1279 	path = btrfs_alloc_path();
1280 	if (!path)
1281 		return -ENOMEM;
1282 
1283 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1284 	key.type = BTRFS_DEV_ITEM_KEY;
1285 	key.offset = device->devid;
1286 
1287 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1288 				      sizeof(*dev_item));
1289 	if (ret)
1290 		goto out;
1291 
1292 	leaf = path->nodes[0];
1293 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1294 
1295 	btrfs_set_device_id(leaf, dev_item, device->devid);
1296 	btrfs_set_device_generation(leaf, dev_item, 0);
1297 	btrfs_set_device_type(leaf, dev_item, device->type);
1298 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1299 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1300 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1301 	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1302 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1303 	btrfs_set_device_group(leaf, dev_item, 0);
1304 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1305 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1306 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1307 
1308 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
1309 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1310 	ptr = (unsigned long)btrfs_device_fsid(dev_item);
1311 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1312 	btrfs_mark_buffer_dirty(leaf);
1313 
1314 	ret = 0;
1315 out:
1316 	btrfs_free_path(path);
1317 	return ret;
1318 }
1319 
1320 static int btrfs_rm_dev_item(struct btrfs_root *root,
1321 			     struct btrfs_device *device)
1322 {
1323 	int ret;
1324 	struct btrfs_path *path;
1325 	struct btrfs_key key;
1326 	struct btrfs_trans_handle *trans;
1327 
1328 	root = root->fs_info->chunk_root;
1329 
1330 	path = btrfs_alloc_path();
1331 	if (!path)
1332 		return -ENOMEM;
1333 
1334 	trans = btrfs_start_transaction(root, 0);
1335 	if (IS_ERR(trans)) {
1336 		btrfs_free_path(path);
1337 		return PTR_ERR(trans);
1338 	}
1339 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1340 	key.type = BTRFS_DEV_ITEM_KEY;
1341 	key.offset = device->devid;
1342 	lock_chunks(root);
1343 
1344 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1345 	if (ret < 0)
1346 		goto out;
1347 
1348 	if (ret > 0) {
1349 		ret = -ENOENT;
1350 		goto out;
1351 	}
1352 
1353 	ret = btrfs_del_item(trans, root, path);
1354 	if (ret)
1355 		goto out;
1356 out:
1357 	btrfs_free_path(path);
1358 	unlock_chunks(root);
1359 	btrfs_commit_transaction(trans, root);
1360 	return ret;
1361 }
1362 
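/*
 * Remove the device named by @device_path, or the first device that has
 * no bdev when the path is the literal string "missing": migrate its
 * chunks away via btrfs_shrink_device(device, 0), delete its dev item,
 * unhook it from the in-memory lists and finally wipe the super block
 * magic so the device is no longer detected as part of the FS.
 */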
1363 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1364 {
1365 	struct btrfs_device *device;
1366 	struct btrfs_device *next_device;
1367 	struct block_device *bdev;
1368 	struct buffer_head *bh = NULL;
1369 	struct btrfs_super_block *disk_super;
1370 	struct btrfs_fs_devices *cur_devices;
1371 	u64 all_avail;
1372 	u64 devid;
1373 	u64 num_devices;
1374 	u8 *dev_uuid;
1375 	int ret = 0;
1376 	bool clear_super = false;
1377 
1378 	mutex_lock(&uuid_mutex);
1379 
1380 	all_avail = root->fs_info->avail_data_alloc_bits |
1381 		root->fs_info->avail_system_alloc_bits |
1382 		root->fs_info->avail_metadata_alloc_bits;
1383 
1384 	num_devices = root->fs_info->fs_devices->num_devices;
1385 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1386 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1387 		WARN_ON(num_devices < 1);
1388 		num_devices--;
1389 	}
1390 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1391 
1392 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1393 		printk(KERN_ERR "btrfs: unable to go below four devices "
1394 		       "on raid10\n");
1395 		ret = -EINVAL;
1396 		goto out;
1397 	}
1398 
1399 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1400 		printk(KERN_ERR "btrfs: unable to go below two "
1401 		       "devices on raid1\n");
1402 		ret = -EINVAL;
1403 		goto out;
1404 	}
1405 
1406 	if (strcmp(device_path, "missing") == 0) {
1407 		struct list_head *devices;
1408 		struct btrfs_device *tmp;
1409 
1410 		device = NULL;
1411 		devices = &root->fs_info->fs_devices->devices;
1412 		/*
1413 		 * It is safe to read the devices since the volume_mutex
1414 		 * is held.
1415 		 */
1416 		list_for_each_entry(tmp, devices, dev_list) {
1417 			if (tmp->in_fs_metadata &&
1418 			    !tmp->is_tgtdev_for_dev_replace &&
1419 			    !tmp->bdev) {
1420 				device = tmp;
1421 				break;
1422 			}
1423 		}
1424 		bdev = NULL;
1425 		bh = NULL;
1426 		disk_super = NULL;
1427 		if (!device) {
1428 			printk(KERN_ERR "btrfs: no missing devices found to remove\n");
1429 			ret = -ENOENT;
1430 			goto out;
1431 		}
1432 	} else {
1433 		ret = btrfs_get_bdev_and_sb(device_path,
1434 					    FMODE_WRITE | FMODE_EXCL,
1435 					    root->fs_info->bdev_holder, 0,
1436 					    &bdev, &bh);
1437 		if (ret)
1438 			goto out;
1439 		disk_super = (struct btrfs_super_block *)bh->b_data;
1440 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1441 		dev_uuid = disk_super->dev_item.uuid;
1442 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1443 					   disk_super->fsid);
1444 		if (!device) {
1445 			ret = -ENOENT;
1446 			goto error_brelse;
1447 		}
1448 	}
1449 
1450 	if (device->is_tgtdev_for_dev_replace) {
1451 		pr_err("btrfs: unable to remove the dev_replace target dev\n");
1452 		ret = -EINVAL;
1453 		goto error_brelse;
1454 	}
1455 
1456 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1457 		printk(KERN_ERR "btrfs: unable to remove the only writeable "
1458 		       "device\n");
1459 		ret = -EINVAL;
1460 		goto error_brelse;
1461 	}
1462 
1463 	if (device->writeable) {
1464 		lock_chunks(root);
1465 		list_del_init(&device->dev_alloc_list);
1466 		unlock_chunks(root);
1467 		root->fs_info->fs_devices->rw_devices--;
1468 		clear_super = true;
1469 	}
1470 
1471 	ret = btrfs_shrink_device(device, 0);
1472 	if (ret)
1473 		goto error_undo;
1474 
1475 	/*
1476 	 * TODO: the superblock still includes this device in its num_devices
1477 	 * counter although write_all_supers() is not locked out. This
1478 	 * could give a filesystem state which requires a degraded mount.
1479 	 */
1480 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1481 	if (ret)
1482 		goto error_undo;
1483 
1484 	spin_lock(&root->fs_info->free_chunk_lock);
1485 	root->fs_info->free_chunk_space = device->total_bytes -
1486 		device->bytes_used;
1487 	spin_unlock(&root->fs_info->free_chunk_lock);
1488 
1489 	device->in_fs_metadata = 0;
1490 	btrfs_scrub_cancel_dev(root->fs_info, device);
1491 
1492 	/*
1493 	 * the device list mutex makes sure that we don't change
1494 	 * the device list while someone else is writing out all
1495 	 * the device supers.
1496 	 */
1497 
1498 	cur_devices = device->fs_devices;
1499 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1500 	list_del_rcu(&device->dev_list);
1501 
1502 	device->fs_devices->num_devices--;
1503 	device->fs_devices->total_devices--;
1504 
1505 	if (device->missing)
1506 		root->fs_info->fs_devices->missing_devices--;
1507 
1508 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1509 				 struct btrfs_device, dev_list);
1510 	if (device->bdev == root->fs_info->sb->s_bdev)
1511 		root->fs_info->sb->s_bdev = next_device->bdev;
1512 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1513 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1514 
1515 	if (device->bdev)
1516 		device->fs_devices->open_devices--;
1517 
1518 	call_rcu(&device->rcu, free_device);
1519 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1520 
1521 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1522 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1523 
1524 	if (cur_devices->open_devices == 0) {
1525 		struct btrfs_fs_devices *fs_devices;
1526 		fs_devices = root->fs_info->fs_devices;
1527 		while (fs_devices) {
1528 			if (fs_devices->seed == cur_devices)
1529 				break;
1530 			fs_devices = fs_devices->seed;
1531 		}
1532 		fs_devices->seed = cur_devices->seed;
1533 		cur_devices->seed = NULL;
1534 		lock_chunks(root);
1535 		__btrfs_close_devices(cur_devices);
1536 		unlock_chunks(root);
1537 		free_fs_devices(cur_devices);
1538 	}
1539 
1540 	root->fs_info->num_tolerated_disk_barrier_failures =
1541 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1542 
1543 	/*
1544 	 * at this point, the device is zero sized.  We want to
1545 	 * remove it from the devices list and zero out the old super
1546 	 */
1547 	if (clear_super && disk_super) {
1548 		/* make sure this device isn't detected as part of
1549 		 * the FS anymore
1550 		 */
1551 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1552 		set_buffer_dirty(bh);
1553 		sync_dirty_buffer(bh);
1554 	}
1555 
1556 	ret = 0;
1557 
1558 	/* Notify udev that device has changed */
1559 	if (bdev)
1560 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1561 
1562 error_brelse:
1563 	brelse(bh);
1564 	if (bdev)
1565 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1566 out:
1567 	mutex_unlock(&uuid_mutex);
1568 	return ret;
1569 error_undo:
1570 	if (device->writeable) {
1571 		lock_chunks(root);
1572 		list_add(&device->dev_alloc_list,
1573 			 &root->fs_info->fs_devices->alloc_list);
1574 		unlock_chunks(root);
1575 		root->fs_info->fs_devices->rw_devices++;
1576 	}
1577 	goto error_brelse;
1578 }
1579 
1580 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1581 				 struct btrfs_device *srcdev)
1582 {
1583 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1584 	list_del_rcu(&srcdev->dev_list);
1585 	list_del_rcu(&srcdev->dev_alloc_list);
1586 	fs_info->fs_devices->num_devices--;
1587 	if (srcdev->missing) {
1588 		fs_info->fs_devices->missing_devices--;
1589 		fs_info->fs_devices->rw_devices++;
1590 	}
1591 	if (srcdev->can_discard)
1592 		fs_info->fs_devices->num_can_discard--;
1593 	if (srcdev->bdev)
1594 		fs_info->fs_devices->open_devices--;
1595 
1596 	call_rcu(&srcdev->rcu, free_device);
1597 }
1598 
1599 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1600 				      struct btrfs_device *tgtdev)
1601 {
1602 	struct btrfs_device *next_device;
1603 
1604 	WARN_ON(!tgtdev);
1605 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1606 	if (tgtdev->bdev) {
1607 		btrfs_scratch_superblock(tgtdev);
1608 		fs_info->fs_devices->open_devices--;
1609 	}
1610 	fs_info->fs_devices->num_devices--;
1611 	if (tgtdev->can_discard)
1612 		fs_info->fs_devices->num_can_discard--;
1613 
1614 	next_device = list_entry(fs_info->fs_devices->devices.next,
1615 				 struct btrfs_device, dev_list);
1616 	if (tgtdev->bdev == fs_info->sb->s_bdev)
1617 		fs_info->sb->s_bdev = next_device->bdev;
1618 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1619 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1620 	list_del_rcu(&tgtdev->dev_list);
1621 
1622 	call_rcu(&tgtdev->rcu, free_device);
1623 
1624 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1625 }
1626 
1627 int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1628 			      struct btrfs_device **device)
1629 {
1630 	int ret = 0;
1631 	struct btrfs_super_block *disk_super;
1632 	u64 devid;
1633 	u8 *dev_uuid;
1634 	struct block_device *bdev;
1635 	struct buffer_head *bh;
1636 
1637 	*device = NULL;
1638 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1639 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1640 	if (ret)
1641 		return ret;
1642 	disk_super = (struct btrfs_super_block *)bh->b_data;
1643 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1644 	dev_uuid = disk_super->dev_item.uuid;
1645 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1646 				    disk_super->fsid);
1647 	brelse(bh);
1648 	if (!*device)
1649 		ret = -ENOENT;
1650 	blkdev_put(bdev, FMODE_READ);
1651 	return ret;
1652 }
1653 
1654 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1655 					 char *device_path,
1656 					 struct btrfs_device **device)
1657 {
1658 	*device = NULL;
1659 	if (strcmp(device_path, "missing") == 0) {
1660 		struct list_head *devices;
1661 		struct btrfs_device *tmp;
1662 
1663 		devices = &root->fs_info->fs_devices->devices;
1664 		/*
1665 		 * It is safe to read the devices since the volume_mutex
1666 		 * is held by the caller.
1667 		 */
1668 		list_for_each_entry(tmp, devices, dev_list) {
1669 			if (tmp->in_fs_metadata && !tmp->bdev) {
1670 				*device = tmp;
1671 				break;
1672 			}
1673 		}
1674 
1675 		if (!*device) {
1676 			pr_err("btrfs: no missing device found\n");
1677 			return -ENOENT;
1678 		}
1679 
1680 		return 0;
1681 	} else {
1682 		return btrfs_find_device_by_path(root, device_path, device);
1683 	}
1684 }
1685 
1686 /*
1687  * does all the dirty work required for changing the filesystem's UUID.
1688  */
1689 static int btrfs_prepare_sprout(struct btrfs_root *root)
1690 {
1691 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1692 	struct btrfs_fs_devices *old_devices;
1693 	struct btrfs_fs_devices *seed_devices;
1694 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1695 	struct btrfs_device *device;
1696 	u64 super_flags;
1697 
1698 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1699 	if (!fs_devices->seeding)
1700 		return -EINVAL;
1701 
1702 	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1703 	if (!seed_devices)
1704 		return -ENOMEM;
1705 
1706 	old_devices = clone_fs_devices(fs_devices);
1707 	if (IS_ERR(old_devices)) {
1708 		kfree(seed_devices);
1709 		return PTR_ERR(old_devices);
1710 	}
1711 
1712 	list_add(&old_devices->list, &fs_uuids);
1713 
1714 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1715 	seed_devices->opened = 1;
1716 	INIT_LIST_HEAD(&seed_devices->devices);
1717 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1718 	mutex_init(&seed_devices->device_list_mutex);
1719 
1720 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1721 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1722 			      synchronize_rcu);
1723 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1724 
1725 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1726 	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1727 		device->fs_devices = seed_devices;
1728 	}
1729 
1730 	fs_devices->seeding = 0;
1731 	fs_devices->num_devices = 0;
1732 	fs_devices->open_devices = 0;
1733 	fs_devices->total_devices = 0;
1734 	fs_devices->seed = seed_devices;
1735 
1736 	generate_random_uuid(fs_devices->fsid);
1737 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1738 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1739 	super_flags = btrfs_super_flags(disk_super) &
1740 		      ~BTRFS_SUPER_FLAG_SEEDING;
1741 	btrfs_set_super_flags(disk_super, super_flags);
1742 
1743 	return 0;
1744 }
1745 
1746 /*
1747  * store the expected generation for seed devices in device items.
1748  */
1749 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1750 			       struct btrfs_root *root)
1751 {
1752 	struct btrfs_path *path;
1753 	struct extent_buffer *leaf;
1754 	struct btrfs_dev_item *dev_item;
1755 	struct btrfs_device *device;
1756 	struct btrfs_key key;
1757 	u8 fs_uuid[BTRFS_UUID_SIZE];
1758 	u8 dev_uuid[BTRFS_UUID_SIZE];
1759 	u64 devid;
1760 	int ret;
1761 
1762 	path = btrfs_alloc_path();
1763 	if (!path)
1764 		return -ENOMEM;
1765 
1766 	root = root->fs_info->chunk_root;
1767 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1768 	key.offset = 0;
1769 	key.type = BTRFS_DEV_ITEM_KEY;
1770 
1771 	while (1) {
1772 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1773 		if (ret < 0)
1774 			goto error;
1775 
1776 		leaf = path->nodes[0];
1777 next_slot:
1778 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1779 			ret = btrfs_next_leaf(root, path);
1780 			if (ret > 0)
1781 				break;
1782 			if (ret < 0)
1783 				goto error;
1784 			leaf = path->nodes[0];
1785 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1786 			btrfs_release_path(path);
1787 			continue;
1788 		}
1789 
1790 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1791 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1792 		    key.type != BTRFS_DEV_ITEM_KEY)
1793 			break;
1794 
1795 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1796 					  struct btrfs_dev_item);
1797 		devid = btrfs_device_id(leaf, dev_item);
1798 		read_extent_buffer(leaf, dev_uuid,
1799 				   (unsigned long)btrfs_device_uuid(dev_item),
1800 				   BTRFS_UUID_SIZE);
1801 		read_extent_buffer(leaf, fs_uuid,
1802 				   (unsigned long)btrfs_device_fsid(dev_item),
1803 				   BTRFS_UUID_SIZE);
1804 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1805 					   fs_uuid);
1806 		BUG_ON(!device); /* Logic error */
1807 
1808 		if (device->fs_devices->seeding) {
1809 			btrfs_set_device_generation(leaf, dev_item,
1810 						    device->generation);
1811 			btrfs_mark_buffer_dirty(leaf);
1812 		}
1813 
1814 		path->slots[0]++;
1815 		goto next_slot;
1816 	}
1817 	ret = 0;
1818 error:
1819 	btrfs_free_path(path);
1820 	return ret;
1821 }
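
/*
 * Sketch of the sprout sequence as driven by btrfs_init_new_device()
 * below when a writable device is added to a seed filesystem:
 *
 *	ret = btrfs_prepare_sprout(root);	// new fsid; old devices
 *						// become the seed set
 *	// ... the new rw device is wired up ...
 *	ret = init_first_rw_device(trans, root, device);
 *	ret = btrfs_finish_sprout(trans, root);	// stamp generations into
 *						// the seed dev items
 */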
1822 
1823 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1824 {
1825 	struct request_queue *q;
1826 	struct btrfs_trans_handle *trans;
1827 	struct btrfs_device *device;
1828 	struct block_device *bdev;
1829 	struct list_head *devices;
1830 	struct super_block *sb = root->fs_info->sb;
1831 	struct rcu_string *name;
1832 	u64 total_bytes;
1833 	int seeding_dev = 0;
1834 	int ret = 0;
1835 
1836 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1837 		return -EROFS;
1838 
1839 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1840 				  root->fs_info->bdev_holder);
1841 	if (IS_ERR(bdev))
1842 		return PTR_ERR(bdev);
1843 
1844 	if (root->fs_info->fs_devices->seeding) {
1845 		seeding_dev = 1;
1846 		down_write(&sb->s_umount);
1847 		mutex_lock(&uuid_mutex);
1848 	}
1849 
1850 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1851 
1852 	devices = &root->fs_info->fs_devices->devices;
1853 
1854 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1855 	list_for_each_entry(device, devices, dev_list) {
1856 		if (device->bdev == bdev) {
1857 			ret = -EEXIST;
1858 			mutex_unlock(
1859 				&root->fs_info->fs_devices->device_list_mutex);
1860 			goto error;
1861 		}
1862 	}
1863 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1864 
1865 	device = kzalloc(sizeof(*device), GFP_NOFS);
1866 	if (!device) {
1867 		/* we can safely leave the fs_devices entry around */
1868 		ret = -ENOMEM;
1869 		goto error;
1870 	}
1871 
1872 	name = rcu_string_strdup(device_path, GFP_NOFS);
1873 	if (!name) {
1874 		kfree(device);
1875 		ret = -ENOMEM;
1876 		goto error;
1877 	}
1878 	rcu_assign_pointer(device->name, name);
1879 
1880 	ret = find_next_devid(root, &device->devid);
1881 	if (ret) {
1882 		rcu_string_free(device->name);
1883 		kfree(device);
1884 		goto error;
1885 	}
1886 
1887 	trans = btrfs_start_transaction(root, 0);
1888 	if (IS_ERR(trans)) {
1889 		rcu_string_free(device->name);
1890 		kfree(device);
1891 		ret = PTR_ERR(trans);
1892 		goto error;
1893 	}
1894 
1895 	lock_chunks(root);
1896 
1897 	q = bdev_get_queue(bdev);
1898 	if (blk_queue_discard(q))
1899 		device->can_discard = 1;
1900 	device->writeable = 1;
1901 	device->work.func = pending_bios_fn;
1902 	generate_random_uuid(device->uuid);
1903 	spin_lock_init(&device->io_lock);
1904 	device->generation = trans->transid;
1905 	device->io_width = root->sectorsize;
1906 	device->io_align = root->sectorsize;
1907 	device->sector_size = root->sectorsize;
1908 	device->total_bytes = i_size_read(bdev->bd_inode);
1909 	device->disk_total_bytes = device->total_bytes;
1910 	device->dev_root = root->fs_info->dev_root;
1911 	device->bdev = bdev;
1912 	device->in_fs_metadata = 1;
1913 	device->is_tgtdev_for_dev_replace = 0;
1914 	device->mode = FMODE_EXCL;
1915 	set_blocksize(device->bdev, 4096);
1916 
1917 	if (seeding_dev) {
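		/*
		 * Sprouting: btrfs_prepare_sprout() moves the existing
		 * read-only seed devices onto their own fs_devices list,
		 * and this new device becomes the first writable member
		 * of the sprouted filesystem.
		 */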
1918 		sb->s_flags &= ~MS_RDONLY;
1919 		ret = btrfs_prepare_sprout(root);
1920 		BUG_ON(ret); /* -ENOMEM */
1921 	}
1922 
1923 	device->fs_devices = root->fs_info->fs_devices;
1924 
1925 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1926 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1927 	list_add(&device->dev_alloc_list,
1928 		 &root->fs_info->fs_devices->alloc_list);
1929 	root->fs_info->fs_devices->num_devices++;
1930 	root->fs_info->fs_devices->open_devices++;
1931 	root->fs_info->fs_devices->rw_devices++;
1932 	root->fs_info->fs_devices->total_devices++;
1933 	if (device->can_discard)
1934 		root->fs_info->fs_devices->num_can_discard++;
1935 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1936 
1937 	spin_lock(&root->fs_info->free_chunk_lock);
1938 	root->fs_info->free_chunk_space += device->total_bytes;
1939 	spin_unlock(&root->fs_info->free_chunk_lock);
1940 
1941 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1942 		root->fs_info->fs_devices->rotating = 1;
1943 
1944 	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1945 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
1946 				    total_bytes + device->total_bytes);
1947 
1948 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1949 	btrfs_set_super_num_devices(root->fs_info->super_copy,
1950 				    total_bytes + 1);
1951 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1952 
1953 	if (seeding_dev) {
1954 		ret = init_first_rw_device(trans, root, device);
1955 		if (ret) {
1956 			btrfs_abort_transaction(trans, root, ret);
1957 			goto error_trans;
1958 		}
1959 		ret = btrfs_finish_sprout(trans, root);
1960 		if (ret) {
1961 			btrfs_abort_transaction(trans, root, ret);
1962 			goto error_trans;
1963 		}
1964 	} else {
1965 		ret = btrfs_add_device(trans, root, device);
1966 		if (ret) {
1967 			btrfs_abort_transaction(trans, root, ret);
1968 			goto error_trans;
1969 		}
1970 	}
1971 
1972 	/*
1973 	 * we've got more storage, clear any full flags on the space
1974 	 * infos
1975 	 */
1976 	btrfs_clear_space_info_full(root->fs_info);
1977 
1978 	unlock_chunks(root);
1979 	root->fs_info->num_tolerated_disk_barrier_failures =
1980 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1981 	ret = btrfs_commit_transaction(trans, root);
1982 
1983 	if (seeding_dev) {
1984 		mutex_unlock(&uuid_mutex);
1985 		up_write(&sb->s_umount);
1986 
1987 		if (ret) /* transaction commit */
1988 			return ret;
1989 
1990 		ret = btrfs_relocate_sys_chunks(root);
1991 		if (ret < 0)
1992 			btrfs_error(root->fs_info, ret,
1993 				    "Failed to relocate sys chunks after "
1994 				    "device initialization. This can be fixed "
1995 				    "using the \"btrfs balance\" command.");
1996 		trans = btrfs_attach_transaction(root);
1997 		if (IS_ERR(trans)) {
1998 			if (PTR_ERR(trans) == -ENOENT)
1999 				return 0;
2000 			return PTR_ERR(trans);
2001 		}
2002 		ret = btrfs_commit_transaction(trans, root);
2003 	}
2004 
2005 	return ret;
2006 
2007 error_trans:
2008 	unlock_chunks(root);
2009 	btrfs_end_transaction(trans, root);
2010 	rcu_string_free(device->name);
2011 	kfree(device);
2012 error:
2013 	blkdev_put(bdev, FMODE_EXCL);
2014 	if (seeding_dev) {
2015 		mutex_unlock(&uuid_mutex);
2016 		up_write(&sb->s_umount);
2017 	}
2018 	return ret;
2019 }
2020 
2021 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2022 				  struct btrfs_device **device_out)
2023 {
2024 	struct request_queue *q;
2025 	struct btrfs_device *device;
2026 	struct block_device *bdev;
2027 	struct btrfs_fs_info *fs_info = root->fs_info;
2028 	struct list_head *devices;
2029 	struct rcu_string *name;
2030 	int ret = 0;
2031 
2032 	*device_out = NULL;
2033 	if (fs_info->fs_devices->seeding)
2034 		return -EINVAL;
2035 
2036 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2037 				  fs_info->bdev_holder);
2038 	if (IS_ERR(bdev))
2039 		return PTR_ERR(bdev);
2040 
2041 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2042 
2043 	devices = &fs_info->fs_devices->devices;
2044 	list_for_each_entry(device, devices, dev_list) {
2045 		if (device->bdev == bdev) {
2046 			ret = -EEXIST;
2047 			goto error;
2048 		}
2049 	}
2050 
2051 	device = kzalloc(sizeof(*device), GFP_NOFS);
2052 	if (!device) {
2053 		ret = -ENOMEM;
2054 		goto error;
2055 	}
2056 
2057 	name = rcu_string_strdup(device_path, GFP_NOFS);
2058 	if (!name) {
2059 		kfree(device);
2060 		ret = -ENOMEM;
2061 		goto error;
2062 	}
2063 	rcu_assign_pointer(device->name, name);
2064 
2065 	q = bdev_get_queue(bdev);
2066 	if (blk_queue_discard(q))
2067 		device->can_discard = 1;
2068 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2069 	device->writeable = 1;
2070 	device->work.func = pending_bios_fn;
2071 	generate_random_uuid(device->uuid);
2072 	device->devid = BTRFS_DEV_REPLACE_DEVID;
2073 	spin_lock_init(&device->io_lock);
2074 	device->generation = 0;
2075 	device->io_width = root->sectorsize;
2076 	device->io_align = root->sectorsize;
2077 	device->sector_size = root->sectorsize;
2078 	device->total_bytes = i_size_read(bdev->bd_inode);
2079 	device->disk_total_bytes = device->total_bytes;
2080 	device->dev_root = fs_info->dev_root;
2081 	device->bdev = bdev;
2082 	device->in_fs_metadata = 1;
2083 	device->is_tgtdev_for_dev_replace = 1;
2084 	device->mode = FMODE_EXCL;
2085 	set_blocksize(device->bdev, 4096);
2086 	device->fs_devices = fs_info->fs_devices;
2087 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2088 	fs_info->fs_devices->num_devices++;
2089 	fs_info->fs_devices->open_devices++;
2090 	if (device->can_discard)
2091 		fs_info->fs_devices->num_can_discard++;
2092 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2093 
2094 	*device_out = device;
2095 	return ret;
2096 
2097 error:
2098 	blkdev_put(bdev, FMODE_EXCL);
2099 	return ret;
2100 }
2101 
2102 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2103 					      struct btrfs_device *tgtdev)
2104 {
2105 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2106 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2107 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2108 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2109 	tgtdev->dev_root = fs_info->dev_root;
2110 	tgtdev->in_fs_metadata = 1;
2111 }
2112 
2113 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2114 					struct btrfs_device *device)
2115 {
2116 	int ret;
2117 	struct btrfs_path *path;
2118 	struct btrfs_root *root;
2119 	struct btrfs_dev_item *dev_item;
2120 	struct extent_buffer *leaf;
2121 	struct btrfs_key key;
2122 
2123 	root = device->dev_root->fs_info->chunk_root;
2124 
2125 	path = btrfs_alloc_path();
2126 	if (!path)
2127 		return -ENOMEM;
2128 
2129 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2130 	key.type = BTRFS_DEV_ITEM_KEY;
2131 	key.offset = device->devid;
2132 
2133 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2134 	if (ret < 0)
2135 		goto out;
2136 
2137 	if (ret > 0) {
2138 		ret = -ENOENT;
2139 		goto out;
2140 	}
2141 
2142 	leaf = path->nodes[0];
2143 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2144 
2145 	btrfs_set_device_id(leaf, dev_item, device->devid);
2146 	btrfs_set_device_type(leaf, dev_item, device->type);
2147 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2148 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2149 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2150 	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2151 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2152 	btrfs_mark_buffer_dirty(leaf);
2153 
2154 out:
2155 	btrfs_free_path(path);
2156 	return ret;
2157 }
2158 
2159 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2160 		      struct btrfs_device *device, u64 new_size)
2161 {
2162 	struct btrfs_super_block *super_copy =
2163 		device->dev_root->fs_info->super_copy;
2164 	u64 old_total = btrfs_super_total_bytes(super_copy);
2165 	u64 diff = new_size - device->total_bytes;
2166 
2167 	if (!device->writeable)
2168 		return -EACCES;
2169 	if (new_size <= device->total_bytes ||
2170 	    device->is_tgtdev_for_dev_replace)
2171 		return -EINVAL;
2172 
2173 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2174 	device->fs_devices->total_rw_bytes += diff;
2175 
2176 	device->total_bytes = new_size;
2177 	device->disk_total_bytes = new_size;
2178 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2179 
2180 	return btrfs_update_device(trans, device);
2181 }
2182 
2183 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2184 		      struct btrfs_device *device, u64 new_size)
2185 {
2186 	int ret;
2187 	lock_chunks(device->dev_root);
2188 	ret = __btrfs_grow_device(trans, device, new_size);
2189 	unlock_chunks(device->dev_root);
2190 	return ret;
2191 }
2192 
2193 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2194 			    struct btrfs_root *root,
2195 			    u64 chunk_tree, u64 chunk_objectid,
2196 			    u64 chunk_offset)
2197 {
2198 	int ret;
2199 	struct btrfs_path *path;
2200 	struct btrfs_key key;
2201 
2202 	root = root->fs_info->chunk_root;
2203 	path = btrfs_alloc_path();
2204 	if (!path)
2205 		return -ENOMEM;
2206 
2207 	key.objectid = chunk_objectid;
2208 	key.offset = chunk_offset;
2209 	key.type = BTRFS_CHUNK_ITEM_KEY;
2210 
2211 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2212 	if (ret < 0)
2213 		goto out;
2214 	else if (ret > 0) { /* Logic error or corruption */
2215 		btrfs_error(root->fs_info, -ENOENT,
2216 			    "Failed lookup while freeing chunk.");
2217 		ret = -ENOENT;
2218 		goto out;
2219 	}
2220 
2221 	ret = btrfs_del_item(trans, root, path);
2222 	if (ret < 0)
2223 		btrfs_error(root->fs_info, ret,
2224 			    "Failed to delete chunk item.");
2225 out:
2226 	btrfs_free_path(path);
2227 	return ret;
2228 }
2229 
2230 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2231 			       u64 chunk_offset)
2232 {
2233 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2234 	struct btrfs_disk_key *disk_key;
2235 	struct btrfs_chunk *chunk;
2236 	u8 *ptr;
2237 	int ret = 0;
2238 	u32 num_stripes;
2239 	u32 array_size;
2240 	u32 len = 0;
2241 	u32 cur;
2242 	struct btrfs_key key;
2243 
2244 	array_size = btrfs_super_sys_array_size(super_copy);
2245 
2246 	ptr = super_copy->sys_chunk_array;
2247 	cur = 0;
2248 
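	/*
	 * sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
	 * struct btrfs_chunk) pairs; walk them and memmove() the tail
	 * down over the pair matching chunk_objectid/chunk_offset.
	 */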
2249 	while (cur < array_size) {
2250 		disk_key = (struct btrfs_disk_key *)ptr;
2251 		btrfs_disk_key_to_cpu(&key, disk_key);
2252 
2253 		len = sizeof(*disk_key);
2254 
2255 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2256 			chunk = (struct btrfs_chunk *)(ptr + len);
2257 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2258 			len += btrfs_chunk_item_size(num_stripes);
2259 		} else {
2260 			ret = -EIO;
2261 			break;
2262 		}
2263 		if (key.objectid == chunk_objectid &&
2264 		    key.offset == chunk_offset) {
2265 			memmove(ptr, ptr + len, array_size - (cur + len));
2266 			array_size -= len;
2267 			btrfs_set_super_sys_array_size(super_copy, array_size);
2268 		} else {
2269 			ptr += len;
2270 			cur += len;
2271 		}
2272 	}
2273 	return ret;
2274 }
2275 
2276 static int btrfs_relocate_chunk(struct btrfs_root *root,
2277 			 u64 chunk_tree, u64 chunk_objectid,
2278 			 u64 chunk_offset)
2279 {
2280 	struct extent_map_tree *em_tree;
2281 	struct btrfs_root *extent_root;
2282 	struct btrfs_trans_handle *trans;
2283 	struct extent_map *em;
2284 	struct map_lookup *map;
2285 	int ret;
2286 	int i;
2287 
2288 	root = root->fs_info->chunk_root;
2289 	extent_root = root->fs_info->extent_root;
2290 	em_tree = &root->fs_info->mapping_tree.map_tree;
2291 
2292 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2293 	if (ret)
2294 		return -ENOSPC;
2295 
2296 	/* step one, relocate all the extents inside this chunk */
2297 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2298 	if (ret)
2299 		return ret;
2300 
2301 	trans = btrfs_start_transaction(root, 0);
2302 	BUG_ON(IS_ERR(trans));
2303 
2304 	lock_chunks(root);
2305 
2306 	/*
2307 	 * step two, delete the device extents and the
2308 	 * chunk tree entries
2309 	 */
2310 	read_lock(&em_tree->lock);
2311 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2312 	read_unlock(&em_tree->lock);
2313 
2314 	BUG_ON(!em || em->start > chunk_offset ||
2315 	       em->start + em->len < chunk_offset);
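	/* for chunk mappings, em->bdev is overloaded to hold the map_lookup */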
2316 	map = (struct map_lookup *)em->bdev;
2317 
2318 	for (i = 0; i < map->num_stripes; i++) {
2319 		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2320 					    map->stripes[i].physical);
2321 		BUG_ON(ret);
2322 
2323 		if (map->stripes[i].dev) {
2324 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2325 			BUG_ON(ret);
2326 		}
2327 	}
2328 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2329 			       chunk_offset);
2330 
2331 	BUG_ON(ret);
2332 
2333 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2334 
2335 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2336 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2337 		BUG_ON(ret);
2338 	}
2339 
2340 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2341 	BUG_ON(ret);
2342 
2343 	write_lock(&em_tree->lock);
2344 	remove_extent_mapping(em_tree, em);
2345 	write_unlock(&em_tree->lock);
2346 
2347 	kfree(map);
2348 	em->bdev = NULL;
2349 
2350 	/* once for the tree */
2351 	free_extent_map(em);
2352 	/* once for us */
2353 	free_extent_map(em);
2354 
2355 	unlock_chunks(root);
2356 	btrfs_end_transaction(trans, root);
2357 	return 0;
2358 }
2359 
2360 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2361 {
2362 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2363 	struct btrfs_path *path;
2364 	struct extent_buffer *leaf;
2365 	struct btrfs_chunk *chunk;
2366 	struct btrfs_key key;
2367 	struct btrfs_key found_key;
2368 	u64 chunk_tree = chunk_root->root_key.objectid;
2369 	u64 chunk_type;
2370 	bool retried = false;
2371 	int failed = 0;
2372 	int ret;
2373 
2374 	path = btrfs_alloc_path();
2375 	if (!path)
2376 		return -ENOMEM;
2377 
2378 again:
2379 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2380 	key.offset = (u64)-1;
2381 	key.type = BTRFS_CHUNK_ITEM_KEY;
2382 
2383 	while (1) {
2384 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2385 		if (ret < 0)
2386 			goto error;
2387 		BUG_ON(ret == 0); /* Corruption */
2388 
2389 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2390 					  key.type);
2391 		if (ret < 0)
2392 			goto error;
2393 		if (ret > 0)
2394 			break;
2395 
2396 		leaf = path->nodes[0];
2397 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2398 
2399 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2400 				       struct btrfs_chunk);
2401 		chunk_type = btrfs_chunk_type(leaf, chunk);
2402 		btrfs_release_path(path);
2403 
2404 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2405 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2406 						   found_key.objectid,
2407 						   found_key.offset);
2408 			if (ret == -ENOSPC)
2409 				failed++;
2410 			else if (ret)
2411 				BUG();
2412 		}
2413 
2414 		if (found_key.offset == 0)
2415 			break;
2416 		key.offset = found_key.offset - 1;
2417 	}
2418 	ret = 0;
2419 	if (failed && !retried) {
2420 		failed = 0;
2421 		retried = true;
2422 		goto again;
2423 	} else if (failed && retried) {
2424 		WARN_ON(1);
2425 		ret = -ENOSPC;
2426 	}
2427 error:
2428 	btrfs_free_path(path);
2429 	return ret;
2430 }
2431 
2432 static int insert_balance_item(struct btrfs_root *root,
2433 			       struct btrfs_balance_control *bctl)
2434 {
2435 	struct btrfs_trans_handle *trans;
2436 	struct btrfs_balance_item *item;
2437 	struct btrfs_disk_balance_args disk_bargs;
2438 	struct btrfs_path *path;
2439 	struct extent_buffer *leaf;
2440 	struct btrfs_key key;
2441 	int ret, err;
2442 
2443 	path = btrfs_alloc_path();
2444 	if (!path)
2445 		return -ENOMEM;
2446 
2447 	trans = btrfs_start_transaction(root, 0);
2448 	if (IS_ERR(trans)) {
2449 		btrfs_free_path(path);
2450 		return PTR_ERR(trans);
2451 	}
2452 
2453 	key.objectid = BTRFS_BALANCE_OBJECTID;
2454 	key.type = BTRFS_BALANCE_ITEM_KEY;
2455 	key.offset = 0;
2456 
2457 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2458 				      sizeof(*item));
2459 	if (ret)
2460 		goto out;
2461 
2462 	leaf = path->nodes[0];
2463 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2464 
2465 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2466 
2467 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2468 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2469 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2470 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2471 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2472 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2473 
2474 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2475 
2476 	btrfs_mark_buffer_dirty(leaf);
2477 out:
2478 	btrfs_free_path(path);
2479 	err = btrfs_commit_transaction(trans, root);
2480 	if (err && !ret)
2481 		ret = err;
2482 	return ret;
2483 }
2484 
2485 static int del_balance_item(struct btrfs_root *root)
2486 {
2487 	struct btrfs_trans_handle *trans;
2488 	struct btrfs_path *path;
2489 	struct btrfs_key key;
2490 	int ret, err;
2491 
2492 	path = btrfs_alloc_path();
2493 	if (!path)
2494 		return -ENOMEM;
2495 
2496 	trans = btrfs_start_transaction(root, 0);
2497 	if (IS_ERR(trans)) {
2498 		btrfs_free_path(path);
2499 		return PTR_ERR(trans);
2500 	}
2501 
2502 	key.objectid = BTRFS_BALANCE_OBJECTID;
2503 	key.type = BTRFS_BALANCE_ITEM_KEY;
2504 	key.offset = 0;
2505 
2506 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2507 	if (ret < 0)
2508 		goto out;
2509 	if (ret > 0) {
2510 		ret = -ENOENT;
2511 		goto out;
2512 	}
2513 
2514 	ret = btrfs_del_item(trans, root, path);
2515 out:
2516 	btrfs_free_path(path);
2517 	err = btrfs_commit_transaction(trans, root);
2518 	if (err && !ret)
2519 		ret = err;
2520 	return ret;
2521 }
2522 
2523 /*
2524  * This is a heuristic used to reduce the number of chunks balanced on
2525  * resume after balance was interrupted.
2526  */
2527 static void update_balance_args(struct btrfs_balance_control *bctl)
2528 {
2529 	/*
2530 	 * Turn on soft mode for chunk types that were being converted.
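	 * Soft mode makes the convert filter skip chunks that already have
	 * the target profile, so a resumed conversion does not redo chunks
	 * it already finished.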
2531 	 */
2532 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2533 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2534 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2535 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2536 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2537 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2538 
2539 	/*
2540 	 * Turn on the usage filter if it is not already in use.  The idea is
2541 	 * that chunks that we have already balanced should be
2542 	 * reasonably full.  Don't do it for chunks that are being
2543 	 * converted - that will keep us from relocating unconverted
2544 	 * (albeit full) chunks.
2545 	 */
2546 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2547 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2548 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2549 		bctl->data.usage = 90;
2550 	}
2551 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2552 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2553 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2554 		bctl->sys.usage = 90;
2555 	}
2556 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2557 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2558 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2559 		bctl->meta.usage = 90;
2560 	}
2561 }
2562 
2563 /*
2564  * Should be called with both balance and volume mutexes held to
2565  * serialize other volume operations (add_dev/rm_dev/resize) with
2566  * restriper.  Same goes for unset_balance_control.
2567  */
2568 static void set_balance_control(struct btrfs_balance_control *bctl)
2569 {
2570 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2571 
2572 	BUG_ON(fs_info->balance_ctl);
2573 
2574 	spin_lock(&fs_info->balance_lock);
2575 	fs_info->balance_ctl = bctl;
2576 	spin_unlock(&fs_info->balance_lock);
2577 }
2578 
2579 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2580 {
2581 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2582 
2583 	BUG_ON(!fs_info->balance_ctl);
2584 
2585 	spin_lock(&fs_info->balance_lock);
2586 	fs_info->balance_ctl = NULL;
2587 	spin_unlock(&fs_info->balance_lock);
2588 
2589 	kfree(bctl);
2590 }
2591 
2592 /*
2593  * Balance filters.  Return 1 if chunk should be filtered out
2594  * (should not be balanced).
2595  */
2596 static int chunk_profiles_filter(u64 chunk_type,
2597 				 struct btrfs_balance_args *bargs)
2598 {
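	/*
	 * chunk_to_extended() maps profile 0 (single) to
	 * BTRFS_AVAIL_ALLOC_BIT_SINGLE so it can be matched against
	 * bargs->profiles like any other profile bit.
	 */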
2599 	chunk_type = chunk_to_extended(chunk_type) &
2600 				BTRFS_EXTENDED_PROFILE_MASK;
2601 
2602 	if (bargs->profiles & chunk_type)
2603 		return 0;
2604 
2605 	return 1;
2606 }
2607 
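/*
 * Example: with bargs->usage = 90 and a 1GiB chunk, user_thresh is 90%
 * of the chunk length (div_factor_fine() scales by factor / 100), so a
 * chunk under 90% full returns 0 here and gets balanced; fuller chunks
 * are filtered out.
 */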
2608 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2609 			      struct btrfs_balance_args *bargs)
2610 {
2611 	struct btrfs_block_group_cache *cache;
2612 	u64 chunk_used, user_thresh;
2613 	int ret = 1;
2614 
2615 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2616 	chunk_used = btrfs_block_group_used(&cache->item);
2617 
2618 	if (bargs->usage == 0)
2619 		user_thresh = 0;
2620 	else if (bargs->usage > 100)
2621 		user_thresh = cache->key.offset;
2622 	else
2623 		user_thresh = div_factor_fine(cache->key.offset,
2624 					      bargs->usage);
2625 
2626 	if (chunk_used < user_thresh)
2627 		ret = 0;
2628 
2629 	btrfs_put_block_group(cache);
2630 	return ret;
2631 }
2632 
2633 static int chunk_devid_filter(struct extent_buffer *leaf,
2634 			      struct btrfs_chunk *chunk,
2635 			      struct btrfs_balance_args *bargs)
2636 {
2637 	struct btrfs_stripe *stripe;
2638 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2639 	int i;
2640 
2641 	for (i = 0; i < num_stripes; i++) {
2642 		stripe = btrfs_stripe_nr(chunk, i);
2643 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2644 			return 0;
2645 	}
2646 
2647 	return 1;
2648 }
2649 
2650 /* [pstart, pend) */
2651 static int chunk_drange_filter(struct extent_buffer *leaf,
2652 			       struct btrfs_chunk *chunk,
2653 			       u64 chunk_offset,
2654 			       struct btrfs_balance_args *bargs)
2655 {
2656 	struct btrfs_stripe *stripe;
2657 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2658 	u64 stripe_offset;
2659 	u64 stripe_length;
2660 	int factor;
2661 	int i;
2662 
2663 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2664 		return 0;
2665 
2666 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2667 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2668 		factor = 2;
2669 	else
2670 		factor = 1;
2671 	factor = num_stripes / factor;
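	/*
	 * Each stripe covers chunk_length / (num_stripes / ncopies) bytes
	 * of its device: mirrored profiles (dup/raid1/raid10) store every
	 * byte twice, so fewer distinct stripes span the chunk.
	 */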
2672 
2673 	for (i = 0; i < num_stripes; i++) {
2674 		stripe = btrfs_stripe_nr(chunk, i);
2675 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2676 			continue;
2677 
2678 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2679 		stripe_length = btrfs_chunk_length(leaf, chunk);
2680 		do_div(stripe_length, factor);
2681 
2682 		if (stripe_offset < bargs->pend &&
2683 		    stripe_offset + stripe_length > bargs->pstart)
2684 			return 0;
2685 	}
2686 
2687 	return 1;
2688 }
2689 
2690 /* [vstart, vend) */
2691 static int chunk_vrange_filter(struct extent_buffer *leaf,
2692 			       struct btrfs_chunk *chunk,
2693 			       u64 chunk_offset,
2694 			       struct btrfs_balance_args *bargs)
2695 {
2696 	if (chunk_offset < bargs->vend &&
2697 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2698 		/* at least part of the chunk is inside this vrange */
2699 		return 0;
2700 
2701 	return 1;
2702 }
2703 
2704 static int chunk_soft_convert_filter(u64 chunk_type,
2705 				     struct btrfs_balance_args *bargs)
2706 {
2707 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2708 		return 0;
2709 
2710 	chunk_type = chunk_to_extended(chunk_type) &
2711 				BTRFS_EXTENDED_PROFILE_MASK;
2712 
2713 	if (bargs->target == chunk_type)
2714 		return 1;
2715 
2716 	return 0;
2717 }
2718 
2719 static int should_balance_chunk(struct btrfs_root *root,
2720 				struct extent_buffer *leaf,
2721 				struct btrfs_chunk *chunk, u64 chunk_offset)
2722 {
2723 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2724 	struct btrfs_balance_args *bargs = NULL;
2725 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2726 
2727 	/* type filter */
2728 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2729 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2730 		return 0;
2731 	}
2732 
2733 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2734 		bargs = &bctl->data;
2735 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2736 		bargs = &bctl->sys;
2737 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2738 		bargs = &bctl->meta;
2739 
2740 	/* profiles filter */
2741 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2742 	    chunk_profiles_filter(chunk_type, bargs)) {
2743 		return 0;
2744 	}
2745 
2746 	/* usage filter */
2747 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2748 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2749 		return 0;
2750 	}
2751 
2752 	/* devid filter */
2753 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2754 	    chunk_devid_filter(leaf, chunk, bargs)) {
2755 		return 0;
2756 	}
2757 
2758 	/* drange filter, makes sense only with devid filter */
2759 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2760 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2761 		return 0;
2762 	}
2763 
2764 	/* vrange filter */
2765 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2766 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2767 		return 0;
2768 	}
2769 
2770 	/* soft profile changing mode */
2771 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2772 	    chunk_soft_convert_filter(chunk_type, bargs)) {
2773 		return 0;
2774 	}
2775 
2776 	return 1;
2777 }
2778 
2779 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2780 {
2781 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2782 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2783 	struct btrfs_root *dev_root = fs_info->dev_root;
2784 	struct list_head *devices;
2785 	struct btrfs_device *device;
2786 	u64 old_size;
2787 	u64 size_to_free;
2788 	struct btrfs_chunk *chunk;
2789 	struct btrfs_path *path;
2790 	struct btrfs_key key;
2791 	struct btrfs_key found_key;
2792 	struct btrfs_trans_handle *trans;
2793 	struct extent_buffer *leaf;
2794 	int slot;
2795 	int ret;
2796 	int enospc_errors = 0;
2797 	bool counting = true;
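	/*
	 * Two passes: while counting is set, only tally the chunks that
	 * would move (bctl->stat.expected); the second pass relocates them.
	 */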
2798 
2799 	/* step one, make some room on all the devices */
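	/*
	 * btrfs_shrink_device() relocates anything past the reduced size
	 * and btrfs_grow_device() then restores the old size, leaving up
	 * to size_to_free bytes free at the tail of each device for the
	 * relocations in step two.
	 */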
2800 	devices = &fs_info->fs_devices->devices;
2801 	list_for_each_entry(device, devices, dev_list) {
2802 		old_size = device->total_bytes;
2803 		size_to_free = div_factor(old_size, 1);
2804 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2805 		if (!device->writeable ||
2806 		    device->total_bytes - device->bytes_used > size_to_free ||
2807 		    device->is_tgtdev_for_dev_replace)
2808 			continue;
2809 
2810 		ret = btrfs_shrink_device(device, old_size - size_to_free);
2811 		if (ret == -ENOSPC)
2812 			break;
2813 		BUG_ON(ret);
2814 
2815 		trans = btrfs_start_transaction(dev_root, 0);
2816 		BUG_ON(IS_ERR(trans));
2817 
2818 		ret = btrfs_grow_device(trans, device, old_size);
2819 		BUG_ON(ret);
2820 
2821 		btrfs_end_transaction(trans, dev_root);
2822 	}
2823 
2824 	/* step two, relocate all the chunks */
2825 	path = btrfs_alloc_path();
2826 	if (!path) {
2827 		ret = -ENOMEM;
2828 		goto error;
2829 	}
2830 
2831 	/* zero out stat counters */
2832 	spin_lock(&fs_info->balance_lock);
2833 	memset(&bctl->stat, 0, sizeof(bctl->stat));
2834 	spin_unlock(&fs_info->balance_lock);
2835 again:
2836 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2837 	key.offset = (u64)-1;
2838 	key.type = BTRFS_CHUNK_ITEM_KEY;
2839 
2840 	while (1) {
2841 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2842 		    atomic_read(&fs_info->balance_cancel_req)) {
2843 			ret = -ECANCELED;
2844 			goto error;
2845 		}
2846 
2847 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2848 		if (ret < 0)
2849 			goto error;
2850 
2851 		/*
2852 		 * this shouldn't happen, it means the last relocate
2853 		 * failed
2854 		 */
2855 		if (ret == 0)
2856 			BUG(); /* FIXME break ? */
2857 
2858 		ret = btrfs_previous_item(chunk_root, path, 0,
2859 					  BTRFS_CHUNK_ITEM_KEY);
2860 		if (ret) {
2861 			ret = 0;
2862 			break;
2863 		}
2864 
2865 		leaf = path->nodes[0];
2866 		slot = path->slots[0];
2867 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2868 
2869 		if (found_key.objectid != key.objectid)
2870 			break;
2871 
2872 		/* chunk zero is special */
2873 		if (found_key.offset == 0)
2874 			break;
2875 
2876 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2877 
2878 		if (!counting) {
2879 			spin_lock(&fs_info->balance_lock);
2880 			bctl->stat.considered++;
2881 			spin_unlock(&fs_info->balance_lock);
2882 		}
2883 
2884 		ret = should_balance_chunk(chunk_root, leaf, chunk,
2885 					   found_key.offset);
2886 		btrfs_release_path(path);
2887 		if (!ret)
2888 			goto loop;
2889 
2890 		if (counting) {
2891 			spin_lock(&fs_info->balance_lock);
2892 			bctl->stat.expected++;
2893 			spin_unlock(&fs_info->balance_lock);
2894 			goto loop;
2895 		}
2896 
2897 		ret = btrfs_relocate_chunk(chunk_root,
2898 					   chunk_root->root_key.objectid,
2899 					   found_key.objectid,
2900 					   found_key.offset);
2901 		if (ret && ret != -ENOSPC)
2902 			goto error;
2903 		if (ret == -ENOSPC) {
2904 			enospc_errors++;
2905 		} else {
2906 			spin_lock(&fs_info->balance_lock);
2907 			bctl->stat.completed++;
2908 			spin_unlock(&fs_info->balance_lock);
2909 		}
2910 loop:
2911 		key.offset = found_key.offset - 1;
2912 	}
2913 
2914 	if (counting) {
2915 		btrfs_release_path(path);
2916 		counting = false;
2917 		goto again;
2918 	}
2919 error:
2920 	btrfs_free_path(path);
2921 	if (enospc_errors) {
2922 		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2923 		       enospc_errors);
2924 		if (!ret)
2925 			ret = -ENOSPC;
2926 	}
2927 
2928 	return ret;
2929 }
2930 
2931 /**
2932  * alloc_profile_is_valid - see if a given profile is valid and reduced
2933  * @flags: profile to validate
2934  * @extended: if true @flags is treated as an extended profile
2935  */
2936 static int alloc_profile_is_valid(u64 flags, int extended)
2937 {
2938 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2939 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
2940 
2941 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2942 
2943 	/* 1) check that all other bits are zeroed */
2944 	if (flags & ~mask)
2945 		return 0;
2946 
2947 	/* 2) see if profile is reduced */
2948 	if (flags == 0)
2949 		return !extended; /* "0" is valid for usual profiles */
2950 
2951 	/* true if exactly one bit set */
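	/* e.g. 0x4 & 0x3 == 0 (one bit), but 0x6 & 0x5 != 0 (two bits) */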
2952 	return (flags & (flags - 1)) == 0;
2953 }
2954 
2955 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2956 {
2957 	/* cancel requested || normal exit path */
2958 	return atomic_read(&fs_info->balance_cancel_req) ||
2959 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
2960 		 atomic_read(&fs_info->balance_cancel_req) == 0);
2961 }
2962 
2963 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2964 {
2965 	int ret;
2966 
2967 	unset_balance_control(fs_info);
2968 	ret = del_balance_item(fs_info->tree_root);
2969 	BUG_ON(ret);
2970 
2971 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2972 }
2973 
2974 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2975 			       struct btrfs_ioctl_balance_args *bargs);
2976 
2977 /*
2978  * Should be called with both balance and volume mutexes held
2979  */
2980 int btrfs_balance(struct btrfs_balance_control *bctl,
2981 		  struct btrfs_ioctl_balance_args *bargs)
2982 {
2983 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2984 	u64 allowed;
2985 	int mixed = 0;
2986 	int ret;
2987 	u64 num_devices;
2988 
2989 	if (btrfs_fs_closing(fs_info) ||
2990 	    atomic_read(&fs_info->balance_pause_req) ||
2991 	    atomic_read(&fs_info->balance_cancel_req)) {
2992 		ret = -EINVAL;
2993 		goto out;
2994 	}
2995 
2996 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2997 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2998 		mixed = 1;
2999 
3000 	/*
3001 	 * With mixed block groups, both data and metadata must be picked,
3002 	 * and identical options must be given for both of them.
3003 	 */
3004 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3005 	if (mixed && (bctl->flags & allowed)) {
3006 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3007 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3008 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3009 			printk(KERN_ERR "btrfs: with mixed groups data and "
3010 			       "metadata balance options must be the same\n");
3011 			ret = -EINVAL;
3012 			goto out;
3013 		}
3014 	}
3015 
3016 	num_devices = fs_info->fs_devices->num_devices;
3017 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3018 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3019 		BUG_ON(num_devices < 1);
3020 		num_devices--;
3021 	}
3022 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3023 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3024 	if (num_devices == 1)
3025 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3026 	else if (num_devices < 4)
3027 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3028 	else
3029 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
3030 				BTRFS_BLOCK_GROUP_RAID10);
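	/*
	 * e.g. converting to raid10 requires at least 4 devices; 2-3
	 * devices allow raid0/raid1, and dup is only allowed on a
	 * single-device filesystem.
	 */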
3031 
3032 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3033 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3034 	     (bctl->data.target & ~allowed))) {
3035 		printk(KERN_ERR "btrfs: unable to start balance with target "
3036 		       "data profile %llu\n",
3037 		       (unsigned long long)bctl->data.target);
3038 		ret = -EINVAL;
3039 		goto out;
3040 	}
3041 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3042 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3043 	     (bctl->meta.target & ~allowed))) {
3044 		printk(KERN_ERR "btrfs: unable to start balance with target "
3045 		       "metadata profile %llu\n",
3046 		       (unsigned long long)bctl->meta.target);
3047 		ret = -EINVAL;
3048 		goto out;
3049 	}
3050 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3051 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3052 	     (bctl->sys.target & ~allowed))) {
3053 		printk(KERN_ERR "btrfs: unable to start balance with target "
3054 		       "system profile %llu\n",
3055 		       (unsigned long long)bctl->sys.target);
3056 		ret = -EINVAL;
3057 		goto out;
3058 	}
3059 
3060 	/* allow dup'ed data chunks only in mixed mode */
3061 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3062 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3063 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3064 		ret = -EINVAL;
3065 		goto out;
3066 	}
3067 
3068 	/* allow reducing meta or sys integrity only if force is set */
3069 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3070 			BTRFS_BLOCK_GROUP_RAID10;
3071 	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3072 	     (fs_info->avail_system_alloc_bits & allowed) &&
3073 	     !(bctl->sys.target & allowed)) ||
3074 	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3075 	     (fs_info->avail_metadata_alloc_bits & allowed) &&
3076 	     !(bctl->meta.target & allowed))) {
3077 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
3078 			printk(KERN_INFO "btrfs: force reducing metadata "
3079 			       "integrity\n");
3080 		} else {
3081 			printk(KERN_ERR "btrfs: balance will reduce metadata "
3082 			       "integrity, use force if you want this\n");
3083 			ret = -EINVAL;
3084 			goto out;
3085 		}
3086 	}
3087 
3088 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3089 		int num_tolerated_disk_barrier_failures;
3090 		u64 target = bctl->sys.target;
3091 
3092 		num_tolerated_disk_barrier_failures =
3093 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3094 		if (num_tolerated_disk_barrier_failures > 0 &&
3095 		    (target &
3096 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3097 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3098 			num_tolerated_disk_barrier_failures = 0;
3099 		else if (num_tolerated_disk_barrier_failures > 1 &&
3100 			 (target &
3101 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3102 			num_tolerated_disk_barrier_failures = 1;
3103 
3104 		fs_info->num_tolerated_disk_barrier_failures =
3105 			num_tolerated_disk_barrier_failures;
3106 	}
3107 
3108 	ret = insert_balance_item(fs_info->tree_root, bctl);
3109 	if (ret && ret != -EEXIST)
3110 		goto out;
3111 
3112 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3113 		BUG_ON(ret == -EEXIST);
3114 		set_balance_control(bctl);
3115 	} else {
3116 		BUG_ON(ret != -EEXIST);
3117 		spin_lock(&fs_info->balance_lock);
3118 		update_balance_args(bctl);
3119 		spin_unlock(&fs_info->balance_lock);
3120 	}
3121 
3122 	atomic_inc(&fs_info->balance_running);
3123 	mutex_unlock(&fs_info->balance_mutex);
3124 
3125 	ret = __btrfs_balance(fs_info);
3126 
3127 	mutex_lock(&fs_info->balance_mutex);
3128 	atomic_dec(&fs_info->balance_running);
3129 
3130 	if (bargs) {
3131 		memset(bargs, 0, sizeof(*bargs));
3132 		update_ioctl_balance_args(fs_info, 0, bargs);
3133 	}
3134 
3135 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3136 	    balance_need_close(fs_info)) {
3137 		__cancel_balance(fs_info);
3138 	}
3139 
3140 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3141 		fs_info->num_tolerated_disk_barrier_failures =
3142 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3143 	}
3144 
3145 	wake_up(&fs_info->balance_wait_q);
3146 
3147 	return ret;
3148 out:
3149 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3150 		__cancel_balance(fs_info);
3151 	else {
3152 		kfree(bctl);
3153 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3154 	}
3155 	return ret;
3156 }
3157 
3158 static int balance_kthread(void *data)
3159 {
3160 	struct btrfs_fs_info *fs_info = data;
3161 	int ret = 0;
3162 
3163 	mutex_lock(&fs_info->volume_mutex);
3164 	mutex_lock(&fs_info->balance_mutex);
3165 
3166 	if (fs_info->balance_ctl) {
3167 		printk(KERN_INFO "btrfs: continuing balance\n");
3168 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3169 	}
3170 
3171 	mutex_unlock(&fs_info->balance_mutex);
3172 	mutex_unlock(&fs_info->volume_mutex);
3173 
3174 	return ret;
3175 }
3176 
3177 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3178 {
3179 	struct task_struct *tsk;
3180 
3181 	spin_lock(&fs_info->balance_lock);
3182 	if (!fs_info->balance_ctl) {
3183 		spin_unlock(&fs_info->balance_lock);
3184 		return 0;
3185 	}
3186 	spin_unlock(&fs_info->balance_lock);
3187 
3188 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3189 		printk(KERN_INFO "btrfs: force skipping balance\n");
3190 		return 0;
3191 	}
3192 
3193 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3194 	if (IS_ERR(tsk))
3195 		return PTR_ERR(tsk);
3196 
3197 	return 0;
3198 }
3199 
3200 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3201 {
3202 	struct btrfs_balance_control *bctl;
3203 	struct btrfs_balance_item *item;
3204 	struct btrfs_disk_balance_args disk_bargs;
3205 	struct btrfs_path *path;
3206 	struct extent_buffer *leaf;
3207 	struct btrfs_key key;
3208 	int ret;
3209 
3210 	path = btrfs_alloc_path();
3211 	if (!path)
3212 		return -ENOMEM;
3213 
3214 	key.objectid = BTRFS_BALANCE_OBJECTID;
3215 	key.type = BTRFS_BALANCE_ITEM_KEY;
3216 	key.offset = 0;
3217 
3218 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3219 	if (ret < 0)
3220 		goto out;
3221 	if (ret > 0) { /* ret = -ENOENT; */
3222 		ret = 0;
3223 		goto out;
3224 	}
3225 
3226 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3227 	if (!bctl) {
3228 		ret = -ENOMEM;
3229 		goto out;
3230 	}
3231 
3232 	leaf = path->nodes[0];
3233 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3234 
3235 	bctl->fs_info = fs_info;
3236 	bctl->flags = btrfs_balance_flags(leaf, item);
3237 	bctl->flags |= BTRFS_BALANCE_RESUME;
3238 
3239 	btrfs_balance_data(leaf, item, &disk_bargs);
3240 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3241 	btrfs_balance_meta(leaf, item, &disk_bargs);
3242 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3243 	btrfs_balance_sys(leaf, item, &disk_bargs);
3244 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3245 
3246 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3247 
3248 	mutex_lock(&fs_info->volume_mutex);
3249 	mutex_lock(&fs_info->balance_mutex);
3250 
3251 	set_balance_control(bctl);
3252 
3253 	mutex_unlock(&fs_info->balance_mutex);
3254 	mutex_unlock(&fs_info->volume_mutex);
3255 out:
3256 	btrfs_free_path(path);
3257 	return ret;
3258 }
3259 
3260 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3261 {
3262 	int ret = 0;
3263 
3264 	mutex_lock(&fs_info->balance_mutex);
3265 	if (!fs_info->balance_ctl) {
3266 		mutex_unlock(&fs_info->balance_mutex);
3267 		return -ENOTCONN;
3268 	}
3269 
3270 	if (atomic_read(&fs_info->balance_running)) {
3271 		atomic_inc(&fs_info->balance_pause_req);
3272 		mutex_unlock(&fs_info->balance_mutex);
3273 
3274 		wait_event(fs_info->balance_wait_q,
3275 			   atomic_read(&fs_info->balance_running) == 0);
3276 
3277 		mutex_lock(&fs_info->balance_mutex);
3278 		/* we are good with balance_ctl ripped off from under us */
3279 		BUG_ON(atomic_read(&fs_info->balance_running));
3280 		atomic_dec(&fs_info->balance_pause_req);
3281 	} else {
3282 		ret = -ENOTCONN;
3283 	}
3284 
3285 	mutex_unlock(&fs_info->balance_mutex);
3286 	return ret;
3287 }
3288 
3289 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3290 {
3291 	mutex_lock(&fs_info->balance_mutex);
3292 	if (!fs_info->balance_ctl) {
3293 		mutex_unlock(&fs_info->balance_mutex);
3294 		return -ENOTCONN;
3295 	}
3296 
3297 	atomic_inc(&fs_info->balance_cancel_req);
3298 	/*
3299 	 * if we are running just wait and return, balance item is
3300 	 * deleted in btrfs_balance in this case
3301 	 */
3302 	if (atomic_read(&fs_info->balance_running)) {
3303 		mutex_unlock(&fs_info->balance_mutex);
3304 		wait_event(fs_info->balance_wait_q,
3305 			   atomic_read(&fs_info->balance_running) == 0);
3306 		mutex_lock(&fs_info->balance_mutex);
3307 	} else {
3308 		/* __cancel_balance needs volume_mutex */
3309 		mutex_unlock(&fs_info->balance_mutex);
3310 		mutex_lock(&fs_info->volume_mutex);
3311 		mutex_lock(&fs_info->balance_mutex);
3312 
3313 		if (fs_info->balance_ctl)
3314 			__cancel_balance(fs_info);
3315 
3316 		mutex_unlock(&fs_info->volume_mutex);
3317 	}
3318 
3319 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3320 	atomic_dec(&fs_info->balance_cancel_req);
3321 	mutex_unlock(&fs_info->balance_mutex);
3322 	return 0;
3323 }
3324 
3325 /*
3326  * shrinking a device means finding all of the device extents past
3327  * the new size, and then following the back refs to the chunks.
3328  * The chunk relocation code actually frees the device extent.
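 * (Relocation can fail with -ENOSPC; the failed/retried pair below
 * gives those device extents a second pass once other chunks have
 * moved and freed up room.)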
3329  */
3330 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3331 {
3332 	struct btrfs_trans_handle *trans;
3333 	struct btrfs_root *root = device->dev_root;
3334 	struct btrfs_dev_extent *dev_extent = NULL;
3335 	struct btrfs_path *path;
3336 	u64 length;
3337 	u64 chunk_tree;
3338 	u64 chunk_objectid;
3339 	u64 chunk_offset;
3340 	int ret;
3341 	int slot;
3342 	int failed = 0;
3343 	bool retried = false;
3344 	struct extent_buffer *l;
3345 	struct btrfs_key key;
3346 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3347 	u64 old_total = btrfs_super_total_bytes(super_copy);
3348 	u64 old_size = device->total_bytes;
3349 	u64 diff = device->total_bytes - new_size;
3350 
3351 	if (device->is_tgtdev_for_dev_replace)
3352 		return -EINVAL;
3353 
3354 	path = btrfs_alloc_path();
3355 	if (!path)
3356 		return -ENOMEM;
3357 
3358 	path->reada = 2;
3359 
3360 	lock_chunks(root);
3361 
3362 	device->total_bytes = new_size;
3363 	if (device->writeable) {
3364 		device->fs_devices->total_rw_bytes -= diff;
3365 		spin_lock(&root->fs_info->free_chunk_lock);
3366 		root->fs_info->free_chunk_space -= diff;
3367 		spin_unlock(&root->fs_info->free_chunk_lock);
3368 	}
3369 	unlock_chunks(root);
3370 
3371 again:
3372 	key.objectid = device->devid;
3373 	key.offset = (u64)-1;
3374 	key.type = BTRFS_DEV_EXTENT_KEY;
3375 
3376 	do {
3377 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3378 		if (ret < 0)
3379 			goto done;
3380 
3381 		ret = btrfs_previous_item(root, path, 0, key.type);
3382 		if (ret < 0)
3383 			goto done;
3384 		if (ret) {
3385 			ret = 0;
3386 			btrfs_release_path(path);
3387 			break;
3388 		}
3389 
3390 		l = path->nodes[0];
3391 		slot = path->slots[0];
3392 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3393 
3394 		if (key.objectid != device->devid) {
3395 			btrfs_release_path(path);
3396 			break;
3397 		}
3398 
3399 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3400 		length = btrfs_dev_extent_length(l, dev_extent);
3401 
3402 		if (key.offset + length <= new_size) {
3403 			btrfs_release_path(path);
3404 			break;
3405 		}
3406 
3407 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3408 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3409 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3410 		btrfs_release_path(path);
3411 
3412 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3413 					   chunk_offset);
3414 		if (ret && ret != -ENOSPC)
3415 			goto done;
3416 		if (ret == -ENOSPC)
3417 			failed++;
3418 	} while (key.offset-- > 0);
3419 
3420 	if (failed && !retried) {
3421 		failed = 0;
3422 		retried = true;
3423 		goto again;
3424 	} else if (failed && retried) {
3425 		ret = -ENOSPC;
3426 		lock_chunks(root);
3427 
3428 		device->total_bytes = old_size;
3429 		if (device->writeable)
3430 			device->fs_devices->total_rw_bytes += diff;
3431 		spin_lock(&root->fs_info->free_chunk_lock);
3432 		root->fs_info->free_chunk_space += diff;
3433 		spin_unlock(&root->fs_info->free_chunk_lock);
3434 		unlock_chunks(root);
3435 		goto done;
3436 	}
3437 
3438 	/* Shrinking succeeded, else we would be at "done". */
3439 	trans = btrfs_start_transaction(root, 0);
3440 	if (IS_ERR(trans)) {
3441 		ret = PTR_ERR(trans);
3442 		goto done;
3443 	}
3444 
3445 	lock_chunks(root);
3446 
3447 	device->disk_total_bytes = new_size;
3448 	/* Now btrfs_update_device() will change the on-disk size. */
3449 	ret = btrfs_update_device(trans, device);
3450 	if (ret) {
3451 		unlock_chunks(root);
3452 		btrfs_end_transaction(trans, root);
3453 		goto done;
3454 	}
3455 	WARN_ON(diff > old_total);
3456 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3457 	unlock_chunks(root);
3458 	btrfs_end_transaction(trans, root);
3459 done:
3460 	btrfs_free_path(path);
3461 	return ret;
3462 }
3463 
3464 static int btrfs_add_system_chunk(struct btrfs_root *root,
3465 			   struct btrfs_key *key,
3466 			   struct btrfs_chunk *chunk, int item_size)
3467 {
3468 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3469 	struct btrfs_disk_key disk_key;
3470 	u32 array_size;
3471 	u8 *ptr;
3472 
3473 	array_size = btrfs_super_sys_array_size(super_copy);
3474 	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3475 		return -EFBIG;
3476 
3477 	ptr = super_copy->sys_chunk_array + array_size;
3478 	btrfs_cpu_key_to_disk(&disk_key, key);
3479 	memcpy(ptr, &disk_key, sizeof(disk_key));
3480 	ptr += sizeof(disk_key);
3481 	memcpy(ptr, chunk, item_size);
3482 	item_size += sizeof(disk_key);
3483 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3484 	return 0;
3485 }
3486 
3487 /*
3488  * sort the devices in descending order by max_avail, total_avail
3489  */
3490 static int btrfs_cmp_device_info(const void *a, const void *b)
3491 {
3492 	const struct btrfs_device_info *di_a = a;
3493 	const struct btrfs_device_info *di_b = b;
3494 
3495 	if (di_a->max_avail > di_b->max_avail)
3496 		return -1;
3497 	if (di_a->max_avail < di_b->max_avail)
3498 		return 1;
3499 	if (di_a->total_avail > di_b->total_avail)
3500 		return -1;
3501 	if (di_a->total_avail < di_b->total_avail)
3502 		return 1;
3503 	return 0;
3504 }
3505 
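/*
 * Field order matches the reads in __btrfs_alloc_chunk() below:
 * { sub_stripes, dev_stripes, devs_max (0 == no limit), devs_min,
 *   devs_increment, ncopies }
 */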
3506 struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3507 	{ 2, 1, 0, 4, 2, 2 /* raid10 */ },
3508 	{ 1, 1, 2, 2, 2, 2 /* raid1 */ },
3509 	{ 1, 2, 1, 1, 1, 2 /* dup */ },
3510 	{ 1, 1, 0, 2, 1, 1 /* raid0 */ },
3511 	{ 1, 1, 1, 1, 1, 1 /* single */ },
3512 };
3513 
3514 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3515 			       struct btrfs_root *extent_root,
3516 			       struct map_lookup **map_ret,
3517 			       u64 *num_bytes_out, u64 *stripe_size_out,
3518 			       u64 start, u64 type)
3519 {
3520 	struct btrfs_fs_info *info = extent_root->fs_info;
3521 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3522 	struct list_head *cur;
3523 	struct map_lookup *map = NULL;
3524 	struct extent_map_tree *em_tree;
3525 	struct extent_map *em;
3526 	struct btrfs_device_info *devices_info = NULL;
3527 	u64 total_avail;
3528 	int num_stripes;	/* total number of stripes to allocate */
3529 	int sub_stripes;	/* sub_stripes info for map */
3530 	int dev_stripes;	/* stripes per dev */
3531 	int devs_max;		/* max devs to use */
3532 	int devs_min;		/* min devs needed */
3533 	int devs_increment;	/* ndevs has to be a multiple of this */
3534 	int ncopies;		/* how many copies the data has */
3535 	int ret;
3536 	u64 max_stripe_size;
3537 	u64 max_chunk_size;
3538 	u64 stripe_size;
3539 	u64 num_bytes;
3540 	int ndevs;
3541 	int i;
3542 	int j;
3543 	int index;
3544 
3545 	BUG_ON(!alloc_profile_is_valid(type, 0));
3546 
3547 	if (list_empty(&fs_devices->alloc_list))
3548 		return -ENOSPC;
3549 
3550 	index = __get_raid_index(type);
3551 
3552 	sub_stripes = btrfs_raid_array[index].sub_stripes;
3553 	dev_stripes = btrfs_raid_array[index].dev_stripes;
3554 	devs_max = btrfs_raid_array[index].devs_max;
3555 	devs_min = btrfs_raid_array[index].devs_min;
3556 	devs_increment = btrfs_raid_array[index].devs_increment;
3557 	ncopies = btrfs_raid_array[index].ncopies;
3558 
3559 	if (type & BTRFS_BLOCK_GROUP_DATA) {
3560 		max_stripe_size = 1024 * 1024 * 1024;
3561 		max_chunk_size = 10 * max_stripe_size;
3562 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3563 		/* for larger filesystems, use larger metadata chunks */
3564 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3565 			max_stripe_size = 1024 * 1024 * 1024;
3566 		else
3567 			max_stripe_size = 256 * 1024 * 1024;
3568 		max_chunk_size = max_stripe_size;
3569 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3570 		max_stripe_size = 32 * 1024 * 1024;
3571 		max_chunk_size = 2 * max_stripe_size;
3572 	} else {
3573 		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3574 		       (unsigned long long)type);
3575 		BUG_ON(1);
3576 	}
3577 
3578 	/* we don't want a chunk larger than 10% of writeable space */
3579 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3580 			     max_chunk_size);
3581 
3582 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3583 			       GFP_NOFS);
3584 	if (!devices_info)
3585 		return -ENOMEM;
3586 
3587 	cur = fs_devices->alloc_list.next;
3588 
3589 	/*
3590 	 * in the first pass through the devices list, we gather information
3591 	 * about the available holes on each device.
3592 	 */
3593 	ndevs = 0;
3594 	while (cur != &fs_devices->alloc_list) {
3595 		struct btrfs_device *device;
3596 		u64 max_avail;
3597 		u64 dev_offset;
3598 
3599 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3600 
3601 		cur = cur->next;
3602 
3603 		if (!device->writeable) {
3604 			WARN(1, KERN_ERR
3605 			       "btrfs: read-only device in alloc_list\n");
3606 			continue;
3607 		}
3608 
3609 		if (!device->in_fs_metadata ||
3610 		    device->is_tgtdev_for_dev_replace)
3611 			continue;
3612 
3613 		if (device->total_bytes > device->bytes_used)
3614 			total_avail = device->total_bytes - device->bytes_used;
3615 		else
3616 			total_avail = 0;
3617 
3618 		/* If there is no space on this device, skip it. */
3619 		if (total_avail == 0)
3620 			continue;
3621 
3622 		ret = find_free_dev_extent(device,
3623 					   max_stripe_size * dev_stripes,
3624 					   &dev_offset, &max_avail);
3625 		if (ret && ret != -ENOSPC)
3626 			goto error;
3627 
3628 		if (ret == 0)
3629 			max_avail = max_stripe_size * dev_stripes;
3630 
3631 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3632 			continue;
3633 
3634 		devices_info[ndevs].dev_offset = dev_offset;
3635 		devices_info[ndevs].max_avail = max_avail;
3636 		devices_info[ndevs].total_avail = total_avail;
3637 		devices_info[ndevs].dev = device;
3638 		++ndevs;
3639 		WARN_ON(ndevs > fs_devices->rw_devices);
3640 	}
3641 
3642 	/*
3643 	 * now sort the devices by hole size / available space
3644 	 */
3645 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3646 	     btrfs_cmp_device_info, NULL);
3647 
3648 	/* round down to number of usable stripes */
3649 	ndevs -= ndevs % devs_increment;
3650 
3651 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3652 		ret = -ENOSPC;
3653 		goto error;
3654 	}
3655 
3656 	if (devs_max && ndevs > devs_max)
3657 		ndevs = devs_max;
3658 	/*
3659 	 * the primary goal is to maximize the number of stripes, so use as many
3660 	 * devices as possible, even if the stripes are not maximum sized.
3661 	 */
3662 	stripe_size = devices_info[ndevs-1].max_avail;
3663 	num_stripes = ndevs * dev_stripes;
3664 
3665 	if (stripe_size * ndevs > max_chunk_size * ncopies) {
3666 		stripe_size = max_chunk_size * ncopies;
3667 		do_div(stripe_size, ndevs);
3668 	}
3669 
3670 	do_div(stripe_size, dev_stripes);
3671 
3672 	/* align to BTRFS_STRIPE_LEN */
3673 	do_div(stripe_size, BTRFS_STRIPE_LEN);
3674 	stripe_size *= BTRFS_STRIPE_LEN;
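	/*
	 * Worked example (illustrative numbers): raid10 over 4 devices
	 * (dev_stripes = 1, ncopies = 2), smallest max_avail = 600MiB,
	 * max_chunk_size = 1GiB.  600MiB * 4 > 1GiB * 2, so stripe_size
	 * becomes 2GiB / 4 = 512MiB (already BTRFS_STRIPE_LEN aligned);
	 * num_bytes below is then 512MiB * (4 / 2) = 1GiB.
	 */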
3675 
3676 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3677 	if (!map) {
3678 		ret = -ENOMEM;
3679 		goto error;
3680 	}
3681 	map->num_stripes = num_stripes;
3682 
3683 	for (i = 0; i < ndevs; ++i) {
3684 		for (j = 0; j < dev_stripes; ++j) {
3685 			int s = i * dev_stripes + j;
3686 			map->stripes[s].dev = devices_info[i].dev;
3687 			map->stripes[s].physical = devices_info[i].dev_offset +
3688 						   j * stripe_size;
3689 		}
3690 	}
3691 	map->sector_size = extent_root->sectorsize;
3692 	map->stripe_len = BTRFS_STRIPE_LEN;
3693 	map->io_align = BTRFS_STRIPE_LEN;
3694 	map->io_width = BTRFS_STRIPE_LEN;
3695 	map->type = type;
3696 	map->sub_stripes = sub_stripes;
3697 
3698 	*map_ret = map;
3699 	num_bytes = stripe_size * (num_stripes / ncopies);
3700 
3701 	*stripe_size_out = stripe_size;
3702 	*num_bytes_out = num_bytes;
3703 
3704 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3705 
3706 	em = alloc_extent_map();
3707 	if (!em) {
3708 		ret = -ENOMEM;
3709 		goto error;
3710 	}
3711 	em->bdev = (struct block_device *)map;
3712 	em->start = start;
3713 	em->len = num_bytes;
3714 	em->block_start = 0;
3715 	em->block_len = em->len;
3716 
3717 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3718 	write_lock(&em_tree->lock);
3719 	ret = add_extent_mapping(em_tree, em);
3720 	write_unlock(&em_tree->lock);
3721 	free_extent_map(em);
3722 	if (ret)
3723 		goto error;
3724 
3725 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
3726 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3727 				     start, num_bytes);
3728 	if (ret)
3729 		goto error;
3730 
3731 	for (i = 0; i < map->num_stripes; ++i) {
3732 		struct btrfs_device *device;
3733 		u64 dev_offset;
3734 
3735 		device = map->stripes[i].dev;
3736 		dev_offset = map->stripes[i].physical;
3737 
3738 		ret = btrfs_alloc_dev_extent(trans, device,
3739 				info->chunk_root->root_key.objectid,
3740 				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3741 				start, dev_offset, stripe_size);
3742 		if (ret) {
3743 			btrfs_abort_transaction(trans, extent_root, ret);
3744 			goto error;
3745 		}
3746 	}
3747 
3748 	kfree(devices_info);
3749 	return 0;
3750 
3751 error:
3752 	kfree(map);
3753 	kfree(devices_info);
3754 	return ret;
3755 }
3756 
3757 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3758 				struct btrfs_root *extent_root,
3759 				struct map_lookup *map, u64 chunk_offset,
3760 				u64 chunk_size, u64 stripe_size)
3761 {
3762 	u64 dev_offset;
3763 	struct btrfs_key key;
3764 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3765 	struct btrfs_device *device;
3766 	struct btrfs_chunk *chunk;
3767 	struct btrfs_stripe *stripe;
3768 	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3769 	int index = 0;
3770 	int ret;
3771 
3772 	chunk = kzalloc(item_size, GFP_NOFS);
3773 	if (!chunk)
3774 		return -ENOMEM;
3775 
3776 	index = 0;
3777 	while (index < map->num_stripes) {
3778 		device = map->stripes[index].dev;
3779 		device->bytes_used += stripe_size;
3780 		ret = btrfs_update_device(trans, device);
3781 		if (ret)
3782 			goto out_free;
3783 		index++;
3784 	}
3785 
3786 	spin_lock(&extent_root->fs_info->free_chunk_lock);
3787 	extent_root->fs_info->free_chunk_space -= (stripe_size *
3788 						   map->num_stripes);
3789 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
3790 
3791 	index = 0;
3792 	stripe = &chunk->stripe;
3793 	while (index < map->num_stripes) {
3794 		device = map->stripes[index].dev;
3795 		dev_offset = map->stripes[index].physical;
3796 
3797 		btrfs_set_stack_stripe_devid(stripe, device->devid);
3798 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
3799 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3800 		stripe++;
3801 		index++;
3802 	}
3803 
3804 	btrfs_set_stack_chunk_length(chunk, chunk_size);
3805 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3806 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3807 	btrfs_set_stack_chunk_type(chunk, map->type);
3808 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3809 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3810 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3811 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3812 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3813 
3814 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3815 	key.type = BTRFS_CHUNK_ITEM_KEY;
3816 	key.offset = chunk_offset;
3817 
3818 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3819 
3820 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3821 		/*
3822 		 * TODO: Cleanup of inserted chunk root in case of
3823 		 * failure.
3824 		 */
3825 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3826 					     item_size);
3827 	}
3828 
3829 out_free:
3830 	kfree(chunk);
3831 	return ret;
3832 }
3833 
3834 /*
3835  * Chunk allocation falls into two parts. The first part does the work
3836  * that makes the newly allocated chunk usable, but does not do any
3837  * operation that modifies the chunk tree. The second part does the work
3838  * that requires modifying the chunk tree. This division is important
3839  * for the bootstrap process of adding storage to a seed btrfs.
3840  */
3841 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3842 		      struct btrfs_root *extent_root, u64 type)
3843 {
3844 	u64 chunk_offset;
3845 	u64 chunk_size;
3846 	u64 stripe_size;
3847 	struct map_lookup *map;
3848 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3849 	int ret;
3850 
3851 	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3852 			      &chunk_offset);
3853 	if (ret)
3854 		return ret;
3855 
3856 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3857 				  &stripe_size, chunk_offset, type);
3858 	if (ret)
3859 		return ret;
3860 
3861 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3862 				   chunk_size, stripe_size);
3863 	if (ret)
3864 		return ret;
3865 	return 0;
3866 }
3867 
3868 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3869 					 struct btrfs_root *root,
3870 					 struct btrfs_device *device)
3871 {
3872 	u64 chunk_offset;
3873 	u64 sys_chunk_offset;
3874 	u64 chunk_size;
3875 	u64 sys_chunk_size;
3876 	u64 stripe_size;
3877 	u64 sys_stripe_size;
3878 	u64 alloc_profile;
3879 	struct map_lookup *map;
3880 	struct map_lookup *sys_map;
3881 	struct btrfs_fs_info *fs_info = root->fs_info;
3882 	struct btrfs_root *extent_root = fs_info->extent_root;
3883 	int ret;
3884 
3885 	ret = find_next_chunk(fs_info->chunk_root,
3886 			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3887 	if (ret)
3888 		return ret;
3889 
3890 	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3891 				fs_info->avail_metadata_alloc_bits;
3892 	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3893 
3894 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3895 				  &stripe_size, chunk_offset, alloc_profile);
3896 	if (ret)
3897 		return ret;
3898 
3899 	sys_chunk_offset = chunk_offset + chunk_size;
3900 
3901 	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3902 				fs_info->avail_system_alloc_bits;
3903 	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3904 
3905 	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3906 				  &sys_chunk_size, &sys_stripe_size,
3907 				  sys_chunk_offset, alloc_profile);
3908 	if (ret) {
3909 		btrfs_abort_transaction(trans, root, ret);
3910 		goto out;
3911 	}
3912 
3913 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3914 	if (ret) {
3915 		btrfs_abort_transaction(trans, root, ret);
3916 		goto out;
3917 	}
3918 
3919 	/*
3920 	 * Modifying the chunk tree requires allocating new blocks from both
3921 	 * the system block group and the metadata block group, so we can
3922 	 * only do operations that modify the chunk tree after both block
3923 	 * groups have been created.
3924 	 */
3925 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3926 				   chunk_size, stripe_size);
3927 	if (ret) {
3928 		btrfs_abort_transaction(trans, root, ret);
3929 		goto out;
3930 	}
3931 
3932 	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3933 				   sys_chunk_offset, sys_chunk_size,
3934 				   sys_stripe_size);
3935 	if (ret)
3936 		btrfs_abort_transaction(trans, root, ret);
3937 
3938 out:
3939 
3940 	return ret;
3941 }
3942 
3943 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3944 {
3945 	struct extent_map *em;
3946 	struct map_lookup *map;
3947 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3948 	int readonly = 0;
3949 	int i;
3950 
3951 	read_lock(&map_tree->map_tree.lock);
3952 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3953 	read_unlock(&map_tree->map_tree.lock);
3954 	if (!em)
3955 		return 1;
3956 
3957 	if (btrfs_test_opt(root, DEGRADED)) {
3958 		free_extent_map(em);
3959 		return 0;
3960 	}
3961 
3962 	map = (struct map_lookup *)em->bdev;
3963 	for (i = 0; i < map->num_stripes; i++) {
3964 		if (!map->stripes[i].dev->writeable) {
3965 			readonly = 1;
3966 			break;
3967 		}
3968 	}
3969 	free_extent_map(em);
3970 	return readonly;
3971 }
3972 
3973 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3974 {
3975 	extent_map_tree_init(&tree->map_tree);
3976 }
3977 
3978 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3979 {
3980 	struct extent_map *em;
3981 
3982 	while (1) {
3983 		write_lock(&tree->map_tree.lock);
3984 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3985 		if (em)
3986 			remove_extent_mapping(&tree->map_tree, em);
3987 		write_unlock(&tree->map_tree.lock);
3988 		if (!em)
3989 			break;
3990 		kfree(em->bdev);
3991 		/* once for us */
3992 		free_extent_map(em);
3993 		/* once for the tree */
3994 		free_extent_map(em);
3995 	}
3996 }
3997 
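/*
 * Editor's note: btrfs_num_copies() below reports how many independent
 * copies of the logical range exist: num_stripes for DUP and RAID1,
 * sub_stripes for RAID10, and 1 otherwise, plus one extra copy while a
 * device replace is ongoing (the replace target mirrors everything left
 * of the copy cursor).
 */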
3998 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3999 {
4000 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4001 	struct extent_map *em;
4002 	struct map_lookup *map;
4003 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4004 	int ret;
4005 
4006 	read_lock(&em_tree->lock);
4007 	em = lookup_extent_mapping(em_tree, logical, len);
4008 	read_unlock(&em_tree->lock);
4009 	BUG_ON(!em);
4010 
4011 	BUG_ON(em->start > logical || em->start + em->len < logical);
4012 	map = (struct map_lookup *)em->bdev;
4013 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4014 		ret = map->num_stripes;
4015 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4016 		ret = map->sub_stripes;
4017 	else
4018 		ret = 1;
4019 	free_extent_map(em);
4020 
4021 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4022 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4023 		ret++;
4024 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4025 
4026 	return ret;
4027 }
4028 
4029 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4030 			    struct map_lookup *map, int first, int num,
4031 			    int optimal, int dev_replace_is_ongoing)
4032 {
4033 	int i;
4034 	int tolerance;
4035 	struct btrfs_device *srcdev;
4036 
4037 	if (dev_replace_is_ongoing &&
4038 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4039 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4040 		srcdev = fs_info->dev_replace.srcdev;
4041 	else
4042 		srcdev = NULL;
4043 
4044 	/*
4045 	 * try to avoid the drive that is the source drive for a
4046 	 * dev-replace procedure, only choose it if no other non-missing
4047 	 * mirror is available
4048 	 */
4049 	for (tolerance = 0; tolerance < 2; tolerance++) {
4050 		if (map->stripes[optimal].dev->bdev &&
4051 		    (tolerance || map->stripes[optimal].dev != srcdev))
4052 			return optimal;
4053 		for (i = first; i < first + num; i++) {
4054 			if (map->stripes[i].dev->bdev &&
4055 			    (tolerance || map->stripes[i].dev != srcdev))
4056 				return i;
4057 		}
4058 	}
4059 
4060 	/* we couldn't find one that doesn't fail.  Just return something
4061 	 * and the io error handling code will clean up eventually
4062 	 */
4063 	return optimal;
4064 }
4065 
4066 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4067 			     u64 logical, u64 *length,
4068 			     struct btrfs_bio **bbio_ret,
4069 			     int mirror_num)
4070 {
4071 	struct extent_map *em;
4072 	struct map_lookup *map;
4073 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4074 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4075 	u64 offset;
4076 	u64 stripe_offset;
4077 	u64 stripe_end_offset;
4078 	u64 stripe_nr;
4079 	u64 stripe_nr_orig;
4080 	u64 stripe_nr_end;
4081 	int stripe_index;
4082 	int i;
4083 	int ret = 0;
4084 	int num_stripes;
4085 	int max_errors = 0;
4086 	struct btrfs_bio *bbio = NULL;
4087 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4088 	int dev_replace_is_ongoing = 0;
4089 	int num_alloc_stripes;
4090 	int patch_the_first_stripe_for_dev_replace = 0;
4091 	u64 physical_to_patch_in_first_stripe = 0;
4092 
4093 	read_lock(&em_tree->lock);
4094 	em = lookup_extent_mapping(em_tree, logical, *length);
4095 	read_unlock(&em_tree->lock);
4096 
4097 	if (!em) {
4098 		printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
4099 		       (unsigned long long)logical,
4100 		       (unsigned long long)*length);
4101 		BUG();
4102 	}
4103 
4104 	BUG_ON(em->start > logical || em->start + em->len < logical);
4105 	map = (struct map_lookup *)em->bdev;
4106 	offset = logical - em->start;
4107 
4108 	stripe_nr = offset;
4109 	/*
4110 	 * stripe_nr counts the total number of stripes we have to stride
4111 	 * to get to this block
4112 	 */
4113 	do_div(stripe_nr, map->stripe_len);
4114 
4115 	stripe_offset = stripe_nr * map->stripe_len;
4116 	BUG_ON(offset < stripe_offset);
4117 
4118 	/* stripe_offset is the offset of this block in its stripe */
4119 	stripe_offset = offset - stripe_offset;
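	/*
	 * Editor's note, with hypothetical numbers: stripe_len == 64KiB and
	 * offset == 200KiB give stripe_nr == 3 (the fourth stripe) and
	 * stripe_offset == 8KiB into that stripe.
	 */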
4120 
4121 	if (rw & REQ_DISCARD)
4122 		*length = min_t(u64, em->len - offset, *length);
4123 	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4124 		/* we limit the length of each bio to what fits in a stripe */
4125 		*length = min_t(u64, em->len - offset,
4126 				map->stripe_len - stripe_offset);
4127 	} else {
4128 		*length = em->len - offset;
4129 	}
4130 
4131 	if (!bbio_ret)
4132 		goto out;
4133 
4134 	btrfs_dev_replace_lock(dev_replace);
4135 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4136 	if (!dev_replace_is_ongoing)
4137 		btrfs_dev_replace_unlock(dev_replace);
4138 
4139 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4140 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4141 	    dev_replace->tgtdev != NULL) {
4142 		/*
4143 		 * in the dev-replace case, for the repair case (the only
4144 		 * case where the mirror is selected explicitly when
4145 		 * calling btrfs_map_block), blocks left of the left cursor
4146 		 * can also be read from the target drive.
4147 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
4148 		 * the last one to the array of stripes. For READ, it also
4149 		 * needs to be supported using the same mirror number.
4150 		 * If the requested block is not left of the left cursor,
4151 		 * EIO is returned. This can happen because btrfs_num_copies()
4152 		 * returns one more in the dev-replace case.
4153 		 */
4154 		u64 tmp_length = *length;
4155 		struct btrfs_bio *tmp_bbio = NULL;
4156 		int tmp_num_stripes;
4157 		u64 srcdev_devid = dev_replace->srcdev->devid;
4158 		int index_srcdev = 0;
4159 		int found = 0;
4160 		u64 physical_of_found = 0;
4161 
4162 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4163 			     logical, &tmp_length, &tmp_bbio, 0);
4164 		if (ret) {
4165 			WARN_ON(tmp_bbio != NULL);
4166 			goto out;
4167 		}
4168 
4169 		tmp_num_stripes = tmp_bbio->num_stripes;
4170 		if (mirror_num > tmp_num_stripes) {
4171 			/*
4172 			 * REQ_GET_READ_MIRRORS does not contain this
4173 			 * mirror, which means that the requested area
4174 			 * is not left of the left cursor
4175 			 */
4176 			ret = -EIO;
4177 			kfree(tmp_bbio);
4178 			goto out;
4179 		}
4180 
4181 		/*
4182 		 * process the rest of the function using the mirror_num
4183 		 * of the source drive. Therefore look it up first.
4184 		 * At the end, patch the device pointer to the one of the
4185 		 * target drive.
4186 		 */
4187 		for (i = 0; i < tmp_num_stripes; i++) {
4188 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4189 				/*
4190 				 * In case of DUP, in order to keep it
4191 				 * simple, only add the mirror with the
4192 				 * lowest physical address
4193 				 */
4194 				if (found &&
4195 				    physical_of_found <=
4196 				     tmp_bbio->stripes[i].physical)
4197 					continue;
4198 				index_srcdev = i;
4199 				found = 1;
4200 				physical_of_found =
4201 					tmp_bbio->stripes[i].physical;
4202 			}
4203 		}
4204 
4205 		if (found) {
4206 			mirror_num = index_srcdev + 1;
4207 			patch_the_first_stripe_for_dev_replace = 1;
4208 			physical_to_patch_in_first_stripe = physical_of_found;
4209 		} else {
4210 			WARN_ON(1);
4211 			ret = -EIO;
4212 			kfree(tmp_bbio);
4213 			goto out;
4214 		}
4215 
4216 		kfree(tmp_bbio);
4217 	} else if (mirror_num > map->num_stripes) {
4218 		mirror_num = 0;
4219 	}
4220 
4221 	num_stripes = 1;
4222 	stripe_index = 0;
4223 	stripe_nr_orig = stripe_nr;
4224 	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
4225 			(~(map->stripe_len - 1));
4226 	do_div(stripe_nr_end, map->stripe_len);
4227 	stripe_end_offset = stripe_nr_end * map->stripe_len -
4228 			    (offset + *length);
4229 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4230 		if (rw & REQ_DISCARD)
4231 			num_stripes = min_t(u64, map->num_stripes,
4232 					    stripe_nr_end - stripe_nr_orig);
4233 		stripe_index = do_div(stripe_nr, map->num_stripes);
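		/*
		 * Editor's note: do_div() leaves the quotient in stripe_nr
		 * and returns the remainder, so with hypothetical values
		 * num_stripes == 4 and stripe_nr == 10, stripe_nr becomes 2
		 * (full stripes to walk on the chosen device) and
		 * stripe_index becomes 2 (which device starts the stripe).
		 */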
4234 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4235 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4236 			num_stripes = map->num_stripes;
4237 		else if (mirror_num)
4238 			stripe_index = mirror_num - 1;
4239 		else {
4240 			stripe_index = find_live_mirror(fs_info, map, 0,
4241 					    map->num_stripes,
4242 					    current->pid % map->num_stripes,
4243 					    dev_replace_is_ongoing);
4244 			mirror_num = stripe_index + 1;
4245 		}
4246 
4247 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4248 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4249 			num_stripes = map->num_stripes;
4250 		} else if (mirror_num) {
4251 			stripe_index = mirror_num - 1;
4252 		} else {
4253 			mirror_num = 1;
4254 		}
4255 
4256 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4257 		int factor = map->num_stripes / map->sub_stripes;
4258 
4259 		stripe_index = do_div(stripe_nr, factor);
4260 		stripe_index *= map->sub_stripes;
4261 
4262 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4263 			num_stripes = map->sub_stripes;
4264 		else if (rw & REQ_DISCARD)
4265 			num_stripes = min_t(u64, map->sub_stripes *
4266 					    (stripe_nr_end - stripe_nr_orig),
4267 					    map->num_stripes);
4268 		else if (mirror_num)
4269 			stripe_index += mirror_num - 1;
4270 		else {
4271 			int old_stripe_index = stripe_index;
4272 			stripe_index = find_live_mirror(fs_info, map,
4273 					      stripe_index,
4274 					      map->sub_stripes, stripe_index +
4275 					      current->pid % map->sub_stripes,
4276 					      dev_replace_is_ongoing);
4277 			mirror_num = stripe_index - old_stripe_index + 1;
4278 		}
4279 	} else {
4280 		/*
4281 		 * after this do_div call, stripe_nr is the number of stripes
4282 		 * on this device we have to walk to find the data, and
4283 		 * stripe_index is the number of our device in the stripe array
4284 		 */
4285 		stripe_index = do_div(stripe_nr, map->num_stripes);
4286 		mirror_num = stripe_index + 1;
4287 	}
4288 	BUG_ON(stripe_index >= map->num_stripes);
4289 
4290 	num_alloc_stripes = num_stripes;
4291 	if (dev_replace_is_ongoing) {
4292 		if (rw & (REQ_WRITE | REQ_DISCARD))
4293 			num_alloc_stripes <<= 1;
4294 		if (rw & REQ_GET_READ_MIRRORS)
4295 			num_alloc_stripes++;
4296 	}
4297 	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4298 	if (!bbio) {
4299 		ret = -ENOMEM;
4300 		goto out;
4301 	}
4302 	atomic_set(&bbio->error, 0);
4303 
4304 	if (rw & REQ_DISCARD) {
4305 		int factor = 0;
4306 		int sub_stripes = 0;
4307 		u64 stripes_per_dev = 0;
4308 		u32 remaining_stripes = 0;
4309 		u32 last_stripe = 0;
4310 
4311 		if (map->type &
4312 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4313 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4314 				sub_stripes = 1;
4315 			else
4316 				sub_stripes = map->sub_stripes;
4317 
4318 			factor = map->num_stripes / sub_stripes;
4319 			stripes_per_dev = div_u64_rem(stripe_nr_end -
4320 						      stripe_nr_orig,
4321 						      factor,
4322 						      &remaining_stripes);
4323 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4324 			last_stripe *= sub_stripes;
4325 		}
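		/*
		 * Editor's note, a hypothetical RAID10 discard: num_stripes
		 * == 4 and sub_stripes == 2 give factor == 2. A range of
		 * stripe_nr_end - stripe_nr_orig == 5 stripes then yields
		 * stripes_per_dev == 2 with remaining_stripes == 1, i.e. one
		 * extra stripe spills onto the first device pair, and
		 * last_stripe marks the pair holding the final stripe.
		 */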
4326 
4327 		for (i = 0; i < num_stripes; i++) {
4328 			bbio->stripes[i].physical =
4329 				map->stripes[stripe_index].physical +
4330 				stripe_offset + stripe_nr * map->stripe_len;
4331 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4332 
4333 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4334 					 BTRFS_BLOCK_GROUP_RAID10)) {
4335 				bbio->stripes[i].length = stripes_per_dev *
4336 							  map->stripe_len;
4337 
4338 				if (i / sub_stripes < remaining_stripes)
4339 					bbio->stripes[i].length +=
4340 						map->stripe_len;
4341 
4342 				/*
4343 				 * Special for the first stripe and
4344 				 * the last stripe:
4345 				 *
4346 				 * |-------|...|-------|
4347 				 *     |----------|
4348 				 *    off     end_off
4349 				 */
4350 				if (i < sub_stripes)
4351 					bbio->stripes[i].length -=
4352 						stripe_offset;
4353 
4354 				if (stripe_index >= last_stripe &&
4355 				    stripe_index <= (last_stripe +
4356 						     sub_stripes - 1))
4357 					bbio->stripes[i].length -=
4358 						stripe_end_offset;
4359 
4360 				if (i == sub_stripes - 1)
4361 					stripe_offset = 0;
4362 			} else
4363 				bbio->stripes[i].length = *length;
4364 
4365 			stripe_index++;
4366 			if (stripe_index == map->num_stripes) {
4367 				/* This could only happen for RAID0/10 */
4368 				stripe_index = 0;
4369 				stripe_nr++;
4370 			}
4371 		}
4372 	} else {
4373 		for (i = 0; i < num_stripes; i++) {
4374 			bbio->stripes[i].physical =
4375 				map->stripes[stripe_index].physical +
4376 				stripe_offset +
4377 				stripe_nr * map->stripe_len;
4378 			bbio->stripes[i].dev =
4379 				map->stripes[stripe_index].dev;
4380 			stripe_index++;
4381 		}
4382 	}
4383 
4384 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4385 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4386 				 BTRFS_BLOCK_GROUP_RAID10 |
4387 				 BTRFS_BLOCK_GROUP_DUP)) {
4388 			max_errors = 1;
4389 		}
4390 	}
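	/*
	 * Editor's note: max_errors == 1 here because RAID1, RAID10 and DUP
	 * all keep exactly two copies in this code base, so a write can
	 * lose one stripe and still leave a complete copy on disk.
	 */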
4391 
4392 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4393 	    dev_replace->tgtdev != NULL) {
4394 		int index_where_to_add;
4395 		u64 srcdev_devid = dev_replace->srcdev->devid;
4396 
4397 		/*
4398 		 * duplicate the write operations while the dev replace
4399 		 * procedure is running. Since the copying of the old disk
4400 		 * to the new disk takes place at run time while the
4401 		 * filesystem is mounted writable, the regular write
4402 		 * operations to the old disk have to be duplicated to go
4403 		 * to the new disk as well.
4404 		 * Note that device->missing is handled by the caller, and
4405 		 * that the write to the old disk is already set up in the
4406 		 * stripes array.
4407 		 */
4408 		index_where_to_add = num_stripes;
4409 		for (i = 0; i < num_stripes; i++) {
4410 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
4411 				/* write to new disk, too */
4412 				struct btrfs_bio_stripe *new =
4413 					bbio->stripes + index_where_to_add;
4414 				struct btrfs_bio_stripe *old =
4415 					bbio->stripes + i;
4416 
4417 				new->physical = old->physical;
4418 				new->length = old->length;
4419 				new->dev = dev_replace->tgtdev;
4420 				index_where_to_add++;
4421 				max_errors++;
4422 			}
4423 		}
4424 		num_stripes = index_where_to_add;
4425 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4426 		   dev_replace->tgtdev != NULL) {
4427 		u64 srcdev_devid = dev_replace->srcdev->devid;
4428 		int index_srcdev = 0;
4429 		int found = 0;
4430 		u64 physical_of_found = 0;
4431 
4432 		/*
4433 		 * During the dev-replace procedure, the target drive can
4434 		 * also be used to read data in case it is needed to repair
4435 		 * a corrupt block elsewhere. This is possible if the
4436 		 * requested area is left of the left cursor. In this area,
4437 		 * the target drive is a full copy of the source drive.
4438 		 */
4439 		for (i = 0; i < num_stripes; i++) {
4440 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
4441 				/*
4442 				 * In case of DUP, in order to keep it
4443 				 * simple, only add the mirror with the
4444 				 * lowest physical address
4445 				 */
4446 				if (found &&
4447 				    physical_of_found <=
4448 				     bbio->stripes[i].physical)
4449 					continue;
4450 				index_srcdev = i;
4451 				found = 1;
4452 				physical_of_found = bbio->stripes[i].physical;
4453 			}
4454 		}
4455 		if (found) {
4456 			u64 length = map->stripe_len;
4457 
4458 			if (physical_of_found + length <=
4459 			    dev_replace->cursor_left) {
4460 				struct btrfs_bio_stripe *tgtdev_stripe =
4461 					bbio->stripes + num_stripes;
4462 
4463 				tgtdev_stripe->physical = physical_of_found;
4464 				tgtdev_stripe->length =
4465 					bbio->stripes[index_srcdev].length;
4466 				tgtdev_stripe->dev = dev_replace->tgtdev;
4467 
4468 				num_stripes++;
4469 			}
4470 		}
4471 	}
4472 
4473 	*bbio_ret = bbio;
4474 	bbio->num_stripes = num_stripes;
4475 	bbio->max_errors = max_errors;
4476 	bbio->mirror_num = mirror_num;
4477 
4478 	/*
4479 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
4480 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
4481 	 * available as a mirror
4482 	 */
4483 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4484 		WARN_ON(num_stripes > 1);
4485 		bbio->stripes[0].dev = dev_replace->tgtdev;
4486 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4487 		bbio->mirror_num = map->num_stripes + 1;
4488 	}
4489 out:
4490 	if (dev_replace_is_ongoing)
4491 		btrfs_dev_replace_unlock(dev_replace);
4492 	free_extent_map(em);
4493 	return ret;
4494 }
4495 
4496 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4497 		      u64 logical, u64 *length,
4498 		      struct btrfs_bio **bbio_ret, int mirror_num)
4499 {
4500 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4501 				 mirror_num);
4502 }
4503 
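/*
 * Editor's note: btrfs_rmap_block() is the reverse of btrfs_map_block():
 * given a physical byte offset (optionally restricted to one devid), it
 * collects every logical address inside the chunk at chunk_start that
 * maps to that physical location, filtering duplicates before returning
 * the array in *logical.
 */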
4504 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4505 		     u64 chunk_start, u64 physical, u64 devid,
4506 		     u64 **logical, int *naddrs, int *stripe_len)
4507 {
4508 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4509 	struct extent_map *em;
4510 	struct map_lookup *map;
4511 	u64 *buf;
4512 	u64 bytenr;
4513 	u64 length;
4514 	u64 stripe_nr;
4515 	int i, j, nr = 0;
4516 
4517 	read_lock(&em_tree->lock);
4518 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
4519 	read_unlock(&em_tree->lock);
4520 
4521 	BUG_ON(!em || em->start != chunk_start);
4522 	map = (struct map_lookup *)em->bdev;
4523 
4524 	length = em->len;
4525 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4526 		do_div(length, map->num_stripes / map->sub_stripes);
4527 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4528 		do_div(length, map->num_stripes);
4529 
4530 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4531 	BUG_ON(!buf); /* -ENOMEM */
4532 
4533 	for (i = 0; i < map->num_stripes; i++) {
4534 		if (devid && map->stripes[i].dev->devid != devid)
4535 			continue;
4536 		if (map->stripes[i].physical > physical ||
4537 		    map->stripes[i].physical + length <= physical)
4538 			continue;
4539 
4540 		stripe_nr = physical - map->stripes[i].physical;
4541 		do_div(stripe_nr, map->stripe_len);
4542 
4543 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4544 			stripe_nr = stripe_nr * map->num_stripes + i;
4545 			do_div(stripe_nr, map->sub_stripes);
4546 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4547 			stripe_nr = stripe_nr * map->num_stripes + i;
4548 		}
4549 		bytenr = chunk_start + stripe_nr * map->stripe_len;
4550 		WARN_ON(nr >= map->num_stripes);
4551 		for (j = 0; j < nr; j++) {
4552 			if (buf[j] == bytenr)
4553 				break;
4554 		}
4555 		if (j == nr) {
4556 			WARN_ON(nr >= map->num_stripes);
4557 			buf[nr++] = bytenr;
4558 		}
4559 	}
4560 
4561 	*logical = buf;
4562 	*naddrs = nr;
4563 	*stripe_len = map->stripe_len;
4564 
4565 	free_extent_map(em);
4566 	return 0;
4567 }
4568 
4569 static void *merge_stripe_index_into_bio_private(void *bi_private,
4570 						 unsigned int stripe_index)
4571 {
4572 	/*
4573 	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4574 	 * at most 1.
4575 	 * The alternative solution (instead of stealing bits from the
4576 	 * pointer) would be to allocate an intermediate structure
4577 	 * that contains the old private pointer plus the stripe_index.
4578 	 */
4579 	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4580 	BUG_ON(stripe_index > 3);
4581 	return (void *)(((uintptr_t)bi_private) | stripe_index);
4582 }
4583 
4584 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4585 {
4586 	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4587 }
4588 
4589 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4590 {
4591 	return (unsigned int)((uintptr_t)bi_private) & 3;
4592 }
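
/*
 * Editor's note, a minimal round-trip sketch of the three helpers above
 * (hypothetical usage; a kmalloc'd bbio is at least 4-byte aligned, so
 * the two low pointer bits are free to carry the stripe index):
 *
 *	void *priv = merge_stripe_index_into_bio_private(bbio, 2);
 *	BUG_ON(extract_bbio_from_bio_private(priv) != bbio);
 *	BUG_ON(extract_stripe_index_from_bio_private(priv) != 2);
 */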
4593 
4594 static void btrfs_end_bio(struct bio *bio, int err)
4595 {
4596 	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4597 	int is_orig_bio = 0;
4598 
4599 	if (err) {
4600 		atomic_inc(&bbio->error);
4601 		if (err == -EIO || err == -EREMOTEIO) {
4602 			unsigned int stripe_index =
4603 				extract_stripe_index_from_bio_private(
4604 					bio->bi_private);
4605 			struct btrfs_device *dev;
4606 
4607 			BUG_ON(stripe_index >= bbio->num_stripes);
4608 			dev = bbio->stripes[stripe_index].dev;
4609 			if (dev->bdev) {
4610 				if (bio->bi_rw & WRITE)
4611 					btrfs_dev_stat_inc(dev,
4612 						BTRFS_DEV_STAT_WRITE_ERRS);
4613 				else
4614 					btrfs_dev_stat_inc(dev,
4615 						BTRFS_DEV_STAT_READ_ERRS);
4616 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4617 					btrfs_dev_stat_inc(dev,
4618 						BTRFS_DEV_STAT_FLUSH_ERRS);
4619 				btrfs_dev_stat_print_on_error(dev);
4620 			}
4621 		}
4622 	}
4623 
4624 	if (bio == bbio->orig_bio)
4625 		is_orig_bio = 1;
4626 
4627 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
4628 		if (!is_orig_bio) {
4629 			bio_put(bio);
4630 			bio = bbio->orig_bio;
4631 		}
4632 		bio->bi_private = bbio->private;
4633 		bio->bi_end_io = bbio->end_io;
4634 		bio->bi_bdev = (struct block_device *)
4635 					(unsigned long)bbio->mirror_num;
4636 		/* only send an error to the higher layers if it is
4637 		 * beyond the tolerance of the multi-bio
4638 		 */
4639 		if (atomic_read(&bbio->error) > bbio->max_errors) {
4640 			err = -EIO;
4641 		} else {
4642 			/*
4643 			 * this bio is actually up to date, we didn't
4644 			 * go over the max number of errors
4645 			 */
4646 			set_bit(BIO_UPTODATE, &bio->bi_flags);
4647 			err = 0;
4648 		}
4649 		kfree(bbio);
4650 
4651 		bio_endio(bio, err);
4652 	} else if (!is_orig_bio) {
4653 		bio_put(bio);
4654 	}
4655 }
4656 
4657 struct async_sched {
4658 	struct bio *bio;
4659 	int rw;
4660 	struct btrfs_fs_info *info;
4661 	struct btrfs_work work;
4662 };
4663 
4664 /*
4665  * see run_scheduled_bios for a description of why bios are collected for
4666  * async submit.
4667  *
4668  * This will add one bio to the pending list for a device and make sure
4669  * the work struct is scheduled.
4670  */
4671 static noinline void schedule_bio(struct btrfs_root *root,
4672 				 struct btrfs_device *device,
4673 				 int rw, struct bio *bio)
4674 {
4675 	int should_queue = 1;
4676 	struct btrfs_pending_bios *pending_bios;
4677 
4678 	/* don't bother with additional async steps for reads, right now */
4679 	if (!(rw & REQ_WRITE)) {
4680 		bio_get(bio);
4681 		btrfsic_submit_bio(rw, bio);
4682 		bio_put(bio);
4683 		return;
4684 	}
4685 
4686 	/*
4687 	 * nr_async_bios allows us to reliably return congestion to the
4688 	 * higher layers.  Otherwise, the async bio makes it appear we have
4689 	 * made progress against dirty pages when we've really just put it
4690 	 * on a queue for later
4691 	 */
4692 	atomic_inc(&root->fs_info->nr_async_bios);
4693 	WARN_ON(bio->bi_next);
4694 	bio->bi_next = NULL;
4695 	bio->bi_rw |= rw;
4696 
4697 	spin_lock(&device->io_lock);
4698 	if (bio->bi_rw & REQ_SYNC)
4699 		pending_bios = &device->pending_sync_bios;
4700 	else
4701 		pending_bios = &device->pending_bios;
4702 
4703 	if (pending_bios->tail)
4704 		pending_bios->tail->bi_next = bio;
4705 
4706 	pending_bios->tail = bio;
4707 	if (!pending_bios->head)
4708 		pending_bios->head = bio;
4709 	if (device->running_pending)
4710 		should_queue = 0;
4711 
4712 	spin_unlock(&device->io_lock);
4713 
4714 	if (should_queue)
4715 		btrfs_queue_worker(&root->fs_info->submit_workers,
4716 				   &device->work);
4717 }
4718 
4719 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4720 		       sector_t sector)
4721 {
4722 	struct bio_vec *prev;
4723 	struct request_queue *q = bdev_get_queue(bdev);
4724 	unsigned short max_sectors = queue_max_sectors(q);
4725 	struct bvec_merge_data bvm = {
4726 		.bi_bdev = bdev,
4727 		.bi_sector = sector,
4728 		.bi_rw = bio->bi_rw,
4729 	};
4730 
4731 	if (bio->bi_vcnt == 0) {
4732 		WARN_ON(1);
4733 		return 1;
4734 	}
4735 
4736 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4737 	if ((bio->bi_size >> 9) > max_sectors)
4738 		return 0;
4739 
4740 	if (!q->merge_bvec_fn)
4741 		return 1;
4742 
4743 	bvm.bi_size = bio->bi_size - prev->bv_len;
4744 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4745 		return 0;
4746 	return 1;
4747 }
4748 
4749 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4750 			      struct bio *bio, u64 physical, int dev_nr,
4751 			      int rw, int async)
4752 {
4753 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4754 
4755 	bio->bi_private = bbio;
4756 	bio->bi_private = merge_stripe_index_into_bio_private(
4757 			bio->bi_private, (unsigned int)dev_nr);
4758 	bio->bi_end_io = btrfs_end_bio;
4759 	bio->bi_sector = physical >> 9;
4760 #ifdef DEBUG
4761 	{
4762 		struct rcu_string *name;
4763 
4764 		rcu_read_lock();
4765 		name = rcu_dereference(dev->name);
4766 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4767 			 "(%s id %llu), size=%u\n", rw,
4768 			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4769 			 name->str, dev->devid, bio->bi_size);
4770 		rcu_read_unlock();
4771 	}
4772 #endif
4773 	bio->bi_bdev = dev->bdev;
4774 	if (async)
4775 		schedule_bio(root, dev, rw, bio);
4776 	else
4777 		btrfsic_submit_bio(rw, bio);
4778 }
4779 
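/*
 * Editor's note: this path is taken when bio_size_ok() rejects first_bio
 * for the target device. The pages of first_bio are re-added to freshly
 * allocated bios; whenever bio_add_page() refuses a page, the bio built
 * so far is submitted, the physical offset is advanced past it, and a
 * new bio is started for the remaining pages.
 */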
4780 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4781 			      struct bio *first_bio, struct btrfs_device *dev,
4782 			      int dev_nr, int rw, int async)
4783 {
4784 	struct bio_vec *bvec = first_bio->bi_io_vec;
4785 	struct bio *bio;
4786 	int nr_vecs = bio_get_nr_vecs(dev->bdev);
4787 	u64 physical = bbio->stripes[dev_nr].physical;
4788 
4789 again:
4790 	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4791 	if (!bio)
4792 		return -ENOMEM;
4793 
4794 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4795 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4796 				 bvec->bv_offset) < bvec->bv_len) {
4797 			u64 len = bio->bi_size;
4798 
4799 			atomic_inc(&bbio->stripes_pending);
4800 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4801 					  rw, async);
4802 			physical += len;
4803 			goto again;
4804 		}
4805 		bvec++;
4806 	}
4807 
4808 	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4809 	return 0;
4810 }
4811 
4812 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4813 {
4814 	atomic_inc(&bbio->error);
4815 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
4816 		bio->bi_private = bbio->private;
4817 		bio->bi_end_io = bbio->end_io;
4818 		bio->bi_bdev = (struct block_device *)
4819 			(unsigned long)bbio->mirror_num;
4820 		bio->bi_sector = logical >> 9;
4821 		kfree(bbio);
4822 		bio_endio(bio, -EIO);
4823 	}
4824 }
4825 
4826 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4827 		  int mirror_num, int async_submit)
4828 {
4829 	struct btrfs_device *dev;
4830 	struct bio *first_bio = bio;
4831 	u64 logical = (u64)bio->bi_sector << 9;
4832 	u64 length = 0;
4833 	u64 map_length;
4834 	int ret;
4835 	int dev_nr = 0;
4836 	int total_devs = 1;
4837 	struct btrfs_bio *bbio = NULL;
4838 
4839 	length = bio->bi_size;
4840 	map_length = length;
4841 
4842 	ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
4843 			      mirror_num);
4844 	if (ret)
4845 		return ret;
4846 
4847 	total_devs = bbio->num_stripes;
4848 	if (map_length < length) {
4849 		printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4850 		       "len %llu\n", (unsigned long long)logical,
4851 		       (unsigned long long)length,
4852 		       (unsigned long long)map_length);
4853 		BUG();
4854 	}
4855 
4856 	bbio->orig_bio = first_bio;
4857 	bbio->private = first_bio->bi_private;
4858 	bbio->end_io = first_bio->bi_end_io;
4859 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4860 
4861 	while (dev_nr < total_devs) {
4862 		dev = bbio->stripes[dev_nr].dev;
4863 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4864 			bbio_error(bbio, first_bio, logical);
4865 			dev_nr++;
4866 			continue;
4867 		}
4868 
4869 		/*
4870 		 * Check and see if we're ok with this bio based on its size
4871 		 * and offset for the given device.
4872 		 */
4873 		if (!bio_size_ok(dev->bdev, first_bio,
4874 				 bbio->stripes[dev_nr].physical >> 9)) {
4875 			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4876 						 dev_nr, rw, async_submit);
4877 			BUG_ON(ret);
4878 			dev_nr++;
4879 			continue;
4880 		}
4881 
4882 		if (dev_nr < total_devs - 1) {
4883 			bio = bio_clone(first_bio, GFP_NOFS);
4884 			BUG_ON(!bio); /* -ENOMEM */
4885 		} else {
4886 			bio = first_bio;
4887 		}
4888 
4889 		submit_stripe_bio(root, bbio, bio,
4890 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
4891 				  async_submit);
4892 		dev_nr++;
4893 	}
4894 	return 0;
4895 }
4896 
4897 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
4898 				       u8 *uuid, u8 *fsid)
4899 {
4900 	struct btrfs_device *device;
4901 	struct btrfs_fs_devices *cur_devices;
4902 
4903 	cur_devices = fs_info->fs_devices;
4904 	while (cur_devices) {
4905 		if (!fsid ||
4906 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4907 			device = __find_device(&cur_devices->devices,
4908 					       devid, uuid);
4909 			if (device)
4910 				return device;
4911 		}
4912 		cur_devices = cur_devices->seed;
4913 	}
4914 	return NULL;
4915 }
4916 
4917 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4918 					    u64 devid, u8 *dev_uuid)
4919 {
4920 	struct btrfs_device *device;
4921 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4922 
4923 	device = kzalloc(sizeof(*device), GFP_NOFS);
4924 	if (!device)
4925 		return NULL;
4926 	list_add(&device->dev_list,
4927 		 &fs_devices->devices);
4928 	device->dev_root = root->fs_info->dev_root;
4929 	device->devid = devid;
4930 	device->work.func = pending_bios_fn;
4931 	device->fs_devices = fs_devices;
4932 	device->missing = 1;
4933 	fs_devices->num_devices++;
4934 	fs_devices->missing_devices++;
4935 	spin_lock_init(&device->io_lock);
4936 	INIT_LIST_HEAD(&device->dev_alloc_list);
4937 	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4938 	return device;
4939 }
4940 
4941 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4942 			  struct extent_buffer *leaf,
4943 			  struct btrfs_chunk *chunk)
4944 {
4945 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4946 	struct map_lookup *map;
4947 	struct extent_map *em;
4948 	u64 logical;
4949 	u64 length;
4950 	u64 devid;
4951 	u8 uuid[BTRFS_UUID_SIZE];
4952 	int num_stripes;
4953 	int ret;
4954 	int i;
4955 
4956 	logical = key->offset;
4957 	length = btrfs_chunk_length(leaf, chunk);
4958 
4959 	read_lock(&map_tree->map_tree.lock);
4960 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4961 	read_unlock(&map_tree->map_tree.lock);
4962 
4963 	/* already mapped? */
4964 	if (em && em->start <= logical && em->start + em->len > logical) {
4965 		free_extent_map(em);
4966 		return 0;
4967 	} else if (em) {
4968 		free_extent_map(em);
4969 	}
4970 
4971 	em = alloc_extent_map();
4972 	if (!em)
4973 		return -ENOMEM;
4974 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4975 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4976 	if (!map) {
4977 		free_extent_map(em);
4978 		return -ENOMEM;
4979 	}
4980 
4981 	em->bdev = (struct block_device *)map;
4982 	em->start = logical;
4983 	em->len = length;
4984 	em->orig_start = 0;
4985 	em->block_start = 0;
4986 	em->block_len = em->len;
4987 
4988 	map->num_stripes = num_stripes;
4989 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
4990 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
4991 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4992 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4993 	map->type = btrfs_chunk_type(leaf, chunk);
4994 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4995 	for (i = 0; i < num_stripes; i++) {
4996 		map->stripes[i].physical =
4997 			btrfs_stripe_offset_nr(leaf, chunk, i);
4998 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4999 		read_extent_buffer(leaf, uuid, (unsigned long)
5000 				   btrfs_stripe_dev_uuid_nr(chunk, i),
5001 				   BTRFS_UUID_SIZE);
5002 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5003 							uuid, NULL);
5004 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5005 			kfree(map);
5006 			free_extent_map(em);
5007 			return -EIO;
5008 		}
5009 		if (!map->stripes[i].dev) {
5010 			map->stripes[i].dev =
5011 				add_missing_dev(root, devid, uuid);
5012 			if (!map->stripes[i].dev) {
5013 				kfree(map);
5014 				free_extent_map(em);
5015 				return -EIO;
5016 			}
5017 		}
5018 		map->stripes[i].dev->in_fs_metadata = 1;
5019 	}
5020 
5021 	write_lock(&map_tree->map_tree.lock);
5022 	ret = add_extent_mapping(&map_tree->map_tree, em);
5023 	write_unlock(&map_tree->map_tree.lock);
5024 	BUG_ON(ret); /* Tree corruption */
5025 	free_extent_map(em);
5026 
5027 	return 0;
5028 }
5029 
5030 static void fill_device_from_item(struct extent_buffer *leaf,
5031 				 struct btrfs_dev_item *dev_item,
5032 				 struct btrfs_device *device)
5033 {
5034 	unsigned long ptr;
5035 
5036 	device->devid = btrfs_device_id(leaf, dev_item);
5037 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5038 	device->total_bytes = device->disk_total_bytes;
5039 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5040 	device->type = btrfs_device_type(leaf, dev_item);
5041 	device->io_align = btrfs_device_io_align(leaf, dev_item);
5042 	device->io_width = btrfs_device_io_width(leaf, dev_item);
5043 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5044 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5045 	device->is_tgtdev_for_dev_replace = 0;
5046 
5047 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
5048 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5049 }
5050 
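/*
 * Editor's note: seed filesystems form a singly linked chain through
 * fs_devices->seed. The function below first checks whether the fsid is
 * already on the chain; if not, the matching fs_devices is cloned, its
 * devices are opened read-only, and the clone is spliced onto the head
 * of the chain.
 */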
5051 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5052 {
5053 	struct btrfs_fs_devices *fs_devices;
5054 	int ret;
5055 
5056 	BUG_ON(!mutex_is_locked(&uuid_mutex));
5057 
5058 	fs_devices = root->fs_info->fs_devices->seed;
5059 	while (fs_devices) {
5060 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5061 			ret = 0;
5062 			goto out;
5063 		}
5064 		fs_devices = fs_devices->seed;
5065 	}
5066 
5067 	fs_devices = find_fsid(fsid);
5068 	if (!fs_devices) {
5069 		ret = -ENOENT;
5070 		goto out;
5071 	}
5072 
5073 	fs_devices = clone_fs_devices(fs_devices);
5074 	if (IS_ERR(fs_devices)) {
5075 		ret = PTR_ERR(fs_devices);
5076 		goto out;
5077 	}
5078 
5079 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5080 				   root->fs_info->bdev_holder);
5081 	if (ret) {
5082 		free_fs_devices(fs_devices);
5083 		goto out;
5084 	}
5085 
5086 	if (!fs_devices->seeding) {
5087 		__btrfs_close_devices(fs_devices);
5088 		free_fs_devices(fs_devices);
5089 		ret = -EINVAL;
5090 		goto out;
5091 	}
5092 
5093 	fs_devices->seed = root->fs_info->fs_devices->seed;
5094 	root->fs_info->fs_devices->seed = fs_devices;
5095 out:
5096 	return ret;
5097 }
5098 
5099 static int read_one_dev(struct btrfs_root *root,
5100 			struct extent_buffer *leaf,
5101 			struct btrfs_dev_item *dev_item)
5102 {
5103 	struct btrfs_device *device;
5104 	u64 devid;
5105 	int ret;
5106 	u8 fs_uuid[BTRFS_UUID_SIZE];
5107 	u8 dev_uuid[BTRFS_UUID_SIZE];
5108 
5109 	devid = btrfs_device_id(leaf, dev_item);
5110 	read_extent_buffer(leaf, dev_uuid,
5111 			   (unsigned long)btrfs_device_uuid(dev_item),
5112 			   BTRFS_UUID_SIZE);
5113 	read_extent_buffer(leaf, fs_uuid,
5114 			   (unsigned long)btrfs_device_fsid(dev_item),
5115 			   BTRFS_UUID_SIZE);
5116 
5117 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5118 		ret = open_seed_devices(root, fs_uuid);
5119 		if (ret && !btrfs_test_opt(root, DEGRADED))
5120 			return ret;
5121 	}
5122 
5123 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5124 	if (!device || !device->bdev) {
5125 		if (!btrfs_test_opt(root, DEGRADED))
5126 			return -EIO;
5127 
5128 		if (!device) {
5129 			printk(KERN_WARNING "warning devid %llu missing\n",
5130 			       (unsigned long long)devid);
5131 			device = add_missing_dev(root, devid, dev_uuid);
5132 			if (!device)
5133 				return -ENOMEM;
5134 		} else if (!device->missing) {
5135 			/*
5136 			 * this happens when a device that was properly set up
5137 			 * in the device info lists suddenly goes bad.
5138 			 * device->bdev is NULL, so we have to set
5139 			 * device->missing to one here
5140 			 */
5141 			root->fs_info->fs_devices->missing_devices++;
5142 			device->missing = 1;
5143 		}
5144 	}
5145 
5146 	if (device->fs_devices != root->fs_info->fs_devices) {
5147 		BUG_ON(device->writeable);
5148 		if (device->generation !=
5149 		    btrfs_device_generation(leaf, dev_item))
5150 			return -EINVAL;
5151 	}
5152 
5153 	fill_device_from_item(leaf, dev_item, device);
5154 	device->dev_root = root->fs_info->dev_root;
5155 	device->in_fs_metadata = 1;
5156 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5157 		device->fs_devices->total_rw_bytes += device->total_bytes;
5158 		spin_lock(&root->fs_info->free_chunk_lock);
5159 		root->fs_info->free_chunk_space += device->total_bytes -
5160 			device->bytes_used;
5161 		spin_unlock(&root->fs_info->free_chunk_lock);
5162 	}
5163 	ret = 0;
5164 	return ret;
5165 }
5166 
5167 int btrfs_read_sys_array(struct btrfs_root *root)
5168 {
5169 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5170 	struct extent_buffer *sb;
5171 	struct btrfs_disk_key *disk_key;
5172 	struct btrfs_chunk *chunk;
5173 	u8 *ptr;
5174 	unsigned long sb_ptr;
5175 	int ret = 0;
5176 	u32 num_stripes;
5177 	u32 array_size;
5178 	u32 len = 0;
5179 	u32 cur;
5180 	struct btrfs_key key;
5181 
5182 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5183 					  BTRFS_SUPER_INFO_SIZE);
5184 	if (!sb)
5185 		return -ENOMEM;
5186 	btrfs_set_buffer_uptodate(sb);
5187 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5188 	/*
5189 	 * The sb extent buffer is artificial and just used to read the system array.
5190 	 * The btrfs_set_buffer_uptodate() call does not properly mark all its
5191 	 * pages up-to-date when the page is larger: the extent does not cover the
5192 	 * whole page and consequently check_page_uptodate does not find all
5193 	 * the page's extents up-to-date (the hole beyond sb);
5194 	 * write_extent_buffer then triggers a WARN_ON.
5195 	 *
5196 	 * Regular short extents go through the mark_extent_buffer_dirty/writeback
5197 	 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
5198 	 * call to silence the warning, e.g. on PowerPC 64.
5199 	 */
5200 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5201 		SetPageUptodate(sb->pages[0]);
5202 
5203 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5204 	array_size = btrfs_super_sys_array_size(super_copy);
5205 
5206 	ptr = super_copy->sys_chunk_array;
5207 	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5208 	cur = 0;
5209 
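	/*
	 * Editor's note: sys_chunk_array is a packed sequence of
	 * (btrfs_disk_key, btrfs_chunk) pairs. Each chunk item's size
	 * depends on its stripe count, so the walk advances by
	 * sizeof(*disk_key) plus btrfs_chunk_item_size(num_stripes).
	 */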
5210 	while (cur < array_size) {
5211 		disk_key = (struct btrfs_disk_key *)ptr;
5212 		btrfs_disk_key_to_cpu(&key, disk_key);
5213 
5214 		len = sizeof(*disk_key); ptr += len;
5215 		sb_ptr += len;
5216 		cur += len;
5217 
5218 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5219 			chunk = (struct btrfs_chunk *)sb_ptr;
5220 			ret = read_one_chunk(root, &key, sb, chunk);
5221 			if (ret)
5222 				break;
5223 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5224 			len = btrfs_chunk_item_size(num_stripes);
5225 		} else {
5226 			ret = -EIO;
5227 			break;
5228 		}
5229 		ptr += len;
5230 		sb_ptr += len;
5231 		cur += len;
5232 	}
5233 	free_extent_buffer(sb);
5234 	return ret;
5235 }
5236 
5237 int btrfs_read_chunk_tree(struct btrfs_root *root)
5238 {
5239 	struct btrfs_path *path;
5240 	struct extent_buffer *leaf;
5241 	struct btrfs_key key;
5242 	struct btrfs_key found_key;
5243 	int ret;
5244 	int slot;
5245 
5246 	root = root->fs_info->chunk_root;
5247 
5248 	path = btrfs_alloc_path();
5249 	if (!path)
5250 		return -ENOMEM;
5251 
5252 	mutex_lock(&uuid_mutex);
5253 	lock_chunks(root);
5254 
5255 	/* first we search for all of the device items, and then we
5256 	 * read in all of the chunk items.  This way we can create chunk
5257 	 * mappings that reference all of the devices that are found
5258 	 */
5259 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5260 	key.offset = 0;
5261 	key.type = 0;
5262 again:
5263 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5264 	if (ret < 0)
5265 		goto error;
5266 	while (1) {
5267 		leaf = path->nodes[0];
5268 		slot = path->slots[0];
5269 		if (slot >= btrfs_header_nritems(leaf)) {
5270 			ret = btrfs_next_leaf(root, path);
5271 			if (ret == 0)
5272 				continue;
5273 			if (ret < 0)
5274 				goto error;
5275 			break;
5276 		}
5277 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5278 		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5279 			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5280 				break;
5281 			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5282 				struct btrfs_dev_item *dev_item;
5283 				dev_item = btrfs_item_ptr(leaf, slot,
5284 						  struct btrfs_dev_item);
5285 				ret = read_one_dev(root, leaf, dev_item);
5286 				if (ret)
5287 					goto error;
5288 			}
5289 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5290 			struct btrfs_chunk *chunk;
5291 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5292 			ret = read_one_chunk(root, &found_key, leaf, chunk);
5293 			if (ret)
5294 				goto error;
5295 		}
5296 		path->slots[0]++;
5297 	}
5298 	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5299 		key.objectid = 0;
5300 		btrfs_release_path(path);
5301 		goto again;
5302 	}
5303 	ret = 0;
5304 error:
5305 	unlock_chunks(root);
5306 	mutex_unlock(&uuid_mutex);
5307 
5308 	btrfs_free_path(path);
5309 	return ret;
5310 }
5311 
5312 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5313 {
5314 	int i;
5315 
5316 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5317 		btrfs_dev_stat_reset(dev, i);
5318 }
5319 
5320 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5321 {
5322 	struct btrfs_key key;
5323 	struct btrfs_key found_key;
5324 	struct btrfs_root *dev_root = fs_info->dev_root;
5325 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5326 	struct extent_buffer *eb;
5327 	int slot;
5328 	int ret = 0;
5329 	struct btrfs_device *device;
5330 	struct btrfs_path *path = NULL;
5331 	int i;
5332 
5333 	path = btrfs_alloc_path();
5334 	if (!path) {
5335 		ret = -ENOMEM;
5336 		goto out;
5337 	}
5338 
5339 	mutex_lock(&fs_devices->device_list_mutex);
5340 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
5341 		int item_size;
5342 		struct btrfs_dev_stats_item *ptr;
5343 
5344 		key.objectid = 0;
5345 		key.type = BTRFS_DEV_STATS_KEY;
5346 		key.offset = device->devid;
5347 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5348 		if (ret) {
5349 			__btrfs_reset_dev_stats(device);
5350 			device->dev_stats_valid = 1;
5351 			btrfs_release_path(path);
5352 			continue;
5353 		}
5354 		slot = path->slots[0];
5355 		eb = path->nodes[0];
5356 		btrfs_item_key_to_cpu(eb, &found_key, slot);
5357 		item_size = btrfs_item_size_nr(eb, slot);
5358 
5359 		ptr = btrfs_item_ptr(eb, slot,
5360 				     struct btrfs_dev_stats_item);
5361 
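		/*
		 * Editor's note: the loop below reads a stat slot only if it
		 * fits within item_size, so an older, smaller on-disk item
		 * simply leaves the newer counters reset to zero -- a
		 * forward-compatibility measure.
		 */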
5362 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5363 			if (item_size >= (1 + i) * sizeof(__le64))
5364 				btrfs_dev_stat_set(device, i,
5365 					btrfs_dev_stats_value(eb, ptr, i));
5366 			else
5367 				btrfs_dev_stat_reset(device, i);
5368 		}
5369 
5370 		device->dev_stats_valid = 1;
5371 		btrfs_dev_stat_print_on_load(device);
5372 		btrfs_release_path(path);
5373 	}
5374 	mutex_unlock(&fs_devices->device_list_mutex);
5375 
5376 out:
5377 	btrfs_free_path(path);
5378 	return ret < 0 ? ret : 0;
5379 }
5380 
5381 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5382 				struct btrfs_root *dev_root,
5383 				struct btrfs_device *device)
5384 {
5385 	struct btrfs_path *path;
5386 	struct btrfs_key key;
5387 	struct extent_buffer *eb;
5388 	struct btrfs_dev_stats_item *ptr;
5389 	int ret;
5390 	int i;
5391 
5392 	key.objectid = 0;
5393 	key.type = BTRFS_DEV_STATS_KEY;
5394 	key.offset = device->devid;
5395 
5396 	path = btrfs_alloc_path();
5397 	BUG_ON(!path);
5398 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5399 	if (ret < 0) {
5400 		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5401 			      ret, rcu_str_deref(device->name));
5402 		goto out;
5403 	}
5404 
5405 	if (ret == 0 &&
5406 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5407 		/* need to delete old one and insert a new one */
5408 		ret = btrfs_del_item(trans, dev_root, path);
5409 		if (ret != 0) {
5410 			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5411 				      rcu_str_deref(device->name), ret);
5412 			goto out;
5413 		}
5414 		ret = 1;
5415 	}
5416 
5417 	if (ret == 1) {
5418 		/* need to insert a new item */
5419 		btrfs_release_path(path);
5420 		ret = btrfs_insert_empty_item(trans, dev_root, path,
5421 					      &key, sizeof(*ptr));
5422 		if (ret < 0) {
5423 			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5424 				      rcu_str_deref(device->name), ret);
5425 			goto out;
5426 		}
5427 	}
5428 
5429 	eb = path->nodes[0];
5430 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5431 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5432 		btrfs_set_dev_stats_value(eb, ptr, i,
5433 					  btrfs_dev_stat_read(device, i));
5434 	btrfs_mark_buffer_dirty(eb);
5435 
5436 out:
5437 	btrfs_free_path(path);
5438 	return ret;
5439 }
5440 
5441 /*
5442  * called from commit_transaction. Writes all changed device stats to disk.
5443  */
5444 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5445 			struct btrfs_fs_info *fs_info)
5446 {
5447 	struct btrfs_root *dev_root = fs_info->dev_root;
5448 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5449 	struct btrfs_device *device;
5450 	int ret = 0;
5451 
5452 	mutex_lock(&fs_devices->device_list_mutex);
5453 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
5454 		if (!device->dev_stats_valid || !device->dev_stats_dirty)
5455 			continue;
5456 
5457 		ret = update_dev_stat_item(trans, dev_root, device);
5458 		if (!ret)
5459 			device->dev_stats_dirty = 0;
5460 	}
5461 	mutex_unlock(&fs_devices->device_list_mutex);
5462 
5463 	return ret;
5464 }
5465 
5466 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5467 {
5468 	btrfs_dev_stat_inc(dev, index);
5469 	btrfs_dev_stat_print_on_error(dev);
5470 }
5471 
5472 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5473 {
5474 	if (!dev->dev_stats_valid)
5475 		return;
5476 	printk_ratelimited_in_rcu(KERN_ERR
5477 			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5478 			   rcu_str_deref(dev->name),
5479 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5480 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5481 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5482 			   btrfs_dev_stat_read(dev,
5483 					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
5484 			   btrfs_dev_stat_read(dev,
5485 					       BTRFS_DEV_STAT_GENERATION_ERRS));
5486 }
5487 
5488 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5489 {
5490 	int i;
5491 
5492 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5493 		if (btrfs_dev_stat_read(dev, i) != 0)
5494 			break;
5495 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
5496 		return; /* all values == 0, suppress message */
5497 
5498 	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5499 	       rcu_str_deref(dev->name),
5500 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5501 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5502 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5503 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5504 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5505 }
5506 
5507 int btrfs_get_dev_stats(struct btrfs_root *root,
5508 			struct btrfs_ioctl_get_dev_stats *stats)
5509 {
5510 	struct btrfs_device *dev;
5511 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5512 	int i;
5513 
5514 	mutex_lock(&fs_devices->device_list_mutex);
5515 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5516 	mutex_unlock(&fs_devices->device_list_mutex);
5517 
5518 	if (!dev) {
5519 		printk(KERN_WARNING
5520 		       "btrfs: get dev_stats failed, device not found\n");
5521 		return -ENODEV;
5522 	} else if (!dev->dev_stats_valid) {
5523 		printk(KERN_WARNING
5524 		       "btrfs: get dev_stats failed, not yet valid\n");
5525 		return -ENODEV;
5526 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5527 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5528 			if (stats->nr_items > i)
5529 				stats->values[i] =
5530 					btrfs_dev_stat_read_and_reset(dev, i);
5531 			else
5532 				btrfs_dev_stat_reset(dev, i);
5533 		}
5534 	} else {
5535 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5536 			if (stats->nr_items > i)
5537 				stats->values[i] = btrfs_dev_stat_read(dev, i);
5538 	}
5539 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5540 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5541 	return 0;
5542 }
5543 
5544 int btrfs_scratch_superblock(struct btrfs_device *device)
5545 {
5546 	struct buffer_head *bh;
5547 	struct btrfs_super_block *disk_super;
5548 
5549 	bh = btrfs_read_dev_super(device->bdev);
5550 	if (!bh)
5551 		return -EINVAL;
5552 	disk_super = (struct btrfs_super_block *)bh->b_data;
5553 
5554 	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5555 	set_buffer_dirty(bh);
5556 	sync_dirty_buffer(bh);
5557 	brelse(bh);
5558 
5559 	return 0;
5560 }
5561