xref: /openbmc/linux/fs/btrfs/volumes.c (revision 97da55fc)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <linux/raid/pq.h>
29 #include <asm/div64.h>
30 #include "compat.h"
31 #include "ctree.h"
32 #include "extent_map.h"
33 #include "disk-io.h"
34 #include "transaction.h"
35 #include "print-tree.h"
36 #include "volumes.h"
37 #include "raid56.h"
38 #include "async-thread.h"
39 #include "check-integrity.h"
40 #include "rcu-string.h"
41 #include "math.h"
42 #include "dev-replace.h"
43 
44 static int init_first_rw_device(struct btrfs_trans_handle *trans,
45 				struct btrfs_root *root,
46 				struct btrfs_device *device);
47 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
48 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
49 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
50 
51 static DEFINE_MUTEX(uuid_mutex);
52 static LIST_HEAD(fs_uuids);
53 
54 static void lock_chunks(struct btrfs_root *root)
55 {
56 	mutex_lock(&root->fs_info->chunk_mutex);
57 }
58 
59 static void unlock_chunks(struct btrfs_root *root)
60 {
61 	mutex_unlock(&root->fs_info->chunk_mutex);
62 }
63 
64 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
65 {
66 	struct btrfs_device *device;
67 	WARN_ON(fs_devices->opened);
68 	while (!list_empty(&fs_devices->devices)) {
69 		device = list_entry(fs_devices->devices.next,
70 				    struct btrfs_device, dev_list);
71 		list_del(&device->dev_list);
72 		rcu_string_free(device->name);
73 		kfree(device);
74 	}
75 	kfree(fs_devices);
76 }
77 
78 static void btrfs_kobject_uevent(struct block_device *bdev,
79 				 enum kobject_action action)
80 {
81 	int ret;
82 
83 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
84 	if (ret)
85 		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
86 			action,
87 			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
88 			&disk_to_dev(bdev->bd_disk)->kobj);
89 }
90 
91 void btrfs_cleanup_fs_uuids(void)
92 {
93 	struct btrfs_fs_devices *fs_devices;
94 
95 	while (!list_empty(&fs_uuids)) {
96 		fs_devices = list_entry(fs_uuids.next,
97 					struct btrfs_fs_devices, list);
98 		list_del(&fs_devices->list);
99 		free_fs_devices(fs_devices);
100 	}
101 }
102 
103 static noinline struct btrfs_device *__find_device(struct list_head *head,
104 						   u64 devid, u8 *uuid)
105 {
106 	struct btrfs_device *dev;
107 
108 	list_for_each_entry(dev, head, dev_list) {
109 		if (dev->devid == devid &&
110 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
111 			return dev;
112 		}
113 	}
114 	return NULL;
115 }
116 
117 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
118 {
119 	struct btrfs_fs_devices *fs_devices;
120 
121 	list_for_each_entry(fs_devices, &fs_uuids, list) {
122 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
123 			return fs_devices;
124 	}
125 	return NULL;
126 }
127 
128 static int
129 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
130 		      int flush, struct block_device **bdev,
131 		      struct buffer_head **bh)
132 {
133 	int ret;
134 
135 	*bdev = blkdev_get_by_path(device_path, flags, holder);
136 
137 	if (IS_ERR(*bdev)) {
138 		ret = PTR_ERR(*bdev);
139 		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
140 		goto error;
141 	}
142 
143 	if (flush)
144 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
145 	ret = set_blocksize(*bdev, 4096);
146 	if (ret) {
147 		blkdev_put(*bdev, flags);
148 		goto error;
149 	}
150 	invalidate_bdev(*bdev);
151 	*bh = btrfs_read_dev_super(*bdev);
152 	if (!*bh) {
153 		ret = -EINVAL;
154 		blkdev_put(*bdev, flags);
155 		goto error;
156 	}
157 
158 	return 0;
159 
160 error:
161 	*bdev = NULL;
162 	*bh = NULL;
163 	return ret;
164 }
165 
166 static void requeue_list(struct btrfs_pending_bios *pending_bios,
167 			struct bio *head, struct bio *tail)
168 {
169 
170 	struct bio *old_head;
171 
172 	old_head = pending_bios->head;
173 	pending_bios->head = head;
174 	if (pending_bios->tail)
175 		tail->bi_next = old_head;
176 	else
177 		pending_bios->tail = tail;
178 }
179 
180 /*
181  * we try to collect pending bios for a device so we don't get a large
182  * number of procs sending bios down to the same device.  This greatly
183  * improves the scheduler's ability to collect and merge the bios.
184  *
185  * But, it also turns into a long list of bios to process and that is sure
186  * to eventually make the worker thread block.  The solution here is to
187  * make some progress and then put this work struct back at the end of
188  * the list if the block device is congested.  This way, multiple devices
189  * can make progress from a single worker thread.
190  */
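/*
 * A condensed sketch of that scheme, assuming a hypothetical
 * made_enough_progress() helper (illustrative only; the real loop
 * below tracks batch counts and io_context batching as well):
 *
 *	while ((cur = pending)) {
 *		pending = pending->bi_next;
 *		btrfsic_submit_bio(cur->bi_rw, cur);
 *		if (bdi_write_congested(bdi) && made_enough_progress()) {
 *			requeue_list(pending_bios, pending, tail);
 *			btrfs_requeue_work(&device->work);
 *			return;
 *		}
 *	}
 */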
191 static noinline void run_scheduled_bios(struct btrfs_device *device)
192 {
193 	struct bio *pending;
194 	struct backing_dev_info *bdi;
195 	struct btrfs_fs_info *fs_info;
196 	struct btrfs_pending_bios *pending_bios;
197 	struct bio *tail;
198 	struct bio *cur;
199 	int again = 0;
200 	unsigned long num_run;
201 	unsigned long batch_run = 0;
202 	unsigned long limit;
203 	unsigned long last_waited = 0;
204 	int force_reg = 0;
205 	int sync_pending = 0;
206 	struct blk_plug plug;
207 
208 	/*
209 	 * this function runs all the bios we've collected for
210 	 * a particular device.  We don't want to wander off to
211 	 * another device without first sending all of these down.
212 	 * So, set up a plug here and finish it off before we return
213 	 */
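	/*
	 * The usual block-plug pairing, as used in this function:
	 *
	 *	struct blk_plug plug;
	 *
	 *	blk_start_plug(&plug);
	 *	... submit a batch of bios ...
	 *	blk_finish_plug(&plug);
	 *
	 * The loop below also finishes and restarts the plug mid-stream
	 * to push sync requests out early.
	 */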
214 	blk_start_plug(&plug);
215 
216 	bdi = blk_get_backing_dev_info(device->bdev);
217 	fs_info = device->dev_root->fs_info;
218 	limit = btrfs_async_submit_limit(fs_info);
219 	limit = limit * 2 / 3;
220 
221 loop:
222 	spin_lock(&device->io_lock);
223 
224 loop_lock:
225 	num_run = 0;
226 
227 	/* take all the bios off the list at once and process them
228 	 * later on (without the lock held).  But, remember the
229 	 * tail and other pointers so the bios can be properly reinserted
230 	 * into the list if we hit congestion
231 	 */
232 	if (!force_reg && device->pending_sync_bios.head) {
233 		pending_bios = &device->pending_sync_bios;
234 		force_reg = 1;
235 	} else {
236 		pending_bios = &device->pending_bios;
237 		force_reg = 0;
238 	}
239 
240 	pending = pending_bios->head;
241 	tail = pending_bios->tail;
242 	WARN_ON(pending && !tail);
243 
244 	/*
245 	 * if pending was null this time around, no bios need processing
246 	 * at all and we can stop.  Otherwise it'll loop back up again
247 	 * and do an additional check so no bios are missed.
248 	 *
249 	 * device->running_pending is used to synchronize with the
250 	 * schedule_bio code.
251 	 */
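	/*
	 * For reference, the producer side (schedule_bio) roughly does:
	 *
	 *	spin_lock(&device->io_lock);
	 *	... append the bio to one of the pending lists ...
	 *	should_queue = !device->running_pending;
	 *	spin_unlock(&device->io_lock);
	 *	if (should_queue)
	 *		btrfs_queue_worker(&fs_info->submit_workers,
	 *				   &device->work);
	 *
	 * so clearing running_pending below is what re-arms that queueing.
	 */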
252 	if (device->pending_sync_bios.head == NULL &&
253 	    device->pending_bios.head == NULL) {
254 		again = 0;
255 		device->running_pending = 0;
256 	} else {
257 		again = 1;
258 		device->running_pending = 1;
259 	}
260 
261 	pending_bios->head = NULL;
262 	pending_bios->tail = NULL;
263 
264 	spin_unlock(&device->io_lock);
265 
266 	while (pending) {
267 
268 		rmb();
269 		/* we want to work on both lists, but do more bios on the
270 		 * sync list than the regular list
271 		 */
272 		if ((num_run > 32 &&
273 		    pending_bios != &device->pending_sync_bios &&
274 		    device->pending_sync_bios.head) ||
275 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
276 		    device->pending_bios.head)) {
277 			spin_lock(&device->io_lock);
278 			requeue_list(pending_bios, pending, tail);
279 			goto loop_lock;
280 		}
281 
282 		cur = pending;
283 		pending = pending->bi_next;
284 		cur->bi_next = NULL;
285 
286 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
287 		    waitqueue_active(&fs_info->async_submit_wait))
288 			wake_up(&fs_info->async_submit_wait);
289 
290 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
291 
292 		/*
293 		 * if we're doing the sync list, record that our
294 		 * plug has some sync requests on it
295 		 *
296 		 * If we're doing the regular list and there are
297 		 * sync requests sitting around, unplug before
298 		 * we add more
299 		 */
300 		if (pending_bios == &device->pending_sync_bios) {
301 			sync_pending = 1;
302 		} else if (sync_pending) {
303 			blk_finish_plug(&plug);
304 			blk_start_plug(&plug);
305 			sync_pending = 0;
306 		}
307 
308 		btrfsic_submit_bio(cur->bi_rw, cur);
309 		num_run++;
310 		batch_run++;
311 		if (need_resched())
312 			cond_resched();
313 
314 		/*
315 		 * we made progress, there is more work to do and the bdi
316 		 * is now congested.  Back off and let other work structs
317 		 * run instead
318 		 */
319 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
320 		    fs_info->fs_devices->open_devices > 1) {
321 			struct io_context *ioc;
322 
323 			ioc = current->io_context;
324 
325 			/*
326 			 * the main goal here is that we don't want to
327 			 * block if we're going to be able to submit
328 			 * more requests without blocking.
329 			 *
330 			 * This code does two great things: it pokes into
331 			 * the elevator code from a filesystem _and_
332 			 * it makes assumptions about how batching works.
333 			 */
334 			if (ioc && ioc->nr_batch_requests > 0 &&
335 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
336 			    (last_waited == 0 ||
337 			     ioc->last_waited == last_waited)) {
338 				/*
339 				 * we want to go through our batch of
340 				 * requests and stop.  So, we copy out
341 				 * the ioc->last_waited time and test
342 				 * against it before looping
343 				 */
344 				last_waited = ioc->last_waited;
345 				if (need_resched())
346 					cond_resched();
347 				continue;
348 			}
349 			spin_lock(&device->io_lock);
350 			requeue_list(pending_bios, pending, tail);
351 			device->running_pending = 1;
352 
353 			spin_unlock(&device->io_lock);
354 			btrfs_requeue_work(&device->work);
355 			goto done;
356 		}
357 		/* unplug every 64 requests just for good measure */
358 		if (batch_run % 64 == 0) {
359 			blk_finish_plug(&plug);
360 			blk_start_plug(&plug);
361 			sync_pending = 0;
362 		}
363 	}
364 
365 	cond_resched();
366 	if (again)
367 		goto loop;
368 
369 	spin_lock(&device->io_lock);
370 	if (device->pending_bios.head || device->pending_sync_bios.head)
371 		goto loop_lock;
372 	spin_unlock(&device->io_lock);
373 
374 done:
375 	blk_finish_plug(&plug);
376 }
377 
378 static void pending_bios_fn(struct btrfs_work *work)
379 {
380 	struct btrfs_device *device;
381 
382 	device = container_of(work, struct btrfs_device, work);
383 	run_scheduled_bios(device);
384 }
385 
386 static noinline int device_list_add(const char *path,
387 			   struct btrfs_super_block *disk_super,
388 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
389 {
390 	struct btrfs_device *device;
391 	struct btrfs_fs_devices *fs_devices;
392 	struct rcu_string *name;
393 	u64 found_transid = btrfs_super_generation(disk_super);
394 
395 	fs_devices = find_fsid(disk_super->fsid);
396 	if (!fs_devices) {
397 		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
398 		if (!fs_devices)
399 			return -ENOMEM;
400 		INIT_LIST_HEAD(&fs_devices->devices);
401 		INIT_LIST_HEAD(&fs_devices->alloc_list);
402 		list_add(&fs_devices->list, &fs_uuids);
403 		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
404 		fs_devices->latest_devid = devid;
405 		fs_devices->latest_trans = found_transid;
406 		mutex_init(&fs_devices->device_list_mutex);
407 		device = NULL;
408 	} else {
409 		device = __find_device(&fs_devices->devices, devid,
410 				       disk_super->dev_item.uuid);
411 	}
412 	if (!device) {
413 		if (fs_devices->opened)
414 			return -EBUSY;
415 
416 		device = kzalloc(sizeof(*device), GFP_NOFS);
417 		if (!device) {
418 			/* we can safely leave the fs_devices entry around */
419 			return -ENOMEM;
420 		}
421 		device->devid = devid;
422 		device->dev_stats_valid = 0;
423 		device->work.func = pending_bios_fn;
424 		memcpy(device->uuid, disk_super->dev_item.uuid,
425 		       BTRFS_UUID_SIZE);
426 		spin_lock_init(&device->io_lock);
427 
428 		name = rcu_string_strdup(path, GFP_NOFS);
429 		if (!name) {
430 			kfree(device);
431 			return -ENOMEM;
432 		}
433 		rcu_assign_pointer(device->name, name);
434 		INIT_LIST_HEAD(&device->dev_alloc_list);
435 
436 		/* init readahead state */
437 		spin_lock_init(&device->reada_lock);
438 		device->reada_curr_zone = NULL;
439 		atomic_set(&device->reada_in_flight, 0);
440 		device->reada_next = 0;
441 		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
442 		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
443 
444 		mutex_lock(&fs_devices->device_list_mutex);
445 		list_add_rcu(&device->dev_list, &fs_devices->devices);
446 		mutex_unlock(&fs_devices->device_list_mutex);
447 
448 		device->fs_devices = fs_devices;
449 		fs_devices->num_devices++;
450 	} else if (!device->name || strcmp(device->name->str, path)) {
451 		name = rcu_string_strdup(path, GFP_NOFS);
452 		if (!name)
453 			return -ENOMEM;
454 		rcu_string_free(device->name);
455 		rcu_assign_pointer(device->name, name);
456 		if (device->missing) {
457 			fs_devices->missing_devices--;
458 			device->missing = 0;
459 		}
460 	}
461 
462 	if (found_transid > fs_devices->latest_trans) {
463 		fs_devices->latest_devid = devid;
464 		fs_devices->latest_trans = found_transid;
465 	}
466 	*fs_devices_ret = fs_devices;
467 	return 0;
468 }
469 
470 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
471 {
472 	struct btrfs_fs_devices *fs_devices;
473 	struct btrfs_device *device;
474 	struct btrfs_device *orig_dev;
475 
476 	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
477 	if (!fs_devices)
478 		return ERR_PTR(-ENOMEM);
479 
480 	INIT_LIST_HEAD(&fs_devices->devices);
481 	INIT_LIST_HEAD(&fs_devices->alloc_list);
482 	INIT_LIST_HEAD(&fs_devices->list);
483 	mutex_init(&fs_devices->device_list_mutex);
484 	fs_devices->latest_devid = orig->latest_devid;
485 	fs_devices->latest_trans = orig->latest_trans;
486 	fs_devices->total_devices = orig->total_devices;
487 	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
488 
489 	/* We hold the volume lock, so it is safe to get the devices. */
490 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
491 		struct rcu_string *name;
492 
493 		device = kzalloc(sizeof(*device), GFP_NOFS);
494 		if (!device)
495 			goto error;
496 
497 		/*
498 		 * This is ok to do without the RCU read lock held because the
499 		 * uuid mutex is held, so nothing we touch here will disappear.
500 		 */
501 		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
502 		if (!name) {
503 			kfree(device);
504 			goto error;
505 		}
506 		rcu_assign_pointer(device->name, name);
507 
508 		device->devid = orig_dev->devid;
509 		device->work.func = pending_bios_fn;
510 		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
511 		spin_lock_init(&device->io_lock);
512 		INIT_LIST_HEAD(&device->dev_list);
513 		INIT_LIST_HEAD(&device->dev_alloc_list);
514 
515 		list_add(&device->dev_list, &fs_devices->devices);
516 		device->fs_devices = fs_devices;
517 		fs_devices->num_devices++;
518 	}
519 	return fs_devices;
520 error:
521 	free_fs_devices(fs_devices);
522 	return ERR_PTR(-ENOMEM);
523 }
524 
525 void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
526 			       struct btrfs_fs_devices *fs_devices, int step)
527 {
528 	struct btrfs_device *device, *next;
529 
530 	struct block_device *latest_bdev = NULL;
531 	u64 latest_devid = 0;
532 	u64 latest_transid = 0;
533 
534 	mutex_lock(&uuid_mutex);
535 again:
536 	/* This is the initialized path, so it is safe to release the devices. */
537 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
538 		if (device->in_fs_metadata) {
539 			if (!device->is_tgtdev_for_dev_replace &&
540 			    (!latest_transid ||
541 			     device->generation > latest_transid)) {
542 				latest_devid = device->devid;
543 				latest_transid = device->generation;
544 				latest_bdev = device->bdev;
545 			}
546 			continue;
547 		}
548 
549 		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
550 			/*
551 			 * In the first step, keep the device which has
552 			 * the correct fsid and the devid that is used
553 			 * for the dev_replace procedure.
554 			 * In the second step, the dev_replace state is
555 			 * read from the device tree, and then it is
556 			 * known whether the procedure is really active,
557 			 * and hence whether this device is still in use
558 			 * or should be removed.
559 			 */
560 			if (step == 0 || device->is_tgtdev_for_dev_replace) {
561 				continue;
562 			}
563 		}
564 		if (device->bdev) {
565 			blkdev_put(device->bdev, device->mode);
566 			device->bdev = NULL;
567 			fs_devices->open_devices--;
568 		}
569 		if (device->writeable) {
570 			list_del_init(&device->dev_alloc_list);
571 			device->writeable = 0;
572 			if (!device->is_tgtdev_for_dev_replace)
573 				fs_devices->rw_devices--;
574 		}
575 		list_del_init(&device->dev_list);
576 		fs_devices->num_devices--;
577 		rcu_string_free(device->name);
578 		kfree(device);
579 	}
580 
581 	if (fs_devices->seed) {
582 		fs_devices = fs_devices->seed;
583 		goto again;
584 	}
585 
586 	fs_devices->latest_bdev = latest_bdev;
587 	fs_devices->latest_devid = latest_devid;
588 	fs_devices->latest_trans = latest_transid;
589 
590 	mutex_unlock(&uuid_mutex);
591 }
592 
593 static void __free_device(struct work_struct *work)
594 {
595 	struct btrfs_device *device;
596 
597 	device = container_of(work, struct btrfs_device, rcu_work);
598 
599 	if (device->bdev)
600 		blkdev_put(device->bdev, device->mode);
601 
602 	rcu_string_free(device->name);
603 	kfree(device);
604 }
605 
606 static void free_device(struct rcu_head *head)
607 {
608 	struct btrfs_device *device;
609 
610 	device = container_of(head, struct btrfs_device, rcu);
611 
612 	INIT_WORK(&device->rcu_work, __free_device);
613 	schedule_work(&device->rcu_work);
614 }
615 
616 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
617 {
618 	struct btrfs_device *device;
619 
620 	if (--fs_devices->opened > 0)
621 		return 0;
622 
623 	mutex_lock(&fs_devices->device_list_mutex);
624 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
625 		struct btrfs_device *new_device;
626 		struct rcu_string *name;
627 
628 		if (device->bdev)
629 			fs_devices->open_devices--;
630 
631 		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
632 			list_del_init(&device->dev_alloc_list);
633 			fs_devices->rw_devices--;
634 		}
635 
636 		if (device->can_discard)
637 			fs_devices->num_can_discard--;
638 
639 		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
640 		BUG_ON(!new_device); /* -ENOMEM */
641 		memcpy(new_device, device, sizeof(*new_device));
642 
643 		/* Safe because we are under uuid_mutex */
644 		if (device->name) {
645 			name = rcu_string_strdup(device->name->str, GFP_NOFS);
646 			BUG_ON(device->name && !name); /* -ENOMEM */
647 			rcu_assign_pointer(new_device->name, name);
648 		}
649 		new_device->bdev = NULL;
650 		new_device->writeable = 0;
651 		new_device->in_fs_metadata = 0;
652 		new_device->can_discard = 0;
653 		spin_lock_init(&new_device->io_lock);
654 		list_replace_rcu(&device->dev_list, &new_device->dev_list);
655 
656 		call_rcu(&device->rcu, free_device);
657 	}
658 	mutex_unlock(&fs_devices->device_list_mutex);
659 
660 	WARN_ON(fs_devices->open_devices);
661 	WARN_ON(fs_devices->rw_devices);
662 	fs_devices->opened = 0;
663 	fs_devices->seeding = 0;
664 
665 	return 0;
666 }
667 
668 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
669 {
670 	struct btrfs_fs_devices *seed_devices = NULL;
671 	int ret;
672 
673 	mutex_lock(&uuid_mutex);
674 	ret = __btrfs_close_devices(fs_devices);
675 	if (!fs_devices->opened) {
676 		seed_devices = fs_devices->seed;
677 		fs_devices->seed = NULL;
678 	}
679 	mutex_unlock(&uuid_mutex);
680 
681 	while (seed_devices) {
682 		fs_devices = seed_devices;
683 		seed_devices = fs_devices->seed;
684 		__btrfs_close_devices(fs_devices);
685 		free_fs_devices(fs_devices);
686 	}
687 	return ret;
688 }
689 
690 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
691 				fmode_t flags, void *holder)
692 {
693 	struct request_queue *q;
694 	struct block_device *bdev;
695 	struct list_head *head = &fs_devices->devices;
696 	struct btrfs_device *device;
697 	struct block_device *latest_bdev = NULL;
698 	struct buffer_head *bh;
699 	struct btrfs_super_block *disk_super;
700 	u64 latest_devid = 0;
701 	u64 latest_transid = 0;
702 	u64 devid;
703 	int seeding = 1;
704 	int ret = 0;
705 
706 	flags |= FMODE_EXCL;
707 
708 	list_for_each_entry(device, head, dev_list) {
709 		if (device->bdev)
710 			continue;
711 		if (!device->name)
712 			continue;
713 
714 		ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
715 					    &bdev, &bh);
716 		if (ret)
717 			continue;
718 
719 		disk_super = (struct btrfs_super_block *)bh->b_data;
720 		devid = btrfs_stack_device_id(&disk_super->dev_item);
721 		if (devid != device->devid)
722 			goto error_brelse;
723 
724 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
725 			   BTRFS_UUID_SIZE))
726 			goto error_brelse;
727 
728 		device->generation = btrfs_super_generation(disk_super);
729 		if (!latest_transid || device->generation > latest_transid) {
730 			latest_devid = devid;
731 			latest_transid = device->generation;
732 			latest_bdev = bdev;
733 		}
734 
735 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
736 			device->writeable = 0;
737 		} else {
738 			device->writeable = !bdev_read_only(bdev);
739 			seeding = 0;
740 		}
741 
742 		q = bdev_get_queue(bdev);
743 		if (blk_queue_discard(q)) {
744 			device->can_discard = 1;
745 			fs_devices->num_can_discard++;
746 		}
747 
748 		device->bdev = bdev;
749 		device->in_fs_metadata = 0;
750 		device->mode = flags;
751 
752 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
753 			fs_devices->rotating = 1;
754 
755 		fs_devices->open_devices++;
756 		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
757 			fs_devices->rw_devices++;
758 			list_add(&device->dev_alloc_list,
759 				 &fs_devices->alloc_list);
760 		}
761 		brelse(bh);
762 		continue;
763 
764 error_brelse:
765 		brelse(bh);
766 		blkdev_put(bdev, flags);
767 		continue;
768 	}
769 	if (fs_devices->open_devices == 0) {
770 		ret = -EINVAL;
771 		goto out;
772 	}
773 	fs_devices->seeding = seeding;
774 	fs_devices->opened = 1;
775 	fs_devices->latest_bdev = latest_bdev;
776 	fs_devices->latest_devid = latest_devid;
777 	fs_devices->latest_trans = latest_transid;
778 	fs_devices->total_rw_bytes = 0;
779 out:
780 	return ret;
781 }
782 
783 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
784 		       fmode_t flags, void *holder)
785 {
786 	int ret;
787 
788 	mutex_lock(&uuid_mutex);
789 	if (fs_devices->opened) {
790 		fs_devices->opened++;
791 		ret = 0;
792 	} else {
793 		ret = __btrfs_open_devices(fs_devices, flags, holder);
794 	}
795 	mutex_unlock(&uuid_mutex);
796 	return ret;
797 }
798 
799 /*
800  * Look for a btrfs signature on a device. This may be called out of the
801  * mount path, and we are not allowed to call set_blocksize during the scan.
802  * The superblock is read via the pagecache.
803  */
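/*
 * A minimal caller sketch, modeled on the device-scan ioctl path
 * (illustrative only; the holder argument is whatever token the caller
 * uses to identify itself to blkdev_get_by_path):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret;
 *
 *	ret = btrfs_scan_one_device("/dev/sdb", FMODE_READ,
 *				    &btrfs_fs_type, &fs_devices);
 *	if (ret == 0)
 *		... device is now registered in the global fs_uuids list ...
 */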
804 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
805 			  struct btrfs_fs_devices **fs_devices_ret)
806 {
807 	struct btrfs_super_block *disk_super;
808 	struct block_device *bdev;
809 	struct page *page;
810 	void *p;
811 	int ret = -EINVAL;
812 	u64 devid;
813 	u64 transid;
814 	u64 total_devices;
815 	u64 bytenr;
816 	pgoff_t index;
817 
818 	/*
819 	 * we would like to check all the supers, but that would make
820 	 * a btrfs mount succeed after a mkfs from a different FS.
821 	 * So, we need to add a special mount option to scan for
822 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
823 	 */
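	/*
	 * For reference: btrfs keeps BTRFS_SUPER_MIRROR_MAX (3) copies of
	 * the super block, at 64KiB, 64MiB and 256GiB; btrfs_sb_offset(0)
	 * is the primary 64KiB copy, the only one guaranteed to exist on
	 * a small device.
	 */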
824 	bytenr = btrfs_sb_offset(0);
825 	flags |= FMODE_EXCL;
826 	mutex_lock(&uuid_mutex);
827 
828 	bdev = blkdev_get_by_path(path, flags, holder);
829 
830 	if (IS_ERR(bdev)) {
831 		ret = PTR_ERR(bdev);
832 		goto error;
833 	}
834 
835 	/* make sure our super fits in the device */
836 	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
837 		goto error_bdev_put;
838 
839 	/* make sure our super fits in the page */
840 	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
841 		goto error_bdev_put;
842 
843 	/* make sure our super doesn't straddle pages on disk */
844 	index = bytenr >> PAGE_CACHE_SHIFT;
845 	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
846 		goto error_bdev_put;
847 
848 	/* pull in the page with our super */
849 	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
850 				   index, GFP_NOFS);
851 
852 	if (IS_ERR_OR_NULL(page))
853 		goto error_bdev_put;
854 
855 	p = kmap(page);
856 
857 	/* align our pointer to the offset of the super block */
858 	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
859 
860 	if (btrfs_super_bytenr(disk_super) != bytenr ||
861 	    disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
862 		goto error_unmap;
863 
864 	devid = btrfs_stack_device_id(&disk_super->dev_item);
865 	transid = btrfs_super_generation(disk_super);
866 	total_devices = btrfs_super_num_devices(disk_super);
867 
868 	if (disk_super->label[0]) {
869 		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
870 			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
871 		printk(KERN_INFO "device label %s ", disk_super->label);
872 	} else {
873 		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
874 	}
875 
876 	printk(KERN_CONT "devid %llu transid %llu %s\n",
877 	       (unsigned long long)devid, (unsigned long long)transid, path);
878 
879 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
880 	if (!ret && fs_devices_ret)
881 		(*fs_devices_ret)->total_devices = total_devices;
882 
883 error_unmap:
884 	kunmap(page);
885 	page_cache_release(page);
886 
887 error_bdev_put:
888 	blkdev_put(bdev, flags);
889 error:
890 	mutex_unlock(&uuid_mutex);
891 	return ret;
892 }
893 
894 /* helper to account the used device space in the range */
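/*
 * The loop below clips each dev extent against the query range
 * [start, end]; the overlap cases it distinguishes look like this
 * (illustrative):
 *
 *	query range:           |==========|
 *	spans the range:    |----------------| -> end - start + 1, done
 *	overlaps the head:  |------|           -> extent_end - start
 *	fully contained:          |----|       -> extent_end - key.offset
 *	overlaps the tail:            |------| -> end - key.offset + 1, done
 */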
895 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
896 				   u64 end, u64 *length)
897 {
898 	struct btrfs_key key;
899 	struct btrfs_root *root = device->dev_root;
900 	struct btrfs_dev_extent *dev_extent;
901 	struct btrfs_path *path;
902 	u64 extent_end;
903 	int ret;
904 	int slot;
905 	struct extent_buffer *l;
906 
907 	*length = 0;
908 
909 	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
910 		return 0;
911 
912 	path = btrfs_alloc_path();
913 	if (!path)
914 		return -ENOMEM;
915 	path->reada = 2;
916 
917 	key.objectid = device->devid;
918 	key.offset = start;
919 	key.type = BTRFS_DEV_EXTENT_KEY;
920 
921 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
922 	if (ret < 0)
923 		goto out;
924 	if (ret > 0) {
925 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
926 		if (ret < 0)
927 			goto out;
928 	}
929 
930 	while (1) {
931 		l = path->nodes[0];
932 		slot = path->slots[0];
933 		if (slot >= btrfs_header_nritems(l)) {
934 			ret = btrfs_next_leaf(root, path);
935 			if (ret == 0)
936 				continue;
937 			if (ret < 0)
938 				goto out;
939 
940 			break;
941 		}
942 		btrfs_item_key_to_cpu(l, &key, slot);
943 
944 		if (key.objectid < device->devid)
945 			goto next;
946 
947 		if (key.objectid > device->devid)
948 			break;
949 
950 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
951 			goto next;
952 
953 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
954 		extent_end = key.offset + btrfs_dev_extent_length(l,
955 								  dev_extent);
956 		if (key.offset <= start && extent_end > end) {
957 			*length = end - start + 1;
958 			break;
959 		} else if (key.offset <= start && extent_end > start)
960 			*length += extent_end - start;
961 		else if (key.offset > start && extent_end <= end)
962 			*length += extent_end - key.offset;
963 		else if (key.offset > start && key.offset <= end) {
964 			*length += end - key.offset + 1;
965 			break;
966 		} else if (key.offset > end)
967 			break;
968 
969 next:
970 		path->slots[0]++;
971 	}
972 	ret = 0;
973 out:
974 	btrfs_free_path(path);
975 	return ret;
976 }
977 
978 /*
979  * find_free_dev_extent - find free space in the specified device
980  * @device:	the device in which we search for the free space
981  * @num_bytes:	the size of the free space that we need
982  * @start:	store the start of the free space
983  * @len:	the size of the free space that we find, or the size of the
984  * 		max free space if we don't find suitable free space
985  *
986  * This uses a pretty simple search; the expectation is that it is
987  * called very infrequently and that a given device has a small number
988  * of extents.
989  *
990  * @start is used to store the start of the free space if we find it. But
991  * if we don't find suitable free space, it is used to store the start
992  * position of the max free space.
993  *
994  * @len is used to store the size of the free space that we find.
995  * But if we don't find suitable free space, it is used to store the size of
996  * the max free space.
997  */
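/*
 * A minimal usage sketch, similar to what the chunk allocator does
 * (illustrative; error handling omitted):
 *
 *	u64 dev_offset;
 *	u64 max_avail;
 *
 *	ret = find_free_dev_extent(device, num_bytes,
 *				   &dev_offset, &max_avail);
 *	if (ret == 0)
 *		... num_bytes fit starting at dev_offset ...
 *	else if (ret == -ENOSPC)
 *		... only max_avail bytes fit, at dev_offset ...
 */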
998 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
999 			 u64 *start, u64 *len)
1000 {
1001 	struct btrfs_key key;
1002 	struct btrfs_root *root = device->dev_root;
1003 	struct btrfs_dev_extent *dev_extent;
1004 	struct btrfs_path *path;
1005 	u64 hole_size;
1006 	u64 max_hole_start;
1007 	u64 max_hole_size;
1008 	u64 extent_end;
1009 	u64 search_start;
1010 	u64 search_end = device->total_bytes;
1011 	int ret;
1012 	int slot;
1013 	struct extent_buffer *l;
1014 
1015 	/* FIXME use last free of some kind */
1016 
1017 	/* we don't want to overwrite the superblock on the drive,
1018 	 * so we make sure to start at an offset of at least 1MB
1019 	 */
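	/*
	 * e.g. with the default alloc_start of 0 this yields a search
	 * start of 1MiB, which keeps the primary super block at 64KiB
	 * well clear of allocations.
	 */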
1020 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
1021 
1022 	max_hole_start = search_start;
1023 	max_hole_size = 0;
1024 	hole_size = 0;
1025 
1026 	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
1027 		ret = -ENOSPC;
1028 		goto error;
1029 	}
1030 
1031 	path = btrfs_alloc_path();
1032 	if (!path) {
1033 		ret = -ENOMEM;
1034 		goto error;
1035 	}
1036 	path->reada = 2;
1037 
1038 	key.objectid = device->devid;
1039 	key.offset = search_start;
1040 	key.type = BTRFS_DEV_EXTENT_KEY;
1041 
1042 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1043 	if (ret < 0)
1044 		goto out;
1045 	if (ret > 0) {
1046 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
1047 		if (ret < 0)
1048 			goto out;
1049 	}
1050 
1051 	while (1) {
1052 		l = path->nodes[0];
1053 		slot = path->slots[0];
1054 		if (slot >= btrfs_header_nritems(l)) {
1055 			ret = btrfs_next_leaf(root, path);
1056 			if (ret == 0)
1057 				continue;
1058 			if (ret < 0)
1059 				goto out;
1060 
1061 			break;
1062 		}
1063 		btrfs_item_key_to_cpu(l, &key, slot);
1064 
1065 		if (key.objectid < device->devid)
1066 			goto next;
1067 
1068 		if (key.objectid > device->devid)
1069 			break;
1070 
1071 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
1072 			goto next;
1073 
1074 		if (key.offset > search_start) {
1075 			hole_size = key.offset - search_start;
1076 
1077 			if (hole_size > max_hole_size) {
1078 				max_hole_start = search_start;
1079 				max_hole_size = hole_size;
1080 			}
1081 
1082 			/*
1083 			 * If this free space is greater than what we need,
1084 			 * it must be the max free space that we have found
1085 			 * until now, so max_hole_start must point to the start
1086 			 * of this free space and the length of this free space
1087 			 * is stored in max_hole_size. Thus, we return
1088 			 * max_hole_start and max_hole_size and go back to the
1089 			 * caller.
1090 			 */
1091 			if (hole_size >= num_bytes) {
1092 				ret = 0;
1093 				goto out;
1094 			}
1095 		}
1096 
1097 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1098 		extent_end = key.offset + btrfs_dev_extent_length(l,
1099 								  dev_extent);
1100 		if (extent_end > search_start)
1101 			search_start = extent_end;
1102 next:
1103 		path->slots[0]++;
1104 		cond_resched();
1105 	}
1106 
1107 	/*
1108 	 * At this point, search_start should be the end of
1109 	 * allocated dev extents, and when shrinking the device,
1110 	 * search_end may be smaller than search_start.
1111 	 */
1112 	if (search_end > search_start)
1113 		hole_size = search_end - search_start;
1114 
1115 	if (hole_size > max_hole_size) {
1116 		max_hole_start = search_start;
1117 		max_hole_size = hole_size;
1118 	}
1119 
1120 	/* See above. */
1121 	if (hole_size < num_bytes)
1122 		ret = -ENOSPC;
1123 	else
1124 		ret = 0;
1125 
1126 out:
1127 	btrfs_free_path(path);
1128 error:
1129 	*start = max_hole_start;
1130 	if (len)
1131 		*len = max_hole_size;
1132 	return ret;
1133 }
1134 
1135 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1136 			  struct btrfs_device *device,
1137 			  u64 start)
1138 {
1139 	int ret;
1140 	struct btrfs_path *path;
1141 	struct btrfs_root *root = device->dev_root;
1142 	struct btrfs_key key;
1143 	struct btrfs_key found_key;
1144 	struct extent_buffer *leaf = NULL;
1145 	struct btrfs_dev_extent *extent = NULL;
1146 
1147 	path = btrfs_alloc_path();
1148 	if (!path)
1149 		return -ENOMEM;
1150 
1151 	key.objectid = device->devid;
1152 	key.offset = start;
1153 	key.type = BTRFS_DEV_EXTENT_KEY;
1154 again:
1155 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1156 	if (ret > 0) {
1157 		ret = btrfs_previous_item(root, path, key.objectid,
1158 					  BTRFS_DEV_EXTENT_KEY);
1159 		if (ret)
1160 			goto out;
1161 		leaf = path->nodes[0];
1162 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1163 		extent = btrfs_item_ptr(leaf, path->slots[0],
1164 					struct btrfs_dev_extent);
1165 		BUG_ON(found_key.offset > start || found_key.offset +
1166 		       btrfs_dev_extent_length(leaf, extent) < start);
1167 		key = found_key;
1168 		btrfs_release_path(path);
1169 		goto again;
1170 	} else if (ret == 0) {
1171 		leaf = path->nodes[0];
1172 		extent = btrfs_item_ptr(leaf, path->slots[0],
1173 					struct btrfs_dev_extent);
1174 	} else {
1175 		btrfs_error(root->fs_info, ret, "Slot search failed");
1176 		goto out;
1177 	}
1178 
1179 	if (device->bytes_used > 0) {
1180 		u64 len = btrfs_dev_extent_length(leaf, extent);
1181 		device->bytes_used -= len;
1182 		spin_lock(&root->fs_info->free_chunk_lock);
1183 		root->fs_info->free_chunk_space += len;
1184 		spin_unlock(&root->fs_info->free_chunk_lock);
1185 	}
1186 	ret = btrfs_del_item(trans, root, path);
1187 	if (ret) {
1188 		btrfs_error(root->fs_info, ret,
1189 			    "Failed to remove dev extent item");
1190 	}
1191 out:
1192 	btrfs_free_path(path);
1193 	return ret;
1194 }
1195 
1196 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1197 			   struct btrfs_device *device,
1198 			   u64 chunk_tree, u64 chunk_objectid,
1199 			   u64 chunk_offset, u64 start, u64 num_bytes)
1200 {
1201 	int ret;
1202 	struct btrfs_path *path;
1203 	struct btrfs_root *root = device->dev_root;
1204 	struct btrfs_dev_extent *extent;
1205 	struct extent_buffer *leaf;
1206 	struct btrfs_key key;
1207 
1208 	WARN_ON(!device->in_fs_metadata);
1209 	WARN_ON(device->is_tgtdev_for_dev_replace);
1210 	path = btrfs_alloc_path();
1211 	if (!path)
1212 		return -ENOMEM;
1213 
1214 	key.objectid = device->devid;
1215 	key.offset = start;
1216 	key.type = BTRFS_DEV_EXTENT_KEY;
1217 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1218 				      sizeof(*extent));
1219 	if (ret)
1220 		goto out;
1221 
1222 	leaf = path->nodes[0];
1223 	extent = btrfs_item_ptr(leaf, path->slots[0],
1224 				struct btrfs_dev_extent);
1225 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1226 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1227 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1228 
1229 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1230 		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1231 		    BTRFS_UUID_SIZE);
1232 
1233 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1234 	btrfs_mark_buffer_dirty(leaf);
1235 out:
1236 	btrfs_free_path(path);
1237 	return ret;
1238 }
1239 
1240 static noinline int find_next_chunk(struct btrfs_root *root,
1241 				    u64 objectid, u64 *offset)
1242 {
1243 	struct btrfs_path *path;
1244 	int ret;
1245 	struct btrfs_key key;
1246 	struct btrfs_chunk *chunk;
1247 	struct btrfs_key found_key;
1248 
1249 	path = btrfs_alloc_path();
1250 	if (!path)
1251 		return -ENOMEM;
1252 
1253 	key.objectid = objectid;
1254 	key.offset = (u64)-1;
1255 	key.type = BTRFS_CHUNK_ITEM_KEY;
1256 
1257 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1258 	if (ret < 0)
1259 		goto error;
1260 
1261 	BUG_ON(ret == 0); /* Corruption */
1262 
1263 	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1264 	if (ret) {
1265 		*offset = 0;
1266 	} else {
1267 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1268 				      path->slots[0]);
1269 		if (found_key.objectid != objectid)
1270 			*offset = 0;
1271 		else {
1272 			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1273 					       struct btrfs_chunk);
1274 			*offset = found_key.offset +
1275 				btrfs_chunk_length(path->nodes[0], chunk);
1276 		}
1277 	}
1278 	ret = 0;
1279 error:
1280 	btrfs_free_path(path);
1281 	return ret;
1282 }
1283 
1284 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1285 {
1286 	int ret;
1287 	struct btrfs_key key;
1288 	struct btrfs_key found_key;
1289 	struct btrfs_path *path;
1290 
1291 	root = root->fs_info->chunk_root;
1292 
1293 	path = btrfs_alloc_path();
1294 	if (!path)
1295 		return -ENOMEM;
1296 
1297 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1298 	key.type = BTRFS_DEV_ITEM_KEY;
1299 	key.offset = (u64)-1;
1300 
1301 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1302 	if (ret < 0)
1303 		goto error;
1304 
1305 	BUG_ON(ret == 0); /* Corruption */
1306 
1307 	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1308 				  BTRFS_DEV_ITEM_KEY);
1309 	if (ret) {
1310 		*objectid = 1;
1311 	} else {
1312 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1313 				      path->slots[0]);
1314 		*objectid = found_key.offset + 1;
1315 	}
1316 	ret = 0;
1317 error:
1318 	btrfs_free_path(path);
1319 	return ret;
1320 }
1321 
1322 /*
1323  * the device information is stored in the chunk root
1324  * the btrfs_device struct should be fully filled in
1325  */
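/*
 * The resulting item is keyed as:
 *
 *	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 *	key.type     = BTRFS_DEV_ITEM_KEY;
 *	key.offset   = device->devid;
 *
 * so all device items sort together in the chunk tree, ordered by devid.
 */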
1326 int btrfs_add_device(struct btrfs_trans_handle *trans,
1327 		     struct btrfs_root *root,
1328 		     struct btrfs_device *device)
1329 {
1330 	int ret;
1331 	struct btrfs_path *path;
1332 	struct btrfs_dev_item *dev_item;
1333 	struct extent_buffer *leaf;
1334 	struct btrfs_key key;
1335 	unsigned long ptr;
1336 
1337 	root = root->fs_info->chunk_root;
1338 
1339 	path = btrfs_alloc_path();
1340 	if (!path)
1341 		return -ENOMEM;
1342 
1343 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1344 	key.type = BTRFS_DEV_ITEM_KEY;
1345 	key.offset = device->devid;
1346 
1347 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1348 				      sizeof(*dev_item));
1349 	if (ret)
1350 		goto out;
1351 
1352 	leaf = path->nodes[0];
1353 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1354 
1355 	btrfs_set_device_id(leaf, dev_item, device->devid);
1356 	btrfs_set_device_generation(leaf, dev_item, 0);
1357 	btrfs_set_device_type(leaf, dev_item, device->type);
1358 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1359 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1360 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1361 	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1362 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1363 	btrfs_set_device_group(leaf, dev_item, 0);
1364 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1365 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1366 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1367 
1368 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
1369 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1370 	ptr = (unsigned long)btrfs_device_fsid(dev_item);
1371 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1372 	btrfs_mark_buffer_dirty(leaf);
1373 
1374 	ret = 0;
1375 out:
1376 	btrfs_free_path(path);
1377 	return ret;
1378 }
1379 
1380 static int btrfs_rm_dev_item(struct btrfs_root *root,
1381 			     struct btrfs_device *device)
1382 {
1383 	int ret;
1384 	struct btrfs_path *path;
1385 	struct btrfs_key key;
1386 	struct btrfs_trans_handle *trans;
1387 
1388 	root = root->fs_info->chunk_root;
1389 
1390 	path = btrfs_alloc_path();
1391 	if (!path)
1392 		return -ENOMEM;
1393 
1394 	trans = btrfs_start_transaction(root, 0);
1395 	if (IS_ERR(trans)) {
1396 		btrfs_free_path(path);
1397 		return PTR_ERR(trans);
1398 	}
1399 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1400 	key.type = BTRFS_DEV_ITEM_KEY;
1401 	key.offset = device->devid;
1402 	lock_chunks(root);
1403 
1404 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1405 	if (ret < 0)
1406 		goto out;
1407 
1408 	if (ret > 0) {
1409 		ret = -ENOENT;
1410 		goto out;
1411 	}
1412 
1413 	ret = btrfs_del_item(trans, root, path);
1414 	if (ret)
1415 		goto out;
1416 out:
1417 	btrfs_free_path(path);
1418 	unlock_chunks(root);
1419 	btrfs_commit_transaction(trans, root);
1420 	return ret;
1421 }
1422 
1423 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1424 {
1425 	struct btrfs_device *device;
1426 	struct btrfs_device *next_device;
1427 	struct block_device *bdev;
1428 	struct buffer_head *bh = NULL;
1429 	struct btrfs_super_block *disk_super;
1430 	struct btrfs_fs_devices *cur_devices;
1431 	u64 all_avail;
1432 	u64 devid;
1433 	u64 num_devices;
1434 	u8 *dev_uuid;
1435 	unsigned seq;
1436 	int ret = 0;
1437 	bool clear_super = false;
1438 
1439 	mutex_lock(&uuid_mutex);
1440 
1441 	do {
1442 		seq = read_seqbegin(&root->fs_info->profiles_lock);
1443 
1444 		all_avail = root->fs_info->avail_data_alloc_bits |
1445 			    root->fs_info->avail_system_alloc_bits |
1446 			    root->fs_info->avail_metadata_alloc_bits;
1447 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
1448 
1449 	num_devices = root->fs_info->fs_devices->num_devices;
1450 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1451 	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1452 		WARN_ON(num_devices < 1);
1453 		num_devices--;
1454 	}
1455 	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1456 
1457 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1458 		printk(KERN_ERR "btrfs: unable to go below four devices "
1459 		       "on raid10\n");
1460 		ret = -EINVAL;
1461 		goto out;
1462 	}
1463 
1464 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1465 		printk(KERN_ERR "btrfs: unable to go below two "
1466 		       "devices on raid1\n");
1467 		ret = -EINVAL;
1468 		goto out;
1469 	}
1470 
1471 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1472 	    root->fs_info->fs_devices->rw_devices <= 2) {
1473 		printk(KERN_ERR "btrfs: unable to go below two "
1474 		       "devices on raid5\n");
1475 		ret = -EINVAL;
1476 		goto out;
1477 	}
1478 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1479 	    root->fs_info->fs_devices->rw_devices <= 3) {
1480 		printk(KERN_ERR "btrfs: unable to go below three "
1481 		       "devices on raid6\n");
1482 		ret = -EINVAL;
1483 		goto out;
1484 	}
1485 
1486 	if (strcmp(device_path, "missing") == 0) {
1487 		struct list_head *devices;
1488 		struct btrfs_device *tmp;
1489 
1490 		device = NULL;
1491 		devices = &root->fs_info->fs_devices->devices;
1492 		/*
1493 		 * It is safe to read the devices since the volume_mutex
1494 		 * is held.
1495 		 */
1496 		list_for_each_entry(tmp, devices, dev_list) {
1497 			if (tmp->in_fs_metadata &&
1498 			    !tmp->is_tgtdev_for_dev_replace &&
1499 			    !tmp->bdev) {
1500 				device = tmp;
1501 				break;
1502 			}
1503 		}
1504 		bdev = NULL;
1505 		bh = NULL;
1506 		disk_super = NULL;
1507 		if (!device) {
1508 			printk(KERN_ERR "btrfs: no missing devices found to "
1509 			       "remove\n");
1510 			goto out;
1511 		}
1512 	} else {
1513 		ret = btrfs_get_bdev_and_sb(device_path,
1514 					    FMODE_WRITE | FMODE_EXCL,
1515 					    root->fs_info->bdev_holder, 0,
1516 					    &bdev, &bh);
1517 		if (ret)
1518 			goto out;
1519 		disk_super = (struct btrfs_super_block *)bh->b_data;
1520 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1521 		dev_uuid = disk_super->dev_item.uuid;
1522 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1523 					   disk_super->fsid);
1524 		if (!device) {
1525 			ret = -ENOENT;
1526 			goto error_brelse;
1527 		}
1528 	}
1529 
1530 	if (device->is_tgtdev_for_dev_replace) {
1531 		pr_err("btrfs: unable to remove the dev_replace target dev\n");
1532 		ret = -EINVAL;
1533 		goto error_brelse;
1534 	}
1535 
1536 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1537 		printk(KERN_ERR "btrfs: unable to remove the only writeable "
1538 		       "device\n");
1539 		ret = -EINVAL;
1540 		goto error_brelse;
1541 	}
1542 
1543 	if (device->writeable) {
1544 		lock_chunks(root);
1545 		list_del_init(&device->dev_alloc_list);
1546 		unlock_chunks(root);
1547 		root->fs_info->fs_devices->rw_devices--;
1548 		clear_super = true;
1549 	}
1550 
1551 	ret = btrfs_shrink_device(device, 0);
1552 	if (ret)
1553 		goto error_undo;
1554 
1555 	/*
1556 	 * TODO: the superblock still includes this device in its num_devices
1557 	 * counter although write_all_supers() is not locked out. This
1558 	 * could give a filesystem state which requires a degraded mount.
1559 	 */
1560 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1561 	if (ret)
1562 		goto error_undo;
1563 
1564 	spin_lock(&root->fs_info->free_chunk_lock);
1565 	root->fs_info->free_chunk_space = device->total_bytes -
1566 		device->bytes_used;
1567 	spin_unlock(&root->fs_info->free_chunk_lock);
1568 
1569 	device->in_fs_metadata = 0;
1570 	btrfs_scrub_cancel_dev(root->fs_info, device);
1571 
1572 	/*
1573 	 * the device list mutex makes sure that we don't change
1574 	 * the device list while someone else is writing out all
1575 	 * the device supers.
1576 	 */
1577 
1578 	cur_devices = device->fs_devices;
1579 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1580 	list_del_rcu(&device->dev_list);
1581 
1582 	device->fs_devices->num_devices--;
1583 	device->fs_devices->total_devices--;
1584 
1585 	if (device->missing)
1586 		root->fs_info->fs_devices->missing_devices--;
1587 
1588 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1589 				 struct btrfs_device, dev_list);
1590 	if (device->bdev == root->fs_info->sb->s_bdev)
1591 		root->fs_info->sb->s_bdev = next_device->bdev;
1592 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1593 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1594 
1595 	if (device->bdev)
1596 		device->fs_devices->open_devices--;
1597 
1598 	call_rcu(&device->rcu, free_device);
1599 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1600 
1601 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1602 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1603 
1604 	if (cur_devices->open_devices == 0) {
1605 		struct btrfs_fs_devices *fs_devices;
1606 		fs_devices = root->fs_info->fs_devices;
1607 		while (fs_devices) {
1608 			if (fs_devices->seed == cur_devices)
1609 				break;
1610 			fs_devices = fs_devices->seed;
1611 		}
1612 		fs_devices->seed = cur_devices->seed;
1613 		cur_devices->seed = NULL;
1614 		lock_chunks(root);
1615 		__btrfs_close_devices(cur_devices);
1616 		unlock_chunks(root);
1617 		free_fs_devices(cur_devices);
1618 	}
1619 
1620 	root->fs_info->num_tolerated_disk_barrier_failures =
1621 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1622 
1623 	/*
1624 	 * at this point, the device is zero sized.  We want to
1625 	 * remove it from the devices list and zero out the old super
1626 	 */
1627 	if (clear_super && disk_super) {
1628 		/* make sure this device isn't detected as part of
1629 		 * the FS anymore
1630 		 */
1631 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1632 		set_buffer_dirty(bh);
1633 		sync_dirty_buffer(bh);
1634 	}
1635 
1636 	ret = 0;
1637 
1638 	/* Notify udev that device has changed */
1639 	if (bdev)
1640 		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1641 
1642 error_brelse:
1643 	brelse(bh);
1644 	if (bdev)
1645 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1646 out:
1647 	mutex_unlock(&uuid_mutex);
1648 	return ret;
1649 error_undo:
1650 	if (device->writeable) {
1651 		lock_chunks(root);
1652 		list_add(&device->dev_alloc_list,
1653 			 &root->fs_info->fs_devices->alloc_list);
1654 		unlock_chunks(root);
1655 		root->fs_info->fs_devices->rw_devices++;
1656 	}
1657 	goto error_brelse;
1658 }
1659 
1660 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1661 				 struct btrfs_device *srcdev)
1662 {
1663 	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1664 	list_del_rcu(&srcdev->dev_list);
1665 	list_del_rcu(&srcdev->dev_alloc_list);
1666 	fs_info->fs_devices->num_devices--;
1667 	if (srcdev->missing) {
1668 		fs_info->fs_devices->missing_devices--;
1669 		fs_info->fs_devices->rw_devices++;
1670 	}
1671 	if (srcdev->can_discard)
1672 		fs_info->fs_devices->num_can_discard--;
1673 	if (srcdev->bdev)
1674 		fs_info->fs_devices->open_devices--;
1675 
1676 	call_rcu(&srcdev->rcu, free_device);
1677 }
1678 
1679 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1680 				      struct btrfs_device *tgtdev)
1681 {
1682 	struct btrfs_device *next_device;
1683 
1684 	WARN_ON(!tgtdev);
1685 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
1686 	if (tgtdev->bdev) {
1687 		btrfs_scratch_superblock(tgtdev);
1688 		fs_info->fs_devices->open_devices--;
1689 	}
1690 	fs_info->fs_devices->num_devices--;
1691 	if (tgtdev->can_discard)
1692 		fs_info->fs_devices->num_can_discard--;
1693 
1694 	next_device = list_entry(fs_info->fs_devices->devices.next,
1695 				 struct btrfs_device, dev_list);
1696 	if (tgtdev->bdev == fs_info->sb->s_bdev)
1697 		fs_info->sb->s_bdev = next_device->bdev;
1698 	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1699 		fs_info->fs_devices->latest_bdev = next_device->bdev;
1700 	list_del_rcu(&tgtdev->dev_list);
1701 
1702 	call_rcu(&tgtdev->rcu, free_device);
1703 
1704 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1705 }
1706 
1707 int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1708 			      struct btrfs_device **device)
1709 {
1710 	int ret = 0;
1711 	struct btrfs_super_block *disk_super;
1712 	u64 devid;
1713 	u8 *dev_uuid;
1714 	struct block_device *bdev;
1715 	struct buffer_head *bh;
1716 
1717 	*device = NULL;
1718 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1719 				    root->fs_info->bdev_holder, 0, &bdev, &bh);
1720 	if (ret)
1721 		return ret;
1722 	disk_super = (struct btrfs_super_block *)bh->b_data;
1723 	devid = btrfs_stack_device_id(&disk_super->dev_item);
1724 	dev_uuid = disk_super->dev_item.uuid;
1725 	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1726 				    disk_super->fsid);
1727 	brelse(bh);
1728 	if (!*device)
1729 		ret = -ENOENT;
1730 	blkdev_put(bdev, FMODE_READ);
1731 	return ret;
1732 }
1733 
1734 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1735 					 char *device_path,
1736 					 struct btrfs_device **device)
1737 {
1738 	*device = NULL;
1739 	if (strcmp(device_path, "missing") == 0) {
1740 		struct list_head *devices;
1741 		struct btrfs_device *tmp;
1742 
1743 		devices = &root->fs_info->fs_devices->devices;
1744 		/*
1745 		 * It is safe to read the devices since the volume_mutex
1746 		 * is held by the caller.
1747 		 */
1748 		list_for_each_entry(tmp, devices, dev_list) {
1749 			if (tmp->in_fs_metadata && !tmp->bdev) {
1750 				*device = tmp;
1751 				break;
1752 			}
1753 		}
1754 
1755 		if (!*device) {
1756 			pr_err("btrfs: no missing device found\n");
1757 			return -ENOENT;
1758 		}
1759 
1760 		return 0;
1761 	} else {
1762 		return btrfs_find_device_by_path(root, device_path, device);
1763 	}
1764 }
1765 
1766 /*
1767  * does all the dirty work required for changing the file system's UUID.
1768  */
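/*
 * Roughly, the transition looks like this (illustrative):
 *
 *	before:	fs_devices (fsid A, seeding) -> [dev1, dev2, ...]
 *	after:	fs_devices (fresh fsid B, rw) -> []
 *		    `-> seed = seed_devices (fsid A) -> [dev1, dev2, ...]
 *
 * The existing devices are moved onto a new seed_devices struct, the
 * current fs_devices gets a freshly generated fsid, and the SEEDING
 * flag is cleared from the super block so the sprout is writable.
 */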
1769 static int btrfs_prepare_sprout(struct btrfs_root *root)
1770 {
1771 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1772 	struct btrfs_fs_devices *old_devices;
1773 	struct btrfs_fs_devices *seed_devices;
1774 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1775 	struct btrfs_device *device;
1776 	u64 super_flags;
1777 
1778 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1779 	if (!fs_devices->seeding)
1780 		return -EINVAL;
1781 
1782 	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1783 	if (!seed_devices)
1784 		return -ENOMEM;
1785 
1786 	old_devices = clone_fs_devices(fs_devices);
1787 	if (IS_ERR(old_devices)) {
1788 		kfree(seed_devices);
1789 		return PTR_ERR(old_devices);
1790 	}
1791 
1792 	list_add(&old_devices->list, &fs_uuids);
1793 
1794 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1795 	seed_devices->opened = 1;
1796 	INIT_LIST_HEAD(&seed_devices->devices);
1797 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1798 	mutex_init(&seed_devices->device_list_mutex);
1799 
1800 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1801 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1802 			      synchronize_rcu);
1803 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1804 
1805 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1806 	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1807 		device->fs_devices = seed_devices;
1808 	}
1809 
1810 	fs_devices->seeding = 0;
1811 	fs_devices->num_devices = 0;
1812 	fs_devices->open_devices = 0;
1813 	fs_devices->total_devices = 0;
1814 	fs_devices->seed = seed_devices;
1815 
1816 	generate_random_uuid(fs_devices->fsid);
1817 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1818 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1819 	super_flags = btrfs_super_flags(disk_super) &
1820 		      ~BTRFS_SUPER_FLAG_SEEDING;
1821 	btrfs_set_super_flags(disk_super, super_flags);
1822 
1823 	return 0;
1824 }
1825 
1826 /*
1827  * store the expected generation for seed devices in device items.
1828  */
1829 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1830 			       struct btrfs_root *root)
1831 {
1832 	struct btrfs_path *path;
1833 	struct extent_buffer *leaf;
1834 	struct btrfs_dev_item *dev_item;
1835 	struct btrfs_device *device;
1836 	struct btrfs_key key;
1837 	u8 fs_uuid[BTRFS_UUID_SIZE];
1838 	u8 dev_uuid[BTRFS_UUID_SIZE];
1839 	u64 devid;
1840 	int ret;
1841 
1842 	path = btrfs_alloc_path();
1843 	if (!path)
1844 		return -ENOMEM;
1845 
1846 	root = root->fs_info->chunk_root;
1847 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1848 	key.offset = 0;
1849 	key.type = BTRFS_DEV_ITEM_KEY;
1850 
1851 	while (1) {
1852 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1853 		if (ret < 0)
1854 			goto error;
1855 
1856 		leaf = path->nodes[0];
1857 next_slot:
1858 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1859 			ret = btrfs_next_leaf(root, path);
1860 			if (ret > 0)
1861 				break;
1862 			if (ret < 0)
1863 				goto error;
1864 			leaf = path->nodes[0];
1865 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1866 			btrfs_release_path(path);
1867 			continue;
1868 		}
1869 
1870 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1871 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1872 		    key.type != BTRFS_DEV_ITEM_KEY)
1873 			break;
1874 
1875 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1876 					  struct btrfs_dev_item);
1877 		devid = btrfs_device_id(leaf, dev_item);
1878 		read_extent_buffer(leaf, dev_uuid,
1879 				   (unsigned long)btrfs_device_uuid(dev_item),
1880 				   BTRFS_UUID_SIZE);
1881 		read_extent_buffer(leaf, fs_uuid,
1882 				   (unsigned long)btrfs_device_fsid(dev_item),
1883 				   BTRFS_UUID_SIZE);
1884 		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1885 					   fs_uuid);
1886 		BUG_ON(!device); /* Logic error */
1887 
1888 		if (device->fs_devices->seeding) {
1889 			btrfs_set_device_generation(leaf, dev_item,
1890 						    device->generation);
1891 			btrfs_mark_buffer_dirty(leaf);
1892 		}
1893 
1894 		path->slots[0]++;
1895 		goto next_slot;
1896 	}
1897 	ret = 0;
1898 error:
1899 	btrfs_free_path(path);
1900 	return ret;
1901 }
1902 
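/*
 * Add a new device to a mounted filesystem: open the block device
 * exclusively, build and initialize a btrfs_device for it, wire it
 * into the fs_devices lists and the superblock totals, and persist
 * it in a transaction.  If the filesystem is a seed, the device is
 * used to sprout a new writable filesystem on top of the seed (see
 * btrfs_prepare_sprout() above).
 */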
1903 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1904 {
1905 	struct request_queue *q;
1906 	struct btrfs_trans_handle *trans;
1907 	struct btrfs_device *device;
1908 	struct block_device *bdev;
1909 	struct list_head *devices;
1910 	struct super_block *sb = root->fs_info->sb;
1911 	struct rcu_string *name;
1912 	u64 total_bytes;
1913 	int seeding_dev = 0;
1914 	int ret = 0;
1915 
1916 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1917 		return -EROFS;
1918 
1919 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1920 				  root->fs_info->bdev_holder);
1921 	if (IS_ERR(bdev))
1922 		return PTR_ERR(bdev);
1923 
1924 	if (root->fs_info->fs_devices->seeding) {
1925 		seeding_dev = 1;
1926 		down_write(&sb->s_umount);
1927 		mutex_lock(&uuid_mutex);
1928 	}
1929 
1930 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1931 
1932 	devices = &root->fs_info->fs_devices->devices;
1933 
1934 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1935 	list_for_each_entry(device, devices, dev_list) {
1936 		if (device->bdev == bdev) {
1937 			ret = -EEXIST;
1938 			mutex_unlock(
1939 				&root->fs_info->fs_devices->device_list_mutex);
1940 			goto error;
1941 		}
1942 	}
1943 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1944 
1945 	device = kzalloc(sizeof(*device), GFP_NOFS);
1946 	if (!device) {
1947 		/* we can safely leave the fs_devices entry around */
1948 		ret = -ENOMEM;
1949 		goto error;
1950 	}
1951 
1952 	name = rcu_string_strdup(device_path, GFP_NOFS);
1953 	if (!name) {
1954 		kfree(device);
1955 		ret = -ENOMEM;
1956 		goto error;
1957 	}
1958 	rcu_assign_pointer(device->name, name);
1959 
1960 	ret = find_next_devid(root, &device->devid);
1961 	if (ret) {
1962 		rcu_string_free(device->name);
1963 		kfree(device);
1964 		goto error;
1965 	}
1966 
1967 	trans = btrfs_start_transaction(root, 0);
1968 	if (IS_ERR(trans)) {
1969 		rcu_string_free(device->name);
1970 		kfree(device);
1971 		ret = PTR_ERR(trans);
1972 		goto error;
1973 	}
1974 
1975 	lock_chunks(root);
1976 
1977 	q = bdev_get_queue(bdev);
1978 	if (blk_queue_discard(q))
1979 		device->can_discard = 1;
1980 	device->writeable = 1;
1981 	device->work.func = pending_bios_fn;
1982 	generate_random_uuid(device->uuid);
1983 	spin_lock_init(&device->io_lock);
1984 	device->generation = trans->transid;
1985 	device->io_width = root->sectorsize;
1986 	device->io_align = root->sectorsize;
1987 	device->sector_size = root->sectorsize;
1988 	device->total_bytes = i_size_read(bdev->bd_inode);
1989 	device->disk_total_bytes = device->total_bytes;
1990 	device->dev_root = root->fs_info->dev_root;
1991 	device->bdev = bdev;
1992 	device->in_fs_metadata = 1;
1993 	device->is_tgtdev_for_dev_replace = 0;
1994 	device->mode = FMODE_EXCL;
1995 	set_blocksize(device->bdev, 4096);
1996 
1997 	if (seeding_dev) {
1998 		sb->s_flags &= ~MS_RDONLY;
1999 		ret = btrfs_prepare_sprout(root);
2000 		BUG_ON(ret); /* -ENOMEM */
2001 	}
2002 
2003 	device->fs_devices = root->fs_info->fs_devices;
2004 
2005 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2006 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2007 	list_add(&device->dev_alloc_list,
2008 		 &root->fs_info->fs_devices->alloc_list);
2009 	root->fs_info->fs_devices->num_devices++;
2010 	root->fs_info->fs_devices->open_devices++;
2011 	root->fs_info->fs_devices->rw_devices++;
2012 	root->fs_info->fs_devices->total_devices++;
2013 	if (device->can_discard)
2014 		root->fs_info->fs_devices->num_can_discard++;
2015 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2016 
2017 	spin_lock(&root->fs_info->free_chunk_lock);
2018 	root->fs_info->free_chunk_space += device->total_bytes;
2019 	spin_unlock(&root->fs_info->free_chunk_lock);
2020 
2021 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2022 		root->fs_info->fs_devices->rotating = 1;
2023 
2024 	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2025 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
2026 				    total_bytes + device->total_bytes);
2027 
2028 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2029 	btrfs_set_super_num_devices(root->fs_info->super_copy,
2030 				    total_bytes + 1);
2031 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2032 
2033 	if (seeding_dev) {
2034 		ret = init_first_rw_device(trans, root, device);
2035 		if (ret) {
2036 			btrfs_abort_transaction(trans, root, ret);
2037 			goto error_trans;
2038 		}
2039 		ret = btrfs_finish_sprout(trans, root);
2040 		if (ret) {
2041 			btrfs_abort_transaction(trans, root, ret);
2042 			goto error_trans;
2043 		}
2044 	} else {
2045 		ret = btrfs_add_device(trans, root, device);
2046 		if (ret) {
2047 			btrfs_abort_transaction(trans, root, ret);
2048 			goto error_trans;
2049 		}
2050 	}
2051 
2052 	/*
2053 	 * we've got more storage, clear any full flags on the space
2054 	 * infos
2055 	 */
2056 	btrfs_clear_space_info_full(root->fs_info);
2057 
2058 	unlock_chunks(root);
2059 	root->fs_info->num_tolerated_disk_barrier_failures =
2060 		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2061 	ret = btrfs_commit_transaction(trans, root);
2062 
2063 	if (seeding_dev) {
2064 		mutex_unlock(&uuid_mutex);
2065 		up_write(&sb->s_umount);
2066 
2067 		if (ret) /* transaction commit */
2068 			return ret;
2069 
2070 		ret = btrfs_relocate_sys_chunks(root);
2071 		if (ret < 0)
2072 			btrfs_error(root->fs_info, ret,
2073 				    "Failed to relocate sys chunks after "
2074 				    "device initialization. This can be fixed "
2075 				    "using the \"btrfs balance\" command.");
2076 		trans = btrfs_attach_transaction(root);
2077 		if (IS_ERR(trans)) {
2078 			if (PTR_ERR(trans) == -ENOENT)
2079 				return 0;
2080 			return PTR_ERR(trans);
2081 		}
2082 		ret = btrfs_commit_transaction(trans, root);
2083 	}
2084 
2085 	return ret;
2086 
2087 error_trans:
2088 	unlock_chunks(root);
2089 	btrfs_end_transaction(trans, root);
2090 	rcu_string_free(device->name);
2091 	kfree(device);
2092 error:
2093 	blkdev_put(bdev, FMODE_EXCL);
2094 	if (seeding_dev) {
2095 		mutex_unlock(&uuid_mutex);
2096 		up_write(&sb->s_umount);
2097 	}
2098 	return ret;
2099 }
2100 
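/*
 * Initialize the target device of a device-replace operation.  The
 * target is opened exclusively, given the reserved devid
 * BTRFS_DEV_REPLACE_DEVID and linked into the device list, but no
 * dev item is inserted into the chunk tree; the dev-replace code
 * takes over from here.
 */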
2101 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2102 				  struct btrfs_device **device_out)
2103 {
2104 	struct request_queue *q;
2105 	struct btrfs_device *device;
2106 	struct block_device *bdev;
2107 	struct btrfs_fs_info *fs_info = root->fs_info;
2108 	struct list_head *devices;
2109 	struct rcu_string *name;
2110 	int ret = 0;
2111 
2112 	*device_out = NULL;
2113 	if (fs_info->fs_devices->seeding)
2114 		return -EINVAL;
2115 
2116 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2117 				  fs_info->bdev_holder);
2118 	if (IS_ERR(bdev))
2119 		return PTR_ERR(bdev);
2120 
2121 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
2122 
2123 	devices = &fs_info->fs_devices->devices;
2124 	list_for_each_entry(device, devices, dev_list) {
2125 		if (device->bdev == bdev) {
2126 			ret = -EEXIST;
2127 			goto error;
2128 		}
2129 	}
2130 
2131 	device = kzalloc(sizeof(*device), GFP_NOFS);
2132 	if (!device) {
2133 		ret = -ENOMEM;
2134 		goto error;
2135 	}
2136 
2137 	name = rcu_string_strdup(device_path, GFP_NOFS);
2138 	if (!name) {
2139 		kfree(device);
2140 		ret = -ENOMEM;
2141 		goto error;
2142 	}
2143 	rcu_assign_pointer(device->name, name);
2144 
2145 	q = bdev_get_queue(bdev);
2146 	if (blk_queue_discard(q))
2147 		device->can_discard = 1;
2148 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2149 	device->writeable = 1;
2150 	device->work.func = pending_bios_fn;
2151 	generate_random_uuid(device->uuid);
2152 	device->devid = BTRFS_DEV_REPLACE_DEVID;
2153 	spin_lock_init(&device->io_lock);
2154 	device->generation = 0;
2155 	device->io_width = root->sectorsize;
2156 	device->io_align = root->sectorsize;
2157 	device->sector_size = root->sectorsize;
2158 	device->total_bytes = i_size_read(bdev->bd_inode);
2159 	device->disk_total_bytes = device->total_bytes;
2160 	device->dev_root = fs_info->dev_root;
2161 	device->bdev = bdev;
2162 	device->in_fs_metadata = 1;
2163 	device->is_tgtdev_for_dev_replace = 1;
2164 	device->mode = FMODE_EXCL;
2165 	set_blocksize(device->bdev, 4096);
2166 	device->fs_devices = fs_info->fs_devices;
2167 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
2168 	fs_info->fs_devices->num_devices++;
2169 	fs_info->fs_devices->open_devices++;
2170 	if (device->can_discard)
2171 		fs_info->fs_devices->num_can_discard++;
2172 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2173 
2174 	*device_out = device;
2175 	return ret;
2176 
2177 error:
2178 	blkdev_put(bdev, FMODE_EXCL);
2179 	return ret;
2180 }
2181 
2182 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2183 					      struct btrfs_device *tgtdev)
2184 {
2185 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
2186 	tgtdev->io_width = fs_info->dev_root->sectorsize;
2187 	tgtdev->io_align = fs_info->dev_root->sectorsize;
2188 	tgtdev->sector_size = fs_info->dev_root->sectorsize;
2189 	tgtdev->dev_root = fs_info->dev_root;
2190 	tgtdev->in_fs_metadata = 1;
2191 }
2192 
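/*
 * Write the in-memory fields of @device back into its dev item in
 * the chunk tree.  Note that the on-disk size is taken from
 * disk_total_bytes rather than total_bytes, so a pending shrink only
 * reaches disk once the caller has updated disk_total_bytes (see
 * btrfs_shrink_device() below).
 */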
2193 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2194 					struct btrfs_device *device)
2195 {
2196 	int ret;
2197 	struct btrfs_path *path;
2198 	struct btrfs_root *root;
2199 	struct btrfs_dev_item *dev_item;
2200 	struct extent_buffer *leaf;
2201 	struct btrfs_key key;
2202 
2203 	root = device->dev_root->fs_info->chunk_root;
2204 
2205 	path = btrfs_alloc_path();
2206 	if (!path)
2207 		return -ENOMEM;
2208 
2209 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2210 	key.type = BTRFS_DEV_ITEM_KEY;
2211 	key.offset = device->devid;
2212 
2213 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2214 	if (ret < 0)
2215 		goto out;
2216 
2217 	if (ret > 0) {
2218 		ret = -ENOENT;
2219 		goto out;
2220 	}
2221 
2222 	leaf = path->nodes[0];
2223 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2224 
2225 	btrfs_set_device_id(leaf, dev_item, device->devid);
2226 	btrfs_set_device_type(leaf, dev_item, device->type);
2227 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2228 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2229 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2230 	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2231 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2232 	btrfs_mark_buffer_dirty(leaf);
2233 
2234 out:
2235 	btrfs_free_path(path);
2236 	return ret;
2237 }
2238 
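/*
 * Grow @device to @new_size.  Must be called with the chunk mutex
 * held; the exported btrfs_grow_device() wrapper below takes it.
 */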
2239 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2240 		      struct btrfs_device *device, u64 new_size)
2241 {
2242 	struct btrfs_super_block *super_copy =
2243 		device->dev_root->fs_info->super_copy;
2244 	u64 old_total = btrfs_super_total_bytes(super_copy);
2245 	u64 diff = new_size - device->total_bytes;
2246 
2247 	if (!device->writeable)
2248 		return -EACCES;
2249 	if (new_size <= device->total_bytes ||
2250 	    device->is_tgtdev_for_dev_replace)
2251 		return -EINVAL;
2252 
2253 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
2254 	device->fs_devices->total_rw_bytes += diff;
2255 
2256 	device->total_bytes = new_size;
2257 	device->disk_total_bytes = new_size;
2258 	btrfs_clear_space_info_full(device->dev_root->fs_info);
2259 
2260 	return btrfs_update_device(trans, device);
2261 }
2262 
2263 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2264 		      struct btrfs_device *device, u64 new_size)
2265 {
2266 	int ret;
2267 	lock_chunks(device->dev_root);
2268 	ret = __btrfs_grow_device(trans, device, new_size);
2269 	unlock_chunks(device->dev_root);
2270 	return ret;
2271 }
2272 
2273 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2274 			    struct btrfs_root *root,
2275 			    u64 chunk_tree, u64 chunk_objectid,
2276 			    u64 chunk_offset)
2277 {
2278 	int ret;
2279 	struct btrfs_path *path;
2280 	struct btrfs_key key;
2281 
2282 	root = root->fs_info->chunk_root;
2283 	path = btrfs_alloc_path();
2284 	if (!path)
2285 		return -ENOMEM;
2286 
2287 	key.objectid = chunk_objectid;
2288 	key.offset = chunk_offset;
2289 	key.type = BTRFS_CHUNK_ITEM_KEY;
2290 
2291 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2292 	if (ret < 0)
2293 		goto out;
2294 	else if (ret > 0) { /* Logic error or corruption */
2295 		btrfs_error(root->fs_info, -ENOENT,
2296 			    "Failed lookup while freeing chunk.");
2297 		ret = -ENOENT;
2298 		goto out;
2299 	}
2300 
2301 	ret = btrfs_del_item(trans, root, path);
2302 	if (ret < 0)
2303 		btrfs_error(root->fs_info, ret,
2304 			    "Failed to delete chunk item.");
2305 out:
2306 	btrfs_free_path(path);
2307 	return ret;
2308 }
2309 
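/*
 * Remove the copy of a system chunk from the superblock's
 * sys_chunk_array.  The array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs, so it is walked entry by
 * entry and the tail is memmove()d down over the entry being
 * deleted.
 */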
2310 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
2311 			chunk_offset)
2312 {
2313 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2314 	struct btrfs_disk_key *disk_key;
2315 	struct btrfs_chunk *chunk;
2316 	u8 *ptr;
2317 	int ret = 0;
2318 	u32 num_stripes;
2319 	u32 array_size;
2320 	u32 len = 0;
2321 	u32 cur;
2322 	struct btrfs_key key;
2323 
2324 	array_size = btrfs_super_sys_array_size(super_copy);
2325 
2326 	ptr = super_copy->sys_chunk_array;
2327 	cur = 0;
2328 
2329 	while (cur < array_size) {
2330 		disk_key = (struct btrfs_disk_key *)ptr;
2331 		btrfs_disk_key_to_cpu(&key, disk_key);
2332 
2333 		len = sizeof(*disk_key);
2334 
2335 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2336 			chunk = (struct btrfs_chunk *)(ptr + len);
2337 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2338 			len += btrfs_chunk_item_size(num_stripes);
2339 		} else {
2340 			ret = -EIO;
2341 			break;
2342 		}
2343 		if (key.objectid == chunk_objectid &&
2344 		    key.offset == chunk_offset) {
2345 			memmove(ptr, ptr + len, array_size - (cur + len));
2346 			array_size -= len;
2347 			btrfs_set_super_sys_array_size(super_copy, array_size);
2348 		} else {
2349 			ptr += len;
2350 			cur += len;
2351 		}
2352 	}
2353 	return ret;
2354 }
2355 
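/*
 * Relocate a chunk off of its current device extents.  Step one
 * moves all live extents out of the block group; step two deletes
 * the device extents, the chunk tree item, the sys_chunk_array copy
 * (for system chunks) and the block group itself, and finally drops
 * the extent mapping.
 */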
2356 static int btrfs_relocate_chunk(struct btrfs_root *root,
2357 			 u64 chunk_tree, u64 chunk_objectid,
2358 			 u64 chunk_offset)
2359 {
2360 	struct extent_map_tree *em_tree;
2361 	struct btrfs_root *extent_root;
2362 	struct btrfs_trans_handle *trans;
2363 	struct extent_map *em;
2364 	struct map_lookup *map;
2365 	int ret;
2366 	int i;
2367 
2368 	root = root->fs_info->chunk_root;
2369 	extent_root = root->fs_info->extent_root;
2370 	em_tree = &root->fs_info->mapping_tree.map_tree;
2371 
2372 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2373 	if (ret)
2374 		return -ENOSPC;
2375 
2376 	/* step one, relocate all the extents inside this chunk */
2377 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2378 	if (ret)
2379 		return ret;
2380 
2381 	trans = btrfs_start_transaction(root, 0);
2382 	if (IS_ERR(trans)) {
2383 		ret = PTR_ERR(trans);
2384 		btrfs_std_error(root->fs_info, ret);
2385 		return ret;
2386 	}
2387 
2388 	lock_chunks(root);
2389 
2390 	/*
2391 	 * step two, delete the device extents and the
2392 	 * chunk tree entries
2393 	 */
2394 	read_lock(&em_tree->lock);
2395 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2396 	read_unlock(&em_tree->lock);
2397 
2398 	BUG_ON(!em || em->start > chunk_offset ||
2399 	       em->start + em->len < chunk_offset);
2400 	map = (struct map_lookup *)em->bdev;
2401 
2402 	for (i = 0; i < map->num_stripes; i++) {
2403 		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2404 					    map->stripes[i].physical);
2405 		BUG_ON(ret);
2406 
2407 		if (map->stripes[i].dev) {
2408 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2409 			BUG_ON(ret);
2410 		}
2411 	}
2412 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2413 			       chunk_offset);
2414 
2415 	BUG_ON(ret);
2416 
2417 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2418 
2419 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2420 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2421 		BUG_ON(ret);
2422 	}
2423 
2424 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2425 	BUG_ON(ret);
2426 
2427 	write_lock(&em_tree->lock);
2428 	remove_extent_mapping(em_tree, em);
2429 	write_unlock(&em_tree->lock);
2430 
2431 	kfree(map);
2432 	em->bdev = NULL;
2433 
2434 	/* once for the tree */
2435 	free_extent_map(em);
2436 	/* once for us */
2437 	free_extent_map(em);
2438 
2439 	unlock_chunks(root);
2440 	btrfs_end_transaction(trans, root);
2441 	return 0;
2442 }
2443 
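/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.
 * -ENOSPC failures are counted and the whole pass is retried once,
 * since relocating the other chunks may have freed enough room.
 */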
2444 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2445 {
2446 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2447 	struct btrfs_path *path;
2448 	struct extent_buffer *leaf;
2449 	struct btrfs_chunk *chunk;
2450 	struct btrfs_key key;
2451 	struct btrfs_key found_key;
2452 	u64 chunk_tree = chunk_root->root_key.objectid;
2453 	u64 chunk_type;
2454 	bool retried = false;
2455 	int failed = 0;
2456 	int ret;
2457 
2458 	path = btrfs_alloc_path();
2459 	if (!path)
2460 		return -ENOMEM;
2461 
2462 again:
2463 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2464 	key.offset = (u64)-1;
2465 	key.type = BTRFS_CHUNK_ITEM_KEY;
2466 
2467 	while (1) {
2468 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2469 		if (ret < 0)
2470 			goto error;
2471 		BUG_ON(ret == 0); /* Corruption */
2472 
2473 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2474 					  key.type);
2475 		if (ret < 0)
2476 			goto error;
2477 		if (ret > 0)
2478 			break;
2479 
2480 		leaf = path->nodes[0];
2481 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2482 
2483 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2484 				       struct btrfs_chunk);
2485 		chunk_type = btrfs_chunk_type(leaf, chunk);
2486 		btrfs_release_path(path);
2487 
2488 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2489 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2490 						   found_key.objectid,
2491 						   found_key.offset);
2492 			if (ret == -ENOSPC)
2493 				failed++;
2494 			else if (ret)
2495 				BUG();
2496 		}
2497 
2498 		if (found_key.offset == 0)
2499 			break;
2500 		key.offset = found_key.offset - 1;
2501 	}
2502 	ret = 0;
2503 	if (failed && !retried) {
2504 		failed = 0;
2505 		retried = true;
2506 		goto again;
2507 	} else if (failed && retried) {
2508 		WARN_ON(1);
2509 		ret = -ENOSPC;
2510 	}
2511 error:
2512 	btrfs_free_path(path);
2513 	return ret;
2514 }
2515 
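/*
 * Persist the balance state as the single balance item in the tree
 * root so that an interrupted balance can be resumed after a
 * remount.
 */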
2516 static int insert_balance_item(struct btrfs_root *root,
2517 			       struct btrfs_balance_control *bctl)
2518 {
2519 	struct btrfs_trans_handle *trans;
2520 	struct btrfs_balance_item *item;
2521 	struct btrfs_disk_balance_args disk_bargs;
2522 	struct btrfs_path *path;
2523 	struct extent_buffer *leaf;
2524 	struct btrfs_key key;
2525 	int ret, err;
2526 
2527 	path = btrfs_alloc_path();
2528 	if (!path)
2529 		return -ENOMEM;
2530 
2531 	trans = btrfs_start_transaction(root, 0);
2532 	if (IS_ERR(trans)) {
2533 		btrfs_free_path(path);
2534 		return PTR_ERR(trans);
2535 	}
2536 
2537 	key.objectid = BTRFS_BALANCE_OBJECTID;
2538 	key.type = BTRFS_BALANCE_ITEM_KEY;
2539 	key.offset = 0;
2540 
2541 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2542 				      sizeof(*item));
2543 	if (ret)
2544 		goto out;
2545 
2546 	leaf = path->nodes[0];
2547 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2548 
2549 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2550 
2551 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2552 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2553 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2554 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2555 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2556 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2557 
2558 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2559 
2560 	btrfs_mark_buffer_dirty(leaf);
2561 out:
2562 	btrfs_free_path(path);
2563 	err = btrfs_commit_transaction(trans, root);
2564 	if (err && !ret)
2565 		ret = err;
2566 	return ret;
2567 }
2568 
2569 static int del_balance_item(struct btrfs_root *root)
2570 {
2571 	struct btrfs_trans_handle *trans;
2572 	struct btrfs_path *path;
2573 	struct btrfs_key key;
2574 	int ret, err;
2575 
2576 	path = btrfs_alloc_path();
2577 	if (!path)
2578 		return -ENOMEM;
2579 
2580 	trans = btrfs_start_transaction(root, 0);
2581 	if (IS_ERR(trans)) {
2582 		btrfs_free_path(path);
2583 		return PTR_ERR(trans);
2584 	}
2585 
2586 	key.objectid = BTRFS_BALANCE_OBJECTID;
2587 	key.type = BTRFS_BALANCE_ITEM_KEY;
2588 	key.offset = 0;
2589 
2590 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2591 	if (ret < 0)
2592 		goto out;
2593 	if (ret > 0) {
2594 		ret = -ENOENT;
2595 		goto out;
2596 	}
2597 
2598 	ret = btrfs_del_item(trans, root, path);
2599 out:
2600 	btrfs_free_path(path);
2601 	err = btrfs_commit_transaction(trans, root);
2602 	if (err && !ret)
2603 		ret = err;
2604 	return ret;
2605 }
2606 
2607 /*
2608  * This is a heuristic used to reduce the number of chunks balanced on
2609  * resume after balance was interrupted.
2610  */
2611 static void update_balance_args(struct btrfs_balance_control *bctl)
2612 {
2613 	/*
2614 	 * Turn on soft mode for chunk types that were being converted.
2615 	 */
2616 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2617 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2618 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2619 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2620 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2621 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2622 
2623 	/*
2624 	 * Turn on the usage filter if it is not already in use.  The idea is
2625 	 * that chunks that we have already balanced should be
2626 	 * reasonably full.  Don't do it for chunks that are being
2627 	 * converted - that will keep us from relocating unconverted
2628 	 * (albeit full) chunks.
2629 	 */
2630 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2631 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2632 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2633 		bctl->data.usage = 90;
2634 	}
2635 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2636 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2637 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2638 		bctl->sys.usage = 90;
2639 	}
2640 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2641 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2642 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2643 		bctl->meta.usage = 90;
2644 	}
2645 }
2646 
2647 /*
2648  * Should be called with both balance and volume mutexes held to
2649  * serialize other volume operations (add_dev/rm_dev/resize) with
2650  * restriper.  Same goes for unset_balance_control.
2651  */
2652 static void set_balance_control(struct btrfs_balance_control *bctl)
2653 {
2654 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2655 
2656 	BUG_ON(fs_info->balance_ctl);
2657 
2658 	spin_lock(&fs_info->balance_lock);
2659 	fs_info->balance_ctl = bctl;
2660 	spin_unlock(&fs_info->balance_lock);
2661 }
2662 
2663 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2664 {
2665 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2666 
2667 	BUG_ON(!fs_info->balance_ctl);
2668 
2669 	spin_lock(&fs_info->balance_lock);
2670 	fs_info->balance_ctl = NULL;
2671 	spin_unlock(&fs_info->balance_lock);
2672 
2673 	kfree(bctl);
2674 }
2675 
2676 /*
2677  * Balance filters.  Return 1 if chunk should be filtered out
2678  * (should not be balanced).
2679  */
2680 static int chunk_profiles_filter(u64 chunk_type,
2681 				 struct btrfs_balance_args *bargs)
2682 {
2683 	chunk_type = chunk_to_extended(chunk_type) &
2684 				BTRFS_EXTENDED_PROFILE_MASK;
2685 
2686 	if (bargs->profiles & chunk_type)
2687 		return 0;
2688 
2689 	return 1;
2690 }
2691 
2692 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2693 			      struct btrfs_balance_args *bargs)
2694 {
2695 	struct btrfs_block_group_cache *cache;
2696 	u64 chunk_used, user_thresh;
2697 	int ret = 1;
2698 
2699 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2700 	chunk_used = btrfs_block_group_used(&cache->item);
2701 
2702 	if (bargs->usage == 0)
2703 		user_thresh = 1;
2704 	else if (bargs->usage > 100)
2705 		user_thresh = cache->key.offset;
2706 	else
2707 		user_thresh = div_factor_fine(cache->key.offset,
2708 					      bargs->usage);
2709 
2710 	if (chunk_used < user_thresh)
2711 		ret = 0;
2712 
2713 	btrfs_put_block_group(cache);
2714 	return ret;
2715 }
2716 
2717 static int chunk_devid_filter(struct extent_buffer *leaf,
2718 			      struct btrfs_chunk *chunk,
2719 			      struct btrfs_balance_args *bargs)
2720 {
2721 	struct btrfs_stripe *stripe;
2722 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2723 	int i;
2724 
2725 	for (i = 0; i < num_stripes; i++) {
2726 		stripe = btrfs_stripe_nr(chunk, i);
2727 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2728 			return 0;
2729 	}
2730 
2731 	return 1;
2732 }
2733 
2734 /* [pstart, pend) */
2735 static int chunk_drange_filter(struct extent_buffer *leaf,
2736 			       struct btrfs_chunk *chunk,
2737 			       u64 chunk_offset,
2738 			       struct btrfs_balance_args *bargs)
2739 {
2740 	struct btrfs_stripe *stripe;
2741 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2742 	u64 stripe_offset;
2743 	u64 stripe_length;
2744 	int factor;
2745 	int i;
2746 
2747 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2748 		return 0;
2749 
2750 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2751 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2752 		factor = num_stripes / 2;
2753 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2754 		factor = num_stripes - 1;
2755 	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2756 		factor = num_stripes - 2;
2757 	} else {
2758 		factor = num_stripes;
2759 	}
2760 
2761 	for (i = 0; i < num_stripes; i++) {
2762 		stripe = btrfs_stripe_nr(chunk, i);
2763 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2764 			continue;
2765 
2766 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2767 		stripe_length = btrfs_chunk_length(leaf, chunk);
2768 		do_div(stripe_length, factor);
2769 
2770 		if (stripe_offset < bargs->pend &&
2771 		    stripe_offset + stripe_length > bargs->pstart)
2772 			return 0;
2773 	}
2774 
2775 	return 1;
2776 }
2777 
2778 /* [vstart, vend) */
2779 static int chunk_vrange_filter(struct extent_buffer *leaf,
2780 			       struct btrfs_chunk *chunk,
2781 			       u64 chunk_offset,
2782 			       struct btrfs_balance_args *bargs)
2783 {
2784 	if (chunk_offset < bargs->vend &&
2785 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2786 		/* at least part of the chunk is inside this vrange */
2787 		return 0;
2788 
2789 	return 1;
2790 }
2791 
2792 static int chunk_soft_convert_filter(u64 chunk_type,
2793 				     struct btrfs_balance_args *bargs)
2794 {
2795 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2796 		return 0;
2797 
2798 	chunk_type = chunk_to_extended(chunk_type) &
2799 				BTRFS_EXTENDED_PROFILE_MASK;
2800 
2801 	if (bargs->target == chunk_type)
2802 		return 1;
2803 
2804 	return 0;
2805 }
2806 
2807 static int should_balance_chunk(struct btrfs_root *root,
2808 				struct extent_buffer *leaf,
2809 				struct btrfs_chunk *chunk, u64 chunk_offset)
2810 {
2811 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2812 	struct btrfs_balance_args *bargs = NULL;
2813 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2814 
2815 	/* type filter */
2816 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2817 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2818 		return 0;
2819 	}
2820 
2821 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2822 		bargs = &bctl->data;
2823 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2824 		bargs = &bctl->sys;
2825 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2826 		bargs = &bctl->meta;
2827 
2828 	/* profiles filter */
2829 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2830 	    chunk_profiles_filter(chunk_type, bargs)) {
2831 		return 0;
2832 	}
2833 
2834 	/* usage filter */
2835 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2836 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2837 		return 0;
2838 	}
2839 
2840 	/* devid filter */
2841 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2842 	    chunk_devid_filter(leaf, chunk, bargs)) {
2843 		return 0;
2844 	}
2845 
2846 	/* drange filter, makes sense only with devid filter */
2847 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2848 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2849 		return 0;
2850 	}
2851 
2852 	/* vrange filter */
2853 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2854 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2855 		return 0;
2856 	}
2857 
2858 	/* soft profile changing mode */
2859 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2860 	    chunk_soft_convert_filter(chunk_type, bargs)) {
2861 		return 0;
2862 	}
2863 
2864 	return 1;
2865 }
2866 
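/*
 * The main balance loop.  Step one shrinks and regrows each device
 * to shake a little space free; step two walks the chunk tree
 * backwards twice: a counting pass that only updates the expected
 * chunk statistics, followed by a pass that actually relocates every
 * chunk that passes should_balance_chunk().
 */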
2867 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2868 {
2869 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2870 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2871 	struct btrfs_root *dev_root = fs_info->dev_root;
2872 	struct list_head *devices;
2873 	struct btrfs_device *device;
2874 	u64 old_size;
2875 	u64 size_to_free;
2876 	struct btrfs_chunk *chunk;
2877 	struct btrfs_path *path;
2878 	struct btrfs_key key;
2879 	struct btrfs_key found_key;
2880 	struct btrfs_trans_handle *trans;
2881 	struct extent_buffer *leaf;
2882 	int slot;
2883 	int ret;
2884 	int enospc_errors = 0;
2885 	bool counting = true;
2886 
2887 	/* step one, make some room on all the devices */
2888 	devices = &fs_info->fs_devices->devices;
2889 	list_for_each_entry(device, devices, dev_list) {
2890 		old_size = device->total_bytes;
2891 		size_to_free = div_factor(old_size, 1);
2892 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2893 		if (!device->writeable ||
2894 		    device->total_bytes - device->bytes_used > size_to_free ||
2895 		    device->is_tgtdev_for_dev_replace)
2896 			continue;
2897 
2898 		ret = btrfs_shrink_device(device, old_size - size_to_free);
2899 		if (ret == -ENOSPC)
2900 			break;
2901 		BUG_ON(ret);
2902 
2903 		trans = btrfs_start_transaction(dev_root, 0);
2904 		BUG_ON(IS_ERR(trans));
2905 
2906 		ret = btrfs_grow_device(trans, device, old_size);
2907 		BUG_ON(ret);
2908 
2909 		btrfs_end_transaction(trans, dev_root);
2910 	}
2911 
2912 	/* step two, relocate all the chunks */
2913 	path = btrfs_alloc_path();
2914 	if (!path) {
2915 		ret = -ENOMEM;
2916 		goto error;
2917 	}
2918 
2919 	/* zero out stat counters */
2920 	spin_lock(&fs_info->balance_lock);
2921 	memset(&bctl->stat, 0, sizeof(bctl->stat));
2922 	spin_unlock(&fs_info->balance_lock);
2923 again:
2924 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2925 	key.offset = (u64)-1;
2926 	key.type = BTRFS_CHUNK_ITEM_KEY;
2927 
2928 	while (1) {
2929 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2930 		    atomic_read(&fs_info->balance_cancel_req)) {
2931 			ret = -ECANCELED;
2932 			goto error;
2933 		}
2934 
2935 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2936 		if (ret < 0)
2937 			goto error;
2938 
2939 		/*
2940 		 * this shouldn't happen, it means the last relocate
2941 		 * failed
2942 		 */
2943 		if (ret == 0)
2944 			BUG(); /* FIXME break ? */
2945 
2946 		ret = btrfs_previous_item(chunk_root, path, 0,
2947 					  BTRFS_CHUNK_ITEM_KEY);
2948 		if (ret) {
2949 			ret = 0;
2950 			break;
2951 		}
2952 
2953 		leaf = path->nodes[0];
2954 		slot = path->slots[0];
2955 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2956 
2957 		if (found_key.objectid != key.objectid)
2958 			break;
2959 
2960 		/* chunk zero is special */
2961 		if (found_key.offset == 0)
2962 			break;
2963 
2964 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2965 
2966 		if (!counting) {
2967 			spin_lock(&fs_info->balance_lock);
2968 			bctl->stat.considered++;
2969 			spin_unlock(&fs_info->balance_lock);
2970 		}
2971 
2972 		ret = should_balance_chunk(chunk_root, leaf, chunk,
2973 					   found_key.offset);
2974 		btrfs_release_path(path);
2975 		if (!ret)
2976 			goto loop;
2977 
2978 		if (counting) {
2979 			spin_lock(&fs_info->balance_lock);
2980 			bctl->stat.expected++;
2981 			spin_unlock(&fs_info->balance_lock);
2982 			goto loop;
2983 		}
2984 
2985 		ret = btrfs_relocate_chunk(chunk_root,
2986 					   chunk_root->root_key.objectid,
2987 					   found_key.objectid,
2988 					   found_key.offset);
2989 		if (ret && ret != -ENOSPC)
2990 			goto error;
2991 		if (ret == -ENOSPC) {
2992 			enospc_errors++;
2993 		} else {
2994 			spin_lock(&fs_info->balance_lock);
2995 			bctl->stat.completed++;
2996 			spin_unlock(&fs_info->balance_lock);
2997 		}
2998 loop:
2999 		key.offset = found_key.offset - 1;
3000 	}
3001 
3002 	if (counting) {
3003 		btrfs_release_path(path);
3004 		counting = false;
3005 		goto again;
3006 	}
3007 error:
3008 	btrfs_free_path(path);
3009 	if (enospc_errors) {
3010 		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3011 		       enospc_errors);
3012 		if (!ret)
3013 			ret = -ENOSPC;
3014 	}
3015 
3016 	return ret;
3017 }
3018 
3019 /**
3020  * alloc_profile_is_valid - see if a given profile is valid and reduced
3021  * @flags: profile to validate
3022  * @extended: if true @flags is treated as an extended profile
3023  */
3024 static int alloc_profile_is_valid(u64 flags, int extended)
3025 {
3026 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3027 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
3028 
3029 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3030 
3031 	/* 1) check that all other bits are zeroed */
3032 	if (flags & ~mask)
3033 		return 0;
3034 
3035 	/* 2) see if profile is reduced */
3036 	if (flags == 0)
3037 		return !extended; /* "0" is valid for usual profiles */
3038 
3039 	/* true if exactly one bit set */
3040 	return (flags & (flags - 1)) == 0;
3041 }
3042 
3043 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3044 {
3045 	/* cancel requested || normal exit path */
3046 	return atomic_read(&fs_info->balance_cancel_req) ||
3047 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
3048 		 atomic_read(&fs_info->balance_cancel_req) == 0);
3049 }
3050 
3051 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3052 {
3053 	int ret;
3054 
3055 	unset_balance_control(fs_info);
3056 	ret = del_balance_item(fs_info->tree_root);
3057 	if (ret)
3058 		btrfs_std_error(fs_info, ret);
3059 
3060 	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3061 }
3062 
3063 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
3064 			       struct btrfs_ioctl_balance_args *bargs);
3065 
3066 /*
3067  * Should be called with both balance and volume mutexes held
3068  */
3069 int btrfs_balance(struct btrfs_balance_control *bctl,
3070 		  struct btrfs_ioctl_balance_args *bargs)
3071 {
3072 	struct btrfs_fs_info *fs_info = bctl->fs_info;
3073 	u64 allowed;
3074 	int mixed = 0;
3075 	int ret;
3076 	u64 num_devices;
3077 	unsigned seq;
3078 
3079 	if (btrfs_fs_closing(fs_info) ||
3080 	    atomic_read(&fs_info->balance_pause_req) ||
3081 	    atomic_read(&fs_info->balance_cancel_req)) {
3082 		ret = -EINVAL;
3083 		goto out;
3084 	}
3085 
3086 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3087 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3088 		mixed = 1;
3089 
3090 	/*
3091 	 * In case of mixed groups both data and meta should be picked,
3092 	 * and identical options should be given for both of them.
3093 	 */
3094 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3095 	if (mixed && (bctl->flags & allowed)) {
3096 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3097 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3098 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3099 			printk(KERN_ERR "btrfs: with mixed groups data and "
3100 			       "metadata balance options must be the same\n");
3101 			ret = -EINVAL;
3102 			goto out;
3103 		}
3104 	}
3105 
3106 	num_devices = fs_info->fs_devices->num_devices;
3107 	btrfs_dev_replace_lock(&fs_info->dev_replace);
3108 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3109 		BUG_ON(num_devices < 1);
3110 		num_devices--;
3111 	}
3112 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
3113 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3114 	if (num_devices == 1)
3115 		allowed |= BTRFS_BLOCK_GROUP_DUP;
3116 	else if (num_devices < 4)
3117 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3118 	else
3119 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
3120 				BTRFS_BLOCK_GROUP_RAID10 |
3121 				BTRFS_BLOCK_GROUP_RAID5 |
3122 				BTRFS_BLOCK_GROUP_RAID6);
3123 
3124 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3125 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
3126 	     (bctl->data.target & ~allowed))) {
3127 		printk(KERN_ERR "btrfs: unable to start balance with target "
3128 		       "data profile %llu\n",
3129 		       (unsigned long long)bctl->data.target);
3130 		ret = -EINVAL;
3131 		goto out;
3132 	}
3133 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3134 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3135 	     (bctl->meta.target & ~allowed))) {
3136 		printk(KERN_ERR "btrfs: unable to start balance with target "
3137 		       "metadata profile %llu\n",
3138 		       (unsigned long long)bctl->meta.target);
3139 		ret = -EINVAL;
3140 		goto out;
3141 	}
3142 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3143 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3144 	     (bctl->sys.target & ~allowed))) {
3145 		printk(KERN_ERR "btrfs: unable to start balance with target "
3146 		       "system profile %llu\n",
3147 		       (unsigned long long)bctl->sys.target);
3148 		ret = -EINVAL;
3149 		goto out;
3150 	}
3151 
3152 	/* allow dup'ed data chunks only in mixed mode */
3153 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3154 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3155 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3156 		ret = -EINVAL;
3157 		goto out;
3158 	}
3159 
3160 	/* allow to reduce meta or sys integrity only if force set */
3161 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3162 			BTRFS_BLOCK_GROUP_RAID10 |
3163 			BTRFS_BLOCK_GROUP_RAID5 |
3164 			BTRFS_BLOCK_GROUP_RAID6;
3165 	do {
3166 		seq = read_seqbegin(&fs_info->profiles_lock);
3167 
3168 		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3169 		     (fs_info->avail_system_alloc_bits & allowed) &&
3170 		     !(bctl->sys.target & allowed)) ||
3171 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3172 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
3173 		     !(bctl->meta.target & allowed))) {
3174 			if (bctl->flags & BTRFS_BALANCE_FORCE) {
3175 				printk(KERN_INFO "btrfs: force reducing metadata "
3176 				       "integrity\n");
3177 			} else {
3178 				printk(KERN_ERR "btrfs: balance will reduce metadata "
3179 				       "integrity, use force if you want this\n");
3180 				ret = -EINVAL;
3181 				goto out;
3182 			}
3183 		}
3184 	} while (read_seqretry(&fs_info->profiles_lock, seq));
3185 
3186 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3187 		int num_tolerated_disk_barrier_failures;
3188 		u64 target = bctl->sys.target;
3189 
3190 		num_tolerated_disk_barrier_failures =
3191 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3192 		if (num_tolerated_disk_barrier_failures > 0 &&
3193 		    (target &
3194 		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3195 		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3196 			num_tolerated_disk_barrier_failures = 0;
3197 		else if (num_tolerated_disk_barrier_failures > 1 &&
3198 			 (target &
3199 			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3200 			num_tolerated_disk_barrier_failures = 1;
3201 
3202 		fs_info->num_tolerated_disk_barrier_failures =
3203 			num_tolerated_disk_barrier_failures;
3204 	}
3205 
3206 	ret = insert_balance_item(fs_info->tree_root, bctl);
3207 	if (ret && ret != -EEXIST)
3208 		goto out;
3209 
3210 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3211 		BUG_ON(ret == -EEXIST);
3212 		set_balance_control(bctl);
3213 	} else {
3214 		BUG_ON(ret != -EEXIST);
3215 		spin_lock(&fs_info->balance_lock);
3216 		update_balance_args(bctl);
3217 		spin_unlock(&fs_info->balance_lock);
3218 	}
3219 
3220 	atomic_inc(&fs_info->balance_running);
3221 	mutex_unlock(&fs_info->balance_mutex);
3222 
3223 	ret = __btrfs_balance(fs_info);
3224 
3225 	mutex_lock(&fs_info->balance_mutex);
3226 	atomic_dec(&fs_info->balance_running);
3227 
3228 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3229 		fs_info->num_tolerated_disk_barrier_failures =
3230 			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3231 	}
3232 
3233 	if (bargs) {
3234 		memset(bargs, 0, sizeof(*bargs));
3235 		update_ioctl_balance_args(fs_info, 0, bargs);
3236 	}
3237 
3238 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3239 	    balance_need_close(fs_info)) {
3240 		__cancel_balance(fs_info);
3241 	}
3242 
3243 	wake_up(&fs_info->balance_wait_q);
3244 
3245 	return ret;
3246 out:
3247 	if (bctl->flags & BTRFS_BALANCE_RESUME)
3248 		__cancel_balance(fs_info);
3249 	else {
3250 		kfree(bctl);
3251 		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3252 	}
3253 	return ret;
3254 }
3255 
3256 static int balance_kthread(void *data)
3257 {
3258 	struct btrfs_fs_info *fs_info = data;
3259 	int ret = 0;
3260 
3261 	mutex_lock(&fs_info->volume_mutex);
3262 	mutex_lock(&fs_info->balance_mutex);
3263 
3264 	if (fs_info->balance_ctl) {
3265 		printk(KERN_INFO "btrfs: continuing balance\n");
3266 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
3267 	}
3268 
3269 	mutex_unlock(&fs_info->balance_mutex);
3270 	mutex_unlock(&fs_info->volume_mutex);
3271 
3272 	return ret;
3273 }
3274 
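/*
 * Kick off a kernel thread to continue an interrupted balance,
 * unless the skip_balance mount option was given.
 */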
3275 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3276 {
3277 	struct task_struct *tsk;
3278 
3279 	spin_lock(&fs_info->balance_lock);
3280 	if (!fs_info->balance_ctl) {
3281 		spin_unlock(&fs_info->balance_lock);
3282 		return 0;
3283 	}
3284 	spin_unlock(&fs_info->balance_lock);
3285 
3286 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3287 		printk(KERN_INFO "btrfs: force skipping balance\n");
3288 		return 0;
3289 	}
3290 
3291 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3292 	if (IS_ERR(tsk))
3293 		return PTR_ERR(tsk);
3294 
3295 	return 0;
3296 }
3297 
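/*
 * Read the balance item left behind by an interrupted balance and
 * recreate the in-memory balance control from it; the actual resume
 * happens later via btrfs_resume_balance_async().
 */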
3298 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3299 {
3300 	struct btrfs_balance_control *bctl;
3301 	struct btrfs_balance_item *item;
3302 	struct btrfs_disk_balance_args disk_bargs;
3303 	struct btrfs_path *path;
3304 	struct extent_buffer *leaf;
3305 	struct btrfs_key key;
3306 	int ret;
3307 
3308 	path = btrfs_alloc_path();
3309 	if (!path)
3310 		return -ENOMEM;
3311 
3312 	key.objectid = BTRFS_BALANCE_OBJECTID;
3313 	key.type = BTRFS_BALANCE_ITEM_KEY;
3314 	key.offset = 0;
3315 
3316 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3317 	if (ret < 0)
3318 		goto out;
3319 	if (ret > 0) { /* ret = -ENOENT; */
3320 		ret = 0;
3321 		goto out;
3322 	}
3323 
3324 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3325 	if (!bctl) {
3326 		ret = -ENOMEM;
3327 		goto out;
3328 	}
3329 
3330 	leaf = path->nodes[0];
3331 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3332 
3333 	bctl->fs_info = fs_info;
3334 	bctl->flags = btrfs_balance_flags(leaf, item);
3335 	bctl->flags |= BTRFS_BALANCE_RESUME;
3336 
3337 	btrfs_balance_data(leaf, item, &disk_bargs);
3338 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3339 	btrfs_balance_meta(leaf, item, &disk_bargs);
3340 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3341 	btrfs_balance_sys(leaf, item, &disk_bargs);
3342 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3343 
3344 	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3345 
3346 	mutex_lock(&fs_info->volume_mutex);
3347 	mutex_lock(&fs_info->balance_mutex);
3348 
3349 	set_balance_control(bctl);
3350 
3351 	mutex_unlock(&fs_info->balance_mutex);
3352 	mutex_unlock(&fs_info->volume_mutex);
3353 out:
3354 	btrfs_free_path(path);
3355 	return ret;
3356 }
3357 
3358 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3359 {
3360 	int ret = 0;
3361 
3362 	mutex_lock(&fs_info->balance_mutex);
3363 	if (!fs_info->balance_ctl) {
3364 		mutex_unlock(&fs_info->balance_mutex);
3365 		return -ENOTCONN;
3366 	}
3367 
3368 	if (atomic_read(&fs_info->balance_running)) {
3369 		atomic_inc(&fs_info->balance_pause_req);
3370 		mutex_unlock(&fs_info->balance_mutex);
3371 
3372 		wait_event(fs_info->balance_wait_q,
3373 			   atomic_read(&fs_info->balance_running) == 0);
3374 
3375 		mutex_lock(&fs_info->balance_mutex);
3376 		/* we are good with balance_ctl ripped off from under us */
3377 		BUG_ON(atomic_read(&fs_info->balance_running));
3378 		atomic_dec(&fs_info->balance_pause_req);
3379 	} else {
3380 		ret = -ENOTCONN;
3381 	}
3382 
3383 	mutex_unlock(&fs_info->balance_mutex);
3384 	return ret;
3385 }
3386 
3387 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3388 {
3389 	mutex_lock(&fs_info->balance_mutex);
3390 	if (!fs_info->balance_ctl) {
3391 		mutex_unlock(&fs_info->balance_mutex);
3392 		return -ENOTCONN;
3393 	}
3394 
3395 	atomic_inc(&fs_info->balance_cancel_req);
3396 	/*
3397 	 * if we are running, just wait and return; the balance item is
3398 	 * deleted in btrfs_balance in this case
3399 	 */
3400 	if (atomic_read(&fs_info->balance_running)) {
3401 		mutex_unlock(&fs_info->balance_mutex);
3402 		wait_event(fs_info->balance_wait_q,
3403 			   atomic_read(&fs_info->balance_running) == 0);
3404 		mutex_lock(&fs_info->balance_mutex);
3405 	} else {
3406 		/* __cancel_balance needs volume_mutex */
3407 		mutex_unlock(&fs_info->balance_mutex);
3408 		mutex_lock(&fs_info->volume_mutex);
3409 		mutex_lock(&fs_info->balance_mutex);
3410 
3411 		if (fs_info->balance_ctl)
3412 			__cancel_balance(fs_info);
3413 
3414 		mutex_unlock(&fs_info->volume_mutex);
3415 	}
3416 
3417 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3418 	atomic_dec(&fs_info->balance_cancel_req);
3419 	mutex_unlock(&fs_info->balance_mutex);
3420 	return 0;
3421 }
3422 
3423 /*
3424  * shrinking a device means finding all of the device extents past
3425  * the new size, and then following the back refs to the chunks.
3426  * The chunk relocation code actually frees the device extent.
3427  */
3428 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3429 {
3430 	struct btrfs_trans_handle *trans;
3431 	struct btrfs_root *root = device->dev_root;
3432 	struct btrfs_dev_extent *dev_extent = NULL;
3433 	struct btrfs_path *path;
3434 	u64 length;
3435 	u64 chunk_tree;
3436 	u64 chunk_objectid;
3437 	u64 chunk_offset;
3438 	int ret;
3439 	int slot;
3440 	int failed = 0;
3441 	bool retried = false;
3442 	struct extent_buffer *l;
3443 	struct btrfs_key key;
3444 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3445 	u64 old_total = btrfs_super_total_bytes(super_copy);
3446 	u64 old_size = device->total_bytes;
3447 	u64 diff = device->total_bytes - new_size;
3448 
3449 	if (device->is_tgtdev_for_dev_replace)
3450 		return -EINVAL;
3451 
3452 	path = btrfs_alloc_path();
3453 	if (!path)
3454 		return -ENOMEM;
3455 
3456 	path->reada = 2;
3457 
3458 	lock_chunks(root);
3459 
3460 	device->total_bytes = new_size;
3461 	if (device->writeable) {
3462 		device->fs_devices->total_rw_bytes -= diff;
3463 		spin_lock(&root->fs_info->free_chunk_lock);
3464 		root->fs_info->free_chunk_space -= diff;
3465 		spin_unlock(&root->fs_info->free_chunk_lock);
3466 	}
3467 	unlock_chunks(root);
3468 
3469 again:
3470 	key.objectid = device->devid;
3471 	key.offset = (u64)-1;
3472 	key.type = BTRFS_DEV_EXTENT_KEY;
3473 
3474 	do {
3475 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3476 		if (ret < 0)
3477 			goto done;
3478 
3479 		ret = btrfs_previous_item(root, path, 0, key.type);
3480 		if (ret < 0)
3481 			goto done;
3482 		if (ret) {
3483 			ret = 0;
3484 			btrfs_release_path(path);
3485 			break;
3486 		}
3487 
3488 		l = path->nodes[0];
3489 		slot = path->slots[0];
3490 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3491 
3492 		if (key.objectid != device->devid) {
3493 			btrfs_release_path(path);
3494 			break;
3495 		}
3496 
3497 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3498 		length = btrfs_dev_extent_length(l, dev_extent);
3499 
3500 		if (key.offset + length <= new_size) {
3501 			btrfs_release_path(path);
3502 			break;
3503 		}
3504 
3505 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3506 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3507 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3508 		btrfs_release_path(path);
3509 
3510 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3511 					   chunk_offset);
3512 		if (ret && ret != -ENOSPC)
3513 			goto done;
3514 		if (ret == -ENOSPC)
3515 			failed++;
3516 	} while (key.offset-- > 0);
3517 
3518 	if (failed && !retried) {
3519 		failed = 0;
3520 		retried = true;
3521 		goto again;
3522 	} else if (failed && retried) {
3523 		ret = -ENOSPC;
3524 		lock_chunks(root);
3525 
3526 		device->total_bytes = old_size;
3527 		if (device->writeable)
3528 			device->fs_devices->total_rw_bytes += diff;
3529 		spin_lock(&root->fs_info->free_chunk_lock);
3530 		root->fs_info->free_chunk_space += diff;
3531 		spin_unlock(&root->fs_info->free_chunk_lock);
3532 		unlock_chunks(root);
3533 		goto done;
3534 	}
3535 
3536 	/* Shrinking succeeded, else we would be at "done". */
3537 	trans = btrfs_start_transaction(root, 0);
3538 	if (IS_ERR(trans)) {
3539 		ret = PTR_ERR(trans);
3540 		goto done;
3541 	}
3542 
3543 	lock_chunks(root);
3544 
3545 	device->disk_total_bytes = new_size;
3546 	/* Now btrfs_update_device() will change the on-disk size. */
3547 	ret = btrfs_update_device(trans, device);
3548 	if (ret) {
3549 		unlock_chunks(root);
3550 		btrfs_end_transaction(trans, root);
3551 		goto done;
3552 	}
3553 	WARN_ON(diff > old_total);
3554 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3555 	unlock_chunks(root);
3556 	btrfs_end_transaction(trans, root);
3557 done:
3558 	btrfs_free_path(path);
3559 	return ret;
3560 }
3561 
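/*
 * Append a (key, chunk) pair for a new system chunk to the
 * superblock's sys_chunk_array, the counterpart of
 * btrfs_del_sys_chunk() above.
 */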
3562 static int btrfs_add_system_chunk(struct btrfs_root *root,
3563 			   struct btrfs_key *key,
3564 			   struct btrfs_chunk *chunk, int item_size)
3565 {
3566 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3567 	struct btrfs_disk_key disk_key;
3568 	u32 array_size;
3569 	u8 *ptr;
3570 
3571 	array_size = btrfs_super_sys_array_size(super_copy);
3572 	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3573 		return -EFBIG;
3574 
3575 	ptr = super_copy->sys_chunk_array + array_size;
3576 	btrfs_cpu_key_to_disk(&disk_key, key);
3577 	memcpy(ptr, &disk_key, sizeof(disk_key));
3578 	ptr += sizeof(disk_key);
3579 	memcpy(ptr, chunk, item_size);
3580 	item_size += sizeof(disk_key);
3581 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3582 	return 0;
3583 }
3584 
3585 /*
3586  * sort the devices in descending order by max_avail, total_avail
3587  */
3588 static int btrfs_cmp_device_info(const void *a, const void *b)
3589 {
3590 	const struct btrfs_device_info *di_a = a;
3591 	const struct btrfs_device_info *di_b = b;
3592 
3593 	if (di_a->max_avail > di_b->max_avail)
3594 		return -1;
3595 	if (di_a->max_avail < di_b->max_avail)
3596 		return 1;
3597 	if (di_a->total_avail > di_b->total_avail)
3598 		return -1;
3599 	if (di_a->total_avail < di_b->total_avail)
3600 		return 1;
3601 	return 0;
3602 }
3603 
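/*
 * Per-profile allocation parameters: stripes per device, device
 * count limits and increments, and how many copies of the data each
 * profile keeps.  Chunk allocation below is driven by this table.
 */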
3604 struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3605 	[BTRFS_RAID_RAID10] = {
3606 		.sub_stripes	= 2,
3607 		.dev_stripes	= 1,
3608 		.devs_max	= 0,	/* 0 == as many as possible */
3609 		.devs_min	= 4,
3610 		.devs_increment	= 2,
3611 		.ncopies	= 2,
3612 	},
3613 	[BTRFS_RAID_RAID1] = {
3614 		.sub_stripes	= 1,
3615 		.dev_stripes	= 1,
3616 		.devs_max	= 2,
3617 		.devs_min	= 2,
3618 		.devs_increment	= 2,
3619 		.ncopies	= 2,
3620 	},
3621 	[BTRFS_RAID_DUP] = {
3622 		.sub_stripes	= 1,
3623 		.dev_stripes	= 2,
3624 		.devs_max	= 1,
3625 		.devs_min	= 1,
3626 		.devs_increment	= 1,
3627 		.ncopies	= 2,
3628 	},
3629 	[BTRFS_RAID_RAID0] = {
3630 		.sub_stripes	= 1,
3631 		.dev_stripes	= 1,
3632 		.devs_max	= 0,
3633 		.devs_min	= 2,
3634 		.devs_increment	= 1,
3635 		.ncopies	= 1,
3636 	},
3637 	[BTRFS_RAID_SINGLE] = {
3638 		.sub_stripes	= 1,
3639 		.dev_stripes	= 1,
3640 		.devs_max	= 1,
3641 		.devs_min	= 1,
3642 		.devs_increment	= 1,
3643 		.ncopies	= 1,
3644 	},
3645 	[BTRFS_RAID_RAID5] = {
3646 		.sub_stripes	= 1,
3647 		.dev_stripes	= 1,
3648 		.devs_max	= 0,
3649 		.devs_min	= 2,
3650 		.devs_increment	= 1,
3651 		.ncopies	= 2,
3652 	},
3653 	[BTRFS_RAID_RAID6] = {
3654 		.sub_stripes	= 1,
3655 		.dev_stripes	= 1,
3656 		.devs_max	= 0,
3657 		.devs_min	= 3,
3658 		.devs_increment	= 1,
3659 		.ncopies	= 3,
3660 	},
3661 };
3662 
3663 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3664 {
3665 	/* TODO allow them to set a preferred stripe size */
3666 	return 64 * 1024;
3667 }
3668 
3669 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3670 {
3671 	u64 features;
3672 
3673 	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3674 		return;
3675 
3676 	features = btrfs_super_incompat_flags(info->super_copy);
3677 	if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
3678 		return;
3679 
3680 	features |= BTRFS_FEATURE_INCOMPAT_RAID56;
3681 	btrfs_set_super_incompat_flags(info->super_copy, features);
3682 	printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
3683 }
3684 
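/*
 * Allocate a new chunk of the given @type starting at logical offset
 * @start: gather the largest free hole on every writable device,
 * sort the devices by available space, and carve equally sized
 * stripes out of as many of them as the profile allows.
 */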
3685 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3686 			       struct btrfs_root *extent_root,
3687 			       struct map_lookup **map_ret,
3688 			       u64 *num_bytes_out, u64 *stripe_size_out,
3689 			       u64 start, u64 type)
3690 {
3691 	struct btrfs_fs_info *info = extent_root->fs_info;
3692 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3693 	struct list_head *cur;
3694 	struct map_lookup *map = NULL;
3695 	struct extent_map_tree *em_tree;
3696 	struct extent_map *em;
3697 	struct btrfs_device_info *devices_info = NULL;
3698 	u64 total_avail;
3699 	int num_stripes;	/* total number of stripes to allocate */
3700 	int data_stripes;	/* number of stripes that count for
3701 				   block group size */
3702 	int sub_stripes;	/* sub_stripes info for map */
3703 	int dev_stripes;	/* stripes per dev */
3704 	int devs_max;		/* max devs to use */
3705 	int devs_min;		/* min devs needed */
3706 	int devs_increment;	/* ndevs has to be a multiple of this */
3707 	int ncopies;		/* how many copies the data has */
3708 	int ret;
3709 	u64 max_stripe_size;
3710 	u64 max_chunk_size;
3711 	u64 stripe_size;
3712 	u64 num_bytes;
3713 	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3714 	int ndevs;
3715 	int i;
3716 	int j;
3717 	int index;
3718 
3719 	BUG_ON(!alloc_profile_is_valid(type, 0));
3720 
3721 	if (list_empty(&fs_devices->alloc_list))
3722 		return -ENOSPC;
3723 
3724 	index = __get_raid_index(type);
3725 
3726 	sub_stripes = btrfs_raid_array[index].sub_stripes;
3727 	dev_stripes = btrfs_raid_array[index].dev_stripes;
3728 	devs_max = btrfs_raid_array[index].devs_max;
3729 	devs_min = btrfs_raid_array[index].devs_min;
3730 	devs_increment = btrfs_raid_array[index].devs_increment;
3731 	ncopies = btrfs_raid_array[index].ncopies;
3732 
3733 	if (type & BTRFS_BLOCK_GROUP_DATA) {
3734 		max_stripe_size = 1024 * 1024 * 1024;
3735 		max_chunk_size = 10 * max_stripe_size;
3736 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3737 		/* for larger filesystems, use larger metadata chunks */
3738 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3739 			max_stripe_size = 1024 * 1024 * 1024;
3740 		else
3741 			max_stripe_size = 256 * 1024 * 1024;
3742 		max_chunk_size = max_stripe_size;
3743 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3744 		max_stripe_size = 32 * 1024 * 1024;
3745 		max_chunk_size = 2 * max_stripe_size;
3746 	} else {
3747 		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3748 		       type);
3749 		BUG_ON(1);
3750 	}
3751 
3752 	/* we don't want a chunk larger than 10% of writeable space */
3753 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3754 			     max_chunk_size);
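	/*
	 * Worked example: for a data chunk the limits above allow a 1 GiB
	 * stripe per device and a 10 GiB chunk, but div_factor(x, 1) is
	 * x / 10, so on a filesystem with 50 GiB of writeable space the
	 * chunk is further capped at 5 GiB of logical address space.
	 */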
3755 
3756 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3757 			       GFP_NOFS);
3758 	if (!devices_info)
3759 		return -ENOMEM;
3760 
3761 	cur = fs_devices->alloc_list.next;
3762 
3763 	/*
3764 	 * in the first pass through the devices list, we gather information
3765 	 * about the available holes on each device.
3766 	 */
3767 	ndevs = 0;
3768 	while (cur != &fs_devices->alloc_list) {
3769 		struct btrfs_device *device;
3770 		u64 max_avail;
3771 		u64 dev_offset;
3772 
3773 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3774 
3775 		cur = cur->next;
3776 
3777 		if (!device->writeable) {
3778 			WARN(1, KERN_ERR
3779 			       "btrfs: read-only device in alloc_list\n");
3780 			continue;
3781 		}
3782 
3783 		if (!device->in_fs_metadata ||
3784 		    device->is_tgtdev_for_dev_replace)
3785 			continue;
3786 
3787 		if (device->total_bytes > device->bytes_used)
3788 			total_avail = device->total_bytes - device->bytes_used;
3789 		else
3790 			total_avail = 0;
3791 
3792 		/* If there is no space on this device, skip it. */
3793 		if (total_avail == 0)
3794 			continue;
3795 
3796 		ret = find_free_dev_extent(device,
3797 					   max_stripe_size * dev_stripes,
3798 					   &dev_offset, &max_avail);
3799 		if (ret && ret != -ENOSPC)
3800 			goto error;
3801 
3802 		if (ret == 0)
3803 			max_avail = max_stripe_size * dev_stripes;
3804 
3805 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3806 			continue;
3807 
3808 		if (ndevs == fs_devices->rw_devices) {
3809 			WARN(1, "%s: found more than %llu devices\n",
3810 			     __func__, fs_devices->rw_devices);
3811 			break;
3812 		}
3813 		devices_info[ndevs].dev_offset = dev_offset;
3814 		devices_info[ndevs].max_avail = max_avail;
3815 		devices_info[ndevs].total_avail = total_avail;
3816 		devices_info[ndevs].dev = device;
3817 		++ndevs;
3818 	}
3819 
3820 	/*
3821 	 * now sort the devices by hole size / available space
3822 	 */
3823 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3824 	     btrfs_cmp_device_info, NULL);
3825 
3826 	/* round down to number of usable stripes */
3827 	ndevs -= ndevs % devs_increment;
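	/* e.g. RAID10 has devs_increment = 2: with 5 usable devices we
	 * round down and stripe across only 4 of them
	 */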
3828 
3829 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3830 		ret = -ENOSPC;
3831 		goto error;
3832 	}
3833 
3834 	if (devs_max && ndevs > devs_max)
3835 		ndevs = devs_max;
3836 	/*
3837 	 * the primary goal is to maximize the number of stripes, so use as many
3838 	 * devices as possible, even if the stripes are not maximum sized.
3839 	 */
3840 	stripe_size = devices_info[ndevs-1].max_avail;
3841 	num_stripes = ndevs * dev_stripes;
3842 
3843 	/*
3844 	 * this will have to be fixed for RAID1 and RAID10 over
3845 	 * more drives
3846 	 */
3847 	data_stripes = num_stripes / ncopies;
3848 
3849 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
3850 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
3851 				 btrfs_super_stripesize(info->super_copy));
3852 		data_stripes = num_stripes - 1;
3853 	}
3854 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
3855 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
3856 				 btrfs_super_stripesize(info->super_copy));
3857 		data_stripes = num_stripes - 2;
3858 	}
3859 
3860 	/*
3861 	 * Use the number of data stripes to figure out how big this chunk
3862 	 * is really going to be in terms of logical address space,
3863 	 * and compare that answer with the max chunk size
3864 	 */
3865 	if (stripe_size * data_stripes > max_chunk_size) {
3866 		u64 mask = (1ULL << 24) - 1;
3867 		stripe_size = max_chunk_size;
3868 		do_div(stripe_size, data_stripes);
3869 
3870 		/* bump the answer up to a 16MB boundary */
3871 		stripe_size = (stripe_size + mask) & ~mask;
3872 
3873 		/* but don't go higher than the limits we found
3874 		 * while searching for free extents
3875 		 */
3876 		if (stripe_size > devices_info[ndevs-1].max_avail)
3877 			stripe_size = devices_info[ndevs-1].max_avail;
3878 	}
3879 
3880 	do_div(stripe_size, dev_stripes);
3881 
3882 	/* align to BTRFS_STRIPE_LEN */
3883 	do_div(stripe_size, raid_stripe_len);
3884 	stripe_size *= raid_stripe_len;
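
	/*
	 * Worked example: a RAID0 data chunk over 3 devices that each have
	 * at least 1 GiB free ends up with stripe_size = 1 GiB (max_avail
	 * was capped at max_stripe_size above), num_stripes = 3 and
	 * data_stripes = 3, i.e. a chunk covering 3 GiB of logical address
	 * space, assuming the 10% cap above did not shrink it.
	 */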
3885 
3886 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3887 	if (!map) {
3888 		ret = -ENOMEM;
3889 		goto error;
3890 	}
3891 	map->num_stripes = num_stripes;
3892 
3893 	for (i = 0; i < ndevs; ++i) {
3894 		for (j = 0; j < dev_stripes; ++j) {
3895 			int s = i * dev_stripes + j;
3896 			map->stripes[s].dev = devices_info[i].dev;
3897 			map->stripes[s].physical = devices_info[i].dev_offset +
3898 						   j * stripe_size;
3899 		}
3900 	}
3901 	map->sector_size = extent_root->sectorsize;
3902 	map->stripe_len = raid_stripe_len;
3903 	map->io_align = raid_stripe_len;
3904 	map->io_width = raid_stripe_len;
3905 	map->type = type;
3906 	map->sub_stripes = sub_stripes;
3907 
3908 	*map_ret = map;
3909 	num_bytes = stripe_size * data_stripes;
3910 
3911 	*stripe_size_out = stripe_size;
3912 	*num_bytes_out = num_bytes;
3913 
3914 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3915 
3916 	em = alloc_extent_map();
3917 	if (!em) {
3918 		ret = -ENOMEM;
3919 		goto error;
3920 	}
3921 	em->bdev = (struct block_device *)map;
3922 	em->start = start;
3923 	em->len = num_bytes;
3924 	em->block_start = 0;
3925 	em->block_len = em->len;
3926 
3927 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3928 	write_lock(&em_tree->lock);
3929 	ret = add_extent_mapping(em_tree, em);
3930 	write_unlock(&em_tree->lock);
3931 	if (ret) {
3932 		free_extent_map(em);
3933 		goto error;
3934 	}
3935 
3936 	for (i = 0; i < map->num_stripes; ++i) {
3937 		struct btrfs_device *device;
3938 		u64 dev_offset;
3939 
3940 		device = map->stripes[i].dev;
3941 		dev_offset = map->stripes[i].physical;
3942 
3943 		ret = btrfs_alloc_dev_extent(trans, device,
3944 				info->chunk_root->root_key.objectid,
3945 				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3946 				start, dev_offset, stripe_size);
3947 		if (ret)
3948 			goto error_dev_extent;
3949 	}
3950 
3951 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
3952 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3953 				     start, num_bytes);
3954 	if (ret) {
3955 		i = map->num_stripes - 1;
3956 		goto error_dev_extent;
3957 	}
3958 
3959 	free_extent_map(em);
3960 	check_raid56_incompat_flag(extent_root->fs_info, type);
3961 
3962 	kfree(devices_info);
3963 	return 0;
3964 
3965 error_dev_extent:
3966 	for (; i >= 0; i--) {
3967 		struct btrfs_device *device;
3968 		int err;
3969 
3970 		device = map->stripes[i].dev;
3971 		err = btrfs_free_dev_extent(trans, device, start);
3972 		if (err) {
3973 			btrfs_abort_transaction(trans, extent_root, err);
3974 			break;
3975 		}
3976 	}
3977 	write_lock(&em_tree->lock);
3978 	remove_extent_mapping(em_tree, em);
3979 	write_unlock(&em_tree->lock);
3980 
3981 	/* One for our allocation */
3982 	free_extent_map(em);
3983 	/* One for the tree reference */
3984 	free_extent_map(em);
3985 error:
3986 	kfree(map);
3987 	kfree(devices_info);
3988 	return ret;
3989 }
3990 
3991 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3992 				struct btrfs_root *extent_root,
3993 				struct map_lookup *map, u64 chunk_offset,
3994 				u64 chunk_size, u64 stripe_size)
3995 {
3996 	u64 dev_offset;
3997 	struct btrfs_key key;
3998 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3999 	struct btrfs_device *device;
4000 	struct btrfs_chunk *chunk;
4001 	struct btrfs_stripe *stripe;
4002 	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
4003 	int index = 0;
4004 	int ret;
4005 
4006 	chunk = kzalloc(item_size, GFP_NOFS);
4007 	if (!chunk)
4008 		return -ENOMEM;
4009 
4010 	index = 0;
4011 	while (index < map->num_stripes) {
4012 		device = map->stripes[index].dev;
4013 		device->bytes_used += stripe_size;
4014 		ret = btrfs_update_device(trans, device);
4015 		if (ret)
4016 			goto out_free;
4017 		index++;
4018 	}
4019 
4020 	spin_lock(&extent_root->fs_info->free_chunk_lock);
4021 	extent_root->fs_info->free_chunk_space -= (stripe_size *
4022 						   map->num_stripes);
4023 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
4024 
4025 	index = 0;
4026 	stripe = &chunk->stripe;
4027 	while (index < map->num_stripes) {
4028 		device = map->stripes[index].dev;
4029 		dev_offset = map->stripes[index].physical;
4030 
4031 		btrfs_set_stack_stripe_devid(stripe, device->devid);
4032 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
4033 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4034 		stripe++;
4035 		index++;
4036 	}
4037 
4038 	btrfs_set_stack_chunk_length(chunk, chunk_size);
4039 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4040 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4041 	btrfs_set_stack_chunk_type(chunk, map->type);
4042 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4043 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4044 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4045 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4046 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4047 
4048 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4049 	key.type = BTRFS_CHUNK_ITEM_KEY;
4050 	key.offset = chunk_offset;
4051 
4052 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4053 
4054 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4055 		/*
4056 		 * TODO: Cleanup of inserted chunk root in case of
4057 		 * failure.
4058 		 */
4059 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4060 					     item_size);
4061 	}
4062 
4063 out_free:
4064 	kfree(chunk);
4065 	return ret;
4066 }
4067 
4068 /*
4069  * Chunk allocation falls into two parts. The first part does the work
4070  * that makes the newly allocated chunk usable but does not do any
4071  * operation that modifies the chunk tree. The second part does the work
4072  * that requires modifying the chunk tree. This division is important
4073  * for the bootstrap process of adding storage to a seed btrfs.
4074  */
4075 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4076 		      struct btrfs_root *extent_root, u64 type)
4077 {
4078 	u64 chunk_offset;
4079 	u64 chunk_size;
4080 	u64 stripe_size;
4081 	struct map_lookup *map;
4082 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4083 	int ret;
4084 
4085 	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4086 			      &chunk_offset);
4087 	if (ret)
4088 		return ret;
4089 
4090 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4091 				  &stripe_size, chunk_offset, type);
4092 	if (ret)
4093 		return ret;
4094 
4095 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4096 				   chunk_size, stripe_size);
4097 	if (ret)
4098 		return ret;
4099 	return 0;
4100 }
4101 
4102 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4103 					 struct btrfs_root *root,
4104 					 struct btrfs_device *device)
4105 {
4106 	u64 chunk_offset;
4107 	u64 sys_chunk_offset;
4108 	u64 chunk_size;
4109 	u64 sys_chunk_size;
4110 	u64 stripe_size;
4111 	u64 sys_stripe_size;
4112 	u64 alloc_profile;
4113 	struct map_lookup *map;
4114 	struct map_lookup *sys_map;
4115 	struct btrfs_fs_info *fs_info = root->fs_info;
4116 	struct btrfs_root *extent_root = fs_info->extent_root;
4117 	int ret;
4118 
4119 	ret = find_next_chunk(fs_info->chunk_root,
4120 			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
4121 	if (ret)
4122 		return ret;
4123 
4124 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4125 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
4126 				  &stripe_size, chunk_offset, alloc_profile);
4127 	if (ret)
4128 		return ret;
4129 
4130 	sys_chunk_offset = chunk_offset + chunk_size;
4131 
4132 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4133 	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
4134 				  &sys_chunk_size, &sys_stripe_size,
4135 				  sys_chunk_offset, alloc_profile);
4136 	if (ret) {
4137 		btrfs_abort_transaction(trans, root, ret);
4138 		goto out;
4139 	}
4140 
4141 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4142 	if (ret) {
4143 		btrfs_abort_transaction(trans, root, ret);
4144 		goto out;
4145 	}
4146 
4147 	/*
4148 	 * Modifying the chunk tree requires allocating new blocks from
4149 	 * both the system block group and the metadata block group, so we
4150 	 * can only do operations that modify the chunk tree after both
4151 	 * block groups have been created.
4152 	 */
4153 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
4154 				   chunk_size, stripe_size);
4155 	if (ret) {
4156 		btrfs_abort_transaction(trans, root, ret);
4157 		goto out;
4158 	}
4159 
4160 	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
4161 				   sys_chunk_offset, sys_chunk_size,
4162 				   sys_stripe_size);
4163 	if (ret)
4164 		btrfs_abort_transaction(trans, root, ret);
4165 
4166 out:
4167 
4168 	return ret;
4169 }
4170 
4171 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4172 {
4173 	struct extent_map *em;
4174 	struct map_lookup *map;
4175 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4176 	int readonly = 0;
4177 	int i;
4178 
4179 	read_lock(&map_tree->map_tree.lock);
4180 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4181 	read_unlock(&map_tree->map_tree.lock);
4182 	if (!em)
4183 		return 1;
4184 
4185 	if (btrfs_test_opt(root, DEGRADED)) {
4186 		free_extent_map(em);
4187 		return 0;
4188 	}
4189 
4190 	map = (struct map_lookup *)em->bdev;
4191 	for (i = 0; i < map->num_stripes; i++) {
4192 		if (!map->stripes[i].dev->writeable) {
4193 			readonly = 1;
4194 			break;
4195 		}
4196 	}
4197 	free_extent_map(em);
4198 	return readonly;
4199 }
4200 
4201 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4202 {
4203 	extent_map_tree_init(&tree->map_tree);
4204 }
4205 
4206 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4207 {
4208 	struct extent_map *em;
4209 
4210 	while (1) {
4211 		write_lock(&tree->map_tree.lock);
4212 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4213 		if (em)
4214 			remove_extent_mapping(&tree->map_tree, em);
4215 		write_unlock(&tree->map_tree.lock);
4216 		if (!em)
4217 			break;
4218 		kfree(em->bdev);
4219 		/* once for us */
4220 		free_extent_map(em);
4221 		/* once for the tree */
4222 		free_extent_map(em);
4223 	}
4224 }
4225 
4226 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4227 {
4228 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4229 	struct extent_map *em;
4230 	struct map_lookup *map;
4231 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4232 	int ret;
4233 
4234 	read_lock(&em_tree->lock);
4235 	em = lookup_extent_mapping(em_tree, logical, len);
4236 	read_unlock(&em_tree->lock);
4237 	BUG_ON(!em);
4238 
4239 	BUG_ON(em->start > logical || em->start + em->len < logical);
4240 	map = (struct map_lookup *)em->bdev;
4241 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4242 		ret = map->num_stripes;
4243 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4244 		ret = map->sub_stripes;
4245 	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4246 		ret = 2;
4247 	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4248 		ret = 3;
4249 	else
4250 		ret = 1;
4251 	free_extent_map(em);
4252 
4253 	btrfs_dev_replace_lock(&fs_info->dev_replace);
4254 	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4255 		ret++;
4256 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
4257 
4258 	return ret;
4259 }
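
/*
 * Example: RAID1 and DUP report num_stripes (2), RAID10 reports
 * sub_stripes (2), RAID5 reports 2 (the data block plus reconstruction
 * from parity) and RAID6 reports 3.  While a device replace is
 * running, the target disk counts as one additional copy.
 */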
4260 
4261 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4262 				    struct btrfs_mapping_tree *map_tree,
4263 				    u64 logical)
4264 {
4265 	struct extent_map *em;
4266 	struct map_lookup *map;
4267 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4268 	unsigned long len = root->sectorsize;
4269 
4270 	read_lock(&em_tree->lock);
4271 	em = lookup_extent_mapping(em_tree, logical, len);
4272 	read_unlock(&em_tree->lock);
4273 	BUG_ON(!em);
4274 
4275 	BUG_ON(em->start > logical || em->start + em->len < logical);
4276 	map = (struct map_lookup *)em->bdev;
4277 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4278 			 BTRFS_BLOCK_GROUP_RAID6)) {
4279 		len = map->stripe_len * nr_data_stripes(map);
4280 	}
4281 	free_extent_map(em);
4282 	return len;
4283 }
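
/*
 * Example: RAID5 over 4 devices with a 64 KiB stripe_len has 3 data
 * stripes, so a full stripe spans 192 KiB of logical address space;
 * all other profiles answer with a single sector.
 */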
4284 
4285 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4286 			   u64 logical, u64 len, int mirror_num)
4287 {
4288 	struct extent_map *em;
4289 	struct map_lookup *map;
4290 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4291 	int ret = 0;
4292 
4293 	read_lock(&em_tree->lock);
4294 	em = lookup_extent_mapping(em_tree, logical, len);
4295 	read_unlock(&em_tree->lock);
4296 	BUG_ON(!em);
4297 
4298 	BUG_ON(em->start > logical || em->start + em->len < logical);
4299 	map = (struct map_lookup *)em->bdev;
4300 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4301 			 BTRFS_BLOCK_GROUP_RAID6))
4302 		ret = 1;
4303 	free_extent_map(em);
4304 	return ret;
4305 }
4306 
4307 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4308 			    struct map_lookup *map, int first, int num,
4309 			    int optimal, int dev_replace_is_ongoing)
4310 {
4311 	int i;
4312 	int tolerance;
4313 	struct btrfs_device *srcdev;
4314 
4315 	if (dev_replace_is_ongoing &&
4316 	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4317 	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4318 		srcdev = fs_info->dev_replace.srcdev;
4319 	else
4320 		srcdev = NULL;
4321 
4322 	/*
4323 	 * try to avoid the drive that is the source drive for a
4324 	 * dev-replace procedure, only choose it if no other non-missing
4325 	 * mirror is available
4326 	 */
4327 	for (tolerance = 0; tolerance < 2; tolerance++) {
4328 		if (map->stripes[optimal].dev->bdev &&
4329 		    (tolerance || map->stripes[optimal].dev != srcdev))
4330 			return optimal;
4331 		for (i = first; i < first + num; i++) {
4332 			if (map->stripes[i].dev->bdev &&
4333 			    (tolerance || map->stripes[i].dev != srcdev))
4334 				return i;
4335 		}
4336 	}
4337 
4338 	/* we couldn't find one that doesn't fail.  Just return something
4339 	 * and the io error handling code will clean up eventually
4340 	 */
4341 	return optimal;
4342 }
4343 
4344 static inline int parity_smaller(u64 a, u64 b)
4345 {
4346 	return a > b;
4347 }
4348 
4349 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4350 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4351 {
4352 	struct btrfs_bio_stripe s;
4353 	int i;
4354 	u64 l;
4355 	int again = 1;
4356 
4357 	while (again) {
4358 		again = 0;
4359 		for (i = 0; i < bbio->num_stripes - 1; i++) {
4360 			if (parity_smaller(raid_map[i], raid_map[i+1])) {
4361 				s = bbio->stripes[i];
4362 				l = raid_map[i];
4363 				bbio->stripes[i] = bbio->stripes[i+1];
4364 				raid_map[i] = raid_map[i+1];
4365 				bbio->stripes[i+1] = s;
4366 				raid_map[i+1] = l;
4367 				again = 1;
4368 			}
4369 		}
4370 	}
4371 }
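
/*
 * This works because the parity entries in raid_map are larger than
 * any logical address: RAID5_P_STRIPE and RAID6_Q_STRIPE are defined
 * near the top of the u64 range, so the sort above always moves them
 * behind the data stripes.
 */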
4372 
4373 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4374 			     u64 logical, u64 *length,
4375 			     struct btrfs_bio **bbio_ret,
4376 			     int mirror_num, u64 **raid_map_ret)
4377 {
4378 	struct extent_map *em;
4379 	struct map_lookup *map;
4380 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4381 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4382 	u64 offset;
4383 	u64 stripe_offset;
4384 	u64 stripe_end_offset;
4385 	u64 stripe_nr;
4386 	u64 stripe_nr_orig;
4387 	u64 stripe_nr_end;
4388 	u64 stripe_len;
4389 	u64 *raid_map = NULL;
4390 	int stripe_index;
4391 	int i;
4392 	int ret = 0;
4393 	int num_stripes;
4394 	int max_errors = 0;
4395 	struct btrfs_bio *bbio = NULL;
4396 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4397 	int dev_replace_is_ongoing = 0;
4398 	int num_alloc_stripes;
4399 	int patch_the_first_stripe_for_dev_replace = 0;
4400 	u64 physical_to_patch_in_first_stripe = 0;
4401 	u64 raid56_full_stripe_start = (u64)-1;
4402 
4403 	read_lock(&em_tree->lock);
4404 	em = lookup_extent_mapping(em_tree, logical, *length);
4405 	read_unlock(&em_tree->lock);
4406 
4407 	if (!em) {
4408 		printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
4409 		       (unsigned long long)logical,
4410 		       (unsigned long long)*length);
4411 		BUG();
4412 	}
4413 
4414 	BUG_ON(em->start > logical || em->start + em->len < logical);
4415 	map = (struct map_lookup *)em->bdev;
4416 	offset = logical - em->start;
4417 
4418 	if (mirror_num > map->num_stripes)
4419 		mirror_num = 0;
4420 
4421 	stripe_len = map->stripe_len;
4422 	stripe_nr = offset;
4423 	/*
4424 	 * stripe_nr counts the total number of stripes we have to stride
4425 	 * to get to this block
4426 	 */
4427 	do_div(stripe_nr, stripe_len);
4428 
4429 	stripe_offset = stripe_nr * stripe_len;
4430 	BUG_ON(offset < stripe_offset);
4431 
4432 	/* stripe_offset is the offset of this block in its stripe */
4433 	stripe_offset = offset - stripe_offset;
4434 
4435 	/* if we're here for raid56, we need to know the stripe aligned start */
4436 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4437 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4438 		raid56_full_stripe_start = offset;
4439 
4440 		/* allow a write of a full stripe, but make sure we don't
4441 		 * allow straddling of stripes
4442 		 */
4443 		do_div(raid56_full_stripe_start, full_stripe_len);
4444 		raid56_full_stripe_start *= full_stripe_len;
4445 	}
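		/*
		 * e.g. RAID5 on 3 devices: full_stripe_len is 2 data
		 * stripes * 64 KiB = 128 KiB, so an offset of 300 KiB
		 * rounds down to a full stripe start of 256 KiB.
		 */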
4446 
4447 	if (rw & REQ_DISCARD) {
4448 		/* we don't discard raid56 yet */
4449 		if (map->type &
4450 		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4451 			ret = -EOPNOTSUPP;
4452 			goto out;
4453 		}
4454 		*length = min_t(u64, em->len - offset, *length);
4455 	} else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4456 		u64 max_len;
4457 		/* For writes to RAID[56], allow a full stripeset across all disks.
4458 		   For other RAID types and for RAID[56] reads, just allow a single
4459 		   stripe (on a single disk). */
4460 		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4461 		    (rw & REQ_WRITE)) {
4462 			max_len = stripe_len * nr_data_stripes(map) -
4463 				(offset - raid56_full_stripe_start);
4464 		} else {
4465 			/* we limit the length of each bio to what fits in a stripe */
4466 			max_len = stripe_len - stripe_offset;
4467 		}
4468 		*length = min_t(u64, em->len - offset, max_len);
4469 	} else {
4470 		*length = em->len - offset;
4471 	}
4472 
4473 	/* This is for when we're called from btrfs_merge_bio_hook() and all
4474 	   it cares about is the length */
4475 	if (!bbio_ret)
4476 		goto out;
4477 
4478 	btrfs_dev_replace_lock(dev_replace);
4479 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4480 	if (!dev_replace_is_ongoing)
4481 		btrfs_dev_replace_unlock(dev_replace);
4482 
4483 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4484 	    !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4485 	    dev_replace->tgtdev != NULL) {
4486 		/*
4487 		 * in dev-replace case, for repair case (that's the only
4488 		 * case where the mirror is selected explicitly when
4489 		 * calling btrfs_map_block), blocks left of the left cursor
4490 		 * can also be read from the target drive.
4491 		 * For REQ_GET_READ_MIRRORS, the target drive is added as
4492 		 * the last one to the array of stripes. For READ, it also
4493 		 * needs to be supported using the same mirror number.
4494 		 * If the requested block is not left of the left cursor,
4495 		 * EIO is returned. This can happen because btrfs_num_copies()
4496 		 * returns one more in the dev-replace case.
4497 		 */
4498 		u64 tmp_length = *length;
4499 		struct btrfs_bio *tmp_bbio = NULL;
4500 		int tmp_num_stripes;
4501 		u64 srcdev_devid = dev_replace->srcdev->devid;
4502 		int index_srcdev = 0;
4503 		int found = 0;
4504 		u64 physical_of_found = 0;
4505 
4506 		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4507 			     logical, &tmp_length, &tmp_bbio, 0, NULL);
4508 		if (ret) {
4509 			WARN_ON(tmp_bbio != NULL);
4510 			goto out;
4511 		}
4512 
4513 		tmp_num_stripes = tmp_bbio->num_stripes;
4514 		if (mirror_num > tmp_num_stripes) {
4515 			/*
4516 			 * REQ_GET_READ_MIRRORS does not contain this
4517 			 * mirror, which means that the requested area
4518 			 * is not left of the left cursor
4519 			 */
4520 			ret = -EIO;
4521 			kfree(tmp_bbio);
4522 			goto out;
4523 		}
4524 
4525 		/*
4526 		 * process the rest of the function using the mirror_num
4527 		 * of the source drive. Therefore look it up first.
4528 		 * At the end, patch the device pointer to that of the
4529 		 * target drive.
4530 		 */
4531 		for (i = 0; i < tmp_num_stripes; i++) {
4532 			if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4533 				/*
4534 				 * In case of DUP, in order to keep it
4535 				 * simple, only add the mirror with the
4536 				 * lowest physical address
4537 				 */
4538 				if (found &&
4539 				    physical_of_found <=
4540 				     tmp_bbio->stripes[i].physical)
4541 					continue;
4542 				index_srcdev = i;
4543 				found = 1;
4544 				physical_of_found =
4545 					tmp_bbio->stripes[i].physical;
4546 			}
4547 		}
4548 
4549 		if (found) {
4550 			mirror_num = index_srcdev + 1;
4551 			patch_the_first_stripe_for_dev_replace = 1;
4552 			physical_to_patch_in_first_stripe = physical_of_found;
4553 		} else {
4554 			WARN_ON(1);
4555 			ret = -EIO;
4556 			kfree(tmp_bbio);
4557 			goto out;
4558 		}
4559 
4560 		kfree(tmp_bbio);
4561 	} else if (mirror_num > map->num_stripes) {
4562 		mirror_num = 0;
4563 	}
4564 
4565 	num_stripes = 1;
4566 	stripe_index = 0;
4567 	stripe_nr_orig = stripe_nr;
4568 	stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4569 	do_div(stripe_nr_end, map->stripe_len);
4570 	stripe_end_offset = stripe_nr_end * map->stripe_len -
4571 			    (offset + *length);
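	/*
	 * e.g. with stripe_len = 64 KiB and offset + *length = 100 KiB:
	 * stripe_nr_end = 2 and stripe_end_offset = 28 KiB, the unused
	 * tail of the last stripe touched by the range.
	 */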
4572 
4573 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4574 		if (rw & REQ_DISCARD)
4575 			num_stripes = min_t(u64, map->num_stripes,
4576 					    stripe_nr_end - stripe_nr_orig);
4577 		stripe_index = do_div(stripe_nr, map->num_stripes);
4578 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4579 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4580 			num_stripes = map->num_stripes;
4581 		else if (mirror_num)
4582 			stripe_index = mirror_num - 1;
4583 		else {
4584 			stripe_index = find_live_mirror(fs_info, map, 0,
4585 					    map->num_stripes,
4586 					    current->pid % map->num_stripes,
4587 					    dev_replace_is_ongoing);
4588 			mirror_num = stripe_index + 1;
4589 		}
4590 
4591 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4592 		if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4593 			num_stripes = map->num_stripes;
4594 		} else if (mirror_num) {
4595 			stripe_index = mirror_num - 1;
4596 		} else {
4597 			mirror_num = 1;
4598 		}
4599 
4600 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4601 		int factor = map->num_stripes / map->sub_stripes;
4602 
4603 		stripe_index = do_div(stripe_nr, factor);
4604 		stripe_index *= map->sub_stripes;
4605 
4606 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4607 			num_stripes = map->sub_stripes;
4608 		else if (rw & REQ_DISCARD)
4609 			num_stripes = min_t(u64, map->sub_stripes *
4610 					    (stripe_nr_end - stripe_nr_orig),
4611 					    map->num_stripes);
4612 		else if (mirror_num)
4613 			stripe_index += mirror_num - 1;
4614 		else {
4615 			int old_stripe_index = stripe_index;
4616 			stripe_index = find_live_mirror(fs_info, map,
4617 					      stripe_index,
4618 					      map->sub_stripes, stripe_index +
4619 					      current->pid % map->sub_stripes,
4620 					      dev_replace_is_ongoing);
4621 			mirror_num = stripe_index - old_stripe_index + 1;
4622 		}
4623 
4624 	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4625 				BTRFS_BLOCK_GROUP_RAID6)) {
4626 		u64 tmp;
4627 
4628 		if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4629 		    && raid_map_ret) {
4630 			int i, rot;
4631 
4632 			/* push stripe_nr back to the start of the full stripe */
4633 			stripe_nr = raid56_full_stripe_start;
4634 			do_div(stripe_nr, stripe_len);
4635 
4636 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4637 
4638 			/* RAID[56] write or recovery. Return all stripes */
4639 			num_stripes = map->num_stripes;
4640 			max_errors = nr_parity_stripes(map);
4641 
4642 			raid_map = kmalloc(sizeof(u64) * num_stripes,
4643 					   GFP_NOFS);
4644 			if (!raid_map) {
4645 				ret = -ENOMEM;
4646 				goto out;
4647 			}
4648 
4649 			/* Work out the disk rotation on this stripe-set */
4650 			tmp = stripe_nr;
4651 			rot = do_div(tmp, num_stripes);
4652 
4653 			/* Fill in the logical address of each stripe */
4654 			tmp = stripe_nr * nr_data_stripes(map);
4655 			for (i = 0; i < nr_data_stripes(map); i++)
4656 				raid_map[(i+rot) % num_stripes] =
4657 					em->start + (tmp + i) * map->stripe_len;
4658 
4659 			raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4660 			if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4661 				raid_map[(i+rot+1) % num_stripes] =
4662 					RAID6_Q_STRIPE;
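
			/*
			 * e.g. RAID5 over 3 devices (2 data + P) with
			 * stripe_nr = 1 gives rot = 1, so raid_map
			 * becomes { P, data0, data1 }: the parity
			 * rotates one slot per stripe-set.
			 */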
4663 
4664 			*length = map->stripe_len;
4665 			stripe_index = 0;
4666 			stripe_offset = 0;
4667 		} else {
4668 			/*
4669 			 * Mirror #0 or #1 means the original data block.
4670 			 * Mirror #2 is RAID5 parity block.
4671 			 * Mirror #3 is RAID6 Q block.
4672 			 */
4673 			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4674 			if (mirror_num > 1)
4675 				stripe_index = nr_data_stripes(map) +
4676 						mirror_num - 2;
4677 
4678 			/* We distribute the parity blocks across stripes */
4679 			tmp = stripe_nr + stripe_index;
4680 			stripe_index = do_div(tmp, map->num_stripes);
4681 		}
4682 	} else {
4683 		/*
4684 		 * after this do_div call, stripe_nr is the number of stripes
4685 		 * on this device we have to walk to find the data, and
4686 		 * stripe_index is the number of our device in the stripe array
4687 		 */
4688 		stripe_index = do_div(stripe_nr, map->num_stripes);
4689 		mirror_num = stripe_index + 1;
4690 	}
4691 	BUG_ON(stripe_index >= map->num_stripes);
4692 
4693 	num_alloc_stripes = num_stripes;
4694 	if (dev_replace_is_ongoing) {
4695 		if (rw & (REQ_WRITE | REQ_DISCARD))
4696 			num_alloc_stripes <<= 1;
4697 		if (rw & REQ_GET_READ_MIRRORS)
4698 			num_alloc_stripes++;
4699 	}
4700 	bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4701 	if (!bbio) {
4702 		ret = -ENOMEM;
4703 		goto out;
4704 	}
4705 	atomic_set(&bbio->error, 0);
4706 
4707 	if (rw & REQ_DISCARD) {
4708 		int factor = 0;
4709 		int sub_stripes = 0;
4710 		u64 stripes_per_dev = 0;
4711 		u32 remaining_stripes = 0;
4712 		u32 last_stripe = 0;
4713 
4714 		if (map->type &
4715 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4716 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4717 				sub_stripes = 1;
4718 			else
4719 				sub_stripes = map->sub_stripes;
4720 
4721 			factor = map->num_stripes / sub_stripes;
4722 			stripes_per_dev = div_u64_rem(stripe_nr_end -
4723 						      stripe_nr_orig,
4724 						      factor,
4725 						      &remaining_stripes);
4726 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4727 			last_stripe *= sub_stripes;
4728 		}
4729 
4730 		for (i = 0; i < num_stripes; i++) {
4731 			bbio->stripes[i].physical =
4732 				map->stripes[stripe_index].physical +
4733 				stripe_offset + stripe_nr * map->stripe_len;
4734 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4735 
4736 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4737 					 BTRFS_BLOCK_GROUP_RAID10)) {
4738 				bbio->stripes[i].length = stripes_per_dev *
4739 							  map->stripe_len;
4740 
4741 				if (i / sub_stripes < remaining_stripes)
4742 					bbio->stripes[i].length +=
4743 						map->stripe_len;
4744 
4745 				/*
4746 				 * Special for the first stripe and
4747 				 * the last stripe:
4748 				 *
4749 				 * |-------|...|-------|
4750 				 *     |----------|
4751 				 *    off     end_off
4752 				 */
4753 				if (i < sub_stripes)
4754 					bbio->stripes[i].length -=
4755 						stripe_offset;
4756 
4757 				if (stripe_index >= last_stripe &&
4758 				    stripe_index <= (last_stripe +
4759 						     sub_stripes - 1))
4760 					bbio->stripes[i].length -=
4761 						stripe_end_offset;
4762 
4763 				if (i == sub_stripes - 1)
4764 					stripe_offset = 0;
4765 			} else
4766 				bbio->stripes[i].length = *length;
4767 
4768 			stripe_index++;
4769 			if (stripe_index == map->num_stripes) {
4770 				/* This could only happen for RAID0/10 */
4771 				stripe_index = 0;
4772 				stripe_nr++;
4773 			}
4774 		}
4775 	} else {
4776 		for (i = 0; i < num_stripes; i++) {
4777 			bbio->stripes[i].physical =
4778 				map->stripes[stripe_index].physical +
4779 				stripe_offset +
4780 				stripe_nr * map->stripe_len;
4781 			bbio->stripes[i].dev =
4782 				map->stripes[stripe_index].dev;
4783 			stripe_index++;
4784 		}
4785 	}
4786 
4787 	if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4788 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4789 				 BTRFS_BLOCK_GROUP_RAID10 |
4790 				 BTRFS_BLOCK_GROUP_RAID5 |
4791 				 BTRFS_BLOCK_GROUP_DUP)) {
4792 			max_errors = 1;
4793 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4794 			max_errors = 2;
4795 		}
4796 	}
4797 
4798 	if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4799 	    dev_replace->tgtdev != NULL) {
4800 		int index_where_to_add;
4801 		u64 srcdev_devid = dev_replace->srcdev->devid;
4802 
4803 		/*
4804 		 * duplicate the write operations while the dev replace
4805 		 * procedure is running. Since the copying of the old disk
4806 		 * to the new disk takes place at run time while the
4807 		 * filesystem is mounted writable, the regular write
4808 		 * operations to the old disk have to be duplicated to go
4809 		 * to the new disk as well.
4810 		 * Note that device->missing is handled by the caller, and
4811 		 * that the write to the old disk is already set up in the
4812 		 * stripes array.
4813 		 */
4814 		index_where_to_add = num_stripes;
4815 		for (i = 0; i < num_stripes; i++) {
4816 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
4817 				/* write to new disk, too */
4818 				struct btrfs_bio_stripe *new =
4819 					bbio->stripes + index_where_to_add;
4820 				struct btrfs_bio_stripe *old =
4821 					bbio->stripes + i;
4822 
4823 				new->physical = old->physical;
4824 				new->length = old->length;
4825 				new->dev = dev_replace->tgtdev;
4826 				index_where_to_add++;
4827 				max_errors++;
4828 			}
4829 		}
4830 		num_stripes = index_where_to_add;
4831 	} else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4832 		   dev_replace->tgtdev != NULL) {
4833 		u64 srcdev_devid = dev_replace->srcdev->devid;
4834 		int index_srcdev = 0;
4835 		int found = 0;
4836 		u64 physical_of_found = 0;
4837 
4838 		/*
4839 		 * During the dev-replace procedure, the target drive can
4840 		 * also be used to read data in case it is needed to repair
4841 		 * a corrupt block elsewhere. This is possible if the
4842 		 * requested area is left of the left cursor. In this area,
4843 		 * the target drive is a full copy of the source drive.
4844 		 */
4845 		for (i = 0; i < num_stripes; i++) {
4846 			if (bbio->stripes[i].dev->devid == srcdev_devid) {
4847 				/*
4848 				 * In case of DUP, in order to keep it
4849 				 * simple, only add the mirror with the
4850 				 * lowest physical address
4851 				 */
4852 				if (found &&
4853 				    physical_of_found <=
4854 				     bbio->stripes[i].physical)
4855 					continue;
4856 				index_srcdev = i;
4857 				found = 1;
4858 				physical_of_found = bbio->stripes[i].physical;
4859 			}
4860 		}
4861 		if (found) {
4862 			u64 length = map->stripe_len;
4863 
4864 			if (physical_of_found + length <=
4865 			    dev_replace->cursor_left) {
4866 				struct btrfs_bio_stripe *tgtdev_stripe =
4867 					bbio->stripes + num_stripes;
4868 
4869 				tgtdev_stripe->physical = physical_of_found;
4870 				tgtdev_stripe->length =
4871 					bbio->stripes[index_srcdev].length;
4872 				tgtdev_stripe->dev = dev_replace->tgtdev;
4873 
4874 				num_stripes++;
4875 			}
4876 		}
4877 	}
4878 
4879 	*bbio_ret = bbio;
4880 	bbio->num_stripes = num_stripes;
4881 	bbio->max_errors = max_errors;
4882 	bbio->mirror_num = mirror_num;
4883 
4884 	/*
4885 	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
4886 	 * mirror_num == num_stripes + 1 && dev_replace target drive is
4887 	 * available as a mirror
4888 	 */
4889 	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4890 		WARN_ON(num_stripes > 1);
4891 		bbio->stripes[0].dev = dev_replace->tgtdev;
4892 		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4893 		bbio->mirror_num = map->num_stripes + 1;
4894 	}
4895 	if (raid_map) {
4896 		sort_parity_stripes(bbio, raid_map);
4897 		*raid_map_ret = raid_map;
4898 	}
4899 out:
4900 	if (dev_replace_is_ongoing)
4901 		btrfs_dev_replace_unlock(dev_replace);
4902 	free_extent_map(em);
4903 	return ret;
4904 }
4905 
4906 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4907 		      u64 logical, u64 *length,
4908 		      struct btrfs_bio **bbio_ret, int mirror_num)
4909 {
4910 	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4911 				 mirror_num, NULL);
4912 }
4913 
4914 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4915 		     u64 chunk_start, u64 physical, u64 devid,
4916 		     u64 **logical, int *naddrs, int *stripe_len)
4917 {
4918 	struct extent_map_tree *em_tree = &map_tree->map_tree;
4919 	struct extent_map *em;
4920 	struct map_lookup *map;
4921 	u64 *buf;
4922 	u64 bytenr;
4923 	u64 length;
4924 	u64 stripe_nr;
4925 	u64 rmap_len;
4926 	int i, j, nr = 0;
4927 
4928 	read_lock(&em_tree->lock);
4929 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
4930 	read_unlock(&em_tree->lock);
4931 
4932 	BUG_ON(!em || em->start != chunk_start);
4933 	map = (struct map_lookup *)em->bdev;
4934 
4935 	length = em->len;
4936 	rmap_len = map->stripe_len;
4937 
4938 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4939 		do_div(length, map->num_stripes / map->sub_stripes);
4940 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4941 		do_div(length, map->num_stripes);
4942 	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4943 			      BTRFS_BLOCK_GROUP_RAID6)) {
4944 		do_div(length, nr_data_stripes(map));
4945 		rmap_len = map->stripe_len * nr_data_stripes(map);
4946 	}
4947 
4948 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4949 	BUG_ON(!buf); /* -ENOMEM */
4950 
4951 	for (i = 0; i < map->num_stripes; i++) {
4952 		if (devid && map->stripes[i].dev->devid != devid)
4953 			continue;
4954 		if (map->stripes[i].physical > physical ||
4955 		    map->stripes[i].physical + length <= physical)
4956 			continue;
4957 
4958 		stripe_nr = physical - map->stripes[i].physical;
4959 		do_div(stripe_nr, map->stripe_len);
4960 
4961 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4962 			stripe_nr = stripe_nr * map->num_stripes + i;
4963 			do_div(stripe_nr, map->sub_stripes);
4964 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4965 			stripe_nr = stripe_nr * map->num_stripes + i;
4966 		} /* else if RAID[56], multiply by nr_data_stripes().
4967 		   * Alternatively, just use rmap_len below instead of
4968 		   * map->stripe_len */
4969 
4970 		bytenr = chunk_start + stripe_nr * rmap_len;
4971 		WARN_ON(nr >= map->num_stripes);
4972 		for (j = 0; j < nr; j++) {
4973 			if (buf[j] == bytenr)
4974 				break;
4975 		}
4976 		if (j == nr) {
4977 			WARN_ON(nr >= map->num_stripes);
4978 			buf[nr++] = bytenr;
4979 		}
4980 	}
4981 
4982 	*logical = buf;
4983 	*naddrs = nr;
4984 	*stripe_len = rmap_len;
4985 
4986 	free_extent_map(em);
4987 	return 0;
4988 }
4989 
4990 static void *merge_stripe_index_into_bio_private(void *bi_private,
4991 						 unsigned int stripe_index)
4992 {
4993 	/*
4994 	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4995 	 * at most 1.
4996 	 * The alternative solution (instead of stealing bits from the
4997 	 * pointer) would be to allocate an intermediate structure
4998 	 * that contains the old private pointer plus the stripe_index.
4999 	 */
5000 	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
5001 	BUG_ON(stripe_index > 3);
5002 	return (void *)(((uintptr_t)bi_private) | stripe_index);
5003 }
5004 
5005 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
5006 {
5007 	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
5008 }
5009 
5010 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
5011 {
5012 	return (unsigned int)((uintptr_t)bi_private) & 3;
5013 }
5014 
5015 static void btrfs_end_bio(struct bio *bio, int err)
5016 {
5017 	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
5018 	int is_orig_bio = 0;
5019 
5020 	if (err) {
5021 		atomic_inc(&bbio->error);
5022 		if (err == -EIO || err == -EREMOTEIO) {
5023 			unsigned int stripe_index =
5024 				extract_stripe_index_from_bio_private(
5025 					bio->bi_private);
5026 			struct btrfs_device *dev;
5027 
5028 			BUG_ON(stripe_index >= bbio->num_stripes);
5029 			dev = bbio->stripes[stripe_index].dev;
5030 			if (dev->bdev) {
5031 				if (bio->bi_rw & WRITE)
5032 					btrfs_dev_stat_inc(dev,
5033 						BTRFS_DEV_STAT_WRITE_ERRS);
5034 				else
5035 					btrfs_dev_stat_inc(dev,
5036 						BTRFS_DEV_STAT_READ_ERRS);
5037 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5038 					btrfs_dev_stat_inc(dev,
5039 						BTRFS_DEV_STAT_FLUSH_ERRS);
5040 				btrfs_dev_stat_print_on_error(dev);
5041 			}
5042 		}
5043 	}
5044 
5045 	if (bio == bbio->orig_bio)
5046 		is_orig_bio = 1;
5047 
5048 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5049 		if (!is_orig_bio) {
5050 			bio_put(bio);
5051 			bio = bbio->orig_bio;
5052 		}
5053 		bio->bi_private = bbio->private;
5054 		bio->bi_end_io = bbio->end_io;
5055 		bio->bi_bdev = (struct block_device *)
5056 					(unsigned long)bbio->mirror_num;
5057 		/* only send an error to the higher layers if it is
5058 		 * beyond the tolerance of the btrfs bio
5059 		 */
5060 		if (atomic_read(&bbio->error) > bbio->max_errors) {
5061 			err = -EIO;
5062 		} else {
5063 			/*
5064 			 * this bio is actually up to date, we didn't
5065 			 * go over the max number of errors
5066 			 */
5067 			set_bit(BIO_UPTODATE, &bio->bi_flags);
5068 			err = 0;
5069 		}
5070 		kfree(bbio);
5071 
5072 		bio_endio(bio, err);
5073 	} else if (!is_orig_bio) {
5074 		bio_put(bio);
5075 	}
5076 }
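
/*
 * Example: a RAID1 write is submitted with max_errors = 1, so if
 * exactly one of the two mirror writes fails the original bio still
 * completes successfully; only a second failure surfaces as -EIO.
 */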
5077 
5078 struct async_sched {
5079 	struct bio *bio;
5080 	int rw;
5081 	struct btrfs_fs_info *info;
5082 	struct btrfs_work work;
5083 };
5084 
5085 /*
5086  * see run_scheduled_bios for a description of why bios are collected for
5087  * async submit.
5088  *
5089  * This will add one bio to the pending list for a device and make sure
5090  * the work struct is scheduled.
5091  */
5092 noinline void btrfs_schedule_bio(struct btrfs_root *root,
5093 				 struct btrfs_device *device,
5094 				 int rw, struct bio *bio)
5095 {
5096 	int should_queue = 1;
5097 	struct btrfs_pending_bios *pending_bios;
5098 
5099 	if (device->missing || !device->bdev) {
5100 		bio_endio(bio, -EIO);
5101 		return;
5102 	}
5103 
5104 	/* don't bother with additional async steps for reads, right now */
5105 	if (!(rw & REQ_WRITE)) {
5106 		bio_get(bio);
5107 		btrfsic_submit_bio(rw, bio);
5108 		bio_put(bio);
5109 		return;
5110 	}
5111 
5112 	/*
5113 	 * nr_async_bios allows us to reliably return congestion to the
5114 	 * higher layers.  Otherwise, the async bio makes it appear we have
5115 	 * made progress against dirty pages when we've really just put it
5116 	 * on a queue for later
5117 	 */
5118 	atomic_inc(&root->fs_info->nr_async_bios);
5119 	WARN_ON(bio->bi_next);
5120 	bio->bi_next = NULL;
5121 	bio->bi_rw |= rw;
5122 
5123 	spin_lock(&device->io_lock);
5124 	if (bio->bi_rw & REQ_SYNC)
5125 		pending_bios = &device->pending_sync_bios;
5126 	else
5127 		pending_bios = &device->pending_bios;
5128 
5129 	if (pending_bios->tail)
5130 		pending_bios->tail->bi_next = bio;
5131 
5132 	pending_bios->tail = bio;
5133 	if (!pending_bios->head)
5134 		pending_bios->head = bio;
5135 	if (device->running_pending)
5136 		should_queue = 0;
5137 
5138 	spin_unlock(&device->io_lock);
5139 
5140 	if (should_queue)
5141 		btrfs_queue_worker(&root->fs_info->submit_workers,
5142 				   &device->work);
5143 }
5144 
5145 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5146 		       sector_t sector)
5147 {
5148 	struct bio_vec *prev;
5149 	struct request_queue *q = bdev_get_queue(bdev);
5150 	unsigned short max_sectors = queue_max_sectors(q);
5151 	struct bvec_merge_data bvm = {
5152 		.bi_bdev = bdev,
5153 		.bi_sector = sector,
5154 		.bi_rw = bio->bi_rw,
5155 	};
5156 
5157 	if (bio->bi_vcnt == 0) {
5158 		WARN_ON(1);
5159 		return 1;
5160 	}
5161 
5162 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5163 	if ((bio->bi_size >> 9) > max_sectors)
5164 		return 0;
5165 
5166 	if (!q->merge_bvec_fn)
5167 		return 1;
5168 
5169 	bvm.bi_size = bio->bi_size - prev->bv_len;
5170 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5171 		return 0;
5172 	return 1;
5173 }
5174 
5175 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5176 			      struct bio *bio, u64 physical, int dev_nr,
5177 			      int rw, int async)
5178 {
5179 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5180 
5181 	bio->bi_private = bbio;
5182 	bio->bi_private = merge_stripe_index_into_bio_private(
5183 			bio->bi_private, (unsigned int)dev_nr);
5184 	bio->bi_end_io = btrfs_end_bio;
5185 	bio->bi_sector = physical >> 9;
5186 #ifdef DEBUG
5187 	{
5188 		struct rcu_string *name;
5189 
5190 		rcu_read_lock();
5191 		name = rcu_dereference(dev->name);
5192 		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5193 			 "(%s id %llu), size=%u\n", rw,
5194 			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5195 			 name->str, dev->devid, bio->bi_size);
5196 		rcu_read_unlock();
5197 	}
5198 #endif
5199 	bio->bi_bdev = dev->bdev;
5200 	if (async)
5201 		btrfs_schedule_bio(root, dev, rw, bio);
5202 	else
5203 		btrfsic_submit_bio(rw, bio);
5204 }
5205 
5206 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5207 			      struct bio *first_bio, struct btrfs_device *dev,
5208 			      int dev_nr, int rw, int async)
5209 {
5210 	struct bio_vec *bvec = first_bio->bi_io_vec;
5211 	struct bio *bio;
5212 	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5213 	u64 physical = bbio->stripes[dev_nr].physical;
5214 
5215 again:
5216 	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5217 	if (!bio)
5218 		return -ENOMEM;
5219 
5220 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5221 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5222 				 bvec->bv_offset) < bvec->bv_len) {
5223 			u64 len = bio->bi_size;
5224 
5225 			atomic_inc(&bbio->stripes_pending);
5226 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5227 					  rw, async);
5228 			physical += len;
5229 			goto again;
5230 		}
5231 		bvec++;
5232 	}
5233 
5234 	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5235 	return 0;
5236 }
5237 
5238 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5239 {
5240 	atomic_inc(&bbio->error);
5241 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
5242 		bio->bi_private = bbio->private;
5243 		bio->bi_end_io = bbio->end_io;
5244 		bio->bi_bdev = (struct block_device *)
5245 			(unsigned long)bbio->mirror_num;
5246 		bio->bi_sector = logical >> 9;
5247 		kfree(bbio);
5248 		bio_endio(bio, -EIO);
5249 	}
5250 }
5251 
5252 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5253 		  int mirror_num, int async_submit)
5254 {
5255 	struct btrfs_device *dev;
5256 	struct bio *first_bio = bio;
5257 	u64 logical = (u64)bio->bi_sector << 9;
5258 	u64 length = 0;
5259 	u64 map_length;
5260 	u64 *raid_map = NULL;
5261 	int ret;
5262 	int dev_nr = 0;
5263 	int total_devs = 1;
5264 	struct btrfs_bio *bbio = NULL;
5265 
5266 	length = bio->bi_size;
5267 	map_length = length;
5268 
5269 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5270 			      mirror_num, &raid_map);
5271 	if (ret) /* -ENOMEM */
5272 		return ret;
5273 
5274 	total_devs = bbio->num_stripes;
5275 	bbio->orig_bio = first_bio;
5276 	bbio->private = first_bio->bi_private;
5277 	bbio->end_io = first_bio->bi_end_io;
5278 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5279 
5280 	if (raid_map) {
5281 		/* In this case, map_length has been set to the length of
5282 		   a single stripe, not the whole write */
5283 		if (rw & WRITE) {
5284 			return raid56_parity_write(root, bio, bbio,
5285 						   raid_map, map_length);
5286 		} else {
5287 			return raid56_parity_recover(root, bio, bbio,
5288 						     raid_map, map_length,
5289 						     mirror_num);
5290 		}
5291 	}
5292 
5293 	if (map_length < length) {
5294 		printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
5295 		       "len %llu\n", (unsigned long long)logical,
5296 		       (unsigned long long)length,
5297 		       (unsigned long long)map_length);
5298 		BUG();
5299 	}
5300 
5301 	while (dev_nr < total_devs) {
5302 		dev = bbio->stripes[dev_nr].dev;
5303 		if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5304 			bbio_error(bbio, first_bio, logical);
5305 			dev_nr++;
5306 			continue;
5307 		}
5308 
5309 		/*
5310 		 * Check and see if we're ok with this bio based on its size
5311 		 * and offset with the given device.
5312 		 */
5313 		if (!bio_size_ok(dev->bdev, first_bio,
5314 				 bbio->stripes[dev_nr].physical >> 9)) {
5315 			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5316 						 dev_nr, rw, async_submit);
5317 			BUG_ON(ret);
5318 			dev_nr++;
5319 			continue;
5320 		}
5321 
5322 		if (dev_nr < total_devs - 1) {
5323 			bio = bio_clone(first_bio, GFP_NOFS);
5324 			BUG_ON(!bio); /* -ENOMEM */
5325 		} else {
5326 			bio = first_bio;
5327 		}
5328 
5329 		submit_stripe_bio(root, bbio, bio,
5330 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
5331 				  async_submit);
5332 		dev_nr++;
5333 	}
5334 	return 0;
5335 }
5336 
5337 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5338 				       u8 *uuid, u8 *fsid)
5339 {
5340 	struct btrfs_device *device;
5341 	struct btrfs_fs_devices *cur_devices;
5342 
5343 	cur_devices = fs_info->fs_devices;
5344 	while (cur_devices) {
5345 		if (!fsid ||
5346 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5347 			device = __find_device(&cur_devices->devices,
5348 					       devid, uuid);
5349 			if (device)
5350 				return device;
5351 		}
5352 		cur_devices = cur_devices->seed;
5353 	}
5354 	return NULL;
5355 }
5356 
5357 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5358 					    u64 devid, u8 *dev_uuid)
5359 {
5360 	struct btrfs_device *device;
5361 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5362 
5363 	device = kzalloc(sizeof(*device), GFP_NOFS);
5364 	if (!device)
5365 		return NULL;
5366 	list_add(&device->dev_list,
5367 		 &fs_devices->devices);
5368 	device->dev_root = root->fs_info->dev_root;
5369 	device->devid = devid;
5370 	device->work.func = pending_bios_fn;
5371 	device->fs_devices = fs_devices;
5372 	device->missing = 1;
5373 	fs_devices->num_devices++;
5374 	fs_devices->missing_devices++;
5375 	spin_lock_init(&device->io_lock);
5376 	INIT_LIST_HEAD(&device->dev_alloc_list);
5377 	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
5378 	return device;
5379 }
5380 
5381 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5382 			  struct extent_buffer *leaf,
5383 			  struct btrfs_chunk *chunk)
5384 {
5385 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5386 	struct map_lookup *map;
5387 	struct extent_map *em;
5388 	u64 logical;
5389 	u64 length;
5390 	u64 devid;
5391 	u8 uuid[BTRFS_UUID_SIZE];
5392 	int num_stripes;
5393 	int ret;
5394 	int i;
5395 
5396 	logical = key->offset;
5397 	length = btrfs_chunk_length(leaf, chunk);
5398 
5399 	read_lock(&map_tree->map_tree.lock);
5400 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5401 	read_unlock(&map_tree->map_tree.lock);
5402 
5403 	/* already mapped? */
5404 	if (em && em->start <= logical && em->start + em->len > logical) {
5405 		free_extent_map(em);
5406 		return 0;
5407 	} else if (em) {
5408 		free_extent_map(em);
5409 	}
5410 
5411 	em = alloc_extent_map();
5412 	if (!em)
5413 		return -ENOMEM;
5414 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
5415 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
5416 	if (!map) {
5417 		free_extent_map(em);
5418 		return -ENOMEM;
5419 	}
5420 
5421 	em->bdev = (struct block_device *)map;
5422 	em->start = logical;
5423 	em->len = length;
5424 	em->orig_start = 0;
5425 	em->block_start = 0;
5426 	em->block_len = em->len;
5427 
5428 	map->num_stripes = num_stripes;
5429 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
5430 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
5431 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5432 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5433 	map->type = btrfs_chunk_type(leaf, chunk);
5434 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5435 	for (i = 0; i < num_stripes; i++) {
5436 		map->stripes[i].physical =
5437 			btrfs_stripe_offset_nr(leaf, chunk, i);
5438 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
5439 		read_extent_buffer(leaf, uuid, (unsigned long)
5440 				   btrfs_stripe_dev_uuid_nr(chunk, i),
5441 				   BTRFS_UUID_SIZE);
5442 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
5443 							uuid, NULL);
5444 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
5445 			kfree(map);
5446 			free_extent_map(em);
5447 			return -EIO;
5448 		}
5449 		if (!map->stripes[i].dev) {
5450 			map->stripes[i].dev =
5451 				add_missing_dev(root, devid, uuid);
5452 			if (!map->stripes[i].dev) {
5453 				kfree(map);
5454 				free_extent_map(em);
5455 				return -EIO;
5456 			}
5457 		}
5458 		map->stripes[i].dev->in_fs_metadata = 1;
5459 	}
5460 
5461 	write_lock(&map_tree->map_tree.lock);
5462 	ret = add_extent_mapping(&map_tree->map_tree, em);
5463 	write_unlock(&map_tree->map_tree.lock);
5464 	BUG_ON(ret); /* Tree corruption */
5465 	free_extent_map(em);
5466 
5467 	return 0;
5468 }
5469 
5470 static void fill_device_from_item(struct extent_buffer *leaf,
5471 				 struct btrfs_dev_item *dev_item,
5472 				 struct btrfs_device *device)
5473 {
5474 	unsigned long ptr;
5475 
5476 	device->devid = btrfs_device_id(leaf, dev_item);
5477 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5478 	device->total_bytes = device->disk_total_bytes;
5479 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5480 	device->type = btrfs_device_type(leaf, dev_item);
5481 	device->io_align = btrfs_device_io_align(leaf, dev_item);
5482 	device->io_width = btrfs_device_io_width(leaf, dev_item);
5483 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5484 	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5485 	device->is_tgtdev_for_dev_replace = 0;
5486 
5487 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
5488 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5489 }
5490 
5491 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5492 {
5493 	struct btrfs_fs_devices *fs_devices;
5494 	int ret;
5495 
5496 	BUG_ON(!mutex_is_locked(&uuid_mutex));
5497 
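	/*
	 * Seed filesystems are chained off fs_info->fs_devices->seed;
	 * walk the chain first in case this fsid was already opened.
	 */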
5498 	fs_devices = root->fs_info->fs_devices->seed;
5499 	while (fs_devices) {
5500 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5501 			ret = 0;
5502 			goto out;
5503 		}
5504 		fs_devices = fs_devices->seed;
5505 	}
5506 
5507 	fs_devices = find_fsid(fsid);
5508 	if (!fs_devices) {
5509 		ret = -ENOENT;
5510 		goto out;
5511 	}
5512 
5513 	fs_devices = clone_fs_devices(fs_devices);
5514 	if (IS_ERR(fs_devices)) {
5515 		ret = PTR_ERR(fs_devices);
5516 		goto out;
5517 	}
5518 
5519 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5520 				   root->fs_info->bdev_holder);
5521 	if (ret) {
5522 		free_fs_devices(fs_devices);
5523 		goto out;
5524 	}
5525 
5526 	if (!fs_devices->seeding) {
5527 		__btrfs_close_devices(fs_devices);
5528 		free_fs_devices(fs_devices);
5529 		ret = -EINVAL;
5530 		goto out;
5531 	}
5532 
5533 	fs_devices->seed = root->fs_info->fs_devices->seed;
5534 	root->fs_info->fs_devices->seed = fs_devices;
5535 out:
5536 	return ret;
5537 }
5538 
5539 static int read_one_dev(struct btrfs_root *root,
5540 			struct extent_buffer *leaf,
5541 			struct btrfs_dev_item *dev_item)
5542 {
5543 	struct btrfs_device *device;
5544 	u64 devid;
5545 	int ret;
5546 	u8 fs_uuid[BTRFS_UUID_SIZE];
5547 	u8 dev_uuid[BTRFS_UUID_SIZE];
5548 
5549 	devid = btrfs_device_id(leaf, dev_item);
5550 	read_extent_buffer(leaf, dev_uuid,
5551 			   (unsigned long)btrfs_device_uuid(dev_item),
5552 			   BTRFS_UUID_SIZE);
5553 	read_extent_buffer(leaf, fs_uuid,
5554 			   (unsigned long)btrfs_device_fsid(dev_item),
5555 			   BTRFS_UUID_SIZE);
5556 
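	/*
	 * A dev item carrying a foreign fsid belongs to a seed device,
	 * so make sure the seed filesystem it came from is opened.
	 */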
5557 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5558 		ret = open_seed_devices(root, fs_uuid);
5559 		if (ret && !btrfs_test_opt(root, DEGRADED))
5560 			return ret;
5561 	}
5562 
5563 	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5564 	if (!device || !device->bdev) {
5565 		if (!btrfs_test_opt(root, DEGRADED))
5566 			return -EIO;
5567 
5568 		if (!device) {
5569 			printk(KERN_WARNING "btrfs: devid %llu missing\n",
5570 			       (unsigned long long)devid);
5571 			device = add_missing_dev(root, devid, dev_uuid);
5572 			if (!device)
5573 				return -ENOMEM;
5574 		} else if (!device->missing) {
5575 			/*
5576 			 * This happens when a device that was properly set up
5577 			 * in the device info lists suddenly goes bad.
5578 			 * device->bdev is NULL, so we have to set
5579 			 * device->missing to one here.
5580 			 */
5581 			root->fs_info->fs_devices->missing_devices++;
5582 			device->missing = 1;
5583 		}
5584 	}
5585 
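	/*
	 * A device owned by a different fs_devices is a seed device: it
	 * must not be writeable, and its generation has to match the one
	 * recorded in the dev item.
	 */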
5586 	if (device->fs_devices != root->fs_info->fs_devices) {
5587 		BUG_ON(device->writeable);
5588 		if (device->generation !=
5589 		    btrfs_device_generation(leaf, dev_item))
5590 			return -EINVAL;
5591 	}
5592 
5593 	fill_device_from_item(leaf, dev_item, device);
5594 	device->dev_root = root->fs_info->dev_root;
5595 	device->in_fs_metadata = 1;
5596 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5597 		device->fs_devices->total_rw_bytes += device->total_bytes;
5598 		spin_lock(&root->fs_info->free_chunk_lock);
5599 		root->fs_info->free_chunk_space += device->total_bytes -
5600 			device->bytes_used;
5601 		spin_unlock(&root->fs_info->free_chunk_lock);
5602 	}
5603 	ret = 0;
5604 	return ret;
5605 }
5606 
5607 int btrfs_read_sys_array(struct btrfs_root *root)
5608 {
5609 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5610 	struct extent_buffer *sb;
5611 	struct btrfs_disk_key *disk_key;
5612 	struct btrfs_chunk *chunk;
5613 	u8 *ptr;
5614 	unsigned long sb_ptr;
5615 	int ret = 0;
5616 	u32 num_stripes;
5617 	u32 array_size;
5618 	u32 len = 0;
5619 	u32 cur;
5620 	struct btrfs_key key;
5621 
5622 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5623 					  BTRFS_SUPER_INFO_SIZE);
5624 	if (!sb)
5625 		return -ENOMEM;
5626 	btrfs_set_buffer_uptodate(sb);
5627 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5628 	/*
5629 	 * The sb extent buffer is artificial and is only used to read the
5630 	 * system array. The btrfs_set_buffer_uptodate() call does not properly
5631 	 * mark all of its pages up-to-date when the page is larger: the extent
5632 	 * does not cover the whole page, so check_page_uptodate does not find
5633 	 * all of the page's extents up-to-date (the hole beyond sb), and
5634 	 * write_extent_buffer then triggers a WARN_ON.
5635 	 *
5636 	 * Regular short extents go through the mark_extent_buffer_dirty/
5637 	 * writeback cycle, but sb spans only this function. Add an explicit
5638 	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
5639 	 */
5640 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5641 		SetPageUptodate(sb->pages[0]);
5642 
5643 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5644 	array_size = btrfs_super_sys_array_size(super_copy);
5645 
5646 	ptr = super_copy->sys_chunk_array;
5647 	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5648 	cur = 0;
5649 
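	/*
	 * The sys_chunk_array is a packed sequence of (disk key, chunk item)
	 * pairs, where each chunk item is followed by its stripes; walk it
	 * one key at a time.
	 */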
5650 	while (cur < array_size) {
5651 		disk_key = (struct btrfs_disk_key *)ptr;
5652 		btrfs_disk_key_to_cpu(&key, disk_key);
5653 
5654 		len = sizeof(*disk_key); ptr += len;
5655 		sb_ptr += len;
5656 		cur += len;
5657 
5658 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5659 			chunk = (struct btrfs_chunk *)sb_ptr;
5660 			ret = read_one_chunk(root, &key, sb, chunk);
5661 			if (ret)
5662 				break;
5663 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5664 			len = btrfs_chunk_item_size(num_stripes);
5665 		} else {
5666 			ret = -EIO;
5667 			break;
5668 		}
5669 		ptr += len;
5670 		sb_ptr += len;
5671 		cur += len;
5672 	}
5673 	free_extent_buffer(sb);
5674 	return ret;
5675 }
5676 
5677 int btrfs_read_chunk_tree(struct btrfs_root *root)
5678 {
5679 	struct btrfs_path *path;
5680 	struct extent_buffer *leaf;
5681 	struct btrfs_key key;
5682 	struct btrfs_key found_key;
5683 	int ret;
5684 	int slot;
5685 
5686 	root = root->fs_info->chunk_root;
5687 
5688 	path = btrfs_alloc_path();
5689 	if (!path)
5690 		return -ENOMEM;
5691 
5692 	mutex_lock(&uuid_mutex);
5693 	lock_chunks(root);
5694 
5695 	/* first we search for all of the device items, and then we
5696 	 * read in all of the chunk items.  This way we can create chunk
5697 	 * mappings that reference all of the devices that are found
5698 	 */
5699 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5700 	key.offset = 0;
5701 	key.type = 0;
5702 again:
5703 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5704 	if (ret < 0)
5705 		goto error;
5706 	while (1) {
5707 		leaf = path->nodes[0];
5708 		slot = path->slots[0];
5709 		if (slot >= btrfs_header_nritems(leaf)) {
5710 			ret = btrfs_next_leaf(root, path);
5711 			if (ret == 0)
5712 				continue;
5713 			if (ret < 0)
5714 				goto error;
5715 			break;
5716 		}
5717 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5718 		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5719 			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5720 				break;
5721 			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5722 				struct btrfs_dev_item *dev_item;
5723 				dev_item = btrfs_item_ptr(leaf, slot,
5724 						  struct btrfs_dev_item);
5725 				ret = read_one_dev(root, leaf, dev_item);
5726 				if (ret)
5727 					goto error;
5728 			}
5729 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5730 			struct btrfs_chunk *chunk;
5731 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5732 			ret = read_one_chunk(root, &found_key, leaf, chunk);
5733 			if (ret)
5734 				goto error;
5735 		}
5736 		path->slots[0]++;
5737 	}
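	/*
	 * After the pass over the device items, restart the search from
	 * the beginning of the tree so the second pass picks up the
	 * chunk items.
	 */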
5738 	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5739 		key.objectid = 0;
5740 		btrfs_release_path(path);
5741 		goto again;
5742 	}
5743 	ret = 0;
5744 error:
5745 	unlock_chunks(root);
5746 	mutex_unlock(&uuid_mutex);
5747 
5748 	btrfs_free_path(path);
5749 	return ret;
5750 }
5751 
5752 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5753 {
5754 	int i;
5755 
5756 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5757 		btrfs_dev_stat_reset(dev, i);
5758 }
5759 
5760 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5761 {
5762 	struct btrfs_key key;
5763 	struct btrfs_key found_key;
5764 	struct btrfs_root *dev_root = fs_info->dev_root;
5765 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5766 	struct extent_buffer *eb;
5767 	int slot;
5768 	int ret = 0;
5769 	struct btrfs_device *device;
5770 	struct btrfs_path *path = NULL;
5771 	int i;
5772 
5773 	path = btrfs_alloc_path();
5774 	if (!path) {
5775 		ret = -ENOMEM;
5776 		goto out;
5777 	}
5778 
5779 	mutex_lock(&fs_devices->device_list_mutex);
5780 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
5781 		int item_size;
5782 		struct btrfs_dev_stats_item *ptr;
5783 
5784 		key.objectid = 0;
5785 		key.type = BTRFS_DEV_STATS_KEY;
5786 		key.offset = device->devid;
5787 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5788 		if (ret) {
5789 			__btrfs_reset_dev_stats(device);
5790 			device->dev_stats_valid = 1;
5791 			btrfs_release_path(path);
5792 			continue;
5793 		}
5794 		slot = path->slots[0];
5795 		eb = path->nodes[0];
5796 		btrfs_item_key_to_cpu(eb, &found_key, slot);
5797 		item_size = btrfs_item_size_nr(eb, slot);
5798 
5799 		ptr = btrfs_item_ptr(eb, slot,
5800 				     struct btrfs_dev_stats_item);
5801 
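		/*
		 * A dev_stats item from an older filesystem may be shorter
		 * than the current set of counters; values beyond its size
		 * are reset rather than read.
		 */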
5802 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5803 			if (item_size >= (1 + i) * sizeof(__le64))
5804 				btrfs_dev_stat_set(device, i,
5805 					btrfs_dev_stats_value(eb, ptr, i));
5806 			else
5807 				btrfs_dev_stat_reset(device, i);
5808 		}
5809 
5810 		device->dev_stats_valid = 1;
5811 		btrfs_dev_stat_print_on_load(device);
5812 		btrfs_release_path(path);
5813 	}
5814 	mutex_unlock(&fs_devices->device_list_mutex);
5815 
5816 out:
5817 	btrfs_free_path(path);
5818 	return ret < 0 ? ret : 0;
5819 }
5820 
5821 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5822 				struct btrfs_root *dev_root,
5823 				struct btrfs_device *device)
5824 {
5825 	struct btrfs_path *path;
5826 	struct btrfs_key key;
5827 	struct extent_buffer *eb;
5828 	struct btrfs_dev_stats_item *ptr;
5829 	int ret;
5830 	int i;
5831 
5832 	key.objectid = 0;
5833 	key.type = BTRFS_DEV_STATS_KEY;
5834 	key.offset = device->devid;
5835 
5836 	path = btrfs_alloc_path();
5837 	if (!path)
		return -ENOMEM;
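	/*
	 * Search with ins_len == -1 and cow == 1 so the path is COWed and
	 * ready for a possible deletion of an old, too small item.
	 */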
5838 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5839 	if (ret < 0) {
5840 		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5841 			      ret, rcu_str_deref(device->name));
5842 		goto out;
5843 	}
5844 
5845 	if (ret == 0 &&
5846 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5847 		/* need to delete old one and insert a new one */
5848 		ret = btrfs_del_item(trans, dev_root, path);
5849 		if (ret != 0) {
5850 			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5851 				      rcu_str_deref(device->name), ret);
5852 			goto out;
5853 		}
5854 		ret = 1;
5855 	}
5856 
5857 	if (ret == 1) {
5858 		/* need to insert a new item */
5859 		btrfs_release_path(path);
5860 		ret = btrfs_insert_empty_item(trans, dev_root, path,
5861 					      &key, sizeof(*ptr));
5862 		if (ret < 0) {
5863 			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5864 				      rcu_str_deref(device->name), ret);
5865 			goto out;
5866 		}
5867 	}
5868 
5869 	eb = path->nodes[0];
5870 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5871 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5872 		btrfs_set_dev_stats_value(eb, ptr, i,
5873 					  btrfs_dev_stat_read(device, i));
5874 	btrfs_mark_buffer_dirty(eb);
5875 
5876 out:
5877 	btrfs_free_path(path);
5878 	return ret;
5879 }
5880 
5881 /*
5882  * Called from commit_transaction; writes all changed device stats to disk.
5883  */
5884 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5885 			struct btrfs_fs_info *fs_info)
5886 {
5887 	struct btrfs_root *dev_root = fs_info->dev_root;
5888 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5889 	struct btrfs_device *device;
5890 	int ret = 0;
5891 
5892 	mutex_lock(&fs_devices->device_list_mutex);
5893 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
5894 		if (!device->dev_stats_valid || !device->dev_stats_dirty)
5895 			continue;
5896 
5897 		ret = update_dev_stat_item(trans, dev_root, device);
5898 		if (!ret)
5899 			device->dev_stats_dirty = 0;
5900 	}
5901 	mutex_unlock(&fs_devices->device_list_mutex);
5902 
5903 	return ret;
5904 }
5905 
5906 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5907 {
5908 	btrfs_dev_stat_inc(dev, index);
5909 	btrfs_dev_stat_print_on_error(dev);
5910 }
5911 
5912 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5913 {
5914 	if (!dev->dev_stats_valid)
5915 		return;
5916 	printk_ratelimited_in_rcu(KERN_ERR
5917 			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5918 			   rcu_str_deref(dev->name),
5919 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5920 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5921 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5922 			   btrfs_dev_stat_read(dev,
5923 					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
5924 			   btrfs_dev_stat_read(dev,
5925 					       BTRFS_DEV_STAT_GENERATION_ERRS));
5926 }
5927 
5928 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5929 {
5930 	int i;
5931 
5932 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5933 		if (btrfs_dev_stat_read(dev, i) != 0)
5934 			break;
5935 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
5936 		return; /* all values == 0, suppress message */
5937 
5938 	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5939 	       rcu_str_deref(dev->name),
5940 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5941 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5942 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5943 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5944 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5945 }
5946 
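/*
 * Handler for the BTRFS_IOC_GET_DEV_STATS ioctl. A rough sketch of the
 * userspace side, for context (illustrative only; fd setup and error
 * handling omitted, devid 1 is just an example):
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *
 * Setting BTRFS_DEV_STATS_RESET in s.flags reads and zeroes the counters.
 */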
5947 int btrfs_get_dev_stats(struct btrfs_root *root,
5948 			struct btrfs_ioctl_get_dev_stats *stats)
5949 {
5950 	struct btrfs_device *dev;
5951 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5952 	int i;
5953 
5954 	mutex_lock(&fs_devices->device_list_mutex);
5955 	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5956 	mutex_unlock(&fs_devices->device_list_mutex);
5957 
5958 	if (!dev) {
5959 		printk(KERN_WARNING
5960 		       "btrfs: get dev_stats failed, device not found\n");
5961 		return -ENODEV;
5962 	} else if (!dev->dev_stats_valid) {
5963 		printk(KERN_WARNING
5964 		       "btrfs: get dev_stats failed, not yet valid\n");
5965 		return -ENODEV;
5966 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5967 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5968 			if (stats->nr_items > i)
5969 				stats->values[i] =
5970 					btrfs_dev_stat_read_and_reset(dev, i);
5971 			else
5972 				btrfs_dev_stat_reset(dev, i);
5973 		}
5974 	} else {
5975 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5976 			if (stats->nr_items > i)
5977 				stats->values[i] = btrfs_dev_stat_read(dev, i);
5978 	}
5979 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5980 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5981 	return 0;
5982 }
5983 
5984 int btrfs_scratch_superblock(struct btrfs_device *device)
5985 {
5986 	struct buffer_head *bh;
5987 	struct btrfs_super_block *disk_super;
5988 
5989 	bh = btrfs_read_dev_super(device->bdev);
5990 	if (!bh)
5991 		return -EINVAL;
5992 	disk_super = (struct btrfs_super_block *)bh->b_data;
5993 
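	/*
	 * Wiping the magic invalidates the superblock, so the device is no
	 * longer recognized as a btrfs member on subsequent scans.
	 */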
5994 	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5995 	set_buffer_dirty(bh);
5996 	sync_dirty_buffer(bh);
5997 	brelse(bh);
5998 
5999 	return 0;
6000 }
6001