xref: /openbmc/linux/fs/btrfs/raid56.c (revision 133f9794)
1 /*
2  * Copyright (C) 2012 Fusion-io  All rights reserved.
3  * Copyright (C) 2012 Intel Corp. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public
7  * License v2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public
15  * License along with this program; if not, write to the
16  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17  * Boston, MA 02111-1307, USA.
18  */
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/bio.h>
22 #include <linux/slab.h>
23 #include <linux/buffer_head.h>
24 #include <linux/blkdev.h>
25 #include <linux/random.h>
26 #include <linux/iocontext.h>
27 #include <linux/capability.h>
28 #include <linux/ratelimit.h>
29 #include <linux/kthread.h>
30 #include <linux/raid/pq.h>
31 #include <linux/hash.h>
32 #include <linux/list_sort.h>
33 #include <linux/raid/xor.h>
34 #include <linux/mm.h>
35 #include <asm/div64.h>
36 #include "ctree.h"
37 #include "extent_map.h"
38 #include "disk-io.h"
39 #include "transaction.h"
40 #include "print-tree.h"
41 #include "volumes.h"
42 #include "raid56.h"
43 #include "async-thread.h"
44 #include "check-integrity.h"
45 #include "rcu-string.h"
46 
47 /* set when additional merges to this rbio are not allowed */
48 #define RBIO_RMW_LOCKED_BIT	1
49 
50 /*
51  * set when this rbio is sitting in the hash, but it is just a cache
52  * of past RMW
53  */
54 #define RBIO_CACHE_BIT		2
55 
56 /*
57  * set when it is safe to trust the stripe_pages for caching
58  */
59 #define RBIO_CACHE_READY_BIT	3
60 
61 #define RBIO_CACHE_SIZE 1024
62 
63 enum btrfs_rbio_ops {
64 	BTRFS_RBIO_WRITE,
65 	BTRFS_RBIO_READ_REBUILD,
66 	BTRFS_RBIO_PARITY_SCRUB,
67 	BTRFS_RBIO_REBUILD_MISSING,
68 };
69 
70 struct btrfs_raid_bio {
71 	struct btrfs_fs_info *fs_info;
72 	struct btrfs_bio *bbio;
73 
74 	/* while we're doing rmw on a stripe
75 	 * we put it into a hash table so we can
76 	 * lock the stripe and merge more rbios
77 	 * into it.
78 	 */
79 	struct list_head hash_list;
80 
81 	/*
82 	 * LRU list for the stripe cache
83 	 */
84 	struct list_head stripe_cache;
85 
86 	/*
87 	 * for scheduling work in the helper threads
88 	 */
89 	struct btrfs_work work;
90 
91 	/*
92 	 * bio list and bio_list_lock are used
93 	 * to add more bios into the stripe
94 	 * in hopes of avoiding the full rmw
95 	 */
96 	struct bio_list bio_list;
97 	spinlock_t bio_list_lock;
98 
99 	/* also protected by the bio_list_lock, the
100 	 * plug list is used by the plugging code
101 	 * to collect partial bios while plugged.  The
102 	 * stripe locking code also uses it to hand off
103 	 * the stripe lock to the next pending IO
104 	 */
105 	struct list_head plug_list;
106 
107 	/*
108 	 * flags that tell us if it is safe to
109 	 * merge with this bio
110 	 */
111 	unsigned long flags;
112 
113 	/* size of each individual stripe on disk */
114 	int stripe_len;
115 
116 	/* number of data stripes (no p/q) */
117 	int nr_data;
118 
119 	int real_stripes;
120 
121 	int stripe_npages;
122 	/*
123 	 * set if we're doing a parity rebuild
124 	 * for a read from higher up, which is handled
125 	 * differently from a parity rebuild as part of
126 	 * rmw
127 	 */
128 	enum btrfs_rbio_ops operation;
129 
130 	/* first bad stripe */
131 	int faila;
132 
133 	/* second bad stripe (for raid6 use) */
134 	int failb;
135 
136 	int scrubp;
137 	/*
138 	 * number of pages needed to represent the full
139 	 * stripe
140 	 */
141 	int nr_pages;
142 
143 	/*
144 	 * size of all the bios in the bio_list.  This
145 	 * helps us decide if the rbio maps to a full
146 	 * stripe or not
147 	 */
148 	int bio_list_bytes;
149 
150 	int generic_bio_cnt;
151 
152 	refcount_t refs;
153 
154 	atomic_t stripes_pending;
155 
156 	atomic_t error;
157 	/*
158 	 * these are two arrays of pointers.  We allocate the
159 	 * rbio big enough to hold them both and set up their
160 	 * locations when the rbio is allocated
161 	 */
162 
163 	/* pointers to pages that we allocated for
164 	 * reading/writing stripes directly from the disk (including P/Q)
165 	 */
166 	struct page **stripe_pages;
167 
168 	/*
169 	 * pointers to the pages in the bio_list.  Stored
170 	 * here for faster lookup
171 	 */
172 	struct page **bio_pages;
173 
174 	/*
175 	 * bitmap to record which horizontal stripe has data
176 	 */
177 	unsigned long *dbitmap;
178 };
179 
180 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
182 static void rmw_work(struct btrfs_work *work);
183 static void read_rebuild_work(struct btrfs_work *work);
184 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
191 
192 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
193 					 int need_check);
194 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
195 
196 /*
197  * the stripe hash table is used for locking, and to collect
198  * bios in hopes of making a full stripe
199  */
200 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
201 {
202 	struct btrfs_stripe_hash_table *table;
203 	struct btrfs_stripe_hash_table *x;
204 	struct btrfs_stripe_hash *cur;
205 	struct btrfs_stripe_hash *h;
206 	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
207 	int i;
208 	int table_size;
209 
210 	if (info->stripe_hash_table)
211 		return 0;
212 
213 	/*
214 	 * The table is large, starting with order 4 and can go as high as
215 	 * order 7 in case lock debugging is turned on.
216 	 *
217 	 * Try harder to allocate and fall back to vmalloc to lower the chance
218 	 * of a failing mount.
219 	 */
220 	table_size = sizeof(*table) + sizeof(*h) * num_entries;
221 	table = kvzalloc(table_size, GFP_KERNEL);
222 	if (!table)
223 		return -ENOMEM;
224 
225 	spin_lock_init(&table->cache_lock);
226 	INIT_LIST_HEAD(&table->stripe_cache);
227 
228 	h = table->table;
229 
230 	for (i = 0; i < num_entries; i++) {
231 		cur = h + i;
232 		INIT_LIST_HEAD(&cur->hash_list);
233 		spin_lock_init(&cur->lock);
234 	}
235 
236 	x = cmpxchg(&info->stripe_hash_table, NULL, table);
237 	if (x)
238 		kvfree(table);	/* lost the race; keep the winner's table */
239 	return 0;
240 }
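
/*
 * The cmpxchg() above is the lock-free "initialize once" idiom: if two
 * tasks race here, only the one whose compare-and-exchange sees NULL
 * publishes its table, and the loser must free its own private copy.
 * A minimal sketch of the idiom (names are illustrative, not part of
 * btrfs):
 *
 *	struct foo *mine = kvzalloc(sizeof(*mine), GFP_KERNEL);
 *
 *	if (cmpxchg(&shared, NULL, mine))
 *		kvfree(mine);	// lost the race, keep the winner's copy
 */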
241 
242 /*
243  * caching an rbio means copying anything from the
244  * bio_pages array into the stripe_pages array.  We
245  * use the page uptodate bit in the stripe cache array
246  * to indicate if it has valid data
247  *
248  * once the caching is done, we set the cache ready
249  * bit.
250  */
251 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
252 {
253 	int i;
254 	char *s;
255 	char *d;
256 	int ret;
257 
258 	ret = alloc_rbio_pages(rbio);
259 	if (ret)
260 		return;
261 
262 	for (i = 0; i < rbio->nr_pages; i++) {
263 		if (!rbio->bio_pages[i])
264 			continue;
265 
266 		s = kmap(rbio->bio_pages[i]);
267 		d = kmap(rbio->stripe_pages[i]);
268 
269 		memcpy(d, s, PAGE_SIZE);
270 
271 		kunmap(rbio->bio_pages[i]);
272 		kunmap(rbio->stripe_pages[i]);
273 		SetPageUptodate(rbio->stripe_pages[i]);
274 	}
275 	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
276 }
277 
278 /*
279  * we hash on the first logical address of the stripe
280  */
281 static int rbio_bucket(struct btrfs_raid_bio *rbio)
282 {
283 	u64 num = rbio->bbio->raid_map[0];
284 
285 	/*
286 	 * we shift down quite a bit.  We're using byte
287 	 * addressing, and most of the lower bits are zeros.
288 	 * This tends to upset hash_64, and it consistently
289 	 * returns just one or two different values.
290 	 *
291 	 * shifting off the lower bits fixes things.
292 	 */
293 	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
294 }
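
/*
 * Worked example for rbio_bucket(), with illustrative numbers: a full
 * stripe starting at logical byte 0x4c0000000 has many low zero bits,
 * so hashing the raw address clusters everything into a handful of
 * buckets.  After the shift the varying bits are in the range that
 * hash_64() mixes well:
 *
 *	hash_64(0x4c0000000 >> 16, BTRFS_STRIPE_HASH_TABLE_BITS)
 *	== hash_64(0x4c000, ...)   -> well distributed bucket index
 */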
295 
296 /*
297  * stealing an rbio means taking all the uptodate pages from the stripe
298  * array in the source rbio and putting them into the destination rbio
299  */
300 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
301 {
302 	int i;
303 	struct page *s;
304 	struct page *d;
305 
306 	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
307 		return;
308 
309 	for (i = 0; i < dest->nr_pages; i++) {
310 		s = src->stripe_pages[i];
311 		if (!s || !PageUptodate(s)) {
312 			continue;
313 		}
314 
315 		d = dest->stripe_pages[i];
316 		if (d)
317 			__free_page(d);
318 
319 		dest->stripe_pages[i] = s;
320 		src->stripe_pages[i] = NULL;
321 	}
322 }
323 
324 /*
325  * merging means we take the bio_list from the victim and
326  * splice it into the destination.  The victim should
327  * be discarded afterwards.
328  *
329  * must be called with dest->rbio_list_lock held
330  */
331 static void merge_rbio(struct btrfs_raid_bio *dest,
332 		       struct btrfs_raid_bio *victim)
333 {
334 	bio_list_merge(&dest->bio_list, &victim->bio_list);
335 	dest->bio_list_bytes += victim->bio_list_bytes;
336 	dest->generic_bio_cnt += victim->generic_bio_cnt;
337 	bio_list_init(&victim->bio_list);
338 }
339 
340 /*
341  * used to prune items that are in the cache.  The caller
342  * must hold the hash table lock.
343  */
344 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
345 {
346 	int bucket = rbio_bucket(rbio);
347 	struct btrfs_stripe_hash_table *table;
348 	struct btrfs_stripe_hash *h;
349 	int freeit = 0;
350 
351 	/*
352 	 * check the bit again under the hash table lock.
353 	 */
354 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
355 		return;
356 
357 	table = rbio->fs_info->stripe_hash_table;
358 	h = table->table + bucket;
359 
360 	/* hold the lock for the bucket because we may be
361 	 * removing it from the hash table
362 	 */
363 	spin_lock(&h->lock);
364 
365 	/*
366 	 * hold the lock for the bio list because we need
367 	 * to make sure the bio list is empty
368 	 */
369 	spin_lock(&rbio->bio_list_lock);
370 
371 	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
372 		list_del_init(&rbio->stripe_cache);
373 		table->cache_size -= 1;
374 		freeit = 1;
375 
376 		/* if the bio list isn't empty, this rbio is
377 		 * still involved in an IO.  We take it out
378 		 * of the cache list, and drop the ref that
379 		 * was held for the list.
380 		 *
381 		 * If the bio_list was empty, we also remove
382 		 * the rbio from the hash_table, and drop
383 		 * the corresponding ref
384 		 */
385 		if (bio_list_empty(&rbio->bio_list)) {
386 			if (!list_empty(&rbio->hash_list)) {
387 				list_del_init(&rbio->hash_list);
388 				refcount_dec(&rbio->refs);
389 				BUG_ON(!list_empty(&rbio->plug_list));
390 			}
391 		}
392 	}
393 
394 	spin_unlock(&rbio->bio_list_lock);
395 	spin_unlock(&h->lock);
396 
397 	if (freeit)
398 		__free_raid_bio(rbio);
399 }
400 
401 /*
402  * prune a given rbio from the cache
403  */
404 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
405 {
406 	struct btrfs_stripe_hash_table *table;
407 	unsigned long flags;
408 
409 	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
410 		return;
411 
412 	table = rbio->fs_info->stripe_hash_table;
413 
414 	spin_lock_irqsave(&table->cache_lock, flags);
415 	__remove_rbio_from_cache(rbio);
416 	spin_unlock_irqrestore(&table->cache_lock, flags);
417 }
418 
419 /*
420  * remove everything in the cache
421  */
422 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
423 {
424 	struct btrfs_stripe_hash_table *table;
425 	unsigned long flags;
426 	struct btrfs_raid_bio *rbio;
427 
428 	table = info->stripe_hash_table;
429 
430 	spin_lock_irqsave(&table->cache_lock, flags);
431 	while (!list_empty(&table->stripe_cache)) {
432 		rbio = list_entry(table->stripe_cache.next,
433 				  struct btrfs_raid_bio,
434 				  stripe_cache);
435 		__remove_rbio_from_cache(rbio);
436 	}
437 	spin_unlock_irqrestore(&table->cache_lock, flags);
438 }
439 
440 /*
441  * remove all cached entries and free the hash table
442  * used by unmount
443  */
444 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
445 {
446 	if (!info->stripe_hash_table)
447 		return;
448 	btrfs_clear_rbio_cache(info);
449 	kvfree(info->stripe_hash_table);
450 	info->stripe_hash_table = NULL;
451 }
452 
453 /*
454  * insert an rbio into the stripe cache.  It
455  * must have already been prepared by calling
456  * cache_rbio_pages
457  *
458  * If this rbio was already cached, it gets
459  * moved to the front of the lru.
460  *
461  * If the size of the rbio cache is too big, we
462  * prune an item.
463  */
464 static void cache_rbio(struct btrfs_raid_bio *rbio)
465 {
466 	struct btrfs_stripe_hash_table *table;
467 	unsigned long flags;
468 
469 	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
470 		return;
471 
472 	table = rbio->fs_info->stripe_hash_table;
473 
474 	spin_lock_irqsave(&table->cache_lock, flags);
475 	spin_lock(&rbio->bio_list_lock);
476 
477 	/* bump our ref if we were not in the list before */
478 	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
479 		refcount_inc(&rbio->refs);
480 
481 	if (!list_empty(&rbio->stripe_cache)) {
482 		list_move(&rbio->stripe_cache, &table->stripe_cache);
483 	} else {
484 		list_add(&rbio->stripe_cache, &table->stripe_cache);
485 		table->cache_size += 1;
486 	}
487 
488 	spin_unlock(&rbio->bio_list_lock);
489 
490 	if (table->cache_size > RBIO_CACHE_SIZE) {
491 		struct btrfs_raid_bio *found;
492 
493 		found = list_entry(table->stripe_cache.prev,
494 				  struct btrfs_raid_bio,
495 				  stripe_cache);
496 
497 		if (found != rbio)
498 			__remove_rbio_from_cache(found);
499 	}
500 
501 	spin_unlock_irqrestore(&table->cache_lock, flags);
502 }
503 
504 /*
505  * helper function to run the xor_blocks api.  It is only
506  * able to do MAX_XOR_BLOCKS at a time, so we need to
507  * loop through.
508  */
509 static void run_xor(void **pages, int src_cnt, ssize_t len)
510 {
511 	int src_off = 0;
512 	int xor_src_cnt = 0;
513 	void *dest = pages[src_cnt];
514 
515 	while (src_cnt > 0) {
516 		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
517 		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
518 
519 		src_cnt -= xor_src_cnt;
520 		src_off += xor_src_cnt;
521 	}
522 }
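
/*
 * What run_xor() computes, ignoring the MAX_XOR_BLOCKS chunking (which
 * only exists because of the xor_blocks() API limit): the destination
 * buffer ends up xored with every source.  A plain byte-wise sketch of
 * the same result:
 *
 *	u8 *d = pages[src_cnt];
 *
 *	for (i = 0; i < src_cnt; i++)
 *		for (j = 0; j < len; j++)
 *			d[j] ^= ((u8 *)pages[i])[j];
 */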
523 
524 /*
525  * returns true if the bio list inside this rbio
526  * covers an entire stripe (no rmw required).
527  * Must be called with the bio list lock held, or
528  * at a time when you know it is impossible to add
529  * new bios into the list
530  */
531 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
532 {
533 	unsigned long size = rbio->bio_list_bytes;
534 	int ret = 1;
535 
536 	if (size != rbio->nr_data * rbio->stripe_len)
537 		ret = 0;
538 
539 	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
540 	return ret;
541 }
542 
543 static int rbio_is_full(struct btrfs_raid_bio *rbio)
544 {
545 	unsigned long flags;
546 	int ret;
547 
548 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
549 	ret = __rbio_is_full(rbio);
550 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
551 	return ret;
552 }
553 
554 /*
555  * returns 1 if it is safe to merge two rbios together.
556  * The merging is safe if the two rbios correspond to
557  * the same stripe and if they are both going in the same
558  * direction (read vs write), and if neither one is
559  * locked for final IO
560  *
561  * The caller is responsible for locking such that
562  * rmw_locked is safe to test
563  */
564 static int rbio_can_merge(struct btrfs_raid_bio *last,
565 			  struct btrfs_raid_bio *cur)
566 {
567 	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
568 	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
569 		return 0;
570 
571 	/*
572 	 * we can't merge with cached rbios, since the
573 	 * idea is that when we merge the destination
574 	 * rbio is going to run our IO for us.  We can
575 	 * steal from cached rbios though, other functions
576 	 * handle that.
577 	 */
578 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
579 	    test_bit(RBIO_CACHE_BIT, &cur->flags))
580 		return 0;
581 
582 	if (last->bbio->raid_map[0] !=
583 	    cur->bbio->raid_map[0])
584 		return 0;
585 
586 	/* we can't merge with different operations */
587 	if (last->operation != cur->operation)
588 		return 0;
589 	/*
590 	 * A parity scrub has to read the full stripe from the drive,
591 	 * then check and repair the parity and write the new results.
592 	 *
593 	 * We're not allowed to add any new bios to the
594 	 * bio list here, anyone else that wants to
595 	 * change this stripe needs to do their own rmw.
596 	 */
597 	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
598 		return 0;
599 
600 	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
601 		return 0;
602 
603 	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
604 		int fa = last->faila;
605 		int fb = last->failb;
606 		int cur_fa = cur->faila;
607 		int cur_fb = cur->failb;
608 
609 		if (last->faila >= last->failb) {
610 			fa = last->failb;
611 			fb = last->faila;
612 		}
613 
614 		if (cur->faila >= cur->failb) {
615 			cur_fa = cur->failb;
616 			cur_fb = cur->faila;
617 		}
618 
619 		if (fa != cur_fa || fb != cur_fb)
620 			return 0;
621 	}
622 	return 1;
623 }
624 
625 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
626 				  int index)
627 {
628 	return stripe * rbio->stripe_npages + index;
629 }
630 
631 /*
632  * these are just the pages from the rbio array, not from anything
633  * the FS sent down to us
634  */
635 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
636 				     int index)
637 {
638 	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
639 }
640 
641 /*
642  * helper to index into the pstripe
643  */
644 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
645 {
646 	return rbio_stripe_page(rbio, rbio->nr_data, index);
647 }
648 
649 /*
650  * helper to index into the qstripe, returns null
651  * if there is no qstripe
652  */
653 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
654 {
655 	if (rbio->nr_data + 1 == rbio->real_stripes)
656 		return NULL;
657 	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
658 }
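
/*
 * The stripe page helpers above assume a stripe-major layout of
 * stripe_pages[].  With illustrative sizes nr_data = 2, real_stripes =
 * 4 (raid6) and stripe_npages = 16, the indexes decode as:
 *
 *	 0 .. 15	data stripe 0
 *	16 .. 31	data stripe 1
 *	32 .. 47	P stripe	rbio_pstripe_page(rbio, 0..15)
 *	48 .. 63	Q stripe	rbio_qstripe_page(rbio, 0..15)
 */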
659 
660 /*
661  * The first stripe in the table for a logical address
662  * has the lock.  rbios are added in one of three ways:
663  *
664  * 1) Nobody has the stripe locked yet.  The rbio is given
665  * the lock and 0 is returned.  The caller must start the IO
666  * themselves.
667  *
668  * 2) Someone has the stripe locked, but we're able to merge
669  * with the lock owner.  The rbio is freed and the IO will
670  * start automatically along with the existing rbio.  1 is returned.
671  *
672  * 3) Someone has the stripe locked, but we're not able to merge.
673  * The rbio is added to the lock owner's plug list, or merged into
674  * an rbio already on the plug list.  When the lock owner unlocks,
675  * the next rbio on the list is run and the IO is started automatically.
676  * 1 is returned
677  *
678  * If we return 0, the caller still owns the rbio and must continue with
679  * IO submission.  If we return 1, the caller must assume the rbio has
680  * already been freed.
681  */
682 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
683 {
684 	int bucket = rbio_bucket(rbio);
685 	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
686 	struct btrfs_raid_bio *cur;
687 	struct btrfs_raid_bio *pending;
688 	unsigned long flags;
689 	struct btrfs_raid_bio *freeit = NULL;
690 	struct btrfs_raid_bio *cache_drop = NULL;
691 	int ret = 0;
692 
693 	spin_lock_irqsave(&h->lock, flags);
694 	list_for_each_entry(cur, &h->hash_list, hash_list) {
695 		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
696 			spin_lock(&cur->bio_list_lock);
697 
698 			/* can we steal this cached rbio's pages? */
699 			if (bio_list_empty(&cur->bio_list) &&
700 			    list_empty(&cur->plug_list) &&
701 			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
702 			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
703 				list_del_init(&cur->hash_list);
704 				refcount_dec(&cur->refs);
705 
706 				steal_rbio(cur, rbio);
707 				cache_drop = cur;
708 				spin_unlock(&cur->bio_list_lock);
709 
710 				goto lockit;
711 			}
712 
713 			/* can we merge into the lock owner? */
714 			if (rbio_can_merge(cur, rbio)) {
715 				merge_rbio(cur, rbio);
716 				spin_unlock(&cur->bio_list_lock);
717 				freeit = rbio;
718 				ret = 1;
719 				goto out;
720 			}
721 
722 
723 			/*
724 			 * we couldn't merge with the running
725 			 * rbio, see if we can merge with the
726 			 * pending ones.  We don't have to
727 			 * check for rmw_locked because there
728 			 * is no way they are inside finish_rmw
729 			 * right now
730 			 */
731 			list_for_each_entry(pending, &cur->plug_list,
732 					    plug_list) {
733 				if (rbio_can_merge(pending, rbio)) {
734 					merge_rbio(pending, rbio);
735 					spin_unlock(&cur->bio_list_lock);
736 					freeit = rbio;
737 					ret = 1;
738 					goto out;
739 				}
740 			}
741 
742 			/* no merging, put us on the tail of the plug list,
743 	 * our rbio will be started when the currently
744 			 * running rbio unlocks
745 			 */
746 			list_add_tail(&rbio->plug_list, &cur->plug_list);
747 			spin_unlock(&cur->bio_list_lock);
748 			ret = 1;
749 			goto out;
750 		}
751 	}
752 lockit:
753 	refcount_inc(&rbio->refs);
754 	list_add(&rbio->hash_list, &h->hash_list);
755 out:
756 	spin_unlock_irqrestore(&h->lock, flags);
757 	if (cache_drop)
758 		remove_rbio_from_cache(cache_drop);
759 	if (freeit)
760 		__free_raid_bio(freeit);
761 	return ret;
762 }
763 
764 /*
765  * called as rmw or parity rebuild is completed.  If the plug list has more
766  * rbios waiting for this stripe, the next one on the list will be started
767  */
768 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
769 {
770 	int bucket;
771 	struct btrfs_stripe_hash *h;
772 	unsigned long flags;
773 	int keep_cache = 0;
774 
775 	bucket = rbio_bucket(rbio);
776 	h = rbio->fs_info->stripe_hash_table->table + bucket;
777 
778 	if (list_empty(&rbio->plug_list))
779 		cache_rbio(rbio);
780 
781 	spin_lock_irqsave(&h->lock, flags);
782 	spin_lock(&rbio->bio_list_lock);
783 
784 	if (!list_empty(&rbio->hash_list)) {
785 		/*
786 		 * if we're still cached and there is no other IO
787 		 * to perform, just leave this rbio here for others
788 		 * to steal from later
789 		 */
790 		if (list_empty(&rbio->plug_list) &&
791 		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
792 			keep_cache = 1;
793 			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
794 			BUG_ON(!bio_list_empty(&rbio->bio_list));
795 			goto done;
796 		}
797 
798 		list_del_init(&rbio->hash_list);
799 		refcount_dec(&rbio->refs);
800 
801 		/*
802 		 * we use the plug list to hold all the rbios
803 		 * waiting for the chance to lock this stripe.
804 		 * hand the lock over to one of them.
805 		 */
806 		if (!list_empty(&rbio->plug_list)) {
807 			struct btrfs_raid_bio *next;
808 			struct list_head *head = rbio->plug_list.next;
809 
810 			next = list_entry(head, struct btrfs_raid_bio,
811 					  plug_list);
812 
813 			list_del_init(&rbio->plug_list);
814 
815 			list_add(&next->hash_list, &h->hash_list);
816 			refcount_inc(&next->refs);
817 			spin_unlock(&rbio->bio_list_lock);
818 			spin_unlock_irqrestore(&h->lock, flags);
819 
820 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
821 				async_read_rebuild(next);
822 			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
823 				steal_rbio(rbio, next);
824 				async_read_rebuild(next);
825 			} else if (next->operation == BTRFS_RBIO_WRITE) {
826 				steal_rbio(rbio, next);
827 				async_rmw_stripe(next);
828 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
829 				steal_rbio(rbio, next);
830 				async_scrub_parity(next);
831 			}
832 
833 			goto done_nolock;
834 		}
835 	}
836 done:
837 	spin_unlock(&rbio->bio_list_lock);
838 	spin_unlock_irqrestore(&h->lock, flags);
839 
840 done_nolock:
841 	if (!keep_cache)
842 		remove_rbio_from_cache(rbio);
843 }
844 
845 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
846 {
847 	int i;
848 
849 	if (!refcount_dec_and_test(&rbio->refs))
850 		return;
851 
852 	WARN_ON(!list_empty(&rbio->stripe_cache));
853 	WARN_ON(!list_empty(&rbio->hash_list));
854 	WARN_ON(!bio_list_empty(&rbio->bio_list));
855 
856 	for (i = 0; i < rbio->nr_pages; i++) {
857 		if (rbio->stripe_pages[i]) {
858 			__free_page(rbio->stripe_pages[i]);
859 			rbio->stripe_pages[i] = NULL;
860 		}
861 	}
862 
863 	btrfs_put_bbio(rbio->bbio);
864 	kfree(rbio);
865 }
866 
867 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
868 {
869 	struct bio *next;
870 
871 	while (cur) {
872 		next = cur->bi_next;
873 		cur->bi_next = NULL;
874 		cur->bi_status = err;
875 		bio_endio(cur);
876 		cur = next;
877 	}
878 }
879 
880 /*
881  * this frees the rbio and runs through all the bios in the
882  * bio_list and calls end_io on them
883  */
884 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
885 {
886 	struct bio *cur = bio_list_get(&rbio->bio_list);
887 	struct bio *extra;
888 
889 	if (rbio->generic_bio_cnt)
890 		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
891 
892 	/*
893 	 * At this moment, rbio->bio_list is empty, however since rbio does not
894 	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
895 	 * hash list, rbio may be merged with others so that rbio->bio_list
896 	 * becomes non-empty.
897 	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
898 	 * more and we can call bio_endio() on all queued bios.
899 	 */
900 	unlock_stripe(rbio);
901 	extra = bio_list_get(&rbio->bio_list);
902 	__free_raid_bio(rbio);
903 
904 	rbio_endio_bio_list(cur, err);
905 	if (extra)
906 		rbio_endio_bio_list(extra, err);
907 }
908 
909 /*
910  * end io function used by finish_rmw.  When we finally
911  * get here, we've written a full stripe
912  */
913 static void raid_write_end_io(struct bio *bio)
914 {
915 	struct btrfs_raid_bio *rbio = bio->bi_private;
916 	blk_status_t err = bio->bi_status;
917 	int max_errors;
918 
919 	if (err)
920 		fail_bio_stripe(rbio, bio);
921 
922 	bio_put(bio);
923 
924 	if (!atomic_dec_and_test(&rbio->stripes_pending))
925 		return;
926 
927 	err = BLK_STS_OK;
928 
929 	/* OK, we have finished writing all the stripes we need to. */
930 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
931 		     0 : rbio->bbio->max_errors;
932 	if (atomic_read(&rbio->error) > max_errors)
933 		err = BLK_STS_IOERR;
934 
935 	rbio_orig_end_io(rbio, err);
936 }
937 
938 /*
939  * the read/modify/write code wants to use the original bio for
940  * any pages it included, and then use the rbio for everything
941  * else.  This function decides if a given index (stripe number)
942  * and page number in that stripe fall inside the original bio
943  * or the rbio.
944  *
945  * if you set bio_list_only, you'll get a NULL back for any ranges
946  * that are outside the bio_list
947  *
948  * This doesn't take any refs on anything, you get a bare page pointer
949  * and the caller must bump refs as required.
950  *
951  * You must call index_rbio_pages once before you can trust
952  * the answers from this function.
953  */
954 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
955 				 int index, int pagenr, int bio_list_only)
956 {
957 	int chunk_page;
958 	struct page *p = NULL;
959 
960 	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
961 
962 	spin_lock_irq(&rbio->bio_list_lock);
963 	p = rbio->bio_pages[chunk_page];
964 	spin_unlock_irq(&rbio->bio_list_lock);
965 
966 	if (p || bio_list_only)
967 		return p;
968 
969 	return rbio->stripe_pages[chunk_page];
970 }
971 
972 /*
973  * number of pages we need for the entire stripe across all the
974  * drives
975  */
976 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
977 {
978 	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
979 }
980 
981 /*
982  * allocation and initial setup for the btrfs_raid_bio.  Not
983  * this does not allocate any pages for rbio->pages.
984  */
985 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
986 					 struct btrfs_bio *bbio,
987 					 u64 stripe_len)
988 {
989 	struct btrfs_raid_bio *rbio;
990 	int nr_data = 0;
991 	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
992 	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
993 	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
994 	void *p;
995 
996 	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
997 		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
998 		       sizeof(long), GFP_NOFS);
999 	if (!rbio)
1000 		return ERR_PTR(-ENOMEM);
1001 
1002 	bio_list_init(&rbio->bio_list);
1003 	INIT_LIST_HEAD(&rbio->plug_list);
1004 	spin_lock_init(&rbio->bio_list_lock);
1005 	INIT_LIST_HEAD(&rbio->stripe_cache);
1006 	INIT_LIST_HEAD(&rbio->hash_list);
1007 	rbio->bbio = bbio;
1008 	rbio->fs_info = fs_info;
1009 	rbio->stripe_len = stripe_len;
1010 	rbio->nr_pages = num_pages;
1011 	rbio->real_stripes = real_stripes;
1012 	rbio->stripe_npages = stripe_npages;
1013 	rbio->faila = -1;
1014 	rbio->failb = -1;
1015 	refcount_set(&rbio->refs, 1);
1016 	atomic_set(&rbio->error, 0);
1017 	atomic_set(&rbio->stripes_pending, 0);
1018 
1019 	/*
1020 	 * the stripe_pages and bio_pages arrays point to the extra
1021 	 * memory we allocated past the end of the rbio
1022 	 */
1023 	p = rbio + 1;
1024 	rbio->stripe_pages = p;
1025 	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1026 	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1027 
1028 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1029 		nr_data = real_stripes - 1;
1030 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1031 		nr_data = real_stripes - 2;
1032 	else
1033 		BUG();
1034 
1035 	rbio->nr_data = nr_data;
1036 	return rbio;
1037 }
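
/*
 * Sketch of the single allocation done by alloc_rbio() above, with the
 * trailing arrays carved out of the same kzalloc():
 *
 *	+---------------------------+ <- rbio
 *	| struct btrfs_raid_bio     |
 *	+---------------------------+ <- rbio->stripe_pages
 *	| num_pages page pointers   |
 *	+---------------------------+ <- rbio->bio_pages
 *	| num_pages page pointers   |
 *	+---------------------------+ <- rbio->dbitmap
 *	| stripe_npages bits        |
 *	| (rounded up to longs)     |
 *	+---------------------------+
 */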
1038 
1039 /* allocate pages for all the stripes in the bio, including parity */
1040 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1041 {
1042 	int i;
1043 	struct page *page;
1044 
1045 	for (i = 0; i < rbio->nr_pages; i++) {
1046 		if (rbio->stripe_pages[i])
1047 			continue;
1048 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1049 		if (!page)
1050 			return -ENOMEM;
1051 		rbio->stripe_pages[i] = page;
1052 	}
1053 	return 0;
1054 }
1055 
1056 /* only allocate pages for p/q stripes */
1057 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1058 {
1059 	int i;
1060 	struct page *page;
1061 
1062 	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1063 
1064 	for (; i < rbio->nr_pages; i++) {
1065 		if (rbio->stripe_pages[i])
1066 			continue;
1067 		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1068 		if (!page)
1069 			return -ENOMEM;
1070 		rbio->stripe_pages[i] = page;
1071 	}
1072 	return 0;
1073 }
1074 
1075 /*
1076  * add a single page from a specific stripe into our list of bios for IO
1077  * this will try to merge into existing bios if possible, and returns
1078  * zero if all went well.
1079  */
1080 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1081 			    struct bio_list *bio_list,
1082 			    struct page *page,
1083 			    int stripe_nr,
1084 			    unsigned long page_index,
1085 			    unsigned long bio_max_len)
1086 {
1087 	struct bio *last = bio_list->tail;
1088 	u64 last_end = 0;
1089 	int ret;
1090 	struct bio *bio;
1091 	struct btrfs_bio_stripe *stripe;
1092 	u64 disk_start;
1093 
1094 	stripe = &rbio->bbio->stripes[stripe_nr];
1095 	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1096 
1097 	/* if the device is missing, just fail this stripe */
1098 	if (!stripe->dev->bdev)
1099 		return fail_rbio_index(rbio, stripe_nr);
1100 
1101 	/* see if we can add this page onto our existing bio */
1102 	if (last) {
1103 		last_end = (u64)last->bi_iter.bi_sector << 9;
1104 		last_end += last->bi_iter.bi_size;
1105 
1106 		/*
1107 		 * we can't merge these if they are from different
1108 		 * devices or if they are not contiguous
1109 		 */
1110 		if (last_end == disk_start && stripe->dev->bdev &&
1111 		    !last->bi_status &&
1112 		    last->bi_disk == stripe->dev->bdev->bd_disk &&
1113 		    last->bi_partno == stripe->dev->bdev->bd_partno) {
1114 			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1115 			if (ret == PAGE_SIZE)
1116 				return 0;
1117 		}
1118 	}
1119 
1120 	/* put a new bio on the list */
1121 	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1122 	bio->bi_iter.bi_size = 0;
1123 	bio_set_dev(bio, stripe->dev->bdev);
1124 	bio->bi_iter.bi_sector = disk_start >> 9;
1125 
1126 	bio_add_page(bio, page, PAGE_SIZE, 0);
1127 	bio_list_add(bio_list, bio);
1128 	return 0;
1129 }
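
/*
 * The merge test in rbio_add_io_page() is the usual "physically
 * contiguous, same device" rule.  In condensed form, a page is only
 * appended to the tail bio when:
 *
 *	last_end == disk_start			// contiguous on disk
 *	&& same bd_disk and bd_partno		// same device/partition
 *	&& bio_add_page() accepts the page	// bio not full
 *
 * otherwise a fresh bio is started for the stripe.
 */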
1130 
1131 /*
1132  * while we're doing the read/modify/write cycle, we could
1133  * have errors in reading pages off the disk.  This checks
1134  * for errors and if we're not able to read the page it'll
1135  * trigger parity reconstruction.  The rmw will be finished
1136  * after we've reconstructed the failed stripes
1137  */
1138 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1139 {
1140 	if (rbio->faila >= 0 || rbio->failb >= 0) {
1141 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1142 		__raid56_parity_recover(rbio);
1143 	} else {
1144 		finish_rmw(rbio);
1145 	}
1146 }
1147 
1148 /*
1149  * helper function to walk our bio list and populate the bio_pages array with
1150  * the result.  This seems expensive, but it is faster than constantly
1151  * searching through the bio list as we set up the IO in finish_rmw or stripe
1152  * reconstruction.
1153  *
1154  * This must be called before you trust the answers from page_in_rbio
1155  */
1156 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1157 {
1158 	struct bio *bio;
1159 	u64 start;
1160 	unsigned long stripe_offset;
1161 	unsigned long page_index;
1162 
1163 	spin_lock_irq(&rbio->bio_list_lock);
1164 	bio_list_for_each(bio, &rbio->bio_list) {
1165 		struct bio_vec bvec;
1166 		struct bvec_iter iter;
1167 		int i = 0;
1168 
1169 		start = (u64)bio->bi_iter.bi_sector << 9;
1170 		stripe_offset = start - rbio->bbio->raid_map[0];
1171 		page_index = stripe_offset >> PAGE_SHIFT;
1172 
1173 		if (bio_flagged(bio, BIO_CLONED))
1174 			bio->bi_iter = btrfs_io_bio(bio)->iter;
1175 
1176 		bio_for_each_segment(bvec, bio, iter) {
1177 			rbio->bio_pages[page_index + i] = bvec.bv_page;
1178 			i++;
1179 		}
1180 	}
1181 	spin_unlock_irq(&rbio->bio_list_lock);
1182 }
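
/*
 * Worked example for the indexing above, with illustrative addresses:
 * if raid_map[0] = 0x10000000 and a bio starts at logical 0x10003000,
 * then stripe_offset = 0x3000 and, with 4K pages, the bio's first
 * segment lands in bio_pages[3]; each further segment fills the next
 * slot.
 */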
1183 
1184 /*
1185  * this is called in one of two situations.  We either
1186  * have a full stripe from the higher layers, or we've read all
1187  * the missing bits off disk.
1188  *
1189  * This will calculate the parity and then send down any
1190  * changed blocks.
1191  */
1192 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1193 {
1194 	struct btrfs_bio *bbio = rbio->bbio;
1195 	void *pointers[rbio->real_stripes];
1196 	int nr_data = rbio->nr_data;
1197 	int stripe;
1198 	int pagenr;
1199 	int p_stripe = -1;
1200 	int q_stripe = -1;
1201 	struct bio_list bio_list;
1202 	struct bio *bio;
1203 	int ret;
1204 
1205 	bio_list_init(&bio_list);
1206 
1207 	if (rbio->real_stripes - rbio->nr_data == 1) {
1208 		p_stripe = rbio->real_stripes - 1;
1209 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
1210 		p_stripe = rbio->real_stripes - 2;
1211 		q_stripe = rbio->real_stripes - 1;
1212 	} else {
1213 		BUG();
1214 	}
1215 
1216 	/* at this point we either have a full stripe,
1217 	 * or we've read the full stripe from the drive.
1218 	 * recalculate the parity and write the new results.
1219 	 *
1220 	 * We're not allowed to add any new bios to the
1221 	 * bio list here, anyone else that wants to
1222 	 * change this stripe needs to do their own rmw.
1223 	 */
1224 	spin_lock_irq(&rbio->bio_list_lock);
1225 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1226 	spin_unlock_irq(&rbio->bio_list_lock);
1227 
1228 	atomic_set(&rbio->error, 0);
1229 
1230 	/*
1231 	 * now that we've set rmw_locked, run through the
1232 	 * bio list one last time and map the page pointers
1233 	 *
1234 	 * We don't cache full rbios because we're assuming
1235 	 * the higher layers are unlikely to use this area of
1236 	 * the disk again soon.  If they do use it again,
1237 	 * hopefully they will send another full bio.
1238 	 */
1239 	index_rbio_pages(rbio);
1240 	if (!rbio_is_full(rbio))
1241 		cache_rbio_pages(rbio);
1242 	else
1243 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1244 
1245 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1246 		struct page *p;
1247 		/* first collect one page from each data stripe */
1248 		for (stripe = 0; stripe < nr_data; stripe++) {
1249 			p = page_in_rbio(rbio, stripe, pagenr, 0);
1250 			pointers[stripe] = kmap(p);
1251 		}
1252 
1253 		/* then add the parity stripe */
1254 		p = rbio_pstripe_page(rbio, pagenr);
1255 		SetPageUptodate(p);
1256 		pointers[stripe++] = kmap(p);
1257 
1258 		if (q_stripe != -1) {
1259 
1260 			/*
1261 			 * raid6, add the qstripe and call the
1262 			 * library function to fill in our p/q
1263 			 */
1264 			p = rbio_qstripe_page(rbio, pagenr);
1265 			SetPageUptodate(p);
1266 			pointers[stripe++] = kmap(p);
1267 
1268 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1269 						pointers);
1270 		} else {
1271 			/* raid5 */
1272 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1273 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1274 		}
1275 
1276 
1277 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1278 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1279 	}
1280 
1281 	/*
1282 	 * time to start writing.  Make bios for everything from the
1283 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1284 	 * everything else.
1285 	 */
1286 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1287 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1288 			struct page *page;
1289 			if (stripe < rbio->nr_data) {
1290 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1291 				if (!page)
1292 					continue;
1293 			} else {
1294 				page = rbio_stripe_page(rbio, stripe, pagenr);
1295 			}
1296 
1297 			ret = rbio_add_io_page(rbio, &bio_list,
1298 				       page, stripe, pagenr, rbio->stripe_len);
1299 			if (ret)
1300 				goto cleanup;
1301 		}
1302 	}
1303 
1304 	if (likely(!bbio->num_tgtdevs))
1305 		goto write_data;
1306 
1307 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1308 		if (!bbio->tgtdev_map[stripe])
1309 			continue;
1310 
1311 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1312 			struct page *page;
1313 			if (stripe < rbio->nr_data) {
1314 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1315 				if (!page)
1316 					continue;
1317 			} else {
1318 			       page = rbio_stripe_page(rbio, stripe, pagenr);
1319 			}
1320 
1321 			ret = rbio_add_io_page(rbio, &bio_list, page,
1322 					       rbio->bbio->tgtdev_map[stripe],
1323 					       pagenr, rbio->stripe_len);
1324 			if (ret)
1325 				goto cleanup;
1326 		}
1327 	}
1328 
1329 write_data:
1330 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1331 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1332 
1333 	while (1) {
1334 		bio = bio_list_pop(&bio_list);
1335 		if (!bio)
1336 			break;
1337 
1338 		bio->bi_private = rbio;
1339 		bio->bi_end_io = raid_write_end_io;
1340 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1341 
1342 		submit_bio(bio);
1343 	}
1344 	return;
1345 
1346 cleanup:
1347 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1348 
1349 	while ((bio = bio_list_pop(&bio_list)))
1350 		bio_put(bio);
1351 }
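
/*
 * For reference, the parity math finish_rmw() relies on: P is the xor
 * of the data blocks, and for raid6 the Q syndrome is generated by
 * raid6_call.gen_syndrome() using GF(2^8) arithmetic:
 *
 *	P = D0 ^ D1 ^ ... ^ D(n-1)
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*D(n-1)	(g = 2 in GF(256))
 */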
1352 
1353 /*
1354  * helper to find the stripe number for a given bio.  Used to figure out which
1355  * stripe has failed.  This expects the bio to correspond to a physical disk,
1356  * so it looks up based on physical sector numbers.
1357  */
1358 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1359 			   struct bio *bio)
1360 {
1361 	u64 physical = bio->bi_iter.bi_sector;
1362 	u64 stripe_start;
1363 	int i;
1364 	struct btrfs_bio_stripe *stripe;
1365 
1366 	physical <<= 9;
1367 
1368 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1369 		stripe = &rbio->bbio->stripes[i];
1370 		stripe_start = stripe->physical;
1371 		if (physical >= stripe_start &&
1372 		    physical < stripe_start + rbio->stripe_len &&
1373 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1374 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1375 			return i;
1376 		}
1377 	}
1378 	return -1;
1379 }
1380 
1381 /*
1382  * helper to find the stripe number for a given
1383  * bio (before mapping).  Used to figure out which stripe has
1384  * failed.  This looks up based on logical block numbers.
1385  */
1386 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1387 				   struct bio *bio)
1388 {
1389 	u64 logical = bio->bi_iter.bi_sector;
1390 	u64 stripe_start;
1391 	int i;
1392 
1393 	logical <<= 9;
1394 
1395 	for (i = 0; i < rbio->nr_data; i++) {
1396 		stripe_start = rbio->bbio->raid_map[i];
1397 		if (logical >= stripe_start &&
1398 		    logical < stripe_start + rbio->stripe_len) {
1399 			return i;
1400 		}
1401 	}
1402 	return -1;
1403 }
1404 
1405 /*
1406  * returns -EIO if we had too many failures
1407  */
1408 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1409 {
1410 	unsigned long flags;
1411 	int ret = 0;
1412 
1413 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1414 
1415 	/* we already know this stripe is bad, move on */
1416 	if (rbio->faila == failed || rbio->failb == failed)
1417 		goto out;
1418 
1419 	if (rbio->faila == -1) {
1420 		/* first failure on this rbio */
1421 		rbio->faila = failed;
1422 		atomic_inc(&rbio->error);
1423 	} else if (rbio->failb == -1) {
1424 		/* second failure on this rbio */
1425 		rbio->failb = failed;
1426 		atomic_inc(&rbio->error);
1427 	} else {
1428 		ret = -EIO;
1429 	}
1430 out:
1431 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1432 
1433 	return ret;
1434 }
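
/*
 * The usual pattern around fail_rbio_index()/fail_bio_stripe(), as the
 * end_io handlers below use it: record the failed stripe, and once all
 * IO has completed compare the error count against what parity can
 * absorb:
 *
 *	if (bio->bi_status)
 *		fail_bio_stripe(rbio, bio);
 *	...
 *	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
 *		// unrecoverable, fail the whole rbio
 */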
1435 
1436 /*
1437  * helper to fail a stripe based on a physical disk
1438  * bio.
1439  */
1440 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1441 			   struct bio *bio)
1442 {
1443 	int failed = find_bio_stripe(rbio, bio);
1444 
1445 	if (failed < 0)
1446 		return -EIO;
1447 
1448 	return fail_rbio_index(rbio, failed);
1449 }
1450 
1451 /*
1452  * this sets each page in the bio uptodate.  It should only be used on private
1453  * rbio pages, nothing that comes in from the higher layers
1454  */
1455 static void set_bio_pages_uptodate(struct bio *bio)
1456 {
1457 	struct bio_vec *bvec;
1458 	int i;
1459 
1460 	ASSERT(!bio_flagged(bio, BIO_CLONED));
1461 
1462 	bio_for_each_segment_all(bvec, bio, i)
1463 		SetPageUptodate(bvec->bv_page);
1464 }
1465 
1466 /*
1467  * end io for the read phase of the rmw cycle.  All the bios here are physical
1468  * stripe bios we've read from the disk so we can recalculate the parity of the
1469  * stripe.
1470  *
1471  * This will usually kick off finish_rmw once all the bios are read in, but it
1472  * may trigger parity reconstruction if we had any errors along the way
1473  */
1474 static void raid_rmw_end_io(struct bio *bio)
1475 {
1476 	struct btrfs_raid_bio *rbio = bio->bi_private;
1477 
1478 	if (bio->bi_status)
1479 		fail_bio_stripe(rbio, bio);
1480 	else
1481 		set_bio_pages_uptodate(bio);
1482 
1483 	bio_put(bio);
1484 
1485 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1486 		return;
1487 
1488 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1489 		goto cleanup;
1490 
1491 	/*
1492 	 * this will normally call finish_rmw to start our write
1493 	 * but if there are any failed stripes we'll reconstruct
1494 	 * from parity first
1495 	 */
1496 	validate_rbio_for_rmw(rbio);
1497 	return;
1498 
1499 cleanup:
1500 
1501 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1502 }
1503 
1504 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1505 {
1506 	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1507 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1508 }
1509 
1510 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1511 {
1512 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1513 			read_rebuild_work, NULL, NULL);
1514 
1515 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1516 }
1517 
1518 /*
1519  * the stripe must be locked by the caller.  It will
1520  * unlock after all the writes are done
1521  */
1522 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1523 {
1524 	int bios_to_read = 0;
1525 	struct bio_list bio_list;
1526 	int ret;
1527 	int pagenr;
1528 	int stripe;
1529 	struct bio *bio;
1530 
1531 	bio_list_init(&bio_list);
1532 
1533 	ret = alloc_rbio_pages(rbio);
1534 	if (ret)
1535 		goto cleanup;
1536 
1537 	index_rbio_pages(rbio);
1538 
1539 	atomic_set(&rbio->error, 0);
1540 	/*
1541 	 * build a list of bios to read all the missing parts of this
1542 	 * stripe
1543 	 */
1544 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1545 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1546 			struct page *page;
1547 			/*
1548 			 * we want to find all the pages missing from
1549 			 * the rbio and read them from the disk.  If
1550 			 * page_in_rbio finds a page in the bio list
1551 			 * we don't need to read it off the stripe.
1552 			 */
1553 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1554 			if (page)
1555 				continue;
1556 
1557 			page = rbio_stripe_page(rbio, stripe, pagenr);
1558 			/*
1559 			 * the bio cache may have handed us an uptodate
1560 			 * page.  If so, be happy and use it
1561 			 */
1562 			if (PageUptodate(page))
1563 				continue;
1564 
1565 			ret = rbio_add_io_page(rbio, &bio_list, page,
1566 				       stripe, pagenr, rbio->stripe_len);
1567 			if (ret)
1568 				goto cleanup;
1569 		}
1570 	}
1571 
1572 	bios_to_read = bio_list_size(&bio_list);
1573 	if (!bios_to_read) {
1574 		/*
1575 		 * this can happen if others have merged with
1576 		 * us; it means there is nothing left to read.
1577 		 * But if there are missing devices it may not be
1578 		 * safe to do the full stripe write yet.
1579 		 */
1580 		goto finish;
1581 	}
1582 
1583 	/*
1584 	 * the bbio may be freed once we submit the last bio.  Make sure
1585 	 * not to touch it after that
1586 	 */
1587 	atomic_set(&rbio->stripes_pending, bios_to_read);
1588 	while (1) {
1589 		bio = bio_list_pop(&bio_list);
1590 		if (!bio)
1591 			break;
1592 
1593 		bio->bi_private = rbio;
1594 		bio->bi_end_io = raid_rmw_end_io;
1595 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
1596 
1597 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1598 
1599 		submit_bio(bio);
1600 	}
1601 	/* the actual write will happen once the reads are done */
1602 	return 0;
1603 
1604 cleanup:
1605 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1606 
1607 	while ((bio = bio_list_pop(&bio_list)))
1608 		bio_put(bio);
1609 
1610 	return -EIO;
1611 
1612 finish:
1613 	validate_rbio_for_rmw(rbio);
1614 	return 0;
1615 }
1616 
1617 /*
1618  * if the upper layers pass in a full stripe, we thank them by only allocating
1619  * enough pages to hold the parity, and sending it all down quickly.
1620  */
1621 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1622 {
1623 	int ret;
1624 
1625 	ret = alloc_rbio_parity_pages(rbio);
1626 	if (ret) {
1627 		__free_raid_bio(rbio);
1628 		return ret;
1629 	}
1630 
1631 	ret = lock_stripe_add(rbio);
1632 	if (ret == 0)
1633 		finish_rmw(rbio);
1634 	return 0;
1635 }
1636 
1637 /*
1638  * partial stripe writes get handed over to async helpers.
1639  * We're really hoping to merge a few more writes into this
1640  * rbio before calculating new parity
1641  */
1642 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1643 {
1644 	int ret;
1645 
1646 	ret = lock_stripe_add(rbio);
1647 	if (ret == 0)
1648 		async_rmw_stripe(rbio);
1649 	return 0;
1650 }
1651 
1652 /*
1653  * sometimes while we were reading from the drive to
1654  * recalculate parity, enough new bios come in to create
1655  * a full stripe.  So we do a check here to see if we can
1656  * go directly to finish_rmw
1657  */
1658 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1659 {
1660 	/* head off into rmw land if we don't have a full stripe */
1661 	if (!rbio_is_full(rbio))
1662 		return partial_stripe_write(rbio);
1663 	return full_stripe_write(rbio);
1664 }
1665 
1666 /*
1667  * We use plugging callbacks to collect full stripes.
1668  * Any time we get a partial stripe write while plugged
1669  * we collect it into a list.  When the unplug comes down,
1670  * we sort the list by logical block number and merge
1671  * everything we can into the same rbios
1672  */
1673 struct btrfs_plug_cb {
1674 	struct blk_plug_cb cb;
1675 	struct btrfs_fs_info *info;
1676 	struct list_head rbio_list;
1677 	struct btrfs_work work;
1678 };
1679 
1680 /*
1681  * rbios on the plug list are sorted for easier merging.
1682  */
1683 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1684 {
1685 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1686 						 plug_list);
1687 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1688 						 plug_list);
1689 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1690 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1691 
1692 	if (a_sector < b_sector)
1693 		return -1;
1694 	if (a_sector > b_sector)
1695 		return 1;
1696 	return 0;
1697 }
1698 
1699 static void run_plug(struct btrfs_plug_cb *plug)
1700 {
1701 	struct btrfs_raid_bio *cur;
1702 	struct btrfs_raid_bio *last = NULL;
1703 
1704 	/*
1705 	 * sort our plug list then try to merge
1706 	 * everything we can in hopes of creating full
1707 	 * stripes.
1708 	 */
1709 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1710 	while (!list_empty(&plug->rbio_list)) {
1711 		cur = list_entry(plug->rbio_list.next,
1712 				 struct btrfs_raid_bio, plug_list);
1713 		list_del_init(&cur->plug_list);
1714 
1715 		if (rbio_is_full(cur)) {
1716 			/* we have a full stripe, send it down */
1717 			full_stripe_write(cur);
1718 			continue;
1719 		}
1720 		if (last) {
1721 			if (rbio_can_merge(last, cur)) {
1722 				merge_rbio(last, cur);
1723 				__free_raid_bio(cur);
1724 				continue;
1725 
1726 			}
1727 			__raid56_parity_write(last);
1728 		}
1729 		last = cur;
1730 	}
1731 	if (last) {
1732 		__raid56_parity_write(last);
1733 	}
1734 	kfree(plug);
1735 }
1736 
1737 /*
1738  * if the unplug comes from schedule, we have to push the
1739  * work off to a helper thread
1740  */
1741 static void unplug_work(struct btrfs_work *work)
1742 {
1743 	struct btrfs_plug_cb *plug;
1744 	plug = container_of(work, struct btrfs_plug_cb, work);
1745 	run_plug(plug);
1746 }
1747 
1748 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1749 {
1750 	struct btrfs_plug_cb *plug;
1751 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1752 
1753 	if (from_schedule) {
1754 		btrfs_init_work(&plug->work, btrfs_rmw_helper,
1755 				unplug_work, NULL, NULL);
1756 		btrfs_queue_work(plug->info->rmw_workers,
1757 				 &plug->work);
1758 		return;
1759 	}
1760 	run_plug(plug);
1761 }
1762 
1763 /*
1764  * our main entry point for writes from the rest of the FS.
1765  */
1766 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1767 			struct btrfs_bio *bbio, u64 stripe_len)
1768 {
1769 	struct btrfs_raid_bio *rbio;
1770 	struct btrfs_plug_cb *plug = NULL;
1771 	struct blk_plug_cb *cb;
1772 	int ret;
1773 
1774 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1775 	if (IS_ERR(rbio)) {
1776 		btrfs_put_bbio(bbio);
1777 		return PTR_ERR(rbio);
1778 	}
1779 	bio_list_add(&rbio->bio_list, bio);
1780 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1781 	rbio->operation = BTRFS_RBIO_WRITE;
1782 
1783 	btrfs_bio_counter_inc_noblocked(fs_info);
1784 	rbio->generic_bio_cnt = 1;
1785 
1786 	/*
1787 	 * don't plug on full rbios, just get them out the door
1788 	 * as quickly as we can
1789 	 */
1790 	if (rbio_is_full(rbio)) {
1791 		ret = full_stripe_write(rbio);
1792 		if (ret)
1793 			btrfs_bio_counter_dec(fs_info);
1794 		return ret;
1795 	}
1796 
1797 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1798 	if (cb) {
1799 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1800 		if (!plug->info) {
1801 			plug->info = fs_info;
1802 			INIT_LIST_HEAD(&plug->rbio_list);
1803 		}
1804 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1805 		ret = 0;
1806 	} else {
1807 		ret = __raid56_parity_write(rbio);
1808 		if (ret)
1809 			btrfs_bio_counter_dec(fs_info);
1810 	}
1811 	return ret;
1812 }
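
/*
 * To summarize raid56_parity_write(): a full stripe bypasses plugging
 * and goes straight to full_stripe_write(); a partial stripe is parked
 * on the task's plug list when one is active (so run_plug() can sort
 * and merge rbios at unplug time), and otherwise takes the rmw path
 * through __raid56_parity_write().
 */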
1813 
1814 /*
1815  * all parity reconstruction happens here.  We've read in everything
1816  * we can find from the drives and this does the heavy lifting of
1817  * sorting the good from the bad.
1818  */
1819 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1820 {
1821 	int pagenr, stripe;
1822 	void **pointers;
1823 	int faila = -1, failb = -1;
1824 	struct page *page;
1825 	blk_status_t err;
1826 	int i;
1827 
1828 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1829 	if (!pointers) {
1830 		err = BLK_STS_RESOURCE;
1831 		goto cleanup_io;
1832 	}
1833 
1834 	faila = rbio->faila;
1835 	failb = rbio->failb;
1836 
1837 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1838 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1839 		spin_lock_irq(&rbio->bio_list_lock);
1840 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1841 		spin_unlock_irq(&rbio->bio_list_lock);
1842 	}
1843 
1844 	index_rbio_pages(rbio);
1845 
1846 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1847 		/*
1848 		 * Now we just use the bitmap to mark the horizontal stripes in
1849 		 * which we have data when doing parity scrub.
1850 		 */
1851 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1852 		    !test_bit(pagenr, rbio->dbitmap))
1853 			continue;
1854 
1855 		/* setup our array of pointers with pages
1856 		 * from each stripe
1857 		 */
1858 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1859 			/*
1860 			 * if we're rebuilding a read, we have to use
1861 			 * pages from the bio list
1862 			 */
1863 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1864 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1865 			    (stripe == faila || stripe == failb)) {
1866 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1867 			} else {
1868 				page = rbio_stripe_page(rbio, stripe, pagenr);
1869 			}
1870 			pointers[stripe] = kmap(page);
1871 		}
1872 
1873 		/* all raid6 handling here */
1874 		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1875 			/*
1876 			 * single failure, rebuild from parity raid5
1877 			 * style
1878 			 */
1879 			if (failb < 0) {
1880 				if (faila == rbio->nr_data) {
1881 					/*
1882 					 * Just the P stripe has failed, without
1883 					 * a bad data or Q stripe.
1884 					 * TODO, we should redo the xor here.
1885 					 */
1886 					err = BLK_STS_IOERR;
1887 					goto cleanup;
1888 				}
1889 				/*
1890 				 * a single failure in raid6 is rebuilt
1891 				 * in the pstripe code below
1892 				 */
1893 				goto pstripe;
1894 			}
1895 
1896 			/* make sure our ps and qs are in order */
1897 			if (faila > failb) {
1898 				int tmp = failb;
1899 				failb = faila;
1900 				faila = tmp;
1901 			}
1902 
1903 			/* if the q stripe has failed, do a pstripe reconstruction
1904 			 * from the xors.
1905 			 * If both the q stripe and the P stripe have failed, we're
1906 			 * here due to a crc mismatch and we can't give them the
1907 			 * data they want
1908 			 */
1909 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1910 				if (rbio->bbio->raid_map[faila] ==
1911 				    RAID5_P_STRIPE) {
1912 					err = BLK_STS_IOERR;
1913 					goto cleanup;
1914 				}
1915 				/*
1916 				 * otherwise we have one bad data stripe and
1917 				 * a good P stripe.  raid5!
1918 				 */
1919 				goto pstripe;
1920 			}
1921 
1922 			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1923 				raid6_datap_recov(rbio->real_stripes,
1924 						  PAGE_SIZE, faila, pointers);
1925 			} else {
1926 				raid6_2data_recov(rbio->real_stripes,
1927 						  PAGE_SIZE, faila, failb,
1928 						  pointers);
1929 			}
1930 		} else {
1931 			void *p;
1932 
1933 			/* rebuild from P stripe here (raid5 or raid6) */
1934 			BUG_ON(failb != -1);
1935 pstripe:
1936 			/* Copy parity block into failed block to start with */
1937 			memcpy(pointers[faila],
1938 			       pointers[rbio->nr_data],
1939 			       PAGE_SIZE);
1940 
1941 			/* rearrange the pointer array */
1942 			p = pointers[faila];
1943 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1944 				pointers[stripe] = pointers[stripe + 1];
1945 			pointers[rbio->nr_data - 1] = p;
1946 
1947 			/* xor in the rest */
1948 			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1949 		}
1950 		/* if we're doing this rebuild as part of an rmw, go through
1951 		 * and set all of our private rbio pages in the
1952 		 * failed stripes as uptodate.  This way finish_rmw will
1953 		 * know they can be trusted.  If this was a read reconstruction,
1954 		 * other endio functions will fiddle the uptodate bits
1955 		 */
1956 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1957 			for (i = 0;  i < rbio->stripe_npages; i++) {
1958 				if (faila != -1) {
1959 					page = rbio_stripe_page(rbio, faila, i);
1960 					SetPageUptodate(page);
1961 				}
1962 				if (failb != -1) {
1963 					page = rbio_stripe_page(rbio, failb, i);
1964 					SetPageUptodate(page);
1965 				}
1966 			}
1967 		}
1968 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1969 			/*
1970 			 * if we're rebuilding a read, we have to use
1971 			 * pages from the bio list
1972 			 */
1973 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1974 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1975 			    (stripe == faila || stripe == failb)) {
1976 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1977 			} else {
1978 				page = rbio_stripe_page(rbio, stripe, pagenr);
1979 			}
1980 			kunmap(page);
1981 		}
1982 	}
1983 
1984 	err = BLK_STS_OK;
1985 cleanup:
1986 	kfree(pointers);
1987 
1988 cleanup_io:
1989 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1990 		/*
1991 		 * - In case of two failures, where rbio->failb != -1:
1992 		 *
1993 		 *   Do not cache this rbio, since the above read reconstruction
1994 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1995 		 *   changed the contents of some stripes so they no longer
1996 		 *   match the on-disk data; otherwise a later write/recover
1997 		 *   could steal stripe_pages from this rbio and end up with
1998 		 *   corruption or rebuild failures.
1999 		 *
2000 		 * - In case of single failure, where rbio->failb == -1:
2001 		 *
2002 		 *   Cache this rbio iff the above read reconstruction was
2003 		 *   executed without problems.
2004 		 */
2005 		if (err == BLK_STS_OK && rbio->failb < 0)
2006 			cache_rbio_pages(rbio);
2007 		else
2008 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2009 
2010 		rbio_orig_end_io(rbio, err);
2011 	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2012 		rbio_orig_end_io(rbio, err);
2013 	} else if (err == BLK_STS_OK) {
2014 		rbio->faila = -1;
2015 		rbio->failb = -1;
2016 
2017 		if (rbio->operation == BTRFS_RBIO_WRITE)
2018 			finish_rmw(rbio);
2019 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2020 			finish_parity_scrub(rbio, 0);
2021 		else
2022 			BUG();
2023 	} else {
2024 		rbio_orig_end_io(rbio, err);
2025 	}
2026 }
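
/*
 * A minimal userspace sketch of the single-failure ("pstripe") rebuild
 * performed above: the lost block equals the parity block XOR-ed with
 * every surviving data block.  The demo_* names and PAGE_SZ are
 * illustrative assumptions, not part of this file.
 */
#include <string.h>

#define PAGE_SZ 4096

static void demo_xor_rebuild(unsigned char **data, int nr_data,
			     const unsigned char *parity, int faila)
{
	int stripe;
	size_t off;

	/* start from a copy of the parity block, like the memcpy above */
	memcpy(data[faila], parity, PAGE_SZ);

	/* xor in every surviving data block to recover the lost one */
	for (stripe = 0; stripe < nr_data; stripe++) {
		if (stripe == faila)
			continue;
		for (off = 0; off < PAGE_SZ; off++)
			data[faila][off] ^= data[stripe][off];
	}
}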
2027 
2028 /*
2029  * This is called only for stripes we've read from disk to
2030  * reconstruct the parity.
2031  */
2032 static void raid_recover_end_io(struct bio *bio)
2033 {
2034 	struct btrfs_raid_bio *rbio = bio->bi_private;
2035 
2036 	/*
2037 	 * we only read stripe pages off the disk, so mark them
2038 	 * uptodate if there were no errors
2039 	 */
2040 	if (bio->bi_status)
2041 		fail_bio_stripe(rbio, bio);
2042 	else
2043 		set_bio_pages_uptodate(bio);
2044 	bio_put(bio);
2045 
2046 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2047 		return;
2048 
2049 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2050 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2051 	else
2052 		__raid_recover_end_io(rbio);
2053 }
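
/*
 * The completion pattern used above, in isolation: each finished bio
 * decrements a shared counter, and only the caller that drops it to
 * zero decides between giving up and reconstructing.  A hedged
 * userspace sketch with <stdatomic.h>; all demo_* names are
 * illustrative, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_pending {
	atomic_int stripes_pending;	/* bios still in flight */
	atomic_int error;		/* failed stripe reads so far */
	int max_errors;			/* 1 for raid5, 2 for raid6 */
};

/* one call per completed bio; returns true only for the last completion
 * and only when the error count is still within what parity can fix */
static bool demo_bio_complete(struct demo_pending *p, bool failed)
{
	if (failed)
		atomic_fetch_add(&p->error, 1);
	if (atomic_fetch_sub(&p->stripes_pending, 1) != 1)
		return false;		/* not the last bio yet */
	return atomic_load(&p->error) <= p->max_errors;
}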
2054 
2055 /*
2056  * reads everything we need off the disk to reconstruct
2057  * the parity. endio handlers trigger final reconstruction
2058  * when the IO is done.
2059  *
2060  * This is used both for reads from the higher layers and for
2061  * parity construction required to finish an rmw cycle.
2062  */
2063 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2064 {
2065 	int bios_to_read = 0;
2066 	struct bio_list bio_list;
2067 	int ret;
2068 	int pagenr;
2069 	int stripe;
2070 	struct bio *bio;
2071 
2072 	bio_list_init(&bio_list);
2073 
2074 	ret = alloc_rbio_pages(rbio);
2075 	if (ret)
2076 		goto cleanup;
2077 
2078 	atomic_set(&rbio->error, 0);
2079 
2080 	/*
2081 	 * read everything that hasn't failed.  Thanks to the
2082 	 * stripe cache, it is possible that some or all of these
2083 	 * pages are going to be uptodate.
2084 	 */
2085 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2086 		if (rbio->faila == stripe || rbio->failb == stripe) {
2087 			atomic_inc(&rbio->error);
2088 			continue;
2089 		}
2090 
2091 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2092 			struct page *p;
2093 
2094 			/*
2095 			 * the rmw code may have already read this
2096 			 * page in
2097 			 */
2098 			p = rbio_stripe_page(rbio, stripe, pagenr);
2099 			if (PageUptodate(p))
2100 				continue;
2101 
2102 			ret = rbio_add_io_page(rbio, &bio_list,
2103 				       rbio_stripe_page(rbio, stripe, pagenr),
2104 				       stripe, pagenr, rbio->stripe_len);
2105 			if (ret < 0)
2106 				goto cleanup;
2107 		}
2108 	}
2109 
2110 	bios_to_read = bio_list_size(&bio_list);
2111 	if (!bios_to_read) {
2112 		/*
2113 		 * we might have no bios to read just because the pages
2114 		 * were up to date, or we might have no bios to read because
2115 		 * the devices were gone.
2116 		 */
2117 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2118 			__raid_recover_end_io(rbio);
2119 			goto out;
2120 		} else {
2121 			goto cleanup;
2122 		}
2123 	}
2124 
2125 	/*
2126 	 * the bbio may be freed once we submit the last bio.  Make sure
2127 	 * not to touch it after that
2128 	 */
2129 	atomic_set(&rbio->stripes_pending, bios_to_read);
2130 	while ((bio = bio_list_pop(&bio_list))) {
2134 
2135 		bio->bi_private = rbio;
2136 		bio->bi_end_io = raid_recover_end_io;
2137 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2138 
2139 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2140 
2141 		submit_bio(bio);
2142 	}
2143 out:
2144 	return 0;
2145 
2146 cleanup:
2147 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2148 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2149 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2150 
2151 	while ((bio = bio_list_pop(&bio_list)))
2152 		bio_put(bio);
2153 
2154 	return -EIO;
2155 }
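
/*
 * A compact sketch of the read-set selection above: failed stripes are
 * counted as errors up front, already-uptodate pages (e.g. from the
 * stripe cache) are skipped, and everything else must be read.  The
 * demo_* names are illustrative; uptodate is a flat [stripe][page]
 * flag array standing in for PageUptodate().
 */
static int demo_count_reads(int real_stripes, int stripe_npages,
			    int faila, int failb,
			    const unsigned char *uptodate, int *errors)
{
	int stripe, pagenr, reads = 0;

	*errors = 0;
	for (stripe = 0; stripe < real_stripes; stripe++) {
		if (stripe == faila || stripe == failb) {
			(*errors)++;	/* nothing readable here */
			continue;
		}
		for (pagenr = 0; pagenr < stripe_npages; pagenr++)
			if (!uptodate[stripe * stripe_npages + pagenr])
				reads++;	/* must hit the disk */
	}
	return reads;
}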
2156 
2157 /*
2158  * the main entry point for reads from the higher layers.  This
2159  * is really only called when the normal read path had a failure,
2160  * so we assume the bio they send down corresponds to a failed part
2161  * of the drive.
2162  */
2163 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2164 			  struct btrfs_bio *bbio, u64 stripe_len,
2165 			  int mirror_num, int generic_io)
2166 {
2167 	struct btrfs_raid_bio *rbio;
2168 	int ret;
2169 
2170 	if (generic_io) {
2171 		ASSERT(bbio->mirror_num == mirror_num);
2172 		btrfs_io_bio(bio)->mirror_num = mirror_num;
2173 	}
2174 
2175 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2176 	if (IS_ERR(rbio)) {
2177 		if (generic_io)
2178 			btrfs_put_bbio(bbio);
2179 		return PTR_ERR(rbio);
2180 	}
2181 
2182 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2183 	bio_list_add(&rbio->bio_list, bio);
2184 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2185 
2186 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2187 	if (rbio->faila == -1) {
2188 		btrfs_warn(fs_info,
2189 	"%s could not find the bad stripe in raid56, so we cannot recover it (bio has logical %llu len %llu, bbio has map_type %llu)",
2190 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2191 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2192 		if (generic_io)
2193 			btrfs_put_bbio(bbio);
2194 		kfree(rbio);
2195 		return -EIO;
2196 	}
2197 
2198 	if (generic_io) {
2199 		btrfs_bio_counter_inc_noblocked(fs_info);
2200 		rbio->generic_bio_cnt = 1;
2201 	} else {
2202 		btrfs_get_bbio(bbio);
2203 	}
2204 
2205 	/*
2206 	 * Retry loop: for 'mirror_num == 2', reconstruct from all other
2207 	 * stripes.  For 'mirror_num > 2', pick one extra stripe to fail
2208 	 * on each retry (see the sketch after this function).
2209 	 */
2210 	if (mirror_num > 2) {
2211 		/*
2212 		 * 'mirror == 3' is to fail the p stripe and
2213 		 * reconstruct from the q stripe.  'mirror > 3' is to
2214 		 * fail a data stripe and reconstruct from p+q stripe.
2215 		 */
2216 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2217 		ASSERT(rbio->failb > 0);
2218 		if (rbio->failb <= rbio->faila)
2219 			rbio->failb--;
2220 	}
2221 
2222 	ret = lock_stripe_add(rbio);
2223 
2224 	/*
2225 	 * __raid56_parity_recover will end the bio with
2226 	 * any errors it hits.  We don't want to return
2227 	 * its error value up the stack because our caller
2228 	 * will end up calling bio_endio with any nonzero
2229 	 * return
2230 	 */
2231 	if (ret == 0)
2232 		__raid56_parity_recover(rbio);
2233 	/*
2234 	 * our rbio has been added to the list of
2235 	 * rbios that will be handled after the
2236 	 * current lock owner is done
2237 	 */
2238 	return 0;
2239 
2240 }
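
/*
 * The retry mapping referenced above, isolated as a pure function:
 * mirror_num 3 fails the P stripe, higher mirrors walk backwards
 * through the data stripes, stepping over the stripe that already
 * failed.  demo_pick_failb is an illustrative name, not a kernel API.
 */
static int demo_pick_failb(int real_stripes, int mirror_num, int faila)
{
	int failb = real_stripes - (mirror_num - 1);

	if (failb <= faila)	/* don't fail the same stripe twice */
		failb--;
	return failb;
}

/*
 * e.g. RAID6 with 4 stripes (data 0, data 1, P = 2, Q = 3) and faila = 0:
 *   mirror_num 3 -> failb = 2 (fail P, rebuild from Q)
 *   mirror_num 4 -> failb = 1 (fail data 1, rebuild from P + Q)
 */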
2241 
2242 static void rmw_work(struct btrfs_work *work)
2243 {
2244 	struct btrfs_raid_bio *rbio;
2245 
2246 	rbio = container_of(work, struct btrfs_raid_bio, work);
2247 	raid56_rmw_stripe(rbio);
2248 }
2249 
2250 static void read_rebuild_work(struct btrfs_work *work)
2251 {
2252 	struct btrfs_raid_bio *rbio;
2253 
2254 	rbio = container_of(work, struct btrfs_raid_bio, work);
2255 	__raid56_parity_recover(rbio);
2256 }
2257 
2258 /*
2259  * The following code is used to scrub/replace the parity stripe
2260  *
2261  * Caller must have already increased bio_counter for getting @bbio.
2262  *
2263  * Note: we must make sure all the pages added to the scrub/replace
2264  * raid bio are correct and will not change during the scrub/replace,
2265  * i.e. they hold only metadata or file data protected by checksums.
2266  */
2267 
2268 struct btrfs_raid_bio *
2269 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2270 			       struct btrfs_bio *bbio, u64 stripe_len,
2271 			       struct btrfs_device *scrub_dev,
2272 			       unsigned long *dbitmap, int stripe_nsectors)
2273 {
2274 	struct btrfs_raid_bio *rbio;
2275 	int i;
2276 
2277 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2278 	if (IS_ERR(rbio))
2279 		return NULL;
2280 	bio_list_add(&rbio->bio_list, bio);
2281 	/*
2282 	 * This is a special bio which is used to hold the completion handler
2283 	 * and make the scrub rbio look similar to the other rbio types
2284 	 */
2285 	ASSERT(!bio->bi_iter.bi_size);
2286 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2287 
2288 	/*
2289 	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2290 	 * to the end position, so this search can start from the first parity
2291 	 * stripe.
2292 	 */
2293 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2294 		if (bbio->stripes[i].dev == scrub_dev) {
2295 			rbio->scrubp = i;
2296 			break;
2297 		}
2298 	}
2299 	ASSERT(i < rbio->real_stripes);
2300 
2301 	/* For now we only support a sectorsize equal to PAGE_SIZE */
2302 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2303 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2304 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2305 
2306 	/*
2307 	 * We already increased bio_counter when getting bbio; record that
2308 	 * here so the reference is dropped at rbio_orig_end_io().
2309 	 */
2310 	rbio->generic_bio_cnt = 1;
2311 
2312 	return rbio;
2313 }
2314 
2315 /* Used for both parity scrub and missing. */
2316 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2317 			    u64 logical)
2318 {
2319 	int stripe_offset;
2320 	int index;
2321 
2322 	ASSERT(logical >= rbio->bbio->raid_map[0]);
2323 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2324 				rbio->stripe_len * rbio->nr_data);
2325 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2326 	index = stripe_offset >> PAGE_SHIFT;
2327 	rbio->bio_pages[index] = page;
2328 }
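
/*
 * The index math above with concrete numbers: a page is addressed by
 * its byte offset from the start of the full stripe (raid_map[0]),
 * shifted down to a page index.  A hedged sketch assuming 4 KiB pages;
 * demo_page_index is illustrative.
 */
static int demo_page_index(unsigned long long logical,
			   unsigned long long raid_map0)
{
	return (int)((logical - raid_map0) >> 12);	/* 4 KiB pages */
}

/* e.g. raid_map0 = 1048576 and logical = 1089536 (offset 40960) -> index 10 */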
2329 
2330 /*
2331  * We only scrub parity for horizontal stripes on which we have correct
2332  * data, so we needn't allocate all pages for all the stripes.
2333  */
2334 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2335 {
2336 	int i;
2337 	int bit;
2338 	int index;
2339 	struct page *page;
2340 
2341 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2342 		for (i = 0; i < rbio->real_stripes; i++) {
2343 			index = i * rbio->stripe_npages + bit;
2344 			if (rbio->stripe_pages[index])
2345 				continue;
2346 
2347 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2348 			if (!page)
2349 				return -ENOMEM;
2350 			rbio->stripe_pages[index] = page;
2351 		}
2352 	}
2353 	return 0;
2354 }
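
/*
 * A sketch of the sparse allocation above: stripe_pages is a flat
 * array laid out stripe-major, so only the horizontal stripes marked
 * in the bitmap get pages.  The demo_* names and the plain unsigned
 * long bitmap (at most BITS_PER_LONG pages) are simplifying
 * assumptions.
 */
static void demo_mark_needed(unsigned long dbitmap, int stripe_npages,
			     int real_stripes, unsigned char *needed)
{
	int bit, i;

	for (bit = 0; bit < stripe_npages; bit++) {
		if (!(dbitmap & (1UL << bit)))
			continue;	/* no data on this row */
		for (i = 0; i < real_stripes; i++)
			needed[i * stripe_npages + bit] = 1;
	}
}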
2355 
2356 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2357 					 int need_check)
2358 {
2359 	struct btrfs_bio *bbio = rbio->bbio;
2360 	void *pointers[rbio->real_stripes];
2361 	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2362 	int nr_data = rbio->nr_data;
2363 	int stripe;
2364 	int pagenr;
2365 	int p_stripe = -1;
2366 	int q_stripe = -1;
2367 	struct page *p_page = NULL;
2368 	struct page *q_page = NULL;
2369 	struct bio_list bio_list;
2370 	struct bio *bio;
2371 	int is_replace = 0;
2372 	int ret;
2373 
2374 	bio_list_init(&bio_list);
2375 
2376 	if (rbio->real_stripes - rbio->nr_data == 1) {
2377 		p_stripe = rbio->real_stripes - 1;
2378 	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2379 		p_stripe = rbio->real_stripes - 2;
2380 		q_stripe = rbio->real_stripes - 1;
2381 	} else {
2382 		BUG();
2383 	}
2384 
2385 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2386 		is_replace = 1;
2387 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2388 	}
2389 
2390 	/*
2391 	 * The higher layers (the scrubber) are unlikely to use this
2392 	 * area of the disk again soon, so don't cache it.
2393 	 */
2395 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2396 
2397 	if (!need_check)
2398 		goto writeback;
2399 
2400 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2401 	if (!p_page)
2402 		goto cleanup;
2403 	SetPageUptodate(p_page);
2404 
2405 	if (q_stripe != -1) {
2406 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2407 		if (!q_page) {
2408 			__free_page(p_page);
2409 			goto cleanup;
2410 		}
2411 		SetPageUptodate(q_page);
2412 	}
2413 
2414 	atomic_set(&rbio->error, 0);
2415 
2416 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2417 		struct page *p;
2418 		void *parity;
2419 		/* first collect one page from each data stripe */
2420 		for (stripe = 0; stripe < nr_data; stripe++) {
2421 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2422 			pointers[stripe] = kmap(p);
2423 		}
2424 
2425 		/* then add the parity stripe */
2426 		pointers[stripe++] = kmap(p_page);
2427 
2428 		if (q_stripe != -1) {
2429 
2430 			/*
2431 			 * raid6, add the qstripe and call the
2432 			 * library function to fill in our p/q
2433 			 */
2434 			pointers[stripe++] = kmap(q_page);
2435 
2436 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2437 						pointers);
2438 		} else {
2439 			/* raid5 */
2440 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2441 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2442 		}
2443 
2444 		/* Check the parity being scrubbed; repair it if it's wrong */
2445 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2446 		parity = kmap(p);
2447 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2448 			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2449 		else
2450 			/* Parity is correct, no need to write it back */
2451 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2452 		kunmap(p);
2453 
2454 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2455 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2456 	}
2457 
2458 	__free_page(p_page);
2459 	if (q_page)
2460 		__free_page(q_page);
2461 
2462 writeback:
2463 	/*
2464 	 * time to start writing.  Make bios for everything from the
2465 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2466 	 * everything else.
2467 	 */
2468 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2469 		struct page *page;
2470 
2471 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2472 		ret = rbio_add_io_page(rbio, &bio_list,
2473 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2474 		if (ret)
2475 			goto cleanup;
2476 	}
2477 
2478 	if (!is_replace)
2479 		goto submit_write;
2480 
2481 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2482 		struct page *page;
2483 
2484 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2485 		ret = rbio_add_io_page(rbio, &bio_list, page,
2486 				       bbio->tgtdev_map[rbio->scrubp],
2487 				       pagenr, rbio->stripe_len);
2488 		if (ret)
2489 			goto cleanup;
2490 	}
2491 
2492 submit_write:
2493 	nr_data = bio_list_size(&bio_list);
2494 	if (!nr_data) {
2495 		/* All the parity was correct, nothing to write back */
2496 		rbio_orig_end_io(rbio, BLK_STS_OK);
2497 		return;
2498 	}
2499 
2500 	atomic_set(&rbio->stripes_pending, nr_data);
2501 
2502 	while ((bio = bio_list_pop(&bio_list))) {
2506 
2507 		bio->bi_private = rbio;
2508 		bio->bi_end_io = raid_write_end_io;
2509 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2510 
2511 		submit_bio(bio);
2512 	}
2513 	return;
2514 
2515 cleanup:
2516 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2517 
2518 	while ((bio = bio_list_pop(&bio_list)))
2519 		bio_put(bio);
2520 }
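
/*
 * The raid5 branch of the function above, in isolation: the expected
 * parity is the XOR of all data blocks, and only pages whose on-disk
 * parity differs are repaired and queued for writeback.  A hedged
 * userspace sketch; the demo_* names are illustrative.
 */
#include <stdbool.h>
#include <string.h>

static void demo_gen_raid5_parity(unsigned char *parity,
				  unsigned char * const *data,
				  int nr_data, size_t len)
{
	size_t off;
	int i;

	memcpy(parity, data[0], len);		/* like the memcpy above */
	for (i = 1; i < nr_data; i++)		/* like run_xor above */
		for (off = 0; off < len; off++)
			parity[off] ^= data[i][off];
}

/* returns true when the page changed and therefore needs writeback */
static bool demo_scrub_parity_page(unsigned char *ondisk,
				   const unsigned char *expected, size_t len)
{
	if (!memcmp(ondisk, expected, len))
		return false;		/* parity already correct */
	memcpy(ondisk, expected, len);	/* repair the in-memory copy */
	return true;
}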
2521 
2522 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2523 {
2524 	if (stripe >= 0 && stripe < rbio->nr_data)
2525 		return 1;
2526 	return 0;
2527 }
2528 
2529 /*
2530  * While we're doing the parity check and repair, we could have errors
2531  * in reading pages off the disk.  This checks for errors and if we're
2532  * not able to read the page it'll trigger parity reconstruction.  The
2533  * parity scrub will be finished after we've reconstructed the failed
2534  * stripes
2535  */
2536 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2537 {
2538 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2539 		goto cleanup;
2540 
2541 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2542 		int dfail = 0, failp = -1;
2543 
2544 		if (is_data_stripe(rbio, rbio->faila))
2545 			dfail++;
2546 		else if (is_parity_stripe(rbio->faila))
2547 			failp = rbio->faila;
2548 
2549 		if (is_data_stripe(rbio, rbio->failb))
2550 			dfail++;
2551 		else if (is_parity_stripe(rbio->failb))
2552 			failp = rbio->failb;
2553 
2554 		/*
2555 		 * We cannot use the parity that is being scrubbed to
2556 		 * repair the data, so our repair capability is reduced.
2557 		 * (In the RAID5 case we cannot repair anything.)
2558 		 */
2559 		if (dfail > rbio->bbio->max_errors - 1)
2560 			goto cleanup;
2561 
2562 		/*
2563 		 * If all the data is good and only the parity is bad,
2564 		 * just repair the parity.
2565 		 */
2566 		if (dfail == 0) {
2567 			finish_parity_scrub(rbio, 0);
2568 			return;
2569 		}
2570 
2571 		/*
2572 		 * Getting here means one corrupted data stripe and one
2573 		 * corrupted parity stripe on RAID6.  If the corrupted parity
2574 		 * is the one being scrubbed, we can luckily use the other
2575 		 * parity to repair the data; otherwise the data is lost.
2576 		 */
2577 		if (failp != rbio->scrubp)
2578 			goto cleanup;
2579 
2580 		__raid_recover_end_io(rbio);
2581 	} else {
2582 		finish_parity_scrub(rbio, 1);
2583 	}
2584 	return;
2585 
2586 cleanup:
2587 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2588 }
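
/*
 * The decision logic above as a standalone table, assuming faila/failb
 * have already been classified into dfail (number of failed data
 * stripes) and failp (a failed parity stripe, or -1).  The demo_*
 * names and the enum are illustrative only.
 */
enum demo_scrub_action {
	DEMO_GIVE_UP,		/* too much damage, end with an error */
	DEMO_WRITE_PARITY,	/* data is fine, just rewrite parity */
	DEMO_RECONSTRUCT,	/* rebuild data first, then finish scrub */
};

static enum demo_scrub_action demo_scrub_decide(int dfail, int failp,
						int scrubp, int max_errors)
{
	if (dfail > max_errors - 1)
		return DEMO_GIVE_UP;	/* scrubbed parity can't help repair */
	if (dfail == 0)
		return DEMO_WRITE_PARITY;
	if (failp >= 0 && failp != scrubp)
		return DEMO_GIVE_UP;	/* the surviving parity is the bad one */
	return DEMO_RECONSTRUCT;
}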
2589 
2590 /*
2591  * end io for the read phase of the scrub cycle.  All the bios here are physical
2592  * stripe bios we've read from the disk so we can recalculate the parity of the
2593  * stripe.
2594  *
2595  * This will usually kick off finish_parity_scrub once all the bios are read in,
2596  * but it may trigger parity reconstruction if we had any errors along the way
2597  */
2598 static void raid56_parity_scrub_end_io(struct bio *bio)
2599 {
2600 	struct btrfs_raid_bio *rbio = bio->bi_private;
2601 
2602 	if (bio->bi_status)
2603 		fail_bio_stripe(rbio, bio);
2604 	else
2605 		set_bio_pages_uptodate(bio);
2606 
2607 	bio_put(bio);
2608 
2609 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2610 		return;
2611 
2612 	/*
2613 	 * this will normally call finish_parity_scrub to start our
2614 	 * write, but if there are any failed stripes we'll reconstruct
2615 	 * from parity first
2616 	 */
2617 	validate_rbio_for_parity_scrub(rbio);
2618 }
2619 
2620 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2621 {
2622 	int bios_to_read = 0;
2623 	struct bio_list bio_list;
2624 	int ret;
2625 	int pagenr;
2626 	int stripe;
2627 	struct bio *bio;
2628 
2629 	bio_list_init(&bio_list);
2630 
2631 	ret = alloc_rbio_essential_pages(rbio);
2632 	if (ret)
2633 		goto cleanup;
2634 
2635 	atomic_set(&rbio->error, 0);
2636 	/*
2637 	 * build a list of bios to read all the missing parts of this
2638 	 * stripe
2639 	 */
2640 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2641 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2642 			struct page *page;
2643 			/*
2644 			 * we want to find all the pages missing from
2645 			 * the rbio and read them from the disk.  If
2646 			 * page_in_rbio finds a page in the bio list
2647 			 * we don't need to read it off the stripe.
2648 			 */
2649 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2650 			if (page)
2651 				continue;
2652 
2653 			page = rbio_stripe_page(rbio, stripe, pagenr);
2654 			/*
2655 			 * the bio cache may have handed us an uptodate
2656 			 * page.  If so, be happy and use it
2657 			 */
2658 			if (PageUptodate(page))
2659 				continue;
2660 
2661 			ret = rbio_add_io_page(rbio, &bio_list, page,
2662 				       stripe, pagenr, rbio->stripe_len);
2663 			if (ret)
2664 				goto cleanup;
2665 		}
2666 	}
2667 
2668 	bios_to_read = bio_list_size(&bio_list);
2669 	if (!bios_to_read) {
2670 		/*
2671 		 * this can happen if others have merged with
2672 		 * us, it means there is nothing left to read.
2673 		 * But if there are missing devices it may not be
2674 		 * safe to do the full stripe write yet.
2675 		 */
2676 		goto finish;
2677 	}
2678 
2679 	/*
2680 	 * the bbio may be freed once we submit the last bio.  Make sure
2681 	 * not to touch it after that
2682 	 */
2683 	atomic_set(&rbio->stripes_pending, bios_to_read);
2684 	while ((bio = bio_list_pop(&bio_list))) {
2688 
2689 		bio->bi_private = rbio;
2690 		bio->bi_end_io = raid56_parity_scrub_end_io;
2691 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2692 
2693 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2694 
2695 		submit_bio(bio);
2696 	}
2697 	/* the actual write will happen once the reads are done */
2698 	return;
2699 
2700 cleanup:
2701 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2702 
2703 	while ((bio = bio_list_pop(&bio_list)))
2704 		bio_put(bio);
2705 
2706 	return;
2707 
2708 finish:
2709 	validate_rbio_for_parity_scrub(rbio);
2710 }
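
/*
 * The per-page selection above as a fallback chain: a page supplied by
 * the bio list wins, then a cached uptodate stripe page, and only the
 * remainder is read from disk.  The demo_* names are illustrative.
 */
enum demo_page_source {
	DEMO_FROM_BIO_LIST,	/* caller already gave us this page */
	DEMO_FROM_CACHE,	/* stripe cache left it uptodate */
	DEMO_NEEDS_READ,	/* must be read off the disk */
};

static enum demo_page_source demo_classify(int in_bio_list, int uptodate)
{
	if (in_bio_list)
		return DEMO_FROM_BIO_LIST;
	if (uptodate)
		return DEMO_FROM_CACHE;
	return DEMO_NEEDS_READ;
}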
2711 
2712 static void scrub_parity_work(struct btrfs_work *work)
2713 {
2714 	struct btrfs_raid_bio *rbio;
2715 
2716 	rbio = container_of(work, struct btrfs_raid_bio, work);
2717 	raid56_parity_scrub_stripe(rbio);
2718 }
2719 
2720 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2721 {
2722 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2723 			scrub_parity_work, NULL, NULL);
2724 
2725 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2726 }
2727 
2728 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2729 {
2730 	if (!lock_stripe_add(rbio))
2731 		async_scrub_parity(rbio);
2732 }
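
/*
 * The submit pattern shared by the scrub and missing paths: whoever
 * wins the full-stripe lock starts the work, everyone else is queued
 * behind the lock holder and run later.  A toy sketch with a single
 * atomic flag standing in for the hash-table stripe lock; the demo_*
 * names are illustrative, and the real code also merges compatible
 * rbios instead of merely queueing them.
 */
#include <stdatomic.h>

static atomic_flag demo_stripe_lock = ATOMIC_FLAG_INIT;

/* returns 0 when the caller now owns the stripe and must do the work */
static int demo_lock_stripe_add(void)
{
	return atomic_flag_test_and_set(&demo_stripe_lock) ? 1 : 0;
}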
2733 
2734 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2735 
2736 struct btrfs_raid_bio *
2737 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2738 			  struct btrfs_bio *bbio, u64 length)
2739 {
2740 	struct btrfs_raid_bio *rbio;
2741 
2742 	rbio = alloc_rbio(fs_info, bbio, length);
2743 	if (IS_ERR(rbio))
2744 		return NULL;
2745 
2746 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2747 	bio_list_add(&rbio->bio_list, bio);
2748 	/*
2749 	 * This is a special bio which is used to hold the completion handler
2750 	 * and make the missing rbio look similar to the other rbio types
2751 	 */
2752 	ASSERT(!bio->bi_iter.bi_size);
2753 
2754 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2755 	if (rbio->faila == -1) {
2756 		BUG();
2757 		kfree(rbio);
2758 		return NULL;
2759 	}
2760 
2761 	/*
2762 	 * When we got the bbio we already increased bio_counter; record
2763 	 * that here so the reference is dropped at rbio_orig_end_io()
2764 	 */
2765 	rbio->generic_bio_cnt = 1;
2766 
2767 	return rbio;
2768 }
2769 
2770 static void missing_raid56_work(struct btrfs_work *work)
2771 {
2772 	struct btrfs_raid_bio *rbio;
2773 
2774 	rbio = container_of(work, struct btrfs_raid_bio, work);
2775 	__raid56_parity_recover(rbio);
2776 }
2777 
2778 static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2779 {
2780 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2781 			missing_raid56_work, NULL, NULL);
2782 
2783 	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2784 }
2785 
2786 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2787 {
2788 	if (!lock_stripe_add(rbio))
2789 		async_missing_raid56(rbio);
2790 }
2791