xref: /openbmc/linux/drivers/md/raid10.c (revision ae213c44)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * raid10.c : Multiple Devices driver for Linux
4  *
5  * Copyright (C) 2000-2004 Neil Brown
6  *
7  * RAID-10 support for md.
8  *
9  * Based on code in raid1.c.  See raid1.c for further copyright information.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14 #include <linux/blkdev.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/ratelimit.h>
18 #include <linux/kthread.h>
19 #include <linux/raid/md_p.h>
20 #include <trace/events/block.h>
21 #include "md.h"
22 #include "raid10.h"
23 #include "raid0.h"
24 #include "md-bitmap.h"
25 
26 /*
27  * RAID10 provides a combination of RAID0 and RAID1 functionality.
28  * The layout of data is defined by
29  *    chunk_size
30  *    raid_disks
31  *    near_copies (stored in low byte of layout)
32  *    far_copies (stored in second byte of layout)
33  *    far_offset (stored in bit 16 of layout)
34  *    use_far_sets (stored in bit 17 of layout)
35  *    use_far_sets_bugfixed (stored in bit 18 of layout)
36  *
37  * The data to be stored is divided into chunks using chunksize.  Each device
38  * is divided into far_copies sections.  In each section, chunks are laid out
39  * in a style similar to raid0, but near_copies copies of each chunk are stored
40  * (each on a different drive).  The starting device for each section is offset
41  * near_copies from the starting device of the previous section.  Thus there
42  * are (near_copies * far_copies) of each chunk, and each is on a different
43  * drive.  near_copies and far_copies must be at least one, and their product
44  * is at most raid_disks.
45  *
46  * If far_offset is true, then the far_copies are handled a bit differently.
47  * The copies are still in different stripes, but instead of being very far
48  * apart on disk, they are in adjacent stripes.
49  *
50  * The far and offset algorithms are handled slightly differently if
51  * 'use_far_sets' is true.  In this case, the array's devices are grouped into
52  * sets that are (near_copies * far_copies) in size.  The far copied stripes
53  * are still shifted by 'near_copies' devices, but this shifting stays confined
54  * to the set rather than the entire array.  This is done to improve the number
55  * of device combinations that can fail without causing the array to fail.
56  * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
57  * on a device):
58  *    A B C D    A B C D E
59  *      ...         ...
60  *    D A B C    E A B C D
61  * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
62  *    [A B] [C D]    [A B] [C D E]
63  *    |...| |...|    |...| | ... |
64  *    [B A] [D C]    [B A] [E C D]
65  */
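
/*
 * Illustrative decode of the layout word (sketch only, mirroring the
 * description above; not part of the driver):
 *
 *	int near_copies  = layout & 0xff;
 *	int far_copies   = (layout >> 8) & 0xff;
 *	int far_offset   = (layout >> 16) & 1;
 *	int use_far_sets = (layout >> 17) & 1;
 *
 * e.g. the common "n2" layout word is 0x102: near_copies == 2,
 * far_copies == 1.
 */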
66 
67 /*
68  * Number of guaranteed r10bios in case of extreme VM load:
69  */
70 #define	NR_RAID10_BIOS 256
71 
72 /* when we get a read error on a read-only array, we redirect to another
73  * device without failing the first device, or trying to over-write to
74  * correct the read error.  To keep track of bad blocks on a per-bio
75  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
76  */
77 #define IO_BLOCKED ((struct bio *)1)
78 /* When we successfully write to a known bad-block, we need to remove the
79  * bad-block marking which must be done from process context.  So we record
80  * the success by setting devs[n].bio to IO_MADE_GOOD
81  */
82 #define IO_MADE_GOOD ((struct bio *)2)
83 
84 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
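
/*
 * Sketch of the intended use of the markers above (see put_all_bios()
 * below): the special values must be filtered out before the pointer
 * is treated as a real bio:
 *
 *	if (!BIO_SPECIAL(*bio))
 *		bio_put(*bio);
 */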
85 
86 /* When there are this many requests queued to be written by
87  * the raid10 thread, we become 'congested' to provide back-pressure
88  * for writeback.
89  */
90 static int max_queued_requests = 1024;
91 
92 static void allow_barrier(struct r10conf *conf);
93 static void lower_barrier(struct r10conf *conf);
94 static int _enough(struct r10conf *conf, int previous, int ignore);
95 static int enough(struct r10conf *conf, int ignore);
96 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
97 				int *skipped);
98 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
99 static void end_reshape_write(struct bio *bio);
100 static void end_reshape(struct r10conf *conf);
101 
102 #define raid10_log(md, fmt, args...)				\
103 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
104 
105 #include "raid1-10.c"
106 
107 /*
108  * For a resync bio, the r10bio pointer can be retrieved from the per-bio
109  * 'struct resync_pages'.
110  */
111 static inline struct r10bio *get_resync_r10bio(struct bio *bio)
112 {
113 	return get_resync_pages(bio)->raid_bio;
114 }
115 
116 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
117 {
118 	struct r10conf *conf = data;
119 	int size = offsetof(struct r10bio, devs[conf->copies]);
120 
121 	/* allocate a r10bio with room for conf->copies entries in the
122 	 * devs array */
123 	return kzalloc(size, gfp_flags);
124 }
125 
126 static void r10bio_pool_free(void *r10_bio, void *data)
127 {
128 	kfree(r10_bio);
129 }
130 
131 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
132 /* amount of memory to reserve for resync requests */
133 #define RESYNC_WINDOW (1024*1024)
134 /* maximum number of concurrent requests, memory permitting */
135 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
136 #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
137 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
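
/*
 * For scale (assuming the 64K RESYNC_BLOCK_SIZE from raid1-10.c):
 * RESYNC_SECTORS == 128, RESYNC_DEPTH == 512, and the cluster resync
 * window is 32M, i.e. 65536 sectors.
 */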
138 
139 /*
140  * When performing a resync, we need to read and compare, so
141  * we need as many pages as there are copies.
142  * When performing a recovery, we need 2 bios, one for read,
143  * one for write (we recover only one drive per r10buf)
144  *
145  */
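/*
 * For example, with conf->copies == 2 and replacements configured, a
 * resync r10buf gets 2 bios plus 2 replacement bios and 4
 * resync_pages entries; a recovery r10buf gets nalloc == 2 regardless
 * of the copy count (one bio to read, one to write).
 */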
146 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
147 {
148 	struct r10conf *conf = data;
149 	struct r10bio *r10_bio;
150 	struct bio *bio;
151 	int j;
152 	int nalloc, nalloc_rp;
153 	struct resync_pages *rps;
154 
155 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
156 	if (!r10_bio)
157 		return NULL;
158 
159 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
160 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
161 		nalloc = conf->copies; /* resync */
162 	else
163 		nalloc = 2; /* recovery */
164 
165 	/* allocate once for all bios */
166 	if (!conf->have_replacement)
167 		nalloc_rp = nalloc;
168 	else
169 		nalloc_rp = nalloc * 2;
170 	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
171 	if (!rps)
172 		goto out_free_r10bio;
173 
174 	/*
175 	 * Allocate bios.
176 	 */
177 	for (j = nalloc ; j-- ; ) {
178 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
179 		if (!bio)
180 			goto out_free_bio;
181 		r10_bio->devs[j].bio = bio;
182 		if (!conf->have_replacement)
183 			continue;
184 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
185 		if (!bio)
186 			goto out_free_bio;
187 		r10_bio->devs[j].repl_bio = bio;
188 	}
189 	/*
190 	 * Allocate RESYNC_PAGES data pages and attach them
191 	 * where needed.
192 	 */
193 	for (j = 0; j < nalloc; j++) {
194 		struct bio *rbio = r10_bio->devs[j].repl_bio;
195 		struct resync_pages *rp, *rp_repl;
196 
197 		rp = &rps[j];
198 		if (rbio)
199 			rp_repl = &rps[nalloc + j];
200 
201 		bio = r10_bio->devs[j].bio;
202 
203 		if (!j || test_bit(MD_RECOVERY_SYNC,
204 				   &conf->mddev->recovery)) {
205 			if (resync_alloc_pages(rp, gfp_flags))
206 				goto out_free_pages;
207 		} else {
208 			memcpy(rp, &rps[0], sizeof(*rp));
209 			resync_get_all_pages(rp);
210 		}
211 
212 		rp->raid_bio = r10_bio;
213 		bio->bi_private = rp;
214 		if (rbio) {
215 			memcpy(rp_repl, rp, sizeof(*rp));
216 			rbio->bi_private = rp_repl;
217 		}
218 	}
219 
220 	return r10_bio;
221 
222 out_free_pages:
223 	while (--j >= 0)
224 		resync_free_pages(&rps[j * 2]);
225 
226 	j = 0;
227 out_free_bio:
228 	for ( ; j < nalloc; j++) {
229 		if (r10_bio->devs[j].bio)
230 			bio_put(r10_bio->devs[j].bio);
231 		if (r10_bio->devs[j].repl_bio)
232 			bio_put(r10_bio->devs[j].repl_bio);
233 	}
234 	kfree(rps);
235 out_free_r10bio:
236 	r10bio_pool_free(r10_bio, conf);
237 	return NULL;
238 }
239 
240 static void r10buf_pool_free(void *__r10_bio, void *data)
241 {
242 	struct r10conf *conf = data;
243 	struct r10bio *r10bio = __r10_bio;
244 	int j;
245 	struct resync_pages *rp = NULL;
246 
247 	for (j = conf->copies; j--; ) {
248 		struct bio *bio = r10bio->devs[j].bio;
249 
250 		if (bio) {
251 			rp = get_resync_pages(bio);
252 			resync_free_pages(rp);
253 			bio_put(bio);
254 		}
255 
256 		bio = r10bio->devs[j].repl_bio;
257 		if (bio)
258 			bio_put(bio);
259 	}
260 
261 	/* resync pages array stored in the 1st bio's .bi_private */
262 	kfree(rp);
263 
264 	r10bio_pool_free(r10bio, conf);
265 }
266 
267 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
268 {
269 	int i;
270 
271 	for (i = 0; i < conf->copies; i++) {
272 		struct bio **bio = &r10_bio->devs[i].bio;
273 		if (!BIO_SPECIAL(*bio))
274 			bio_put(*bio);
275 		*bio = NULL;
276 		bio = &r10_bio->devs[i].repl_bio;
277 		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
278 			bio_put(*bio);
279 		*bio = NULL;
280 	}
281 }
282 
283 static void free_r10bio(struct r10bio *r10_bio)
284 {
285 	struct r10conf *conf = r10_bio->mddev->private;
286 
287 	put_all_bios(conf, r10_bio);
288 	mempool_free(r10_bio, &conf->r10bio_pool);
289 }
290 
291 static void put_buf(struct r10bio *r10_bio)
292 {
293 	struct r10conf *conf = r10_bio->mddev->private;
294 
295 	mempool_free(r10_bio, &conf->r10buf_pool);
296 
297 	lower_barrier(conf);
298 }
299 
300 static void reschedule_retry(struct r10bio *r10_bio)
301 {
302 	unsigned long flags;
303 	struct mddev *mddev = r10_bio->mddev;
304 	struct r10conf *conf = mddev->private;
305 
306 	spin_lock_irqsave(&conf->device_lock, flags);
307 	list_add(&r10_bio->retry_list, &conf->retry_list);
308 	conf->nr_queued++;
309 	spin_unlock_irqrestore(&conf->device_lock, flags);
310 
311 	/* wake up frozen array... */
312 	wake_up(&conf->wait_barrier);
313 
314 	md_wakeup_thread(mddev->thread);
315 }
316 
317 /*
318  * raid_end_bio_io() is called when we have finished servicing a mirrored
319  * operation and are ready to return a success/failure code to the buffer
320  * cache layer.
321  */
322 static void raid_end_bio_io(struct r10bio *r10_bio)
323 {
324 	struct bio *bio = r10_bio->master_bio;
325 	struct r10conf *conf = r10_bio->mddev->private;
326 
327 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
328 		bio->bi_status = BLK_STS_IOERR;
329 
330 	bio_endio(bio);
331 	/*
332 	 * Wake up any possible resync thread that waits for the device
333 	 * to go idle.
334 	 */
335 	allow_barrier(conf);
336 
337 	free_r10bio(r10_bio);
338 }
339 
340 /*
341  * Update disk head position estimator based on IRQ completion info.
342  */
343 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
344 {
345 	struct r10conf *conf = r10_bio->mddev->private;
346 
347 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
348 		r10_bio->devs[slot].addr + (r10_bio->sectors);
349 }
350 
351 /*
352  * Find the disk number which triggered the given bio
353  */
354 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
355 			 struct bio *bio, int *slotp, int *replp)
356 {
357 	int slot;
358 	int repl = 0;
359 
360 	for (slot = 0; slot < conf->copies; slot++) {
361 		if (r10_bio->devs[slot].bio == bio)
362 			break;
363 		if (r10_bio->devs[slot].repl_bio == bio) {
364 			repl = 1;
365 			break;
366 		}
367 	}
368 
369 	BUG_ON(slot == conf->copies);
370 	update_head_pos(slot, r10_bio);
371 
372 	if (slotp)
373 		*slotp = slot;
374 	if (replp)
375 		*replp = repl;
376 	return r10_bio->devs[slot].devnum;
377 }
378 
379 static void raid10_end_read_request(struct bio *bio)
380 {
381 	int uptodate = !bio->bi_status;
382 	struct r10bio *r10_bio = bio->bi_private;
383 	int slot;
384 	struct md_rdev *rdev;
385 	struct r10conf *conf = r10_bio->mddev->private;
386 
387 	slot = r10_bio->read_slot;
388 	rdev = r10_bio->devs[slot].rdev;
389 	/*
390 	 * this branch is our 'one mirror IO has finished' event handler:
391 	 */
392 	update_head_pos(slot, r10_bio);
393 
394 	if (uptodate) {
395 		/*
396 		 * Set R10BIO_Uptodate in our master bio, so that
397 		 * we will return a good error code to the higher
398 		 * levels even if IO on some other mirrored buffer fails.
399 		 *
400 		 * The 'master' represents the composite IO operation to
401 		 * user-side. So if something waits for IO, then it will
402 		 * wait for the 'master' bio.
403 		 */
404 		set_bit(R10BIO_Uptodate, &r10_bio->state);
405 	} else {
406 		/* If all other devices that store this block have
407 		 * failed, we want to return the error upwards rather
408 		 * than fail the last device.  Here we redefine
409 		 * "uptodate" to mean "Don't want to retry"
410 		 */
411 		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
412 			     rdev->raid_disk))
413 			uptodate = 1;
414 	}
415 	if (uptodate) {
416 		raid_end_bio_io(r10_bio);
417 		rdev_dec_pending(rdev, conf->mddev);
418 	} else {
419 		/*
420 		 * oops, read error - keep the refcount on the rdev
421 		 */
422 		char b[BDEVNAME_SIZE];
423 		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
424 				   mdname(conf->mddev),
425 				   bdevname(rdev->bdev, b),
426 				   (unsigned long long)r10_bio->sector);
427 		set_bit(R10BIO_ReadError, &r10_bio->state);
428 		reschedule_retry(r10_bio);
429 	}
430 }
431 
432 static void close_write(struct r10bio *r10_bio)
433 {
434 	/* clear the bitmap if all writes complete successfully */
435 	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
436 			   r10_bio->sectors,
437 			   !test_bit(R10BIO_Degraded, &r10_bio->state),
438 			   0);
439 	md_write_end(r10_bio->mddev);
440 }
441 
442 static void one_write_done(struct r10bio *r10_bio)
443 {
444 	if (atomic_dec_and_test(&r10_bio->remaining)) {
445 		if (test_bit(R10BIO_WriteError, &r10_bio->state))
446 			reschedule_retry(r10_bio);
447 		else {
448 			close_write(r10_bio);
449 			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
450 				reschedule_retry(r10_bio);
451 			else
452 				raid_end_bio_io(r10_bio);
453 		}
454 	}
455 }
456 
457 static void raid10_end_write_request(struct bio *bio)
458 {
459 	struct r10bio *r10_bio = bio->bi_private;
460 	int dev;
461 	int dec_rdev = 1;
462 	struct r10conf *conf = r10_bio->mddev->private;
463 	int slot, repl;
464 	struct md_rdev *rdev = NULL;
465 	struct bio *to_put = NULL;
466 	bool discard_error;
467 
468 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
469 
470 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
471 
472 	if (repl)
473 		rdev = conf->mirrors[dev].replacement;
474 	if (!rdev) {
475 		smp_rmb();
476 		repl = 0;
477 		rdev = conf->mirrors[dev].rdev;
478 	}
479 	/*
480 	 * this branch is our 'one mirror IO has finished' event handler:
481 	 */
482 	if (bio->bi_status && !discard_error) {
483 		if (repl)
484 			/* Never record new bad blocks to replacement,
485 			 * just fail it.
486 			 */
487 			md_error(rdev->mddev, rdev);
488 		else {
489 			set_bit(WriteErrorSeen,	&rdev->flags);
490 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
491 				set_bit(MD_RECOVERY_NEEDED,
492 					&rdev->mddev->recovery);
493 
494 			dec_rdev = 0;
495 			if (test_bit(FailFast, &rdev->flags) &&
496 			    (bio->bi_opf & MD_FAILFAST)) {
497 				md_error(rdev->mddev, rdev);
498 				if (!test_bit(Faulty, &rdev->flags))
499 					/* This is the only remaining device,
500 					 * we need to retry the write without
501 					 * FailFast.
502 					 */
503 					set_bit(R10BIO_WriteError, &r10_bio->state);
504 				else {
505 					r10_bio->devs[slot].bio = NULL;
506 					to_put = bio;
507 					dec_rdev = 1;
508 				}
509 			} else
510 				set_bit(R10BIO_WriteError, &r10_bio->state);
511 		}
512 	} else {
513 		/*
514 		 * Set R10BIO_Uptodate in our master bio, so that
515 		 * we will return a good error code to the higher
516 		 * levels even if IO on some other mirrored buffer fails.
517 		 *
518 		 * The 'master' represents the composite IO operation to
519 		 * user-side. So if something waits for IO, then it will
520 		 * wait for the 'master' bio.
521 		 */
522 		sector_t first_bad;
523 		int bad_sectors;
524 
525 		/*
526 		 * Do not set R10BIO_Uptodate if the current device is
527 		 * rebuilding or Faulty. This is because we cannot use
528 		 * such a device for properly reading the data back (we could
529 		 * potentially use it, if the current write had landed
530 		 * before rdev->recovery_offset, but for simplicity we don't
531 		 * check this here).
532 		 */
533 		if (test_bit(In_sync, &rdev->flags) &&
534 		    !test_bit(Faulty, &rdev->flags))
535 			set_bit(R10BIO_Uptodate, &r10_bio->state);
536 
537 		/* Maybe we can clear some bad blocks. */
538 		if (is_badblock(rdev,
539 				r10_bio->devs[slot].addr,
540 				r10_bio->sectors,
541 				&first_bad, &bad_sectors) && !discard_error) {
542 			bio_put(bio);
543 			if (repl)
544 				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
545 			else
546 				r10_bio->devs[slot].bio = IO_MADE_GOOD;
547 			dec_rdev = 0;
548 			set_bit(R10BIO_MadeGood, &r10_bio->state);
549 		}
550 	}
551 
552 	/*
553 	 *
554 	 * Let's see if all mirrored write operations have finished
555 	 * already.
556 	 */
557 	one_write_done(r10_bio);
558 	if (dec_rdev)
559 		rdev_dec_pending(rdev, conf->mddev);
560 	if (to_put)
561 		bio_put(to_put);
562 }
563 
564 /*
565  * RAID10 layout manager
566  * As well as the chunksize and raid_disks count, there are two
567  * parameters: near_copies and far_copies.
568  * near_copies * far_copies must be <= raid_disks.
569  * Normally one of these will be 1.
570  * If both are 1, we get raid0.
571  * If near_copies == raid_disks, we get raid1.
572  *
573  * Chunks are laid out in raid0 style with near_copies copies of the
574  * first chunk, followed by near_copies copies of the next chunk and
575  * so on.
576  * If far_copies > 1, then after 1/far_copies of the array has been assigned
577  * as described above, we start again with a device offset of near_copies.
578  * So we effectively have another copy of the whole array further down all
579  * the drives, but with blocks on different drives.
580  * With this layout, a block is never stored twice on the same device.
581  *
582  * raid10_find_phys finds the sector offset of a given virtual sector
583  * on each device that it is on.
584  *
585  * raid10_find_virt does the reverse mapping, from a device and a
586  * sector offset to a virtual address
587  */
588 
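/*
 * Illustrative sketch (hypothetical helper, kept under #if 0; not
 * part of the driver): for the simplest geometry (far_copies == 1,
 * !far_offset) the mapping in __raid10_find_phys() below reduces to:
 */
#if 0
static void demo_near_map(struct geom *geo, sector_t sector)
{
	sector_t chunk = sector >> geo->chunk_shift;
	sector_t offset = sector & geo->chunk_mask;
	sector_t stripe = chunk * geo->near_copies;
	sector_t dev_sector;
	int dev, n;

	/* remainder picks the first device, quotient the stripe */
	dev = sector_div(stripe, geo->raid_disks);
	dev_sector = (stripe << geo->chunk_shift) + offset;

	for (n = 0; n < geo->near_copies; n++) {
		pr_info("copy %d -> dev %d sector %llu\n", n, dev,
			(unsigned long long)dev_sector);
		if (++dev >= geo->raid_disks) {
			dev = 0;
			dev_sector += geo->chunk_mask + 1;
		}
	}
}
#endif
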
589 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
590 {
591 	int n,f;
592 	sector_t sector;
593 	sector_t chunk;
594 	sector_t stripe;
595 	int dev;
596 	int slot = 0;
597 	int last_far_set_start, last_far_set_size;
598 
599 	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
600 	last_far_set_start *= geo->far_set_size;
601 
602 	last_far_set_size = geo->far_set_size;
603 	last_far_set_size += (geo->raid_disks % geo->far_set_size);
604 
605 	/* now calculate first sector/dev */
606 	chunk = r10bio->sector >> geo->chunk_shift;
607 	sector = r10bio->sector & geo->chunk_mask;
608 
609 	chunk *= geo->near_copies;
610 	stripe = chunk;
611 	dev = sector_div(stripe, geo->raid_disks);
612 	if (geo->far_offset)
613 		stripe *= geo->far_copies;
614 
615 	sector += stripe << geo->chunk_shift;
616 
617 	/* and calculate all the others */
618 	for (n = 0; n < geo->near_copies; n++) {
619 		int d = dev;
620 		int set;
621 		sector_t s = sector;
622 		r10bio->devs[slot].devnum = d;
623 		r10bio->devs[slot].addr = s;
624 		slot++;
625 
626 		for (f = 1; f < geo->far_copies; f++) {
627 			set = d / geo->far_set_size;
628 			d += geo->near_copies;
629 
630 			if ((geo->raid_disks % geo->far_set_size) &&
631 			    (d > last_far_set_start)) {
632 				d -= last_far_set_start;
633 				d %= last_far_set_size;
634 				d += last_far_set_start;
635 			} else {
636 				d %= geo->far_set_size;
637 				d += geo->far_set_size * set;
638 			}
639 			s += geo->stride;
640 			r10bio->devs[slot].devnum = d;
641 			r10bio->devs[slot].addr = s;
642 			slot++;
643 		}
644 		dev++;
645 		if (dev >= geo->raid_disks) {
646 			dev = 0;
647 			sector += (geo->chunk_mask + 1);
648 		}
649 	}
650 }
651 
652 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
653 {
654 	struct geom *geo = &conf->geo;
655 
656 	if (conf->reshape_progress != MaxSector &&
657 	    ((r10bio->sector >= conf->reshape_progress) !=
658 	     conf->mddev->reshape_backwards)) {
659 		set_bit(R10BIO_Previous, &r10bio->state);
660 		geo = &conf->prev;
661 	} else
662 		clear_bit(R10BIO_Previous, &r10bio->state);
663 
664 	__raid10_find_phys(geo, r10bio);
665 }
666 
667 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
668 {
669 	sector_t offset, chunk, vchunk;
670 	/* Never use conf->prev as this is only called during resync
671 	 * or recovery, so reshape isn't happening
672 	 */
673 	struct geom *geo = &conf->geo;
674 	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
675 	int far_set_size = geo->far_set_size;
676 	int last_far_set_start;
677 
678 	if (geo->raid_disks % geo->far_set_size) {
679 		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
680 		last_far_set_start *= geo->far_set_size;
681 
682 		if (dev >= last_far_set_start) {
683 			far_set_size = geo->far_set_size;
684 			far_set_size += (geo->raid_disks % geo->far_set_size);
685 			far_set_start = last_far_set_start;
686 		}
687 	}
688 
689 	offset = sector & geo->chunk_mask;
690 	if (geo->far_offset) {
691 		int fc;
692 		chunk = sector >> geo->chunk_shift;
693 		fc = sector_div(chunk, geo->far_copies);
694 		dev -= fc * geo->near_copies;
695 		if (dev < far_set_start)
696 			dev += far_set_size;
697 	} else {
698 		while (sector >= geo->stride) {
699 			sector -= geo->stride;
700 			if (dev < (geo->near_copies + far_set_start))
701 				dev += far_set_size - geo->near_copies;
702 			else
703 				dev -= geo->near_copies;
704 		}
705 		chunk = sector >> geo->chunk_shift;
706 	}
707 	vchunk = chunk * geo->raid_disks + dev;
708 	sector_div(vchunk, geo->near_copies);
709 	return (vchunk << geo->chunk_shift) + offset;
710 }
711 
712 /*
713  * This routine returns the disk from which the requested read should
714  * be done. There is a per-array 'next expected sequential IO' sector
715  * number - if this matches on the next IO then we use the last disk.
716  * There is also a per-disk 'last known head position' sector that is
717  * maintained from IRQ contexts, both the normal and the resync IO
718  * completion handlers update this position correctly. If there is no
719  * perfect sequential match then we pick the disk whose head is closest.
720  *
721  * If there are 2 mirrors on the same 2 devices, performance degrades
722  * because the head position is tracked per mirror, not per device.
723  *
724  * The rdev for the device selected will have nr_pending incremented.
725  */
726 
727 /*
728  * FIXME: possibly should rethink readbalancing and do it differently
729  * depending on near_copies / far_copies geometry.
730  */
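/*
 * Balance metric sketch (illustrative): an idle disk wins outright on
 * 'near' layouts, the lowest device address wins on 'far' layouts,
 * and otherwise the shortest seek decides:
 *
 *	new_distance = abs(r10_bio->devs[slot].addr -
 *			   conf->mirrors[disk].head_position);
 *
 * The readable slot with the smallest new_distance is chosen.
 */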
731 static struct md_rdev *read_balance(struct r10conf *conf,
732 				    struct r10bio *r10_bio,
733 				    int *max_sectors)
734 {
735 	const sector_t this_sector = r10_bio->sector;
736 	int disk, slot;
737 	int sectors = r10_bio->sectors;
738 	int best_good_sectors;
739 	sector_t new_distance, best_dist;
740 	struct md_rdev *best_rdev, *rdev = NULL;
741 	int do_balance;
742 	int best_slot;
743 	struct geom *geo = &conf->geo;
744 
745 	raid10_find_phys(conf, r10_bio);
746 	rcu_read_lock();
747 	best_slot = -1;
748 	best_rdev = NULL;
749 	best_dist = MaxSector;
750 	best_good_sectors = 0;
751 	do_balance = 1;
752 	clear_bit(R10BIO_FailFast, &r10_bio->state);
753 	/*
754 	 * Check if we can balance. We can balance on the whole
755 	 * device if no resync is going on (recovery is ok), or below
756 	 * the resync window. We take the first readable disk when
757 	 * above the resync window.
758 	 */
759 	if ((conf->mddev->recovery_cp < MaxSector
760 	     && (this_sector + sectors >= conf->next_resync)) ||
761 	    (mddev_is_clustered(conf->mddev) &&
762 	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
763 					    this_sector + sectors)))
764 		do_balance = 0;
765 
766 	for (slot = 0; slot < conf->copies ; slot++) {
767 		sector_t first_bad;
768 		int bad_sectors;
769 		sector_t dev_sector;
770 
771 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
772 			continue;
773 		disk = r10_bio->devs[slot].devnum;
774 		rdev = rcu_dereference(conf->mirrors[disk].replacement);
775 		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
776 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
777 			rdev = rcu_dereference(conf->mirrors[disk].rdev);
778 		if (rdev == NULL ||
779 		    test_bit(Faulty, &rdev->flags))
780 			continue;
781 		if (!test_bit(In_sync, &rdev->flags) &&
782 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
783 			continue;
784 
785 		dev_sector = r10_bio->devs[slot].addr;
786 		if (is_badblock(rdev, dev_sector, sectors,
787 				&first_bad, &bad_sectors)) {
788 			if (best_dist < MaxSector)
789 				/* Already have a better slot */
790 				continue;
791 			if (first_bad <= dev_sector) {
792 				/* Cannot read here.  If this is the
793 				 * 'primary' device, then we must not read
794 				 * beyond 'bad_sectors' from another device.
795 				 */
796 				bad_sectors -= (dev_sector - first_bad);
797 				if (!do_balance && sectors > bad_sectors)
798 					sectors = bad_sectors;
799 				if (best_good_sectors > sectors)
800 					best_good_sectors = sectors;
801 			} else {
802 				sector_t good_sectors =
803 					first_bad - dev_sector;
804 				if (good_sectors > best_good_sectors) {
805 					best_good_sectors = good_sectors;
806 					best_slot = slot;
807 					best_rdev = rdev;
808 				}
809 				if (!do_balance)
810 					/* Must read from here */
811 					break;
812 			}
813 			continue;
814 		} else
815 			best_good_sectors = sectors;
816 
817 		if (!do_balance)
818 			break;
819 
820 		if (best_slot >= 0)
821 			/* At least 2 disks to choose from so failfast is OK */
822 			set_bit(R10BIO_FailFast, &r10_bio->state);
823 		/* This optimisation is debatable, and completely destroys
824 		 * sequential read speed for 'far copies' arrays.  So only
825 		 * keep it for 'near' arrays, and review those later.
826 		 */
827 		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
828 			new_distance = 0;
829 
830 		/* for far > 1 always use the lowest address */
831 		else if (geo->far_copies > 1)
832 			new_distance = r10_bio->devs[slot].addr;
833 		else
834 			new_distance = abs(r10_bio->devs[slot].addr -
835 					   conf->mirrors[disk].head_position);
836 		if (new_distance < best_dist) {
837 			best_dist = new_distance;
838 			best_slot = slot;
839 			best_rdev = rdev;
840 		}
841 	}
842 	if (slot >= conf->copies) {
843 		slot = best_slot;
844 		rdev = best_rdev;
845 	}
846 
847 	if (slot >= 0) {
848 		atomic_inc(&rdev->nr_pending);
849 		r10_bio->read_slot = slot;
850 	} else
851 		rdev = NULL;
852 	rcu_read_unlock();
853 	*max_sectors = best_good_sectors;
854 
855 	return rdev;
856 }
857 
858 static int raid10_congested(struct mddev *mddev, int bits)
859 {
860 	struct r10conf *conf = mddev->private;
861 	int i, ret = 0;
862 
863 	if ((bits & (1 << WB_async_congested)) &&
864 	    conf->pending_count >= max_queued_requests)
865 		return 1;
866 
867 	rcu_read_lock();
868 	for (i = 0;
869 	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
870 		     && ret == 0;
871 	     i++) {
872 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
873 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
874 			struct request_queue *q = bdev_get_queue(rdev->bdev);
875 
876 			ret |= bdi_congested(q->backing_dev_info, bits);
877 		}
878 	}
879 	rcu_read_unlock();
880 	return ret;
881 }
882 
883 static void flush_pending_writes(struct r10conf *conf)
884 {
885 	/* Any writes that have been queued but are awaiting
886 	 * bitmap updates get flushed here.
887 	 */
888 	spin_lock_irq(&conf->device_lock);
889 
890 	if (conf->pending_bio_list.head) {
891 		struct blk_plug plug;
892 		struct bio *bio;
893 
894 		bio = bio_list_get(&conf->pending_bio_list);
895 		conf->pending_count = 0;
896 		spin_unlock_irq(&conf->device_lock);
897 
898 		/*
899 		 * As this is called in a wait_event() loop (see freeze_array),
900 		 * current->state might be TASK_UNINTERRUPTIBLE which will
901 		 * cause a warning when we prepare to wait again.  As it is
902 		 * rare that this path is taken, it is perfectly safe to force
903 		 * us to go around the wait_event() loop again, so the warning
904 		 * is a false-positive. Silence the warning by resetting
905 		 * thread state
906 		 * the thread state.
907 		__set_current_state(TASK_RUNNING);
908 
909 		blk_start_plug(&plug);
910 		/* flush any pending bitmap writes to disk
911 		 * before proceeding w/ I/O */
912 		md_bitmap_unplug(conf->mddev->bitmap);
913 		wake_up(&conf->wait_barrier);
914 
915 		while (bio) { /* submit pending writes */
916 			struct bio *next = bio->bi_next;
917 			struct md_rdev *rdev = (void*)bio->bi_disk;
918 			bio->bi_next = NULL;
919 			bio_set_dev(bio, rdev->bdev);
920 			if (test_bit(Faulty, &rdev->flags)) {
921 				bio_io_error(bio);
922 			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
923 					    !blk_queue_discard(bio->bi_disk->queue)))
924 				/* Just ignore it */
925 				bio_endio(bio);
926 			else
927 				generic_make_request(bio);
928 			bio = next;
929 		}
930 		blk_finish_plug(&plug);
931 	} else
932 		spin_unlock_irq(&conf->device_lock);
933 }
934 
935 /* Barriers....
936  * Sometimes we need to suspend IO while we do something else,
937  * either some resync/recovery, or reconfigure the array.
938  * To do this we raise a 'barrier'.
939  * The 'barrier' is a counter that can be raised multiple times
940  * to count how many activities are happening which preclude
941  * normal IO.
942  * We can only raise the barrier if there is no pending IO.
943  * i.e. if nr_pending == 0.
944  * We choose only to raise the barrier if no-one is waiting for the
945  * barrier to go down.  This means that as soon as an IO request
946  * is ready, no other operations which require a barrier will start
947  * until the IO request has had a chance.
948  *
949  * So: regular IO calls 'wait_barrier'.  When that returns there
950  *    is no background IO happening.  It must arrange to call
951  *    allow_barrier when it has finished its IO.
952  * Background IO calls must call raise_barrier.  Once that returns
953  *    there is no normal IO happening.  It must arrange to call
954  *    lower_barrier when the particular background IO completes.
955  */
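/*
 * Usage pattern sketch (illustrative only):
 *
 *	wait_barrier(conf);	regular IO: waits out any background IO
 *	... submit the bio ...
 *	allow_barrier(conf);	called when the IO completes
 *
 *	raise_barrier(conf, 0);	background IO: holds off regular IO
 *	... resync/recovery window ...
 *	lower_barrier(conf);
 */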
956 
957 static void raise_barrier(struct r10conf *conf, int force)
958 {
959 	BUG_ON(force && !conf->barrier);
960 	spin_lock_irq(&conf->resync_lock);
961 
962 	/* Wait until no block IO is waiting (unless 'force') */
963 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
964 			    conf->resync_lock);
965 
966 	/* block any new IO from starting */
967 	conf->barrier++;
968 
969 	/* Now wait for all pending IO to complete */
970 	wait_event_lock_irq(conf->wait_barrier,
971 			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
972 			    conf->resync_lock);
973 
974 	spin_unlock_irq(&conf->resync_lock);
975 }
976 
977 static void lower_barrier(struct r10conf *conf)
978 {
979 	unsigned long flags;
980 	spin_lock_irqsave(&conf->resync_lock, flags);
981 	conf->barrier--;
982 	spin_unlock_irqrestore(&conf->resync_lock, flags);
983 	wake_up(&conf->wait_barrier);
984 }
985 
986 static void wait_barrier(struct r10conf *conf)
987 {
988 	spin_lock_irq(&conf->resync_lock);
989 	if (conf->barrier) {
990 		conf->nr_waiting++;
991 		/* Wait for the barrier to drop.
992 		 * However if there are already pending
993 		 * requests (preventing the barrier from
994 		 * rising completely), and the
995 		 * pre-process bio queue isn't empty,
996 		 * then don't wait, as we need to empty
997 		 * that queue to get the nr_pending
998 		 * count down.
999 		 */
1000 		raid10_log(conf->mddev, "wait barrier");
1001 		wait_event_lock_irq(conf->wait_barrier,
1002 				    !conf->barrier ||
1003 				    (atomic_read(&conf->nr_pending) &&
1004 				     current->bio_list &&
1005 				     (!bio_list_empty(&current->bio_list[0]) ||
1006 				      !bio_list_empty(&current->bio_list[1]))),
1007 				    conf->resync_lock);
1008 		conf->nr_waiting--;
1009 		if (!conf->nr_waiting)
1010 			wake_up(&conf->wait_barrier);
1011 	}
1012 	atomic_inc(&conf->nr_pending);
1013 	spin_unlock_irq(&conf->resync_lock);
1014 }
1015 
1016 static void allow_barrier(struct r10conf *conf)
1017 {
1018 	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1019 			(conf->array_freeze_pending))
1020 		wake_up(&conf->wait_barrier);
1021 }
1022 
1023 static void freeze_array(struct r10conf *conf, int extra)
1024 {
1025 	/* stop syncio and normal IO and wait for everything to
1026 	 * go quiet.
1027 	 * We increment barrier and nr_waiting, and then
1028 	 * wait until nr_pending matches nr_queued+extra.
1029 	 * This is called in the context of one normal IO request
1030 	 * that has failed. Thus any sync request that might be pending
1031 	 * will be blocked by nr_pending, and we need to wait for
1032 	 * pending IO requests to complete or be queued for re-try.
1033 	 * Thus the number queued (nr_queued) plus this request (extra)
1034 	 * must match the number of pending IOs (nr_pending) before
1035 	 * we continue.
1036 	 */
1037 	spin_lock_irq(&conf->resync_lock);
1038 	conf->array_freeze_pending++;
1039 	conf->barrier++;
1040 	conf->nr_waiting++;
1041 	wait_event_lock_irq_cmd(conf->wait_barrier,
1042 				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
1043 				conf->resync_lock,
1044 				flush_pending_writes(conf));
1045 
1046 	conf->array_freeze_pending--;
1047 	spin_unlock_irq(&conf->resync_lock);
1048 }
1049 
1050 static void unfreeze_array(struct r10conf *conf)
1051 {
1052 	/* reverse the effect of the freeze */
1053 	spin_lock_irq(&conf->resync_lock);
1054 	conf->barrier--;
1055 	conf->nr_waiting--;
1056 	wake_up(&conf->wait_barrier);
1057 	spin_unlock_irq(&conf->resync_lock);
1058 }
1059 
1060 static sector_t choose_data_offset(struct r10bio *r10_bio,
1061 				   struct md_rdev *rdev)
1062 {
1063 	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1064 	    test_bit(R10BIO_Previous, &r10_bio->state))
1065 		return rdev->data_offset;
1066 	else
1067 		return rdev->new_data_offset;
1068 }
1069 
1070 struct raid10_plug_cb {
1071 	struct blk_plug_cb	cb;
1072 	struct bio_list		pending;
1073 	int			pending_cnt;
1074 };
1075 
1076 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1077 {
1078 	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1079 						   cb);
1080 	struct mddev *mddev = plug->cb.data;
1081 	struct r10conf *conf = mddev->private;
1082 	struct bio *bio;
1083 
1084 	if (from_schedule || current->bio_list) {
1085 		spin_lock_irq(&conf->device_lock);
1086 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1087 		conf->pending_count += plug->pending_cnt;
1088 		spin_unlock_irq(&conf->device_lock);
1089 		wake_up(&conf->wait_barrier);
1090 		md_wakeup_thread(mddev->thread);
1091 		kfree(plug);
1092 		return;
1093 	}
1094 
1095 	/* we aren't scheduling, so we can do the write-out directly. */
1096 	bio = bio_list_get(&plug->pending);
1097 	md_bitmap_unplug(mddev->bitmap);
1098 	wake_up(&conf->wait_barrier);
1099 
1100 	while (bio) { /* submit pending writes */
1101 		struct bio *next = bio->bi_next;
1102 		struct md_rdev *rdev = (void*)bio->bi_disk;
1103 		bio->bi_next = NULL;
1104 		bio_set_dev(bio, rdev->bdev);
1105 		if (test_bit(Faulty, &rdev->flags)) {
1106 			bio_io_error(bio);
1107 		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
1108 				    !blk_queue_discard(bio->bi_disk->queue)))
1109 			/* Just ignore it */
1110 			bio_endio(bio);
1111 		else
1112 			generic_make_request(bio);
1113 		bio = next;
1114 	}
1115 	kfree(plug);
1116 }
1117 
1118 /*
1119  * 1. Register the new request and wait if the reconstruction thread has put
1120  * up a barrier for new requests. Continue immediately if no resync is
1121  * currently active.
1122  * 2. If the IO spans the reshape position, wait for the reshape to pass.
1123  */
1124 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1125 				 struct bio *bio, sector_t sectors)
1126 {
1127 	wait_barrier(conf);
1128 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1129 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1130 	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1131 		raid10_log(conf->mddev, "wait reshape");
1132 		allow_barrier(conf);
1133 		wait_event(conf->wait_barrier,
1134 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1135 			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1136 			   sectors);
1137 		wait_barrier(conf);
1138 	}
1139 }
1140 
1141 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1142 				struct r10bio *r10_bio)
1143 {
1144 	struct r10conf *conf = mddev->private;
1145 	struct bio *read_bio;
1146 	const int op = bio_op(bio);
1147 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1148 	int max_sectors;
1149 	struct md_rdev *rdev;
1150 	char b[BDEVNAME_SIZE];
1151 	int slot = r10_bio->read_slot;
1152 	struct md_rdev *err_rdev = NULL;
1153 	gfp_t gfp = GFP_NOIO;
1154 
1155 	if (r10_bio->devs[slot].rdev) {
1156 		/*
1157 		 * safely dereference the rdev in the r10_bio;
1158 		 * we must use the one in conf.
1159 		 * we must use the one in conf.
1160 		 * If it has already been disconnected (unlikely)
1161 		 * we lose the device name in error messages.
1162 		 */
1163 		int disk;
1164 		/*
1165 		 * As we are blocking raid10, it is a little safer to
1166 		 * use __GFP_HIGH.
1167 		 */
1168 		gfp = GFP_NOIO | __GFP_HIGH;
1169 
1170 		rcu_read_lock();
1171 		disk = r10_bio->devs[slot].devnum;
1172 		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1173 		if (err_rdev)
1174 			bdevname(err_rdev->bdev, b);
1175 		else {
1176 			strcpy(b, "???");
1177 			/* This never gets dereferenced */
1178 			err_rdev = r10_bio->devs[slot].rdev;
1179 		}
1180 		rcu_read_unlock();
1181 	}
1182 
1183 	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
1184 	rdev = read_balance(conf, r10_bio, &max_sectors);
1185 	if (!rdev) {
1186 		if (err_rdev) {
1187 			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1188 					    mdname(mddev), b,
1189 					    (unsigned long long)r10_bio->sector);
1190 		}
1191 		raid_end_bio_io(r10_bio);
1192 		return;
1193 	}
1194 	if (err_rdev)
1195 		pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
1196 				   mdname(mddev),
1197 				   bdevname(rdev->bdev, b),
1198 				   (unsigned long long)r10_bio->sector);
1199 	if (max_sectors < bio_sectors(bio)) {
1200 		struct bio *split = bio_split(bio, max_sectors,
1201 					      gfp, &conf->bio_split);
1202 		bio_chain(split, bio);
1203 		allow_barrier(conf);
1204 		generic_make_request(bio);
1205 		wait_barrier(conf);
1206 		bio = split;
1207 		r10_bio->master_bio = bio;
1208 		r10_bio->sectors = max_sectors;
1209 	}
1210 	slot = r10_bio->read_slot;
1211 
1212 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1213 
1214 	r10_bio->devs[slot].bio = read_bio;
1215 	r10_bio->devs[slot].rdev = rdev;
1216 
1217 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1218 		choose_data_offset(r10_bio, rdev);
1219 	bio_set_dev(read_bio, rdev->bdev);
1220 	read_bio->bi_end_io = raid10_end_read_request;
1221 	bio_set_op_attrs(read_bio, op, do_sync);
1222 	if (test_bit(FailFast, &rdev->flags) &&
1223 	    test_bit(R10BIO_FailFast, &r10_bio->state))
1224 	        read_bio->bi_opf |= MD_FAILFAST;
1225 	read_bio->bi_private = r10_bio;
1226 
1227 	if (mddev->gendisk)
1228 	        trace_block_bio_remap(read_bio->bi_disk->queue,
1229 	                              read_bio, disk_devt(mddev->gendisk),
1230 	                              r10_bio->sector);
1231 	generic_make_request(read_bio);
1232 	return;
1233 }
1234 
1235 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1236 				  struct bio *bio, bool replacement,
1237 				  int n_copy)
1238 {
1239 	const int op = bio_op(bio);
1240 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1241 	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1242 	unsigned long flags;
1243 	struct blk_plug_cb *cb;
1244 	struct raid10_plug_cb *plug = NULL;
1245 	struct r10conf *conf = mddev->private;
1246 	struct md_rdev *rdev;
1247 	int devnum = r10_bio->devs[n_copy].devnum;
1248 	struct bio *mbio;
1249 
1250 	if (replacement) {
1251 		rdev = conf->mirrors[devnum].replacement;
1252 		if (rdev == NULL) {
1253 			/* Replacement just got moved to main 'rdev' */
1254 			smp_mb();
1255 			rdev = conf->mirrors[devnum].rdev;
1256 		}
1257 	} else
1258 		rdev = conf->mirrors[devnum].rdev;
1259 
1260 	mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1261 	if (replacement)
1262 		r10_bio->devs[n_copy].repl_bio = mbio;
1263 	else
1264 		r10_bio->devs[n_copy].bio = mbio;
1265 
1266 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1267 				   choose_data_offset(r10_bio, rdev));
1268 	bio_set_dev(mbio, rdev->bdev);
1269 	mbio->bi_end_io	= raid10_end_write_request;
1270 	bio_set_op_attrs(mbio, op, do_sync | do_fua);
1271 	if (!replacement && test_bit(FailFast,
1272 				     &conf->mirrors[devnum].rdev->flags)
1273 			 && enough(conf, devnum))
1274 		mbio->bi_opf |= MD_FAILFAST;
1275 	mbio->bi_private = r10_bio;
1276 
1277 	if (conf->mddev->gendisk)
1278 		trace_block_bio_remap(mbio->bi_disk->queue,
1279 				      mbio, disk_devt(conf->mddev->gendisk),
1280 				      r10_bio->sector);
1281 	/* flush_pending_writes() needs access to the rdev so... */
1282 	mbio->bi_disk = (void *)rdev;
1283 
1284 	atomic_inc(&r10_bio->remaining);
1285 
1286 	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1287 	if (cb)
1288 		plug = container_of(cb, struct raid10_plug_cb, cb);
1289 	else
1290 		plug = NULL;
1291 	if (plug) {
1292 		bio_list_add(&plug->pending, mbio);
1293 		plug->pending_cnt++;
1294 	} else {
1295 		spin_lock_irqsave(&conf->device_lock, flags);
1296 		bio_list_add(&conf->pending_bio_list, mbio);
1297 		conf->pending_count++;
1298 		spin_unlock_irqrestore(&conf->device_lock, flags);
1299 		md_wakeup_thread(mddev->thread);
1300 	}
1301 }
1302 
1303 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1304 				 struct r10bio *r10_bio)
1305 {
1306 	struct r10conf *conf = mddev->private;
1307 	int i;
1308 	struct md_rdev *blocked_rdev;
1309 	sector_t sectors;
1310 	int max_sectors;
1311 
1312 	if ((mddev_is_clustered(mddev) &&
1313 	     md_cluster_ops->area_resyncing(mddev, WRITE,
1314 					    bio->bi_iter.bi_sector,
1315 					    bio_end_sector(bio)))) {
1316 		DEFINE_WAIT(w);
1317 		for (;;) {
1318 			prepare_to_wait(&conf->wait_barrier,
1319 					&w, TASK_IDLE);
1320 			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1321 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1322 				break;
1323 			schedule();
1324 		}
1325 		finish_wait(&conf->wait_barrier, &w);
1326 	}
1327 
1328 	sectors = r10_bio->sectors;
1329 	regular_request_wait(mddev, conf, bio, sectors);
1330 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1331 	    (mddev->reshape_backwards
1332 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1333 		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1334 	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1335 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1336 		/* Need to update reshape_position in metadata */
1337 		mddev->reshape_position = conf->reshape_progress;
1338 		set_mask_bits(&mddev->sb_flags, 0,
1339 			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1340 		md_wakeup_thread(mddev->thread);
1341 		raid10_log(conf->mddev, "wait reshape metadata");
1342 		wait_event(mddev->sb_wait,
1343 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1344 
1345 		conf->reshape_safe = mddev->reshape_position;
1346 	}
1347 
1348 	if (conf->pending_count >= max_queued_requests) {
1349 		md_wakeup_thread(mddev->thread);
1350 		raid10_log(mddev, "wait queued");
1351 		wait_event(conf->wait_barrier,
1352 			   conf->pending_count < max_queued_requests);
1353 	}
1354 	/* first select target devices under rcu_read_lock() and
1355 	 * inc refcount on their rdev.  Record them by setting
1356 	 * bios[x] to bio
1357 	 * If there are known/acknowledged bad blocks on any device
1358 	 * on which we have seen a write error, we want to avoid
1359 	 * writing to those blocks.  This potentially requires several
1360 	 * writes to write around the bad blocks.  Each set of writes
1361 	 * gets its own r10_bio with a set of bios attached.
1362 	 */
1363 
1364 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1365 	raid10_find_phys(conf, r10_bio);
1366 retry_write:
1367 	blocked_rdev = NULL;
1368 	rcu_read_lock();
1369 	max_sectors = r10_bio->sectors;
1370 
1371 	for (i = 0;  i < conf->copies; i++) {
1372 		int d = r10_bio->devs[i].devnum;
1373 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1374 		struct md_rdev *rrdev = rcu_dereference(
1375 			conf->mirrors[d].replacement);
1376 		if (rdev == rrdev)
1377 			rrdev = NULL;
1378 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1379 			atomic_inc(&rdev->nr_pending);
1380 			blocked_rdev = rdev;
1381 			break;
1382 		}
1383 		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1384 			atomic_inc(&rrdev->nr_pending);
1385 			blocked_rdev = rrdev;
1386 			break;
1387 		}
1388 		if (rdev && (test_bit(Faulty, &rdev->flags)))
1389 			rdev = NULL;
1390 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1391 			rrdev = NULL;
1392 
1393 		r10_bio->devs[i].bio = NULL;
1394 		r10_bio->devs[i].repl_bio = NULL;
1395 
1396 		if (!rdev && !rrdev) {
1397 			set_bit(R10BIO_Degraded, &r10_bio->state);
1398 			continue;
1399 		}
1400 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1401 			sector_t first_bad;
1402 			sector_t dev_sector = r10_bio->devs[i].addr;
1403 			int bad_sectors;
1404 			int is_bad;
1405 
1406 			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1407 					     &first_bad, &bad_sectors);
1408 			if (is_bad < 0) {
1409 				/* Mustn't write here until the bad block
1410 				 * is acknowledged
1411 				 */
1412 				atomic_inc(&rdev->nr_pending);
1413 				set_bit(BlockedBadBlocks, &rdev->flags);
1414 				blocked_rdev = rdev;
1415 				break;
1416 			}
1417 			if (is_bad && first_bad <= dev_sector) {
1418 				/* Cannot write here at all */
1419 				bad_sectors -= (dev_sector - first_bad);
1420 				if (bad_sectors < max_sectors)
1421 					/* Mustn't write more than bad_sectors
1422 					 * to other devices yet
1423 					 */
1424 					max_sectors = bad_sectors;
1425 				/* We don't set R10BIO_Degraded as that
1426 				 * only applies if the disk is missing,
1427 				 * so it might be re-added, and we want to
1428 				 * know to recover this chunk.
1429 				 * In this case the device is here, and the
1430 				 * fact that this chunk is not in-sync is
1431 				 * recorded in the bad block log.
1432 				 */
1433 				continue;
1434 			}
1435 			if (is_bad) {
1436 				int good_sectors = first_bad - dev_sector;
1437 				if (good_sectors < max_sectors)
1438 					max_sectors = good_sectors;
1439 			}
1440 		}
1441 		if (rdev) {
1442 			r10_bio->devs[i].bio = bio;
1443 			atomic_inc(&rdev->nr_pending);
1444 		}
1445 		if (rrdev) {
1446 			r10_bio->devs[i].repl_bio = bio;
1447 			atomic_inc(&rrdev->nr_pending);
1448 		}
1449 	}
1450 	rcu_read_unlock();
1451 
1452 	if (unlikely(blocked_rdev)) {
1453 		/* Have to wait for this device to get unblocked, then retry */
1454 		int j;
1455 		int d;
1456 
1457 		for (j = 0; j < i; j++) {
1458 			if (r10_bio->devs[j].bio) {
1459 				d = r10_bio->devs[j].devnum;
1460 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1461 			}
1462 			if (r10_bio->devs[j].repl_bio) {
1463 				struct md_rdev *rdev;
1464 				d = r10_bio->devs[j].devnum;
1465 				rdev = conf->mirrors[d].replacement;
1466 				if (!rdev) {
1467 					/* Race with remove_disk */
1468 					smp_mb();
1469 					rdev = conf->mirrors[d].rdev;
1470 				}
1471 				rdev_dec_pending(rdev, mddev);
1472 			}
1473 		}
1474 		allow_barrier(conf);
1475 		raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1476 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1477 		wait_barrier(conf);
1478 		goto retry_write;
1479 	}
1480 
1481 	if (max_sectors < r10_bio->sectors)
1482 		r10_bio->sectors = max_sectors;
1483 
1484 	if (r10_bio->sectors < bio_sectors(bio)) {
1485 		struct bio *split = bio_split(bio, r10_bio->sectors,
1486 					      GFP_NOIO, &conf->bio_split);
1487 		bio_chain(split, bio);
1488 		allow_barrier(conf);
1489 		generic_make_request(bio);
1490 		wait_barrier(conf);
1491 		bio = split;
1492 		r10_bio->master_bio = bio;
1493 	}
1494 
1495 	atomic_set(&r10_bio->remaining, 1);
1496 	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1497 
1498 	for (i = 0; i < conf->copies; i++) {
1499 		if (r10_bio->devs[i].bio)
1500 			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1501 		if (r10_bio->devs[i].repl_bio)
1502 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1503 	}
1504 	one_write_done(r10_bio);
1505 }
1506 
1507 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1508 {
1509 	struct r10conf *conf = mddev->private;
1510 	struct r10bio *r10_bio;
1511 
1512 	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1513 
1514 	r10_bio->master_bio = bio;
1515 	r10_bio->sectors = sectors;
1516 
1517 	r10_bio->mddev = mddev;
1518 	r10_bio->sector = bio->bi_iter.bi_sector;
1519 	r10_bio->state = 0;
1520 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
1521 
1522 	if (bio_data_dir(bio) == READ)
1523 		raid10_read_request(mddev, bio, r10_bio);
1524 	else
1525 		raid10_write_request(mddev, bio, r10_bio);
1526 }
1527 
1528 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1529 {
1530 	struct r10conf *conf = mddev->private;
1531 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1532 	int chunk_sects = chunk_mask + 1;
1533 	int sectors = bio_sectors(bio);
1534 
1535 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1536 		md_flush_request(mddev, bio);
1537 		return true;
1538 	}
1539 
1540 	if (!md_write_start(mddev, bio))
1541 		return false;
1542 
1543 	/*
1544 	 * If this request crosses a chunk boundary, we need to split
1545 	 * it.
1546 	 */
1547 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1548 		     sectors > chunk_sects
1549 		     && (conf->geo.near_copies < conf->geo.raid_disks
1550 			 || conf->prev.near_copies <
1551 			 conf->prev.raid_disks)))
1552 		sectors = chunk_sects -
1553 			(bio->bi_iter.bi_sector &
1554 			 (chunk_sects - 1));
1555 	__make_request(mddev, bio, sectors);
1556 
1557 	/* In case raid10d snuck in to freeze_array */
1558 	wake_up(&conf->wait_barrier);
1559 	return true;
1560 }
1561 
1562 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1563 {
1564 	struct r10conf *conf = mddev->private;
1565 	int i;
1566 
1567 	if (conf->geo.near_copies < conf->geo.raid_disks)
1568 		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1569 	if (conf->geo.near_copies > 1)
1570 		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1571 	if (conf->geo.far_copies > 1) {
1572 		if (conf->geo.far_offset)
1573 			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1574 		else
1575 			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1576 		if (conf->geo.far_set_size != conf->geo.raid_disks)
1577 			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1578 	}
1579 	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1580 					conf->geo.raid_disks - mddev->degraded);
1581 	rcu_read_lock();
1582 	for (i = 0; i < conf->geo.raid_disks; i++) {
1583 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1584 		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1585 	}
1586 	rcu_read_unlock();
1587 	seq_printf(seq, "]");
1588 }
1589 
1590 /* Check if there are enough drives for
1591  * every block to appear on at least one.
1592  * Don't consider the device numbered 'ignore'
1593  * as we might be about to remove it.
1594  */
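/*
 * Example: with raid_disks == 4 and near_copies == 2, blocks live on
 * device pairs {0,1} and {2,3}; the array survives losing one device
 * from each pair, but not both devices of the same pair.
 */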
1595 static int _enough(struct r10conf *conf, int previous, int ignore)
1596 {
1597 	int first = 0;
1598 	int has_enough = 0;
1599 	int disks, ncopies;
1600 	if (previous) {
1601 		disks = conf->prev.raid_disks;
1602 		ncopies = conf->prev.near_copies;
1603 	} else {
1604 		disks = conf->geo.raid_disks;
1605 		ncopies = conf->geo.near_copies;
1606 	}
1607 
1608 	rcu_read_lock();
1609 	do {
1610 		int n = conf->copies;
1611 		int cnt = 0;
1612 		int this = first;
1613 		while (n--) {
1614 			struct md_rdev *rdev;
1615 			if (this != ignore &&
1616 			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1617 			    test_bit(In_sync, &rdev->flags))
1618 				cnt++;
1619 			this = (this+1) % disks;
1620 		}
1621 		if (cnt == 0)
1622 			goto out;
1623 		first = (first + ncopies) % disks;
1624 	} while (first != 0);
1625 	has_enough = 1;
1626 out:
1627 	rcu_read_unlock();
1628 	return has_enough;
1629 }
1630 
1631 static int enough(struct r10conf *conf, int ignore)
1632 {
1633 	/* when calling 'enough', both 'prev' and 'geo' must
1634 	 * be stable.
1635 	 * This is ensured if ->reconfig_mutex or ->device_lock
1636 	 * is held.
1637 	 */
1638 	return _enough(conf, 0, ignore) &&
1639 		_enough(conf, 1, ignore);
1640 }
1641 
1642 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1643 {
1644 	char b[BDEVNAME_SIZE];
1645 	struct r10conf *conf = mddev->private;
1646 	unsigned long flags;
1647 
1648 	/*
1649 	 * If it is not operational, then we have already marked it as dead
1650 	 * else if it is the last working disk, ignore the error, let the
1651 	 * next level up know.
1652 	 * else mark the drive as failed
1653 	 */
1654 	spin_lock_irqsave(&conf->device_lock, flags);
1655 	if (test_bit(In_sync, &rdev->flags)
1656 	    && !enough(conf, rdev->raid_disk)) {
1657 		/*
1658 		 * Don't fail the drive, just return an IO error.
1659 		 */
1660 		spin_unlock_irqrestore(&conf->device_lock, flags);
1661 		return;
1662 	}
1663 	if (test_and_clear_bit(In_sync, &rdev->flags))
1664 		mddev->degraded++;
1665 	/*
1666 	 * If recovery is running, make sure it aborts.
1667 	 */
1668 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1669 	set_bit(Blocked, &rdev->flags);
1670 	set_bit(Faulty, &rdev->flags);
1671 	set_mask_bits(&mddev->sb_flags, 0,
1672 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1673 	spin_unlock_irqrestore(&conf->device_lock, flags);
1674 	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
1675 		"md/raid10:%s: Operation continuing on %d devices.\n",
1676 		mdname(mddev), bdevname(rdev->bdev, b),
1677 		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1678 }
1679 
1680 static void print_conf(struct r10conf *conf)
1681 {
1682 	int i;
1683 	struct md_rdev *rdev;
1684 
1685 	pr_debug("RAID10 conf printout:\n");
1686 	if (!conf) {
1687 		pr_debug("(!conf)\n");
1688 		return;
1689 	}
1690 	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1691 		 conf->geo.raid_disks);
1692 
1693 	/* This is only called with ->reconfig_mutex held, so
1694 	 * RCU protection of rdev is not needed */
1695 	for (i = 0; i < conf->geo.raid_disks; i++) {
1696 		char b[BDEVNAME_SIZE];
1697 		rdev = conf->mirrors[i].rdev;
1698 		if (rdev)
1699 			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1700 				 i, !test_bit(In_sync, &rdev->flags),
1701 				 !test_bit(Faulty, &rdev->flags),
1702 				 bdevname(rdev->bdev,b));
1703 	}
1704 }
1705 
1706 static void close_sync(struct r10conf *conf)
1707 {
1708 	wait_barrier(conf);
1709 	allow_barrier(conf);
1710 
1711 	mempool_exit(&conf->r10buf_pool);
1712 }
1713 
1714 static int raid10_spare_active(struct mddev *mddev)
1715 {
1716 	int i;
1717 	struct r10conf *conf = mddev->private;
1718 	struct raid10_info *tmp;
1719 	int count = 0;
1720 	unsigned long flags;
1721 
1722 	/*
1723 	 * Find all non-in_sync disks within the RAID10 configuration
1724 	 * and mark them in_sync
1725 	 */
1726 	for (i = 0; i < conf->geo.raid_disks; i++) {
1727 		tmp = conf->mirrors + i;
1728 		if (tmp->replacement
1729 		    && tmp->replacement->recovery_offset == MaxSector
1730 		    && !test_bit(Faulty, &tmp->replacement->flags)
1731 		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1732 			/* Replacement has just become active */
1733 			if (!tmp->rdev
1734 			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1735 				count++;
1736 			if (tmp->rdev) {
1737 				/* Replaced device not technically faulty,
1738 				 * but we need to be sure it gets removed
1739 				 * and never re-added.
1740 				 */
1741 				set_bit(Faulty, &tmp->rdev->flags);
1742 				sysfs_notify_dirent_safe(
1743 					tmp->rdev->sysfs_state);
1744 			}
1745 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1746 		} else if (tmp->rdev
1747 			   && tmp->rdev->recovery_offset == MaxSector
1748 			   && !test_bit(Faulty, &tmp->rdev->flags)
1749 			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1750 			count++;
1751 			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1752 		}
1753 	}
1754 	spin_lock_irqsave(&conf->device_lock, flags);
1755 	mddev->degraded -= count;
1756 	spin_unlock_irqrestore(&conf->device_lock, flags);
1757 
1758 	print_conf(conf);
1759 	return count;
1760 }
1761 
1762 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1763 {
1764 	struct r10conf *conf = mddev->private;
1765 	int err = -EEXIST;
1766 	int mirror;
1767 	int first = 0;
1768 	int last = conf->geo.raid_disks - 1;
1769 
1770 	if (mddev->recovery_cp < MaxSector)
1771 		/* only hot-add to in-sync arrays, as recovery is
1772 		 * very different from resync
1773 		 */
1774 		return -EBUSY;
1775 	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1776 		return -EINVAL;
1777 
1778 	if (md_integrity_add_rdev(rdev, mddev))
1779 		return -ENXIO;
1780 
1781 	if (rdev->raid_disk >= 0)
1782 		first = last = rdev->raid_disk;
1783 
1784 	if (rdev->saved_raid_disk >= first &&
1785 	    rdev->saved_raid_disk < conf->geo.raid_disks &&
1786 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1787 		mirror = rdev->saved_raid_disk;
1788 	else
1789 		mirror = first;
1790 	for ( ; mirror <= last ; mirror++) {
1791 		struct raid10_info *p = &conf->mirrors[mirror];
1792 		if (p->recovery_disabled == mddev->recovery_disabled)
1793 			continue;
1794 		if (p->rdev) {
1795 			if (!test_bit(WantReplacement, &p->rdev->flags) ||
1796 			    p->replacement != NULL)
1797 				continue;
1798 			clear_bit(In_sync, &rdev->flags);
1799 			set_bit(Replacement, &rdev->flags);
1800 			rdev->raid_disk = mirror;
1801 			err = 0;
1802 			if (mddev->gendisk)
1803 				disk_stack_limits(mddev->gendisk, rdev->bdev,
1804 						  rdev->data_offset << 9);
1805 			conf->fullsync = 1;
1806 			rcu_assign_pointer(p->replacement, rdev);
1807 			break;
1808 		}
1809 
1810 		if (mddev->gendisk)
1811 			disk_stack_limits(mddev->gendisk, rdev->bdev,
1812 					  rdev->data_offset << 9);
1813 
1814 		p->head_position = 0;
1815 		p->recovery_disabled = mddev->recovery_disabled - 1;
1816 		rdev->raid_disk = mirror;
1817 		err = 0;
1818 		if (rdev->saved_raid_disk != mirror)
1819 			conf->fullsync = 1;
1820 		rcu_assign_pointer(p->rdev, rdev);
1821 		break;
1822 	}
1823 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1824 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1825 
1826 	print_conf(conf);
1827 	return err;
1828 }
1829 
1830 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1831 {
1832 	struct r10conf *conf = mddev->private;
1833 	int err = 0;
1834 	int number = rdev->raid_disk;
1835 	struct md_rdev **rdevp;
1836 	struct raid10_info *p = conf->mirrors + number;
1837 
1838 	print_conf(conf);
1839 	if (rdev == p->rdev)
1840 		rdevp = &p->rdev;
1841 	else if (rdev == p->replacement)
1842 		rdevp = &p->replacement;
1843 	else
1844 		return 0;
1845 
1846 	if (test_bit(In_sync, &rdev->flags) ||
1847 	    atomic_read(&rdev->nr_pending)) {
1848 		err = -EBUSY;
1849 		goto abort;
1850 	}
1851 	/* Only remove non-faulty devices if recovery
1852 	 * is not possible.
1853 	 */
1854 	if (!test_bit(Faulty, &rdev->flags) &&
1855 	    mddev->recovery_disabled != p->recovery_disabled &&
1856 	    (!p->replacement || p->replacement == rdev) &&
1857 	    number < conf->geo.raid_disks &&
1858 	    enough(conf, -1)) {
1859 		err = -EBUSY;
1860 		goto abort;
1861 	}
1862 	*rdevp = NULL;
1863 	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1864 		synchronize_rcu();
1865 		if (atomic_read(&rdev->nr_pending)) {
1866 			/* lost the race, try later */
1867 			err = -EBUSY;
1868 			*rdevp = rdev;
1869 			goto abort;
1870 		}
1871 	}
1872 	if (p->replacement) {
1873 		/* We must have just cleared 'rdev' */
1874 		p->rdev = p->replacement;
1875 		clear_bit(Replacement, &p->replacement->flags);
1876 		smp_mb(); /* Make sure other CPUs may see both as identical
1877 			   * but will never see neither -- if they are careful.
1878 			   */
1879 		p->replacement = NULL;
1880 	}
1881 
1882 	clear_bit(WantReplacement, &rdev->flags);
1883 	err = md_integrity_register(mddev);
1884 
1885 abort:
1886 
1887 	print_conf(conf);
1888 	return err;
1889 }
1890 
1891 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
1892 {
1893 	struct r10conf *conf = r10_bio->mddev->private;
1894 
1895 	if (!bio->bi_status)
1896 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1897 	else
1898 		/* The write handler will notice the lack of
1899 		 * R10BIO_Uptodate and record any errors etc
1900 		 */
1901 		atomic_add(r10_bio->sectors,
1902 			   &conf->mirrors[d].rdev->corrected_errors);
1903 
1904 	/* for reconstruct, we always reschedule after a read.
1905 	 * for resync, only after all reads
1906 	 */
1907 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1908 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1909 	    atomic_dec_and_test(&r10_bio->remaining)) {
1910 		/* we have read all the blocks,
1911 		 * do the comparison in process context in raid10d
1912 		 */
1913 		reschedule_retry(r10_bio);
1914 	}
1915 }
1916 
1917 static void end_sync_read(struct bio *bio)
1918 {
1919 	struct r10bio *r10_bio = get_resync_r10bio(bio);
1920 	struct r10conf *conf = r10_bio->mddev->private;
1921 	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1922 
1923 	__end_sync_read(r10_bio, bio, d);
1924 }
1925 
1926 static void end_reshape_read(struct bio *bio)
1927 {
1928 	/* reshape read bio isn't allocated from r10buf_pool */
1929 	struct r10bio *r10_bio = bio->bi_private;
1930 
1931 	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
1932 }
1933 
1934 static void end_sync_request(struct r10bio *r10_bio)
1935 {
1936 	struct mddev *mddev = r10_bio->mddev;
1937 
1938 	while (atomic_dec_and_test(&r10_bio->remaining)) {
1939 		if (r10_bio->master_bio == NULL) {
1940 			/* the primary of several recovery bios */
1941 			sector_t s = r10_bio->sectors;
1942 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1943 			    test_bit(R10BIO_WriteError, &r10_bio->state))
1944 				reschedule_retry(r10_bio);
1945 			else
1946 				put_buf(r10_bio);
1947 			md_done_sync(mddev, s, 1);
1948 			break;
1949 		} else {
1950 			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1951 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1952 			    test_bit(R10BIO_WriteError, &r10_bio->state))
1953 				reschedule_retry(r10_bio);
1954 			else
1955 				put_buf(r10_bio);
1956 			r10_bio = r10_bio2;
1957 		}
1958 	}
1959 }
1960 
1961 static void end_sync_write(struct bio *bio)
1962 {
1963 	struct r10bio *r10_bio = get_resync_r10bio(bio);
1964 	struct mddev *mddev = r10_bio->mddev;
1965 	struct r10conf *conf = mddev->private;
1966 	int d;
1967 	sector_t first_bad;
1968 	int bad_sectors;
1969 	int slot;
1970 	int repl;
1971 	struct md_rdev *rdev = NULL;
1972 
1973 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1974 	if (repl)
1975 		rdev = conf->mirrors[d].replacement;
1976 	else
1977 		rdev = conf->mirrors[d].rdev;
1978 
1979 	if (bio->bi_status) {
1980 		if (repl)
1981 			md_error(mddev, rdev);
1982 		else {
1983 			set_bit(WriteErrorSeen, &rdev->flags);
1984 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
1985 				set_bit(MD_RECOVERY_NEEDED,
1986 					&rdev->mddev->recovery);
1987 			set_bit(R10BIO_WriteError, &r10_bio->state);
1988 		}
1989 	} else if (is_badblock(rdev,
1990 			     r10_bio->devs[slot].addr,
1991 			     r10_bio->sectors,
1992 			     &first_bad, &bad_sectors))
1993 		set_bit(R10BIO_MadeGood, &r10_bio->state);
1994 
1995 	rdev_dec_pending(rdev, mddev);
1996 
1997 	end_sync_request(r10_bio);
1998 }
1999 
2000 /*
2001  * Note: sync and recovery are handled very differently for raid10.
2002  * This code is for resync.
2003  * For resync, we read through virtual addresses and read all blocks.
2004  * If there is any error, we schedule a write.  The lowest numbered
2005  * drive is authoritative.
2006  * However, requests come in for physical addresses, so we need to map.
2007  * For every physical address there are raid_disks/copies virtual addresses,
2008  * which is always at least one, but is not necessarily an integer.
2009  * This means that a physical address can span multiple chunks, so we may
2010  * have to submit multiple io requests for a single sync request.
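 *
 * Worked example (hypothetical geometry): with raid_disks = 4 and
 * copies = 2, each physical address maps to 4/2 = 2 virtual addresses;
 * with raid_disks = 5 and copies = 2 the ratio is 2.5, so one sync
 * request may not line up with chunk boundaries and can fan out into
 * several IOs.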
2011  */
2012 /*
2013  * We check if all blocks are in-sync and only write to blocks that
2014  * aren't in sync
2015  */
2016 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2017 {
2018 	struct r10conf *conf = mddev->private;
2019 	int i, first;
2020 	struct bio *tbio, *fbio;
2021 	int vcnt;
2022 	struct page **tpages, **fpages;
2023 
2024 	atomic_set(&r10_bio->remaining, 1);
2025 
2026 	/* find the first device with a block */
2027 	for (i=0; i<conf->copies; i++)
2028 		if (!r10_bio->devs[i].bio->bi_status)
2029 			break;
2030 
2031 	if (i == conf->copies)
2032 		goto done;
2033 
2034 	first = i;
2035 	fbio = r10_bio->devs[i].bio;
2036 	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2037 	fbio->bi_iter.bi_idx = 0;
2038 	fpages = get_resync_pages(fbio)->pages;
2039 
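	/*
	 * vcnt below is the number of PAGE_SIZE vec entries needed, i.e.
	 * the sector count rounded up to whole pages.  E.g. with 4K pages
	 * (8 sectors each), r10_bio->sectors = 100 gives
	 * vcnt = (100 + 7) >> 3 = 13.
	 */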
2040 	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2041 	/* now find blocks with errors */
2042 	for (i=0 ; i < conf->copies ; i++) {
2043 		int  j, d;
2044 		struct md_rdev *rdev;
2045 		struct resync_pages *rp;
2046 
2047 		tbio = r10_bio->devs[i].bio;
2048 
2049 		if (tbio->bi_end_io != end_sync_read)
2050 			continue;
2051 		if (i == first)
2052 			continue;
2053 
2054 		tpages = get_resync_pages(tbio)->pages;
2055 		d = r10_bio->devs[i].devnum;
2056 		rdev = conf->mirrors[d].rdev;
2057 		if (!r10_bio->devs[i].bio->bi_status) {
2058 			/* We know that the bi_io_vec layout is the same for
2059 			 * both 'first' and 'i', so we just compare them.
2060 			 * All vec entries are PAGE_SIZE.
2061 			 */
2062 			int sectors = r10_bio->sectors;
2063 			for (j = 0; j < vcnt; j++) {
2064 				int len = PAGE_SIZE;
2065 				if (sectors < (len / 512))
2066 					len = sectors * 512;
2067 				if (memcmp(page_address(fpages[j]),
2068 					   page_address(tpages[j]),
2069 					   len))
2070 					break;
2071 				sectors -= len/512;
2072 			}
2073 			if (j == vcnt)
2074 				continue;
2075 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2076 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2077 				/* Don't fix anything. */
2078 				continue;
2079 		} else if (test_bit(FailFast, &rdev->flags)) {
2080 			/* Just give up on this device */
2081 			md_error(rdev->mddev, rdev);
2082 			continue;
2083 		}
2084 		/* Ok, we need to write this bio, either to correct an
2085 		 * inconsistency or to correct an unreadable block.
2086 		 * First we need to fixup bv_offset, bv_len and
2087 		 * bi_vecs, as the read request might have corrupted these
2088 		 */
2089 		rp = get_resync_pages(tbio);
2090 		bio_reset(tbio);
2091 
2092 		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2093 
2094 		rp->raid_bio = r10_bio;
2095 		tbio->bi_private = rp;
2096 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2097 		tbio->bi_end_io = end_sync_write;
2098 		bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
2099 
2100 		bio_copy_data(tbio, fbio);
2101 
2102 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2103 		atomic_inc(&r10_bio->remaining);
2104 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2105 
2106 		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2107 			tbio->bi_opf |= MD_FAILFAST;
2108 		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2109 		bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
2110 		generic_make_request(tbio);
2111 	}
2112 
2113 	/* Now write out to any replacement devices
2114 	 * that are active
2115 	 */
2116 	for (i = 0; i < conf->copies; i++) {
2117 		int d;
2118 
2119 		tbio = r10_bio->devs[i].repl_bio;
2120 		if (!tbio || !tbio->bi_end_io)
2121 			continue;
2122 		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2123 		    && r10_bio->devs[i].bio != fbio)
2124 			bio_copy_data(tbio, fbio);
2125 		d = r10_bio->devs[i].devnum;
2126 		atomic_inc(&r10_bio->remaining);
2127 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2128 			     bio_sectors(tbio));
2129 		generic_make_request(tbio);
2130 	}
2131 
2132 done:
2133 	if (atomic_dec_and_test(&r10_bio->remaining)) {
2134 		md_done_sync(mddev, r10_bio->sectors, 1);
2135 		put_buf(r10_bio);
2136 	}
2137 }
2138 
2139 /*
2140  * Now for the recovery code.
2141  * Recovery happens across physical sectors.
2142  * We recover all non-in_sync drives by finding the virtual address of
2143  * each, and then choosing a working drive that also has that virt address.
2144  * There is a separate r10_bio for each non-in_sync drive.
2145  * Only the first two slots are in use: the first for reading,
2146  * the second for writing.
2147  *
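 * Concretely, fix_recovery_read_error() below reads from
 * devs[0].devnum and writes to devs[1].devnum.
 *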
2148  */
2149 static void fix_recovery_read_error(struct r10bio *r10_bio)
2150 {
2151 	/* We got a read error during recovery.
2152 	 * We repeat the read in smaller page-sized sections.
2153 	 * If a read succeeds, write it to the new device or record
2154 	 * a bad block if we cannot.
2155 	 * If a read fails, record a bad block on both old and
2156 	 * new devices.
2157 	 */
2158 	struct mddev *mddev = r10_bio->mddev;
2159 	struct r10conf *conf = mddev->private;
2160 	struct bio *bio = r10_bio->devs[0].bio;
2161 	sector_t sect = 0;
2162 	int sectors = r10_bio->sectors;
2163 	int idx = 0;
2164 	int dr = r10_bio->devs[0].devnum;
2165 	int dw = r10_bio->devs[1].devnum;
2166 	struct page **pages = get_resync_pages(bio)->pages;
2167 
2168 	while (sectors) {
2169 		int s = sectors;
2170 		struct md_rdev *rdev;
2171 		sector_t addr;
2172 		int ok;
2173 
2174 		if (s > (PAGE_SIZE>>9))
2175 			s = PAGE_SIZE >> 9;
2176 
2177 		rdev = conf->mirrors[dr].rdev;
2178 		addr = r10_bio->devs[0].addr + sect;
2179 		ok = sync_page_io(rdev,
2180 				  addr,
2181 				  s << 9,
2182 				  pages[idx],
2183 				  REQ_OP_READ, 0, false);
2184 		if (ok) {
2185 			rdev = conf->mirrors[dw].rdev;
2186 			addr = r10_bio->devs[1].addr + sect;
2187 			ok = sync_page_io(rdev,
2188 					  addr,
2189 					  s << 9,
2190 					  pages[idx],
2191 					  REQ_OP_WRITE, 0, false);
2192 			if (!ok) {
2193 				set_bit(WriteErrorSeen, &rdev->flags);
2194 				if (!test_and_set_bit(WantReplacement,
2195 						      &rdev->flags))
2196 					set_bit(MD_RECOVERY_NEEDED,
2197 						&rdev->mddev->recovery);
2198 			}
2199 		}
2200 		if (!ok) {
2201 			/* We don't worry if we cannot set a bad block -
2202 			 * it really is bad so there is no loss in not
2203 			 * recording it yet
2204 			 */
2205 			rdev_set_badblocks(rdev, addr, s, 0);
2206 
2207 			if (rdev != conf->mirrors[dw].rdev) {
2208 				/* need bad block on destination too */
2209 				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2210 				addr = r10_bio->devs[1].addr + sect;
2211 				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2212 				if (!ok) {
2213 					/* just abort the recovery */
2214 					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2215 						  mdname(mddev));
2216 
2217 					conf->mirrors[dw].recovery_disabled
2218 						= mddev->recovery_disabled;
2219 					set_bit(MD_RECOVERY_INTR,
2220 						&mddev->recovery);
2221 					break;
2222 				}
2223 			}
2224 		}
2225 
2226 		sectors -= s;
2227 		sect += s;
2228 		idx++;
2229 	}
2230 }
2231 
2232 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2233 {
2234 	struct r10conf *conf = mddev->private;
2235 	int d;
2236 	struct bio *wbio, *wbio2;
2237 
2238 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2239 		fix_recovery_read_error(r10_bio);
2240 		end_sync_request(r10_bio);
2241 		return;
2242 	}
2243 
2244 	/*
2245 	 * share the pages with the first bio
2246 	 * and submit the write request
2247 	 */
2248 	d = r10_bio->devs[1].devnum;
2249 	wbio = r10_bio->devs[1].bio;
2250 	wbio2 = r10_bio->devs[1].repl_bio;
2251 	/* Need to test wbio2->bi_end_io before we call
2252 	 * generic_make_request because, if the former is NULL,
2253 	 * the latter is free to free wbio2.
2254 	 */
2255 	if (wbio2 && !wbio2->bi_end_io)
2256 		wbio2 = NULL;
2257 	if (wbio->bi_end_io) {
2258 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2259 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2260 		generic_make_request(wbio);
2261 	}
2262 	if (wbio2) {
2263 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2264 		md_sync_acct(conf->mirrors[d].replacement->bdev,
2265 			     bio_sectors(wbio2));
2266 		generic_make_request(wbio2);
2267 	}
2268 }
2269 
2270 /*
2271  * Used by fix_read_error() to decay the per rdev read_errors.
2272  * We halve the read error count for every hour that has elapsed
2273  * since the last recorded read error.
2274  *
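 * E.g. a device with read_errors = 40 whose last error was two hours
 * ago decays to 40 >> 2 = 10 before the caller counts the new error.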
2275  */
2276 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2277 {
2278 	long cur_time_mon;
2279 	unsigned long hours_since_last;
2280 	unsigned int read_errors = atomic_read(&rdev->read_errors);
2281 
2282 	cur_time_mon = ktime_get_seconds();
2283 
2284 	if (rdev->last_read_error == 0) {
2285 		/* first time we've seen a read error */
2286 		rdev->last_read_error = cur_time_mon;
2287 		return;
2288 	}
2289 
2290 	hours_since_last = (long)(cur_time_mon -
2291 			    rdev->last_read_error) / 3600;
2292 
2293 	rdev->last_read_error = cur_time_mon;
2294 
2295 	/*
2296 	 * if hours_since_last is > the number of bits in read_errors
2297 	 * just set read errors to 0. We do this to avoid
2298 	 * overflowing the shift of read_errors by hours_since_last.
2299 	 */
2300 	if (hours_since_last >= 8 * sizeof(read_errors))
2301 		atomic_set(&rdev->read_errors, 0);
2302 	else
2303 		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2304 }
2305 
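/*
 * Returns 1 if the IO succeeded, 0 if it failed and the failure was
 * recorded (a bad block was set, or the device was failed), and -1 if
 * the range overlaps a known bad block so the IO was never attempted.
 */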
2306 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2307 			    int sectors, struct page *page, int rw)
2308 {
2309 	sector_t first_bad;
2310 	int bad_sectors;
2311 
2312 	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2313 	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2314 		return -1;
2315 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2316 		/* success */
2317 		return 1;
2318 	if (rw == WRITE) {
2319 		set_bit(WriteErrorSeen, &rdev->flags);
2320 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2321 			set_bit(MD_RECOVERY_NEEDED,
2322 				&rdev->mddev->recovery);
2323 	}
2324 	/* need to record an error - either for the block or the device */
2325 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2326 		md_error(rdev->mddev, rdev);
2327 	return 0;
2328 }
2329 
2330 /*
2331  * This is a kernel thread which:
2332  *
2333  *	1.	Retries failed read operations on working mirrors.
2334  *	2.	Updates the raid superblock when problems are encountered.
2335  *	3.	Performs writes following reads for array synchronising.
2336  */
2337 
2338 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2339 {
2340 	int sect = 0; /* Offset from r10_bio->sector */
2341 	int sectors = r10_bio->sectors;
2342 	struct md_rdev *rdev;
2343 	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2344 	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2345 
2346 	/* still own a reference to this rdev, so it cannot
2347 	 * have been cleared recently.
2348 	 */
2349 	rdev = conf->mirrors[d].rdev;
2350 
2351 	if (test_bit(Faulty, &rdev->flags))
2352 		/* drive has already been failed, just ignore any
2353 		   more fix_read_error() attempts */
2354 		return;
2355 
2356 	check_decay_read_errors(mddev, rdev);
2357 	atomic_inc(&rdev->read_errors);
2358 	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2359 		char b[BDEVNAME_SIZE];
2360 		bdevname(rdev->bdev, b);
2361 
2362 		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2363 			  mdname(mddev), b,
2364 			  atomic_read(&rdev->read_errors), max_read_errors);
2365 		pr_notice("md/raid10:%s: %s: Failing raid device\n",
2366 			  mdname(mddev), b);
2367 		md_error(mddev, rdev);
2368 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2369 		return;
2370 	}
2371 
2372 	while(sectors) {
2373 		int s = sectors;
2374 		int sl = r10_bio->read_slot;
2375 		int success = 0;
2376 		int start;
2377 
2378 		if (s > (PAGE_SIZE>>9))
2379 			s = PAGE_SIZE >> 9;
2380 
2381 		rcu_read_lock();
2382 		do {
2383 			sector_t first_bad;
2384 			int bad_sectors;
2385 
2386 			d = r10_bio->devs[sl].devnum;
2387 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2388 			if (rdev &&
2389 			    test_bit(In_sync, &rdev->flags) &&
2390 			    !test_bit(Faulty, &rdev->flags) &&
2391 			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2392 					&first_bad, &bad_sectors) == 0) {
2393 				atomic_inc(&rdev->nr_pending);
2394 				rcu_read_unlock();
2395 				success = sync_page_io(rdev,
2396 						       r10_bio->devs[sl].addr +
2397 						       sect,
2398 						       s<<9,
2399 						       conf->tmppage,
2400 						       REQ_OP_READ, 0, false);
2401 				rdev_dec_pending(rdev, mddev);
2402 				rcu_read_lock();
2403 				if (success)
2404 					break;
2405 			}
2406 			sl++;
2407 			if (sl == conf->copies)
2408 				sl = 0;
2409 		} while (!success && sl != r10_bio->read_slot);
2410 		rcu_read_unlock();
2411 
2412 		if (!success) {
2413 			/* Cannot read from anywhere, just mark the block
2414 			 * as bad on the first device to discourage future
2415 			 * reads.
2416 			 */
2417 			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2418 			rdev = conf->mirrors[dn].rdev;
2419 
2420 			if (!rdev_set_badblocks(
2421 				    rdev,
2422 				    r10_bio->devs[r10_bio->read_slot].addr
2423 				    + sect,
2424 				    s, 0)) {
2425 				md_error(mddev, rdev);
2426 				r10_bio->devs[r10_bio->read_slot].bio
2427 					= IO_BLOCKED;
2428 			}
2429 			break;
2430 		}
2431 
2432 		start = sl;
2433 		/* write it back and re-read */
2434 		rcu_read_lock();
2435 		while (sl != r10_bio->read_slot) {
2436 			char b[BDEVNAME_SIZE];
2437 
2438 			if (sl==0)
2439 				sl = conf->copies;
2440 			sl--;
2441 			d = r10_bio->devs[sl].devnum;
2442 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2443 			if (!rdev ||
2444 			    test_bit(Faulty, &rdev->flags) ||
2445 			    !test_bit(In_sync, &rdev->flags))
2446 				continue;
2447 
2448 			atomic_inc(&rdev->nr_pending);
2449 			rcu_read_unlock();
2450 			if (r10_sync_page_io(rdev,
2451 					     r10_bio->devs[sl].addr +
2452 					     sect,
2453 					     s, conf->tmppage, WRITE)
2454 			    == 0) {
2455 				/* Well, this device is dead */
2456 				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2457 					  mdname(mddev), s,
2458 					  (unsigned long long)(
2459 						  sect +
2460 						  choose_data_offset(r10_bio,
2461 								     rdev)),
2462 					  bdevname(rdev->bdev, b));
2463 				pr_notice("md/raid10:%s: %s: failing drive\n",
2464 					  mdname(mddev),
2465 					  bdevname(rdev->bdev, b));
2466 			}
2467 			rdev_dec_pending(rdev, mddev);
2468 			rcu_read_lock();
2469 		}
2470 		sl = start;
2471 		while (sl != r10_bio->read_slot) {
2472 			char b[BDEVNAME_SIZE];
2473 
2474 			if (sl==0)
2475 				sl = conf->copies;
2476 			sl--;
2477 			d = r10_bio->devs[sl].devnum;
2478 			rdev = rcu_dereference(conf->mirrors[d].rdev);
2479 			if (!rdev ||
2480 			    test_bit(Faulty, &rdev->flags) ||
2481 			    !test_bit(In_sync, &rdev->flags))
2482 				continue;
2483 
2484 			atomic_inc(&rdev->nr_pending);
2485 			rcu_read_unlock();
2486 			switch (r10_sync_page_io(rdev,
2487 					     r10_bio->devs[sl].addr +
2488 					     sect,
2489 					     s, conf->tmppage,
2490 						 READ)) {
2491 			case 0:
2492 				/* Well, this device is dead */
2493 				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
2494 				       mdname(mddev), s,
2495 				       (unsigned long long)(
2496 					       sect +
2497 					       choose_data_offset(r10_bio, rdev)),
2498 				       bdevname(rdev->bdev, b));
2499 				pr_notice("md/raid10:%s: %s: failing drive\n",
2500 				       mdname(mddev),
2501 				       bdevname(rdev->bdev, b));
2502 				break;
2503 			case 1:
2504 				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
2505 				       mdname(mddev), s,
2506 				       (unsigned long long)(
2507 					       sect +
2508 					       choose_data_offset(r10_bio, rdev)),
2509 				       bdevname(rdev->bdev, b));
2510 				atomic_add(s, &rdev->corrected_errors);
2511 			}
2512 
2513 			rdev_dec_pending(rdev, mddev);
2514 			rcu_read_lock();
2515 		}
2516 		rcu_read_unlock();
2517 
2518 		sectors -= s;
2519 		sect += s;
2520 	}
2521 }
2522 
2523 static int narrow_write_error(struct r10bio *r10_bio, int i)
2524 {
2525 	struct bio *bio = r10_bio->master_bio;
2526 	struct mddev *mddev = r10_bio->mddev;
2527 	struct r10conf *conf = mddev->private;
2528 	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2529 	/* bio has the data to be written to slot 'i' where
2530 	 * we just recently had a write error.
2531 	 * We repeatedly clone the bio and trim down to one block,
2532 	 * then try the write.  Where the write fails we record
2533 	 * a bad block.
2534 	 * It is conceivable that the bio doesn't exactly align with
2535 	 * blocks.  We must handle this.
2536 	 *
2537 	 * We currently own a reference to the rdev.
2538 	 */
2539 
2540 	int block_sectors;
2541 	sector_t sector;
2542 	int sectors;
2543 	int sect_to_write = r10_bio->sectors;
2544 	int ok = 1;
2545 
2546 	if (rdev->badblocks.shift < 0)
2547 		return 0;
2548 
2549 	block_sectors = roundup(1 << rdev->badblocks.shift,
2550 				bdev_logical_block_size(rdev->bdev) >> 9);
2551 	sector = r10_bio->sector;
2552 	sectors = ((r10_bio->sector + block_sectors)
2553 		   & ~(sector_t)(block_sectors - 1))
2554 		- sector;
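	/*
	 * The first write is trimmed so that later writes start on a
	 * block_sectors boundary.  E.g. (hypothetical values) with
	 * block_sectors = 8 and r10_bio->sector = 1003, the first pass
	 * writes ((1003 + 8) & ~7) - 1003 = 5 sectors and every later
	 * pass writes whole 8-sector blocks.
	 */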
2555 
2556 	while (sect_to_write) {
2557 		struct bio *wbio;
2558 		sector_t wsector;
2559 		if (sectors > sect_to_write)
2560 			sectors = sect_to_write;
2561 		/* Write at 'sector' for 'sectors' */
2562 		wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
2563 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2564 		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2565 		wbio->bi_iter.bi_sector = wsector +
2566 				   choose_data_offset(r10_bio, rdev);
2567 		bio_set_dev(wbio, rdev->bdev);
2568 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2569 
2570 		if (submit_bio_wait(wbio) < 0)
2571 			/* Failure! */
2572 			ok = rdev_set_badblocks(rdev, wsector,
2573 						sectors, 0)
2574 				&& ok;
2575 
2576 		bio_put(wbio);
2577 		sect_to_write -= sectors;
2578 		sector += sectors;
2579 		sectors = block_sectors;
2580 	}
2581 	return ok;
2582 }
2583 
2584 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2585 {
2586 	int slot = r10_bio->read_slot;
2587 	struct bio *bio;
2588 	struct r10conf *conf = mddev->private;
2589 	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2590 
2591 	/* we got a read error. Maybe the drive is bad.  Maybe just
2592 	 * the block and we can fix it.
2593 	 * We freeze all other IO, and try reading the block from
2594 	 * other devices.  When we find one, we re-write
2595 	 * and check whether that fixes the read error.
2596 	 * This is all done synchronously while the array is
2597 	 * frozen.
2598 	 */
2599 	bio = r10_bio->devs[slot].bio;
2600 	bio_put(bio);
2601 	r10_bio->devs[slot].bio = NULL;
2602 
2603 	if (mddev->ro)
2604 		r10_bio->devs[slot].bio = IO_BLOCKED;
2605 	else if (!test_bit(FailFast, &rdev->flags)) {
2606 		freeze_array(conf, 1);
2607 		fix_read_error(conf, mddev, r10_bio);
2608 		unfreeze_array(conf);
2609 	} else
2610 		md_error(mddev, rdev);
2611 
2612 	rdev_dec_pending(rdev, mddev);
2613 	allow_barrier(conf);
2614 	r10_bio->state = 0;
2615 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2616 }
2617 
2618 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2619 {
2620 	/* Some sort of write request has finished and it
2621 	 * succeeded in writing where we thought there was a
2622 	 * bad block.  So forget the bad block.
2623 	 * Or possibly it failed, and we need to record
2624 	 * a bad block.
2625 	 */
2626 	int m;
2627 	struct md_rdev *rdev;
2628 
2629 	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2630 	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2631 		for (m = 0; m < conf->copies; m++) {
2632 			int dev = r10_bio->devs[m].devnum;
2633 			rdev = conf->mirrors[dev].rdev;
2634 			if (r10_bio->devs[m].bio == NULL ||
2635 				r10_bio->devs[m].bio->bi_end_io == NULL)
2636 				continue;
2637 			if (!r10_bio->devs[m].bio->bi_status) {
2638 				rdev_clear_badblocks(
2639 					rdev,
2640 					r10_bio->devs[m].addr,
2641 					r10_bio->sectors, 0);
2642 			} else {
2643 				if (!rdev_set_badblocks(
2644 					    rdev,
2645 					    r10_bio->devs[m].addr,
2646 					    r10_bio->sectors, 0))
2647 					md_error(conf->mddev, rdev);
2648 			}
2649 			rdev = conf->mirrors[dev].replacement;
2650 			if (r10_bio->devs[m].repl_bio == NULL ||
2651 				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2652 				continue;
2653 
2654 			if (!r10_bio->devs[m].repl_bio->bi_status) {
2655 				rdev_clear_badblocks(
2656 					rdev,
2657 					r10_bio->devs[m].addr,
2658 					r10_bio->sectors, 0);
2659 			} else {
2660 				if (!rdev_set_badblocks(
2661 					    rdev,
2662 					    r10_bio->devs[m].addr,
2663 					    r10_bio->sectors, 0))
2664 					md_error(conf->mddev, rdev);
2665 			}
2666 		}
2667 		put_buf(r10_bio);
2668 	} else {
2669 		bool fail = false;
2670 		for (m = 0; m < conf->copies; m++) {
2671 			int dev = r10_bio->devs[m].devnum;
2672 			struct bio *bio = r10_bio->devs[m].bio;
2673 			rdev = conf->mirrors[dev].rdev;
2674 			if (bio == IO_MADE_GOOD) {
2675 				rdev_clear_badblocks(
2676 					rdev,
2677 					r10_bio->devs[m].addr,
2678 					r10_bio->sectors, 0);
2679 				rdev_dec_pending(rdev, conf->mddev);
2680 			} else if (bio != NULL && bio->bi_status) {
2681 				fail = true;
2682 				if (!narrow_write_error(r10_bio, m)) {
2683 					md_error(conf->mddev, rdev);
2684 					set_bit(R10BIO_Degraded,
2685 						&r10_bio->state);
2686 				}
2687 				rdev_dec_pending(rdev, conf->mddev);
2688 			}
2689 			bio = r10_bio->devs[m].repl_bio;
2690 			rdev = conf->mirrors[dev].replacement;
2691 			if (rdev && bio == IO_MADE_GOOD) {
2692 				rdev_clear_badblocks(
2693 					rdev,
2694 					r10_bio->devs[m].addr,
2695 					r10_bio->sectors, 0);
2696 				rdev_dec_pending(rdev, conf->mddev);
2697 			}
2698 		}
2699 		if (fail) {
2700 			spin_lock_irq(&conf->device_lock);
2701 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2702 			conf->nr_queued++;
2703 			spin_unlock_irq(&conf->device_lock);
2704 			/*
2705 			 * In case freeze_array() is waiting for condition
2706 			 * nr_pending == nr_queued + extra to be true.
2707 			 */
2708 			wake_up(&conf->wait_barrier);
2709 			md_wakeup_thread(conf->mddev->thread);
2710 		} else {
2711 			if (test_bit(R10BIO_WriteError,
2712 				     &r10_bio->state))
2713 				close_write(r10_bio);
2714 			raid_end_bio_io(r10_bio);
2715 		}
2716 	}
2717 }
2718 
2719 static void raid10d(struct md_thread *thread)
2720 {
2721 	struct mddev *mddev = thread->mddev;
2722 	struct r10bio *r10_bio;
2723 	unsigned long flags;
2724 	struct r10conf *conf = mddev->private;
2725 	struct list_head *head = &conf->retry_list;
2726 	struct blk_plug plug;
2727 
2728 	md_check_recovery(mddev);
2729 
2730 	if (!list_empty_careful(&conf->bio_end_io_list) &&
2731 	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2732 		LIST_HEAD(tmp);
2733 		spin_lock_irqsave(&conf->device_lock, flags);
2734 		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2735 			while (!list_empty(&conf->bio_end_io_list)) {
2736 				list_move(conf->bio_end_io_list.prev, &tmp);
2737 				conf->nr_queued--;
2738 			}
2739 		}
2740 		spin_unlock_irqrestore(&conf->device_lock, flags);
2741 		while (!list_empty(&tmp)) {
2742 			r10_bio = list_first_entry(&tmp, struct r10bio,
2743 						   retry_list);
2744 			list_del(&r10_bio->retry_list);
2745 			if (mddev->degraded)
2746 				set_bit(R10BIO_Degraded, &r10_bio->state);
2747 
2748 			if (test_bit(R10BIO_WriteError,
2749 				     &r10_bio->state))
2750 				close_write(r10_bio);
2751 			raid_end_bio_io(r10_bio);
2752 		}
2753 	}
2754 
2755 	blk_start_plug(&plug);
2756 	for (;;) {
2757 
2758 		flush_pending_writes(conf);
2759 
2760 		spin_lock_irqsave(&conf->device_lock, flags);
2761 		if (list_empty(head)) {
2762 			spin_unlock_irqrestore(&conf->device_lock, flags);
2763 			break;
2764 		}
2765 		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2766 		list_del(head->prev);
2767 		conf->nr_queued--;
2768 		spin_unlock_irqrestore(&conf->device_lock, flags);
2769 
2770 		mddev = r10_bio->mddev;
2771 		conf = mddev->private;
2772 		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2773 		    test_bit(R10BIO_WriteError, &r10_bio->state))
2774 			handle_write_completed(conf, r10_bio);
2775 		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2776 			reshape_request_write(mddev, r10_bio);
2777 		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2778 			sync_request_write(mddev, r10_bio);
2779 		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2780 			recovery_request_write(mddev, r10_bio);
2781 		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2782 			handle_read_error(mddev, r10_bio);
2783 		else
2784 			WARN_ON_ONCE(1);
2785 
2786 		cond_resched();
2787 		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2788 			md_check_recovery(mddev);
2789 	}
2790 	blk_finish_plug(&plug);
2791 }
2792 
2793 static int init_resync(struct r10conf *conf)
2794 {
2795 	int ret, buffs, i;
2796 
2797 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2798 	BUG_ON(mempool_initialized(&conf->r10buf_pool));
2799 	conf->have_replacement = 0;
2800 	for (i = 0; i < conf->geo.raid_disks; i++)
2801 		if (conf->mirrors[i].replacement)
2802 			conf->have_replacement = 1;
2803 	ret = mempool_init(&conf->r10buf_pool, buffs,
2804 			   r10buf_pool_alloc, r10buf_pool_free, conf);
2805 	if (ret)
2806 		return ret;
2807 	conf->next_resync = 0;
2808 	return 0;
2809 }
2810 
2811 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
2812 {
2813 	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
2814 	struct resync_pages *rp;
2815 	struct bio *bio;
2816 	int nalloc;
2817 	int i;
2818 
2819 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
2820 	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
2821 		nalloc = conf->copies; /* resync */
2822 	else
2823 		nalloc = 2; /* recovery */
2824 
2825 	for (i = 0; i < nalloc; i++) {
2826 		bio = r10bio->devs[i].bio;
2827 		rp = bio->bi_private;
2828 		bio_reset(bio);
2829 		bio->bi_private = rp;
2830 		bio = r10bio->devs[i].repl_bio;
2831 		if (bio) {
2832 			rp = bio->bi_private;
2833 			bio_reset(bio);
2834 			bio->bi_private = rp;
2835 		}
2836 	}
2837 	return r10bio;
2838 }
2839 
2840 /*
2841  * Set cluster_sync_high since we need other nodes to add the
2842  * range [cluster_sync_low, cluster_sync_high] to suspend list.
2843  */
2844 static void raid10_set_cluster_sync_high(struct r10conf *conf)
2845 {
2846 	sector_t window_size;
2847 	int extra_chunk, chunks;
2848 
2849 	/*
2850 	 * First, define a "stripe" as a unit that spans all member
2851 	 * devices exactly once, so the number of chunks per stripe
2852 	 * is raid_disks / near_copies.  Without this, if near_copies
2853 	 * is close to raid_disks, the resync window would grow
2854 	 * linearly with raid_disks, which means we would suspend a
2855 	 * really large IO window when it is not necessary.  If
2856 	 * raid_disks is not divisible by near_copies, an extra chunk
2857 	 * is needed to ensure the whole "stripe" is
2858 	 * covered.
2859 	 */
2860 
2861 	chunks = conf->geo.raid_disks / conf->geo.near_copies;
2862 	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
2863 		extra_chunk = 0;
2864 	else
2865 		extra_chunk = 1;
2866 	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
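	/*
	 * E.g. (hypothetical geometry) raid_disks = 5, near_copies = 2:
	 * chunks = 2 with a remainder, so extra_chunk = 1 and the window
	 * spans 3 chunks worth of sectors (before the 32M floor below).
	 */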
2867 
2868 	/*
2869 	 * At least use a 32M window to align with raid1's resync window
2870 	 */
2871 	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
2872 			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
2873 
2874 	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
2875 }
2876 
2877 /*
2878  * perform a "sync" on one "block"
2879  *
2880  * We need to make sure that no normal I/O requests - particularly write
2881  * requests - conflict with active sync requests.
2882  *
2883  * This is achieved by tracking pending requests and a 'barrier' concept
2884  * that can be installed to exclude normal IO requests.
2885  *
2886  * Resync and recovery are handled very differently.
2887  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2888  *
2889  * For resync, we iterate over virtual addresses, read all copies,
2890  * and update if there are differences.  If only one copy is live,
2891  * skip it.
2892  * For recovery, we iterate over physical addresses, read a good
2893  * value for each non-in_sync drive, and over-write.
2894  *
2895  * So, for recovery we may have several outstanding complex requests for a
2896  * given address, one for each out-of-sync device.  We model this by allocating
2897  * a number of r10_bio structures, one for each out-of-sync device.
2898  * As we set up these structures, we collect all bios together into a list
2899  * which we then process collectively to add pages, and then process again
2900  * to pass to generic_make_request.
2901  *
2902  * The r10_bio structures are linked using a borrowed master_bio pointer.
2903  * This link is counted in ->remaining.  When the r10_bio that points to NULL
2904  * has its remaining count decremented to 0, the whole complex operation
2905  * is complete.
2906  *
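 * Sketch: if three r10_bios A, B and C are allocated in that order
 * for one recovery pass, the borrowed pointers form
 *   C->master_bio -> B, B->master_bio -> A, A->master_bio -> NULL,
 * and each link holds a reference in the target's ->remaining, so A
 * completes last.
 *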
2907  */
2908 
2909 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2910 			     int *skipped)
2911 {
2912 	struct r10conf *conf = mddev->private;
2913 	struct r10bio *r10_bio;
2914 	struct bio *biolist = NULL, *bio;
2915 	sector_t max_sector, nr_sectors;
2916 	int i;
2917 	int max_sync;
2918 	sector_t sync_blocks;
2919 	sector_t sectors_skipped = 0;
2920 	int chunks_skipped = 0;
2921 	sector_t chunk_mask = conf->geo.chunk_mask;
2922 	int page_idx = 0;
2923 
2924 	if (!mempool_initialized(&conf->r10buf_pool))
2925 		if (init_resync(conf))
2926 			return 0;
2927 
2928 	/*
2929 	 * Allow skipping a full rebuild for incremental assembly
2930 	 * of a clean array, like RAID1 does.
2931 	 */
2932 	if (mddev->bitmap == NULL &&
2933 	    mddev->recovery_cp == MaxSector &&
2934 	    mddev->reshape_position == MaxSector &&
2935 	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2936 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2937 	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2938 	    conf->fullsync == 0) {
2939 		*skipped = 1;
2940 		return mddev->dev_sectors - sector_nr;
2941 	}
2942 
2943  skipped:
2944 	max_sector = mddev->dev_sectors;
2945 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2946 	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2947 		max_sector = mddev->resync_max_sectors;
2948 	if (sector_nr >= max_sector) {
2949 		conf->cluster_sync_low = 0;
2950 		conf->cluster_sync_high = 0;
2951 
2952 		/* If we aborted, we need to abort the
2953 		 * sync on the 'current' bitmap chunks (there can
2954 		 * be several when recovering multiple devices),
2955 		 * as we may have started syncing them but not finished.
2956 		 * We can find the current address in
2957 		 * mddev->curr_resync, but for recovery,
2958 		 * we need to convert that to several
2959 		 * virtual addresses.
2960 		 */
2961 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2962 			end_reshape(conf);
2963 			close_sync(conf);
2964 			return 0;
2965 		}
2966 
2967 		if (mddev->curr_resync < max_sector) { /* aborted */
2968 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2969 				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2970 						   &sync_blocks, 1);
2971 			else for (i = 0; i < conf->geo.raid_disks; i++) {
2972 				sector_t sect =
2973 					raid10_find_virt(conf, mddev->curr_resync, i);
2974 				md_bitmap_end_sync(mddev->bitmap, sect,
2975 						   &sync_blocks, 1);
2976 			}
2977 		} else {
2978 			/* completed sync */
2979 			if ((!mddev->bitmap || conf->fullsync)
2980 			    && conf->have_replacement
2981 			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2982 				/* Completed a full sync so the replacements
2983 				 * are now fully recovered.
2984 				 */
2985 				rcu_read_lock();
2986 				for (i = 0; i < conf->geo.raid_disks; i++) {
2987 					struct md_rdev *rdev =
2988 						rcu_dereference(conf->mirrors[i].replacement);
2989 					if (rdev)
2990 						rdev->recovery_offset = MaxSector;
2991 				}
2992 				rcu_read_unlock();
2993 			}
2994 			conf->fullsync = 0;
2995 		}
2996 		md_bitmap_close_sync(mddev->bitmap);
2997 		close_sync(conf);
2998 		*skipped = 1;
2999 		return sectors_skipped;
3000 	}
3001 
3002 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3003 		return reshape_request(mddev, sector_nr, skipped);
3004 
3005 	if (chunks_skipped >= conf->geo.raid_disks) {
3006 		/* if there has been nothing to do on any drive,
3007 		 * then there is nothing to do at all..
3008 		 */
3009 		*skipped = 1;
3010 		return (max_sector - sector_nr) + sectors_skipped;
3011 	}
3012 
3013 	if (max_sector > mddev->resync_max)
3014 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3015 
3016 	/* make sure whole request will fit in a chunk - if chunks
3017 	 * are meaningful
3018 	 */
3019 	if (conf->geo.near_copies < conf->geo.raid_disks &&
3020 	    max_sector > (sector_nr | chunk_mask))
3021 		max_sector = (sector_nr | chunk_mask) + 1;
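	/*
	 * (sector_nr | chunk_mask) + 1 rounds up to the next chunk
	 * boundary.  E.g. with 64-sector chunks (chunk_mask = 63) and
	 * sector_nr = 100, max_sector is clamped to (100 | 63) + 1 = 128.
	 */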
3022 
3023 	/*
3024 	 * If there is non-resync activity waiting for a turn, then let it
3025 	 * through before starting on this new sync request.
3026 	 */
3027 	if (conf->nr_waiting)
3028 		schedule_timeout_uninterruptible(1);
3029 
3030 	/* Again, very different code for resync and recovery.
3031 	 * Both must result in an r10bio with a list of bios that
3032 	 * have bi_end_io, bi_sector, bi_disk set,
3033 	 * and bi_private set to the r10bio.
3034 	 * For recovery, we may actually create several r10bios
3035 	 * with 2 bios in each, that correspond to the bios in the main one.
3036 	 * In this case, the subordinate r10bios link back through a
3037 	 * borrowed master_bio pointer, and the counter in the master
3038 	 * includes a ref from each subordinate.
3039 	 */
3040 	/* First, we decide what to do and set ->bi_end_io
3041 	 * To end_sync_read if we want to read, and
3042 	 * end_sync_write if we will want to write.
3043 	 */
3044 
3045 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3046 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3047 		/* recovery... the complicated one */
3048 		int j;
3049 		r10_bio = NULL;
3050 
3051 		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3052 			int still_degraded;
3053 			struct r10bio *rb2;
3054 			sector_t sect;
3055 			int must_sync;
3056 			int any_working;
3057 			int need_recover = 0;
3058 			int need_replace = 0;
3059 			struct raid10_info *mirror = &conf->mirrors[i];
3060 			struct md_rdev *mrdev, *mreplace;
3061 
3062 			rcu_read_lock();
3063 			mrdev = rcu_dereference(mirror->rdev);
3064 			mreplace = rcu_dereference(mirror->replacement);
3065 
3066 			if (mrdev != NULL &&
3067 			    !test_bit(Faulty, &mrdev->flags) &&
3068 			    !test_bit(In_sync, &mrdev->flags))
3069 				need_recover = 1;
3070 			if (mreplace != NULL &&
3071 			    !test_bit(Faulty, &mreplace->flags))
3072 				need_replace = 1;
3073 
3074 			if (!need_recover && !need_replace) {
3075 				rcu_read_unlock();
3076 				continue;
3077 			}
3078 
3079 			still_degraded = 0;
3080 			/* want to reconstruct this device */
3081 			rb2 = r10_bio;
3082 			sect = raid10_find_virt(conf, sector_nr, i);
3083 			if (sect >= mddev->resync_max_sectors) {
3084 				/* last stripe is not complete - don't
3085 				 * try to recover this sector.
3086 				 */
3087 				rcu_read_unlock();
3088 				continue;
3089 			}
3090 			if (mreplace && test_bit(Faulty, &mreplace->flags))
3091 				mreplace = NULL;
3092 			/* Unless we are doing a full sync or a replacement,
3093 			 * we only need to recover the block if it is set in
3094 			 * the bitmap
3095 			 */
3096 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3097 							 &sync_blocks, 1);
3098 			if (sync_blocks < max_sync)
3099 				max_sync = sync_blocks;
3100 			if (!must_sync &&
3101 			    mreplace == NULL &&
3102 			    !conf->fullsync) {
3103 				/* yep, skip the sync_blocks here, but don't assume
3104 				 * that there will never be anything to do here
3105 				 */
3106 				chunks_skipped = -1;
3107 				rcu_read_unlock();
3108 				continue;
3109 			}
3110 			atomic_inc(&mrdev->nr_pending);
3111 			if (mreplace)
3112 				atomic_inc(&mreplace->nr_pending);
3113 			rcu_read_unlock();
3114 
3115 			r10_bio = raid10_alloc_init_r10buf(conf);
3116 			r10_bio->state = 0;
3117 			raise_barrier(conf, rb2 != NULL);
3118 			atomic_set(&r10_bio->remaining, 0);
3119 
3120 			r10_bio->master_bio = (struct bio*)rb2;
3121 			if (rb2)
3122 				atomic_inc(&rb2->remaining);
3123 			r10_bio->mddev = mddev;
3124 			set_bit(R10BIO_IsRecover, &r10_bio->state);
3125 			r10_bio->sector = sect;
3126 
3127 			raid10_find_phys(conf, r10_bio);
3128 
3129 			/* Need to check if the array will still be
3130 			 * degraded
3131 			 */
3132 			rcu_read_lock();
3133 			for (j = 0; j < conf->geo.raid_disks; j++) {
3134 				struct md_rdev *rdev = rcu_dereference(
3135 					conf->mirrors[j].rdev);
3136 				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3137 					still_degraded = 1;
3138 					break;
3139 				}
3140 			}
3141 
3142 			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3143 							 &sync_blocks, still_degraded);
3144 
3145 			any_working = 0;
3146 			for (j=0; j<conf->copies;j++) {
3147 				int k;
3148 				int d = r10_bio->devs[j].devnum;
3149 				sector_t from_addr, to_addr;
3150 				struct md_rdev *rdev =
3151 					rcu_dereference(conf->mirrors[d].rdev);
3152 				sector_t sector, first_bad;
3153 				int bad_sectors;
3154 				if (!rdev ||
3155 				    !test_bit(In_sync, &rdev->flags))
3156 					continue;
3157 				/* This is where we read from */
3158 				any_working = 1;
3159 				sector = r10_bio->devs[j].addr;
3160 
3161 				if (is_badblock(rdev, sector, max_sync,
3162 						&first_bad, &bad_sectors)) {
3163 					if (first_bad > sector)
3164 						max_sync = first_bad - sector;
3165 					else {
3166 						bad_sectors -= (sector
3167 								- first_bad);
3168 						if (max_sync > bad_sectors)
3169 							max_sync = bad_sectors;
3170 						continue;
3171 					}
3172 				}
3173 				bio = r10_bio->devs[0].bio;
3174 				bio->bi_next = biolist;
3175 				biolist = bio;
3176 				bio->bi_end_io = end_sync_read;
3177 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
3178 				if (test_bit(FailFast, &rdev->flags))
3179 					bio->bi_opf |= MD_FAILFAST;
3180 				from_addr = r10_bio->devs[j].addr;
3181 				bio->bi_iter.bi_sector = from_addr +
3182 					rdev->data_offset;
3183 				bio_set_dev(bio, rdev->bdev);
3184 				atomic_inc(&rdev->nr_pending);
3185 				/* and we write to 'i' (if not in_sync) */
3186 
3187 				for (k=0; k<conf->copies; k++)
3188 					if (r10_bio->devs[k].devnum == i)
3189 						break;
3190 				BUG_ON(k == conf->copies);
3191 				to_addr = r10_bio->devs[k].addr;
3192 				r10_bio->devs[0].devnum = d;
3193 				r10_bio->devs[0].addr = from_addr;
3194 				r10_bio->devs[1].devnum = i;
3195 				r10_bio->devs[1].addr = to_addr;
3196 
3197 				if (need_recover) {
3198 					bio = r10_bio->devs[1].bio;
3199 					bio->bi_next = biolist;
3200 					biolist = bio;
3201 					bio->bi_end_io = end_sync_write;
3202 					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3203 					bio->bi_iter.bi_sector = to_addr
3204 						+ mrdev->data_offset;
3205 					bio_set_dev(bio, mrdev->bdev);
3206 					atomic_inc(&r10_bio->remaining);
3207 				} else
3208 					r10_bio->devs[1].bio->bi_end_io = NULL;
3209 
3210 				/* and maybe write to replacement */
3211 				bio = r10_bio->devs[1].repl_bio;
3212 				if (bio)
3213 					bio->bi_end_io = NULL;
3214 				/* Note: if need_replace, then bio
3215 				 * cannot be NULL as r10buf_pool_alloc will
3216 				 * have allocated it.
3217 				 */
3218 				if (!need_replace)
3219 					break;
3220 				bio->bi_next = biolist;
3221 				biolist = bio;
3222 				bio->bi_end_io = end_sync_write;
3223 				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3224 				bio->bi_iter.bi_sector = to_addr +
3225 					mreplace->data_offset;
3226 				bio_set_dev(bio, mreplace->bdev);
3227 				atomic_inc(&r10_bio->remaining);
3228 				break;
3229 			}
3230 			rcu_read_unlock();
3231 			if (j == conf->copies) {
3232 				/* Cannot recover, so abort the recovery or
3233 				 * record a bad block */
3234 				if (any_working) {
3235 					/* problem is that there are bad blocks
3236 					 * on other device(s)
3237 					 */
3238 					int k;
3239 					for (k = 0; k < conf->copies; k++)
3240 						if (r10_bio->devs[k].devnum == i)
3241 							break;
3242 					if (!test_bit(In_sync,
3243 						      &mrdev->flags)
3244 					    && !rdev_set_badblocks(
3245 						    mrdev,
3246 						    r10_bio->devs[k].addr,
3247 						    max_sync, 0))
3248 						any_working = 0;
3249 					if (mreplace &&
3250 					    !rdev_set_badblocks(
3251 						    mreplace,
3252 						    r10_bio->devs[k].addr,
3253 						    max_sync, 0))
3254 						any_working = 0;
3255 				}
3256 				if (!any_working)  {
3257 					if (!test_and_set_bit(MD_RECOVERY_INTR,
3258 							      &mddev->recovery))
3259 						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3260 						       mdname(mddev));
3261 					mirror->recovery_disabled
3262 						= mddev->recovery_disabled;
3263 				}
3264 				put_buf(r10_bio);
3265 				if (rb2)
3266 					atomic_dec(&rb2->remaining);
3267 				r10_bio = rb2;
3268 				rdev_dec_pending(mrdev, mddev);
3269 				if (mreplace)
3270 					rdev_dec_pending(mreplace, mddev);
3271 				break;
3272 			}
3273 			rdev_dec_pending(mrdev, mddev);
3274 			if (mreplace)
3275 				rdev_dec_pending(mreplace, mddev);
3276 			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3277 				/* Only want this if there is elsewhere to
3278 				 * read from. 'j' is currently the first
3279 				 * readable copy.
3280 				 */
3281 				int targets = 1;
3282 				for (; j < conf->copies; j++) {
3283 					int d = r10_bio->devs[j].devnum;
3284 					if (conf->mirrors[d].rdev &&
3285 					    test_bit(In_sync,
3286 						      &conf->mirrors[d].rdev->flags))
3287 						targets++;
3288 				}
3289 				if (targets == 1)
3290 					r10_bio->devs[0].bio->bi_opf
3291 						&= ~MD_FAILFAST;
3292 			}
3293 		}
3294 		if (biolist == NULL) {
3295 			while (r10_bio) {
3296 				struct r10bio *rb2 = r10_bio;
3297 				r10_bio = (struct r10bio*) rb2->master_bio;
3298 				rb2->master_bio = NULL;
3299 				put_buf(rb2);
3300 			}
3301 			goto giveup;
3302 		}
3303 	} else {
3304 		/* resync. Schedule a read for every block at this virt offset */
3305 		int count = 0;
3306 
3307 		/*
3308 		 * cluster_sync_low will be set from curr_resync_completed,
3309 		 * which may not be updated in time.  Check against
3310 		 * "sector_nr + 2 * RESYNC_SECTORS" to be safe; this
3311 		 * ensures curr_resync_completed is updated in
3312 		 * md_bitmap_cond_end_sync.
3313 		 */
3314 		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3315 					mddev_is_clustered(mddev) &&
3316 					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3317 
3318 		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3319 					  &sync_blocks, mddev->degraded) &&
3320 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3321 						 &mddev->recovery)) {
3322 			/* We can skip this block */
3323 			*skipped = 1;
3324 			return sync_blocks + sectors_skipped;
3325 		}
3326 		if (sync_blocks < max_sync)
3327 			max_sync = sync_blocks;
3328 		r10_bio = raid10_alloc_init_r10buf(conf);
3329 		r10_bio->state = 0;
3330 
3331 		r10_bio->mddev = mddev;
3332 		atomic_set(&r10_bio->remaining, 0);
3333 		raise_barrier(conf, 0);
3334 		conf->next_resync = sector_nr;
3335 
3336 		r10_bio->master_bio = NULL;
3337 		r10_bio->sector = sector_nr;
3338 		set_bit(R10BIO_IsSync, &r10_bio->state);
3339 		raid10_find_phys(conf, r10_bio);
3340 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3341 
3342 		for (i = 0; i < conf->copies; i++) {
3343 			int d = r10_bio->devs[i].devnum;
3344 			sector_t first_bad, sector;
3345 			int bad_sectors;
3346 			struct md_rdev *rdev;
3347 
3348 			if (r10_bio->devs[i].repl_bio)
3349 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3350 
3351 			bio = r10_bio->devs[i].bio;
3352 			bio->bi_status = BLK_STS_IOERR;
3353 			rcu_read_lock();
3354 			rdev = rcu_dereference(conf->mirrors[d].rdev);
3355 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3356 				rcu_read_unlock();
3357 				continue;
3358 			}
3359 			sector = r10_bio->devs[i].addr;
3360 			if (is_badblock(rdev, sector, max_sync,
3361 					&first_bad, &bad_sectors)) {
3362 				if (first_bad > sector)
3363 					max_sync = first_bad - sector;
3364 				else {
3365 					bad_sectors -= (sector - first_bad);
3366 					if (max_sync > bad_sectors)
3367 						max_sync = bad_sectors;
3368 					rcu_read_unlock();
3369 					continue;
3370 				}
3371 			}
3372 			atomic_inc(&rdev->nr_pending);
3373 			atomic_inc(&r10_bio->remaining);
3374 			bio->bi_next = biolist;
3375 			biolist = bio;
3376 			bio->bi_end_io = end_sync_read;
3377 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
3378 			if (test_bit(FailFast, &rdev->flags))
3379 				bio->bi_opf |= MD_FAILFAST;
3380 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3381 			bio_set_dev(bio, rdev->bdev);
3382 			count++;
3383 
3384 			rdev = rcu_dereference(conf->mirrors[d].replacement);
3385 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3386 				rcu_read_unlock();
3387 				continue;
3388 			}
3389 			atomic_inc(&rdev->nr_pending);
3390 
3391 			/* Need to set up for writing to the replacement */
3392 			bio = r10_bio->devs[i].repl_bio;
3393 			bio->bi_status = BLK_STS_IOERR;
3394 
3395 			sector = r10_bio->devs[i].addr;
3396 			bio->bi_next = biolist;
3397 			biolist = bio;
3398 			bio->bi_end_io = end_sync_write;
3399 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3400 			if (test_bit(FailFast, &rdev->flags))
3401 				bio->bi_opf |= MD_FAILFAST;
3402 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3403 			bio_set_dev(bio, rdev->bdev);
3404 			count++;
3405 			rcu_read_unlock();
3406 		}
3407 
3408 		if (count < 2) {
3409 			for (i=0; i<conf->copies; i++) {
3410 				int d = r10_bio->devs[i].devnum;
3411 				if (r10_bio->devs[i].bio->bi_end_io)
3412 					rdev_dec_pending(conf->mirrors[d].rdev,
3413 							 mddev);
3414 				if (r10_bio->devs[i].repl_bio &&
3415 				    r10_bio->devs[i].repl_bio->bi_end_io)
3416 					rdev_dec_pending(
3417 						conf->mirrors[d].replacement,
3418 						mddev);
3419 			}
3420 			put_buf(r10_bio);
3421 			biolist = NULL;
3422 			goto giveup;
3423 		}
3424 	}
3425 
3426 	nr_sectors = 0;
3427 	if (sector_nr + max_sync < max_sector)
3428 		max_sector = sector_nr + max_sync;
3429 	do {
3430 		struct page *page;
3431 		int len = PAGE_SIZE;
3432 		if (sector_nr + (len>>9) > max_sector)
3433 			len = (max_sector - sector_nr) << 9;
3434 		if (len == 0)
3435 			break;
3436 		for (bio= biolist ; bio ; bio=bio->bi_next) {
3437 			struct resync_pages *rp = get_resync_pages(bio);
3438 			page = resync_fetch_page(rp, page_idx);
3439 			/*
3440 			 * won't fail because the vec table is big enough
3441 			 * to hold all these pages
3442 			 */
3443 			bio_add_page(bio, page, len, 0);
3444 		}
3445 		nr_sectors += len>>9;
3446 		sector_nr += len>>9;
3447 	} while (++page_idx < RESYNC_PAGES);
3448 	r10_bio->sectors = nr_sectors;
3449 
3450 	if (mddev_is_clustered(mddev) &&
3451 	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3452 		/* It is resync not recovery */
3453 		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3454 			conf->cluster_sync_low = mddev->curr_resync_completed;
3455 			raid10_set_cluster_sync_high(conf);
3456 			/* Send resync message */
3457 			md_cluster_ops->resync_info_update(mddev,
3458 						conf->cluster_sync_low,
3459 						conf->cluster_sync_high);
3460 		}
3461 	} else if (mddev_is_clustered(mddev)) {
3462 		/* This is recovery not resync */
3463 		sector_t sect_va1, sect_va2;
3464 		bool broadcast_msg = false;
3465 
3466 		for (i = 0; i < conf->geo.raid_disks; i++) {
3467 			/*
3468 			 * sector_nr is a device address for recovery, so we
3469 			 * need to translate it to an array address before
3470 			 * comparing with cluster_sync_high.
3471 			 */
3472 			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3473 
3474 			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3475 				broadcast_msg = true;
3476 				/*
3477 				 * curr_resync_completed is similar to
3478 				 * sector_nr, so translate it too.
3479 				 */
3480 				sect_va2 = raid10_find_virt(conf,
3481 					mddev->curr_resync_completed, i);
3482 
3483 				if (conf->cluster_sync_low == 0 ||
3484 				    conf->cluster_sync_low > sect_va2)
3485 					conf->cluster_sync_low = sect_va2;
3486 			}
3487 		}
3488 		if (broadcast_msg) {
3489 			raid10_set_cluster_sync_high(conf);
3490 			md_cluster_ops->resync_info_update(mddev,
3491 						conf->cluster_sync_low,
3492 						conf->cluster_sync_high);
3493 		}
3494 	}
3495 
3496 	while (biolist) {
3497 		bio = biolist;
3498 		biolist = biolist->bi_next;
3499 
3500 		bio->bi_next = NULL;
3501 		r10_bio = get_resync_r10bio(bio);
3502 		r10_bio->sectors = nr_sectors;
3503 
3504 		if (bio->bi_end_io == end_sync_read) {
3505 			md_sync_acct_bio(bio, nr_sectors);
3506 			bio->bi_status = 0;
3507 			generic_make_request(bio);
3508 		}
3509 	}
3510 
3511 	if (sectors_skipped)
3512 		/* pretend they weren't skipped, as it makes
3513 		 * no important difference in this case
3514 		 */
3515 		md_done_sync(mddev, sectors_skipped, 1);
3516 
3517 	return sectors_skipped + nr_sectors;
3518  giveup:
3519 	/* There is nowhere to write: all non-sync drives
3520 	 * are failed or in resync, or all drives have a
3521 	 * bad block, so try the next chunk...
3522 	 */
3523 	if (sector_nr + max_sync < max_sector)
3524 		max_sector = sector_nr + max_sync;
3525 
3526 	sectors_skipped += (max_sector - sector_nr);
3527 	chunks_skipped++;
3528 	sector_nr = max_sector;
3529 	goto skipped;
3530 }
3531 
3532 static sector_t
3533 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3534 {
3535 	sector_t size;
3536 	struct r10conf *conf = mddev->private;
3537 
3538 	if (!raid_disks)
3539 		raid_disks = min(conf->geo.raid_disks,
3540 				 conf->prev.raid_disks);
3541 	if (!sectors)
3542 		sectors = conf->dev_sectors;
3543 
3544 	size = sectors >> conf->geo.chunk_shift;
3545 	sector_div(size, conf->geo.far_copies);
3546 	size = size * raid_disks;
3547 	sector_div(size, conf->geo.near_copies);
3548 
3549 	return size << conf->geo.chunk_shift;
3550 }
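
/*
 * Worked example (hypothetical geometry, not taken from any particular
 * array): raid_disks = 4, near_copies = 2, far_copies = 1,
 * chunk_shift = 7 (64KiB chunks) and dev_sectors = 1048576 (512MiB per
 * device).  raid10_size() then computes:
 *   1048576 >> 7           = 8192 chunks per device
 *   8192 / far_copies(1)   = 8192
 *   8192 * raid_disks(4)   = 32768
 *   32768 / near_copies(2) = 16384 array chunks
 *   16384 << 7             = 2097152 sectors (1GiB),
 * i.e. half the raw capacity, as expected with two copies of each chunk.
 */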
3551 
3552 static void calc_sectors(struct r10conf *conf, sector_t size)
3553 {
3554 	/* Calculate the number of sectors-per-device that will
3555 	 * actually be used, and set conf->dev_sectors and
3556 	 * conf->stride
3557 	 */
3558 
3559 	size = size >> conf->geo.chunk_shift;
3560 	sector_div(size, conf->geo.far_copies);
3561 	size = size * conf->geo.raid_disks;
3562 	sector_div(size, conf->geo.near_copies);
3563 	/* 'size' is now the number of chunks in the array */
3564 	/* calculate "used chunks per device" */
3565 	size = size * conf->copies;
3566 
3567 	/* We need to round up when dividing by raid_disks to
3568 	 * get the stride size.
3569 	 */
3570 	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3571 
3572 	conf->dev_sectors = size << conf->geo.chunk_shift;
3573 
3574 	if (conf->geo.far_offset)
3575 		conf->geo.stride = 1 << conf->geo.chunk_shift;
3576 	else {
3577 		sector_div(size, conf->geo.far_copies);
3578 		conf->geo.stride = size << conf->geo.chunk_shift;
3579 	}
3580 }
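
/*
 * Continuing the hypothetical example above (raid_disks = 4,
 * near_copies = 2, far_copies = 1, chunk_shift = 7) with
 * size = 1048576 sectors per device: 1048576 >> 7 = 8192 chunks;
 * / 1 * 4 / 2 = 16384 chunks in the array; * copies(2) = 32768 chunk
 * copies; DIV_ROUND_UP(32768, 4) = 8192 used chunks per device, so
 * dev_sectors = 8192 << 7 = 1048576.  With far_offset clear and
 * far_copies = 1, the stride works out to the whole device:
 * sector_div(8192, 1) << 7 = 1048576 sectors.
 */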
3581 
3582 enum geo_type {geo_new, geo_old, geo_start};
3583 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3584 {
3585 	int nc, fc, fo;
3586 	int layout, chunk, disks;
3587 	switch (new) {
3588 	case geo_old:
3589 		layout = mddev->layout;
3590 		chunk = mddev->chunk_sectors;
3591 		disks = mddev->raid_disks - mddev->delta_disks;
3592 		break;
3593 	case geo_new:
3594 		layout = mddev->new_layout;
3595 		chunk = mddev->new_chunk_sectors;
3596 		disks = mddev->raid_disks;
3597 		break;
3598 	default: /* avoid 'may be unused' warnings */
3599 	case geo_start: /* new when starting reshape - raid_disks not
3600 			 * updated yet. */
3601 		layout = mddev->new_layout;
3602 		chunk = mddev->new_chunk_sectors;
3603 		disks = mddev->raid_disks + mddev->delta_disks;
3604 		break;
3605 	}
3606 	if (layout >> 19)
3607 		return -1;
3608 	if (chunk < (PAGE_SIZE >> 9) ||
3609 	    !is_power_of_2(chunk))
3610 		return -2;
3611 	nc = layout & 255;
3612 	fc = (layout >> 8) & 255;
3613 	fo = layout & (1<<16);
3614 	geo->raid_disks = disks;
3615 	geo->near_copies = nc;
3616 	geo->far_copies = fc;
3617 	geo->far_offset = fo;
3618 	switch (layout >> 17) {
3619 	case 0:	/* original layout.  simple but not always optimal */
3620 		geo->far_set_size = disks;
3621 		break;
3622 	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3623 		 * actually using this, but leave code here just in case. */
3624 		geo->far_set_size = disks/fc;
3625 		WARN(geo->far_set_size < fc,
3626 		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3627 		break;
3628 	case 2: /* "improved" layout fixed to match documentation */
3629 		geo->far_set_size = fc * nc;
3630 		break;
3631 	default: /* Not a valid layout */
3632 		return -1;
3633 	}
3634 	geo->chunk_mask = chunk - 1;
3635 	geo->chunk_shift = ffz(~chunk);
3636 	return nc*fc;
3637 }
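
/*
 * Example (illustrative only): the common "near=2" layout is
 * layout = 0x102, so nc = 0x102 & 255 = 2, fc = (0x102 >> 8) & 255 = 1,
 * fo = 0 (bit 16 clear), and layout >> 17 = 0 selects the original
 * far_set_size (= disks).  With chunk = 128 sectors, chunk_mask = 127
 * and chunk_shift = ffz(~128) = 7.  The return value nc * fc = 2 is
 * the total number of copies of each chunk.
 */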
3638 
3639 static struct r10conf *setup_conf(struct mddev *mddev)
3640 {
3641 	struct r10conf *conf = NULL;
3642 	int err = -EINVAL;
3643 	struct geom geo;
3644 	int copies;
3645 
3646 	copies = setup_geo(&geo, mddev, geo_new);
3647 
3648 	if (copies == -2) {
3649 		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3650 			mdname(mddev), PAGE_SIZE);
3651 		goto out;
3652 	}
3653 
3654 	if (copies < 2 || copies > mddev->raid_disks) {
3655 		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%08x\n",
3656 			mdname(mddev), mddev->new_layout);
3657 		goto out;
3658 	}
3659 
3660 	err = -ENOMEM;
3661 	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3662 	if (!conf)
3663 		goto out;
3664 
3665 	/* FIXME calc properly */
3666 	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3667 				sizeof(struct raid10_info),
3668 				GFP_KERNEL);
3669 	if (!conf->mirrors)
3670 		goto out;
3671 
3672 	conf->tmppage = alloc_page(GFP_KERNEL);
3673 	if (!conf->tmppage)
3674 		goto out;
3675 
3676 	conf->geo = geo;
3677 	conf->copies = copies;
3678 	err = mempool_init(&conf->r10bio_pool, NR_RAID10_BIOS, r10bio_pool_alloc,
3679 			   r10bio_pool_free, conf);
3680 	if (err)
3681 		goto out;
3682 
3683 	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3684 	if (err)
3685 		goto out;
3686 
3687 	calc_sectors(conf, mddev->dev_sectors);
3688 	if (mddev->reshape_position == MaxSector) {
3689 		conf->prev = conf->geo;
3690 		conf->reshape_progress = MaxSector;
3691 	} else {
3692 		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3693 			err = -EINVAL;
3694 			goto out;
3695 		}
3696 		conf->reshape_progress = mddev->reshape_position;
3697 		if (conf->prev.far_offset)
3698 			conf->prev.stride = 1 << conf->prev.chunk_shift;
3699 		else
3700 			/* far_copies must be 1 */
3701 			conf->prev.stride = conf->dev_sectors;
3702 	}
3703 	conf->reshape_safe = conf->reshape_progress;
3704 	spin_lock_init(&conf->device_lock);
3705 	INIT_LIST_HEAD(&conf->retry_list);
3706 	INIT_LIST_HEAD(&conf->bio_end_io_list);
3707 
3708 	spin_lock_init(&conf->resync_lock);
3709 	init_waitqueue_head(&conf->wait_barrier);
3710 	atomic_set(&conf->nr_pending, 0);
3711 
3712 	err = -ENOMEM;
3713 	conf->thread = md_register_thread(raid10d, mddev, "raid10");
3714 	if (!conf->thread)
3715 		goto out;
3716 
3717 	conf->mddev = mddev;
3718 	return conf;
3719 
3720  out:
3721 	if (conf) {
3722 		mempool_exit(&conf->r10bio_pool);
3723 		kfree(conf->mirrors);
3724 		safe_put_page(conf->tmppage);
3725 		bioset_exit(&conf->bio_split);
3726 		kfree(conf);
3727 	}
3728 	return ERR_PTR(err);
3729 }
3730 
3731 static int raid10_run(struct mddev *mddev)
3732 {
3733 	struct r10conf *conf;
3734 	int i, disk_idx, chunk_size;
3735 	struct raid10_info *disk;
3736 	struct md_rdev *rdev;
3737 	sector_t size;
3738 	sector_t min_offset_diff = 0;
3739 	int first = 1;
3740 	bool discard_supported = false;
3741 
3742 	if (mddev_init_writes_pending(mddev) < 0)
3743 		return -ENOMEM;
3744 
3745 	if (mddev->private == NULL) {
3746 		conf = setup_conf(mddev);
3747 		if (IS_ERR(conf))
3748 			return PTR_ERR(conf);
3749 		mddev->private = conf;
3750 	}
3751 	conf = mddev->private;
3752 	if (!conf)
3753 		goto out;
3754 
3755 	if (mddev_is_clustered(conf->mddev)) {
3756 		int fc, fo;
3757 
3758 		fc = (mddev->layout >> 8) & 255;
3759 		fo = mddev->layout & (1<<16);
3760 		if (fc > 1 || fo > 0) {
3761 			pr_err("only near layout is supported by clustered raid10\n");
3763 			goto out_free_conf;
3764 		}
3765 	}
3766 
3767 	mddev->thread = conf->thread;
3768 	conf->thread = NULL;
3769 
3770 	chunk_size = mddev->chunk_sectors << 9;
3771 	if (mddev->queue) {
3772 		blk_queue_max_discard_sectors(mddev->queue,
3773 					      mddev->chunk_sectors);
3774 		blk_queue_max_write_same_sectors(mddev->queue, 0);
3775 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3776 		blk_queue_io_min(mddev->queue, chunk_size);
3777 		if (conf->geo.raid_disks % conf->geo.near_copies)
3778 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3779 		else
3780 			blk_queue_io_opt(mddev->queue, chunk_size *
3781 					 (conf->geo.raid_disks / conf->geo.near_copies));
3782 	}
3783 
3784 	rdev_for_each(rdev, mddev) {
3785 		long long diff;
3786 
3787 		disk_idx = rdev->raid_disk;
3788 		if (disk_idx < 0)
3789 			continue;
3790 		if (disk_idx >= conf->geo.raid_disks &&
3791 		    disk_idx >= conf->prev.raid_disks)
3792 			continue;
3793 		disk = conf->mirrors + disk_idx;
3794 
3795 		if (test_bit(Replacement, &rdev->flags)) {
3796 			if (disk->replacement)
3797 				goto out_free_conf;
3798 			disk->replacement = rdev;
3799 		} else {
3800 			if (disk->rdev)
3801 				goto out_free_conf;
3802 			disk->rdev = rdev;
3803 		}
3804 		diff = (rdev->new_data_offset - rdev->data_offset);
3805 		if (!mddev->reshape_backwards)
3806 			diff = -diff;
3807 		if (diff < 0)
3808 			diff = 0;
3809 		if (first || diff < min_offset_diff)
3810 			min_offset_diff = diff;
3811 
3812 		if (mddev->gendisk)
3813 			disk_stack_limits(mddev->gendisk, rdev->bdev,
3814 					  rdev->data_offset << 9);
3815 
3816 		disk->head_position = 0;
3817 
3818 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3819 			discard_supported = true;
3820 		first = 0;
3821 	}
3822 
3823 	if (mddev->queue) {
3824 		if (discard_supported)
3825 			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3826 						mddev->queue);
3827 		else
3828 			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3829 						  mddev->queue);
3830 	}
3831 	/* need to check that every block has at least one working mirror */
3832 	if (!enough(conf, -1)) {
3833 		pr_err("md/raid10:%s: not enough operational mirrors.\n",
3834 		       mdname(mddev));
3835 		goto out_free_conf;
3836 	}
3837 
3838 	if (conf->reshape_progress != MaxSector) {
3839 		/* must ensure that shape change is supported */
3840 		if (conf->geo.far_copies != 1 &&
3841 		    conf->geo.far_offset == 0)
3842 			goto out_free_conf;
3843 		if (conf->prev.far_copies != 1 &&
3844 		    conf->prev.far_offset == 0)
3845 			goto out_free_conf;
3846 	}
3847 
3848 	mddev->degraded = 0;
3849 	for (i = 0;
3850 	     i < conf->geo.raid_disks
3851 		     || i < conf->prev.raid_disks;
3852 	     i++) {
3853 
3854 		disk = conf->mirrors + i;
3855 
3856 		if (!disk->rdev && disk->replacement) {
3857 			/* The replacement is all we have - use it */
3858 			disk->rdev = disk->replacement;
3859 			disk->replacement = NULL;
3860 			clear_bit(Replacement, &disk->rdev->flags);
3861 		}
3862 
3863 		if (!disk->rdev ||
3864 		    !test_bit(In_sync, &disk->rdev->flags)) {
3865 			disk->head_position = 0;
3866 			mddev->degraded++;
3867 			if (disk->rdev &&
3868 			    disk->rdev->saved_raid_disk < 0)
3869 				conf->fullsync = 1;
3870 		}
3871 
3872 		if (disk->replacement &&
3873 		    !test_bit(In_sync, &disk->replacement->flags) &&
3874 		    disk->replacement->saved_raid_disk < 0) {
3875 			conf->fullsync = 1;
3876 		}
3877 
3878 		disk->recovery_disabled = mddev->recovery_disabled - 1;
3879 	}
3880 
3881 	if (mddev->recovery_cp != MaxSector)
3882 		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
3883 			  mdname(mddev));
3884 	pr_info("md/raid10:%s: active with %d out of %d devices\n",
3885 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3886 		conf->geo.raid_disks);
3887 	/*
3888 	 * Ok, everything is just fine now
3889 	 */
3890 	mddev->dev_sectors = conf->dev_sectors;
3891 	size = raid10_size(mddev, 0, 0);
3892 	md_set_array_sectors(mddev, size);
3893 	mddev->resync_max_sectors = size;
3894 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3895 
3896 	if (mddev->queue) {
3897 		int stripe = conf->geo.raid_disks *
3898 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
3899 
3900 		/* Calculate max read-ahead size.
3901 		 * We need to read ahead at least twice a whole stripe...
3902 		 * maybe more.
3903 		 */
3904 		stripe /= conf->geo.near_copies;
3905 		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
3906 			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
3907 	}
3908 
3909 	if (md_integrity_register(mddev))
3910 		goto out_free_conf;
3911 
3912 	if (conf->reshape_progress != MaxSector) {
3913 		unsigned long before_length, after_length;
3914 
3915 		before_length = ((1 << conf->prev.chunk_shift) *
3916 				 conf->prev.far_copies);
3917 		after_length = ((1 << conf->geo.chunk_shift) *
3918 				conf->geo.far_copies);
3919 
3920 		if (max(before_length, after_length) > min_offset_diff) {
3921 			/* This cannot work */
3922 			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
3923 			goto out_free_conf;
3924 		}
3925 		conf->offset_diff = min_offset_diff;
3926 
3927 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3928 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3929 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3930 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3931 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3932 							"reshape");
3933 		if (!mddev->sync_thread)
3934 			goto out_free_conf;
3935 	}
3936 
3937 	return 0;
3938 
3939 out_free_conf:
3940 	md_unregister_thread(&mddev->thread);
3941 	mempool_exit(&conf->r10bio_pool);
3942 	safe_put_page(conf->tmppage);
3943 	kfree(conf->mirrors);
3944 	kfree(conf);
3945 	mddev->private = NULL;
3946 out:
3947 	return -EIO;
3948 }
3949 
3950 static void raid10_free(struct mddev *mddev, void *priv)
3951 {
3952 	struct r10conf *conf = priv;
3953 
3954 	mempool_exit(&conf->r10bio_pool);
3955 	safe_put_page(conf->tmppage);
3956 	kfree(conf->mirrors);
3957 	kfree(conf->mirrors_old);
3958 	kfree(conf->mirrors_new);
3959 	bioset_exit(&conf->bio_split);
3960 	kfree(conf);
3961 }
3962 
3963 static void raid10_quiesce(struct mddev *mddev, int quiesce)
3964 {
3965 	struct r10conf *conf = mddev->private;
3966 
3967 	if (quiesce)
3968 		raise_barrier(conf, 0);
3969 	else
3970 		lower_barrier(conf);
3971 }
3972 
3973 static int raid10_resize(struct mddev *mddev, sector_t sectors)
3974 {
3975 	/* Resize of 'far' arrays is not supported.
3976 	 * For 'near' and 'offset' arrays we can set the
3977 	 * number of sectors used to be an appropriate multiple
3978 	 * of the chunk size.
3979 	 * For 'offset', this is far_copies*chunksize.
3980 	 * For 'near' the multiplier is the LCM of
3981 	 * near_copies and raid_disks.
3982 	 * So if far_copies > 1 && !far_offset, fail.
3983 	 * Else find LCM(raid_disks, near_copy)*far_copies and
3984 	 * multiply by chunk_size.  Then round to this number.
3985 	 * This is mostly done by raid10_size()
3986 	 */
3987 	struct r10conf *conf = mddev->private;
3988 	sector_t oldsize, size;
3989 
3990 	if (mddev->reshape_position != MaxSector)
3991 		return -EBUSY;
3992 
3993 	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3994 		return -EINVAL;
3995 
3996 	oldsize = raid10_size(mddev, 0, 0);
3997 	size = raid10_size(mddev, sectors, 0);
3998 	if (mddev->external_size &&
3999 	    mddev->array_sectors > size)
4000 		return -EINVAL;
4001 	if (mddev->bitmap) {
4002 		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4003 		if (ret)
4004 			return ret;
4005 	}
4006 	md_set_array_sectors(mddev, size);
4007 	if (sectors > mddev->dev_sectors &&
4008 	    mddev->recovery_cp > oldsize) {
4009 		mddev->recovery_cp = oldsize;
4010 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4011 	}
4012 	calc_sectors(conf, sectors);
4013 	mddev->dev_sectors = conf->dev_sectors;
4014 	mddev->resync_max_sectors = size;
4015 	return 0;
4016 }
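
/*
 * For example (illustrative numbers): a 'near' array with
 * raid_disks = 3, near_copies = 2 and 64KiB chunks rounds the array
 * size to a multiple of LCM(3, 2) = 6 chunks, i.e. 384KiB; an 'offset'
 * array with far_copies = 2 rounds to far_copies * chunksize = 128KiB.
 */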
4017 
4018 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4019 {
4020 	struct md_rdev *rdev;
4021 	struct r10conf *conf;
4022 
4023 	if (mddev->degraded > 0) {
4024 		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4025 			mdname(mddev));
4026 		return ERR_PTR(-EINVAL);
4027 	}
4028 	sector_div(size, devs);
4029 
4030 	/* Set new parameters */
4031 	mddev->new_level = 10;
4032 	/* new layout: far_copies = 1, near_copies = 2 */
4033 	mddev->new_layout = (1<<8) + 2;
4034 	mddev->new_chunk_sectors = mddev->chunk_sectors;
4035 	mddev->delta_disks = mddev->raid_disks;
4036 	mddev->raid_disks *= 2;
4037 	/* make sure it will not be marked as dirty */
4038 	mddev->recovery_cp = MaxSector;
4039 	mddev->dev_sectors = size;
4040 
4041 	conf = setup_conf(mddev);
4042 	if (!IS_ERR(conf)) {
4043 		rdev_for_each(rdev, mddev)
4044 			if (rdev->raid_disk >= 0) {
4045 				rdev->new_raid_disk = rdev->raid_disk * 2;
4046 				rdev->sectors = size;
4047 			}
4048 		conf->barrier = 1;
4049 	}
4050 
4051 	return conf;
4052 }
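
/*
 * For example (hypothetical): taking over a clean 2-drive raid0 keeps
 * the two drives but doubles raid_disks to 4 (delta_disks = 2 records
 * the missing mirrors).  Layout 0x102 gives far_copies = 1 and
 * near_copies = 2, and new_raid_disk = raid_disk * 2 places each
 * original drive as the first half of a (currently degraded) near-2
 * mirror pair.
 */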
4053 
4054 static void *raid10_takeover(struct mddev *mddev)
4055 {
4056 	struct r0conf *raid0_conf;
4057 
4058 	/* raid10 can take over:
4059 	 *  raid0 - providing it has only one zone
4060 	 */
4061 	if (mddev->level == 0) {
4062 		/* for raid0 takeover only one zone is supported */
4063 		raid0_conf = mddev->private;
4064 		if (raid0_conf->nr_strip_zones > 1) {
4065 			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4066 				mdname(mddev));
4067 			return ERR_PTR(-EINVAL);
4068 		}
4069 		return raid10_takeover_raid0(mddev,
4070 			raid0_conf->strip_zone->zone_end,
4071 			raid0_conf->strip_zone->nb_dev);
4072 	}
4073 	return ERR_PTR(-EINVAL);
4074 }
4075 
4076 static int raid10_check_reshape(struct mddev *mddev)
4077 {
4078 	/* Called when there is a request to change
4079 	 * - layout (to ->new_layout)
4080 	 * - chunk size (to ->new_chunk_sectors)
4081 	 * - raid_disks (by delta_disks)
4082 	 * or when trying to restart a reshape that was ongoing.
4083 	 *
4084 	 * We need to validate the request and possibly allocate
4085 	 * space if that might be an issue later.
4086 	 *
4087 	 * Currently we reject any reshape of a 'far' mode array,
4088 	 * allow chunk size to change if new is generally acceptable,
4089 	 * allow raid_disks to increase, and allow
4090 	 * a switch between 'near' mode and 'offset' mode.
4091 	 */
4092 	struct r10conf *conf = mddev->private;
4093 	struct geom geo;
4094 
4095 	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4096 		return -EINVAL;
4097 
4098 	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4099 		/* mustn't change number of copies */
4100 		return -EINVAL;
4101 	if (geo.far_copies > 1 && !geo.far_offset)
4102 		/* Cannot switch to 'far' mode */
4103 		return -EINVAL;
4104 
4105 	if (mddev->array_sectors & geo.chunk_mask)
4106 		/* not a factor of array size */
4107 		return -EINVAL;
4108 
4109 	if (!enough(conf, -1))
4110 		return -EINVAL;
4111 
4112 	kfree(conf->mirrors_new);
4113 	conf->mirrors_new = NULL;
4114 	if (mddev->delta_disks > 0) {
4115 		/* allocate new 'mirrors' list */
4116 		conf->mirrors_new =
4117 			kcalloc(mddev->raid_disks + mddev->delta_disks,
4118 				sizeof(struct raid10_info),
4119 				GFP_KERNEL);
4120 		if (!conf->mirrors_new)
4121 			return -ENOMEM;
4122 	}
4123 	return 0;
4124 }
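
/*
 * Example (illustrative layout values): a near=2 array (layout 0x102,
 * nc = 2, fc = 1, 2 copies) may reshape to offset=2 (layout 0x10201,
 * nc = 1, fc = 2 with far_offset set, still 2 copies), but a switch to
 * plain far=2 (layout 0x201, far_offset clear) is rejected above.
 */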
4125 
4126 /*
4127  * Need to check if array has failed when deciding whether to:
4128  *  - start an array
4129  *  - remove non-faulty devices
4130  *  - add a spare
4131  *  - allow a reshape
4132  * This determination is simple when no reshape is happening.
4133  * However if there is a reshape, we need to carefully check
4134  * both the before and after sections.
4135  * This is because some failed devices may only affect one
4136  * of the two sections, and some non-in_sync devices may
4137  * be in_sync in the section most affected by failed devices.
4138  */
4139 static int calc_degraded(struct r10conf *conf)
4140 {
4141 	int degraded, degraded2;
4142 	int i;
4143 
4144 	rcu_read_lock();
4145 	degraded = 0;
4146 	/* 'prev' section first */
4147 	for (i = 0; i < conf->prev.raid_disks; i++) {
4148 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4149 		if (!rdev || test_bit(Faulty, &rdev->flags))
4150 			degraded++;
4151 		else if (!test_bit(In_sync, &rdev->flags))
4152 			/* When we can reduce the number of devices in
4153 			 * an array, this might not contribute to
4154 			 * 'degraded'.  It does now.
4155 			 */
4156 			degraded++;
4157 	}
4158 	rcu_read_unlock();
4159 	if (conf->geo.raid_disks == conf->prev.raid_disks)
4160 		return degraded;
4161 	rcu_read_lock();
4162 	degraded2 = 0;
4163 	for (i = 0; i < conf->geo.raid_disks; i++) {
4164 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4165 		if (!rdev || test_bit(Faulty, &rdev->flags))
4166 			degraded2++;
4167 		else if (!test_bit(In_sync, &rdev->flags)) {
4168 			/* If reshape is increasing the number of devices,
4169 			 * this section has already been recovered, so
4170 			 * it doesn't contribute to degraded.
4171 			 * else it does.
4172 			 */
4173 			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4174 				degraded2++;
4175 		}
4176 	}
4177 	rcu_read_unlock();
4178 	if (degraded2 > degraded)
4179 		return degraded2;
4180 	return degraded;
4181 }
4182 
4183 static int raid10_start_reshape(struct mddev *mddev)
4184 {
4185 	/* A 'reshape' has been requested. This commits
4186 	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4187 	 * This also checks if there are enough spares and adds them
4188 	 * to the array.
4189 	 * We currently require enough spares to make the final
4190 	 * array non-degraded.  We also require that the difference
4191 	 * between old and new data_offset - on each device - is
4192 	 * enough that we never risk over-writing.
4193 	 */
4194 
4195 	unsigned long before_length, after_length;
4196 	sector_t min_offset_diff = 0;
4197 	int first = 1;
4198 	struct geom new;
4199 	struct r10conf *conf = mddev->private;
4200 	struct md_rdev *rdev;
4201 	int spares = 0;
4202 	int ret;
4203 
4204 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4205 		return -EBUSY;
4206 
4207 	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4208 		return -EINVAL;
4209 
4210 	before_length = ((1 << conf->prev.chunk_shift) *
4211 			 conf->prev.far_copies);
4212 	after_length = ((1 << conf->geo.chunk_shift) *
4213 			conf->geo.far_copies);
4214 
4215 	rdev_for_each(rdev, mddev) {
4216 		if (!test_bit(In_sync, &rdev->flags)
4217 		    && !test_bit(Faulty, &rdev->flags))
4218 			spares++;
4219 		if (rdev->raid_disk >= 0) {
4220 			long long diff = (rdev->new_data_offset
4221 					  - rdev->data_offset);
4222 			if (!mddev->reshape_backwards)
4223 				diff = -diff;
4224 			if (diff < 0)
4225 				diff = 0;
4226 			if (first || diff < min_offset_diff)
4227 				min_offset_diff = diff;
4228 			first = 0;
4229 		}
4230 	}
4231 
4232 	if (max(before_length, after_length) > min_offset_diff)
4233 		return -EINVAL;
4234 
4235 	if (spares < mddev->delta_disks)
4236 		return -EINVAL;
4237 
4238 	conf->offset_diff = min_offset_diff;
4239 	spin_lock_irq(&conf->device_lock);
4240 	if (conf->mirrors_new) {
4241 		memcpy(conf->mirrors_new, conf->mirrors,
4242 		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4243 		smp_mb();
4244 		kfree(conf->mirrors_old);
4245 		conf->mirrors_old = conf->mirrors;
4246 		conf->mirrors = conf->mirrors_new;
4247 		conf->mirrors_new = NULL;
4248 	}
4249 	setup_geo(&conf->geo, mddev, geo_start);
4250 	smp_mb();
4251 	if (mddev->reshape_backwards) {
4252 		sector_t size = raid10_size(mddev, 0, 0);
4253 		if (size < mddev->array_sectors) {
4254 			spin_unlock_irq(&conf->device_lock);
4255 			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4256 				mdname(mddev));
4257 			return -EINVAL;
4258 		}
4259 		mddev->resync_max_sectors = size;
4260 		conf->reshape_progress = size;
4261 	} else
4262 		conf->reshape_progress = 0;
4263 	conf->reshape_safe = conf->reshape_progress;
4264 	spin_unlock_irq(&conf->device_lock);
4265 
4266 	if (mddev->delta_disks && mddev->bitmap) {
4267 		struct mdp_superblock_1 *sb = NULL;
4268 		sector_t oldsize, newsize;
4269 
4270 		oldsize = raid10_size(mddev, 0, 0);
4271 		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4272 
4273 		if (!mddev_is_clustered(mddev)) {
4274 			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4275 			if (ret)
4276 				goto abort;
4277 			else
4278 				goto out;
4279 		}
4280 
4281 		rdev_for_each(rdev, mddev) {
4282 			if (rdev->raid_disk > -1 &&
4283 			    !test_bit(Faulty, &rdev->flags))
4284 				sb = page_address(rdev->sb_page);
4285 		}
4286 
4287 		/*
4288 		 * Some node is already performing the reshape, so there is
4289 		 * no need to call md_bitmap_resize again since it will be
4290 		 * called when the BITMAP_RESIZE msg is received.
4291 		 */
4292 		if ((sb && (le32_to_cpu(sb->feature_map) &
4293 			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4294 			goto out;
4295 
4296 		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4297 		if (ret)
4298 			goto abort;
4299 
4300 		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4301 		if (ret) {
4302 			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4303 			goto abort;
4304 		}
4305 	}
4306 out:
4307 	if (mddev->delta_disks > 0) {
4308 		rdev_for_each(rdev, mddev)
4309 			if (rdev->raid_disk < 0 &&
4310 			    !test_bit(Faulty, &rdev->flags)) {
4311 				if (raid10_add_disk(mddev, rdev) == 0) {
4312 					if (rdev->raid_disk >=
4313 					    conf->prev.raid_disks)
4314 						set_bit(In_sync, &rdev->flags);
4315 					else
4316 						rdev->recovery_offset = 0;
4317 
4318 					if (sysfs_link_rdev(mddev, rdev))
4319 						/* Failure here is OK */;
4320 				}
4321 			} else if (rdev->raid_disk >= conf->prev.raid_disks
4322 				   && !test_bit(Faulty, &rdev->flags)) {
4323 				/* This is a spare that was manually added */
4324 				set_bit(In_sync, &rdev->flags);
4325 			}
4326 	}
4327 	/* When a reshape changes the number of devices,
4328 	 * ->degraded is measured against the larger of the
4329 	 * pre and post numbers.
4330 	 */
4331 	spin_lock_irq(&conf->device_lock);
4332 	mddev->degraded = calc_degraded(conf);
4333 	spin_unlock_irq(&conf->device_lock);
4334 	mddev->raid_disks = conf->geo.raid_disks;
4335 	mddev->reshape_position = conf->reshape_progress;
4336 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4337 
4338 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4339 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4340 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4341 	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4342 	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4343 
4344 	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4345 						"reshape");
4346 	if (!mddev->sync_thread) {
4347 		ret = -EAGAIN;
4348 		goto abort;
4349 	}
4350 	conf->reshape_checkpoint = jiffies;
4351 	md_wakeup_thread(mddev->sync_thread);
4352 	md_new_event(mddev);
4353 	return 0;
4354 
4355 abort:
4356 	mddev->recovery = 0;
4357 	spin_lock_irq(&conf->device_lock);
4358 	conf->geo = conf->prev;
4359 	mddev->raid_disks = conf->geo.raid_disks;
4360 	rdev_for_each(rdev, mddev)
4361 		rdev->new_data_offset = rdev->data_offset;
4362 	smp_wmb();
4363 	conf->reshape_progress = MaxSector;
4364 	conf->reshape_safe = MaxSector;
4365 	mddev->reshape_position = MaxSector;
4366 	spin_unlock_irq(&conf->device_lock);
4367 	return ret;
4368 }
4369 
4370 /* Calculate the last device-address that could contain
4371  * any block from the chunk that includes the array-address 's'
4372  * and report the next address.
4373  * i.e. the address returned will be chunk-aligned and after
4374  * any data that is in the chunk containing 's'.
4375  */
4376 static sector_t last_dev_address(sector_t s, struct geom *geo)
4377 {
4378 	s = (s | geo->chunk_mask) + 1;
4379 	s >>= geo->chunk_shift;
4380 	s *= geo->near_copies;
4381 	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4382 	s *= geo->far_copies;
4383 	s <<= geo->chunk_shift;
4384 	return s;
4385 }
4386 
4387 /* Calculate the first device-address that could contain
4388  * any block from the chunk that includes the array-address 's'.
4389  * This too will be the start of a chunk
4390  */
4391 static sector_t first_dev_address(sector_t s, struct geom *geo)
4392 {
4393 	s >>= geo->chunk_shift;
4394 	s *= geo->near_copies;
4395 	sector_div(s, geo->raid_disks);
4396 	s *= geo->far_copies;
4397 	s <<= geo->chunk_shift;
4398 	return s;
4399 }
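
/*
 * Worked example (hypothetical geometry: chunk_shift = 7,
 * chunk_mask = 127, near_copies = 2, far_copies = 1, raid_disks = 4)
 * for array address s = 1000:
 *   last_dev_address:  (1000 | 127) + 1 = 1024; >> 7 = 8; * 2 = 16;
 *     DIV_ROUND_UP(16, 4) = 4; * 1 = 4; << 7 = 512 - no chunk
 *     containing s extends past device sector 512.
 *   first_dev_address: 1000 >> 7 = 7; * 2 = 14; / 4 = 3; * 1 = 3;
 *     << 7 = 384 - the chunk-aligned device address at or below any
 *     data from the chunk containing s.
 */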
4400 
4401 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4402 				int *skipped)
4403 {
4404 	/* We simply copy at most one chunk (smallest of old and new)
4405 	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4406 	 * or we hit a bad block or something.
4407 	 * This might mean we pause for normal IO in the middle of
4408 	 * a chunk, but that is not a problem as mddev->reshape_position
4409 	 * can record any location.
4410 	 *
4411 	 * If we will want to write to a location that isn't
4412 	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4413 	 * we need to flush all reshape requests and update the metadata.
4414 	 *
4415 	 * When reshaping forwards (e.g. to more devices), we interpret
4416 	 * 'safe' as the earliest block which might not have been copied
4417 	 * down yet.  We divide this by previous stripe size and multiply
4418 	 * by previous stripe length to get lowest device offset that we
4419 	 * cannot write to yet.
4420 	 * We interpret 'sector_nr' as an address that we want to write to.
4421 	 * From this we use last_dev_address() to find where we might
4422 	 * write to, and first_dev_address() on the 'safe' position.
4423 	 * If this 'next' write position is after the 'safe' position,
4424 	 * we must update the metadata to increase the 'safe' position.
4425 	 *
4426 	 * When reshaping backwards, we round in the opposite direction
4427 	 * and perform the reverse test:  next write position must not be
4428 	 * less than current safe position.
4429 	 *
4430 	 * In all this the minimum difference in data offsets
4431 	 * (conf->offset_diff - always positive) allows a bit of slack,
4432 	 * so next can be after 'safe', but not by more than offset_diff
4433 	 *
4434 	 * We need to prepare all the bios here before we start any IO
4435 	 * to ensure the size we choose is acceptable to all devices.
4436 	 * That means one for each copy for write-out and an extra one for
4437 	 * read-in.
4438 	 * We store the read-in bio in ->master_bio and the others in
4439 	 * ->devs[x].bio and ->devs[x].repl_bio.
4440 	 */
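	/*
	 * Illustrative numbers for the forward test below (hypothetical):
	 * with next = 2048, safe = 1536 and conf->offset_diff = 256,
	 * next > safe + offset_diff, so need_flush is set and the
	 * metadata is updated before this chunk is written.
	 */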
4441 	struct r10conf *conf = mddev->private;
4442 	struct r10bio *r10_bio;
4443 	sector_t next, safe, last;
4444 	int max_sectors;
4445 	int nr_sectors;
4446 	int s;
4447 	struct md_rdev *rdev;
4448 	int need_flush = 0;
4449 	struct bio *blist;
4450 	struct bio *bio, *read_bio;
4451 	int sectors_done = 0;
4452 	struct page **pages;
4453 
4454 	if (sector_nr == 0) {
4455 		/* If restarting in the middle, skip the initial sectors */
4456 		if (mddev->reshape_backwards &&
4457 		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4458 			sector_nr = (raid10_size(mddev, 0, 0)
4459 				     - conf->reshape_progress);
4460 		} else if (!mddev->reshape_backwards &&
4461 			   conf->reshape_progress > 0)
4462 			sector_nr = conf->reshape_progress;
4463 		if (sector_nr) {
4464 			mddev->curr_resync_completed = sector_nr;
4465 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4466 			*skipped = 1;
4467 			return sector_nr;
4468 		}
4469 	}
4470 
4471 	/* We don't use sector_nr to track where we are up to
4472 	 * as that doesn't work well for ->reshape_backwards.
4473 	 * So just use ->reshape_progress.
4474 	 */
4475 	if (mddev->reshape_backwards) {
4476 		/* 'next' is the earliest device address that we might
4477 		 * write to for this chunk in the new layout
4478 		 */
4479 		next = first_dev_address(conf->reshape_progress - 1,
4480 					 &conf->geo);
4481 
4482 		/* 'safe' is the last device address that we might read from
4483 		 * in the old layout after a restart
4484 		 */
4485 		safe = last_dev_address(conf->reshape_safe - 1,
4486 					&conf->prev);
4487 
4488 		if (next + conf->offset_diff < safe)
4489 			need_flush = 1;
4490 
4491 		last = conf->reshape_progress - 1;
4492 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4493 					       & conf->prev.chunk_mask);
4494 		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4495 			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4496 	} else {
4497 		/* 'next' is after the last device address that we
4498 		 * might write to for this chunk in the new layout
4499 		 */
4500 		next = last_dev_address(conf->reshape_progress, &conf->geo);
4501 
4502 		/* 'safe' is the earliest device address that we might
4503 		 * read from in the old layout after a restart
4504 		 */
4505 		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4506 
4507 		/* Need to update metadata if 'next' might be beyond 'safe'
4508 		 * as that would possibly corrupt data
4509 		 */
4510 		if (next > safe + conf->offset_diff)
4511 			need_flush = 1;
4512 
4513 		sector_nr = conf->reshape_progress;
4514 		last = sector_nr | (conf->geo.chunk_mask
4515 				     & conf->prev.chunk_mask);
4516 
4517 		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4518 			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4519 	}
4520 
4521 	if (need_flush ||
4522 	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4523 		/* Need to update reshape_position in metadata */
4524 		wait_barrier(conf);
4525 		mddev->reshape_position = conf->reshape_progress;
4526 		if (mddev->reshape_backwards)
4527 			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4528 				- conf->reshape_progress;
4529 		else
4530 			mddev->curr_resync_completed = conf->reshape_progress;
4531 		conf->reshape_checkpoint = jiffies;
4532 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4533 		md_wakeup_thread(mddev->thread);
4534 		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4535 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4536 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4537 			allow_barrier(conf);
4538 			return sectors_done;
4539 		}
4540 		conf->reshape_safe = mddev->reshape_position;
4541 		allow_barrier(conf);
4542 	}
4543 
4544 	raise_barrier(conf, 0);
4545 read_more:
4546 	/* Now schedule reads for blocks from sector_nr to last */
4547 	r10_bio = raid10_alloc_init_r10buf(conf);
4548 	r10_bio->state = 0;
4549 	raise_barrier(conf, 1);
4550 	atomic_set(&r10_bio->remaining, 0);
4551 	r10_bio->mddev = mddev;
4552 	r10_bio->sector = sector_nr;
4553 	set_bit(R10BIO_IsReshape, &r10_bio->state);
4554 	r10_bio->sectors = last - sector_nr + 1;
4555 	rdev = read_balance(conf, r10_bio, &max_sectors);
4556 	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4557 
4558 	if (!rdev) {
4559 		/* Cannot read from here, so need to record bad blocks
4560 		 * on all the target devices.
4561 		 */
4562 		// FIXME
4563 		mempool_free(r10_bio, &conf->r10buf_pool);
4564 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4565 		return sectors_done;
4566 	}
4567 
4568 	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4569 
4570 	bio_set_dev(read_bio, rdev->bdev);
4571 	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4572 			       + rdev->data_offset);
4573 	read_bio->bi_private = r10_bio;
4574 	read_bio->bi_end_io = end_reshape_read;
4575 	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
4576 	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4577 	read_bio->bi_status = 0;
4578 	read_bio->bi_vcnt = 0;
4579 	read_bio->bi_iter.bi_size = 0;
4580 	r10_bio->master_bio = read_bio;
4581 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4582 
4583 	/*
4584 	 * Broadcast RESYNC message to other nodes, so that no node writes
4585 	 * to the region, avoiding conflicts.
4586 	 */
4587 	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4588 		struct mdp_superblock_1 *sb = NULL;
4589 		int sb_reshape_pos = 0;
4590 
4591 		conf->cluster_sync_low = sector_nr;
4592 		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4593 		sb = page_address(rdev->sb_page);
4594 		if (sb) {
4595 			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4596 			/*
4597 			 * Set cluster_sync_low again if the next reshape address
4598 			 * is less than cluster_sync_low, since we can't update
4599 			 * cluster_sync_low until the reshape has finished.
4600 			 */
4601 			if (sb_reshape_pos < conf->cluster_sync_low)
4602 				conf->cluster_sync_low = sb_reshape_pos;
4603 		}
4604 
4605 		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4606 							  conf->cluster_sync_high);
4607 	}
4608 
4609 	/* Now find the locations in the new layout */
4610 	__raid10_find_phys(&conf->geo, r10_bio);
4611 
4612 	blist = read_bio;
4613 	read_bio->bi_next = NULL;
4614 
4615 	rcu_read_lock();
4616 	for (s = 0; s < conf->copies*2; s++) {
4617 		struct bio *b;
4618 		int d = r10_bio->devs[s/2].devnum;
4619 		struct md_rdev *rdev2;
4620 		if (s&1) {
4621 			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4622 			b = r10_bio->devs[s/2].repl_bio;
4623 		} else {
4624 			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4625 			b = r10_bio->devs[s/2].bio;
4626 		}
4627 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4628 			continue;
4629 
4630 		bio_set_dev(b, rdev2->bdev);
4631 		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4632 			rdev2->new_data_offset;
4633 		b->bi_end_io = end_reshape_write;
4634 		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
4635 		b->bi_next = blist;
4636 		blist = b;
4637 	}
4638 
4639 	/* Now add as many pages as possible to all of these bios. */
4640 
4641 	nr_sectors = 0;
4642 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4643 	for (s = 0; s < max_sectors; s += PAGE_SIZE >> 9) {
4644 		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4645 		int len = (max_sectors - s) << 9;
4646 		if (len > PAGE_SIZE)
4647 			len = PAGE_SIZE;
4648 		for (bio = blist; bio; bio = bio->bi_next) {
4649 			/*
4650 			 * won't fail because the vec table is big enough
4651 			 * to hold all these pages
4652 			 */
4653 			bio_add_page(bio, page, len, 0);
4654 		}
4655 		sector_nr += len >> 9;
4656 		nr_sectors += len >> 9;
4657 	}
4658 	rcu_read_unlock();
4659 	r10_bio->sectors = nr_sectors;
4660 
4661 	/* Now submit the read */
4662 	md_sync_acct_bio(read_bio, r10_bio->sectors);
4663 	atomic_inc(&r10_bio->remaining);
4664 	read_bio->bi_next = NULL;
4665 	generic_make_request(read_bio);
4666 	sectors_done += nr_sectors;
4667 	if (sector_nr <= last)
4668 		goto read_more;
4669 
4670 	lower_barrier(conf);
4671 
4672 	/* Now that we have done the whole section we can
4673 	 * update reshape_progress
4674 	 */
4675 	if (mddev->reshape_backwards)
4676 		conf->reshape_progress -= sectors_done;
4677 	else
4678 		conf->reshape_progress += sectors_done;
4679 
4680 	return sectors_done;
4681 }
4682 
4683 static void end_reshape_request(struct r10bio *r10_bio);
4684 static int handle_reshape_read_error(struct mddev *mddev,
4685 				     struct r10bio *r10_bio);
4686 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4687 {
4688 	/* Reshape read completed.  Hopefully we have a block
4689 	 * to write out.
4690 	 * If we got a read error then we do sync 1-page reads from
4691 	 * elsewhere until we find the data - or give up.
4692 	 */
4693 	struct r10conf *conf = mddev->private;
4694 	int s;
4695 
4696 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4697 		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4698 			/* Reshape has been aborted */
4699 			md_done_sync(mddev, r10_bio->sectors, 0);
4700 			return;
4701 		}
4702 
4703 	/* We definitely have the data in the pages, schedule the
4704 	 * writes.
4705 	 */
4706 	atomic_set(&r10_bio->remaining, 1);
4707 	for (s = 0; s < conf->copies*2; s++) {
4708 		struct bio *b;
4709 		int d = r10_bio->devs[s/2].devnum;
4710 		struct md_rdev *rdev;
4711 		rcu_read_lock();
4712 		if (s&1) {
4713 			rdev = rcu_dereference(conf->mirrors[d].replacement);
4714 			b = r10_bio->devs[s/2].repl_bio;
4715 		} else {
4716 			rdev = rcu_dereference(conf->mirrors[d].rdev);
4717 			b = r10_bio->devs[s/2].bio;
4718 		}
4719 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
4720 			rcu_read_unlock();
4721 			continue;
4722 		}
4723 		atomic_inc(&rdev->nr_pending);
4724 		rcu_read_unlock();
4725 		md_sync_acct_bio(b, r10_bio->sectors);
4726 		atomic_inc(&r10_bio->remaining);
4727 		b->bi_next = NULL;
4728 		generic_make_request(b);
4729 	}
4730 	end_reshape_request(r10_bio);
4731 }
4732 
4733 static void end_reshape(struct r10conf *conf)
4734 {
4735 	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4736 		return;
4737 
4738 	spin_lock_irq(&conf->device_lock);
4739 	conf->prev = conf->geo;
4740 	md_finish_reshape(conf->mddev);
4741 	smp_wmb();
4742 	conf->reshape_progress = MaxSector;
4743 	conf->reshape_safe = MaxSector;
4744 	spin_unlock_irq(&conf->device_lock);
4745 
4746 	/* read-ahead size must cover two whole stripes, which is
4747 	 * 2 * (data disks) * chunksize, with data disks = raid_disks / near_copies
4748 	 */
4749 	if (conf->mddev->queue) {
4750 		int stripe = conf->geo.raid_disks *
4751 			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4752 		stripe /= conf->geo.near_copies;
4753 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
4754 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
4755 	}
4756 	conf->fullsync = 0;
4757 }
4758 
4759 static void raid10_update_reshape_pos(struct mddev *mddev)
4760 {
4761 	struct r10conf *conf = mddev->private;
4762 	sector_t lo, hi;
4763 
4764 	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4765 	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4766 	    || mddev->reshape_position == MaxSector)
4767 		conf->reshape_progress = mddev->reshape_position;
4768 	else
4769 		WARN_ON_ONCE(1);
4770 }
4771 
4772 static int handle_reshape_read_error(struct mddev *mddev,
4773 				     struct r10bio *r10_bio)
4774 {
4775 	/* Use sync reads to get the blocks from somewhere else */
4776 	int sectors = r10_bio->sectors;
4777 	struct r10conf *conf = mddev->private;
4778 	struct r10bio *r10b;
4779 	int slot = 0;
4780 	int idx = 0;
4781 	struct page **pages;
4782 
4783 	r10b = kmalloc(sizeof(*r10b) +
4784 	       sizeof(struct r10dev) * conf->copies, GFP_NOIO);
4785 	if (!r10b) {
4786 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4787 		return -ENOMEM;
4788 	}
4789 
4790 	/* reshape IOs share pages from .devs[0].bio */
4791 	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4792 
4793 	r10b->sector = r10_bio->sector;
4794 	__raid10_find_phys(&conf->prev, r10b);
4795 
4796 	while (sectors) {
4797 		int s = sectors;
4798 		int success = 0;
4799 		int first_slot = slot;
4800 
4801 		if (s > (PAGE_SIZE >> 9))
4802 			s = PAGE_SIZE >> 9;
4803 
4804 		rcu_read_lock();
4805 		while (!success) {
4806 			int d = r10b->devs[slot].devnum;
4807 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4808 			sector_t addr;
4809 			if (rdev == NULL ||
4810 			    test_bit(Faulty, &rdev->flags) ||
4811 			    !test_bit(In_sync, &rdev->flags))
4812 				goto failed;
4813 
4814 			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4815 			atomic_inc(&rdev->nr_pending);
4816 			rcu_read_unlock();
4817 			success = sync_page_io(rdev,
4818 					       addr,
4819 					       s << 9,
4820 					       pages[idx],
4821 					       REQ_OP_READ, 0, false);
4822 			rdev_dec_pending(rdev, mddev);
4823 			rcu_read_lock();
4824 			if (success)
4825 				break;
4826 		failed:
4827 			slot++;
4828 			if (slot >= conf->copies)
4829 				slot = 0;
4830 			if (slot == first_slot)
4831 				break;
4832 		}
4833 		rcu_read_unlock();
4834 		if (!success) {
4835 			/* couldn't read this block, must give up */
4836 			set_bit(MD_RECOVERY_INTR,
4837 				&mddev->recovery);
4838 			kfree(r10b);
4839 			return -EIO;
4840 		}
4841 		sectors -= s;
4842 		idx++;
4843 	}
4844 	kfree(r10b);
4845 	return 0;
4846 }
4847 
4848 static void end_reshape_write(struct bio *bio)
4849 {
4850 	struct r10bio *r10_bio = get_resync_r10bio(bio);
4851 	struct mddev *mddev = r10_bio->mddev;
4852 	struct r10conf *conf = mddev->private;
4853 	int d;
4854 	int slot;
4855 	int repl;
4856 	struct md_rdev *rdev = NULL;
4857 
4858 	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4859 	if (repl)
4860 		rdev = conf->mirrors[d].replacement;
4861 	if (!rdev) {
4862 		smp_mb();
4863 		rdev = conf->mirrors[d].rdev;
4864 	}
4865 
4866 	if (bio->bi_status) {
4867 		/* FIXME should record badblock */
4868 		md_error(mddev, rdev);
4869 	}
4870 
4871 	rdev_dec_pending(rdev, mddev);
4872 	end_reshape_request(r10_bio);
4873 }
4874 
4875 static void end_reshape_request(struct r10bio *r10_bio)
4876 {
4877 	if (!atomic_dec_and_test(&r10_bio->remaining))
4878 		return;
4879 	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4880 	bio_put(r10_bio->master_bio);
4881 	put_buf(r10_bio);
4882 }
4883 
4884 static void raid10_finish_reshape(struct mddev *mddev)
4885 {
4886 	struct r10conf *conf = mddev->private;
4887 
4888 	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4889 		return;
4890 
4891 	if (mddev->delta_disks > 0) {
4892 		if (mddev->recovery_cp > mddev->resync_max_sectors) {
4893 			mddev->recovery_cp = mddev->resync_max_sectors;
4894 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4895 		}
4896 		mddev->resync_max_sectors = mddev->array_sectors;
4897 	} else {
4898 		int d;
4899 		rcu_read_lock();
4900 		for (d = conf->geo.raid_disks;
4901 		     d < conf->geo.raid_disks - mddev->delta_disks;
4902 		     d++) {
4903 			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4904 			if (rdev)
4905 				clear_bit(In_sync, &rdev->flags);
4906 			rdev = rcu_dereference(conf->mirrors[d].replacement);
4907 			if (rdev)
4908 				clear_bit(In_sync, &rdev->flags);
4909 		}
4910 		rcu_read_unlock();
4911 	}
4912 	mddev->layout = mddev->new_layout;
4913 	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4914 	mddev->reshape_position = MaxSector;
4915 	mddev->delta_disks = 0;
4916 	mddev->reshape_backwards = 0;
4917 }
4918 
4919 static struct md_personality raid10_personality =
4920 {
4921 	.name		= "raid10",
4922 	.level		= 10,
4923 	.owner		= THIS_MODULE,
4924 	.make_request	= raid10_make_request,
4925 	.run		= raid10_run,
4926 	.free		= raid10_free,
4927 	.status		= raid10_status,
4928 	.error_handler	= raid10_error,
4929 	.hot_add_disk	= raid10_add_disk,
4930 	.hot_remove_disk= raid10_remove_disk,
4931 	.spare_active	= raid10_spare_active,
4932 	.sync_request	= raid10_sync_request,
4933 	.quiesce	= raid10_quiesce,
4934 	.size		= raid10_size,
4935 	.resize		= raid10_resize,
4936 	.takeover	= raid10_takeover,
4937 	.check_reshape	= raid10_check_reshape,
4938 	.start_reshape	= raid10_start_reshape,
4939 	.finish_reshape	= raid10_finish_reshape,
4940 	.update_reshape_pos = raid10_update_reshape_pos,
4941 	.congested	= raid10_congested,
4942 };
4943 
4944 static int __init raid_init(void)
4945 {
4946 	return register_md_personality(&raid10_personality);
4947 }
4948 
4949 static void raid_exit(void)
4950 {
4951 	unregister_md_personality(&raid10_personality);
4952 }
4953 
4954 module_init(raid_init);
4955 module_exit(raid_exit);
4956 MODULE_LICENSE("GPL");
4957 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4958 MODULE_ALIAS("md-personality-9"); /* RAID10 */
4959 MODULE_ALIAS("md-raid10");
4960 MODULE_ALIAS("md-level-10");
4961 
4962 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
4963