xref: /openbmc/linux/drivers/md/raid10.c (revision 93dc544c)
1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include "dm-bio-list.h"
22 #include <linux/raid/raid10.h>
23 #include <linux/raid/bitmap.h>
24 
25 /*
26  * RAID10 provides a combination of RAID0 and RAID1 functionality.
27  * The layout of data is defined by
28  *    chunk_size
29  *    raid_disks
30  *    near_copies (stored in low byte of layout)
31  *    far_copies (stored in second byte of layout)
32  *    far_offset (stored in bit 16 of layout)
33  *
34  * The data to be stored is divided into chunks using chunksize.
35  * Each device is divided into far_copies sections.
36  * In each section, chunks are laid out in a style similar to raid0, but
37  * near_copies copies of each chunk are stored (each on a different drive).
38  * The starting device for each section is offset near_copies from the starting
39  * device of the previous section.
40  * Thus there are (near_copies*far_copies) copies of each chunk, and each is on
41  * a different drive.
42  * near_copies and far_copies must be at least one, and their product is at most
43  * raid_disks.
44  *
45  * If far_offset is true, then the far_copies are handled a bit differently.
46  * The copies are still in different stripes, but instead of being very far
47  * apart on disk, they are in adjacent stripes.
48  */
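/*
 * A worked example of the layout described above (illustrative only),
 * assuming raid_disks = 4 and data chunks A, B, C, ...:
 *
 *   near_copies = 2, far_copies = 1:
 *       disk0  disk1  disk2  disk3
 *        A      A      B      B
 *        C      C      D      D
 *
 *   near_copies = 1, far_copies = 2 (each disk is split into two
 *   sections; the far section repeats the array shifted by
 *   near_copies devices):
 *       disk0  disk1  disk2  disk3
 *        A      B      C      D
 *        E      F      G      H
 *        -- far section --
 *        D      A      B      C
 *        H      E      F      G
 */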
49 
50 /*
51  * Number of guaranteed r10bios in case of extreme VM load:
52  */
53 #define	NR_RAID10_BIOS 256
54 
55 static void unplug_slaves(mddev_t *mddev);
56 
57 static void allow_barrier(conf_t *conf);
58 static void lower_barrier(conf_t *conf);
59 
60 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
61 {
62 	conf_t *conf = data;
63 	r10bio_t *r10_bio;
64 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
65 
66 	/* allocate an r10bio with room for conf->copies entries in the devs array */
67 	r10_bio = kzalloc(size, gfp_flags);
68 	if (!r10_bio)
69 		unplug_slaves(conf->mddev);
70 
71 	return r10_bio;
72 }
73 
74 static void r10bio_pool_free(void *r10_bio, void *data)
75 {
76 	kfree(r10_bio);
77 }
78 
79 #define RESYNC_BLOCK_SIZE (64*1024)
80 //#define RESYNC_BLOCK_SIZE PAGE_SIZE
81 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
82 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
83 #define RESYNC_WINDOW (2048*1024)
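/*
 * With 4K pages these work out to RESYNC_SECTORS = 128 and
 * RESYNC_PAGES = 16 per resync block, and init_resync() below
 * preallocates RESYNC_WINDOW / RESYNC_BLOCK_SIZE = 32 buffers.
 */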
84 
85 /*
86  * When performing a resync, we need to read and compare, so
87  * we need as many pages as there are copies.
88  * When performing a recovery, we need 2 bios, one for read,
89  * one for write (we recover only one drive per r10buf)
90  *
91  */
92 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
93 {
94 	conf_t *conf = data;
95 	struct page *page;
96 	r10bio_t *r10_bio;
97 	struct bio *bio;
98 	int i, j;
99 	int nalloc;
100 
101 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
102 	if (!r10_bio) {
103 		unplug_slaves(conf->mddev);
104 		return NULL;
105 	}
106 
107 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
108 		nalloc = conf->copies; /* resync */
109 	else
110 		nalloc = 2; /* recovery */
111 
112 	/*
113 	 * Allocate bios.
114 	 */
115 	for (j = nalloc ; j-- ; ) {
116 		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
117 		if (!bio)
118 			goto out_free_bio;
119 		r10_bio->devs[j].bio = bio;
120 	}
121 	/*
122 	 * Allocate RESYNC_PAGES data pages and attach them
123 	 * where needed.
124 	 */
125 	for (j = 0 ; j < nalloc; j++) {
126 		bio = r10_bio->devs[j].bio;
127 		for (i = 0; i < RESYNC_PAGES; i++) {
128 			page = alloc_page(gfp_flags);
129 			if (unlikely(!page))
130 				goto out_free_pages;
131 
132 			bio->bi_io_vec[i].bv_page = page;
133 		}
134 	}
135 
136 	return r10_bio;
137 
138 out_free_pages:
139 	for ( ; i > 0 ; i--)
140 		safe_put_page(bio->bi_io_vec[i-1].bv_page);
141 	while (j--)
142 		for (i = 0; i < RESYNC_PAGES ; i++)
143 			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
144 	j = -1;
145 out_free_bio:
146 	while ( ++j < nalloc )
147 		bio_put(r10_bio->devs[j].bio);
148 	r10bio_pool_free(r10_bio, conf);
149 	return NULL;
150 }
151 
152 static void r10buf_pool_free(void *__r10_bio, void *data)
153 {
154 	int i;
155 	conf_t *conf = data;
156 	r10bio_t *r10bio = __r10_bio;
157 	int j;
158 
159 	for (j=0; j < conf->copies; j++) {
160 		struct bio *bio = r10bio->devs[j].bio;
161 		if (bio) {
162 			for (i = 0; i < RESYNC_PAGES; i++) {
163 				safe_put_page(bio->bi_io_vec[i].bv_page);
164 				bio->bi_io_vec[i].bv_page = NULL;
165 			}
166 			bio_put(bio);
167 		}
168 	}
169 	r10bio_pool_free(r10bio, conf);
170 }
171 
172 static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
173 {
174 	int i;
175 
176 	for (i = 0; i < conf->copies; i++) {
177 		struct bio **bio = & r10_bio->devs[i].bio;
178 		if (*bio && *bio != IO_BLOCKED)
179 			bio_put(*bio);
180 		*bio = NULL;
181 	}
182 }
183 
184 static void free_r10bio(r10bio_t *r10_bio)
185 {
186 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
187 
188 	/*
189 	 * Wake up any possible resync thread that waits for the device
190 	 * to go idle.
191 	 */
192 	allow_barrier(conf);
193 
194 	put_all_bios(conf, r10_bio);
195 	mempool_free(r10_bio, conf->r10bio_pool);
196 }
197 
198 static void put_buf(r10bio_t *r10_bio)
199 {
200 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
201 
202 	mempool_free(r10_bio, conf->r10buf_pool);
203 
204 	lower_barrier(conf);
205 }
206 
207 static void reschedule_retry(r10bio_t *r10_bio)
208 {
209 	unsigned long flags;
210 	mddev_t *mddev = r10_bio->mddev;
211 	conf_t *conf = mddev_to_conf(mddev);
212 
213 	spin_lock_irqsave(&conf->device_lock, flags);
214 	list_add(&r10_bio->retry_list, &conf->retry_list);
215 	conf->nr_queued ++;
216 	spin_unlock_irqrestore(&conf->device_lock, flags);
217 
218 	md_wakeup_thread(mddev->thread);
219 }
220 
221 /*
222  * raid_end_bio_io() is called when we have finished servicing a mirrored
223  * operation and are ready to return a success/failure code to the buffer
224  * cache layer.
225  */
226 static void raid_end_bio_io(r10bio_t *r10_bio)
227 {
228 	struct bio *bio = r10_bio->master_bio;
229 
230 	bio_endio(bio,
231 		test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
232 	free_r10bio(r10_bio);
233 }
234 
235 /*
236  * Update disk head position estimator based on IRQ completion info.
237  */
238 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
239 {
240 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
241 
242 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
243 		r10_bio->devs[slot].addr + (r10_bio->sectors);
244 }
245 
246 static void raid10_end_read_request(struct bio *bio, int error)
247 {
248 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
249 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
250 	int slot, dev;
251 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
252 
253 
254 	slot = r10_bio->read_slot;
255 	dev = r10_bio->devs[slot].devnum;
256 	/*
257 	 * this branch is our 'one mirror IO has finished' event handler:
258 	 */
259 	update_head_pos(slot, r10_bio);
260 
261 	if (uptodate) {
262 		/*
263 		 * Set R10BIO_Uptodate in our master bio, so that
264 		 * we will return a good error code to the higher
265 		 * levels even if IO on some other mirrored buffer fails.
266 		 *
267 		 * The 'master' represents the composite IO operation to
268 		 * user-side. So if something waits for IO, then it will
269 		 * wait for the 'master' bio.
270 		 */
271 		set_bit(R10BIO_Uptodate, &r10_bio->state);
272 		raid_end_bio_io(r10_bio);
273 	} else {
274 		/*
275 		 * oops, read error:
276 		 */
277 		char b[BDEVNAME_SIZE];
278 		if (printk_ratelimit())
279 			printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
280 			       bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
281 		reschedule_retry(r10_bio);
282 	}
283 
284 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
285 }
286 
287 static void raid10_end_write_request(struct bio *bio, int error)
288 {
289 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
290 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
291 	int slot, dev;
292 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
293 
294 	for (slot = 0; slot < conf->copies; slot++)
295 		if (r10_bio->devs[slot].bio == bio)
296 			break;
297 	dev = r10_bio->devs[slot].devnum;
298 
299 	/*
300 	 * this branch is our 'one mirror IO has finished' event handler:
301 	 */
302 	if (!uptodate) {
303 		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
304 		/* an I/O failed, we can't clear the bitmap */
305 		set_bit(R10BIO_Degraded, &r10_bio->state);
306 	} else
307 		/*
308 		 * Set R10BIO_Uptodate in our master bio, so that
309 		 * we will return a good error code to the higher
310 		 * levels even if IO on some other mirrored buffer fails.
311 		 *
312 		 * The 'master' represents the composite IO operation to
313 		 * user-side. So if something waits for IO, then it will
314 		 * wait for the 'master' bio.
315 		 */
316 		set_bit(R10BIO_Uptodate, &r10_bio->state);
317 
318 	update_head_pos(slot, r10_bio);
319 
320 	/*
321 	 *
322 	 * Let's see if all mirrored write operations have finished
323 	 * already.
324 	 */
325 	if (atomic_dec_and_test(&r10_bio->remaining)) {
326 		/* clear the bitmap if all writes complete successfully */
327 		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
328 				r10_bio->sectors,
329 				!test_bit(R10BIO_Degraded, &r10_bio->state),
330 				0);
331 		md_write_end(r10_bio->mddev);
332 		raid_end_bio_io(r10_bio);
333 	}
334 
335 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
336 }
337 
338 
339 /*
340  * RAID10 layout manager
341  * As well as the chunksize and raid_disks count, there are two
342  * parameters: near_copies and far_copies.
343  * near_copies * far_copies must be <= raid_disks.
344  * Normally one of these will be 1.
345  * If both are 1, we get raid0.
346  * If near_copies == raid_disks, we get raid1.
347  *
348  * Chunks are laid out in raid0 style with near_copies copies of the
349  * first chunk, followed by near_copies copies of the next chunk and
350  * so on.
351  * If far_copies > 1, then after 1/far_copies of the array has been assigned
352  * as described above, we start again with a device offset of near_copies.
353  * So we effectively have another copy of the whole array further down all
354  * the drives, but with blocks on different drives.
355  * With this layout, a block is never stored twice on the same device.
356  *
357  * raid10_find_phys finds the sector offset of a given virtual sector
358  * on each device that it is on.
359  *
360  * raid10_find_virt does the reverse mapping, from a device and a
361  * sector offset to a virtual address
362  */
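/*
 * For example (illustrative numbers), with raid_disks = 4,
 * near_copies = 2, far_copies = 1 and 64K chunks (chunk_shift = 7,
 * chunk_mask = 127): virtual sector 300 is chunk 2, offset 44.
 * chunk * near_copies = 4 gives stripe 1 starting at device 0, so
 * raid10_find_phys places the copies on devices 0 and 1 at physical
 * sector (1 << 7) + 44 = 172.  raid10_find_virt(conf, 172, 0) then
 * maps back to (4/2 << 7) + 44 = 300.
 */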
363 
364 static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
365 {
366 	int n,f;
367 	sector_t sector;
368 	sector_t chunk;
369 	sector_t stripe;
370 	int dev;
371 
372 	int slot = 0;
373 
374 	/* now calculate first sector/dev */
375 	chunk = r10bio->sector >> conf->chunk_shift;
376 	sector = r10bio->sector & conf->chunk_mask;
377 
378 	chunk *= conf->near_copies;
379 	stripe = chunk;
380 	dev = sector_div(stripe, conf->raid_disks);
381 	if (conf->far_offset)
382 		stripe *= conf->far_copies;
383 
384 	sector += stripe << conf->chunk_shift;
385 
386 	/* and calculate all the others */
387 	for (n=0; n < conf->near_copies; n++) {
388 		int d = dev;
389 		sector_t s = sector;
390 		r10bio->devs[slot].addr = sector;
391 		r10bio->devs[slot].devnum = d;
392 		slot++;
393 
394 		for (f = 1; f < conf->far_copies; f++) {
395 			d += conf->near_copies;
396 			if (d >= conf->raid_disks)
397 				d -= conf->raid_disks;
398 			s += conf->stride;
399 			r10bio->devs[slot].devnum = d;
400 			r10bio->devs[slot].addr = s;
401 			slot++;
402 		}
403 		dev++;
404 		if (dev >= conf->raid_disks) {
405 			dev = 0;
406 			sector += (conf->chunk_mask + 1);
407 		}
408 	}
409 	BUG_ON(slot != conf->copies);
410 }
411 
412 static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
413 {
414 	sector_t offset, chunk, vchunk;
415 
416 	offset = sector & conf->chunk_mask;
417 	if (conf->far_offset) {
418 		int fc;
419 		chunk = sector >> conf->chunk_shift;
420 		fc = sector_div(chunk, conf->far_copies);
421 		dev -= fc * conf->near_copies;
422 		if (dev < 0)
423 			dev += conf->raid_disks;
424 	} else {
425 		while (sector >= conf->stride) {
426 			sector -= conf->stride;
427 			if (dev < conf->near_copies)
428 				dev += conf->raid_disks - conf->near_copies;
429 			else
430 				dev -= conf->near_copies;
431 		}
432 		chunk = sector >> conf->chunk_shift;
433 	}
434 	vchunk = chunk * conf->raid_disks + dev;
435 	sector_div(vchunk, conf->near_copies);
436 	return (vchunk << conf->chunk_shift) + offset;
437 }
438 
439 /**
440  *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
441  *	@q: request queue
442  *	@bvm: properties of new bio
443  *	@biovec: the request that could be merged to it.
444  *
445  *	Return the number of bytes we can accept at this offset.
446  *      If near_copies == raid_disks, there are no striping issues,
447  *      but in that case, the function isn't called at all.
448  */
449 static int raid10_mergeable_bvec(struct request_queue *q,
450 				 struct bvec_merge_data *bvm,
451 				 struct bio_vec *biovec)
452 {
453 	mddev_t *mddev = q->queuedata;
454 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
455 	int max;
456 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
457 	unsigned int bio_sectors = bvm->bi_size >> 9;
458 
459 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
460 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
461 	if (max <= biovec->bv_len && bio_sectors == 0)
462 		return biovec->bv_len;
463 	else
464 		return max;
465 }
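/*
 * E.g. with 64K chunks (chunk_sectors = 128), a bio already holding
 * 20 sectors at offset 100 within its chunk can accept at most
 * (128 - (100 + 20)) << 9 = 4096 more bytes here before it would
 * cross a chunk boundary.  (Illustrative numbers only.)
 */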
466 
467 /*
468  * This routine returns the disk from which the requested read should
469  * be done. There is a per-array 'next expected sequential IO' sector
470  * number - if this matches on the next IO then we use the last disk.
471  * There is also a per-disk 'last known head position' sector that is
472  * maintained from IRQ contexts, both the normal and the resync IO
473  * completion handlers update this position correctly. If there is no
474  * perfect sequential match then we pick the disk whose head is closest.
475  *
476  * If there are 2 mirrors in the same 2 devices, performance degrades
477  * because the position is mirror-based, not device-based.
478  *
479  * The rdev for the device selected will have nr_pending incremented.
480  */
481 
482 /*
483  * FIXME: possibly should rethink readbalancing and do it differently
484  * depending on near_copies / far_copies geometry.
485  */
486 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
487 {
488 	const unsigned long this_sector = r10_bio->sector;
489 	int disk, slot, nslot;
490 	const int sectors = r10_bio->sectors;
491 	sector_t new_distance, current_distance;
492 	mdk_rdev_t *rdev;
493 
494 	raid10_find_phys(conf, r10_bio);
495 	rcu_read_lock();
496 	/*
497 	 * Check if we can balance. We can balance on the whole
498 	 * device if no resync is going on (recovery is ok), or below
499 	 * the resync window. We take the first readable disk when
500 	 * above the resync window.
501 	 */
502 	if (conf->mddev->recovery_cp < MaxSector
503 	    && (this_sector + sectors >= conf->next_resync)) {
504 		/* make sure that disk is operational */
505 		slot = 0;
506 		disk = r10_bio->devs[slot].devnum;
507 
508 		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
509 		       r10_bio->devs[slot].bio == IO_BLOCKED ||
510 		       !test_bit(In_sync, &rdev->flags)) {
511 			slot++;
512 			if (slot == conf->copies) {
513 				slot = 0;
514 				disk = -1;
515 				break;
516 			}
517 			disk = r10_bio->devs[slot].devnum;
518 		}
519 		goto rb_out;
520 	}
521 
522 
523 	/* make sure the disk is operational */
524 	slot = 0;
525 	disk = r10_bio->devs[slot].devnum;
526 	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
527 	       r10_bio->devs[slot].bio == IO_BLOCKED ||
528 	       !test_bit(In_sync, &rdev->flags)) {
529 		slot ++;
530 		if (slot == conf->copies) {
531 			disk = -1;
532 			goto rb_out;
533 		}
534 		disk = r10_bio->devs[slot].devnum;
535 	}
536 
537 
538 	current_distance = abs(r10_bio->devs[slot].addr -
539 			       conf->mirrors[disk].head_position);
540 
541 	/* Find the disk whose head is closest,
542 	 * or - for far > 1 - find the closest to partition beginning */
543 
544 	for (nslot = slot; nslot < conf->copies; nslot++) {
545 		int ndisk = r10_bio->devs[nslot].devnum;
546 
547 
548 		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
549 		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
550 		    !test_bit(In_sync, &rdev->flags))
551 			continue;
552 
553 		/* This optimisation is debatable, and completely destroys
554 		 * sequential read speed for 'far copies' arrays.  So only
555 		 * keep it for 'near' arrays, and review those later.
556 		 */
557 		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
558 			disk = ndisk;
559 			slot = nslot;
560 			break;
561 		}
562 
563 		/* for far > 1 always use the lowest address */
564 		if (conf->far_copies > 1)
565 			new_distance = r10_bio->devs[nslot].addr;
566 		else
567 			new_distance = abs(r10_bio->devs[nslot].addr -
568 					   conf->mirrors[ndisk].head_position);
569 		if (new_distance < current_distance) {
570 			current_distance = new_distance;
571 			disk = ndisk;
572 			slot = nslot;
573 		}
574 	}
575 
576 rb_out:
577 	r10_bio->read_slot = slot;
578 /*	conf->next_seq_sect = this_sector + sectors;*/
579 
580 	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
581 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
582 	else
583 		disk = -1;
584 	rcu_read_unlock();
585 
586 	return disk;
587 }
588 
589 static void unplug_slaves(mddev_t *mddev)
590 {
591 	conf_t *conf = mddev_to_conf(mddev);
592 	int i;
593 
594 	rcu_read_lock();
595 	for (i=0; i<mddev->raid_disks; i++) {
596 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
597 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
598 			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
599 
600 			atomic_inc(&rdev->nr_pending);
601 			rcu_read_unlock();
602 
603 			blk_unplug(r_queue);
604 
605 			rdev_dec_pending(rdev, mddev);
606 			rcu_read_lock();
607 		}
608 	}
609 	rcu_read_unlock();
610 }
611 
612 static void raid10_unplug(struct request_queue *q)
613 {
614 	mddev_t *mddev = q->queuedata;
615 
616 	unplug_slaves(q->queuedata);
617 	md_wakeup_thread(mddev->thread);
618 }
619 
620 static int raid10_congested(void *data, int bits)
621 {
622 	mddev_t *mddev = data;
623 	conf_t *conf = mddev_to_conf(mddev);
624 	int i, ret = 0;
625 
626 	rcu_read_lock();
627 	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
628 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
629 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
630 			struct request_queue *q = bdev_get_queue(rdev->bdev);
631 
632 			ret |= bdi_congested(&q->backing_dev_info, bits);
633 		}
634 	}
635 	rcu_read_unlock();
636 	return ret;
637 }
638 
639 static int flush_pending_writes(conf_t *conf)
640 {
641 	/* Any writes that have been queued but are awaiting
642 	 * bitmap updates get flushed here.
643 	 * We return 1 if any requests were actually submitted.
644 	 */
645 	int rv = 0;
646 
647 	spin_lock_irq(&conf->device_lock);
648 
649 	if (conf->pending_bio_list.head) {
650 		struct bio *bio;
651 		bio = bio_list_get(&conf->pending_bio_list);
652 		blk_remove_plug(conf->mddev->queue);
653 		spin_unlock_irq(&conf->device_lock);
654 		/* flush any pending bitmap writes to disk
655 		 * before proceeding w/ I/O */
656 		bitmap_unplug(conf->mddev->bitmap);
657 
658 		while (bio) { /* submit pending writes */
659 			struct bio *next = bio->bi_next;
660 			bio->bi_next = NULL;
661 			generic_make_request(bio);
662 			bio = next;
663 		}
664 		rv = 1;
665 	} else
666 		spin_unlock_irq(&conf->device_lock);
667 	return rv;
668 }
669 /* Barriers....
670  * Sometimes we need to suspend IO while we do something else,
671  * either some resync/recovery, or reconfigure the array.
672  * To do this we raise a 'barrier'.
673  * The 'barrier' is a counter that can be raised multiple times
674  * to count how many activities are happening which preclude
675  * normal IO.
676  * We can only raise the barrier if there is no pending IO.
677  * i.e. if nr_pending == 0.
678  * We choose only to raise the barrier if no-one is waiting for the
679  * barrier to go down.  This means that as soon as an IO request
680  * is ready, no other operations which require a barrier will start
681  * until the IO request has had a chance.
682  *
683  * So: regular IO calls 'wait_barrier'.  When that returns there
684  *    is no background IO happening.  It must arrange to call
685  *    allow_barrier when it has finished its IO.
686  * Background IO calls must call raise_barrier.  Once that returns
687  *    there is no normal IO happening.  It must arrange to call
688  *    lower_barrier when the particular background IO completes.
689  */
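/*
 * A minimal sketch of the calling convention described above
 * (illustrative only; the real callers are make_request() and
 * sync_request() below):
 *
 *	wait_barrier(conf);	 regular IO: blocks while resync/recovery
 *				 holds the barrier
 *	... submit normal IO ...
 *	allow_barrier(conf);	 called once the IO has completed
 *
 *	raise_barrier(conf, 0);	 background IO: blocks new normal IO
 *	... do resync/recovery IO ...
 *	lower_barrier(conf);
 */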
690 #define RESYNC_DEPTH 32
691 
692 static void raise_barrier(conf_t *conf, int force)
693 {
694 	BUG_ON(force && !conf->barrier);
695 	spin_lock_irq(&conf->resync_lock);
696 
697 	/* Wait until no block IO is waiting (unless 'force') */
698 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
699 			    conf->resync_lock,
700 			    raid10_unplug(conf->mddev->queue));
701 
702 	/* block any new IO from starting */
703 	conf->barrier++;
704 
705 	/* Now wait for all pending IO to complete */
706 	wait_event_lock_irq(conf->wait_barrier,
707 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
708 			    conf->resync_lock,
709 			    raid10_unplug(conf->mddev->queue));
710 
711 	spin_unlock_irq(&conf->resync_lock);
712 }
713 
714 static void lower_barrier(conf_t *conf)
715 {
716 	unsigned long flags;
717 	spin_lock_irqsave(&conf->resync_lock, flags);
718 	conf->barrier--;
719 	spin_unlock_irqrestore(&conf->resync_lock, flags);
720 	wake_up(&conf->wait_barrier);
721 }
722 
723 static void wait_barrier(conf_t *conf)
724 {
725 	spin_lock_irq(&conf->resync_lock);
726 	if (conf->barrier) {
727 		conf->nr_waiting++;
728 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
729 				    conf->resync_lock,
730 				    raid10_unplug(conf->mddev->queue));
731 		conf->nr_waiting--;
732 	}
733 	conf->nr_pending++;
734 	spin_unlock_irq(&conf->resync_lock);
735 }
736 
737 static void allow_barrier(conf_t *conf)
738 {
739 	unsigned long flags;
740 	spin_lock_irqsave(&conf->resync_lock, flags);
741 	conf->nr_pending--;
742 	spin_unlock_irqrestore(&conf->resync_lock, flags);
743 	wake_up(&conf->wait_barrier);
744 }
745 
746 static void freeze_array(conf_t *conf)
747 {
748 	/* stop sync IO and normal IO and wait for everything to
749 	 * go quiet.
750 	 * We increment barrier and nr_waiting, and then
751 	 * wait until nr_pending matches nr_queued+1
752 	 * This is called in the context of one normal IO request
753 	 * that has failed. Thus any sync request that might be pending
754 	 * will be blocked by nr_pending, and we need to wait for
755 	 * pending IO requests to complete or be queued for re-try.
756 	 * Thus the number queued (nr_queued) plus this request (1)
757 	 * must match the number of pending IOs (nr_pending) before
758 	 * we continue.
759 	 */
760 	spin_lock_irq(&conf->resync_lock);
761 	conf->barrier++;
762 	conf->nr_waiting++;
763 	wait_event_lock_irq(conf->wait_barrier,
764 			    conf->nr_pending == conf->nr_queued+1,
765 			    conf->resync_lock,
766 			    ({ flush_pending_writes(conf);
767 			       raid10_unplug(conf->mddev->queue); }));
768 	spin_unlock_irq(&conf->resync_lock);
769 }
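/*
 * E.g. if nr_pending == 3 (the failed request being handled here plus
 * two others in flight), the wait above ends only once each of the
 * other two has either completed (nr_pending drops to 1 == 0+1) or
 * been queued for retry (nr_queued rises to 2, so 3 == 2+1).
 */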
770 
771 static void unfreeze_array(conf_t *conf)
772 {
773 	/* reverse the effect of the freeze */
774 	spin_lock_irq(&conf->resync_lock);
775 	conf->barrier--;
776 	conf->nr_waiting--;
777 	wake_up(&conf->wait_barrier);
778 	spin_unlock_irq(&conf->resync_lock);
779 }
780 
781 static int make_request(struct request_queue *q, struct bio * bio)
782 {
783 	mddev_t *mddev = q->queuedata;
784 	conf_t *conf = mddev_to_conf(mddev);
785 	mirror_info_t *mirror;
786 	r10bio_t *r10_bio;
787 	struct bio *read_bio;
788 	int i;
789 	int chunk_sects = conf->chunk_mask + 1;
790 	const int rw = bio_data_dir(bio);
791 	const int do_sync = bio_sync(bio);
792 	struct bio_list bl;
793 	unsigned long flags;
794 	mdk_rdev_t *blocked_rdev;
795 
796 	if (unlikely(bio_barrier(bio))) {
797 		bio_endio(bio, -EOPNOTSUPP);
798 		return 0;
799 	}
800 
801 	/* If this request crosses a chunk boundary, we need to
802 	 * split it.  This will only happen for requests of one page or less.
803 	 */
804 	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
805 		      > chunk_sects &&
806 		    conf->near_copies < conf->raid_disks)) {
807 		struct bio_pair *bp;
808 		/* Sanity check -- queue functions should prevent this happening */
809 		if (bio->bi_vcnt != 1 ||
810 		    bio->bi_idx != 0)
811 			goto bad_map;
812 		/* This is a one page bio that upper layers
813 		 * refuse to split for us, so we need to split it.
814 		 */
815 		bp = bio_split(bio, bio_split_pool,
816 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
817 		if (make_request(q, &bp->bio1))
818 			generic_make_request(&bp->bio1);
819 		if (make_request(q, &bp->bio2))
820 			generic_make_request(&bp->bio2);
821 
822 		bio_pair_release(bp);
823 		return 0;
824 	bad_map:
825 		printk("raid10_make_request bug: can't convert block across chunks"
826 		       " or bigger than %dk %llu %d\n", chunk_sects/2,
827 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
828 
829 		bio_io_error(bio);
830 		return 0;
831 	}
832 
833 	md_write_start(mddev, bio);
834 
835 	/*
836 	 * Register the new request and wait if the reconstruction
837 	 * thread has put up a bar for new requests.
838 	 * Continue immediately if no resync is active currently.
839 	 */
840 	wait_barrier(conf);
841 
842 	disk_stat_inc(mddev->gendisk, ios[rw]);
843 	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
844 
845 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
846 
847 	r10_bio->master_bio = bio;
848 	r10_bio->sectors = bio->bi_size >> 9;
849 
850 	r10_bio->mddev = mddev;
851 	r10_bio->sector = bio->bi_sector;
852 	r10_bio->state = 0;
853 
854 	if (rw == READ) {
855 		/*
856 		 * read balancing logic:
857 		 */
858 		int disk = read_balance(conf, r10_bio);
859 		int slot = r10_bio->read_slot;
860 		if (disk < 0) {
861 			raid_end_bio_io(r10_bio);
862 			return 0;
863 		}
864 		mirror = conf->mirrors + disk;
865 
866 		read_bio = bio_clone(bio, GFP_NOIO);
867 
868 		r10_bio->devs[slot].bio = read_bio;
869 
870 		read_bio->bi_sector = r10_bio->devs[slot].addr +
871 			mirror->rdev->data_offset;
872 		read_bio->bi_bdev = mirror->rdev->bdev;
873 		read_bio->bi_end_io = raid10_end_read_request;
874 		read_bio->bi_rw = READ | do_sync;
875 		read_bio->bi_private = r10_bio;
876 
877 		generic_make_request(read_bio);
878 		return 0;
879 	}
880 
881 	/*
882 	 * WRITE:
883 	 */
884 	/* first select target devices under rcu_lock and
885 	 * inc refcount on their rdev.  Record them by setting
886 	 * bios[x] to bio
887 	 */
888 	raid10_find_phys(conf, r10_bio);
889  retry_write:
890 	blocked_rdev = NULL;
891 	rcu_read_lock();
892 	for (i = 0;  i < conf->copies; i++) {
893 		int d = r10_bio->devs[i].devnum;
894 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
895 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
896 			atomic_inc(&rdev->nr_pending);
897 			blocked_rdev = rdev;
898 			break;
899 		}
900 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
901 			atomic_inc(&rdev->nr_pending);
902 			r10_bio->devs[i].bio = bio;
903 		} else {
904 			r10_bio->devs[i].bio = NULL;
905 			set_bit(R10BIO_Degraded, &r10_bio->state);
906 		}
907 	}
908 	rcu_read_unlock();
909 
910 	if (unlikely(blocked_rdev)) {
911 		/* Have to wait for this device to get unblocked, then retry */
912 		int j;
913 		int d;
914 
915 		for (j = 0; j < i; j++)
916 			if (r10_bio->devs[j].bio) {
917 				d = r10_bio->devs[j].devnum;
918 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
919 			}
920 		allow_barrier(conf);
921 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
922 		wait_barrier(conf);
923 		goto retry_write;
924 	}
925 
926 	atomic_set(&r10_bio->remaining, 0);
927 
928 	bio_list_init(&bl);
929 	for (i = 0; i < conf->copies; i++) {
930 		struct bio *mbio;
931 		int d = r10_bio->devs[i].devnum;
932 		if (!r10_bio->devs[i].bio)
933 			continue;
934 
935 		mbio = bio_clone(bio, GFP_NOIO);
936 		r10_bio->devs[i].bio = mbio;
937 
938 		mbio->bi_sector	= r10_bio->devs[i].addr+
939 			conf->mirrors[d].rdev->data_offset;
940 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
941 		mbio->bi_end_io	= raid10_end_write_request;
942 		mbio->bi_rw = WRITE | do_sync;
943 		mbio->bi_private = r10_bio;
944 
945 		atomic_inc(&r10_bio->remaining);
946 		bio_list_add(&bl, mbio);
947 	}
948 
949 	if (unlikely(!atomic_read(&r10_bio->remaining))) {
950 		/* the array is dead */
951 		md_write_end(mddev);
952 		raid_end_bio_io(r10_bio);
953 		return 0;
954 	}
955 
956 	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
957 	spin_lock_irqsave(&conf->device_lock, flags);
958 	bio_list_merge(&conf->pending_bio_list, &bl);
959 	blk_plug_device(mddev->queue);
960 	spin_unlock_irqrestore(&conf->device_lock, flags);
961 
962 	/* In case raid10d snuck in to freeze_array */
963 	wake_up(&conf->wait_barrier);
964 
965 	if (do_sync)
966 		md_wakeup_thread(mddev->thread);
967 
968 	return 0;
969 }
970 
971 static void status(struct seq_file *seq, mddev_t *mddev)
972 {
973 	conf_t *conf = mddev_to_conf(mddev);
974 	int i;
975 
976 	if (conf->near_copies < conf->raid_disks)
977 		seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
978 	if (conf->near_copies > 1)
979 		seq_printf(seq, " %d near-copies", conf->near_copies);
980 	if (conf->far_copies > 1) {
981 		if (conf->far_offset)
982 			seq_printf(seq, " %d offset-copies", conf->far_copies);
983 		else
984 			seq_printf(seq, " %d far-copies", conf->far_copies);
985 	}
986 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
987 					conf->raid_disks - mddev->degraded);
988 	for (i = 0; i < conf->raid_disks; i++)
989 		seq_printf(seq, "%s",
990 			      conf->mirrors[i].rdev &&
991 			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
992 	seq_printf(seq, "]");
993 }
994 
995 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
996 {
997 	char b[BDEVNAME_SIZE];
998 	conf_t *conf = mddev_to_conf(mddev);
999 
1000 	/*
1001 	 * If it is not operational, then we have already marked it as dead
1002 	 * else if it is the last working disk, ignore the error, let the
1003 	 * next level up know.
1004 	 * else mark the drive as failed
1005 	 */
1006 	if (test_bit(In_sync, &rdev->flags)
1007 	    && conf->raid_disks-mddev->degraded == 1)
1008 		/*
1009 		 * Don't fail the drive, just return an IO error.
1010 		 * The test should really be more sophisticated than
1011 		 * "working_disks == 1", but it isn't critical, and
1012 		 * can wait until we do more sophisticated "is the drive
1013 		 * really dead" tests...
1014 		 */
1015 		return;
1016 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1017 		unsigned long flags;
1018 		spin_lock_irqsave(&conf->device_lock, flags);
1019 		mddev->degraded++;
1020 		spin_unlock_irqrestore(&conf->device_lock, flags);
1021 		/*
1022 		 * if recovery is running, make sure it aborts.
1023 		 */
1024 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1025 	}
1026 	set_bit(Faulty, &rdev->flags);
1027 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1028 	printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
1029 		"raid10: Operation continuing on %d devices.\n",
1030 		bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1031 }
1032 
1033 static void print_conf(conf_t *conf)
1034 {
1035 	int i;
1036 	mirror_info_t *tmp;
1037 
1038 	printk("RAID10 conf printout:\n");
1039 	if (!conf) {
1040 		printk("(!conf)\n");
1041 		return;
1042 	}
1043 	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1044 		conf->raid_disks);
1045 
1046 	for (i = 0; i < conf->raid_disks; i++) {
1047 		char b[BDEVNAME_SIZE];
1048 		tmp = conf->mirrors + i;
1049 		if (tmp->rdev)
1050 			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
1051 				i, !test_bit(In_sync, &tmp->rdev->flags),
1052 			        !test_bit(Faulty, &tmp->rdev->flags),
1053 				bdevname(tmp->rdev->bdev,b));
1054 	}
1055 }
1056 
1057 static void close_sync(conf_t *conf)
1058 {
1059 	wait_barrier(conf);
1060 	allow_barrier(conf);
1061 
1062 	mempool_destroy(conf->r10buf_pool);
1063 	conf->r10buf_pool = NULL;
1064 }
1065 
1066 /* check if there are enough drives for
1067  * every block to appear on at least one drive
1068  */
1069 static int enough(conf_t *conf)
1070 {
1071 	int first = 0;
1072 
1073 	do {
1074 		int n = conf->copies;
1075 		int cnt = 0;
1076 		while (n--) {
1077 			if (conf->mirrors[first].rdev)
1078 				cnt++;
1079 			first = (first+1) % conf->raid_disks;
1080 		}
1081 		if (cnt == 0)
1082 			return 0;
1083 	} while (first != 0);
1084 	return 1;
1085 }
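/*
 * E.g. with raid_disks = 4 and copies = 2 the windows checked are
 * {0,1} and {2,3}: losing disks 0 and 2 still leaves a copy of every
 * block, while losing disks 0 and 1 does not.
 */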
1086 
1087 static int raid10_spare_active(mddev_t *mddev)
1088 {
1089 	int i;
1090 	conf_t *conf = mddev->private;
1091 	mirror_info_t *tmp;
1092 
1093 	/*
1094 	 * Find all non-in_sync disks within the RAID10 configuration
1095 	 * and mark them in_sync
1096 	 */
1097 	for (i = 0; i < conf->raid_disks; i++) {
1098 		tmp = conf->mirrors + i;
1099 		if (tmp->rdev
1100 		    && !test_bit(Faulty, &tmp->rdev->flags)
1101 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1102 			unsigned long flags;
1103 			spin_lock_irqsave(&conf->device_lock, flags);
1104 			mddev->degraded--;
1105 			spin_unlock_irqrestore(&conf->device_lock, flags);
1106 		}
1107 	}
1108 
1109 	print_conf(conf);
1110 	return 0;
1111 }
1112 
1113 
1114 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1115 {
1116 	conf_t *conf = mddev->private;
1117 	int err = -EEXIST;
1118 	int mirror;
1119 	mirror_info_t *p;
1120 	int first = 0;
1121 	int last = mddev->raid_disks - 1;
1122 
1123 	if (mddev->recovery_cp < MaxSector)
1124 		/* only hot-add to in-sync arrays, as recovery is
1125 		 * very different from resync
1126 		 */
1127 		return -EBUSY;
1128 	if (!enough(conf))
1129 		return -EINVAL;
1130 
1131 	if (rdev->raid_disk)
1132 		first = last = rdev->raid_disk;
1133 
1134 	if (rdev->saved_raid_disk >= 0 &&
1135 	    rdev->saved_raid_disk >= first &&
1136 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1137 		mirror = rdev->saved_raid_disk;
1138 	else
1139 		mirror = first;
1140 	for ( ; mirror <= last ; mirror++)
1141 		if ( !(p=conf->mirrors+mirror)->rdev) {
1142 
1143 			blk_queue_stack_limits(mddev->queue,
1144 					       rdev->bdev->bd_disk->queue);
1145 			/* as we don't honour merge_bvec_fn, we must never risk
1146 			 * violating it, so limit ->max_sectors to one PAGE, as
1147 			 * a one page request is never in violation.
1148 			 */
1149 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1150 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
1151 				mddev->queue->max_sectors = (PAGE_SIZE>>9);
1152 
1153 			p->head_position = 0;
1154 			rdev->raid_disk = mirror;
1155 			err = 0;
1156 			if (rdev->saved_raid_disk != mirror)
1157 				conf->fullsync = 1;
1158 			rcu_assign_pointer(p->rdev, rdev);
1159 			break;
1160 		}
1161 
1162 	print_conf(conf);
1163 	return err;
1164 }
1165 
1166 static int raid10_remove_disk(mddev_t *mddev, int number)
1167 {
1168 	conf_t *conf = mddev->private;
1169 	int err = 0;
1170 	mdk_rdev_t *rdev;
1171 	mirror_info_t *p = conf->mirrors+ number;
1172 
1173 	print_conf(conf);
1174 	rdev = p->rdev;
1175 	if (rdev) {
1176 		if (test_bit(In_sync, &rdev->flags) ||
1177 		    atomic_read(&rdev->nr_pending)) {
1178 			err = -EBUSY;
1179 			goto abort;
1180 		}
1181 		/* Only remove faulty devices if recovery
1182 		 * is not possible.
1183 		 */
1184 		if (!test_bit(Faulty, &rdev->flags) &&
1185 		    enough(conf)) {
1186 			err = -EBUSY;
1187 			goto abort;
1188 		}
1189 		p->rdev = NULL;
1190 		synchronize_rcu();
1191 		if (atomic_read(&rdev->nr_pending)) {
1192 			/* lost the race, try later */
1193 			err = -EBUSY;
1194 			p->rdev = rdev;
1195 		}
1196 	}
1197 abort:
1198 
1199 	print_conf(conf);
1200 	return err;
1201 }
1202 
1203 
1204 static void end_sync_read(struct bio *bio, int error)
1205 {
1206 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1207 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
1208 	int i,d;
1209 
1210 	for (i=0; i<conf->copies; i++)
1211 		if (r10_bio->devs[i].bio == bio)
1212 			break;
1213 	BUG_ON(i == conf->copies);
1214 	update_head_pos(i, r10_bio);
1215 	d = r10_bio->devs[i].devnum;
1216 
1217 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1218 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1219 	else {
1220 		atomic_add(r10_bio->sectors,
1221 			   &conf->mirrors[d].rdev->corrected_errors);
1222 		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1223 			md_error(r10_bio->mddev,
1224 				 conf->mirrors[d].rdev);
1225 	}
1226 
1227 	/* for reconstruct, we always reschedule after a read.
1228 	 * for resync, only after all reads
1229 	 */
1230 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1231 	    atomic_dec_and_test(&r10_bio->remaining)) {
1232 		/* we have read all the blocks,
1233 		 * do the comparison in process context in raid10d
1234 		 */
1235 		reschedule_retry(r10_bio);
1236 	}
1237 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1238 }
1239 
1240 static void end_sync_write(struct bio *bio, int error)
1241 {
1242 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1243 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1244 	mddev_t *mddev = r10_bio->mddev;
1245 	conf_t *conf = mddev_to_conf(mddev);
1246 	int i,d;
1247 
1248 	for (i = 0; i < conf->copies; i++)
1249 		if (r10_bio->devs[i].bio == bio)
1250 			break;
1251 	d = r10_bio->devs[i].devnum;
1252 
1253 	if (!uptodate)
1254 		md_error(mddev, conf->mirrors[d].rdev);
1255 
1256 	update_head_pos(i, r10_bio);
1257 
1258 	while (atomic_dec_and_test(&r10_bio->remaining)) {
1259 		if (r10_bio->master_bio == NULL) {
1260 			/* the primary of several recovery bios */
1261 			md_done_sync(mddev, r10_bio->sectors, 1);
1262 			put_buf(r10_bio);
1263 			break;
1264 		} else {
1265 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1266 			put_buf(r10_bio);
1267 			r10_bio = r10_bio2;
1268 		}
1269 	}
1270 	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1271 }
1272 
1273 /*
1274  * Note: sync and recovery are handled very differently for raid10.
1275  * This code is for resync.
1276  * For resync, we read through virtual addresses and read all blocks.
1277  * If there is any error, we schedule a write.  The lowest numbered
1278  * drive is authoritative.
1279  * However, requests come in for physical addresses, so we need to map.
1280  * For every physical address there are raid_disks/copies virtual addresses,
1281  * which is always at least one, but is not necessarily an integer.
1282  * This means that a physical address can span multiple chunks, so we may
1283  * have to submit multiple io requests for a single sync request.
1284  */
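/*
 * E.g. with raid_disks = 5 and copies = 2 there are 5/2 = 2.5 virtual
 * addresses per physical address, so physical chunk boundaries do not
 * always line up with virtual chunk boundaries and one sync request
 * may have to be split into several bios.
 */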
1285 /*
1286  * We check if all blocks are in-sync and only write to blocks that
1287  * aren't in sync
1288  */
1289 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1290 {
1291 	conf_t *conf = mddev_to_conf(mddev);
1292 	int i, first;
1293 	struct bio *tbio, *fbio;
1294 
1295 	atomic_set(&r10_bio->remaining, 1);
1296 
1297 	/* find the first device with a block */
1298 	for (i=0; i<conf->copies; i++)
1299 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1300 			break;
1301 
1302 	if (i == conf->copies)
1303 		goto done;
1304 
1305 	first = i;
1306 	fbio = r10_bio->devs[i].bio;
1307 
1308 	/* now find blocks with errors */
1309 	for (i=0 ; i < conf->copies ; i++) {
1310 		int  j, d;
1311 		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1312 
1313 		tbio = r10_bio->devs[i].bio;
1314 
1315 		if (tbio->bi_end_io != end_sync_read)
1316 			continue;
1317 		if (i == first)
1318 			continue;
1319 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1320 			/* We know that the bi_io_vec layout is the same for
1321 			 * both 'first' and 'i', so we just compare them.
1322 			 * All vec entries are PAGE_SIZE;
1323 			 */
1324 			for (j = 0; j < vcnt; j++)
1325 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1326 					   page_address(tbio->bi_io_vec[j].bv_page),
1327 					   PAGE_SIZE))
1328 					break;
1329 			if (j == vcnt)
1330 				continue;
1331 			mddev->resync_mismatches += r10_bio->sectors;
1332 		}
1333 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1334 			/* Don't fix anything. */
1335 			continue;
1336 		/* Ok, we need to write this bio
1337 		 * First we need to fixup bv_offset, bv_len and
1338 		 * bi_vecs, as the read request might have corrupted these
1339 		 */
1340 		tbio->bi_vcnt = vcnt;
1341 		tbio->bi_size = r10_bio->sectors << 9;
1342 		tbio->bi_idx = 0;
1343 		tbio->bi_phys_segments = 0;
1344 		tbio->bi_hw_segments = 0;
1345 		tbio->bi_hw_front_size = 0;
1346 		tbio->bi_hw_back_size = 0;
1347 		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1348 		tbio->bi_flags |= 1 << BIO_UPTODATE;
1349 		tbio->bi_next = NULL;
1350 		tbio->bi_rw = WRITE;
1351 		tbio->bi_private = r10_bio;
1352 		tbio->bi_sector = r10_bio->devs[i].addr;
1353 
1354 		for (j=0; j < vcnt ; j++) {
1355 			tbio->bi_io_vec[j].bv_offset = 0;
1356 			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1357 
1358 			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1359 			       page_address(fbio->bi_io_vec[j].bv_page),
1360 			       PAGE_SIZE);
1361 		}
1362 		tbio->bi_end_io = end_sync_write;
1363 
1364 		d = r10_bio->devs[i].devnum;
1365 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1366 		atomic_inc(&r10_bio->remaining);
1367 		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1368 
1369 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1370 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1371 		generic_make_request(tbio);
1372 	}
1373 
1374 done:
1375 	if (atomic_dec_and_test(&r10_bio->remaining)) {
1376 		md_done_sync(mddev, r10_bio->sectors, 1);
1377 		put_buf(r10_bio);
1378 	}
1379 }
1380 
1381 /*
1382  * Now for the recovery code.
1383  * Recovery happens across physical sectors.
1384  * We recover all non-is_sync drives by finding the virtual address of
1385  * each, and then choose a working drive that also has that virt address.
1386  * There is a separate r10_bio for each non-in_sync drive.
1387  * Only the first two slots are in use. The first for reading,
1388  * The second for writing.
1389  *
1390  */
1391 
1392 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1393 {
1394 	conf_t *conf = mddev_to_conf(mddev);
1395 	int i, d;
1396 	struct bio *bio, *wbio;
1397 
1398 
1399 	/* move the pages across to the second bio
1400 	 * and submit the write request
1401 	 */
1402 	bio = r10_bio->devs[0].bio;
1403 	wbio = r10_bio->devs[1].bio;
1404 	for (i=0; i < wbio->bi_vcnt; i++) {
1405 		struct page *p = bio->bi_io_vec[i].bv_page;
1406 		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1407 		wbio->bi_io_vec[i].bv_page = p;
1408 	}
1409 	d = r10_bio->devs[1].devnum;
1410 
1411 	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1412 	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1413 	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1414 		generic_make_request(wbio);
1415 	else
1416 		bio_endio(wbio, -EIO);
1417 }
1418 
1419 
1420 /*
1421  * This is a kernel thread which:
1422  *
1423  *	1.	Retries failed read operations on working mirrors.
1424  *	2.	Updates the raid superblock when problems are encountered.
1425  *	3.	Performs writes following reads for array synchronising.
1426  */
1427 
1428 static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1429 {
1430 	int sect = 0; /* Offset from r10_bio->sector */
1431 	int sectors = r10_bio->sectors;
1432 	mdk_rdev_t*rdev;
1433 	while(sectors) {
1434 		int s = sectors;
1435 		int sl = r10_bio->read_slot;
1436 		int success = 0;
1437 		int start;
1438 
1439 		if (s > (PAGE_SIZE>>9))
1440 			s = PAGE_SIZE >> 9;
1441 
1442 		rcu_read_lock();
1443 		do {
1444 			int d = r10_bio->devs[sl].devnum;
1445 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1446 			if (rdev &&
1447 			    test_bit(In_sync, &rdev->flags)) {
1448 				atomic_inc(&rdev->nr_pending);
1449 				rcu_read_unlock();
1450 				success = sync_page_io(rdev->bdev,
1451 						       r10_bio->devs[sl].addr +
1452 						       sect + rdev->data_offset,
1453 						       s<<9,
1454 						       conf->tmppage, READ);
1455 				rdev_dec_pending(rdev, mddev);
1456 				rcu_read_lock();
1457 				if (success)
1458 					break;
1459 			}
1460 			sl++;
1461 			if (sl == conf->copies)
1462 				sl = 0;
1463 		} while (!success && sl != r10_bio->read_slot);
1464 		rcu_read_unlock();
1465 
1466 		if (!success) {
1467 			/* Cannot read from anywhere -- bye bye array */
1468 			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1469 			md_error(mddev, conf->mirrors[dn].rdev);
1470 			break;
1471 		}
1472 
1473 		start = sl;
1474 		/* write it back and re-read */
1475 		rcu_read_lock();
1476 		while (sl != r10_bio->read_slot) {
1477 			int d;
1478 			if (sl==0)
1479 				sl = conf->copies;
1480 			sl--;
1481 			d = r10_bio->devs[sl].devnum;
1482 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1483 			if (rdev &&
1484 			    test_bit(In_sync, &rdev->flags)) {
1485 				atomic_inc(&rdev->nr_pending);
1486 				rcu_read_unlock();
1487 				atomic_add(s, &rdev->corrected_errors);
1488 				if (sync_page_io(rdev->bdev,
1489 						 r10_bio->devs[sl].addr +
1490 						 sect + rdev->data_offset,
1491 						 s<<9, conf->tmppage, WRITE)
1492 				    == 0)
1493 					/* Well, this device is dead */
1494 					md_error(mddev, rdev);
1495 				rdev_dec_pending(rdev, mddev);
1496 				rcu_read_lock();
1497 			}
1498 		}
1499 		sl = start;
1500 		while (sl != r10_bio->read_slot) {
1501 			int d;
1502 			if (sl==0)
1503 				sl = conf->copies;
1504 			sl--;
1505 			d = r10_bio->devs[sl].devnum;
1506 			rdev = rcu_dereference(conf->mirrors[d].rdev);
1507 			if (rdev &&
1508 			    test_bit(In_sync, &rdev->flags)) {
1509 				char b[BDEVNAME_SIZE];
1510 				atomic_inc(&rdev->nr_pending);
1511 				rcu_read_unlock();
1512 				if (sync_page_io(rdev->bdev,
1513 						 r10_bio->devs[sl].addr +
1514 						 sect + rdev->data_offset,
1515 						 s<<9, conf->tmppage, READ) == 0)
1516 					/* Well, this device is dead */
1517 					md_error(mddev, rdev);
1518 				else
1519 					printk(KERN_INFO
1520 					       "raid10:%s: read error corrected"
1521 					       " (%d sectors at %llu on %s)\n",
1522 					       mdname(mddev), s,
1523 					       (unsigned long long)(sect+
1524 					            rdev->data_offset),
1525 					       bdevname(rdev->bdev, b));
1526 
1527 				rdev_dec_pending(rdev, mddev);
1528 				rcu_read_lock();
1529 			}
1530 		}
1531 		rcu_read_unlock();
1532 
1533 		sectors -= s;
1534 		sect += s;
1535 	}
1536 }
1537 
1538 static void raid10d(mddev_t *mddev)
1539 {
1540 	r10bio_t *r10_bio;
1541 	struct bio *bio;
1542 	unsigned long flags;
1543 	conf_t *conf = mddev_to_conf(mddev);
1544 	struct list_head *head = &conf->retry_list;
1545 	int unplug=0;
1546 	mdk_rdev_t *rdev;
1547 
1548 	md_check_recovery(mddev);
1549 
1550 	for (;;) {
1551 		char b[BDEVNAME_SIZE];
1552 
1553 		unplug += flush_pending_writes(conf);
1554 
1555 		spin_lock_irqsave(&conf->device_lock, flags);
1556 		if (list_empty(head)) {
1557 			spin_unlock_irqrestore(&conf->device_lock, flags);
1558 			break;
1559 		}
1560 		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1561 		list_del(head->prev);
1562 		conf->nr_queued--;
1563 		spin_unlock_irqrestore(&conf->device_lock, flags);
1564 
1565 		mddev = r10_bio->mddev;
1566 		conf = mddev_to_conf(mddev);
1567 		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1568 			sync_request_write(mddev, r10_bio);
1569 			unplug = 1;
1570 		} else 	if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1571 			recovery_request_write(mddev, r10_bio);
1572 			unplug = 1;
1573 		} else {
1574 			int mirror;
1575 			/* we got a read error. Maybe the drive is bad.  Maybe just
1576 			 * the block is bad, and we can fix it.
1577 			 * We freeze all other IO, and try reading the block from
1578 			 * other devices.  When we find one, we re-write
1579 			 * and check if that fixes the read error.
1580 			 * This is all done synchronously while the array is
1581 			 * frozen.
1582 			 */
1583 			if (mddev->ro == 0) {
1584 				freeze_array(conf);
1585 				fix_read_error(conf, mddev, r10_bio);
1586 				unfreeze_array(conf);
1587 			}
1588 
1589 			bio = r10_bio->devs[r10_bio->read_slot].bio;
1590 			r10_bio->devs[r10_bio->read_slot].bio =
1591 				mddev->ro ? IO_BLOCKED : NULL;
1592 			mirror = read_balance(conf, r10_bio);
1593 			if (mirror == -1) {
1594 				printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1595 				       " read error for block %llu\n",
1596 				       bdevname(bio->bi_bdev,b),
1597 				       (unsigned long long)r10_bio->sector);
1598 				raid_end_bio_io(r10_bio);
1599 				bio_put(bio);
1600 			} else {
1601 				const int do_sync = bio_sync(r10_bio->master_bio);
1602 				bio_put(bio);
1603 				rdev = conf->mirrors[mirror].rdev;
1604 				if (printk_ratelimit())
1605 					printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1606 					       " another mirror\n",
1607 					       bdevname(rdev->bdev,b),
1608 					       (unsigned long long)r10_bio->sector);
1609 				bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1610 				r10_bio->devs[r10_bio->read_slot].bio = bio;
1611 				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1612 					+ rdev->data_offset;
1613 				bio->bi_bdev = rdev->bdev;
1614 				bio->bi_rw = READ | do_sync;
1615 				bio->bi_private = r10_bio;
1616 				bio->bi_end_io = raid10_end_read_request;
1617 				unplug = 1;
1618 				generic_make_request(bio);
1619 			}
1620 		}
1621 	}
1622 	if (unplug)
1623 		unplug_slaves(mddev);
1624 }
1625 
1626 
1627 static int init_resync(conf_t *conf)
1628 {
1629 	int buffs;
1630 
1631 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1632 	BUG_ON(conf->r10buf_pool);
1633 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1634 	if (!conf->r10buf_pool)
1635 		return -ENOMEM;
1636 	conf->next_resync = 0;
1637 	return 0;
1638 }
1639 
1640 /*
1641  * perform a "sync" on one "block"
1642  *
1643  * We need to make sure that no normal I/O request - particularly write
1644  * requests - conflict with active sync requests.
1645  *
1646  * This is achieved by tracking pending requests and a 'barrier' concept
1647  * that can be installed to exclude normal IO requests.
1648  *
1649  * Resync and recovery are handled very differently.
1650  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1651  *
1652  * For resync, we iterate over virtual addresses, read all copies,
1653  * and update if there are differences.  If only one copy is live,
1654  * skip it.
1655  * For recovery, we iterate over physical addresses, read a good
1656  * value for each non-in_sync drive, and over-write.
1657  *
1658  * So, for recovery we may have several outstanding complex requests for a
1659  * given address, one for each out-of-sync device.  We model this by allocating
1660  * a number of r10_bio structures, one for each out-of-sync device.
1661  * As we setup these structures, we collect all bio's together into a list
1662  * which we then process collectively to add pages, and then process again
1663  * to pass to generic_make_request.
1664  *
1665  * The r10_bio structures are linked using a borrowed master_bio pointer.
1666  * This link is counted in ->remaining.  When the r10_bio that points to NULL
1667  * has its remaining count decremented to 0, the whole complex operation
1668  * is complete.
1669  *
1670  */
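/*
 * Sketch of the recovery chain described above (illustrative): with
 * three out-of-sync devices the r10_bios are linked
 *
 *	r10_bio_C -> r10_bio_B -> r10_bio_A -> NULL
 *
 * via the borrowed master_bio pointers; each link increments the
 * target's ->remaining, so r10_bio_A (the one pointing to NULL)
 * completes last and ends the whole operation.
 */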
1671 
1672 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1673 {
1674 	conf_t *conf = mddev_to_conf(mddev);
1675 	r10bio_t *r10_bio;
1676 	struct bio *biolist = NULL, *bio;
1677 	sector_t max_sector, nr_sectors;
1678 	int disk;
1679 	int i;
1680 	int max_sync;
1681 	int sync_blocks;
1682 
1683 	sector_t sectors_skipped = 0;
1684 	int chunks_skipped = 0;
1685 
1686 	if (!conf->r10buf_pool)
1687 		if (init_resync(conf))
1688 			return 0;
1689 
1690  skipped:
1691 	max_sector = mddev->size << 1;
1692 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1693 		max_sector = mddev->resync_max_sectors;
1694 	if (sector_nr >= max_sector) {
1695 		/* If we aborted, we need to abort the
1696 		 * sync on the 'current' bitmap chunks (there can
1697 		 * be several when recovering multiple devices).
1698 		 * as we may have started syncing it but not finished.
1699 		 * We can find the current address in
1700 		 * mddev->curr_resync, but for recovery,
1701 		 * we need to convert that to several
1702 		 * virtual addresses.
1703 		 */
1704 		if (mddev->curr_resync < max_sector) { /* aborted */
1705 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1706 				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1707 						&sync_blocks, 1);
1708 			else for (i=0; i<conf->raid_disks; i++) {
1709 				sector_t sect =
1710 					raid10_find_virt(conf, mddev->curr_resync, i);
1711 				bitmap_end_sync(mddev->bitmap, sect,
1712 						&sync_blocks, 1);
1713 			}
1714 		} else /* completed sync */
1715 			conf->fullsync = 0;
1716 
1717 		bitmap_close_sync(mddev->bitmap);
1718 		close_sync(conf);
1719 		*skipped = 1;
1720 		return sectors_skipped;
1721 	}
1722 	if (chunks_skipped >= conf->raid_disks) {
1723 		/* if there has been nothing to do on any drive,
1724 		 * then there is nothing to do at all..
1725 		 */
1726 		*skipped = 1;
1727 		return (max_sector - sector_nr) + sectors_skipped;
1728 	}
1729 
1730 	if (max_sector > mddev->resync_max)
1731 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
1732 
1733 	/* make sure whole request will fit in a chunk - if chunks
1734 	 * are meaningful
1735 	 */
1736 	if (conf->near_copies < conf->raid_disks &&
1737 	    max_sector > (sector_nr | conf->chunk_mask))
1738 		max_sector = (sector_nr | conf->chunk_mask) + 1;
1739 	/*
1740 	 * If there is non-resync activity waiting for us then
1741 	 * put in a delay to throttle resync.
1742 	 */
1743 	if (!go_faster && conf->nr_waiting)
1744 		msleep_interruptible(1000);
1745 
1746 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1747 
1748 	/* Again, very different code for resync and recovery.
1749 	 * Both must result in an r10bio with a list of bios that
1750 	 * have bi_end_io, bi_sector, bi_bdev set,
1751 	 * and bi_private set to the r10bio.
1752 	 * For recovery, we may actually create several r10bios
1753 	 * with 2 bios in each, that correspond to the bios in the main one.
1754 	 * In this case, the subordinate r10bios link back through a
1755 	 * borrowed master_bio pointer, and the counter in the master
1756 	 * includes a ref from each subordinate.
1757 	 */
1758 	/* First, we decide what to do and set ->bi_end_io
1759 	 * To end_sync_read if we want to read, and
1760 	 * end_sync_write if we will want to write.
1761 	 */
1762 
1763 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1764 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1765 		/* recovery... the complicated one */
1766 		int i, j, k;
1767 		r10_bio = NULL;
1768 
1769 		for (i=0 ; i<conf->raid_disks; i++)
1770 			if (conf->mirrors[i].rdev &&
1771 			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1772 				int still_degraded = 0;
1773 				/* want to reconstruct this device */
1774 				r10bio_t *rb2 = r10_bio;
1775 				sector_t sect = raid10_find_virt(conf, sector_nr, i);
1776 				int must_sync;
1777 				/* Unless we are doing a full sync, we only need
1778 				 * to recover the block if it is set in the bitmap
1779 				 */
1780 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1781 							      &sync_blocks, 1);
1782 				if (sync_blocks < max_sync)
1783 					max_sync = sync_blocks;
1784 				if (!must_sync &&
1785 				    !conf->fullsync) {
1786 					/* yep, skip the sync_blocks here, but don't assume
1787 					 * that there will never be anything to do here
1788 					 */
1789 					chunks_skipped = -1;
1790 					continue;
1791 				}
1792 
1793 				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1794 				raise_barrier(conf, rb2 != NULL);
1795 				atomic_set(&r10_bio->remaining, 0);
1796 
1797 				r10_bio->master_bio = (struct bio*)rb2;
1798 				if (rb2)
1799 					atomic_inc(&rb2->remaining);
1800 				r10_bio->mddev = mddev;
1801 				set_bit(R10BIO_IsRecover, &r10_bio->state);
1802 				r10_bio->sector = sect;
1803 
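				/* fill r10_bio->devs[] with the physical
				 * (device, sector) address of every copy of
				 * this virtual sector
				 */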
1804 				raid10_find_phys(conf, r10_bio);
1805 				/* Need to check if this section will still be
1806 				 * degraded
1807 				 */
1808 				for (j=0; j<conf->copies;j++) {
1809 					int d = r10_bio->devs[j].devnum;
1810 					if (conf->mirrors[d].rdev == NULL ||
1811 					    test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1812 						still_degraded = 1;
1813 						break;
1814 					}
1815 				}
1816 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1817 							      &sync_blocks, still_degraded);
1818 
1819 				for (j=0; j<conf->copies;j++) {
1820 					int d = r10_bio->devs[j].devnum;
1821 					if (conf->mirrors[d].rdev &&
1822 					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1823 						/* This is where we read from */
1824 						bio = r10_bio->devs[0].bio;
1825 						bio->bi_next = biolist;
1826 						biolist = bio;
1827 						bio->bi_private = r10_bio;
1828 						bio->bi_end_io = end_sync_read;
1829 						bio->bi_rw = READ;
1830 						bio->bi_sector = r10_bio->devs[j].addr +
1831 							conf->mirrors[d].rdev->data_offset;
1832 						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1833 						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1834 						atomic_inc(&r10_bio->remaining);
1835 						/* and we write to 'i' */
1836 
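						/* find the slot in devs[]
						 * that maps onto mirror 'i',
						 * the device being rebuilt
						 */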
1837 						for (k=0; k<conf->copies; k++)
1838 							if (r10_bio->devs[k].devnum == i)
1839 								break;
1840 						BUG_ON(k == conf->copies);
1841 						bio = r10_bio->devs[1].bio;
1842 						bio->bi_next = biolist;
1843 						biolist = bio;
1844 						bio->bi_private = r10_bio;
1845 						bio->bi_end_io = end_sync_write;
1846 						bio->bi_rw = WRITE;
1847 						bio->bi_sector = r10_bio->devs[k].addr +
1848 							conf->mirrors[i].rdev->data_offset;
1849 						bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1850 
1851 						r10_bio->devs[0].devnum = d;
1852 						r10_bio->devs[1].devnum = i;
1853 
1854 						break;
1855 					}
1856 				}
1857 				if (j == conf->copies) {
1858 					/* Cannot recover, so abort the recovery */
1859 					put_buf(r10_bio);
1860 					if (rb2)
1861 						atomic_dec(&rb2->remaining);
1862 					r10_bio = rb2;
1863 					if (!test_and_set_bit(MD_RECOVERY_INTR,
1864 							      &mddev->recovery))
1865 						printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1866 						       mdname(mddev));
1867 					break;
1868 				}
1869 			}
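		/* nothing could be scheduled: unwind the chain of partial
		 * r10bios via their borrowed master_bio links
		 */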
1870 		if (biolist == NULL) {
1871 			while (r10_bio) {
1872 				r10bio_t *rb2 = r10_bio;
1873 				r10_bio = (r10bio_t*) rb2->master_bio;
1874 				rb2->master_bio = NULL;
1875 				put_buf(rb2);
1876 			}
1877 			goto giveup;
1878 		}
1879 	} else {
1880 		/* resync. Schedule a read for every block at this virt offset */
1881 		int count = 0;
1882 
1883 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1884 				       &sync_blocks, mddev->degraded) &&
1885 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1886 			/* We can skip this block */
1887 			*skipped = 1;
1888 			return sync_blocks + sectors_skipped;
1889 		}
1890 		if (sync_blocks < max_sync)
1891 			max_sync = sync_blocks;
1892 		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1893 
1894 		r10_bio->mddev = mddev;
1895 		atomic_set(&r10_bio->remaining, 0);
1896 		raise_barrier(conf, 0);
1897 		conf->next_resync = sector_nr;
1898 
1899 		r10_bio->master_bio = NULL;
1900 		r10_bio->sector = sector_nr;
1901 		set_bit(R10BIO_IsSync, &r10_bio->state);
1902 		raid10_find_phys(conf, r10_bio);
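		/* one pass covers at most the rest of the current chunk */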
1903 		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
1904 
1905 		for (i=0; i<conf->copies; i++) {
1906 			int d = r10_bio->devs[i].devnum;
1907 			bio = r10_bio->devs[i].bio;
1908 			bio->bi_end_io = NULL;
1909 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
1910 			if (conf->mirrors[d].rdev == NULL ||
1911 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1912 				continue;
1913 			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1914 			atomic_inc(&r10_bio->remaining);
1915 			bio->bi_next = biolist;
1916 			biolist = bio;
1917 			bio->bi_private = r10_bio;
1918 			bio->bi_end_io = end_sync_read;
1919 			bio->bi_rw = READ;
1920 			bio->bi_sector = r10_bio->devs[i].addr +
1921 				conf->mirrors[d].rdev->data_offset;
1922 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1923 			count++;
1924 		}
1925 
1926 		if (count < 2) {
1927 			for (i=0; i<conf->copies; i++) {
1928 				int d = r10_bio->devs[i].devnum;
1929 				if (r10_bio->devs[i].bio->bi_end_io)
1930 					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1931 			}
1932 			put_buf(r10_bio);
1933 			biolist = NULL;
1934 			goto giveup;
1935 		}
1936 	}
1937 
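	/* the bios come from the r10buf mempool and may carry state from
	 * an earlier use, so reset them before pages are added below
	 */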
1938 	for (bio = biolist; bio ; bio=bio->bi_next) {
1939 
1940 		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1941 		if (bio->bi_end_io)
1942 			bio->bi_flags |= 1 << BIO_UPTODATE;
1943 		bio->bi_vcnt = 0;
1944 		bio->bi_idx = 0;
1945 		bio->bi_phys_segments = 0;
1946 		bio->bi_hw_segments = 0;
1947 		bio->bi_size = 0;
1948 	}
1949 
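	/* Add pages to every active bio in lock-step so that all copies
	 * describe exactly the same range of sectors.  If a bio cannot
	 * take another page, trim that page from the bios already filled
	 * and submit what we have.
	 */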
1950 	nr_sectors = 0;
1951 	if (sector_nr + max_sync < max_sector)
1952 		max_sector = sector_nr + max_sync;
1953 	do {
1954 		struct page *page;
1955 		int len = PAGE_SIZE;
1956 		disk = 0;
1957 		if (sector_nr + (len>>9) > max_sector)
1958 			len = (max_sector - sector_nr) << 9;
1959 		if (len == 0)
1960 			break;
1961 		for (bio= biolist ; bio ; bio=bio->bi_next) {
1962 			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1963 			if (bio_add_page(bio, page, len, 0) == 0) {
1964 				/* stop here */
1965 				struct bio *bio2;
1966 				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1967 				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
1968 					/* remove last page from this bio */
1969 					bio2->bi_vcnt--;
1970 					bio2->bi_size -= len;
1971 					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
1972 				}
1973 				goto bio_full;
1974 			}
1975 			disk = i;
1976 		}
1977 		nr_sectors += len>>9;
1978 		sector_nr += len>>9;
1979 	} while (biolist->bi_vcnt < RESYNC_PAGES);
1980  bio_full:
1981 	r10_bio->sectors = nr_sectors;
1982 
1983 	while (biolist) {
1984 		bio = biolist;
1985 		biolist = biolist->bi_next;
1986 
1987 		bio->bi_next = NULL;
1988 		r10_bio = bio->bi_private;
1989 		r10_bio->sectors = nr_sectors;
1990 
1991 		if (bio->bi_end_io == end_sync_read) {
1992 			md_sync_acct(bio->bi_bdev, nr_sectors);
1993 			generic_make_request(bio);
1994 		}
1995 	}
1996 
1997 	if (sectors_skipped)
1998 		/* pretend they weren't skipped; it makes
1999 		 * no important difference in this case
2000 		 */
2001 		md_done_sync(mddev, sectors_skipped, 1);
2002 
2003 	return sectors_skipped + nr_sectors;
2004  giveup:
2005 	/* There is nowhere to write, so all non-sync
2006 	 * drives must have failed; try the next chunk...
2007 	 */
2008 	{
2009 		sector_t sec = max_sector - sector_nr;
2010 		sectors_skipped += sec;
2011 		chunks_skipped++;
2012 		sector_nr = max_sector;
2013 		goto skipped;
2014 	}
2015 }
2016 
2017 static int run(mddev_t *mddev)
2018 {
2019 	conf_t *conf;
2020 	int i, disk_idx;
2021 	mirror_info_t *disk;
2022 	mdk_rdev_t *rdev;
2023 	struct list_head *tmp;
2024 	int nc, fc, fo;
2025 	sector_t stride, size;
2026 
2027 	if (mddev->chunk_size == 0) {
2028 		printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	nc = mddev->layout & 255;
2033 	fc = (mddev->layout >> 8) & 255;
2034 	fo = mddev->layout & (1<<16);
2035 	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2036 	    (mddev->layout >> 17)) {
2037 		printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
2038 		       mdname(mddev), mddev->layout);
2039 		goto out;
2040 	}
2041 	/*
2042 	 * copy the already verified devices into our private RAID10
2043 	 * bookkeeping area. [whatever we allocate in run()
2044 	 * should be freed in stop()]
2045 	 */
2046 	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2047 	mddev->private = conf;
2048 	if (!conf) {
2049 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2050 			mdname(mddev));
2051 		goto out;
2052 	}
2053 	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2054 				 GFP_KERNEL);
2055 	if (!conf->mirrors) {
2056 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2057 		       mdname(mddev));
2058 		goto out_free_conf;
2059 	}
2060 
2061 	conf->tmppage = alloc_page(GFP_KERNEL);
2062 	if (!conf->tmppage)
2063 		goto out_free_conf;
2064 
2065 	conf->mddev = mddev;
2066 	conf->raid_disks = mddev->raid_disks;
2067 	conf->near_copies = nc;
2068 	conf->far_copies = fc;
2069 	conf->copies = nc*fc;
2070 	conf->far_offset = fo;
2071 	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
2072 	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
2073 	size = mddev->size >> (conf->chunk_shift-1);
2074 	sector_div(size, fc);
2075 	size = size * conf->raid_disks;
2076 	sector_div(size, nc);
2077 	/* 'size' is now the number of chunks in the array */
2078 	/* calculate "used chunks per device" in 'stride' */
2079 	stride = size * conf->copies;
2080 
2081 	/* We need to round up when dividing by raid_disks to
2082 	 * get the stride size.
2083 	 */
2084 	stride += conf->raid_disks - 1;
2085 	sector_div(stride, conf->raid_disks);
2086 	mddev->size = stride  << (conf->chunk_shift-1);
2087 
2088 	if (fo)
2089 		stride = 1;
2090 	else
2091 		sector_div(stride, fc);
2092 	conf->stride = stride << conf->chunk_shift;
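	/*
	 * Worked example with made-up numbers: 4 disks of 1 GiB
	 * (mddev->size = 1048576 KB), 64 KB chunks, nc=2, fc=1:
	 *   chunk_shift = 7, chunk_mask = 127 (128 sectors per chunk)
	 *   size   = 1048576 >> 6 = 16384 chunks per device,
	 *            then /fc * raid_disks / nc = 32768 chunks in the array
	 *   stride = (32768 * 2 copies + 3) / 4 disks = 16384 chunks,
	 *            so mddev->size = 16384 << 6 = 1048576 KB per device
	 *   conf->stride = (16384 / fc) << 7 = 2097152 sectors
	 */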
2093 
2094 	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2095 						r10bio_pool_free, conf);
2096 	if (!conf->r10bio_pool) {
2097 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2098 			mdname(mddev));
2099 		goto out_free_conf;
2100 	}
2101 
2102 	spin_lock_init(&conf->device_lock);
2103 	mddev->queue->queue_lock = &conf->device_lock;
2104 
2105 	rdev_for_each(rdev, tmp, mddev) {
2106 		disk_idx = rdev->raid_disk;
2107 		if (disk_idx >= mddev->raid_disks
2108 		    || disk_idx < 0)
2109 			continue;
2110 		disk = conf->mirrors + disk_idx;
2111 
2112 		disk->rdev = rdev;
2113 
2114 		blk_queue_stack_limits(mddev->queue,
2115 				       rdev->bdev->bd_disk->queue);
2116 		/* as we don't honour merge_bvec_fn, we must never risk
2117 		 * violating it, so limit ->max_sectors to one PAGE, as
2118 		 * a one-page request is never in violation.
2119 		 */
2120 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2121 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
2122 			mddev->queue->max_sectors = (PAGE_SIZE>>9);
2123 
2124 		disk->head_position = 0;
2125 	}
2126 	INIT_LIST_HEAD(&conf->retry_list);
2127 
2128 	spin_lock_init(&conf->resync_lock);
2129 	init_waitqueue_head(&conf->wait_barrier);
2130 
2131 	/* need to check that every block has at least one working mirror */
2132 	if (!enough(conf)) {
2133 		printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
2134 		       mdname(mddev));
2135 		goto out_free_conf;
2136 	}
2137 
2138 	mddev->degraded = 0;
2139 	for (i = 0; i < conf->raid_disks; i++) {
2140 
2141 		disk = conf->mirrors + i;
2142 
2143 		if (!disk->rdev ||
2144 		    !test_bit(In_sync, &disk->rdev->flags)) {
2145 			disk->head_position = 0;
2146 			mddev->degraded++;
2147 			if (disk->rdev)
2148 				conf->fullsync = 1;
2149 		}
2150 	}
2151 
2152 
2153 	mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
2154 	if (!mddev->thread) {
2155 		printk(KERN_ERR
2156 		       "raid10: couldn't allocate thread for %s\n",
2157 		       mdname(mddev));
2158 		goto out_free_conf;
2159 	}
2160 
2161 	printk(KERN_INFO
2162 		"raid10: raid set %s active with %d out of %d devices\n",
2163 		mdname(mddev), mddev->raid_disks - mddev->degraded,
2164 		mddev->raid_disks);
2165 	/*
2166 	 * Ok, everything is just fine now
2167 	 */
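	/* 'size' counts chunks, so shifting by chunk_shift yields sectors */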
2168 	mddev->array_sectors = size << conf->chunk_shift;
2169 	mddev->resync_max_sectors = size << conf->chunk_shift;
2170 
2171 	mddev->queue->unplug_fn = raid10_unplug;
2172 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2173 	mddev->queue->backing_dev_info.congested_data = mddev;
2174 
2175 	/* Calculate max read-ahead size.
2176 	 * We need to read ahead by at least two whole stripes...
2177 	 * maybe...
2178 	 */
2179 	{
2180 		int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
2181 		stripe /= conf->near_copies;
2182 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2183 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2184 	}
2185 
2186 	if (conf->near_copies < mddev->raid_disks)
2187 		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2188 	return 0;
2189 
2190 out_free_conf:
2191 	if (conf->r10bio_pool)
2192 		mempool_destroy(conf->r10bio_pool);
2193 	safe_put_page(conf->tmppage);
2194 	kfree(conf->mirrors);
2195 	kfree(conf);
2196 	mddev->private = NULL;
2197 out:
2198 	return -EIO;
2199 }
2200 
2201 static int stop(mddev_t *mddev)
2202 {
2203 	conf_t *conf = mddev_to_conf(mddev);
2204 
2205 	md_unregister_thread(mddev->thread);
2206 	mddev->thread = NULL;
2207 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2208 	if (conf->r10bio_pool)
2209 		mempool_destroy(conf->r10bio_pool);
2210 	kfree(conf->mirrors);
2211 	kfree(conf);
2212 	mddev->private = NULL;
2213 	return 0;
2214 }
2215 
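/*
 * raid10_quiesce(): state 1 raises the resync barrier so no new normal
 * I/O can start; state 0 lowers it again.  The bitmap daemon timeout
 * is refreshed in both cases.
 */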
2216 static void raid10_quiesce(mddev_t *mddev, int state)
2217 {
2218 	conf_t *conf = mddev_to_conf(mddev);
2219 
2220 	switch(state) {
2221 	case 1:
2222 		raise_barrier(conf, 0);
2223 		break;
2224 	case 0:
2225 		lower_barrier(conf);
2226 		break;
2227 	}
2228 	if (mddev->thread) {
2229 		if (mddev->bitmap)
2230 			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2231 		else
2232 			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2233 		md_wakeup_thread(mddev->thread);
2234 	}
2235 }
2236 
2237 static struct mdk_personality raid10_personality =
2238 {
2239 	.name		= "raid10",
2240 	.level		= 10,
2241 	.owner		= THIS_MODULE,
2242 	.make_request	= make_request,
2243 	.run		= run,
2244 	.stop		= stop,
2245 	.status		= status,
2246 	.error_handler	= error,
2247 	.hot_add_disk	= raid10_add_disk,
2248 	.hot_remove_disk= raid10_remove_disk,
2249 	.spare_active	= raid10_spare_active,
2250 	.sync_request	= sync_request,
2251 	.quiesce	= raid10_quiesce,
2252 };
2253 
2254 static int __init raid_init(void)
2255 {
2256 	return register_md_personality(&raid10_personality);
2257 }
2258 
2259 static void raid_exit(void)
2260 {
2261 	unregister_md_personality(&raid10_personality);
2262 }
2263 
2264 module_init(raid_init);
2265 module_exit(raid_exit);
2266 MODULE_LICENSE("GPL");
2267 MODULE_ALIAS("md-personality-9"); /* RAID10 */
2268 MODULE_ALIAS("md-raid10");
2269 MODULE_ALIAS("md-level-10");
2270