xref: /openbmc/linux/drivers/md/raid10.c (revision 87c2ce3b)
1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include "dm-bio-list.h"
22 #include <linux/raid/raid10.h>
23 #include <linux/raid/bitmap.h>
24 
25 /*
26  * RAID10 provides a combination of RAID0 and RAID1 functionality.
27  * The layout of data is defined by
28  *    chunk_size
29  *    raid_disks
30  *    near_copies (stored in low byte of layout)
31  *    far_copies (stored in second byte of layout)
32  *
33  * The data to be stored is divided into chunks using chunksize.
34  * Each device is divided into far_copies sections.
35  * In each section, chunks are laid out in a style similar to raid0, but
36  * near_copies copies of each chunk are stored (each on a different drive).
37  * The starting device for each section is offset near_copies from the starting
38  * device of the previous section.
39  * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
40  * drive.
41  * near_copies and far_copies must be at least one, and their product is at most
42  * raid_disks.
43  */
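/*
 * Worked example (illustrative values): with raid_disks=4, near_copies=2 and
 * far_copies=1, chunk 0 is stored on devices 0 and 1, chunk 1 on devices 2
 * and 3, chunk 2 on devices 0 and 1 again, and so on.  With raid_disks=2,
 * near_copies=1 and far_copies=2, each device holds two sections: section 0
 * stripes chunks across devices 0,1,0,1,... while section 1 holds the same
 * chunks shifted by one device (1,0,1,0,...), so every chunk still ends up
 * on two different devices.
 */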
44 
45 /*
46  * Number of guaranteed r10bios in case of extreme VM load:
47  */
48 #define	NR_RAID10_BIOS 256
49 
50 static void unplug_slaves(mddev_t *mddev);
51 
52 static void allow_barrier(conf_t *conf);
53 static void lower_barrier(conf_t *conf);
54 
55 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
56 {
57 	conf_t *conf = data;
58 	r10bio_t *r10_bio;
59 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
60 
61 	/* allocate a r10bio with room for raid_disks entries in the bios array */
62 	r10_bio = kzalloc(size, gfp_flags);
63 	if (!r10_bio)
64 		unplug_slaves(conf->mddev);
65 
66 	return r10_bio;
67 }
68 
69 static void r10bio_pool_free(void *r10_bio, void *data)
70 {
71 	kfree(r10_bio);
72 }
73 
74 #define RESYNC_BLOCK_SIZE (64*1024)
75 //#define RESYNC_BLOCK_SIZE PAGE_SIZE
76 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
77 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
78 #define RESYNC_WINDOW (2048*1024)
79 
80 /*
81  * When performing a resync, we need to read and compare, so
82  * we need as many pages as there are copies.
83  * When performing a recovery, we need 2 bios, one for read,
84  * one for write (we recover only one drive per r10buf)
85  *
86  */
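/*
 * Concretely: a resync r10_bio gets conf->copies bios, each with room for
 * RESYNC_PAGES pages; a recovery r10_bio gets just two bios (slot 0 is used
 * for the read, slot 1 for the write).
 */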
87 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
88 {
89 	conf_t *conf = data;
90 	struct page *page;
91 	r10bio_t *r10_bio;
92 	struct bio *bio;
93 	int i, j;
94 	int nalloc;
95 
96 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
97 	if (!r10_bio) {
98 		unplug_slaves(conf->mddev);
99 		return NULL;
100 	}
101 
102 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
103 		nalloc = conf->copies; /* resync */
104 	else
105 		nalloc = 2; /* recovery */
106 
107 	/*
108 	 * Allocate bios.
109 	 */
110 	for (j = nalloc ; j-- ; ) {
111 		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
112 		if (!bio)
113 			goto out_free_bio;
114 		r10_bio->devs[j].bio = bio;
115 	}
116 	/*
117 	 * Allocate RESYNC_PAGES data pages and attach them
118 	 * where needed.
119 	 */
120 	for (j = 0 ; j < nalloc; j++) {
121 		bio = r10_bio->devs[j].bio;
122 		for (i = 0; i < RESYNC_PAGES; i++) {
123 			page = alloc_page(gfp_flags);
124 			if (unlikely(!page))
125 				goto out_free_pages;
126 
127 			bio->bi_io_vec[i].bv_page = page;
128 		}
129 	}
130 
131 	return r10_bio;
132 
133 out_free_pages:
134 	for ( ; i > 0 ; i--)
135 		safe_put_page(bio->bi_io_vec[i-1].bv_page);
136 	while (j--)
137 		for (i = 0; i < RESYNC_PAGES ; i++)
138 			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
139 	j = -1;
140 out_free_bio:
141 	while ( ++j < nalloc )
142 		bio_put(r10_bio->devs[j].bio);
143 	r10bio_pool_free(r10_bio, conf);
144 	return NULL;
145 }
146 
147 static void r10buf_pool_free(void *__r10_bio, void *data)
148 {
149 	int i;
150 	conf_t *conf = data;
151 	r10bio_t *r10bio = __r10_bio;
152 	int j;
153 
154 	for (j=0; j < conf->copies; j++) {
155 		struct bio *bio = r10bio->devs[j].bio;
156 		if (bio) {
157 			for (i = 0; i < RESYNC_PAGES; i++) {
158 				safe_put_page(bio->bi_io_vec[i].bv_page);
159 				bio->bi_io_vec[i].bv_page = NULL;
160 			}
161 			bio_put(bio);
162 		}
163 	}
164 	r10bio_pool_free(r10bio, conf);
165 }
166 
167 static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
168 {
169 	int i;
170 
171 	for (i = 0; i < conf->copies; i++) {
172 		struct bio **bio = & r10_bio->devs[i].bio;
173 		if (*bio && *bio != IO_BLOCKED)
174 			bio_put(*bio);
175 		*bio = NULL;
176 	}
177 }
178 
179 static inline void free_r10bio(r10bio_t *r10_bio)
180 {
181 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
182 
183 	/*
184 	 * Wake up any possible resync thread that waits for the device
185 	 * to go idle.
186 	 */
187 	allow_barrier(conf);
188 
189 	put_all_bios(conf, r10_bio);
190 	mempool_free(r10_bio, conf->r10bio_pool);
191 }
192 
193 static inline void put_buf(r10bio_t *r10_bio)
194 {
195 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
196 
197 	mempool_free(r10_bio, conf->r10buf_pool);
198 
199 	lower_barrier(conf);
200 }
201 
202 static void reschedule_retry(r10bio_t *r10_bio)
203 {
204 	unsigned long flags;
205 	mddev_t *mddev = r10_bio->mddev;
206 	conf_t *conf = mddev_to_conf(mddev);
207 
208 	spin_lock_irqsave(&conf->device_lock, flags);
209 	list_add(&r10_bio->retry_list, &conf->retry_list);
210 	conf->nr_queued ++;
211 	spin_unlock_irqrestore(&conf->device_lock, flags);
212 
213 	md_wakeup_thread(mddev->thread);
214 }
215 
216 /*
217  * raid_end_bio_io() is called when we have finished servicing a mirrored
218  * operation and are ready to return a success/failure code to the buffer
219  * cache layer.
220  */
221 static void raid_end_bio_io(r10bio_t *r10_bio)
222 {
223 	struct bio *bio = r10_bio->master_bio;
224 
225 	bio_endio(bio, bio->bi_size,
226 		test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
227 	free_r10bio(r10_bio);
228 }
229 
230 /*
231  * Update disk head position estimator based on IRQ completion info.
232  */
233 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
234 {
235 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
236 
237 	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
238 		r10_bio->devs[slot].addr + (r10_bio->sectors);
239 }
240 
241 static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
242 {
243 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
244 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
245 	int slot, dev;
246 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
247 
248 	if (bio->bi_size)
249 		return 1;
250 
251 	slot = r10_bio->read_slot;
252 	dev = r10_bio->devs[slot].devnum;
253 	/*
254 	 * this branch is our 'one mirror IO has finished' event handler:
255 	 */
256 	update_head_pos(slot, r10_bio);
257 
258 	if (uptodate) {
259 		/*
260 		 * Set R10BIO_Uptodate in our master bio, so that
261 		 * we will return a good error code to the higher
262 		 * levels even if IO on some other mirrored buffer fails.
263 		 *
264 		 * The 'master' represents the composite IO operation to
265 		 * user-side. So if something waits for IO, then it will
266 		 * wait for the 'master' bio.
267 		 */
268 		set_bit(R10BIO_Uptodate, &r10_bio->state);
269 		raid_end_bio_io(r10_bio);
270 	} else {
271 		/*
272 		 * oops, read error:
273 		 */
274 		char b[BDEVNAME_SIZE];
275 		if (printk_ratelimit())
276 			printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
277 			       bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
278 		reschedule_retry(r10_bio);
279 	}
280 
281 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
282 	return 0;
283 }
284 
285 static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
286 {
287 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
288 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
289 	int slot, dev;
290 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
291 
292 	if (bio->bi_size)
293 		return 1;
294 
295 	for (slot = 0; slot < conf->copies; slot++)
296 		if (r10_bio->devs[slot].bio == bio)
297 			break;
298 	dev = r10_bio->devs[slot].devnum;
299 
300 	/*
301 	 * this branch is our 'one mirror IO has finished' event handler:
302 	 */
303 	if (!uptodate) {
304 		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
305 		/* an I/O failed, we can't clear the bitmap */
306 		set_bit(R10BIO_Degraded, &r10_bio->state);
307 	} else
308 		/*
309 		 * Set R10BIO_Uptodate in our master bio, so that
310 		 * we will return a good error code to the higher
311 		 * levels even if IO on some other mirrored buffer fails.
312 		 *
313 		 * The 'master' represents the composite IO operation to
314 		 * user-side. So if something waits for IO, then it will
315 		 * wait for the 'master' bio.
316 		 */
317 		set_bit(R10BIO_Uptodate, &r10_bio->state);
318 
319 	update_head_pos(slot, r10_bio);
320 
321 	/*
322 	 *
323 	 * Let's see if all mirrored write operations have finished
324 	 * already.
325 	 */
326 	if (atomic_dec_and_test(&r10_bio->remaining)) {
327 		/* clear the bitmap if all writes complete successfully */
328 		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
329 				r10_bio->sectors,
330 				!test_bit(R10BIO_Degraded, &r10_bio->state),
331 				0);
332 		md_write_end(r10_bio->mddev);
333 		raid_end_bio_io(r10_bio);
334 	}
335 
336 	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
337 	return 0;
338 }
339 
340 
341 /*
342  * RAID10 layout manager
343  * As well as the chunksize and raid_disks count, there are two
344  * parameters: near_copies and far_copies.
345  * near_copies * far_copies must be <= raid_disks.
346  * Normally one of these will be 1.
347  * If both are 1, we get raid0.
348  * If near_copies == raid_disks, we get raid1.
349  *
350  * Chunks are laid out in raid0 style with near_copies copies of the
351  * first chunk, followed by near_copies copies of the next chunk and
352  * so on.
353  * If far_copies > 1, then after 1/far_copies of the array has been assigned
354  * as described above, we start again with a device offset of near_copies.
355  * So we effectively have another copy of the whole array further down all
356  * the drives, but with blocks on different drives.
357  * With this layout, a block is never stored twice on the same device.
358  *
359  * raid10_find_phys finds the sector offset of a given virtual sector
360  * on each device that it is on. If a block isn't on a device,
361  * that entry in the array is set to MaxSector.
362  *
363  * raid10_find_virt does the reverse mapping, from a device and a
364  * sector offset to a virtual address
365  */
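/*
 * Example mapping (illustrative numbers): with raid_disks=4, near_copies=2,
 * far_copies=1 and 64-sector chunks, virtual sector 200 lies in chunk 3 at
 * offset 8.  raid10_find_phys places its two copies at sector 72 (stripe 1,
 * offset 8) of devices 2 and 3, and raid10_find_virt maps (device 2,
 * sector 72) back to virtual sector 200.
 */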
366 
367 static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
368 {
369 	int n,f;
370 	sector_t sector;
371 	sector_t chunk;
372 	sector_t stripe;
373 	int dev;
374 
375 	int slot = 0;
376 
377 	/* now calculate first sector/dev */
378 	chunk = r10bio->sector >> conf->chunk_shift;
379 	sector = r10bio->sector & conf->chunk_mask;
380 
381 	chunk *= conf->near_copies;
382 	stripe = chunk;
383 	dev = sector_div(stripe, conf->raid_disks);
384 
385 	sector += stripe << conf->chunk_shift;
386 
387 	/* and calculate all the others */
388 	for (n=0; n < conf->near_copies; n++) {
389 		int d = dev;
390 		sector_t s = sector;
391 		r10bio->devs[slot].addr = sector;
392 		r10bio->devs[slot].devnum = d;
393 		slot++;
394 
395 		for (f = 1; f < conf->far_copies; f++) {
396 			d += conf->near_copies;
397 			if (d >= conf->raid_disks)
398 				d -= conf->raid_disks;
399 			s += conf->stride;
400 			r10bio->devs[slot].devnum = d;
401 			r10bio->devs[slot].addr = s;
402 			slot++;
403 		}
404 		dev++;
405 		if (dev >= conf->raid_disks) {
406 			dev = 0;
407 			sector += (conf->chunk_mask + 1);
408 		}
409 	}
410 	BUG_ON(slot != conf->copies);
411 }
412 
413 static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
414 {
415 	sector_t offset, chunk, vchunk;
416 
417 	while (sector > conf->stride) {
418 		sector -= conf->stride;
419 		if (dev < conf->near_copies)
420 			dev += conf->raid_disks - conf->near_copies;
421 		else
422 			dev -= conf->near_copies;
423 	}
424 
425 	offset = sector & conf->chunk_mask;
426 	chunk = sector >> conf->chunk_shift;
427 	vchunk = chunk * conf->raid_disks + dev;
428 	sector_div(vchunk, conf->near_copies);
429 	return (vchunk << conf->chunk_shift) + offset;
430 }
431 
432 /**
433  *	raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
434  *	@q: request queue
435  *	@bio: the buffer head that's been built up so far
436  *	@biovec: the request that could be merged to it.
437  *
438  *	Return amount of bytes we can accept at this offset
439  *      If near_copies == raid_disks, there are no striping issues,
440  *      but in that case, the function isn't called at all.
441  */
442 static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
443 				struct bio_vec *bio_vec)
444 {
445 	mddev_t *mddev = q->queuedata;
446 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
447 	int max;
448 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
449 	unsigned int bio_sectors = bio->bi_size >> 9;
450 
451 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
452 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
453 	if (max <= bio_vec->bv_len && bio_sectors == 0)
454 		return bio_vec->bv_len;
455 	else
456 		return max;
457 }
458 
459 /*
460  * This routine returns the disk from which the requested read should
461  * be done. There is a per-array 'next expected sequential IO' sector
462  * number - if this matches on the next IO then we use the last disk.
463  * There is also a per-disk 'last known head position' sector that is
464  * maintained from IRQ contexts, both the normal and the resync IO
465  * completion handlers update this position correctly. If there is no
466  * perfect sequential match then we pick the disk whose head is closest.
467  *
468  * If there are 2 mirrors in the same 2 devices, performance degrades
469  * because position is mirror, not device based.
470  *
471  * The rdev for the device selected will have nr_pending incremented.
472  */
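/*
 * Example of the distance heuristic (illustrative numbers): if the two
 * copies of a block live at device sectors 1200 and 8000, and the recorded
 * head positions of those devices are 1000 and 5000, the distances are 200
 * and 3000, so the first device is chosen (devices that are missing, not
 * In_sync, or marked IO_BLOCKED are skipped).  For 'near' layouts a
 * completely idle disk (nr_pending == 0) is taken immediately, regardless
 * of distance.
 */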
473 
474 /*
475  * FIXME: possibly should rethink readbalancing and do it differently
476  * depending on near_copies / far_copies geometry.
477  */
478 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
479 {
480 	const unsigned long this_sector = r10_bio->sector;
481 	int disk, slot, nslot;
482 	const int sectors = r10_bio->sectors;
483 	sector_t new_distance, current_distance;
484 	mdk_rdev_t *rdev;
485 
486 	raid10_find_phys(conf, r10_bio);
487 	rcu_read_lock();
488 	/*
489 	 * Check if we can balance. We can balance on the whole
490 	 * device if no resync is going on (recovery is ok), or below
491 	 * the resync window. We take the first readable disk when
492 	 * above the resync window.
493 	 */
494 	if (conf->mddev->recovery_cp < MaxSector
495 	    && (this_sector + sectors >= conf->next_resync)) {
496 		/* make sure that disk is operational */
497 		slot = 0;
498 		disk = r10_bio->devs[slot].devnum;
499 
500 		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
501 		       r10_bio->devs[slot].bio == IO_BLOCKED ||
502 		       !test_bit(In_sync, &rdev->flags)) {
503 			slot++;
504 			if (slot == conf->copies) {
505 				slot = 0;
506 				disk = -1;
507 				break;
508 			}
509 			disk = r10_bio->devs[slot].devnum;
510 		}
511 		goto rb_out;
512 	}
513 
514 
515 	/* make sure the disk is operational */
516 	slot = 0;
517 	disk = r10_bio->devs[slot].devnum;
518 	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
519 	       r10_bio->devs[slot].bio == IO_BLOCKED ||
520 	       !test_bit(In_sync, &rdev->flags)) {
521 		slot ++;
522 		if (slot == conf->copies) {
523 			disk = -1;
524 			goto rb_out;
525 		}
526 		disk = r10_bio->devs[slot].devnum;
527 	}
528 
529 
530 	current_distance = abs(r10_bio->devs[slot].addr -
531 			       conf->mirrors[disk].head_position);
532 
533 	/* Find the disk whose head is closest */
534 
535 	for (nslot = slot; nslot < conf->copies; nslot++) {
536 		int ndisk = r10_bio->devs[nslot].devnum;
537 
538 
539 		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
540 		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
541 		    !test_bit(In_sync, &rdev->flags))
542 			continue;
543 
544 		/* This optimisation is debatable, and completely destroys
545 		 * sequential read speed for 'far copies' arrays.  So only
546 		 * keep it for 'near' arrays, and review those later.
547 		 */
548 		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
549 			disk = ndisk;
550 			slot = nslot;
551 			break;
552 		}
553 		new_distance = abs(r10_bio->devs[nslot].addr -
554 				   conf->mirrors[ndisk].head_position);
555 		if (new_distance < current_distance) {
556 			current_distance = new_distance;
557 			disk = ndisk;
558 			slot = nslot;
559 		}
560 	}
561 
562 rb_out:
563 	r10_bio->read_slot = slot;
564 /*	conf->next_seq_sect = this_sector + sectors;*/
565 
566 	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
567 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
568 	rcu_read_unlock();
569 
570 	return disk;
571 }
572 
573 static void unplug_slaves(mddev_t *mddev)
574 {
575 	conf_t *conf = mddev_to_conf(mddev);
576 	int i;
577 
578 	rcu_read_lock();
579 	for (i=0; i<mddev->raid_disks; i++) {
580 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
581 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
582 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
583 
584 			atomic_inc(&rdev->nr_pending);
585 			rcu_read_unlock();
586 
587 			if (r_queue->unplug_fn)
588 				r_queue->unplug_fn(r_queue);
589 
590 			rdev_dec_pending(rdev, mddev);
591 			rcu_read_lock();
592 		}
593 	}
594 	rcu_read_unlock();
595 }
596 
597 static void raid10_unplug(request_queue_t *q)
598 {
599 	mddev_t *mddev = q->queuedata;
600 
601 	unplug_slaves(q->queuedata);
602 	md_wakeup_thread(mddev->thread);
603 }
604 
605 static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
606 			     sector_t *error_sector)
607 {
608 	mddev_t *mddev = q->queuedata;
609 	conf_t *conf = mddev_to_conf(mddev);
610 	int i, ret = 0;
611 
612 	rcu_read_lock();
613 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
614 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
615 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
616 			struct block_device *bdev = rdev->bdev;
617 			request_queue_t *r_queue = bdev_get_queue(bdev);
618 
619 			if (!r_queue->issue_flush_fn)
620 				ret = -EOPNOTSUPP;
621 			else {
622 				atomic_inc(&rdev->nr_pending);
623 				rcu_read_unlock();
624 				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
625 							      error_sector);
626 				rdev_dec_pending(rdev, mddev);
627 				rcu_read_lock();
628 			}
629 		}
630 	}
631 	rcu_read_unlock();
632 	return ret;
633 }
634 
635 /* Barriers....
636  * Sometimes we need to suspend IO while we do something else,
637  * either some resync/recovery, or reconfigure the array.
638  * To do this we raise a 'barrier'.
639  * The 'barrier' is a counter that can be raised multiple times
640  * to count how many activities are happening which preclude
641  * normal IO.
642  * We can only raise the barrier if there is no pending IO.
643  * i.e. if nr_pending == 0.
644  * We choose only to raise the barrier if no-one is waiting for the
645  * barrier to go down.  This means that as soon as an IO request
646  * is ready, no other operations which require a barrier will start
647  * until the IO request has had a chance.
648  *
649  * So: regular IO calls 'wait_barrier'.  When that returns there
650  *    is no background IO happening.  It must arrange to call
651  *    allow_barrier when it has finished its IO.
652  * background IO calls must call raise_barrier.  Once that returns
653  *    there is no normal IO happening.  It must arrange to call
654  *    lower_barrier when the particular background IO completes.
655  */
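/*
 * In terms of the counters used below: 'barrier' counts the resync/recovery
 * passes currently excluding normal IO, 'nr_pending' counts in-flight
 * regular requests, 'nr_waiting' counts callers sleeping in wait_barrier(),
 * and 'nr_queued' counts r10_bios parked on retry_list (freeze_array()
 * waits for barrier + nr_pending == nr_queued + 2).
 */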
656 #define RESYNC_DEPTH 32
657 
658 static void raise_barrier(conf_t *conf, int force)
659 {
660 	BUG_ON(force && !conf->barrier);
661 	spin_lock_irq(&conf->resync_lock);
662 
663 	/* Wait until no block IO is waiting (unless 'force') */
664 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
665 			    conf->resync_lock,
666 			    raid10_unplug(conf->mddev->queue));
667 
668 	/* block any new IO from starting */
669 	conf->barrier++;
670 
671 	/* Now wait for all pending IO to complete */
672 	wait_event_lock_irq(conf->wait_barrier,
673 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
674 			    conf->resync_lock,
675 			    raid10_unplug(conf->mddev->queue));
676 
677 	spin_unlock_irq(&conf->resync_lock);
678 }
679 
680 static void lower_barrier(conf_t *conf)
681 {
682 	unsigned long flags;
683 	spin_lock_irqsave(&conf->resync_lock, flags);
684 	conf->barrier--;
685 	spin_unlock_irqrestore(&conf->resync_lock, flags);
686 	wake_up(&conf->wait_barrier);
687 }
688 
689 static void wait_barrier(conf_t *conf)
690 {
691 	spin_lock_irq(&conf->resync_lock);
692 	if (conf->barrier) {
693 		conf->nr_waiting++;
694 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
695 				    conf->resync_lock,
696 				    raid10_unplug(conf->mddev->queue));
697 		conf->nr_waiting--;
698 	}
699 	conf->nr_pending++;
700 	spin_unlock_irq(&conf->resync_lock);
701 }
702 
703 static void allow_barrier(conf_t *conf)
704 {
705 	unsigned long flags;
706 	spin_lock_irqsave(&conf->resync_lock, flags);
707 	conf->nr_pending--;
708 	spin_unlock_irqrestore(&conf->resync_lock, flags);
709 	wake_up(&conf->wait_barrier);
710 }
711 
712 static void freeze_array(conf_t *conf)
713 {
714 	/* stop syncio and normal IO and wait for everything to
715 	 * go quiet.
716 	 * We increment barrier and nr_waiting, and then
717 	 * wait until barrier+nr_pending match nr_queued+2
718 	 */
719 	spin_lock_irq(&conf->resync_lock);
720 	conf->barrier++;
721 	conf->nr_waiting++;
722 	wait_event_lock_irq(conf->wait_barrier,
723 			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
724 			    conf->resync_lock,
725 			    raid10_unplug(conf->mddev->queue));
726 	spin_unlock_irq(&conf->resync_lock);
727 }
728 
729 static void unfreeze_array(conf_t *conf)
730 {
731 	/* reverse the effect of the freeze */
732 	spin_lock_irq(&conf->resync_lock);
733 	conf->barrier--;
734 	conf->nr_waiting--;
735 	wake_up(&conf->wait_barrier);
736 	spin_unlock_irq(&conf->resync_lock);
737 }
738 
739 static int make_request(request_queue_t *q, struct bio * bio)
740 {
741 	mddev_t *mddev = q->queuedata;
742 	conf_t *conf = mddev_to_conf(mddev);
743 	mirror_info_t *mirror;
744 	r10bio_t *r10_bio;
745 	struct bio *read_bio;
746 	int i;
747 	int chunk_sects = conf->chunk_mask + 1;
748 	const int rw = bio_data_dir(bio);
749 	struct bio_list bl;
750 	unsigned long flags;
751 
752 	if (unlikely(bio_barrier(bio))) {
753 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
754 		return 0;
755 	}
756 
757 	/* If this request crosses a chunk boundary, we need to
758 	 * split it.  This will only happen for 1 PAGE (or less) requests.
759 	 */
760 	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
761 		      > chunk_sects &&
762 		    conf->near_copies < conf->raid_disks)) {
763 		struct bio_pair *bp;
764 		/* Sanity check -- queue functions should prevent this happening */
765 		if (bio->bi_vcnt != 1 ||
766 		    bio->bi_idx != 0)
767 			goto bad_map;
768 		/* This is a one page bio that upper layers
769 		 * refuse to split for us, so we need to split it.
770 		 */
771 		bp = bio_split(bio, bio_split_pool,
772 			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
773 		if (make_request(q, &bp->bio1))
774 			generic_make_request(&bp->bio1);
775 		if (make_request(q, &bp->bio2))
776 			generic_make_request(&bp->bio2);
777 
778 		bio_pair_release(bp);
779 		return 0;
780 	bad_map:
781 		printk("raid10_make_request bug: can't convert block across chunks"
782 		       " or bigger than %dk %llu %d\n", chunk_sects/2,
783 		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
784 
785 		bio_io_error(bio, bio->bi_size);
786 		return 0;
787 	}
788 
789 	md_write_start(mddev, bio);
790 
791 	/*
792 	 * Register the new request and wait if the reconstruction
793 	 * thread has put up a bar for new requests.
794 	 * Continue immediately if no resync is active currently.
795 	 */
796 	wait_barrier(conf);
797 
798 	disk_stat_inc(mddev->gendisk, ios[rw]);
799 	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
800 
801 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
802 
803 	r10_bio->master_bio = bio;
804 	r10_bio->sectors = bio->bi_size >> 9;
805 
806 	r10_bio->mddev = mddev;
807 	r10_bio->sector = bio->bi_sector;
808 	r10_bio->state = 0;
809 
810 	if (rw == READ) {
811 		/*
812 		 * read balancing logic:
813 		 */
814 		int disk = read_balance(conf, r10_bio);
815 		int slot = r10_bio->read_slot;
816 		if (disk < 0) {
817 			raid_end_bio_io(r10_bio);
818 			return 0;
819 		}
820 		mirror = conf->mirrors + disk;
821 
822 		read_bio = bio_clone(bio, GFP_NOIO);
823 
824 		r10_bio->devs[slot].bio = read_bio;
825 
826 		read_bio->bi_sector = r10_bio->devs[slot].addr +
827 			mirror->rdev->data_offset;
828 		read_bio->bi_bdev = mirror->rdev->bdev;
829 		read_bio->bi_end_io = raid10_end_read_request;
830 		read_bio->bi_rw = READ;
831 		read_bio->bi_private = r10_bio;
832 
833 		generic_make_request(read_bio);
834 		return 0;
835 	}
836 
837 	/*
838 	 * WRITE:
839 	 */
840 	/* first select target devices under spinlock and
841 	 * inc refcount on their rdev.  Record them by setting
842 	 * bios[x] to bio
843 	 */
844 	raid10_find_phys(conf, r10_bio);
845 	rcu_read_lock();
846 	for (i = 0;  i < conf->copies; i++) {
847 		int d = r10_bio->devs[i].devnum;
848 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
849 		if (rdev &&
850 		    !test_bit(Faulty, &rdev->flags)) {
851 			atomic_inc(&rdev->nr_pending);
852 			r10_bio->devs[i].bio = bio;
853 		} else {
854 			r10_bio->devs[i].bio = NULL;
855 			set_bit(R10BIO_Degraded, &r10_bio->state);
856 		}
857 	}
858 	rcu_read_unlock();
859 
860 	atomic_set(&r10_bio->remaining, 0);
861 
862 	bio_list_init(&bl);
863 	for (i = 0; i < conf->copies; i++) {
864 		struct bio *mbio;
865 		int d = r10_bio->devs[i].devnum;
866 		if (!r10_bio->devs[i].bio)
867 			continue;
868 
869 		mbio = bio_clone(bio, GFP_NOIO);
870 		r10_bio->devs[i].bio = mbio;
871 
872 		mbio->bi_sector	= r10_bio->devs[i].addr+
873 			conf->mirrors[d].rdev->data_offset;
874 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
875 		mbio->bi_end_io	= raid10_end_write_request;
876 		mbio->bi_rw = WRITE;
877 		mbio->bi_private = r10_bio;
878 
879 		atomic_inc(&r10_bio->remaining);
880 		bio_list_add(&bl, mbio);
881 	}
882 
883 	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
884 	spin_lock_irqsave(&conf->device_lock, flags);
885 	bio_list_merge(&conf->pending_bio_list, &bl);
886 	blk_plug_device(mddev->queue);
887 	spin_unlock_irqrestore(&conf->device_lock, flags);
888 
889 	return 0;
890 }
891 
892 static void status(struct seq_file *seq, mddev_t *mddev)
893 {
894 	conf_t *conf = mddev_to_conf(mddev);
895 	int i;
896 
897 	if (conf->near_copies < conf->raid_disks)
898 		seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
899 	if (conf->near_copies > 1)
900 		seq_printf(seq, " %d near-copies", conf->near_copies);
901 	if (conf->far_copies > 1)
902 		seq_printf(seq, " %d far-copies", conf->far_copies);
903 
904 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
905 						conf->working_disks);
906 	for (i = 0; i < conf->raid_disks; i++)
907 		seq_printf(seq, "%s",
908 			      conf->mirrors[i].rdev &&
909 			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
910 	seq_printf(seq, "]");
911 }
912 
913 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
914 {
915 	char b[BDEVNAME_SIZE];
916 	conf_t *conf = mddev_to_conf(mddev);
917 
918 	/*
919 	 * If it is not operational, then we have already marked it as dead
920 	 * else if it is the last working disk, ignore the error, let the
921 	 * next level up know.
922 	 * else mark the drive as failed
923 	 */
924 	if (test_bit(In_sync, &rdev->flags)
925 	    && conf->working_disks == 1)
926 		/*
927 		 * Don't fail the drive, just return an IO error.
928 		 * The test should really be more sophisticated than
929 		 * "working_disks == 1", but it isn't critical, and
930 		 * can wait until we do more sophisticated "is the drive
931 		 * really dead" tests...
932 		 */
933 		return;
934 	if (test_bit(In_sync, &rdev->flags)) {
935 		mddev->degraded++;
936 		conf->working_disks--;
937 		/*
938 		 * if recovery is running, make sure it aborts.
939 		 */
940 		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
941 	}
942 	clear_bit(In_sync, &rdev->flags);
943 	set_bit(Faulty, &rdev->flags);
944 	mddev->sb_dirty = 1;
945 	printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. \n"
946 		"	Operation continuing on %d devices\n",
947 		bdevname(rdev->bdev,b), conf->working_disks);
948 }
949 
950 static void print_conf(conf_t *conf)
951 {
952 	int i;
953 	mirror_info_t *tmp;
954 
955 	printk("RAID10 conf printout:\n");
956 	if (!conf) {
957 		printk("(!conf)\n");
958 		return;
959 	}
960 	printk(" --- wd:%d rd:%d\n", conf->working_disks,
961 		conf->raid_disks);
962 
963 	for (i = 0; i < conf->raid_disks; i++) {
964 		char b[BDEVNAME_SIZE];
965 		tmp = conf->mirrors + i;
966 		if (tmp->rdev)
967 			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
968 				i, !test_bit(In_sync, &tmp->rdev->flags),
969 			        !test_bit(Faulty, &tmp->rdev->flags),
970 				bdevname(tmp->rdev->bdev,b));
971 	}
972 }
973 
974 static void close_sync(conf_t *conf)
975 {
976 	wait_barrier(conf);
977 	allow_barrier(conf);
978 
979 	mempool_destroy(conf->r10buf_pool);
980 	conf->r10buf_pool = NULL;
981 }
982 
983 /* check if there are enough drives for
984  * every block to appear on at least one
985  */
986 static int enough(conf_t *conf)
987 {
988 	int first = 0;
989 
990 	do {
991 		int n = conf->copies;
992 		int cnt = 0;
993 		while (n--) {
994 			if (conf->mirrors[first].rdev)
995 				cnt++;
996 			first = (first+1) % conf->raid_disks;
997 		}
998 		if (cnt == 0)
999 			return 0;
1000 	} while (first != 0);
1001 	return 1;
1002 }
1003 
1004 static int raid10_spare_active(mddev_t *mddev)
1005 {
1006 	int i;
1007 	conf_t *conf = mddev->private;
1008 	mirror_info_t *tmp;
1009 
1010 	/*
1011 	 * Find all non-in_sync disks within the RAID10 configuration
1012 	 * and mark them in_sync
1013 	 */
1014 	for (i = 0; i < conf->raid_disks; i++) {
1015 		tmp = conf->mirrors + i;
1016 		if (tmp->rdev
1017 		    && !test_bit(Faulty, &tmp->rdev->flags)
1018 		    && !test_bit(In_sync, &tmp->rdev->flags)) {
1019 			conf->working_disks++;
1020 			mddev->degraded--;
1021 			set_bit(In_sync, &tmp->rdev->flags);
1022 		}
1023 	}
1024 
1025 	print_conf(conf);
1026 	return 0;
1027 }
1028 
1029 
1030 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1031 {
1032 	conf_t *conf = mddev->private;
1033 	int found = 0;
1034 	int mirror;
1035 	mirror_info_t *p;
1036 
1037 	if (mddev->recovery_cp < MaxSector)
1038 		/* only hot-add to in-sync arrays, as recovery is
1039 		 * very different from resync
1040 		 */
1041 		return 0;
1042 	if (!enough(conf))
1043 		return 0;
1044 
1045 	if (rdev->saved_raid_disk >= 0 &&
1046 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1047 		mirror = rdev->saved_raid_disk;
1048 	else
1049 		mirror = 0;
1050 	for ( ; mirror < mddev->raid_disks; mirror++)
1051 		if ( !(p=conf->mirrors+mirror)->rdev) {
1052 
1053 			blk_queue_stack_limits(mddev->queue,
1054 					       rdev->bdev->bd_disk->queue);
1055 			/* as we don't honour merge_bvec_fn, we must never risk
1056 			 * violating it, so limit ->max_sector to one PAGE, as
1057 			 * a one page request is never in violation.
1058 			 */
1059 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1060 			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
1061 				mddev->queue->max_sectors = (PAGE_SIZE>>9);
1062 
1063 			p->head_position = 0;
1064 			rdev->raid_disk = mirror;
1065 			found = 1;
1066 			if (rdev->saved_raid_disk != mirror)
1067 				conf->fullsync = 1;
1068 			rcu_assign_pointer(p->rdev, rdev);
1069 			break;
1070 		}
1071 
1072 	print_conf(conf);
1073 	return found;
1074 }
1075 
1076 static int raid10_remove_disk(mddev_t *mddev, int number)
1077 {
1078 	conf_t *conf = mddev->private;
1079 	int err = 0;
1080 	mdk_rdev_t *rdev;
1081 	mirror_info_t *p = conf->mirrors+ number;
1082 
1083 	print_conf(conf);
1084 	rdev = p->rdev;
1085 	if (rdev) {
1086 		if (test_bit(In_sync, &rdev->flags) ||
1087 		    atomic_read(&rdev->nr_pending)) {
1088 			err = -EBUSY;
1089 			goto abort;
1090 		}
1091 		p->rdev = NULL;
1092 		synchronize_rcu();
1093 		if (atomic_read(&rdev->nr_pending)) {
1094 			/* lost the race, try later */
1095 			err = -EBUSY;
1096 			p->rdev = rdev;
1097 		}
1098 	}
1099 abort:
1100 
1101 	print_conf(conf);
1102 	return err;
1103 }
1104 
1105 
1106 static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1107 {
1108 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1109 	conf_t *conf = mddev_to_conf(r10_bio->mddev);
1110 	int i,d;
1111 
1112 	if (bio->bi_size)
1113 		return 1;
1114 
1115 	for (i=0; i<conf->copies; i++)
1116 		if (r10_bio->devs[i].bio == bio)
1117 			break;
1118 	if (i == conf->copies)
1119 		BUG();
1120 	update_head_pos(i, r10_bio);
1121 	d = r10_bio->devs[i].devnum;
1122 
1123 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1124 		set_bit(R10BIO_Uptodate, &r10_bio->state);
1125 	else {
1126 		atomic_add(r10_bio->sectors,
1127 			   &conf->mirrors[d].rdev->corrected_errors);
1128 		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1129 			md_error(r10_bio->mddev,
1130 				 conf->mirrors[d].rdev);
1131 	}
1132 
1133 	/* for reconstruct, we always reschedule after a read.
1134 	 * for resync, only after all reads
1135 	 */
1136 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1137 	    atomic_dec_and_test(&r10_bio->remaining)) {
1138 		/* we have read all the blocks,
1139 		 * do the comparison in process context in raid10d
1140 		 */
1141 		reschedule_retry(r10_bio);
1142 	}
1143 	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1144 	return 0;
1145 }
1146 
1147 static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1148 {
1149 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1150 	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1151 	mddev_t *mddev = r10_bio->mddev;
1152 	conf_t *conf = mddev_to_conf(mddev);
1153 	int i,d;
1154 
1155 	if (bio->bi_size)
1156 		return 1;
1157 
1158 	for (i = 0; i < conf->copies; i++)
1159 		if (r10_bio->devs[i].bio == bio)
1160 			break;
1161 	d = r10_bio->devs[i].devnum;
1162 
1163 	if (!uptodate)
1164 		md_error(mddev, conf->mirrors[d].rdev);
1165 	update_head_pos(i, r10_bio);
1166 
1167 	while (atomic_dec_and_test(&r10_bio->remaining)) {
1168 		if (r10_bio->master_bio == NULL) {
1169 			/* the primary of several recovery bios */
1170 			md_done_sync(mddev, r10_bio->sectors, 1);
1171 			put_buf(r10_bio);
1172 			break;
1173 		} else {
1174 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1175 			put_buf(r10_bio);
1176 			r10_bio = r10_bio2;
1177 		}
1178 	}
1179 	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1180 	return 0;
1181 }
1182 
1183 /*
1184  * Note: sync and recovery are handled very differently for raid10.
1185  * This code is for resync.
1186  * For resync, we read through virtual addresses and read all blocks.
1187  * If there is any error, we schedule a write.  The lowest numbered
1188  * drive is authoritative.
1189  * However, requests come for physical addresses, so we need to map.
1190  * For every physical address there are raid_disks/copies virtual addresses,
1191  * which is always at least one, but is not necessarily an integer.
1192  * This means that a physical address can span multiple chunks, so we may
1193  * have to submit multiple io requests for a single sync request.
1194  */
1195 /*
1196  * We check if all blocks are in-sync and only write to blocks that
1197  * aren't in sync
1198  */
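/*
 * For example, with two copies the bio read from the first up-to-date
 * device (fbio) is compared page by page against each other copy; a copy
 * that failed to read, or whose pages differ, is rewritten from fbio
 * (unless this is a 'check' pass, which only counts the mismatches).
 */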
1199 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1200 {
1201 	conf_t *conf = mddev_to_conf(mddev);
1202 	int i, first;
1203 	struct bio *tbio, *fbio;
1204 
1205 	atomic_set(&r10_bio->remaining, 1);
1206 
1207 	/* find the first device with a block */
1208 	for (i=0; i<conf->copies; i++)
1209 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1210 			break;
1211 
1212 	if (i == conf->copies)
1213 		goto done;
1214 
1215 	first = i;
1216 	fbio = r10_bio->devs[i].bio;
1217 
1218 	/* now find blocks with errors */
1219 	for (i=0 ; i < conf->copies ; i++) {
1220 		int  j, d;
1221 		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1222 
1223 		tbio = r10_bio->devs[i].bio;
1224 
1225 		if (tbio->bi_end_io != end_sync_read)
1226 			continue;
1227 		if (i == first)
1228 			continue;
1229 		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1230 			/* We know that the bi_io_vec layout is the same for
1231 			 * both 'first' and 'i', so we just compare them.
1232 			 * All vec entries are PAGE_SIZE;
1233 			 */
1234 			for (j = 0; j < vcnt; j++)
1235 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1236 					   page_address(tbio->bi_io_vec[j].bv_page),
1237 					   PAGE_SIZE))
1238 					break;
1239 			if (j == vcnt)
1240 				continue;
1241 			mddev->resync_mismatches += r10_bio->sectors;
1242 		}
1243 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1244 			/* Don't fix anything. */
1245 			continue;
1246 		/* Ok, we need to write this bio
1247 		 * First we need to fixup bv_offset, bv_len and
1248 		 * bi_vecs, as the read request might have corrupted these
1249 		 */
1250 		tbio->bi_vcnt = vcnt;
1251 		tbio->bi_size = r10_bio->sectors << 9;
1252 		tbio->bi_idx = 0;
1253 		tbio->bi_phys_segments = 0;
1254 		tbio->bi_hw_segments = 0;
1255 		tbio->bi_hw_front_size = 0;
1256 		tbio->bi_hw_back_size = 0;
1257 		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1258 		tbio->bi_flags |= 1 << BIO_UPTODATE;
1259 		tbio->bi_next = NULL;
1260 		tbio->bi_rw = WRITE;
1261 		tbio->bi_private = r10_bio;
1262 		tbio->bi_sector = r10_bio->devs[i].addr;
1263 
1264 		for (j=0; j < vcnt ; j++) {
1265 			tbio->bi_io_vec[j].bv_offset = 0;
1266 			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1267 
1268 			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1269 			       page_address(fbio->bi_io_vec[j].bv_page),
1270 			       PAGE_SIZE);
1271 		}
1272 		tbio->bi_end_io = end_sync_write;
1273 
1274 		d = r10_bio->devs[i].devnum;
1275 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1276 		atomic_inc(&r10_bio->remaining);
1277 		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1278 
1279 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1280 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1281 		generic_make_request(tbio);
1282 	}
1283 
1284 done:
1285 	if (atomic_dec_and_test(&r10_bio->remaining)) {
1286 		md_done_sync(mddev, r10_bio->sectors, 1);
1287 		put_buf(r10_bio);
1288 	}
1289 }
1290 
1291 /*
1292  * Now for the recovery code.
1293  * Recovery happens across physical sectors.
1294  * We recover all non-in_sync drives by finding the virtual address of
1295  * each, and then choose a working drive that also has that virt address.
1296  * There is a separate r10_bio for each non-in_sync drive.
1297  * Only the first two slots are in use. The first for reading,
1298  * The second for writing.
1299  *
1300  */
1301 
1302 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1303 {
1304 	conf_t *conf = mddev_to_conf(mddev);
1305 	int i, d;
1306 	struct bio *bio, *wbio;
1307 
1308 
1309 	/* move the pages across to the second bio
1310 	 * and submit the write request
1311 	 */
1312 	bio = r10_bio->devs[0].bio;
1313 	wbio = r10_bio->devs[1].bio;
1314 	for (i=0; i < wbio->bi_vcnt; i++) {
1315 		struct page *p = bio->bi_io_vec[i].bv_page;
1316 		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1317 		wbio->bi_io_vec[i].bv_page = p;
1318 	}
1319 	d = r10_bio->devs[1].devnum;
1320 
1321 	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1322 	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1323 	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1324 		generic_make_request(wbio);
1325 	else
1326 		bio_endio(wbio, wbio->bi_size, -EIO);
1327 }
1328 
1329 
1330 /*
1331  * This is a kernel thread which:
1332  *
1333  *	1.	Retries failed read operations on working mirrors.
1334  *	2.	Updates the raid superblock when problems are encountered.
1335  *	3.	Performs writes following reads for array synchronising.
1336  */
1337 
1338 static void raid10d(mddev_t *mddev)
1339 {
1340 	r10bio_t *r10_bio;
1341 	struct bio *bio;
1342 	unsigned long flags;
1343 	conf_t *conf = mddev_to_conf(mddev);
1344 	struct list_head *head = &conf->retry_list;
1345 	int unplug=0;
1346 	mdk_rdev_t *rdev;
1347 
1348 	md_check_recovery(mddev);
1349 
1350 	for (;;) {
1351 		char b[BDEVNAME_SIZE];
1352 		spin_lock_irqsave(&conf->device_lock, flags);
1353 
1354 		if (conf->pending_bio_list.head) {
1355 			bio = bio_list_get(&conf->pending_bio_list);
1356 			blk_remove_plug(mddev->queue);
1357 			spin_unlock_irqrestore(&conf->device_lock, flags);
1358 			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
1359 			if (bitmap_unplug(mddev->bitmap) != 0)
1360 				printk("%s: bitmap file write failed!\n", mdname(mddev));
1361 
1362 			while (bio) { /* submit pending writes */
1363 				struct bio *next = bio->bi_next;
1364 				bio->bi_next = NULL;
1365 				generic_make_request(bio);
1366 				bio = next;
1367 			}
1368 			unplug = 1;
1369 
1370 			continue;
1371 		}
1372 
1373 		if (list_empty(head))
1374 			break;
1375 		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1376 		list_del(head->prev);
1377 		conf->nr_queued--;
1378 		spin_unlock_irqrestore(&conf->device_lock, flags);
1379 
1380 		mddev = r10_bio->mddev;
1381 		conf = mddev_to_conf(mddev);
1382 		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1383 			sync_request_write(mddev, r10_bio);
1384 			unplug = 1;
1385 		} else 	if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1386 			recovery_request_write(mddev, r10_bio);
1387 			unplug = 1;
1388 		} else {
1389 			int mirror;
1390 			/* we got a read error. Maybe the drive is bad.  Maybe just
1391 			 * the block and we can fix it.
1392 			 * We freeze all other IO, and try reading the block from
1393 			 * other devices.  When we find one, we re-write
1394 			 * it and check that this fixes the read error.
1395 			 * This is all done synchronously while the array is
1396 			 * frozen.
1397 			 */
1398 			int sect = 0; /* Offset from r10_bio->sector */
1399 			int sectors = r10_bio->sectors;
1400 			freeze_array(conf);
1401 			if (mddev->ro == 0) while(sectors) {
1402 				int s = sectors;
1403 				int sl = r10_bio->read_slot;
1404 				int success = 0;
1405 
1406 				if (s > (PAGE_SIZE>>9))
1407 					s = PAGE_SIZE >> 9;
1408 
1409 				do {
1410 					int d = r10_bio->devs[sl].devnum;
1411 					rdev = conf->mirrors[d].rdev;
1412 					if (rdev &&
1413 					    test_bit(In_sync, &rdev->flags) &&
1414 					    sync_page_io(rdev->bdev,
1415 							 r10_bio->devs[sl].addr +
1416 							 sect + rdev->data_offset,
1417 							 s<<9,
1418 							 conf->tmppage, READ))
1419 						success = 1;
1420 					else {
1421 						sl++;
1422 						if (sl == conf->copies)
1423 							sl = 0;
1424 					}
1425 				} while (!success && sl != r10_bio->read_slot);
1426 
1427 				if (success) {
1428 					int start = sl;
1429 					/* write it back and re-read */
1430 					while (sl != r10_bio->read_slot) {
1431 						int d;
1432 						if (sl==0)
1433 							sl = conf->copies;
1434 						sl--;
1435 						d = r10_bio->devs[sl].devnum;
1436 						rdev = conf->mirrors[d].rdev;
1437 						if (rdev &&
1438 						    test_bit(In_sync, &rdev->flags)) {
1439 							atomic_add(s, &rdev->corrected_errors);
1440 							if (sync_page_io(rdev->bdev,
1441 									 r10_bio->devs[sl].addr +
1442 									 sect + rdev->data_offset,
1443 									 s<<9, conf->tmppage, WRITE) == 0)
1444 								/* Well, this device is dead */
1445 								md_error(mddev, rdev);
1446 						}
1447 					}
1448 					sl = start;
1449 					while (sl != r10_bio->read_slot) {
1450 						int d;
1451 						if (sl==0)
1452 							sl = conf->copies;
1453 						sl--;
1454 						d = r10_bio->devs[sl].devnum;
1455 						rdev = conf->mirrors[d].rdev;
1456 						if (rdev &&
1457 						    test_bit(In_sync, &rdev->flags)) {
1458 							if (sync_page_io(rdev->bdev,
1459 									 r10_bio->devs[sl].addr +
1460 									 sect + rdev->data_offset,
1461 									 s<<9, conf->tmppage, READ) == 0)
1462 								/* Well, this device is dead */
1463 								md_error(mddev, rdev);
1464 						}
1465 					}
1466 				} else {
1467 					/* Cannot read from anywhere -- bye bye array */
1468 					md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
1469 					break;
1470 				}
1471 				sectors -= s;
1472 				sect += s;
1473 			}
1474 
1475 			unfreeze_array(conf);
1476 
1477 			bio = r10_bio->devs[r10_bio->read_slot].bio;
1478 			r10_bio->devs[r10_bio->read_slot].bio =
1479 				mddev->ro ? IO_BLOCKED : NULL;
1480 			bio_put(bio);
1481 			mirror = read_balance(conf, r10_bio);
1482 			if (mirror == -1) {
1483 				printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1484 				       " read error for block %llu\n",
1485 				       bdevname(bio->bi_bdev,b),
1486 				       (unsigned long long)r10_bio->sector);
1487 				raid_end_bio_io(r10_bio);
1488 			} else {
1489 				rdev = conf->mirrors[mirror].rdev;
1490 				if (printk_ratelimit())
1491 					printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1492 					       " another mirror\n",
1493 					       bdevname(rdev->bdev,b),
1494 					       (unsigned long long)r10_bio->sector);
1495 				bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1496 				r10_bio->devs[r10_bio->read_slot].bio = bio;
1497 				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1498 					+ rdev->data_offset;
1499 				bio->bi_bdev = rdev->bdev;
1500 				bio->bi_rw = READ;
1501 				bio->bi_private = r10_bio;
1502 				bio->bi_end_io = raid10_end_read_request;
1503 				unplug = 1;
1504 				generic_make_request(bio);
1505 			}
1506 		}
1507 	}
1508 	spin_unlock_irqrestore(&conf->device_lock, flags);
1509 	if (unplug)
1510 		unplug_slaves(mddev);
1511 }
1512 
1513 
1514 static int init_resync(conf_t *conf)
1515 {
1516 	int buffs;
1517 
1518 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1519 	if (conf->r10buf_pool)
1520 		BUG();
1521 	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1522 	if (!conf->r10buf_pool)
1523 		return -ENOMEM;
1524 	conf->next_resync = 0;
1525 	return 0;
1526 }
1527 
1528 /*
1529  * perform a "sync" on one "block"
1530  *
1531  * We need to make sure that no normal I/O request - particularly write
1532  * requests - conflict with active sync requests.
1533  *
1534  * This is achieved by tracking pending requests and a 'barrier' concept
1535  * that can be installed to exclude normal IO requests.
1536  *
1537  * Resync and recovery are handled very differently.
1538  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1539  *
1540  * For resync, we iterate over virtual addresses, read all copies,
1541  * and update if there are differences.  If only one copy is live,
1542  * skip it.
1543  * For recovery, we iterate over physical addresses, read a good
1544  * value for each non-in_sync drive, and over-write.
1545  *
1546  * So, for recovery we may have several outstanding complex requests for a
1547  * given address, one for each out-of-sync device.  We model this by allocating
1548  * a number of r10_bio structures, one for each out-of-sync device.
1549  * As we setup these structures, we collect all bio's together into a list
1550  * which we then process collectively to add pages, and then process again
1551  * to pass to generic_make_request.
1552  *
1553  * The r10_bio structures are linked using a borrowed master_bio pointer.
1554  * This link is counted in ->remaining.  When the r10_bio that points to NULL
1555  * has its remaining count decremented to 0, the whole complex operation
1556  * is complete.
1557  *
1558  */
1559 
1560 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1561 {
1562 	conf_t *conf = mddev_to_conf(mddev);
1563 	r10bio_t *r10_bio;
1564 	struct bio *biolist = NULL, *bio;
1565 	sector_t max_sector, nr_sectors;
1566 	int disk;
1567 	int i;
1568 	int max_sync;
1569 	int sync_blocks;
1570 
1571 	sector_t sectors_skipped = 0;
1572 	int chunks_skipped = 0;
1573 
1574 	if (!conf->r10buf_pool)
1575 		if (init_resync(conf))
1576 			return 0;
1577 
1578  skipped:
1579 	max_sector = mddev->size << 1;
1580 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1581 		max_sector = mddev->resync_max_sectors;
1582 	if (sector_nr >= max_sector) {
1583 		/* If we aborted, we need to abort the
1584 		 * sync on the 'current' bitmap chunks (there can
1585 		 * be several when recovering multiple devices),
1586 		 * as we may have started syncing it but not finished.
1587 		 * We can find the current address in
1588 		 * mddev->curr_resync, but for recovery,
1589 		 * we need to convert that to several
1590 		 * virtual addresses.
1591 		 */
1592 		if (mddev->curr_resync < max_sector) { /* aborted */
1593 			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1594 				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1595 						&sync_blocks, 1);
1596 			else for (i=0; i<conf->raid_disks; i++) {
1597 				sector_t sect =
1598 					raid10_find_virt(conf, mddev->curr_resync, i);
1599 				bitmap_end_sync(mddev->bitmap, sect,
1600 						&sync_blocks, 1);
1601 			}
1602 		} else /* completed sync */
1603 			conf->fullsync = 0;
1604 
1605 		bitmap_close_sync(mddev->bitmap);
1606 		close_sync(conf);
1607 		*skipped = 1;
1608 		return sectors_skipped;
1609 	}
1610 	if (chunks_skipped >= conf->raid_disks) {
1611 		/* if there has been nothing to do on any drive,
1612 		 * then there is nothing to do at all..
1613 		 */
1614 		*skipped = 1;
1615 		return (max_sector - sector_nr) + sectors_skipped;
1616 	}
1617 
1618 	/* make sure whole request will fit in a chunk - if chunks
1619 	 * are meaningful
1620 	 */
1621 	if (conf->near_copies < conf->raid_disks &&
1622 	    max_sector > (sector_nr | conf->chunk_mask))
1623 		max_sector = (sector_nr | conf->chunk_mask) + 1;
1624 	/*
1625 	 * If there is non-resync activity waiting for us then
1626 	 * put in a delay to throttle resync.
1627 	 */
1628 	if (!go_faster && conf->nr_waiting)
1629 		msleep_interruptible(1000);
1630 
1631 	/* Again, very different code for resync and recovery.
1632 	 * Both must result in an r10bio with a list of bios that
1633 	 * have bi_end_io, bi_sector, bi_bdev set,
1634 	 * and bi_private set to the r10bio.
1635 	 * For recovery, we may actually create several r10bios
1636 	 * with 2 bios in each, that correspond to the bios in the main one.
1637 	 * In this case, the subordinate r10bios link back through a
1638 	 * borrowed master_bio pointer, and the counter in the master
1639 	 * includes a ref from each subordinate.
1640 	 */
1641 	/* First, we decide what to do and set ->bi_end_io
1642 	 * To end_sync_read if we want to read, and
1643 	 * end_sync_write if we will want to write.
1644 	 */
1645 
1646 	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1647 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1648 		/* recovery... the complicated one */
1649 		int i, j, k;
1650 		r10_bio = NULL;
1651 
1652 		for (i=0 ; i<conf->raid_disks; i++)
1653 			if (conf->mirrors[i].rdev &&
1654 			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1655 				int still_degraded = 0;
1656 				/* want to reconstruct this device */
1657 				r10bio_t *rb2 = r10_bio;
1658 				sector_t sect = raid10_find_virt(conf, sector_nr, i);
1659 				int must_sync;
1660 				/* Unless we are doing a full sync, we only need
1661 				 * to recover the block if it is set in the bitmap
1662 				 */
1663 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1664 							      &sync_blocks, 1);
1665 				if (sync_blocks < max_sync)
1666 					max_sync = sync_blocks;
1667 				if (!must_sync &&
1668 				    !conf->fullsync) {
1669 					/* yep, skip the sync_blocks here, but don't assume
1670 					 * that there will never be anything to do here
1671 					 */
1672 					chunks_skipped = -1;
1673 					continue;
1674 				}
1675 
1676 				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1677 				raise_barrier(conf, rb2 != NULL);
1678 				atomic_set(&r10_bio->remaining, 0);
1679 
1680 				r10_bio->master_bio = (struct bio*)rb2;
1681 				if (rb2)
1682 					atomic_inc(&rb2->remaining);
1683 				r10_bio->mddev = mddev;
1684 				set_bit(R10BIO_IsRecover, &r10_bio->state);
1685 				r10_bio->sector = sect;
1686 
1687 				raid10_find_phys(conf, r10_bio);
1688 				/* Need to check if this section will still be
1689 				 * degraded
1690 				 */
1691 				for (j=0; j<conf->copies;j++) {
1692 					int d = r10_bio->devs[j].devnum;
1693 					if (conf->mirrors[d].rdev == NULL ||
1694 					    test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1695 						still_degraded = 1;
1696 						break;
1697 					}
1698 				}
1699 				must_sync = bitmap_start_sync(mddev->bitmap, sect,
1700 							      &sync_blocks, still_degraded);
1701 
1702 				for (j=0; j<conf->copies;j++) {
1703 					int d = r10_bio->devs[j].devnum;
1704 					if (conf->mirrors[d].rdev &&
1705 					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1706 						/* This is where we read from */
1707 						bio = r10_bio->devs[0].bio;
1708 						bio->bi_next = biolist;
1709 						biolist = bio;
1710 						bio->bi_private = r10_bio;
1711 						bio->bi_end_io = end_sync_read;
1712 						bio->bi_rw = 0;
1713 						bio->bi_sector = r10_bio->devs[j].addr +
1714 							conf->mirrors[d].rdev->data_offset;
1715 						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1716 						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1717 						atomic_inc(&r10_bio->remaining);
1718 						/* and we write to 'i' */
1719 
1720 						for (k=0; k<conf->copies; k++)
1721 							if (r10_bio->devs[k].devnum == i)
1722 								break;
1723 						bio = r10_bio->devs[1].bio;
1724 						bio->bi_next = biolist;
1725 						biolist = bio;
1726 						bio->bi_private = r10_bio;
1727 						bio->bi_end_io = end_sync_write;
1728 						bio->bi_rw = 1;
1729 						bio->bi_sector = r10_bio->devs[k].addr +
1730 							conf->mirrors[i].rdev->data_offset;
1731 						bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1732 
1733 						r10_bio->devs[0].devnum = d;
1734 						r10_bio->devs[1].devnum = i;
1735 
1736 						break;
1737 					}
1738 				}
1739 				if (j == conf->copies) {
1740 					/* Cannot recover, so abort the recovery */
1741 					put_buf(r10_bio);
1742 					r10_bio = rb2;
1743 					if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
1744 						printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1745 						       mdname(mddev));
1746 					break;
1747 				}
1748 			}
1749 		if (biolist == NULL) {
1750 			while (r10_bio) {
1751 				r10bio_t *rb2 = r10_bio;
1752 				r10_bio = (r10bio_t*) rb2->master_bio;
1753 				rb2->master_bio = NULL;
1754 				put_buf(rb2);
1755 			}
1756 			goto giveup;
1757 		}
1758 	} else {
1759 		/* resync. Schedule a read for every block at this virt offset */
1760 		int count = 0;
1761 
1762 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1763 				       &sync_blocks, mddev->degraded) &&
1764 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1765 			/* We can skip this block */
1766 			*skipped = 1;
1767 			return sync_blocks + sectors_skipped;
1768 		}
1769 		if (sync_blocks < max_sync)
1770 			max_sync = sync_blocks;
1771 		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1772 
1773 		r10_bio->mddev = mddev;
1774 		atomic_set(&r10_bio->remaining, 0);
1775 		raise_barrier(conf, 0);
1776 		conf->next_resync = sector_nr;
1777 
1778 		r10_bio->master_bio = NULL;
1779 		r10_bio->sector = sector_nr;
1780 		set_bit(R10BIO_IsSync, &r10_bio->state);
1781 		raid10_find_phys(conf, r10_bio);
1782 		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
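		/*
		 * That is the distance from sector_nr to the end of the
		 * current chunk.  For example, with 64KiB chunks
		 * (chunk_mask == 127) and sector_nr == 200:
		 * (200 | 127) - 200 + 1 == 56 sectors remain in the chunk.
		 */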
1783 
1784 		for (i=0; i<conf->copies; i++) {
1785 			int d = r10_bio->devs[i].devnum;
1786 			bio = r10_bio->devs[i].bio;
1787 			bio->bi_end_io = NULL;
1788 			if (conf->mirrors[d].rdev == NULL ||
1789 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1790 				continue;
1791 			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1792 			atomic_inc(&r10_bio->remaining);
1793 			bio->bi_next = biolist;
1794 			biolist = bio;
1795 			bio->bi_private = r10_bio;
1796 			bio->bi_end_io = end_sync_read;
1797 			bio->bi_rw = 0;
1798 			bio->bi_sector = r10_bio->devs[i].addr +
1799 				conf->mirrors[d].rdev->data_offset;
1800 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1801 			count++;
1802 		}
1803 
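		/*
		 * A resync needs at least two readable copies to compare
		 * against each other; with fewer there is nothing useful to
		 * do, so drop the references just taken and move on.
		 */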
1804 		if (count < 2) {
1805 			for (i=0; i<conf->copies; i++) {
1806 				int d = r10_bio->devs[i].devnum;
1807 				if (r10_bio->devs[i].bio->bi_end_io)
1808 					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1809 			}
1810 			put_buf(r10_bio);
1811 			biolist = NULL;
1812 			goto giveup;
1813 		}
1814 	}
1815 
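	/*
	 * The bios come from the r10buf mempool and carry stale state from
	 * their last use, so reset everything bio_add_page() and the
	 * completion handlers rely on.  BIO_UPTODATE is preset on the bios
	 * that will be issued and is only cleared if the request fails.
	 */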
1816 	for (bio = biolist; bio ; bio=bio->bi_next) {
1817 
1818 		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1819 		if (bio->bi_end_io)
1820 			bio->bi_flags |= 1 << BIO_UPTODATE;
1821 		bio->bi_vcnt = 0;
1822 		bio->bi_idx = 0;
1823 		bio->bi_phys_segments = 0;
1824 		bio->bi_hw_segments = 0;
1825 		bio->bi_size = 0;
1826 	}
1827 
1828 	nr_sectors = 0;
1829 	if (sector_nr + max_sync < max_sector)
1830 		max_sector = sector_nr + max_sync;
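	/*
	 * Add pages to every bio in lock step so that they all describe the
	 * same sector range.  If one bio cannot take another page, that page
	 * is stripped from the bios that already accepted it and the
	 * requests are issued at the shorter, common length.
	 */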
1831 	do {
1832 		struct page *page;
1833 		int len = PAGE_SIZE;
1834 		disk = 0;
1835 		if (sector_nr + (len>>9) > max_sector)
1836 			len = (max_sector - sector_nr) << 9;
1837 		if (len == 0)
1838 			break;
1839 		for (bio= biolist ; bio ; bio=bio->bi_next) {
1840 			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1841 			if (bio_add_page(bio, page, len, 0) == 0) {
1842 				/* stop here */
1843 				struct bio *bio2;
1844 				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1845 				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
1846 					/* remove last page from this bio */
1847 					bio2->bi_vcnt--;
1848 					bio2->bi_size -= len;
1849 					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
1850 				}
1851 				goto bio_full;
1852 			}
1853 			disk = i;
1854 		}
1855 		nr_sectors += len>>9;
1856 		sector_nr += len>>9;
1857 	} while (biolist->bi_vcnt < RESYNC_PAGES);
1858  bio_full:
1859 	r10_bio->sectors = nr_sectors;
1860 
1861 	while (biolist) {
1862 		bio = biolist;
1863 		biolist = biolist->bi_next;
1864 
1865 		bio->bi_next = NULL;
1866 		r10_bio = bio->bi_private;
1867 		r10_bio->sectors = nr_sectors;
1868 
1869 		if (bio->bi_end_io == end_sync_read) {
1870 			md_sync_acct(bio->bi_bdev, nr_sectors);
1871 			generic_make_request(bio);
1872 		}
1873 	}
1874 
1875 	if (sectors_skipped)
1876 		/* pretend they weren't skipped; it makes
1877 		 * no important difference in this case
1878 		 */
1879 		md_done_sync(mddev, sectors_skipped, 1);
1880 
1881 	return sectors_skipped + nr_sectors;
1882  giveup:
1883 	/* There is nowhere to write: all the non-in-sync
1884 	 * drives must have failed, so try the next chunk...
1885 	 */
1886 	{
1887 	sector_t sec = max_sector - sector_nr;
1888 	sectors_skipped += sec;
1889 	chunks_skipped ++;
1890 	sector_nr = max_sector;
1891 	goto skipped;
1892 	}
1893 }
1894 
1895 static int run(mddev_t *mddev)
1896 {
1897 	conf_t *conf;
1898 	int i, disk_idx;
1899 	mirror_info_t *disk;
1900 	mdk_rdev_t *rdev;
1901 	struct list_head *tmp;
1902 	int nc, fc;
1903 	sector_t stride, size;
1904 
1905 	if (mddev->chunk_size == 0) {
1906 		printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
1907 		return -EINVAL;
1908 	}
1909 
1910 	nc = mddev->layout & 255;
1911 	fc = (mddev->layout >> 8) & 255;
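	/* e.g. the common "n2" layout (2 near copies, 1 far copy) is 0x102 */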
1912 	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
1913 	    (mddev->layout >> 16)) {
1914 		printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
1915 		       mdname(mddev), mddev->layout);
1916 		goto out;
1917 	}
1918 	/*
1919 	 * copy the already verified devices into our private RAID10
1920 	 * bookkeeping area. [whatever we allocate in run()
1921 	 * should be freed in stop()]
1922 	 */
1923 	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1924 	mddev->private = conf;
1925 	if (!conf) {
1926 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1927 			mdname(mddev));
1928 		goto out;
1929 	}
1930 	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1931 				 GFP_KERNEL);
1932 	if (!conf->mirrors) {
1933 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1934 		       mdname(mddev));
1935 		goto out_free_conf;
1936 	}
1937 
1938 	conf->tmppage = alloc_page(GFP_KERNEL);
1939 	if (!conf->tmppage)
1940 		goto out_free_conf;
1941 
1942 	conf->near_copies = nc;
1943 	conf->far_copies = fc;
1944 	conf->copies = nc*fc;
1945 	conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
1946 	conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
1947 	stride = mddev->size >> (conf->chunk_shift-1);
1948 	sector_div(stride, fc);
1949 	conf->stride = stride << conf->chunk_shift;
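	/*
	 * For a 64KiB chunk: ffz(~65536) == 16, so chunk_shift == 7
	 * (128 sectors) and chunk_mask == 127.  ->size is in 1K blocks, so
	 * size >> (chunk_shift - 1) is the number of chunks per device;
	 * divided by far_copies and shifted back up, stride is the size in
	 * sectors of one far-copy section.
	 */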
1950 
1951 	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
1952 						r10bio_pool_free, conf);
1953 	if (!conf->r10bio_pool) {
1954 		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1955 			mdname(mddev));
1956 		goto out_free_conf;
1957 	}
1958 
1959 	ITERATE_RDEV(mddev, rdev, tmp) {
1960 		disk_idx = rdev->raid_disk;
1961 		if (disk_idx >= mddev->raid_disks
1962 		    || disk_idx < 0)
1963 			continue;
1964 		disk = conf->mirrors + disk_idx;
1965 
1966 		disk->rdev = rdev;
1967 
1968 		blk_queue_stack_limits(mddev->queue,
1969 				       rdev->bdev->bd_disk->queue);
1970 		/* as we don't honour merge_bvec_fn, we must never risk
1971 		 * violating it, so limit ->max_sectors to one PAGE, as
1972 		 * a one-page request is never in violation.
1973 		 */
1974 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1975 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
1976 			mddev->queue->max_sectors = (PAGE_SIZE>>9);
1977 
1978 		disk->head_position = 0;
1979 		if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
1980 			conf->working_disks++;
1981 	}
1982 	conf->raid_disks = mddev->raid_disks;
1983 	conf->mddev = mddev;
1984 	spin_lock_init(&conf->device_lock);
1985 	INIT_LIST_HEAD(&conf->retry_list);
1986 
1987 	spin_lock_init(&conf->resync_lock);
1988 	init_waitqueue_head(&conf->wait_barrier);
1989 
1990 	/* need to check that every block has at least one working mirror */
1991 	if (!enough(conf)) {
1992 		printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
1993 		       mdname(mddev));
1994 		goto out_free_conf;
1995 	}
1996 
1997 	mddev->degraded = 0;
1998 	for (i = 0; i < conf->raid_disks; i++) {
1999 
2000 		disk = conf->mirrors + i;
2001 
2002 		if (!disk->rdev) {
2003 			disk->head_position = 0;
2004 			mddev->degraded++;
2005 		}
2006 	}
2007 
2008 
2009 	mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
2010 	if (!mddev->thread) {
2011 		printk(KERN_ERR
2012 		       "raid10: couldn't allocate thread for %s\n",
2013 		       mdname(mddev));
2014 		goto out_free_conf;
2015 	}
2016 
2017 	printk(KERN_INFO
2018 		"raid10: raid set %s active with %d out of %d devices\n",
2019 		mdname(mddev), mddev->raid_disks - mddev->degraded,
2020 		mddev->raid_disks);
2021 	/*
2022 	 * Ok, everything is just fine now
2023 	 */
2024 	size = conf->stride * conf->raid_disks;
2025 	sector_div(size, conf->near_copies);
2026 	mddev->array_size = size/2;
2027 	mddev->resync_max_sectors = size;
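	/*
	 * conf->stride already divides out far_copies, so stride *
	 * raid_disks / near_copies is the usable data size in sectors;
	 * ->array_size is kept in 1K blocks, hence the further /2.
	 */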
2028 
2029 	mddev->queue->unplug_fn = raid10_unplug;
2030 	mddev->queue->issue_flush_fn = raid10_issue_flush;
2031 
2032 	/* Calculate max read-ahead size.
2033 	 * We need to read ahead by at least twice a whole stripe...
2034 	 * maybe...
2035 	 */
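	/*
	 * For example: 4 disks, 64KiB chunks, 4KiB pages and near_copies
	 * of 2 give stripe == 4 * 64 / 4 / 2 == 32 pages, so ra_pages is
	 * raised to at least 64 pages (256KiB).
	 */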
2036 	{
2037 		int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
2038 		stripe /= conf->near_copies;
2039 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2040 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2041 	}
2042 
2043 	if (conf->near_copies < mddev->raid_disks)
2044 		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2045 	return 0;
2046 
2047 out_free_conf:
2048 	if (conf->r10bio_pool)
2049 		mempool_destroy(conf->r10bio_pool);
2050 	safe_put_page(conf->tmppage);
2051 	kfree(conf->mirrors);
2052 	kfree(conf);
2053 	mddev->private = NULL;
2054 out:
2055 	return -EIO;
2056 }
2057 
2058 static int stop(mddev_t *mddev)
2059 {
2060 	conf_t *conf = mddev_to_conf(mddev);
2061 
2062 	md_unregister_thread(mddev->thread);
2063 	mddev->thread = NULL;
2064 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2065 	if (conf->r10bio_pool)
2066 		mempool_destroy(conf->r10bio_pool);
2067 	kfree(conf->mirrors);
2068 	kfree(conf);
2069 	mddev->private = NULL;
2070 	return 0;
2071 }
2072 
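/*
 * state 1 raises the resync barrier, blocking new normal I/O until
 * state 0 lowers it again.  The raid10d wakeup interval is refreshed as
 * well, so that it matches whether a bitmap is present afterwards
 * (bitmaps are typically added or removed while the array is quiesced).
 */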
2073 static void raid10_quiesce(mddev_t *mddev, int state)
2074 {
2075 	conf_t *conf = mddev_to_conf(mddev);
2076 
2077 	switch(state) {
2078 	case 1:
2079 		raise_barrier(conf, 0);
2080 		break;
2081 	case 0:
2082 		lower_barrier(conf);
2083 		break;
2084 	}
2085 	if (mddev->thread) {
2086 		if (mddev->bitmap)
2087 			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2088 		else
2089 			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2090 		md_wakeup_thread(mddev->thread);
2091 	}
2092 }
2093 
2094 static struct mdk_personality raid10_personality =
2095 {
2096 	.name		= "raid10",
2097 	.level		= 10,
2098 	.owner		= THIS_MODULE,
2099 	.make_request	= make_request,
2100 	.run		= run,
2101 	.stop		= stop,
2102 	.status		= status,
2103 	.error_handler	= error,
2104 	.hot_add_disk	= raid10_add_disk,
2105 	.hot_remove_disk= raid10_remove_disk,
2106 	.spare_active	= raid10_spare_active,
2107 	.sync_request	= sync_request,
2108 	.quiesce	= raid10_quiesce,
2109 };
2110 
2111 static int __init raid_init(void)
2112 {
2113 	return register_md_personality(&raid10_personality);
2114 }
2115 
2116 static void raid_exit(void)
2117 {
2118 	unregister_md_personality(&raid10_personality);
2119 }
2120 
2121 module_init(raid_init);
2122 module_exit(raid_exit);
2123 MODULE_LICENSE("GPL");
2124 MODULE_ALIAS("md-personality-9"); /* RAID10 */
2125 MODULE_ALIAS("md-raid10");
2126 MODULE_ALIAS("md-level-10");
2127