xref: /openbmc/linux/drivers/md/raid1.c (revision 1877db75)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * raid1.c : Multiple Devices driver for Linux
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * RAID-1 management functions.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
111da177e4SLinus Torvalds  *
1296de0e25SJan Engelhardt  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
131da177e4SLinus Torvalds  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
141da177e4SLinus Torvalds  *
15191ea9b2SNeilBrown  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16191ea9b2SNeilBrown  * bitmapped intelligence in resync:
17191ea9b2SNeilBrown  *
18191ea9b2SNeilBrown  *      - bitmap marked during normal i/o
19191ea9b2SNeilBrown  *      - bitmap used to skip nondirty blocks during sync
20191ea9b2SNeilBrown  *
21191ea9b2SNeilBrown  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22191ea9b2SNeilBrown  * - persistent bitmap code
23191ea9b2SNeilBrown  *
241da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
251da177e4SLinus Torvalds  * it under the terms of the GNU General Public License as published by
261da177e4SLinus Torvalds  * the Free Software Foundation; either version 2, or (at your option)
271da177e4SLinus Torvalds  * any later version.
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  * You should have received a copy of the GNU General Public License
301da177e4SLinus Torvalds  * (for example /usr/src/linux/COPYING); if not, write to the Free
311da177e4SLinus Torvalds  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
321da177e4SLinus Torvalds  */
331da177e4SLinus Torvalds 
345a0e3ad6STejun Heo #include <linux/slab.h>
3525570727SStephen Rothwell #include <linux/delay.h>
36bff61975SNeilBrown #include <linux/blkdev.h>
37056075c7SPaul Gortmaker #include <linux/module.h>
38bff61975SNeilBrown #include <linux/seq_file.h>
398bda470eSChristian Dietrich #include <linux/ratelimit.h>
4043b2e5d8SNeilBrown #include "md.h"
41ef740c37SChristoph Hellwig #include "raid1.h"
42ef740c37SChristoph Hellwig #include "bitmap.h"
43191ea9b2SNeilBrown 
441da177e4SLinus Torvalds /*
451da177e4SLinus Torvalds  * Number of guaranteed r1bios in case of extreme VM load:
461da177e4SLinus Torvalds  */
471da177e4SLinus Torvalds #define	NR_RAID1_BIOS 256
481da177e4SLinus Torvalds 
49473e87ceSJonathan Brassow /* When we get a read error on a read-only array, we redirect to another
50473e87ceSJonathan Brassow  * device without failing the first device, and without trying to over-write
51473e87ceSJonathan Brassow  * to correct the read error.  To keep track of bad blocks on a per-bio
52473e87ceSJonathan Brassow  * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
53473e87ceSJonathan Brassow  */
54473e87ceSJonathan Brassow #define IO_BLOCKED ((struct bio *)1)
55473e87ceSJonathan Brassow /* When we successfully write to a known bad-block, we need to remove the
56473e87ceSJonathan Brassow  * bad-block marking which must be done from process context.  So we record
57473e87ceSJonathan Brassow  * the success by setting r1_bio->bios[n] to IO_MADE_GOOD.
58473e87ceSJonathan Brassow  */
59473e87ceSJonathan Brassow #define IO_MADE_GOOD ((struct bio *)2)
60473e87ceSJonathan Brassow 
61473e87ceSJonathan Brassow #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
62473e87ceSJonathan Brassow 
6334db0cd6SNeilBrown /* When there are this many requests queued to be written by
6434db0cd6SNeilBrown  * the raid1 thread, we become 'congested' to provide back-pressure
6534db0cd6SNeilBrown  * for writeback.
6634db0cd6SNeilBrown  */
6734db0cd6SNeilBrown static int max_queued_requests = 1024;
681da177e4SLinus Torvalds 
6979ef3a8aSmajianpeng static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
7079ef3a8aSmajianpeng 			  sector_t bi_sector);
71e8096360SNeilBrown static void lower_barrier(struct r1conf *conf);
721da177e4SLinus Torvalds 
73dd0fc66fSAl Viro static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
741da177e4SLinus Torvalds {
751da177e4SLinus Torvalds 	struct pool_info *pi = data;
769f2c9d12SNeilBrown 	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds 	/* allocate an r1bio with room for raid_disks entries in the bios array */
797eaceaccSJens Axboe 	return kzalloc(size, gfp_flags);
801da177e4SLinus Torvalds }
811da177e4SLinus Torvalds 
821da177e4SLinus Torvalds static void r1bio_pool_free(void *r1_bio, void *data)
831da177e4SLinus Torvalds {
841da177e4SLinus Torvalds 	kfree(r1_bio);
851da177e4SLinus Torvalds }
861da177e4SLinus Torvalds 
871da177e4SLinus Torvalds #define RESYNC_BLOCK_SIZE (64*1024)
888e005f7cSmajianpeng #define RESYNC_DEPTH 32
891da177e4SLinus Torvalds #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
901da177e4SLinus Torvalds #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
918e005f7cSmajianpeng #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
928e005f7cSmajianpeng #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
938e005f7cSmajianpeng #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
941da177e4SLinus Torvalds 
95dd0fc66fSAl Viro static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
961da177e4SLinus Torvalds {
971da177e4SLinus Torvalds 	struct pool_info *pi = data;
989f2c9d12SNeilBrown 	struct r1bio *r1_bio;
991da177e4SLinus Torvalds 	struct bio *bio;
1001da177e4SLinus Torvalds 	int i, j;
1011da177e4SLinus Torvalds 
1021da177e4SLinus Torvalds 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
1037eaceaccSJens Axboe 	if (!r1_bio)
1041da177e4SLinus Torvalds 		return NULL;
1051da177e4SLinus Torvalds 
1061da177e4SLinus Torvalds 	/*
1071da177e4SLinus Torvalds 	 * Allocate bios: 1 for reading, n-1 for writing
1081da177e4SLinus Torvalds 	 */
1091da177e4SLinus Torvalds 	for (j = pi->raid_disks ; j-- ; ) {
1106746557fSNeilBrown 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
1111da177e4SLinus Torvalds 		if (!bio)
1121da177e4SLinus Torvalds 			goto out_free_bio;
1131da177e4SLinus Torvalds 		r1_bio->bios[j] = bio;
1141da177e4SLinus Torvalds 	}
1151da177e4SLinus Torvalds 	/*
1161da177e4SLinus Torvalds 	 * Allocate RESYNC_PAGES data pages and attach them to
117d11c171eSNeilBrown 	 * the first bio.
118d11c171eSNeilBrown 	 * If this is a user-requested check/repair, allocate
119d11c171eSNeilBrown 	 * RESYNC_PAGES for each bio.
1201da177e4SLinus Torvalds 	 */
121d11c171eSNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
122d11c171eSNeilBrown 		j = pi->raid_disks;
123d11c171eSNeilBrown 	else
124d11c171eSNeilBrown 		j = 1;
125d11c171eSNeilBrown 	while (j--) {
126d11c171eSNeilBrown 		bio = r1_bio->bios[j];
127a0787606SKent Overstreet 		bio->bi_vcnt = RESYNC_PAGES;
1281da177e4SLinus Torvalds 
129a0787606SKent Overstreet 		if (bio_alloc_pages(bio, gfp_flags))
130a0787606SKent Overstreet 			goto out_free_bio;
131d11c171eSNeilBrown 	}
132d11c171eSNeilBrown 	/* If not a user-requested check/repair, copy the page pointers to all bios */
133d11c171eSNeilBrown 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
134d11c171eSNeilBrown 		for (i=0; i<RESYNC_PAGES ; i++)
135d11c171eSNeilBrown 			for (j=1; j<pi->raid_disks; j++)
136d11c171eSNeilBrown 				r1_bio->bios[j]->bi_io_vec[i].bv_page =
137d11c171eSNeilBrown 					r1_bio->bios[0]->bi_io_vec[i].bv_page;
138d11c171eSNeilBrown 	}
1391da177e4SLinus Torvalds 
1401da177e4SLinus Torvalds 	r1_bio->master_bio = NULL;
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds 	return r1_bio;
1431da177e4SLinus Torvalds 
1441da177e4SLinus Torvalds out_free_bio:
1451da177e4SLinus Torvalds 	while (++j < pi->raid_disks)
1461da177e4SLinus Torvalds 		bio_put(r1_bio->bios[j]);
1471da177e4SLinus Torvalds 	r1bio_pool_free(r1_bio, data);
1481da177e4SLinus Torvalds 	return NULL;
1491da177e4SLinus Torvalds }
1501da177e4SLinus Torvalds 
1511da177e4SLinus Torvalds static void r1buf_pool_free(void *__r1_bio, void *data)
1521da177e4SLinus Torvalds {
1531da177e4SLinus Torvalds 	struct pool_info *pi = data;
154d11c171eSNeilBrown 	int i,j;
1559f2c9d12SNeilBrown 	struct r1bio *r1bio = __r1_bio;
1561da177e4SLinus Torvalds 
157d11c171eSNeilBrown 	for (i = 0; i < RESYNC_PAGES; i++)
158d11c171eSNeilBrown 		for (j = pi->raid_disks; j-- ;) {
159d11c171eSNeilBrown 			if (j == 0 ||
160d11c171eSNeilBrown 			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
161d11c171eSNeilBrown 			    r1bio->bios[0]->bi_io_vec[i].bv_page)
1621345b1d8SNeilBrown 				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
1631da177e4SLinus Torvalds 		}
1641da177e4SLinus Torvalds 	for (i=0 ; i < pi->raid_disks; i++)
1651da177e4SLinus Torvalds 		bio_put(r1bio->bios[i]);
1661da177e4SLinus Torvalds 
1671da177e4SLinus Torvalds 	r1bio_pool_free(r1bio, data);
1681da177e4SLinus Torvalds }
1691da177e4SLinus Torvalds 
170e8096360SNeilBrown static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
1711da177e4SLinus Torvalds {
1721da177e4SLinus Torvalds 	int i;
1731da177e4SLinus Torvalds 
1748f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1751da177e4SLinus Torvalds 		struct bio **bio = r1_bio->bios + i;
1764367af55SNeilBrown 		if (!BIO_SPECIAL(*bio))
1771da177e4SLinus Torvalds 			bio_put(*bio);
1781da177e4SLinus Torvalds 		*bio = NULL;
1791da177e4SLinus Torvalds 	}
1801da177e4SLinus Torvalds }
1811da177e4SLinus Torvalds 
1829f2c9d12SNeilBrown static void free_r1bio(struct r1bio *r1_bio)
1831da177e4SLinus Torvalds {
184e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
1851da177e4SLinus Torvalds 
1861da177e4SLinus Torvalds 	put_all_bios(conf, r1_bio);
1871da177e4SLinus Torvalds 	mempool_free(r1_bio, conf->r1bio_pool);
1881da177e4SLinus Torvalds }
1891da177e4SLinus Torvalds 
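/*
 * Free a resync/recovery buffer: drop the rdev references taken for the
 * bios that were actually issued, return the r1bio to the resync pool
 * and lower the barrier so normal IO can proceed again.
 */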
1909f2c9d12SNeilBrown static void put_buf(struct r1bio *r1_bio)
1911da177e4SLinus Torvalds {
192e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
1933e198f78SNeilBrown 	int i;
1943e198f78SNeilBrown 
1958f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1963e198f78SNeilBrown 		struct bio *bio = r1_bio->bios[i];
1973e198f78SNeilBrown 		if (bio->bi_end_io)
1983e198f78SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
1993e198f78SNeilBrown 	}
2001da177e4SLinus Torvalds 
2011da177e4SLinus Torvalds 	mempool_free(r1_bio, conf->r1buf_pool);
2021da177e4SLinus Torvalds 
20317999be4SNeilBrown 	lower_barrier(conf);
2041da177e4SLinus Torvalds }
2051da177e4SLinus Torvalds 
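/*
 * Queue an r1bio that needs further attention (read error, write error,
 * bad-block clearing) for the raid1d thread and wake any barrier waiters.
 */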
2069f2c9d12SNeilBrown static void reschedule_retry(struct r1bio *r1_bio)
2071da177e4SLinus Torvalds {
2081da177e4SLinus Torvalds 	unsigned long flags;
209fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
210e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds 	spin_lock_irqsave(&conf->device_lock, flags);
2131da177e4SLinus Torvalds 	list_add(&r1_bio->retry_list, &conf->retry_list);
214ddaf22abSNeilBrown 	conf->nr_queued ++;
2151da177e4SLinus Torvalds 	spin_unlock_irqrestore(&conf->device_lock, flags);
2161da177e4SLinus Torvalds 
21717999be4SNeilBrown 	wake_up(&conf->wait_barrier);
2181da177e4SLinus Torvalds 	md_wakeup_thread(mddev->thread);
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds 
2211da177e4SLinus Torvalds /*
2221da177e4SLinus Torvalds  * raid_end_bio_io() is called when we have finished servicing a mirrored
2231da177e4SLinus Torvalds  * operation and are ready to return a success/failure code to the buffer
2241da177e4SLinus Torvalds  * cache layer.
2251da177e4SLinus Torvalds  */
2269f2c9d12SNeilBrown static void call_bio_endio(struct r1bio *r1_bio)
227d2eb35acSNeilBrown {
228d2eb35acSNeilBrown 	struct bio *bio = r1_bio->master_bio;
229d2eb35acSNeilBrown 	int done;
230e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
23179ef3a8aSmajianpeng 	sector_t start_next_window = r1_bio->start_next_window;
2324f024f37SKent Overstreet 	sector_t bi_sector = bio->bi_iter.bi_sector;
233d2eb35acSNeilBrown 
234d2eb35acSNeilBrown 	if (bio->bi_phys_segments) {
235d2eb35acSNeilBrown 		unsigned long flags;
236d2eb35acSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
237d2eb35acSNeilBrown 		bio->bi_phys_segments--;
238d2eb35acSNeilBrown 		done = (bio->bi_phys_segments == 0);
239d2eb35acSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
24079ef3a8aSmajianpeng 		/*
24179ef3a8aSmajianpeng 		 * make_request() might be waiting for
24279ef3a8aSmajianpeng 		 * bi_phys_segments to decrease
24379ef3a8aSmajianpeng 		 */
24479ef3a8aSmajianpeng 		wake_up(&conf->wait_barrier);
245d2eb35acSNeilBrown 	} else
246d2eb35acSNeilBrown 		done = 1;
247d2eb35acSNeilBrown 
248d2eb35acSNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
249d2eb35acSNeilBrown 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
250d2eb35acSNeilBrown 	if (done) {
251d2eb35acSNeilBrown 		bio_endio(bio, 0);
252d2eb35acSNeilBrown 		/*
253d2eb35acSNeilBrown 		 * Wake up any possible resync thread that waits for the device
254d2eb35acSNeilBrown 		 * to go idle.
255d2eb35acSNeilBrown 		 */
25679ef3a8aSmajianpeng 		allow_barrier(conf, start_next_window, bi_sector);
257d2eb35acSNeilBrown 	}
258d2eb35acSNeilBrown }
259d2eb35acSNeilBrown 
2609f2c9d12SNeilBrown static void raid_end_bio_io(struct r1bio *r1_bio)
2611da177e4SLinus Torvalds {
2621da177e4SLinus Torvalds 	struct bio *bio = r1_bio->master_bio;
2631da177e4SLinus Torvalds 
2644b6d287fSNeilBrown 	/* if nobody has done the final endio yet, do it now */
2654b6d287fSNeilBrown 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
26636a4e1feSNeilBrown 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
2674b6d287fSNeilBrown 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
2684f024f37SKent Overstreet 			 (unsigned long long) bio->bi_iter.bi_sector,
2694f024f37SKent Overstreet 			 (unsigned long long) bio_end_sector(bio) - 1);
2704b6d287fSNeilBrown 
271d2eb35acSNeilBrown 		call_bio_endio(r1_bio);
2724b6d287fSNeilBrown 	}
2731da177e4SLinus Torvalds 	free_r1bio(r1_bio);
2741da177e4SLinus Torvalds }
2751da177e4SLinus Torvalds 
2761da177e4SLinus Torvalds /*
2771da177e4SLinus Torvalds  * Update disk head position estimator based on IRQ completion info.
2781da177e4SLinus Torvalds  */
2799f2c9d12SNeilBrown static inline void update_head_pos(int disk, struct r1bio *r1_bio)
2801da177e4SLinus Torvalds {
281e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
2821da177e4SLinus Torvalds 
2831da177e4SLinus Torvalds 	conf->mirrors[disk].head_position =
2841da177e4SLinus Torvalds 		r1_bio->sector + (r1_bio->sectors);
2851da177e4SLinus Torvalds }
2861da177e4SLinus Torvalds 
287ba3ae3beSNamhyung Kim /*
288ba3ae3beSNamhyung Kim  * Find the disk number which triggered the given bio
289ba3ae3beSNamhyung Kim  */
2909f2c9d12SNeilBrown static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
291ba3ae3beSNamhyung Kim {
292ba3ae3beSNamhyung Kim 	int mirror;
29330194636SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
29430194636SNeilBrown 	int raid_disks = conf->raid_disks;
295ba3ae3beSNamhyung Kim 
2968f19ccb2SNeilBrown 	for (mirror = 0; mirror < raid_disks * 2; mirror++)
297ba3ae3beSNamhyung Kim 		if (r1_bio->bios[mirror] == bio)
298ba3ae3beSNamhyung Kim 			break;
299ba3ae3beSNamhyung Kim 
3008f19ccb2SNeilBrown 	BUG_ON(mirror == raid_disks * 2);
301ba3ae3beSNamhyung Kim 	update_head_pos(mirror, r1_bio);
302ba3ae3beSNamhyung Kim 
303ba3ae3beSNamhyung Kim 	return mirror;
304ba3ae3beSNamhyung Kim }
305ba3ae3beSNamhyung Kim 
3066712ecf8SNeilBrown static void raid1_end_read_request(struct bio *bio, int error)
3071da177e4SLinus Torvalds {
3081da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
3099f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
3101da177e4SLinus Torvalds 	int mirror;
311e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
3121da177e4SLinus Torvalds 
3131da177e4SLinus Torvalds 	mirror = r1_bio->read_disk;
3141da177e4SLinus Torvalds 	/*
3151da177e4SLinus Torvalds 	 * this branch is our 'one mirror IO has finished' event handler:
3161da177e4SLinus Torvalds 	 */
317ddaf22abSNeilBrown 	update_head_pos(mirror, r1_bio);
318ddaf22abSNeilBrown 
319220946c9SNeilBrown 	if (uptodate)
3201da177e4SLinus Torvalds 		set_bit(R1BIO_Uptodate, &r1_bio->state);
321dd00a99eSNeilBrown 	else {
322dd00a99eSNeilBrown 		/* If all other devices have failed, we want to return
323dd00a99eSNeilBrown 		 * the error upwards rather than fail the last device.
324dd00a99eSNeilBrown 		 * Here we redefine "uptodate" to mean "Don't want to retry"
325dd00a99eSNeilBrown 		 */
326dd00a99eSNeilBrown 		unsigned long flags;
327dd00a99eSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
328dd00a99eSNeilBrown 		if (r1_bio->mddev->degraded == conf->raid_disks ||
329dd00a99eSNeilBrown 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
330dd00a99eSNeilBrown 		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
331dd00a99eSNeilBrown 			uptodate = 1;
332dd00a99eSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
333dd00a99eSNeilBrown 	}
3341da177e4SLinus Torvalds 
3357ad4d4a6SNeilBrown 	if (uptodate) {
3361da177e4SLinus Torvalds 		raid_end_bio_io(r1_bio);
3377ad4d4a6SNeilBrown 		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
3387ad4d4a6SNeilBrown 	} else {
3391da177e4SLinus Torvalds 		/*
3401da177e4SLinus Torvalds 		 * oops, read error:
3411da177e4SLinus Torvalds 		 */
3421da177e4SLinus Torvalds 		char b[BDEVNAME_SIZE];
3438bda470eSChristian Dietrich 		printk_ratelimited(
3448bda470eSChristian Dietrich 			KERN_ERR "md/raid1:%s: %s: "
3458bda470eSChristian Dietrich 			"rescheduling sector %llu\n",
3469dd1e2faSNeilBrown 			mdname(conf->mddev),
3478bda470eSChristian Dietrich 			bdevname(conf->mirrors[mirror].rdev->bdev,
3488bda470eSChristian Dietrich 				 b),
3498bda470eSChristian Dietrich 			(unsigned long long)r1_bio->sector);
350d2eb35acSNeilBrown 		set_bit(R1BIO_ReadError, &r1_bio->state);
3511da177e4SLinus Torvalds 		reschedule_retry(r1_bio);
3527ad4d4a6SNeilBrown 		/* don't drop the reference on read_disk yet */
3531da177e4SLinus Torvalds 	}
3541da177e4SLinus Torvalds }
3551da177e4SLinus Torvalds 
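/*
 * Final cleanup for a completed write: free any write-behind copies of
 * the data, update the bitmap for this range and tell md that the write
 * has ended.
 */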
3569f2c9d12SNeilBrown static void close_write(struct r1bio *r1_bio)
3574e78064fSNeilBrown {
3584e78064fSNeilBrown 	/* it really is the end of this request */
3594e78064fSNeilBrown 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
3604e78064fSNeilBrown 		/* free extra copy of the data pages */
361af6d7b76SNeilBrown 		int i = r1_bio->behind_page_count;
3624e78064fSNeilBrown 		while (i--)
3632ca68f5eSNeilBrown 			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
3642ca68f5eSNeilBrown 		kfree(r1_bio->behind_bvecs);
3652ca68f5eSNeilBrown 		r1_bio->behind_bvecs = NULL;
3664e78064fSNeilBrown 	}
3674e78064fSNeilBrown 	/* clear the bitmap if all writes complete successfully */
3684e78064fSNeilBrown 	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
3694e78064fSNeilBrown 			r1_bio->sectors,
3704e78064fSNeilBrown 			!test_bit(R1BIO_Degraded, &r1_bio->state),
371af6d7b76SNeilBrown 			test_bit(R1BIO_BehindIO, &r1_bio->state));
3724e78064fSNeilBrown 	md_write_end(r1_bio->mddev);
373cd5ff9a1SNeilBrown }
374cd5ff9a1SNeilBrown 
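/*
 * Called as each mirrored write finishes.  When the last one drops the
 * 'remaining' count, either hand the r1bio to raid1d (if a write error
 * or bad-block update needs process context) or complete the master bio.
 */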
3759f2c9d12SNeilBrown static void r1_bio_write_done(struct r1bio *r1_bio)
376cd5ff9a1SNeilBrown {
377cd5ff9a1SNeilBrown 	if (!atomic_dec_and_test(&r1_bio->remaining))
378cd5ff9a1SNeilBrown 		return;
379cd5ff9a1SNeilBrown 
380cd5ff9a1SNeilBrown 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
381cd5ff9a1SNeilBrown 		reschedule_retry(r1_bio);
382cd5ff9a1SNeilBrown 	else {
383cd5ff9a1SNeilBrown 		close_write(r1_bio);
3844367af55SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
3854367af55SNeilBrown 			reschedule_retry(r1_bio);
3864367af55SNeilBrown 		else
3874e78064fSNeilBrown 			raid_end_bio_io(r1_bio);
3884e78064fSNeilBrown 	}
3894e78064fSNeilBrown }
3904e78064fSNeilBrown 
3916712ecf8SNeilBrown static void raid1_end_write_request(struct bio *bio, int error)
3921da177e4SLinus Torvalds {
3931da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
3949f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
395a9701a30SNeilBrown 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
396e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
39704b857f7SNeilBrown 	struct bio *to_put = NULL;
3981da177e4SLinus Torvalds 
399ba3ae3beSNamhyung Kim 	mirror = find_bio_disk(r1_bio, bio);
4001da177e4SLinus Torvalds 
4011da177e4SLinus Torvalds 	/*
402e9c7469bSTejun Heo 	 * 'one mirror IO has finished' event handler:
4031da177e4SLinus Torvalds 	 */
404191ea9b2SNeilBrown 	if (!uptodate) {
405cd5ff9a1SNeilBrown 		set_bit(WriteErrorSeen,
406cd5ff9a1SNeilBrown 			&conf->mirrors[mirror].rdev->flags);
40719d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
40819d67169SNeilBrown 				      &conf->mirrors[mirror].rdev->flags))
40919d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
41019d67169SNeilBrown 				conf->mddev->recovery);
41119d67169SNeilBrown 
412cd5ff9a1SNeilBrown 		set_bit(R1BIO_WriteError, &r1_bio->state);
4134367af55SNeilBrown 	} else {
4141da177e4SLinus Torvalds 		/*
415e9c7469bSTejun Heo 		 * Set R1BIO_Uptodate in our master bio, so that we
416e9c7469bSTejun Heo 		 * will return a good error code to the higher
417e9c7469bSTejun Heo 		 * levels even if IO on some other mirrored buffer
418e9c7469bSTejun Heo 		 * fails.
4191da177e4SLinus Torvalds 		 *
420e9c7469bSTejun Heo 		 * The 'master' represents the composite IO operation
421e9c7469bSTejun Heo 		 * to user-side. So if something waits for IO, then it
422e9c7469bSTejun Heo 		 * will wait for the 'master' bio.
4231da177e4SLinus Torvalds 		 */
4244367af55SNeilBrown 		sector_t first_bad;
4254367af55SNeilBrown 		int bad_sectors;
4264367af55SNeilBrown 
427cd5ff9a1SNeilBrown 		r1_bio->bios[mirror] = NULL;
428cd5ff9a1SNeilBrown 		to_put = bio;
4293056e3aeSAlex Lyakas 		/*
4303056e3aeSAlex Lyakas 		 * Do not set R1BIO_Uptodate if the current device is
4313056e3aeSAlex Lyakas 		 * rebuilding or Faulty. This is because we cannot use
4323056e3aeSAlex Lyakas 		 * such a device for properly reading the data back (we could
4333056e3aeSAlex Lyakas 		 * potentially use it, if the current write would have landed
4343056e3aeSAlex Lyakas 		 * before rdev->recovery_offset, but for simplicity we don't
4353056e3aeSAlex Lyakas 		 * check this here).
4363056e3aeSAlex Lyakas 		 */
4373056e3aeSAlex Lyakas 		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
4383056e3aeSAlex Lyakas 		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
4391da177e4SLinus Torvalds 			set_bit(R1BIO_Uptodate, &r1_bio->state);
4401da177e4SLinus Torvalds 
4414367af55SNeilBrown 		/* Maybe we can clear some bad blocks. */
4424367af55SNeilBrown 		if (is_badblock(conf->mirrors[mirror].rdev,
4434367af55SNeilBrown 				r1_bio->sector, r1_bio->sectors,
4444367af55SNeilBrown 				&first_bad, &bad_sectors)) {
4454367af55SNeilBrown 			r1_bio->bios[mirror] = IO_MADE_GOOD;
4464367af55SNeilBrown 			set_bit(R1BIO_MadeGood, &r1_bio->state);
4474367af55SNeilBrown 		}
4484367af55SNeilBrown 	}
4494367af55SNeilBrown 
4504b6d287fSNeilBrown 	if (behind) {
4514b6d287fSNeilBrown 		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
4524b6d287fSNeilBrown 			atomic_dec(&r1_bio->behind_remaining);
4534b6d287fSNeilBrown 
454e9c7469bSTejun Heo 		/*
455e9c7469bSTejun Heo 		 * In behind mode, we ACK the master bio once the I/O
456e9c7469bSTejun Heo 		 * has safely reached all non-writemostly
457e9c7469bSTejun Heo 		 * disks. Setting the Returned bit ensures that this
458e9c7469bSTejun Heo 		 * gets done only once -- we don't ever want to return
459e9c7469bSTejun Heo 		 * -EIO here, instead we'll wait
460e9c7469bSTejun Heo 		 */
4614b6d287fSNeilBrown 		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
4624b6d287fSNeilBrown 		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
4634b6d287fSNeilBrown 			/* Maybe we can return now */
4644b6d287fSNeilBrown 			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
4654b6d287fSNeilBrown 				struct bio *mbio = r1_bio->master_bio;
46636a4e1feSNeilBrown 				pr_debug("raid1: behind end write sectors"
46736a4e1feSNeilBrown 					 " %llu-%llu\n",
4684f024f37SKent Overstreet 					 (unsigned long long) mbio->bi_iter.bi_sector,
4694f024f37SKent Overstreet 					 (unsigned long long) bio_end_sector(mbio) - 1);
470d2eb35acSNeilBrown 				call_bio_endio(r1_bio);
4714b6d287fSNeilBrown 			}
4724b6d287fSNeilBrown 		}
4734b6d287fSNeilBrown 	}
4744367af55SNeilBrown 	if (r1_bio->bios[mirror] == NULL)
4754367af55SNeilBrown 		rdev_dec_pending(conf->mirrors[mirror].rdev,
4764367af55SNeilBrown 				 conf->mddev);
477e9c7469bSTejun Heo 
4781da177e4SLinus Torvalds 	/*
4791da177e4SLinus Torvalds 	 * Let's see if all mirrored write operations have finished
4801da177e4SLinus Torvalds 	 * already.
4811da177e4SLinus Torvalds 	 */
482af6d7b76SNeilBrown 	r1_bio_write_done(r1_bio);
483c70810b3SNeilBrown 
48404b857f7SNeilBrown 	if (to_put)
48504b857f7SNeilBrown 		bio_put(to_put);
4861da177e4SLinus Torvalds }
4871da177e4SLinus Torvalds 
4881da177e4SLinus Torvalds 
4891da177e4SLinus Torvalds /*
4901da177e4SLinus Torvalds  * This routine returns the disk from which the requested read should
4911da177e4SLinus Torvalds  * be done. There is a per-array 'next expected sequential IO' sector
4921da177e4SLinus Torvalds  * number - if this matches on the next IO then we use the last disk.
4931da177e4SLinus Torvalds  * There is also a per-disk 'last known head position' sector that is
4941da177e4SLinus Torvalds  * maintained from IRQ contexts; both the normal and the resync IO
4951da177e4SLinus Torvalds  * completion handlers update this position correctly. If there is no
4961da177e4SLinus Torvalds  * perfect sequential match then we pick the disk whose head is closest.
4971da177e4SLinus Torvalds  *
4981da177e4SLinus Torvalds  * If there are 2 mirrors in the same 2 devices, performance degrades
4991da177e4SLinus Torvalds  * because position is mirror, not device based.
5001da177e4SLinus Torvalds  *
5011da177e4SLinus Torvalds  * The rdev for the device selected will have nr_pending incremented.
5021da177e4SLinus Torvalds  */
503e8096360SNeilBrown static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
5041da177e4SLinus Torvalds {
505af3a2cd6SNeilBrown 	const sector_t this_sector = r1_bio->sector;
506d2eb35acSNeilBrown 	int sectors;
507d2eb35acSNeilBrown 	int best_good_sectors;
5089dedf603SShaohua Li 	int best_disk, best_dist_disk, best_pending_disk;
5099dedf603SShaohua Li 	int has_nonrot_disk;
510be4d3280SShaohua Li 	int disk;
51176073054SNeilBrown 	sector_t best_dist;
5129dedf603SShaohua Li 	unsigned int min_pending;
5133cb03002SNeilBrown 	struct md_rdev *rdev;
514f3ac8bf7SNeilBrown 	int choose_first;
51512cee5a8SShaohua Li 	int choose_next_idle;
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds 	rcu_read_lock();
5181da177e4SLinus Torvalds 	/*
5198ddf9efeSNeilBrown 	 * Check if we can balance. We can balance on the whole
5201da177e4SLinus Torvalds 	 * device if no resync is going on, or below the resync window.
5211da177e4SLinus Torvalds 	 * We take the first readable disk when above the resync window.
5221da177e4SLinus Torvalds 	 */
5231da177e4SLinus Torvalds  retry:
524d2eb35acSNeilBrown 	sectors = r1_bio->sectors;
52576073054SNeilBrown 	best_disk = -1;
5269dedf603SShaohua Li 	best_dist_disk = -1;
52776073054SNeilBrown 	best_dist = MaxSector;
5289dedf603SShaohua Li 	best_pending_disk = -1;
5299dedf603SShaohua Li 	min_pending = UINT_MAX;
530d2eb35acSNeilBrown 	best_good_sectors = 0;
5319dedf603SShaohua Li 	has_nonrot_disk = 0;
53212cee5a8SShaohua Li 	choose_next_idle = 0;
533d2eb35acSNeilBrown 
5341da177e4SLinus Torvalds 	if (conf->mddev->recovery_cp < MaxSector &&
535be4d3280SShaohua Li 	    (this_sector + sectors >= conf->next_resync))
536f3ac8bf7SNeilBrown 		choose_first = 1;
537be4d3280SShaohua Li 	else
538f3ac8bf7SNeilBrown 		choose_first = 0;
5391da177e4SLinus Torvalds 
540be4d3280SShaohua Li 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
54176073054SNeilBrown 		sector_t dist;
542d2eb35acSNeilBrown 		sector_t first_bad;
543d2eb35acSNeilBrown 		int bad_sectors;
5449dedf603SShaohua Li 		unsigned int pending;
54512cee5a8SShaohua Li 		bool nonrot;
546d2eb35acSNeilBrown 
547f3ac8bf7SNeilBrown 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
548f3ac8bf7SNeilBrown 		if (r1_bio->bios[disk] == IO_BLOCKED
549f3ac8bf7SNeilBrown 		    || rdev == NULL
5506b740b8dSNeilBrown 		    || test_bit(Unmerged, &rdev->flags)
55176073054SNeilBrown 		    || test_bit(Faulty, &rdev->flags))
552f3ac8bf7SNeilBrown 			continue;
55376073054SNeilBrown 		if (!test_bit(In_sync, &rdev->flags) &&
55476073054SNeilBrown 		    rdev->recovery_offset < this_sector + sectors)
55576073054SNeilBrown 			continue;
55676073054SNeilBrown 		if (test_bit(WriteMostly, &rdev->flags)) {
55776073054SNeilBrown 			/* Don't balance among write-mostly, just
55876073054SNeilBrown 			 * use the first as a last resort */
559307729c8SNeilBrown 			if (best_disk < 0) {
560307729c8SNeilBrown 				if (is_badblock(rdev, this_sector, sectors,
561307729c8SNeilBrown 						&first_bad, &bad_sectors)) {
562307729c8SNeilBrown 					if (first_bad < this_sector)
563307729c8SNeilBrown 						/* Cannot use this */
564307729c8SNeilBrown 						continue;
565307729c8SNeilBrown 					best_good_sectors = first_bad - this_sector;
566307729c8SNeilBrown 				} else
567307729c8SNeilBrown 					best_good_sectors = sectors;
56876073054SNeilBrown 				best_disk = disk;
569307729c8SNeilBrown 			}
57076073054SNeilBrown 			continue;
5718ddf9efeSNeilBrown 		}
57276073054SNeilBrown 		/* This is a reasonable device to use.  It might
57376073054SNeilBrown 		 * even be best.
5741da177e4SLinus Torvalds 		 */
575d2eb35acSNeilBrown 		if (is_badblock(rdev, this_sector, sectors,
576d2eb35acSNeilBrown 				&first_bad, &bad_sectors)) {
577d2eb35acSNeilBrown 			if (best_dist < MaxSector)
578d2eb35acSNeilBrown 				/* already have a better device */
579d2eb35acSNeilBrown 				continue;
580d2eb35acSNeilBrown 			if (first_bad <= this_sector) {
581d2eb35acSNeilBrown 				/* cannot read here. If this is the 'primary'
582d2eb35acSNeilBrown 				 * device, then we must not read beyond
583d2eb35acSNeilBrown 				 * bad_sectors from another device.
584d2eb35acSNeilBrown 				 */
585d2eb35acSNeilBrown 				bad_sectors -= (this_sector - first_bad);
586d2eb35acSNeilBrown 				if (choose_first && sectors > bad_sectors)
587d2eb35acSNeilBrown 					sectors = bad_sectors;
588d2eb35acSNeilBrown 				if (best_good_sectors > sectors)
589d2eb35acSNeilBrown 					best_good_sectors = sectors;
590d2eb35acSNeilBrown 
591d2eb35acSNeilBrown 			} else {
592d2eb35acSNeilBrown 				sector_t good_sectors = first_bad - this_sector;
593d2eb35acSNeilBrown 				if (good_sectors > best_good_sectors) {
594d2eb35acSNeilBrown 					best_good_sectors = good_sectors;
595d2eb35acSNeilBrown 					best_disk = disk;
596d2eb35acSNeilBrown 				}
597d2eb35acSNeilBrown 				if (choose_first)
598d2eb35acSNeilBrown 					break;
599d2eb35acSNeilBrown 			}
600d2eb35acSNeilBrown 			continue;
601d2eb35acSNeilBrown 		} else
602d2eb35acSNeilBrown 			best_good_sectors = sectors;
603d2eb35acSNeilBrown 
60412cee5a8SShaohua Li 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
60512cee5a8SShaohua Li 		has_nonrot_disk |= nonrot;
6069dedf603SShaohua Li 		pending = atomic_read(&rdev->nr_pending);
60776073054SNeilBrown 		dist = abs(this_sector - conf->mirrors[disk].head_position);
60812cee5a8SShaohua Li 		if (choose_first) {
60976073054SNeilBrown 			best_disk = disk;
6101da177e4SLinus Torvalds 			break;
6111da177e4SLinus Torvalds 		}
61212cee5a8SShaohua Li 		/* Don't change to another disk for sequential reads */
61312cee5a8SShaohua Li 		if (conf->mirrors[disk].next_seq_sect == this_sector
61412cee5a8SShaohua Li 		    || dist == 0) {
61512cee5a8SShaohua Li 			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
61612cee5a8SShaohua Li 			struct raid1_info *mirror = &conf->mirrors[disk];
61712cee5a8SShaohua Li 
61812cee5a8SShaohua Li 			best_disk = disk;
61912cee5a8SShaohua Li 			/*
62012cee5a8SShaohua Li 			 * If the buffered sequential IO size exceeds the
62112cee5a8SShaohua Li 			 * optimal iosize, check whether there is an idle disk
62212cee5a8SShaohua Li 			 * and, if so, choose it. read_balance could already
62312cee5a8SShaohua Li 			 * have chosen an idle disk before noticing that this is
62412cee5a8SShaohua Li 			 * sequential IO on this disk. That doesn't matter: this
62512cee5a8SShaohua Li 			 * disk will go idle and will be used again once the
62612cee5a8SShaohua Li 			 * first disk's IO size exceeds the optimal iosize, so
62712cee5a8SShaohua Li 			 * the first disk's iosize will be at least the optimal
62812cee5a8SShaohua Li 			 * iosize. The second disk's iosize might be small, but
62912cee5a8SShaohua Li 			 * that is not a big deal since when the second disk
63012cee5a8SShaohua Li 			 * starts IO, the first disk is likely still busy.
63112cee5a8SShaohua Li 			 */
63212cee5a8SShaohua Li 			if (nonrot && opt_iosize > 0 &&
63312cee5a8SShaohua Li 			    mirror->seq_start != MaxSector &&
63412cee5a8SShaohua Li 			    mirror->next_seq_sect > opt_iosize &&
63512cee5a8SShaohua Li 			    mirror->next_seq_sect - opt_iosize >=
63612cee5a8SShaohua Li 			    mirror->seq_start) {
63712cee5a8SShaohua Li 				choose_next_idle = 1;
63812cee5a8SShaohua Li 				continue;
63912cee5a8SShaohua Li 			}
64012cee5a8SShaohua Li 			break;
64112cee5a8SShaohua Li 		}
64212cee5a8SShaohua Li 		/* If device is idle, use it */
64312cee5a8SShaohua Li 		if (pending == 0) {
64412cee5a8SShaohua Li 			best_disk = disk;
64512cee5a8SShaohua Li 			break;
64612cee5a8SShaohua Li 		}
64712cee5a8SShaohua Li 
64812cee5a8SShaohua Li 		if (choose_next_idle)
64912cee5a8SShaohua Li 			continue;
6509dedf603SShaohua Li 
6519dedf603SShaohua Li 		if (min_pending > pending) {
6529dedf603SShaohua Li 			min_pending = pending;
6539dedf603SShaohua Li 			best_pending_disk = disk;
6549dedf603SShaohua Li 		}
6559dedf603SShaohua Li 
65676073054SNeilBrown 		if (dist < best_dist) {
65776073054SNeilBrown 			best_dist = dist;
6589dedf603SShaohua Li 			best_dist_disk = disk;
6591da177e4SLinus Torvalds 		}
660f3ac8bf7SNeilBrown 	}
6611da177e4SLinus Torvalds 
6629dedf603SShaohua Li 	/*
6639dedf603SShaohua Li 	 * If all disks are rotational, choose the closest disk. If any disk is
6649dedf603SShaohua Li 	 * non-rotational, choose the disk with the fewest pending requests, even
6659dedf603SShaohua Li 	 * if that disk is rotational, which may or may not be optimal for raids
6669dedf603SShaohua Li 	 * with mixed rotational/non-rotational disks depending on workload.
6679dedf603SShaohua Li 	 */
6689dedf603SShaohua Li 	if (best_disk == -1) {
6699dedf603SShaohua Li 		if (has_nonrot_disk)
6709dedf603SShaohua Li 			best_disk = best_pending_disk;
6719dedf603SShaohua Li 		else
6729dedf603SShaohua Li 			best_disk = best_dist_disk;
6739dedf603SShaohua Li 	}
6749dedf603SShaohua Li 
67576073054SNeilBrown 	if (best_disk >= 0) {
67676073054SNeilBrown 		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
6778ddf9efeSNeilBrown 		if (!rdev)
6788ddf9efeSNeilBrown 			goto retry;
6798ddf9efeSNeilBrown 		atomic_inc(&rdev->nr_pending);
68076073054SNeilBrown 		if (test_bit(Faulty, &rdev->flags)) {
6811da177e4SLinus Torvalds 			/* cannot risk returning a device that failed
6821da177e4SLinus Torvalds 			 * before we inc'ed nr_pending
6831da177e4SLinus Torvalds 			 */
68403c902e1SNeilBrown 			rdev_dec_pending(rdev, conf->mddev);
6851da177e4SLinus Torvalds 			goto retry;
6861da177e4SLinus Torvalds 		}
687d2eb35acSNeilBrown 		sectors = best_good_sectors;
68812cee5a8SShaohua Li 
68912cee5a8SShaohua Li 		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
69012cee5a8SShaohua Li 			conf->mirrors[best_disk].seq_start = this_sector;
69112cee5a8SShaohua Li 
692be4d3280SShaohua Li 		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
6931da177e4SLinus Torvalds 	}
6941da177e4SLinus Torvalds 	rcu_read_unlock();
695d2eb35acSNeilBrown 	*max_sectors = sectors;
6961da177e4SLinus Torvalds 
69776073054SNeilBrown 	return best_disk;
6981da177e4SLinus Torvalds }
6991da177e4SLinus Torvalds 
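/*
 * Limit how large a bio may grow: if any non-faulty member device has its
 * own merge_bvec_fn, consult it and take the smallest allowed size.
 */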
7006b740b8dSNeilBrown static int raid1_mergeable_bvec(struct request_queue *q,
7016b740b8dSNeilBrown 				struct bvec_merge_data *bvm,
7026b740b8dSNeilBrown 				struct bio_vec *biovec)
7036b740b8dSNeilBrown {
7046b740b8dSNeilBrown 	struct mddev *mddev = q->queuedata;
7056b740b8dSNeilBrown 	struct r1conf *conf = mddev->private;
7066b740b8dSNeilBrown 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
7076b740b8dSNeilBrown 	int max = biovec->bv_len;
7086b740b8dSNeilBrown 
7096b740b8dSNeilBrown 	if (mddev->merge_check_needed) {
7106b740b8dSNeilBrown 		int disk;
7116b740b8dSNeilBrown 		rcu_read_lock();
7126b740b8dSNeilBrown 		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
7136b740b8dSNeilBrown 			struct md_rdev *rdev = rcu_dereference(
7146b740b8dSNeilBrown 				conf->mirrors[disk].rdev);
7156b740b8dSNeilBrown 			if (rdev && !test_bit(Faulty, &rdev->flags)) {
7166b740b8dSNeilBrown 				struct request_queue *q =
7176b740b8dSNeilBrown 					bdev_get_queue(rdev->bdev);
7186b740b8dSNeilBrown 				if (q->merge_bvec_fn) {
7196b740b8dSNeilBrown 					bvm->bi_sector = sector +
7206b740b8dSNeilBrown 						rdev->data_offset;
7216b740b8dSNeilBrown 					bvm->bi_bdev = rdev->bdev;
7226b740b8dSNeilBrown 					max = min(max, q->merge_bvec_fn(
7236b740b8dSNeilBrown 							  q, bvm, biovec));
7246b740b8dSNeilBrown 				}
7256b740b8dSNeilBrown 			}
7266b740b8dSNeilBrown 		}
7276b740b8dSNeilBrown 		rcu_read_unlock();
7286b740b8dSNeilBrown 	}
7296b740b8dSNeilBrown 	return max;
7306b740b8dSNeilBrown 
7316b740b8dSNeilBrown }
7326b740b8dSNeilBrown 
733fd01b88cSNeilBrown int md_raid1_congested(struct mddev *mddev, int bits)
7340d129228SNeilBrown {
735e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
7360d129228SNeilBrown 	int i, ret = 0;
7370d129228SNeilBrown 
73834db0cd6SNeilBrown 	if ((bits & (1 << BDI_async_congested)) &&
73934db0cd6SNeilBrown 	    conf->pending_count >= max_queued_requests)
74034db0cd6SNeilBrown 		return 1;
74134db0cd6SNeilBrown 
7420d129228SNeilBrown 	rcu_read_lock();
743f53e29fcSNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
7443cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
7450d129228SNeilBrown 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
746165125e1SJens Axboe 			struct request_queue *q = bdev_get_queue(rdev->bdev);
7470d129228SNeilBrown 
7481ed7242eSJonathan Brassow 			BUG_ON(!q);
7491ed7242eSJonathan Brassow 
7500d129228SNeilBrown 			/* Note the '|| 1' - when read_balance prefers
7510d129228SNeilBrown 			 * non-congested targets, it can be removed
7520d129228SNeilBrown 			 */
75391a9e99dSAlexander Beregalov 			if ((bits & (1<<BDI_async_congested)) || 1)
7540d129228SNeilBrown 				ret |= bdi_congested(&q->backing_dev_info, bits);
7550d129228SNeilBrown 			else
7560d129228SNeilBrown 				ret &= bdi_congested(&q->backing_dev_info, bits);
7570d129228SNeilBrown 		}
7580d129228SNeilBrown 	}
7590d129228SNeilBrown 	rcu_read_unlock();
7600d129228SNeilBrown 	return ret;
7610d129228SNeilBrown }
7621ed7242eSJonathan Brassow EXPORT_SYMBOL_GPL(md_raid1_congested);
7630d129228SNeilBrown 
7641ed7242eSJonathan Brassow static int raid1_congested(void *data, int bits)
7651ed7242eSJonathan Brassow {
766fd01b88cSNeilBrown 	struct mddev *mddev = data;
7671ed7242eSJonathan Brassow 
7681ed7242eSJonathan Brassow 	return mddev_congested(mddev, bits) ||
7691ed7242eSJonathan Brassow 		md_raid1_congested(mddev, bits);
7701ed7242eSJonathan Brassow }
7710d129228SNeilBrown 
772e8096360SNeilBrown static void flush_pending_writes(struct r1conf *conf)
773a35e63efSNeilBrown {
774a35e63efSNeilBrown 	/* Any writes that have been queued but are awaiting
775a35e63efSNeilBrown 	 * bitmap updates get flushed here.
776a35e63efSNeilBrown 	 */
777a35e63efSNeilBrown 	spin_lock_irq(&conf->device_lock);
778a35e63efSNeilBrown 
779a35e63efSNeilBrown 	if (conf->pending_bio_list.head) {
780a35e63efSNeilBrown 		struct bio *bio;
781a35e63efSNeilBrown 		bio = bio_list_get(&conf->pending_bio_list);
78234db0cd6SNeilBrown 		conf->pending_count = 0;
783a35e63efSNeilBrown 		spin_unlock_irq(&conf->device_lock);
784a35e63efSNeilBrown 		/* flush any pending bitmap writes to
785a35e63efSNeilBrown 		 * disk before proceeding w/ I/O */
786a35e63efSNeilBrown 		bitmap_unplug(conf->mddev->bitmap);
78734db0cd6SNeilBrown 		wake_up(&conf->wait_barrier);
788a35e63efSNeilBrown 
789a35e63efSNeilBrown 		while (bio) { /* submit pending writes */
790a35e63efSNeilBrown 			struct bio *next = bio->bi_next;
791a35e63efSNeilBrown 			bio->bi_next = NULL;
7922ff8cc2cSShaohua Li 			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
7932ff8cc2cSShaohua Li 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
7942ff8cc2cSShaohua Li 				/* Just ignore it */
7952ff8cc2cSShaohua Li 				bio_endio(bio, 0);
7962ff8cc2cSShaohua Li 			else
797a35e63efSNeilBrown 				generic_make_request(bio);
798a35e63efSNeilBrown 			bio = next;
799a35e63efSNeilBrown 		}
800a35e63efSNeilBrown 	} else
801a35e63efSNeilBrown 		spin_unlock_irq(&conf->device_lock);
8027eaceaccSJens Axboe }
8037eaceaccSJens Axboe 
80417999be4SNeilBrown /* Barriers....
80517999be4SNeilBrown  * Sometimes we need to suspend IO while we do something else,
80617999be4SNeilBrown  * either some resync/recovery, or reconfigure the array.
80717999be4SNeilBrown  * To do this we raise a 'barrier'.
80817999be4SNeilBrown  * The 'barrier' is a counter that can be raised multiple times
80917999be4SNeilBrown  * to count how many activities are happening which preclude
81017999be4SNeilBrown  * normal IO.
81117999be4SNeilBrown  * We can only raise the barrier if there is no pending IO.
81217999be4SNeilBrown  * i.e. if nr_pending == 0.
81317999be4SNeilBrown  * We choose only to raise the barrier if no-one is waiting for the
81417999be4SNeilBrown  * barrier to go down.  This means that as soon as an IO request
81517999be4SNeilBrown  * is ready, no other operations which require a barrier will start
81617999be4SNeilBrown  * until the IO request has had a chance.
81717999be4SNeilBrown  *
81817999be4SNeilBrown  * So: regular IO calls 'wait_barrier'.  When that returns there
81917999be4SNeilBrown  *    is no background IO happening.  It must arrange to call
82017999be4SNeilBrown  *    allow_barrier when it has finished its IO.
82117999be4SNeilBrown  * background IO calls must call raise_barrier.  Once that returns
82217999be4SNeilBrown  *    there is no normal IO happening.  It must arrange to call
82317999be4SNeilBrown  *    lower_barrier when the particular background IO completes.
8241da177e4SLinus Torvalds  */
825e8096360SNeilBrown static void raise_barrier(struct r1conf *conf)
8261da177e4SLinus Torvalds {
8271da177e4SLinus Torvalds 	spin_lock_irq(&conf->resync_lock);
8281da177e4SLinus Torvalds 
82917999be4SNeilBrown 	/* Wait until no block IO is waiting */
83017999be4SNeilBrown 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
831eed8c02eSLukas Czerner 			    conf->resync_lock);
83217999be4SNeilBrown 
83317999be4SNeilBrown 	/* block any new IO from starting */
83417999be4SNeilBrown 	conf->barrier++;
83517999be4SNeilBrown 
83679ef3a8aSmajianpeng 	/* For these conditions we must wait:
83779ef3a8aSmajianpeng 	 * A: while the array is in the frozen state
83879ef3a8aSmajianpeng 	 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
83979ef3a8aSmajianpeng 	 *    the maximum count allowed.
84079ef3a8aSmajianpeng 	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
84179ef3a8aSmajianpeng 	 *    the next resync will reach into the window which normal bios
84279ef3a8aSmajianpeng 	 *    are handling.
84379ef3a8aSmajianpeng 	 */
84417999be4SNeilBrown 	wait_event_lock_irq(conf->wait_barrier,
845b364e3d0Smajianpeng 			    !conf->array_frozen &&
84679ef3a8aSmajianpeng 			    conf->barrier < RESYNC_DEPTH &&
84779ef3a8aSmajianpeng 			    (conf->start_next_window >=
84879ef3a8aSmajianpeng 			     conf->next_resync + RESYNC_SECTORS),
849eed8c02eSLukas Czerner 			    conf->resync_lock);
85017999be4SNeilBrown 
8511da177e4SLinus Torvalds 	spin_unlock_irq(&conf->resync_lock);
8521da177e4SLinus Torvalds }
8531da177e4SLinus Torvalds 
854e8096360SNeilBrown static void lower_barrier(struct r1conf *conf)
85517999be4SNeilBrown {
85617999be4SNeilBrown 	unsigned long flags;
857709ae487SNeilBrown 	BUG_ON(conf->barrier <= 0);
85817999be4SNeilBrown 	spin_lock_irqsave(&conf->resync_lock, flags);
85917999be4SNeilBrown 	conf->barrier--;
86017999be4SNeilBrown 	spin_unlock_irqrestore(&conf->resync_lock, flags);
86117999be4SNeilBrown 	wake_up(&conf->wait_barrier);
86217999be4SNeilBrown }
86317999be4SNeilBrown 
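/*
 * Decide whether a request must wait for resync activity.  Everything
 * waits while the array is frozen (or when called without a bio); with a
 * barrier raised, only writes that land near the current resync point,
 * neither safely below the resync window nor far enough above
 * next_resync, need to wait.
 */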
86479ef3a8aSmajianpeng static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
86517999be4SNeilBrown {
86679ef3a8aSmajianpeng 	bool wait = false;
86779ef3a8aSmajianpeng 
86879ef3a8aSmajianpeng 	if (conf->array_frozen || !bio)
86979ef3a8aSmajianpeng 		wait = true;
87079ef3a8aSmajianpeng 	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
87179ef3a8aSmajianpeng 		if (conf->next_resync < RESYNC_WINDOW_SECTORS)
87279ef3a8aSmajianpeng 			wait = true;
87379ef3a8aSmajianpeng 		else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
87479ef3a8aSmajianpeng 				>= bio_end_sector(bio)) ||
87579ef3a8aSmajianpeng 			 (conf->next_resync + NEXT_NORMALIO_DISTANCE
8764f024f37SKent Overstreet 				<= bio->bi_iter.bi_sector))
87779ef3a8aSmajianpeng 			wait = false;
87879ef3a8aSmajianpeng 		else
87979ef3a8aSmajianpeng 			wait = true;
88079ef3a8aSmajianpeng 	}
88179ef3a8aSmajianpeng 
88279ef3a8aSmajianpeng 	return wait;
88379ef3a8aSmajianpeng }
88479ef3a8aSmajianpeng 
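/*
 * Called for each normal IO request.  Wait until the request no longer
 * conflicts with resync (see need_to_wait_for_sync), account it in
 * nr_pending, and for writes beyond the current resync window return the
 * window start sector so allow_barrier() can balance the per-window
 * request counters.
 */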
88579ef3a8aSmajianpeng static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
88679ef3a8aSmajianpeng {
88779ef3a8aSmajianpeng 	sector_t sector = 0;
88879ef3a8aSmajianpeng 
88917999be4SNeilBrown 	spin_lock_irq(&conf->resync_lock);
89079ef3a8aSmajianpeng 	if (need_to_wait_for_sync(conf, bio)) {
89117999be4SNeilBrown 		conf->nr_waiting++;
892d6b42dcbSNeilBrown 		/* Wait for the barrier to drop.
893d6b42dcbSNeilBrown 		 * However if there are already pending
894d6b42dcbSNeilBrown 		 * requests (preventing the barrier from
895d6b42dcbSNeilBrown 		 * rising completely), and the
896d6b42dcbSNeilBrown 		 * pre-process bio queue isn't empty,
897d6b42dcbSNeilBrown 		 * then don't wait, as we need to empty
898d6b42dcbSNeilBrown 		 * that queue to get the nr_pending
899d6b42dcbSNeilBrown 		 * count down.
900d6b42dcbSNeilBrown 		 */
901d6b42dcbSNeilBrown 		wait_event_lock_irq(conf->wait_barrier,
902b364e3d0Smajianpeng 				    !conf->array_frozen &&
903b364e3d0Smajianpeng 				    (!conf->barrier ||
90479ef3a8aSmajianpeng 				    ((conf->start_next_window <
90579ef3a8aSmajianpeng 				      conf->next_resync + RESYNC_SECTORS) &&
906d6b42dcbSNeilBrown 				     current->bio_list &&
907b364e3d0Smajianpeng 				     !bio_list_empty(current->bio_list))),
908eed8c02eSLukas Czerner 				    conf->resync_lock);
90917999be4SNeilBrown 		conf->nr_waiting--;
91017999be4SNeilBrown 	}
91179ef3a8aSmajianpeng 
91279ef3a8aSmajianpeng 	if (bio && bio_data_dir(bio) == WRITE) {
91379ef3a8aSmajianpeng 		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
9144f024f37SKent Overstreet 		    <= bio->bi_iter.bi_sector) {
91579ef3a8aSmajianpeng 			if (conf->start_next_window == MaxSector)
91679ef3a8aSmajianpeng 				conf->start_next_window =
91779ef3a8aSmajianpeng 					conf->next_resync +
91879ef3a8aSmajianpeng 					NEXT_NORMALIO_DISTANCE;
91979ef3a8aSmajianpeng 
92079ef3a8aSmajianpeng 			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
9214f024f37SKent Overstreet 			    <= bio->bi_iter.bi_sector)
92279ef3a8aSmajianpeng 				conf->next_window_requests++;
92379ef3a8aSmajianpeng 			else
92479ef3a8aSmajianpeng 				conf->current_window_requests++;
92579ef3a8aSmajianpeng 			sector = conf->start_next_window;
92617999be4SNeilBrown 		}
92741a336e0SNeilBrown 	}
92817999be4SNeilBrown 
92979ef3a8aSmajianpeng 	conf->nr_pending++;
93079ef3a8aSmajianpeng 	spin_unlock_irq(&conf->resync_lock);
93179ef3a8aSmajianpeng 	return sector;
93279ef3a8aSmajianpeng }
93379ef3a8aSmajianpeng 
93479ef3a8aSmajianpeng static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
93579ef3a8aSmajianpeng 			  sector_t bi_sector)
93617999be4SNeilBrown {
93717999be4SNeilBrown 	unsigned long flags;
93879ef3a8aSmajianpeng 
93917999be4SNeilBrown 	spin_lock_irqsave(&conf->resync_lock, flags);
94017999be4SNeilBrown 	conf->nr_pending--;
94179ef3a8aSmajianpeng 	if (start_next_window) {
94279ef3a8aSmajianpeng 		if (start_next_window == conf->start_next_window) {
94379ef3a8aSmajianpeng 			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
94479ef3a8aSmajianpeng 			    <= bi_sector)
94579ef3a8aSmajianpeng 				conf->next_window_requests--;
94679ef3a8aSmajianpeng 			else
94779ef3a8aSmajianpeng 				conf->current_window_requests--;
94879ef3a8aSmajianpeng 		} else
94979ef3a8aSmajianpeng 			conf->current_window_requests--;
95079ef3a8aSmajianpeng 
95179ef3a8aSmajianpeng 		if (!conf->current_window_requests) {
95279ef3a8aSmajianpeng 			if (conf->next_window_requests) {
95379ef3a8aSmajianpeng 				conf->current_window_requests =
95479ef3a8aSmajianpeng 					conf->next_window_requests;
95579ef3a8aSmajianpeng 				conf->next_window_requests = 0;
95679ef3a8aSmajianpeng 				conf->start_next_window +=
95779ef3a8aSmajianpeng 					NEXT_NORMALIO_DISTANCE;
95879ef3a8aSmajianpeng 			} else
95979ef3a8aSmajianpeng 				conf->start_next_window = MaxSector;
96079ef3a8aSmajianpeng 		}
96179ef3a8aSmajianpeng 	}
96217999be4SNeilBrown 	spin_unlock_irqrestore(&conf->resync_lock, flags);
96317999be4SNeilBrown 	wake_up(&conf->wait_barrier);
96417999be4SNeilBrown }
96517999be4SNeilBrown 
966e2d59925SNeilBrown static void freeze_array(struct r1conf *conf, int extra)
967ddaf22abSNeilBrown {
968ddaf22abSNeilBrown 	/* stop sync IO and normal IO and wait for everything to
969ddaf22abSNeilBrown 	 * go quiet.
970b364e3d0Smajianpeng 	 * We wait until nr_pending matches nr_queued+extra
9711c830532SNeilBrown 	 * This is called in the context of one normal IO request
9721c830532SNeilBrown 	 * that has failed. Thus any sync request that might be pending
9731c830532SNeilBrown 	 * will be blocked by nr_pending, and we need to wait for
9741c830532SNeilBrown 	 * pending IO requests to complete or be queued for re-try.
975e2d59925SNeilBrown 	 * Thus the number queued (nr_queued) plus this request (extra)
9761c830532SNeilBrown 	 * must match the number of pending IOs (nr_pending) before
9771c830532SNeilBrown 	 * we continue.
978ddaf22abSNeilBrown 	 */
979ddaf22abSNeilBrown 	spin_lock_irq(&conf->resync_lock);
980b364e3d0Smajianpeng 	conf->array_frozen = 1;
981eed8c02eSLukas Czerner 	wait_event_lock_irq_cmd(conf->wait_barrier,
982e2d59925SNeilBrown 				conf->nr_pending == conf->nr_queued+extra,
983ddaf22abSNeilBrown 				conf->resync_lock,
984c3b328acSNeilBrown 				flush_pending_writes(conf));
985ddaf22abSNeilBrown 	spin_unlock_irq(&conf->resync_lock);
986ddaf22abSNeilBrown }
987e8096360SNeilBrown static void unfreeze_array(struct r1conf *conf)
988ddaf22abSNeilBrown {
989ddaf22abSNeilBrown 	/* reverse the effect of the freeze */
990ddaf22abSNeilBrown 	spin_lock_irq(&conf->resync_lock);
991b364e3d0Smajianpeng 	conf->array_frozen = 0;
992ddaf22abSNeilBrown 	wake_up(&conf->wait_barrier);
993ddaf22abSNeilBrown 	spin_unlock_irq(&conf->resync_lock);
994ddaf22abSNeilBrown }
995ddaf22abSNeilBrown 
99617999be4SNeilBrown 
9974e78064fSNeilBrown /* duplicate the data pages for behind I/O
9984e78064fSNeilBrown  */
9999f2c9d12SNeilBrown static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
10004b6d287fSNeilBrown {
10014b6d287fSNeilBrown 	int i;
10024b6d287fSNeilBrown 	struct bio_vec *bvec;
10032ca68f5eSNeilBrown 	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
10044b6d287fSNeilBrown 					GFP_NOIO);
10052ca68f5eSNeilBrown 	if (unlikely(!bvecs))
1006af6d7b76SNeilBrown 		return;
10074b6d287fSNeilBrown 
1008cb34e057SKent Overstreet 	bio_for_each_segment_all(bvec, bio, i) {
10092ca68f5eSNeilBrown 		bvecs[i] = *bvec;
10102ca68f5eSNeilBrown 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
10112ca68f5eSNeilBrown 		if (unlikely(!bvecs[i].bv_page))
10124b6d287fSNeilBrown 			goto do_sync_io;
10132ca68f5eSNeilBrown 		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
10144b6d287fSNeilBrown 		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
10152ca68f5eSNeilBrown 		kunmap(bvecs[i].bv_page);
10164b6d287fSNeilBrown 		kunmap(bvec->bv_page);
10174b6d287fSNeilBrown 	}
10182ca68f5eSNeilBrown 	r1_bio->behind_bvecs = bvecs;
1019af6d7b76SNeilBrown 	r1_bio->behind_page_count = bio->bi_vcnt;
1020af6d7b76SNeilBrown 	set_bit(R1BIO_BehindIO, &r1_bio->state);
1021af6d7b76SNeilBrown 	return;
10224b6d287fSNeilBrown 
10234b6d287fSNeilBrown do_sync_io:
1024af6d7b76SNeilBrown 	for (i = 0; i < bio->bi_vcnt; i++)
10252ca68f5eSNeilBrown 		if (bvecs[i].bv_page)
10262ca68f5eSNeilBrown 			put_page(bvecs[i].bv_page);
10272ca68f5eSNeilBrown 	kfree(bvecs);
10284f024f37SKent Overstreet 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
10294f024f37SKent Overstreet 		 bio->bi_iter.bi_size);
10304b6d287fSNeilBrown }
10314b6d287fSNeilBrown 
1032f54a9d0eSNeilBrown struct raid1_plug_cb {
1033f54a9d0eSNeilBrown 	struct blk_plug_cb	cb;
1034f54a9d0eSNeilBrown 	struct bio_list		pending;
1035f54a9d0eSNeilBrown 	int			pending_cnt;
1036f54a9d0eSNeilBrown };
1037f54a9d0eSNeilBrown 
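/*
 * blk-plug callback.  When called from a scheduler context (or while bios
 * are still queued on current->bio_list), hand the plugged writes to the
 * raid1d thread; otherwise flush the bitmap and submit them directly.
 */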
1038f54a9d0eSNeilBrown static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1039f54a9d0eSNeilBrown {
1040f54a9d0eSNeilBrown 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1041f54a9d0eSNeilBrown 						  cb);
1042f54a9d0eSNeilBrown 	struct mddev *mddev = plug->cb.data;
1043f54a9d0eSNeilBrown 	struct r1conf *conf = mddev->private;
1044f54a9d0eSNeilBrown 	struct bio *bio;
1045f54a9d0eSNeilBrown 
1046874807a8SNeilBrown 	if (from_schedule || current->bio_list) {
1047f54a9d0eSNeilBrown 		spin_lock_irq(&conf->device_lock);
1048f54a9d0eSNeilBrown 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1049f54a9d0eSNeilBrown 		conf->pending_count += plug->pending_cnt;
1050f54a9d0eSNeilBrown 		spin_unlock_irq(&conf->device_lock);
1051ee0b0244SNeilBrown 		wake_up(&conf->wait_barrier);
1052f54a9d0eSNeilBrown 		md_wakeup_thread(mddev->thread);
1053f54a9d0eSNeilBrown 		kfree(plug);
1054f54a9d0eSNeilBrown 		return;
1055f54a9d0eSNeilBrown 	}
1056f54a9d0eSNeilBrown 
1057f54a9d0eSNeilBrown 	/* we aren't scheduling, so we can do the write-out directly. */
1058f54a9d0eSNeilBrown 	bio = bio_list_get(&plug->pending);
1059f54a9d0eSNeilBrown 	bitmap_unplug(mddev->bitmap);
1060f54a9d0eSNeilBrown 	wake_up(&conf->wait_barrier);
1061f54a9d0eSNeilBrown 
1062f54a9d0eSNeilBrown 	while (bio) { /* submit pending writes */
1063f54a9d0eSNeilBrown 		struct bio *next = bio->bi_next;
1064f54a9d0eSNeilBrown 		bio->bi_next = NULL;
106532f9f570SShaohua Li 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
106632f9f570SShaohua Li 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
106732f9f570SShaohua Li 			/* Just ignore it */
106832f9f570SShaohua Li 			bio_endio(bio, 0);
106932f9f570SShaohua Li 		else
1070f54a9d0eSNeilBrown 			generic_make_request(bio);
1071f54a9d0eSNeilBrown 		bio = next;
1072f54a9d0eSNeilBrown 	}
1073f54a9d0eSNeilBrown 	kfree(plug);
1074f54a9d0eSNeilBrown }
1075f54a9d0eSNeilBrown 
1076b4fdcb02SLinus Torvalds static void make_request(struct mddev *mddev, struct bio * bio)
10771da177e4SLinus Torvalds {
1078e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
10790eaf822cSJonathan Brassow 	struct raid1_info *mirror;
10809f2c9d12SNeilBrown 	struct r1bio *r1_bio;
10811da177e4SLinus Torvalds 	struct bio *read_bio;
10821f68f0c4SNeilBrown 	int i, disks;
108384255d10SNeilBrown 	struct bitmap *bitmap;
1084191ea9b2SNeilBrown 	unsigned long flags;
1085a362357bSJens Axboe 	const int rw = bio_data_dir(bio);
10862c7d46ecSNeilBrown 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1087e9c7469bSTejun Heo 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
10882ff8cc2cSShaohua Li 	const unsigned long do_discard = (bio->bi_rw
10892ff8cc2cSShaohua Li 					  & (REQ_DISCARD | REQ_SECURE));
1090c8dc9c65SJoe Lawrence 	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
10913cb03002SNeilBrown 	struct md_rdev *blocked_rdev;
1092f54a9d0eSNeilBrown 	struct blk_plug_cb *cb;
1093f54a9d0eSNeilBrown 	struct raid1_plug_cb *plug = NULL;
10941f68f0c4SNeilBrown 	int first_clone;
10951f68f0c4SNeilBrown 	int sectors_handled;
10961f68f0c4SNeilBrown 	int max_sectors;
109779ef3a8aSmajianpeng 	sector_t start_next_window;
1098191ea9b2SNeilBrown 
10991da177e4SLinus Torvalds 	/*
11001da177e4SLinus Torvalds 	 * Register the new request and wait if the reconstruction
11011da177e4SLinus Torvalds 	 * thread has put up a barrier for new requests.
11021da177e4SLinus Torvalds 	 * Continue immediately if no resync is active currently.
11031da177e4SLinus Torvalds 	 */
110462de608dSNeilBrown 
11053d310eb7SNeilBrown 	md_write_start(mddev, bio); /* wait on superblock update early */
11063d310eb7SNeilBrown 
11076eef4b21SNeilBrown 	if (bio_data_dir(bio) == WRITE &&
1108f73a1c7dSKent Overstreet 	    bio_end_sector(bio) > mddev->suspend_lo &&
11094f024f37SKent Overstreet 	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
11106eef4b21SNeilBrown 		/* As the suspend_* range is controlled by
11116eef4b21SNeilBrown 		 * userspace, we want an interruptible
11126eef4b21SNeilBrown 		 * wait.
11136eef4b21SNeilBrown 		 */
11146eef4b21SNeilBrown 		DEFINE_WAIT(w);
11156eef4b21SNeilBrown 		for (;;) {
11166eef4b21SNeilBrown 			flush_signals(current);
11176eef4b21SNeilBrown 			prepare_to_wait(&conf->wait_barrier,
11186eef4b21SNeilBrown 					&w, TASK_INTERRUPTIBLE);
1119f73a1c7dSKent Overstreet 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
11204f024f37SKent Overstreet 			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
11216eef4b21SNeilBrown 				break;
11226eef4b21SNeilBrown 			schedule();
11236eef4b21SNeilBrown 		}
11246eef4b21SNeilBrown 		finish_wait(&conf->wait_barrier, &w);
11256eef4b21SNeilBrown 	}
112662de608dSNeilBrown 
112779ef3a8aSmajianpeng 	start_next_window = wait_barrier(conf, bio);
11281da177e4SLinus Torvalds 
112984255d10SNeilBrown 	bitmap = mddev->bitmap;
113084255d10SNeilBrown 
11311da177e4SLinus Torvalds 	/*
11321da177e4SLinus Torvalds 	 * make_request() can abort the operation when READA is being
11331da177e4SLinus Torvalds 	 * used and no empty request is available.
11341da177e4SLinus Torvalds 	 *
11351da177e4SLinus Torvalds 	 */
11361da177e4SLinus Torvalds 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds 	r1_bio->master_bio = bio;
1139aa8b57aaSKent Overstreet 	r1_bio->sectors = bio_sectors(bio);
1140191ea9b2SNeilBrown 	r1_bio->state = 0;
11411da177e4SLinus Torvalds 	r1_bio->mddev = mddev;
11424f024f37SKent Overstreet 	r1_bio->sector = bio->bi_iter.bi_sector;
11431da177e4SLinus Torvalds 
1144d2eb35acSNeilBrown 	/* We might need to issue multiple reads to different
1145d2eb35acSNeilBrown 	 * devices if there are bad blocks around, so we keep
1146d2eb35acSNeilBrown 	 * track of the number of reads in bio->bi_phys_segments.
1147d2eb35acSNeilBrown 	 * If this is 0, there is only one r1_bio and no locking
1148d2eb35acSNeilBrown 	 * will be needed when requests complete.  If it is
1149d2eb35acSNeilBrown 	 * non-zero, then it is the number of not-completed requests.
1150d2eb35acSNeilBrown 	 */
1151d2eb35acSNeilBrown 	bio->bi_phys_segments = 0;
1152d2eb35acSNeilBrown 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1153d2eb35acSNeilBrown 
1154a362357bSJens Axboe 	if (rw == READ) {
11551da177e4SLinus Torvalds 		/*
11561da177e4SLinus Torvalds 		 * read balancing logic:
11571da177e4SLinus Torvalds 		 */
1158d2eb35acSNeilBrown 		int rdisk;
1159d2eb35acSNeilBrown 
1160d2eb35acSNeilBrown read_again:
1161d2eb35acSNeilBrown 		rdisk = read_balance(conf, r1_bio, &max_sectors);
11621da177e4SLinus Torvalds 
11631da177e4SLinus Torvalds 		if (rdisk < 0) {
11641da177e4SLinus Torvalds 			/* couldn't find anywhere to read from */
11651da177e4SLinus Torvalds 			raid_end_bio_io(r1_bio);
11665a7bbad2SChristoph Hellwig 			return;
11671da177e4SLinus Torvalds 		}
11681da177e4SLinus Torvalds 		mirror = conf->mirrors + rdisk;
11691da177e4SLinus Torvalds 
1170e555190dSNeilBrown 		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1171e555190dSNeilBrown 		    bitmap) {
1172e555190dSNeilBrown 			/* Reading from a write-mostly device must
1173e555190dSNeilBrown 			 * take care not to over-take any writes
1174e555190dSNeilBrown 			 * that are 'behind'
1175e555190dSNeilBrown 			 */
1176e555190dSNeilBrown 			wait_event(bitmap->behind_wait,
1177e555190dSNeilBrown 				   atomic_read(&bitmap->behind_writes) == 0);
1178e555190dSNeilBrown 		}
11791da177e4SLinus Torvalds 		r1_bio->read_disk = rdisk;
11801da177e4SLinus Torvalds 
1181a167f663SNeilBrown 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
11824f024f37SKent Overstreet 		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1183d2eb35acSNeilBrown 			 max_sectors);
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds 		r1_bio->bios[rdisk] = read_bio;
11861da177e4SLinus Torvalds 
11874f024f37SKent Overstreet 		read_bio->bi_iter.bi_sector = r1_bio->sector +
11884f024f37SKent Overstreet 			mirror->rdev->data_offset;
11891da177e4SLinus Torvalds 		read_bio->bi_bdev = mirror->rdev->bdev;
11901da177e4SLinus Torvalds 		read_bio->bi_end_io = raid1_end_read_request;
11917b6d91daSChristoph Hellwig 		read_bio->bi_rw = READ | do_sync;
11921da177e4SLinus Torvalds 		read_bio->bi_private = r1_bio;
11931da177e4SLinus Torvalds 
1194d2eb35acSNeilBrown 		if (max_sectors < r1_bio->sectors) {
1195d2eb35acSNeilBrown 			/* could not read all from this device, so we will
1196d2eb35acSNeilBrown 			 * need another r1_bio.
1197d2eb35acSNeilBrown 			 */
1198d2eb35acSNeilBrown 
1199d2eb35acSNeilBrown 			sectors_handled = (r1_bio->sector + max_sectors
12004f024f37SKent Overstreet 					   - bio->bi_iter.bi_sector);
1201d2eb35acSNeilBrown 			r1_bio->sectors = max_sectors;
1202d2eb35acSNeilBrown 			spin_lock_irq(&conf->device_lock);
1203d2eb35acSNeilBrown 			if (bio->bi_phys_segments == 0)
1204d2eb35acSNeilBrown 				bio->bi_phys_segments = 2;
1205d2eb35acSNeilBrown 			else
1206d2eb35acSNeilBrown 				bio->bi_phys_segments++;
1207d2eb35acSNeilBrown 			spin_unlock_irq(&conf->device_lock);
1208d2eb35acSNeilBrown 			/* Cannot call generic_make_request directly
1209d2eb35acSNeilBrown 			 * as that will be queued in __make_request
1210d2eb35acSNeilBrown 			 * and subsequent mempool_alloc might block waiting
1211d2eb35acSNeilBrown 			 * for it.  So hand bio over to raid1d.
1212d2eb35acSNeilBrown 			 */
1213d2eb35acSNeilBrown 			reschedule_retry(r1_bio);
1214d2eb35acSNeilBrown 
1215d2eb35acSNeilBrown 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1216d2eb35acSNeilBrown 
1217d2eb35acSNeilBrown 			r1_bio->master_bio = bio;
1218aa8b57aaSKent Overstreet 			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1219d2eb35acSNeilBrown 			r1_bio->state = 0;
1220d2eb35acSNeilBrown 			r1_bio->mddev = mddev;
12214f024f37SKent Overstreet 			r1_bio->sector = bio->bi_iter.bi_sector +
12224f024f37SKent Overstreet 				sectors_handled;
1223d2eb35acSNeilBrown 			goto read_again;
1224d2eb35acSNeilBrown 		} else
12251da177e4SLinus Torvalds 			generic_make_request(read_bio);
12265a7bbad2SChristoph Hellwig 		return;
12271da177e4SLinus Torvalds 	}
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds 	/*
12301da177e4SLinus Torvalds 	 * WRITE:
12311da177e4SLinus Torvalds 	 */
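	/*
	 * Added annotation: throttle writers if raid1d already has a long
	 * backlog of queued writes; wake the thread and wait until
	 * pending_count drops below max_queued_requests before queueing more.
	 */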
123234db0cd6SNeilBrown 	if (conf->pending_count >= max_queued_requests) {
123334db0cd6SNeilBrown 		md_wakeup_thread(mddev->thread);
123434db0cd6SNeilBrown 		wait_event(conf->wait_barrier,
123534db0cd6SNeilBrown 			   conf->pending_count < max_queued_requests);
123634db0cd6SNeilBrown 	}
12371f68f0c4SNeilBrown 	/* first select target devices under rcu_read_lock() and
12381da177e4SLinus Torvalds 	 * inc refcount on their rdev.  Record them by setting
12391da177e4SLinus Torvalds 	 * bios[x] to bio.
12401f68f0c4SNeilBrown 	 * If there are known/acknowledged bad blocks on any device on
12411f68f0c4SNeilBrown 	 * which we have seen a write error, we want to avoid writing those
12421f68f0c4SNeilBrown 	 * blocks.
12431f68f0c4SNeilBrown 	 * This potentially requires several writes to write around
12441f68f0c4SNeilBrown 	 * the bad blocks.  Each set of writes gets its own r1bio
12451f68f0c4SNeilBrown 	 * with a set of bios attached.
12461da177e4SLinus Torvalds 	 */
1247c3b328acSNeilBrown 
12488f19ccb2SNeilBrown 	disks = conf->raid_disks * 2;
12496bfe0b49SDan Williams  retry_write:
125079ef3a8aSmajianpeng 	r1_bio->start_next_window = start_next_window;
12516bfe0b49SDan Williams 	blocked_rdev = NULL;
12521da177e4SLinus Torvalds 	rcu_read_lock();
12531f68f0c4SNeilBrown 	max_sectors = r1_bio->sectors;
12541da177e4SLinus Torvalds 	for (i = 0;  i < disks; i++) {
12553cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
12566bfe0b49SDan Williams 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
12576bfe0b49SDan Williams 			atomic_inc(&rdev->nr_pending);
12586bfe0b49SDan Williams 			blocked_rdev = rdev;
12596bfe0b49SDan Williams 			break;
12606bfe0b49SDan Williams 		}
12611da177e4SLinus Torvalds 		r1_bio->bios[i] = NULL;
12626b740b8dSNeilBrown 		if (!rdev || test_bit(Faulty, &rdev->flags)
12636b740b8dSNeilBrown 		    || test_bit(Unmerged, &rdev->flags)) {
12648f19ccb2SNeilBrown 			if (i < conf->raid_disks)
12651f68f0c4SNeilBrown 				set_bit(R1BIO_Degraded, &r1_bio->state);
12661f68f0c4SNeilBrown 			continue;
1267964147d5SNeilBrown 		}
12681f68f0c4SNeilBrown 
12691f68f0c4SNeilBrown 		atomic_inc(&rdev->nr_pending);
12701f68f0c4SNeilBrown 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
12711f68f0c4SNeilBrown 			sector_t first_bad;
12721f68f0c4SNeilBrown 			int bad_sectors;
12731f68f0c4SNeilBrown 			int is_bad;
12741f68f0c4SNeilBrown 
12751f68f0c4SNeilBrown 			is_bad = is_badblock(rdev, r1_bio->sector,
12761f68f0c4SNeilBrown 					     max_sectors,
12771f68f0c4SNeilBrown 					     &first_bad, &bad_sectors);
12781f68f0c4SNeilBrown 			if (is_bad < 0) {
12791f68f0c4SNeilBrown 				/* mustn't write here until the bad block is
12801f68f0c4SNeilBrown 				 * acknowledged */
12811f68f0c4SNeilBrown 				set_bit(BlockedBadBlocks, &rdev->flags);
12821f68f0c4SNeilBrown 				blocked_rdev = rdev;
12831f68f0c4SNeilBrown 				break;
12841f68f0c4SNeilBrown 			}
12851f68f0c4SNeilBrown 			if (is_bad && first_bad <= r1_bio->sector) {
12861f68f0c4SNeilBrown 				/* Cannot write here at all */
12871f68f0c4SNeilBrown 				bad_sectors -= (r1_bio->sector - first_bad);
12881f68f0c4SNeilBrown 				if (bad_sectors < max_sectors)
12891f68f0c4SNeilBrown 					/* mustn't write more than bad_sectors
12901f68f0c4SNeilBrown 					 * to other devices yet
12911f68f0c4SNeilBrown 					 */
12921f68f0c4SNeilBrown 					max_sectors = bad_sectors;
12931f68f0c4SNeilBrown 				rdev_dec_pending(rdev, mddev);
12941f68f0c4SNeilBrown 				/* We don't set R1BIO_Degraded as that
12951f68f0c4SNeilBrown 				 * only applies if the disk is
12961f68f0c4SNeilBrown 				 * missing, so it might be re-added,
12971f68f0c4SNeilBrown 				 * and we want to know that we must
12981f68f0c4SNeilBrown 				 * recover this chunk.
12991f68f0c4SNeilBrown 				 * In this case the device is here,
13001f68f0c4SNeilBrown 				 * and the fact that this chunk is not
13011f68f0c4SNeilBrown 				 * in-sync is recorded in the bad
13021f68f0c4SNeilBrown 				 * block log
13031f68f0c4SNeilBrown 				 */
13041f68f0c4SNeilBrown 				continue;
13051f68f0c4SNeilBrown 			}
13061f68f0c4SNeilBrown 			if (is_bad) {
13071f68f0c4SNeilBrown 				int good_sectors = first_bad - r1_bio->sector;
13081f68f0c4SNeilBrown 				if (good_sectors < max_sectors)
13091f68f0c4SNeilBrown 					max_sectors = good_sectors;
13101f68f0c4SNeilBrown 			}
13111f68f0c4SNeilBrown 		}
13121f68f0c4SNeilBrown 		r1_bio->bios[i] = bio;
13131da177e4SLinus Torvalds 	}
13141da177e4SLinus Torvalds 	rcu_read_unlock();
13151da177e4SLinus Torvalds 
13166bfe0b49SDan Williams 	if (unlikely(blocked_rdev)) {
13176bfe0b49SDan Williams 		/* Wait for this device to become unblocked */
13186bfe0b49SDan Williams 		int j;
131979ef3a8aSmajianpeng 		sector_t old = start_next_window;
13206bfe0b49SDan Williams 
13216bfe0b49SDan Williams 		for (j = 0; j < i; j++)
13226bfe0b49SDan Williams 			if (r1_bio->bios[j])
13236bfe0b49SDan Williams 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
13241f68f0c4SNeilBrown 		r1_bio->state = 0;
13254f024f37SKent Overstreet 		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
13266bfe0b49SDan Williams 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
132779ef3a8aSmajianpeng 		start_next_window = wait_barrier(conf, bio);
132879ef3a8aSmajianpeng 		/*
132979ef3a8aSmajianpeng 		 * We must make sure that all r1bios split from this bio
133079ef3a8aSmajianpeng 		 * see the same value of bi_phys_segments.
133179ef3a8aSmajianpeng 		 */
133279ef3a8aSmajianpeng 		if (bio->bi_phys_segments && old &&
133379ef3a8aSmajianpeng 		    old != start_next_window)
133479ef3a8aSmajianpeng 			/* Wait for the former r1bio(s) to complete */
133579ef3a8aSmajianpeng 			wait_event(conf->wait_barrier,
133679ef3a8aSmajianpeng 				   bio->bi_phys_segments == 1);
13376bfe0b49SDan Williams 		goto retry_write;
13386bfe0b49SDan Williams 	}
13396bfe0b49SDan Williams 
13401f68f0c4SNeilBrown 	if (max_sectors < r1_bio->sectors) {
13411f68f0c4SNeilBrown 		/* We are splitting this write into multiple parts, so
13421f68f0c4SNeilBrown 		 * we need to prepare for allocating another r1_bio.
13431f68f0c4SNeilBrown 		 */
13441f68f0c4SNeilBrown 		r1_bio->sectors = max_sectors;
13451f68f0c4SNeilBrown 		spin_lock_irq(&conf->device_lock);
13461f68f0c4SNeilBrown 		if (bio->bi_phys_segments == 0)
13471f68f0c4SNeilBrown 			bio->bi_phys_segments = 2;
13481f68f0c4SNeilBrown 		else
13491f68f0c4SNeilBrown 			bio->bi_phys_segments++;
13501f68f0c4SNeilBrown 		spin_unlock_irq(&conf->device_lock);
1351191ea9b2SNeilBrown 	}
13524f024f37SKent Overstreet 	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
13534b6d287fSNeilBrown 
13544e78064fSNeilBrown 	atomic_set(&r1_bio->remaining, 1);
13554b6d287fSNeilBrown 	atomic_set(&r1_bio->behind_remaining, 0);
1356191ea9b2SNeilBrown 
13571f68f0c4SNeilBrown 	first_clone = 1;
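	/*
	 * Added annotation: clone the master bio once per selected device.
	 * Only the first clone allocates write-behind pages and calls
	 * bitmap_startwrite(), so the per-range bitmap accounting happens
	 * exactly once for this r1_bio.
	 */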
13581da177e4SLinus Torvalds 	for (i = 0; i < disks; i++) {
13591da177e4SLinus Torvalds 		struct bio *mbio;
13601da177e4SLinus Torvalds 		if (!r1_bio->bios[i])
13611da177e4SLinus Torvalds 			continue;
13621da177e4SLinus Torvalds 
1363a167f663SNeilBrown 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
13644f024f37SKent Overstreet 		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
13651da177e4SLinus Torvalds 
13661f68f0c4SNeilBrown 		if (first_clone) {
13671f68f0c4SNeilBrown 			/* do behind I/O?
13681f68f0c4SNeilBrown 			 * Not if there are too many, or we cannot
13691f68f0c4SNeilBrown 			 * allocate memory, or a reader on a WriteMostly
13701f68f0c4SNeilBrown 			 * device is waiting for behind writes to flush */
13711f68f0c4SNeilBrown 			if (bitmap &&
13721f68f0c4SNeilBrown 			    (atomic_read(&bitmap->behind_writes)
13731f68f0c4SNeilBrown 			     < mddev->bitmap_info.max_write_behind) &&
13741f68f0c4SNeilBrown 			    !waitqueue_active(&bitmap->behind_wait))
13751f68f0c4SNeilBrown 				alloc_behind_pages(mbio, r1_bio);
13761da177e4SLinus Torvalds 
13771f68f0c4SNeilBrown 			bitmap_startwrite(bitmap, r1_bio->sector,
13781f68f0c4SNeilBrown 					  r1_bio->sectors,
13791f68f0c4SNeilBrown 					  test_bit(R1BIO_BehindIO,
13801f68f0c4SNeilBrown 						   &r1_bio->state));
13811f68f0c4SNeilBrown 			first_clone = 0;
13821f68f0c4SNeilBrown 		}
13832ca68f5eSNeilBrown 		if (r1_bio->behind_bvecs) {
13844b6d287fSNeilBrown 			struct bio_vec *bvec;
13854b6d287fSNeilBrown 			int j;
13864b6d287fSNeilBrown 
1387cb34e057SKent Overstreet 			/*
1388cb34e057SKent Overstreet 			 * We trimmed the bio, so iterating with the _all variant is legit
13894b6d287fSNeilBrown 			 */
1390d74c6d51SKent Overstreet 			bio_for_each_segment_all(bvec, mbio, j)
13912ca68f5eSNeilBrown 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
13924b6d287fSNeilBrown 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
13934b6d287fSNeilBrown 				atomic_inc(&r1_bio->behind_remaining);
13944b6d287fSNeilBrown 		}
13954b6d287fSNeilBrown 
13961f68f0c4SNeilBrown 		r1_bio->bios[i] = mbio;
13971f68f0c4SNeilBrown 
13984f024f37SKent Overstreet 		mbio->bi_iter.bi_sector	= (r1_bio->sector +
13991f68f0c4SNeilBrown 				   conf->mirrors[i].rdev->data_offset);
14001f68f0c4SNeilBrown 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
14011f68f0c4SNeilBrown 		mbio->bi_end_io	= raid1_end_write_request;
1402c8dc9c65SJoe Lawrence 		mbio->bi_rw =
1403c8dc9c65SJoe Lawrence 			WRITE | do_flush_fua | do_sync | do_discard | do_same;
14041f68f0c4SNeilBrown 		mbio->bi_private = r1_bio;
14051f68f0c4SNeilBrown 
14061da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
1407f54a9d0eSNeilBrown 
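		/*
		 * Added annotation: batch this write on the caller's blk plug
		 * when one is active; otherwise queue it on
		 * conf->pending_bio_list and wake raid1d to submit it.
		 */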
1408f54a9d0eSNeilBrown 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1409f54a9d0eSNeilBrown 		if (cb)
1410f54a9d0eSNeilBrown 			plug = container_of(cb, struct raid1_plug_cb, cb);
1411f54a9d0eSNeilBrown 		else
1412f54a9d0eSNeilBrown 			plug = NULL;
1413191ea9b2SNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
1414f54a9d0eSNeilBrown 		if (plug) {
1415f54a9d0eSNeilBrown 			bio_list_add(&plug->pending, mbio);
1416f54a9d0eSNeilBrown 			plug->pending_cnt++;
1417f54a9d0eSNeilBrown 		} else {
14184e78064fSNeilBrown 			bio_list_add(&conf->pending_bio_list, mbio);
141934db0cd6SNeilBrown 			conf->pending_count++;
1420f54a9d0eSNeilBrown 		}
1421191ea9b2SNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
1422f54a9d0eSNeilBrown 		if (!plug)
1423b357f04aSNeilBrown 			md_wakeup_thread(mddev->thread);
14244e78064fSNeilBrown 	}
1425079fa166SNeilBrown 	/* Mustn't call r1_bio_write_done before this next test,
1426079fa166SNeilBrown 	 * as it could result in the bio being freed.
1427079fa166SNeilBrown 	 */
1428aa8b57aaSKent Overstreet 	if (sectors_handled < bio_sectors(bio)) {
1429079fa166SNeilBrown 		r1_bio_write_done(r1_bio);
14301f68f0c4SNeilBrown 		/* We need another r1_bio.  It has already been counted
14311f68f0c4SNeilBrown 		 * in bio->bi_phys_segments
14321f68f0c4SNeilBrown 		 */
14331f68f0c4SNeilBrown 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
14341f68f0c4SNeilBrown 		r1_bio->master_bio = bio;
1435aa8b57aaSKent Overstreet 		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
14361f68f0c4SNeilBrown 		r1_bio->state = 0;
14371f68f0c4SNeilBrown 		r1_bio->mddev = mddev;
14384f024f37SKent Overstreet 		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
14391f68f0c4SNeilBrown 		goto retry_write;
14401f68f0c4SNeilBrown 	}
14411f68f0c4SNeilBrown 
1442079fa166SNeilBrown 	r1_bio_write_done(r1_bio);
1443079fa166SNeilBrown 
1444079fa166SNeilBrown 	/* In case raid1d snuck in to freeze_array */
1445079fa166SNeilBrown 	wake_up(&conf->wait_barrier);
14461da177e4SLinus Torvalds }
14471da177e4SLinus Torvalds 
1448fd01b88cSNeilBrown static void status(struct seq_file *seq, struct mddev *mddev)
14491da177e4SLinus Torvalds {
1450e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
14511da177e4SLinus Torvalds 	int i;
14521da177e4SLinus Torvalds 
14531da177e4SLinus Torvalds 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
145411ce99e6SNeilBrown 		   conf->raid_disks - mddev->degraded);
1455ddac7c7eSNeilBrown 	rcu_read_lock();
1456ddac7c7eSNeilBrown 	for (i = 0; i < conf->raid_disks; i++) {
14573cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
14581da177e4SLinus Torvalds 		seq_printf(seq, "%s",
1459ddac7c7eSNeilBrown 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1460ddac7c7eSNeilBrown 	}
1461ddac7c7eSNeilBrown 	rcu_read_unlock();
14621da177e4SLinus Torvalds 	seq_printf(seq, "]");
14631da177e4SLinus Torvalds }
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds 
1466fd01b88cSNeilBrown static void error(struct mddev *mddev, struct md_rdev *rdev)
14671da177e4SLinus Torvalds {
14681da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1469e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds 	/*
14721da177e4SLinus Torvalds 	 * If it is not operational, then we have already marked it as dead;
14731da177e4SLinus Torvalds 	 * else if it is the last working disk, ignore the error and let the
14741da177e4SLinus Torvalds 	 * next level up know;
14751da177e4SLinus Torvalds 	 * else mark the drive as failed.
14761da177e4SLinus Torvalds 	 */
1477b2d444d7SNeilBrown 	if (test_bit(In_sync, &rdev->flags)
14784044ba58SNeilBrown 	    && (conf->raid_disks - mddev->degraded) == 1) {
14791da177e4SLinus Torvalds 		/*
14801da177e4SLinus Torvalds 		 * Don't fail the drive, act as though we were just a
14814044ba58SNeilBrown 		 * normal single drive.
14824044ba58SNeilBrown 		 * However don't try a recovery from this drive as
14834044ba58SNeilBrown 		 * it is very likely to fail.
14841da177e4SLinus Torvalds 		 */
14855389042fSNeilBrown 		conf->recovery_disabled = mddev->recovery_disabled;
14861da177e4SLinus Torvalds 		return;
14874044ba58SNeilBrown 	}
1488de393cdeSNeilBrown 	set_bit(Blocked, &rdev->flags);
1489c04be0aaSNeilBrown 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1490c04be0aaSNeilBrown 		unsigned long flags;
1491c04be0aaSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
14921da177e4SLinus Torvalds 		mddev->degraded++;
1493dd00a99eSNeilBrown 		set_bit(Faulty, &rdev->flags);
1494c04be0aaSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
14951da177e4SLinus Torvalds 		/*
14961da177e4SLinus Torvalds 		 * if recovery is running, make sure it aborts.
14971da177e4SLinus Torvalds 		 */
1498dfc70645SNeilBrown 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1499dd00a99eSNeilBrown 	} else
1500b2d444d7SNeilBrown 		set_bit(Faulty, &rdev->flags);
1501850b2b42SNeilBrown 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1502067032bcSJoe Perches 	printk(KERN_ALERT
1503067032bcSJoe Perches 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
1504067032bcSJoe Perches 	       "md/raid1:%s: Operation continuing on %d devices.\n",
15059dd1e2faSNeilBrown 	       mdname(mddev), bdevname(rdev->bdev, b),
15069dd1e2faSNeilBrown 	       mdname(mddev), conf->raid_disks - mddev->degraded);
15071da177e4SLinus Torvalds }
15081da177e4SLinus Torvalds 
1509e8096360SNeilBrown static void print_conf(struct r1conf *conf)
15101da177e4SLinus Torvalds {
15111da177e4SLinus Torvalds 	int i;
15121da177e4SLinus Torvalds 
15139dd1e2faSNeilBrown 	printk(KERN_DEBUG "RAID1 conf printout:\n");
15141da177e4SLinus Torvalds 	if (!conf) {
15159dd1e2faSNeilBrown 		printk(KERN_DEBUG "(!conf)\n");
15161da177e4SLinus Torvalds 		return;
15171da177e4SLinus Torvalds 	}
15189dd1e2faSNeilBrown 	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
15191da177e4SLinus Torvalds 		conf->raid_disks);
15201da177e4SLinus Torvalds 
1521ddac7c7eSNeilBrown 	rcu_read_lock();
15221da177e4SLinus Torvalds 	for (i = 0; i < conf->raid_disks; i++) {
15231da177e4SLinus Torvalds 		char b[BDEVNAME_SIZE];
15243cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1525ddac7c7eSNeilBrown 		if (rdev)
15269dd1e2faSNeilBrown 			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1527ddac7c7eSNeilBrown 			       i, !test_bit(In_sync, &rdev->flags),
1528ddac7c7eSNeilBrown 			       !test_bit(Faulty, &rdev->flags),
1529ddac7c7eSNeilBrown 			       bdevname(rdev->bdev,b));
15301da177e4SLinus Torvalds 	}
1531ddac7c7eSNeilBrown 	rcu_read_unlock();
15321da177e4SLinus Torvalds }
15331da177e4SLinus Torvalds 
1534e8096360SNeilBrown static void close_sync(struct r1conf *conf)
15351da177e4SLinus Torvalds {
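	/*
	 * Added annotation: pass through the barrier once, so close_sync()
	 * does not proceed while a resync barrier is still raised; after
	 * this it is safe to destroy the resync buffer pool below.
	 */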
153679ef3a8aSmajianpeng 	wait_barrier(conf, NULL);
153779ef3a8aSmajianpeng 	allow_barrier(conf, 0, 0);
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds 	mempool_destroy(conf->r1buf_pool);
15401da177e4SLinus Torvalds 	conf->r1buf_pool = NULL;
154179ef3a8aSmajianpeng 
154279ef3a8aSmajianpeng 	conf->next_resync = 0;
154379ef3a8aSmajianpeng 	conf->start_next_window = MaxSector;
15441da177e4SLinus Torvalds }
15451da177e4SLinus Torvalds 
1546fd01b88cSNeilBrown static int raid1_spare_active(struct mddev *mddev)
15471da177e4SLinus Torvalds {
15481da177e4SLinus Torvalds 	int i;
1549e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
15506b965620SNeilBrown 	int count = 0;
15516b965620SNeilBrown 	unsigned long flags;
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds 	/*
15541da177e4SLinus Torvalds 	 * Find all failed disks within the RAID1 configuration
1555ddac7c7eSNeilBrown 	 * and mark them readable.
1556ddac7c7eSNeilBrown 	 * Called under mddev lock, so rcu protection not needed.
15571da177e4SLinus Torvalds 	 */
15581da177e4SLinus Torvalds 	for (i = 0; i < conf->raid_disks; i++) {
15593cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[i].rdev;
15608c7a2c2bSNeilBrown 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
15618c7a2c2bSNeilBrown 		if (repl
15628c7a2c2bSNeilBrown 		    && repl->recovery_offset == MaxSector
15638c7a2c2bSNeilBrown 		    && !test_bit(Faulty, &repl->flags)
15648c7a2c2bSNeilBrown 		    && !test_and_set_bit(In_sync, &repl->flags)) {
15658c7a2c2bSNeilBrown 			/* replacement has just become active */
15668c7a2c2bSNeilBrown 			if (!rdev ||
15678c7a2c2bSNeilBrown 			    !test_and_clear_bit(In_sync, &rdev->flags))
15688c7a2c2bSNeilBrown 				count++;
15698c7a2c2bSNeilBrown 			if (rdev) {
15708c7a2c2bSNeilBrown 				/* Replaced device not technically
15718c7a2c2bSNeilBrown 				 * faulty, but we need to be sure
15728c7a2c2bSNeilBrown 				 * it gets removed and never re-added
15738c7a2c2bSNeilBrown 				 */
15748c7a2c2bSNeilBrown 				set_bit(Faulty, &rdev->flags);
15758c7a2c2bSNeilBrown 				sysfs_notify_dirent_safe(
15768c7a2c2bSNeilBrown 					rdev->sysfs_state);
15778c7a2c2bSNeilBrown 			}
15788c7a2c2bSNeilBrown 		}
1579ddac7c7eSNeilBrown 		if (rdev
158061e4947cSLukasz Dorau 		    && rdev->recovery_offset == MaxSector
1581ddac7c7eSNeilBrown 		    && !test_bit(Faulty, &rdev->flags)
1582c04be0aaSNeilBrown 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
15836b965620SNeilBrown 			count++;
1584654e8b5aSJonathan Brassow 			sysfs_notify_dirent_safe(rdev->sysfs_state);
15851da177e4SLinus Torvalds 		}
15861da177e4SLinus Torvalds 	}
15876b965620SNeilBrown 	spin_lock_irqsave(&conf->device_lock, flags);
15886b965620SNeilBrown 	mddev->degraded -= count;
15896b965620SNeilBrown 	spin_unlock_irqrestore(&conf->device_lock, flags);
15901da177e4SLinus Torvalds 
15911da177e4SLinus Torvalds 	print_conf(conf);
15926b965620SNeilBrown 	return count;
15931da177e4SLinus Torvalds }
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds 
1596fd01b88cSNeilBrown static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
15971da177e4SLinus Torvalds {
1598e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1599199050eaSNeil Brown 	int err = -EEXIST;
160041158c7eSNeilBrown 	int mirror = 0;
16010eaf822cSJonathan Brassow 	struct raid1_info *p;
16026c2fce2eSNeil Brown 	int first = 0;
160330194636SNeilBrown 	int last = conf->raid_disks - 1;
16046b740b8dSNeilBrown 	struct request_queue *q = bdev_get_queue(rdev->bdev);
16051da177e4SLinus Torvalds 
16065389042fSNeilBrown 	if (mddev->recovery_disabled == conf->recovery_disabled)
16075389042fSNeilBrown 		return -EBUSY;
16085389042fSNeilBrown 
16096c2fce2eSNeil Brown 	if (rdev->raid_disk >= 0)
16106c2fce2eSNeil Brown 		first = last = rdev->raid_disk;
16116c2fce2eSNeil Brown 
16126b740b8dSNeilBrown 	if (q->merge_bvec_fn) {
16136b740b8dSNeilBrown 		set_bit(Unmerged, &rdev->flags);
16146b740b8dSNeilBrown 		mddev->merge_check_needed = 1;
16156b740b8dSNeilBrown 	}
16166b740b8dSNeilBrown 
16177ef449d1SNeilBrown 	for (mirror = first; mirror <= last; mirror++) {
16187ef449d1SNeilBrown 		p = conf->mirrors+mirror;
16197ef449d1SNeilBrown 		if (!p->rdev) {
16201da177e4SLinus Torvalds 
16219092c02dSJonathan Brassow 			if (mddev->gendisk)
16228f6c2e4bSMartin K. Petersen 				disk_stack_limits(mddev->gendisk, rdev->bdev,
16238f6c2e4bSMartin K. Petersen 						  rdev->data_offset << 9);
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 			p->head_position = 0;
16261da177e4SLinus Torvalds 			rdev->raid_disk = mirror;
1627199050eaSNeil Brown 			err = 0;
16286aea114aSNeilBrown 			/* As all devices are equivalent, we don't need a full recovery
16296aea114aSNeilBrown 			 * if this device was recently a member of the array.
16306aea114aSNeilBrown 			 */
16316aea114aSNeilBrown 			if (rdev->saved_raid_disk < 0)
163241158c7eSNeilBrown 				conf->fullsync = 1;
1633d6065f7bSSuzanne Wood 			rcu_assign_pointer(p->rdev, rdev);
16341da177e4SLinus Torvalds 			break;
16351da177e4SLinus Torvalds 		}
16367ef449d1SNeilBrown 		if (test_bit(WantReplacement, &p->rdev->flags) &&
16377ef449d1SNeilBrown 		    p[conf->raid_disks].rdev == NULL) {
16387ef449d1SNeilBrown 			/* Add this device as a replacement */
16397ef449d1SNeilBrown 			clear_bit(In_sync, &rdev->flags);
16407ef449d1SNeilBrown 			set_bit(Replacement, &rdev->flags);
16417ef449d1SNeilBrown 			rdev->raid_disk = mirror;
16427ef449d1SNeilBrown 			err = 0;
16437ef449d1SNeilBrown 			conf->fullsync = 1;
16447ef449d1SNeilBrown 			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
16457ef449d1SNeilBrown 			break;
16467ef449d1SNeilBrown 		}
16477ef449d1SNeilBrown 	}
16486b740b8dSNeilBrown 	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
16496b740b8dSNeilBrown 		/* Some requests might not have seen this new
16506b740b8dSNeilBrown 		 * merge_bvec_fn.  We must wait for them to complete
16516b740b8dSNeilBrown 		 * before merging the device fully.
16526b740b8dSNeilBrown 		 * First we make sure any code which has tested
16536b740b8dSNeilBrown 		 * our function has submitted the request, then
16546b740b8dSNeilBrown 		 * we wait for all outstanding requests to complete.
16556b740b8dSNeilBrown 		 */
16566b740b8dSNeilBrown 		synchronize_sched();
1657e2d59925SNeilBrown 		freeze_array(conf, 0);
1658e2d59925SNeilBrown 		unfreeze_array(conf);
16596b740b8dSNeilBrown 		clear_bit(Unmerged, &rdev->flags);
16606b740b8dSNeilBrown 	}
1661ac5e7113SAndre Noll 	md_integrity_add_rdev(rdev, mddev);
16629092c02dSJonathan Brassow 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
16632ff8cc2cSShaohua Li 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
16641da177e4SLinus Torvalds 	print_conf(conf);
1665199050eaSNeil Brown 	return err;
16661da177e4SLinus Torvalds }
16671da177e4SLinus Torvalds 
1668b8321b68SNeilBrown static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
16691da177e4SLinus Torvalds {
1670e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
16711da177e4SLinus Torvalds 	int err = 0;
1672b8321b68SNeilBrown 	int number = rdev->raid_disk;
16730eaf822cSJonathan Brassow 	struct raid1_info *p = conf->mirrors + number;
16741da177e4SLinus Torvalds 
1675b014f14cSNeilBrown 	if (rdev != p->rdev)
1676b014f14cSNeilBrown 		p = conf->mirrors + conf->raid_disks + number;
1677b014f14cSNeilBrown 
16781da177e4SLinus Torvalds 	print_conf(conf);
1679b8321b68SNeilBrown 	if (rdev == p->rdev) {
1680b2d444d7SNeilBrown 		if (test_bit(In_sync, &rdev->flags) ||
16811da177e4SLinus Torvalds 		    atomic_read(&rdev->nr_pending)) {
16821da177e4SLinus Torvalds 			err = -EBUSY;
16831da177e4SLinus Torvalds 			goto abort;
16841da177e4SLinus Torvalds 		}
1685046abeedSNeilBrown 		/* Only remove non-faulty devices if recovery
1686dfc70645SNeilBrown 		 * is not possible.
1687dfc70645SNeilBrown 		 */
1688dfc70645SNeilBrown 		if (!test_bit(Faulty, &rdev->flags) &&
16895389042fSNeilBrown 		    mddev->recovery_disabled != conf->recovery_disabled &&
1690dfc70645SNeilBrown 		    mddev->degraded < conf->raid_disks) {
1691dfc70645SNeilBrown 			err = -EBUSY;
1692dfc70645SNeilBrown 			goto abort;
1693dfc70645SNeilBrown 		}
16941da177e4SLinus Torvalds 		p->rdev = NULL;
1695fbd568a3SPaul E. McKenney 		synchronize_rcu();
16961da177e4SLinus Torvalds 		if (atomic_read(&rdev->nr_pending)) {
16971da177e4SLinus Torvalds 			/* lost the race, try later */
16981da177e4SLinus Torvalds 			err = -EBUSY;
16991da177e4SLinus Torvalds 			p->rdev = rdev;
1700ac5e7113SAndre Noll 			goto abort;
17018c7a2c2bSNeilBrown 		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
17028c7a2c2bSNeilBrown 			/* We just removed a device that is being replaced.
17038c7a2c2bSNeilBrown 			 * Move down the replacement.  We drain all IO before
17048c7a2c2bSNeilBrown 			 * doing this to avoid confusion.
17058c7a2c2bSNeilBrown 			 */
17068c7a2c2bSNeilBrown 			struct md_rdev *repl =
17078c7a2c2bSNeilBrown 				conf->mirrors[conf->raid_disks + number].rdev;
1708e2d59925SNeilBrown 			freeze_array(conf, 0);
17098c7a2c2bSNeilBrown 			clear_bit(Replacement, &repl->flags);
17108c7a2c2bSNeilBrown 			p->rdev = repl;
17118c7a2c2bSNeilBrown 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1712e2d59925SNeilBrown 			unfreeze_array(conf);
1713b014f14cSNeilBrown 			clear_bit(WantReplacement, &rdev->flags);
17148c7a2c2bSNeilBrown 		} else
17158c7a2c2bSNeilBrown 			clear_bit(WantReplacement, &rdev->flags);
1716a91a2785SMartin K. Petersen 		err = md_integrity_register(mddev);
17171da177e4SLinus Torvalds 	}
17181da177e4SLinus Torvalds abort:
17191da177e4SLinus Torvalds 
17201da177e4SLinus Torvalds 	print_conf(conf);
17211da177e4SLinus Torvalds 	return err;
17221da177e4SLinus Torvalds }
17231da177e4SLinus Torvalds 
17241da177e4SLinus Torvalds 
17256712ecf8SNeilBrown static void end_sync_read(struct bio *bio, int error)
17261da177e4SLinus Torvalds {
17279f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
17281da177e4SLinus Torvalds 
17290fc280f6SNeilBrown 	update_head_pos(r1_bio->read_disk, r1_bio);
1730ba3ae3beSNamhyung Kim 
17311da177e4SLinus Torvalds 	/*
17321da177e4SLinus Torvalds 	 * we have read a block, now it needs to be re-written,
17331da177e4SLinus Torvalds 	 * or re-read if the read failed.
17341da177e4SLinus Torvalds 	 * We don't do much here, just schedule handling by raid1d
17351da177e4SLinus Torvalds 	 */
173669382e85SNeilBrown 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
17371da177e4SLinus Torvalds 		set_bit(R1BIO_Uptodate, &r1_bio->state);
1738d11c171eSNeilBrown 
1739d11c171eSNeilBrown 	if (atomic_dec_and_test(&r1_bio->remaining))
17401da177e4SLinus Torvalds 		reschedule_retry(r1_bio);
17411da177e4SLinus Torvalds }
17421da177e4SLinus Torvalds 
17436712ecf8SNeilBrown static void end_sync_write(struct bio *bio, int error)
17441da177e4SLinus Torvalds {
17451da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
17469f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
1747fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1748e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
17491da177e4SLinus Torvalds 	int mirror=0;
17504367af55SNeilBrown 	sector_t first_bad;
17514367af55SNeilBrown 	int bad_sectors;
17521da177e4SLinus Torvalds 
1753ba3ae3beSNamhyung Kim 	mirror = find_bio_disk(r1_bio, bio);
1754ba3ae3beSNamhyung Kim 
17556b1117d5SNeilBrown 	if (!uptodate) {
175657dab0bdSNeilBrown 		sector_t sync_blocks = 0;
17576b1117d5SNeilBrown 		sector_t s = r1_bio->sector;
17586b1117d5SNeilBrown 		long sectors_to_go = r1_bio->sectors;
17596b1117d5SNeilBrown 		/* make sure these bits don't get cleared. */
17606b1117d5SNeilBrown 		do {
17615e3db645SNeilBrown 			bitmap_end_sync(mddev->bitmap, s,
17626b1117d5SNeilBrown 					&sync_blocks, 1);
17636b1117d5SNeilBrown 			s += sync_blocks;
17646b1117d5SNeilBrown 			sectors_to_go -= sync_blocks;
17656b1117d5SNeilBrown 		} while (sectors_to_go > 0);
1766d8f05d29SNeilBrown 		set_bit(WriteErrorSeen,
1767d8f05d29SNeilBrown 			&conf->mirrors[mirror].rdev->flags);
176819d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
176919d67169SNeilBrown 				      &conf->mirrors[mirror].rdev->flags))
177019d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
177119d67169SNeilBrown 				mddev->recovery);
1772d8f05d29SNeilBrown 		set_bit(R1BIO_WriteError, &r1_bio->state);
17734367af55SNeilBrown 	} else if (is_badblock(conf->mirrors[mirror].rdev,
17744367af55SNeilBrown 			       r1_bio->sector,
17754367af55SNeilBrown 			       r1_bio->sectors,
17763a9f28a5SNeilBrown 			       &first_bad, &bad_sectors) &&
17773a9f28a5SNeilBrown 		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
17783a9f28a5SNeilBrown 				r1_bio->sector,
17793a9f28a5SNeilBrown 				r1_bio->sectors,
17803a9f28a5SNeilBrown 				&first_bad, &bad_sectors)
17813a9f28a5SNeilBrown 		)
17824367af55SNeilBrown 		set_bit(R1BIO_MadeGood, &r1_bio->state);
1783e3b9703eSNeilBrown 
17841da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
17854367af55SNeilBrown 		int s = r1_bio->sectors;
1786d8f05d29SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1787d8f05d29SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
17884367af55SNeilBrown 			reschedule_retry(r1_bio);
17894367af55SNeilBrown 		else {
17901da177e4SLinus Torvalds 			put_buf(r1_bio);
179173d5c38aSNeilBrown 			md_done_sync(mddev, s, uptodate);
17921da177e4SLinus Torvalds 		}
17931da177e4SLinus Torvalds 	}
17944367af55SNeilBrown }
17951da177e4SLinus Torvalds 
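/*
 * Added annotation: synchronously read or write one range on 'rdev'.
 * Returns 1 on success.  On failure, a write sets WriteErrorSeen and
 * requests a replacement; in either direction the block is recorded as
 * bad, and if even that fails the whole device is failed via md_error().
 */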
17963cb03002SNeilBrown static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1797d8f05d29SNeilBrown 			    int sectors, struct page *page, int rw)
1798d8f05d29SNeilBrown {
1799d8f05d29SNeilBrown 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1800d8f05d29SNeilBrown 		/* success */
1801d8f05d29SNeilBrown 		return 1;
180219d67169SNeilBrown 	if (rw == WRITE) {
1803d8f05d29SNeilBrown 		set_bit(WriteErrorSeen, &rdev->flags);
180419d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
180519d67169SNeilBrown 				      &rdev->flags))
180619d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
180719d67169SNeilBrown 				rdev->mddev->recovery);
180819d67169SNeilBrown 	}
1809d8f05d29SNeilBrown 	/* need to record an error - either for the block or the device */
1810d8f05d29SNeilBrown 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1811d8f05d29SNeilBrown 		md_error(rdev->mddev, rdev);
1812d8f05d29SNeilBrown 	return 0;
1813d8f05d29SNeilBrown }
1814d8f05d29SNeilBrown 
18159f2c9d12SNeilBrown static int fix_sync_read_error(struct r1bio *r1_bio)
18161da177e4SLinus Torvalds {
1817a68e5870SNeilBrown 	/* Try some synchronous reads of other devices to get
181869382e85SNeilBrown 	 * good data, much like with normal read errors.  Only
1819ddac7c7eSNeilBrown 	 * read into the pages we already have so we don't
182069382e85SNeilBrown 	 * need to re-issue the read request.
182169382e85SNeilBrown 	 * We don't need to freeze the array, because being in an
182269382e85SNeilBrown 	 * active sync request, there is no normal IO, and
182369382e85SNeilBrown 	 * no overlapping syncs.
182406f60385SNeilBrown 	 * We don't need to check is_badblock() again as we
182506f60385SNeilBrown 	 * made sure that anything with a bad block in range
182606f60385SNeilBrown 	 * will have bi_end_io clear.
18271da177e4SLinus Torvalds 	 */
1828fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1829e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1830a68e5870SNeilBrown 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
183169382e85SNeilBrown 	sector_t sect = r1_bio->sector;
183269382e85SNeilBrown 	int sectors = r1_bio->sectors;
183369382e85SNeilBrown 	int idx = 0;
183469382e85SNeilBrown 
183569382e85SNeilBrown 	while(sectors) {
183669382e85SNeilBrown 		int s = sectors;
183769382e85SNeilBrown 		int d = r1_bio->read_disk;
183869382e85SNeilBrown 		int success = 0;
18393cb03002SNeilBrown 		struct md_rdev *rdev;
184078d7f5f7SNeilBrown 		int start;
184169382e85SNeilBrown 
184269382e85SNeilBrown 		if (s > (PAGE_SIZE>>9))
184369382e85SNeilBrown 			s = PAGE_SIZE >> 9;
184469382e85SNeilBrown 		do {
184569382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1846ddac7c7eSNeilBrown 				/* No rcu protection needed here; devices
1847ddac7c7eSNeilBrown 				 * can only be removed when no resync is
1848ddac7c7eSNeilBrown 				 * active, and resync is currently active
1849ddac7c7eSNeilBrown 				 */
185069382e85SNeilBrown 				rdev = conf->mirrors[d].rdev;
18519d3d8011SNamhyung Kim 				if (sync_page_io(rdev, sect, s<<9,
185269382e85SNeilBrown 						 bio->bi_io_vec[idx].bv_page,
1853ccebd4c4SJonathan Brassow 						 READ, false)) {
185469382e85SNeilBrown 					success = 1;
185569382e85SNeilBrown 					break;
185669382e85SNeilBrown 				}
185769382e85SNeilBrown 			}
185869382e85SNeilBrown 			d++;
18598f19ccb2SNeilBrown 			if (d == conf->raid_disks * 2)
186069382e85SNeilBrown 				d = 0;
186169382e85SNeilBrown 		} while (!success && d != r1_bio->read_disk);
186269382e85SNeilBrown 
186378d7f5f7SNeilBrown 		if (!success) {
186478d7f5f7SNeilBrown 			char b[BDEVNAME_SIZE];
18653a9f28a5SNeilBrown 			int abort = 0;
18663a9f28a5SNeilBrown 			/* Cannot read from anywhere, this block is lost.
18673a9f28a5SNeilBrown 			 * Record a bad block on each device.  If that doesn't
18683a9f28a5SNeilBrown 			 * work just disable and interrupt the recovery.
18693a9f28a5SNeilBrown 			 * Don't fail devices as that won't really help.
18703a9f28a5SNeilBrown 			 */
187178d7f5f7SNeilBrown 			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
187278d7f5f7SNeilBrown 			       " for block %llu\n",
187378d7f5f7SNeilBrown 			       mdname(mddev),
187478d7f5f7SNeilBrown 			       bdevname(bio->bi_bdev, b),
187578d7f5f7SNeilBrown 			       (unsigned long long)r1_bio->sector);
18768f19ccb2SNeilBrown 			for (d = 0; d < conf->raid_disks * 2; d++) {
18773a9f28a5SNeilBrown 				rdev = conf->mirrors[d].rdev;
18783a9f28a5SNeilBrown 				if (!rdev || test_bit(Faulty, &rdev->flags))
18793a9f28a5SNeilBrown 					continue;
18803a9f28a5SNeilBrown 				if (!rdev_set_badblocks(rdev, sect, s, 0))
18813a9f28a5SNeilBrown 					abort = 1;
18823a9f28a5SNeilBrown 			}
18833a9f28a5SNeilBrown 			if (abort) {
1884d890fa2bSNeilBrown 				conf->recovery_disabled =
1885d890fa2bSNeilBrown 					mddev->recovery_disabled;
18863a9f28a5SNeilBrown 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
188778d7f5f7SNeilBrown 				md_done_sync(mddev, r1_bio->sectors, 0);
188878d7f5f7SNeilBrown 				put_buf(r1_bio);
188978d7f5f7SNeilBrown 				return 0;
189078d7f5f7SNeilBrown 			}
18913a9f28a5SNeilBrown 			/* Try next page */
18923a9f28a5SNeilBrown 			sectors -= s;
18933a9f28a5SNeilBrown 			sect += s;
18943a9f28a5SNeilBrown 			idx++;
18953a9f28a5SNeilBrown 			continue;
18963a9f28a5SNeilBrown 		}
189778d7f5f7SNeilBrown 
189878d7f5f7SNeilBrown 		start = d;
189969382e85SNeilBrown 		/* write it back and re-read */
190069382e85SNeilBrown 		while (d != r1_bio->read_disk) {
190169382e85SNeilBrown 			if (d == 0)
19028f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
190369382e85SNeilBrown 			d--;
190469382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
190569382e85SNeilBrown 				continue;
190669382e85SNeilBrown 			rdev = conf->mirrors[d].rdev;
1907d8f05d29SNeilBrown 			if (r1_sync_page_io(rdev, sect, s,
190869382e85SNeilBrown 					    bio->bi_io_vec[idx].bv_page,
1909d8f05d29SNeilBrown 					    WRITE) == 0) {
191078d7f5f7SNeilBrown 				r1_bio->bios[d]->bi_end_io = NULL;
191178d7f5f7SNeilBrown 				rdev_dec_pending(rdev, mddev);
19129d3d8011SNamhyung Kim 			}
1913097426f6SNeilBrown 		}
1914097426f6SNeilBrown 		d = start;
1915097426f6SNeilBrown 		while (d != r1_bio->read_disk) {
1916097426f6SNeilBrown 			if (d == 0)
19178f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
1918097426f6SNeilBrown 			d--;
1919097426f6SNeilBrown 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1920097426f6SNeilBrown 				continue;
1921097426f6SNeilBrown 			rdev = conf->mirrors[d].rdev;
1922d8f05d29SNeilBrown 			if (r1_sync_page_io(rdev, sect, s,
192369382e85SNeilBrown 					    bio->bi_io_vec[idx].bv_page,
1924d8f05d29SNeilBrown 					    READ) != 0)
19259d3d8011SNamhyung Kim 				atomic_add(s, &rdev->corrected_errors);
192669382e85SNeilBrown 		}
192769382e85SNeilBrown 		sectors -= s;
192869382e85SNeilBrown 		sect += s;
192969382e85SNeilBrown 		idx ++;
193069382e85SNeilBrown 	}
193178d7f5f7SNeilBrown 	set_bit(R1BIO_Uptodate, &r1_bio->state);
19327ca78d57SNeilBrown 	set_bit(BIO_UPTODATE, &bio->bi_flags);
1933a68e5870SNeilBrown 	return 1;
193469382e85SNeilBrown }
1935d11c171eSNeilBrown 
19369f2c9d12SNeilBrown static int process_checks(struct r1bio *r1_bio)
1937a68e5870SNeilBrown {
1938a68e5870SNeilBrown 	/* We have read all readable devices.  If we haven't
1939a68e5870SNeilBrown 	 * got the block, then there is no hope left.
1940a68e5870SNeilBrown 	 * If we have, then we want to do a comparison
1941a68e5870SNeilBrown 	 * and skip the write if everything is the same.
1942a68e5870SNeilBrown 	 * If any blocks failed to read, then we need to
1943a68e5870SNeilBrown 	 * attempt an over-write
1944a68e5870SNeilBrown 	 */
1945fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1946e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1947a68e5870SNeilBrown 	int primary;
1948a68e5870SNeilBrown 	int i;
1949f4380a91Smajianpeng 	int vcnt;
1950a68e5870SNeilBrown 
195130bc9b53SNeilBrown 	/* Fix variable parts of all bios */
195230bc9b53SNeilBrown 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
195330bc9b53SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
195430bc9b53SNeilBrown 		int j;
195530bc9b53SNeilBrown 		int size;
19561877db75SNeilBrown 		int uptodate;
195730bc9b53SNeilBrown 		struct bio *b = r1_bio->bios[i];
195830bc9b53SNeilBrown 		if (b->bi_end_io != end_sync_read)
195930bc9b53SNeilBrown 			continue;
19601877db75SNeilBrown 		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
19611877db75SNeilBrown 		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
196230bc9b53SNeilBrown 		bio_reset(b);
19631877db75SNeilBrown 		if (!uptodate)
19641877db75SNeilBrown 			clear_bit(BIO_UPTODATE, &b->bi_flags);
196530bc9b53SNeilBrown 		b->bi_vcnt = vcnt;
19664f024f37SKent Overstreet 		b->bi_iter.bi_size = r1_bio->sectors << 9;
19674f024f37SKent Overstreet 		b->bi_iter.bi_sector = r1_bio->sector +
196830bc9b53SNeilBrown 			conf->mirrors[i].rdev->data_offset;
196930bc9b53SNeilBrown 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
197030bc9b53SNeilBrown 		b->bi_end_io = end_sync_read;
197130bc9b53SNeilBrown 		b->bi_private = r1_bio;
197230bc9b53SNeilBrown 
19734f024f37SKent Overstreet 		size = b->bi_iter.bi_size;
197430bc9b53SNeilBrown 		for (j = 0; j < vcnt ; j++) {
197530bc9b53SNeilBrown 			struct bio_vec *bi;
197630bc9b53SNeilBrown 			bi = &b->bi_io_vec[j];
197730bc9b53SNeilBrown 			bi->bv_offset = 0;
197830bc9b53SNeilBrown 			if (size > PAGE_SIZE)
197930bc9b53SNeilBrown 				bi->bv_len = PAGE_SIZE;
198030bc9b53SNeilBrown 			else
198130bc9b53SNeilBrown 				bi->bv_len = size;
198230bc9b53SNeilBrown 			size -= PAGE_SIZE;
198330bc9b53SNeilBrown 		}
198430bc9b53SNeilBrown 	}
19858f19ccb2SNeilBrown 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
1986a68e5870SNeilBrown 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1987a68e5870SNeilBrown 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1988a68e5870SNeilBrown 			r1_bio->bios[primary]->bi_end_io = NULL;
1989a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1990a68e5870SNeilBrown 			break;
1991a68e5870SNeilBrown 		}
1992a68e5870SNeilBrown 	r1_bio->read_disk = primary;
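	/*
	 * Added annotation: compare every other successfully-read copy
	 * against the primary, page by page.  A copy that matches needs no
	 * rewrite, and a good read during a "check" pass is left alone
	 * (check only counts mismatches); anything else has the primary's
	 * data copied into it so it can be written back below.
	 */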
19938f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1994a68e5870SNeilBrown 		int j;
1995a68e5870SNeilBrown 		struct bio *pbio = r1_bio->bios[primary];
1996a68e5870SNeilBrown 		struct bio *sbio = r1_bio->bios[i];
19971877db75SNeilBrown 		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
199878d7f5f7SNeilBrown 
19992aabaa65SKent Overstreet 		if (sbio->bi_end_io != end_sync_read)
200078d7f5f7SNeilBrown 			continue;
20011877db75SNeilBrown 		/* Now we can 'fixup' the BIO_UPTODATE flag */
20021877db75SNeilBrown 		set_bit(BIO_UPTODATE, &sbio->bi_flags);
2003a68e5870SNeilBrown 
20041877db75SNeilBrown 		if (uptodate) {
2005a68e5870SNeilBrown 			for (j = vcnt; j-- ; ) {
2006a68e5870SNeilBrown 				struct page *p, *s;
2007a68e5870SNeilBrown 				p = pbio->bi_io_vec[j].bv_page;
2008a68e5870SNeilBrown 				s = sbio->bi_io_vec[j].bv_page;
2009a68e5870SNeilBrown 				if (memcmp(page_address(p),
2010a68e5870SNeilBrown 					   page_address(s),
20115020ad7dSNeilBrown 					   sbio->bi_io_vec[j].bv_len))
2012a68e5870SNeilBrown 					break;
2013a68e5870SNeilBrown 			}
2014a68e5870SNeilBrown 		} else
2015a68e5870SNeilBrown 			j = 0;
2016a68e5870SNeilBrown 		if (j >= 0)
20177f7583d4SJianpeng Ma 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2018a68e5870SNeilBrown 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
20191877db75SNeilBrown 			      && uptodate)) {
202078d7f5f7SNeilBrown 			/* No need to write to this device. */
2021a68e5870SNeilBrown 			sbio->bi_end_io = NULL;
2022a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
202378d7f5f7SNeilBrown 			continue;
202478d7f5f7SNeilBrown 		}
2025d3b45c2aSKent Overstreet 
2026d3b45c2aSKent Overstreet 		bio_copy_data(sbio, pbio);
2027a68e5870SNeilBrown 	}
2028a68e5870SNeilBrown 	return 0;
2029a68e5870SNeilBrown }
2030a68e5870SNeilBrown 
20319f2c9d12SNeilBrown static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2032a68e5870SNeilBrown {
2033e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2034a68e5870SNeilBrown 	int i;
20358f19ccb2SNeilBrown 	int disks = conf->raid_disks * 2;
2036a68e5870SNeilBrown 	struct bio *bio, *wbio;
2037a68e5870SNeilBrown 
2038a68e5870SNeilBrown 	bio = r1_bio->bios[r1_bio->read_disk];
2039a68e5870SNeilBrown 
2040a68e5870SNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2041a68e5870SNeilBrown 		/* ouch - failed to read all of that. */
2042a68e5870SNeilBrown 		if (!fix_sync_read_error(r1_bio))
2043a68e5870SNeilBrown 			return;
20447ca78d57SNeilBrown 
20457ca78d57SNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
20467ca78d57SNeilBrown 		if (process_checks(r1_bio) < 0)
20477ca78d57SNeilBrown 			return;
2048d11c171eSNeilBrown 	/*
2049d11c171eSNeilBrown 	 * schedule writes
2050d11c171eSNeilBrown 	 */
20511da177e4SLinus Torvalds 	atomic_set(&r1_bio->remaining, 1);
20521da177e4SLinus Torvalds 	for (i = 0; i < disks ; i++) {
20531da177e4SLinus Torvalds 		wbio = r1_bio->bios[i];
20543e198f78SNeilBrown 		if (wbio->bi_end_io == NULL ||
20553e198f78SNeilBrown 		    (wbio->bi_end_io == end_sync_read &&
20563e198f78SNeilBrown 		     (i == r1_bio->read_disk ||
20573e198f78SNeilBrown 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
20581da177e4SLinus Torvalds 			continue;
20591da177e4SLinus Torvalds 
20603e198f78SNeilBrown 		wbio->bi_rw = WRITE;
20613e198f78SNeilBrown 		wbio->bi_end_io = end_sync_write;
20621da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
2063aa8b57aaSKent Overstreet 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2064191ea9b2SNeilBrown 
20651da177e4SLinus Torvalds 		generic_make_request(wbio);
20661da177e4SLinus Torvalds 	}
20671da177e4SLinus Torvalds 
20681da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
2069191ea9b2SNeilBrown 		/* if we're here, all write(s) have completed, so clean up */
207058e94ae1SNeilBrown 		int s = r1_bio->sectors;
207158e94ae1SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
207258e94ae1SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
207358e94ae1SNeilBrown 			reschedule_retry(r1_bio);
207458e94ae1SNeilBrown 		else {
20751da177e4SLinus Torvalds 			put_buf(r1_bio);
207658e94ae1SNeilBrown 			md_done_sync(mddev, s, 1);
207758e94ae1SNeilBrown 		}
20781da177e4SLinus Torvalds 	}
20791da177e4SLinus Torvalds }
20801da177e4SLinus Torvalds 
20811da177e4SLinus Torvalds /*
20821da177e4SLinus Torvalds  * This is a kernel thread which:
20831da177e4SLinus Torvalds  *
20841da177e4SLinus Torvalds  *	1.	Retries failed read operations on working mirrors.
20851da177e4SLinus Torvalds  *	2.	Updates the raid superblock when problems are encountered.
2086d2eb35acSNeilBrown  *	3.	Performs writes following reads for array synchronising.
20871da177e4SLinus Torvalds  */
20881da177e4SLinus Torvalds 
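/*
 * Added annotation: fix_read_error() runs in raid1d context after a read
 * error on an active array.  For each range it looks for a device that can
 * supply good data, writes that data back over the other in-sync copies,
 * then re-reads to verify, recording bad blocks or failing the device when
 * the error cannot be repaired.
 */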
2089e8096360SNeilBrown static void fix_read_error(struct r1conf *conf, int read_disk,
2090867868fbSNeilBrown 			   sector_t sect, int sectors)
2091867868fbSNeilBrown {
2092fd01b88cSNeilBrown 	struct mddev *mddev = conf->mddev;
2093867868fbSNeilBrown 	while(sectors) {
2094867868fbSNeilBrown 		int s = sectors;
2095867868fbSNeilBrown 		int d = read_disk;
2096867868fbSNeilBrown 		int success = 0;
2097867868fbSNeilBrown 		int start;
20983cb03002SNeilBrown 		struct md_rdev *rdev;
2099867868fbSNeilBrown 
2100867868fbSNeilBrown 		if (s > (PAGE_SIZE>>9))
2101867868fbSNeilBrown 			s = PAGE_SIZE >> 9;
2102867868fbSNeilBrown 
2103867868fbSNeilBrown 		do {
2104867868fbSNeilBrown 			/* Note: no rcu protection needed here
2105867868fbSNeilBrown 			 * as this is synchronous in the raid1d thread
2106867868fbSNeilBrown 			 * which is the thread that might remove
2107867868fbSNeilBrown 			 * a device.  If raid1d ever becomes multi-threaded....
2108867868fbSNeilBrown 			 */
2109d2eb35acSNeilBrown 			sector_t first_bad;
2110d2eb35acSNeilBrown 			int bad_sectors;
2111d2eb35acSNeilBrown 
2112867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2113867868fbSNeilBrown 			if (rdev &&
2114da8840a7Smajianpeng 			    (test_bit(In_sync, &rdev->flags) ||
2115da8840a7Smajianpeng 			     (!test_bit(Faulty, &rdev->flags) &&
2116da8840a7Smajianpeng 			      rdev->recovery_offset >= sect + s)) &&
2117d2eb35acSNeilBrown 			    is_badblock(rdev, sect, s,
2118d2eb35acSNeilBrown 					&first_bad, &bad_sectors) == 0 &&
2119ccebd4c4SJonathan Brassow 			    sync_page_io(rdev, sect, s<<9,
2120ccebd4c4SJonathan Brassow 					 conf->tmppage, READ, false))
2121867868fbSNeilBrown 				success = 1;
2122867868fbSNeilBrown 			else {
2123867868fbSNeilBrown 				d++;
21248f19ccb2SNeilBrown 				if (d == conf->raid_disks * 2)
2125867868fbSNeilBrown 					d = 0;
2126867868fbSNeilBrown 			}
2127867868fbSNeilBrown 		} while (!success && d != read_disk);
2128867868fbSNeilBrown 
2129867868fbSNeilBrown 		if (!success) {
2130d8f05d29SNeilBrown 			/* Cannot read from anywhere - mark it bad */
21313cb03002SNeilBrown 			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2132d8f05d29SNeilBrown 			if (!rdev_set_badblocks(rdev, sect, s, 0))
2133d8f05d29SNeilBrown 				md_error(mddev, rdev);
2134867868fbSNeilBrown 			break;
2135867868fbSNeilBrown 		}
2136867868fbSNeilBrown 		/* write it back and re-read */
2137867868fbSNeilBrown 		start = d;
2138867868fbSNeilBrown 		while (d != read_disk) {
2139867868fbSNeilBrown 			if (d==0)
21408f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
2141867868fbSNeilBrown 			d--;
2142867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2143867868fbSNeilBrown 			if (rdev &&
2144d8f05d29SNeilBrown 			    test_bit(In_sync, &rdev->flags))
2145d8f05d29SNeilBrown 				r1_sync_page_io(rdev, sect, s,
2146d8f05d29SNeilBrown 						conf->tmppage, WRITE);
2147867868fbSNeilBrown 		}
2148867868fbSNeilBrown 		d = start;
2149867868fbSNeilBrown 		while (d != read_disk) {
2150867868fbSNeilBrown 			char b[BDEVNAME_SIZE];
2151867868fbSNeilBrown 			if (d==0)
21528f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
2153867868fbSNeilBrown 			d--;
2154867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2155867868fbSNeilBrown 			if (rdev &&
2156867868fbSNeilBrown 			    test_bit(In_sync, &rdev->flags)) {
2157d8f05d29SNeilBrown 				if (r1_sync_page_io(rdev, sect, s,
2158d8f05d29SNeilBrown 						    conf->tmppage, READ)) {
2159867868fbSNeilBrown 					atomic_add(s, &rdev->corrected_errors);
2160867868fbSNeilBrown 					printk(KERN_INFO
21619dd1e2faSNeilBrown 					       "md/raid1:%s: read error corrected "
2162867868fbSNeilBrown 					       "(%d sectors at %llu on %s)\n",
2163867868fbSNeilBrown 					       mdname(mddev), s,
2164969b755aSRandy Dunlap 					       (unsigned long long)(sect +
2165969b755aSRandy Dunlap 					           rdev->data_offset),
2166867868fbSNeilBrown 					       bdevname(rdev->bdev, b));
2167867868fbSNeilBrown 				}
2168867868fbSNeilBrown 			}
2169867868fbSNeilBrown 		}
2170867868fbSNeilBrown 		sectors -= s;
2171867868fbSNeilBrown 		sect += s;
2172867868fbSNeilBrown 	}
2173867868fbSNeilBrown }
2174867868fbSNeilBrown 
21759f2c9d12SNeilBrown static int narrow_write_error(struct r1bio *r1_bio, int i)
2176cd5ff9a1SNeilBrown {
2177fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
2178e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
21793cb03002SNeilBrown 	struct md_rdev *rdev = conf->mirrors[i].rdev;
2180cd5ff9a1SNeilBrown 
2181cd5ff9a1SNeilBrown 	/* bio has the data to be written to device 'i' where
2182cd5ff9a1SNeilBrown 	 * we just recently had a write error.
2183cd5ff9a1SNeilBrown 	 * We repeatedly clone the bio and trim down to one block,
2184cd5ff9a1SNeilBrown 	 * then try the write.  Where the write fails we record
2185cd5ff9a1SNeilBrown 	 * a bad block.
2186cd5ff9a1SNeilBrown 	 * It is conceivable that the bio doesn't exactly align with
2187cd5ff9a1SNeilBrown 	 * blocks.  We must handle this somehow.
2188cd5ff9a1SNeilBrown 	 *
2189cd5ff9a1SNeilBrown 	 * We currently own a reference on the rdev.
2190cd5ff9a1SNeilBrown 	 */
2191cd5ff9a1SNeilBrown 
2192cd5ff9a1SNeilBrown 	int block_sectors;
2193cd5ff9a1SNeilBrown 	sector_t sector;
2194cd5ff9a1SNeilBrown 	int sectors;
2195cd5ff9a1SNeilBrown 	int sect_to_write = r1_bio->sectors;
2196cd5ff9a1SNeilBrown 	int ok = 1;
2197cd5ff9a1SNeilBrown 
2198cd5ff9a1SNeilBrown 	if (rdev->badblocks.shift < 0)
2199cd5ff9a1SNeilBrown 		return 0;
2200cd5ff9a1SNeilBrown 
2201cd5ff9a1SNeilBrown 	block_sectors = 1 << rdev->badblocks.shift;
2202cd5ff9a1SNeilBrown 	sector = r1_bio->sector;
2203cd5ff9a1SNeilBrown 	sectors = ((sector + block_sectors)
2204cd5ff9a1SNeilBrown 		   & ~(sector_t)(block_sectors - 1))
2205cd5ff9a1SNeilBrown 		- sector;
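	/* 'sectors' is the distance from 'sector' to the next block_sectors
	 * boundary, so the first write attempt may cover less than a full
	 * block; subsequent attempts (reset at the bottom of the loop) use
	 * whole blocks.
	 */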
2206cd5ff9a1SNeilBrown 
2207cd5ff9a1SNeilBrown 	while (sect_to_write) {
2208cd5ff9a1SNeilBrown 		struct bio *wbio;
2209cd5ff9a1SNeilBrown 		if (sectors > sect_to_write)
2210cd5ff9a1SNeilBrown 			sectors = sect_to_write;
2211cd5ff9a1SNeilBrown 		/* Write at 'sector' for 'sectors' */
2212cd5ff9a1SNeilBrown 
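		/* For write-behind requests the data lives in the privately
		 * held behind_bvecs (the master bio may already have been
		 * completed back to the caller), so build the retry bio from
		 * those pages instead of cloning the master bio.
		 */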
2213b783863fSKent Overstreet 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2214b783863fSKent Overstreet 			unsigned vcnt = r1_bio->behind_page_count;
2215b783863fSKent Overstreet 			struct bio_vec *vec = r1_bio->behind_bvecs;
2216b783863fSKent Overstreet 
2217b783863fSKent Overstreet 			while (!vec->bv_page) {
2218b783863fSKent Overstreet 				vec++;
2219b783863fSKent Overstreet 				vcnt--;
2220b783863fSKent Overstreet 			}
2221b783863fSKent Overstreet 
2222cd5ff9a1SNeilBrown 			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2223cd5ff9a1SNeilBrown 			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2224b783863fSKent Overstreet 
2225cd5ff9a1SNeilBrown 			wbio->bi_vcnt = vcnt;
2226b783863fSKent Overstreet 		} else {
2227b783863fSKent Overstreet 			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2228b783863fSKent Overstreet 		}
2229b783863fSKent Overstreet 
2230b783863fSKent Overstreet 		wbio->bi_rw = WRITE;
22314f024f37SKent Overstreet 		wbio->bi_iter.bi_sector = r1_bio->sector;
22324f024f37SKent Overstreet 		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2233cd5ff9a1SNeilBrown 
22346678d83fSKent Overstreet 		bio_trim(wbio, sector - r1_bio->sector, sectors);
22354f024f37SKent Overstreet 		wbio->bi_iter.bi_sector += rdev->data_offset;
2236cd5ff9a1SNeilBrown 		wbio->bi_bdev = rdev->bdev;
2237cd5ff9a1SNeilBrown 		if (submit_bio_wait(WRITE, wbio) == 0)
2238cd5ff9a1SNeilBrown 			/* failure! */
2239cd5ff9a1SNeilBrown 			ok = rdev_set_badblocks(rdev, sector,
2240cd5ff9a1SNeilBrown 						sectors, 0)
2241cd5ff9a1SNeilBrown 				&& ok;
2242cd5ff9a1SNeilBrown 
2243cd5ff9a1SNeilBrown 		bio_put(wbio);
2244cd5ff9a1SNeilBrown 		sect_to_write -= sectors;
2245cd5ff9a1SNeilBrown 		sector += sectors;
2246cd5ff9a1SNeilBrown 		sectors = block_sectors;
2247cd5ff9a1SNeilBrown 	}
2248cd5ff9a1SNeilBrown 	return ok;
2249cd5ff9a1SNeilBrown }
2250cd5ff9a1SNeilBrown 
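/* Called from raid1d for a resync/recovery r1_bio flagged MadeGood or
 * WriteError: clear the bad-block record where a known-bad area was
 * successfully re-written, record new bad blocks where a write failed
 * (failing the device if that isn't possible), then complete the sync.
 */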
2251e8096360SNeilBrown static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
225262096bceSNeilBrown {
225362096bceSNeilBrown 	int m;
225462096bceSNeilBrown 	int s = r1_bio->sectors;
22558f19ccb2SNeilBrown 	for (m = 0; m < conf->raid_disks * 2 ; m++) {
22563cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[m].rdev;
225762096bceSNeilBrown 		struct bio *bio = r1_bio->bios[m];
225862096bceSNeilBrown 		if (bio->bi_end_io == NULL)
225962096bceSNeilBrown 			continue;
226062096bceSNeilBrown 		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
226162096bceSNeilBrown 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2262c6563a8cSNeilBrown 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
226362096bceSNeilBrown 		}
226462096bceSNeilBrown 		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
226562096bceSNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
226662096bceSNeilBrown 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
226762096bceSNeilBrown 				md_error(conf->mddev, rdev);
226862096bceSNeilBrown 		}
226962096bceSNeilBrown 	}
227062096bceSNeilBrown 	put_buf(r1_bio);
227162096bceSNeilBrown 	md_done_sync(conf->mddev, s, 1);
227262096bceSNeilBrown }
227362096bceSNeilBrown 
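/* Called from raid1d for a normal write r1_bio that needs follow-up:
 * bios marked IO_MADE_GOOD get their bad-block records cleared, while
 * bios that saw a write error are narrowed down to precise bad-block
 * records via narrow_write_error() (failing the device if that fails).
 */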
2274e8096360SNeilBrown static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
227562096bceSNeilBrown {
227662096bceSNeilBrown 	int m;
22778f19ccb2SNeilBrown 	for (m = 0; m < conf->raid_disks * 2 ; m++)
227862096bceSNeilBrown 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
22793cb03002SNeilBrown 			struct md_rdev *rdev = conf->mirrors[m].rdev;
228062096bceSNeilBrown 			rdev_clear_badblocks(rdev,
228162096bceSNeilBrown 					     r1_bio->sector,
2282c6563a8cSNeilBrown 					     r1_bio->sectors, 0);
228362096bceSNeilBrown 			rdev_dec_pending(rdev, conf->mddev);
228462096bceSNeilBrown 		} else if (r1_bio->bios[m] != NULL) {
228562096bceSNeilBrown 			/* This drive got a write error.  We need to
228662096bceSNeilBrown 			 * narrow down and record precise write
228762096bceSNeilBrown 			 * errors.
228862096bceSNeilBrown 			 */
228962096bceSNeilBrown 			if (!narrow_write_error(r1_bio, m)) {
229062096bceSNeilBrown 				md_error(conf->mddev,
229162096bceSNeilBrown 					 conf->mirrors[m].rdev);
229262096bceSNeilBrown 				/* an I/O failed, we can't clear the bitmap */
229362096bceSNeilBrown 				set_bit(R1BIO_Degraded, &r1_bio->state);
229462096bceSNeilBrown 			}
229562096bceSNeilBrown 			rdev_dec_pending(conf->mirrors[m].rdev,
229662096bceSNeilBrown 					 conf->mddev);
229762096bceSNeilBrown 		}
229862096bceSNeilBrown 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
229962096bceSNeilBrown 		close_write(r1_bio);
230062096bceSNeilBrown 	raid_end_bio_io(r1_bio);
230162096bceSNeilBrown }
230262096bceSNeilBrown 
2303e8096360SNeilBrown static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
230462096bceSNeilBrown {
230562096bceSNeilBrown 	int disk;
230662096bceSNeilBrown 	int max_sectors;
2307fd01b88cSNeilBrown 	struct mddev *mddev = conf->mddev;
230862096bceSNeilBrown 	struct bio *bio;
230962096bceSNeilBrown 	char b[BDEVNAME_SIZE];
23103cb03002SNeilBrown 	struct md_rdev *rdev;
231162096bceSNeilBrown 
231262096bceSNeilBrown 	clear_bit(R1BIO_ReadError, &r1_bio->state);
231362096bceSNeilBrown 	/* we got a read error. Maybe the drive is bad.  Maybe just
231462096bceSNeilBrown 	 * the block is bad, and we can fix it.
231562096bceSNeilBrown 	 * We freeze all other IO, and try reading the block from
231662096bceSNeilBrown 	 * other devices.  When we find one, we re-write
231762096bceSNeilBrown 	 * and check whether that fixes the read error.
231862096bceSNeilBrown 	 * This is all done synchronously while the array is
231962096bceSNeilBrown 	 * frozen
232062096bceSNeilBrown 	 */
232162096bceSNeilBrown 	if (mddev->ro == 0) {
2322e2d59925SNeilBrown 		freeze_array(conf, 1);
232362096bceSNeilBrown 		fix_read_error(conf, r1_bio->read_disk,
232462096bceSNeilBrown 			       r1_bio->sector, r1_bio->sectors);
232562096bceSNeilBrown 		unfreeze_array(conf);
232662096bceSNeilBrown 	} else
232762096bceSNeilBrown 		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
23287ad4d4a6SNeilBrown 	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
232962096bceSNeilBrown 
233062096bceSNeilBrown 	bio = r1_bio->bios[r1_bio->read_disk];
233162096bceSNeilBrown 	bdevname(bio->bi_bdev, b);
233262096bceSNeilBrown read_more:
233362096bceSNeilBrown 	disk = read_balance(conf, r1_bio, &max_sectors);
233462096bceSNeilBrown 	if (disk == -1) {
233562096bceSNeilBrown 		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
233662096bceSNeilBrown 		       " read error for block %llu\n",
233762096bceSNeilBrown 		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
233862096bceSNeilBrown 		raid_end_bio_io(r1_bio);
233962096bceSNeilBrown 	} else {
234062096bceSNeilBrown 		const unsigned long do_sync
234162096bceSNeilBrown 			= r1_bio->master_bio->bi_rw & REQ_SYNC;
234262096bceSNeilBrown 		if (bio) {
234362096bceSNeilBrown 			r1_bio->bios[r1_bio->read_disk] =
234462096bceSNeilBrown 				mddev->ro ? IO_BLOCKED : NULL;
234562096bceSNeilBrown 			bio_put(bio);
234662096bceSNeilBrown 		}
234762096bceSNeilBrown 		r1_bio->read_disk = disk;
234862096bceSNeilBrown 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
23494f024f37SKent Overstreet 		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
23504f024f37SKent Overstreet 			 max_sectors);
235162096bceSNeilBrown 		r1_bio->bios[r1_bio->read_disk] = bio;
235262096bceSNeilBrown 		rdev = conf->mirrors[disk].rdev;
235362096bceSNeilBrown 		printk_ratelimited(KERN_ERR
235462096bceSNeilBrown 				   "md/raid1:%s: redirecting sector %llu"
235562096bceSNeilBrown 				   " to other mirror: %s\n",
235662096bceSNeilBrown 				   mdname(mddev),
235762096bceSNeilBrown 				   (unsigned long long)r1_bio->sector,
235862096bceSNeilBrown 				   bdevname(rdev->bdev, b));
23594f024f37SKent Overstreet 		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
236062096bceSNeilBrown 		bio->bi_bdev = rdev->bdev;
236162096bceSNeilBrown 		bio->bi_end_io = raid1_end_read_request;
236262096bceSNeilBrown 		bio->bi_rw = READ | do_sync;
236362096bceSNeilBrown 		bio->bi_private = r1_bio;
236462096bceSNeilBrown 		if (max_sectors < r1_bio->sectors) {
236562096bceSNeilBrown 			/* Drat - have to split this up more */
236662096bceSNeilBrown 			struct bio *mbio = r1_bio->master_bio;
236762096bceSNeilBrown 			int sectors_handled = (r1_bio->sector + max_sectors
23684f024f37SKent Overstreet 					       - mbio->bi_iter.bi_sector);
236962096bceSNeilBrown 			r1_bio->sectors = max_sectors;
237062096bceSNeilBrown 			spin_lock_irq(&conf->device_lock);
237162096bceSNeilBrown 			if (mbio->bi_phys_segments == 0)
237262096bceSNeilBrown 				mbio->bi_phys_segments = 2;
237362096bceSNeilBrown 			else
237462096bceSNeilBrown 				mbio->bi_phys_segments++;
237562096bceSNeilBrown 			spin_unlock_irq(&conf->device_lock);
237662096bceSNeilBrown 			generic_make_request(bio);
237762096bceSNeilBrown 			bio = NULL;
237862096bceSNeilBrown 
237962096bceSNeilBrown 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
238062096bceSNeilBrown 
238162096bceSNeilBrown 			r1_bio->master_bio = mbio;
2382aa8b57aaSKent Overstreet 			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
238362096bceSNeilBrown 			r1_bio->state = 0;
238462096bceSNeilBrown 			set_bit(R1BIO_ReadError, &r1_bio->state);
238562096bceSNeilBrown 			r1_bio->mddev = mddev;
23864f024f37SKent Overstreet 			r1_bio->sector = mbio->bi_iter.bi_sector +
23874f024f37SKent Overstreet 				sectors_handled;
238862096bceSNeilBrown 
238962096bceSNeilBrown 			goto read_more;
239062096bceSNeilBrown 		} else
239162096bceSNeilBrown 			generic_make_request(bio);
239262096bceSNeilBrown 	}
239362096bceSNeilBrown }
239462096bceSNeilBrown 
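/* The raid1 service thread: flush queued writes, then take r1_bios off
 * conf->retry_list and dispatch each one - sync-write completion,
 * write-error handling, read-error recovery, or simply resubmitting a
 * partial read that was deferred to process context.
 */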
23954ed8731dSShaohua Li static void raid1d(struct md_thread *thread)
23961da177e4SLinus Torvalds {
23974ed8731dSShaohua Li 	struct mddev *mddev = thread->mddev;
23989f2c9d12SNeilBrown 	struct r1bio *r1_bio;
23991da177e4SLinus Torvalds 	unsigned long flags;
2400e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
24011da177e4SLinus Torvalds 	struct list_head *head = &conf->retry_list;
2402e1dfa0a2SNeilBrown 	struct blk_plug plug;
24031da177e4SLinus Torvalds 
24041da177e4SLinus Torvalds 	md_check_recovery(mddev);
24051da177e4SLinus Torvalds 
2406e1dfa0a2SNeilBrown 	blk_start_plug(&plug);
24071da177e4SLinus Torvalds 	for (;;) {
2408a35e63efSNeilBrown 
24097eaceaccSJens Axboe 		flush_pending_writes(conf);
2410a35e63efSNeilBrown 
24111da177e4SLinus Torvalds 		spin_lock_irqsave(&conf->device_lock, flags);
2412a35e63efSNeilBrown 		if (list_empty(head)) {
2413191ea9b2SNeilBrown 			spin_unlock_irqrestore(&conf->device_lock, flags);
24141da177e4SLinus Torvalds 			break;
2415a35e63efSNeilBrown 		}
24169f2c9d12SNeilBrown 		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
24171da177e4SLinus Torvalds 		list_del(head->prev);
2418ddaf22abSNeilBrown 		conf->nr_queued--;
24191da177e4SLinus Torvalds 		spin_unlock_irqrestore(&conf->device_lock, flags);
24201da177e4SLinus Torvalds 
24211da177e4SLinus Torvalds 		mddev = r1_bio->mddev;
2422070ec55dSNeilBrown 		conf = mddev->private;
24234367af55SNeilBrown 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2424d8f05d29SNeilBrown 			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
242562096bceSNeilBrown 			    test_bit(R1BIO_WriteError, &r1_bio->state))
242662096bceSNeilBrown 				handle_sync_write_finished(conf, r1_bio);
242762096bceSNeilBrown 			else
24281da177e4SLinus Torvalds 				sync_request_write(mddev, r1_bio);
2429cd5ff9a1SNeilBrown 		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
243062096bceSNeilBrown 			   test_bit(R1BIO_WriteError, &r1_bio->state))
243162096bceSNeilBrown 			handle_write_finished(conf, r1_bio);
243262096bceSNeilBrown 		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
243362096bceSNeilBrown 			handle_read_error(conf, r1_bio);
2434d2eb35acSNeilBrown 		else
2435d2eb35acSNeilBrown 			/* just a partial read to be scheduled from separate
2436d2eb35acSNeilBrown 			 * context
2437d2eb35acSNeilBrown 			 */
2438d2eb35acSNeilBrown 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
243962096bceSNeilBrown 
24401d9d5241SNeilBrown 		cond_resched();
2441de393cdeSNeilBrown 		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2442de393cdeSNeilBrown 			md_check_recovery(mddev);
24431da177e4SLinus Torvalds 	}
2444e1dfa0a2SNeilBrown 	blk_finish_plug(&plug);
24451da177e4SLinus Torvalds }
24461da177e4SLinus Torvalds 
24471da177e4SLinus Torvalds 
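/* Allocate the pool of resync buffers used by sync_request(): one
 * r1buf for each RESYNC_BLOCK_SIZE slot in the resync window.
 */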
2448e8096360SNeilBrown static int init_resync(struct r1conf *conf)
24491da177e4SLinus Torvalds {
24501da177e4SLinus Torvalds 	int buffs;
24511da177e4SLinus Torvalds 
24521da177e4SLinus Torvalds 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
24539e77c485SEric Sesterhenn 	BUG_ON(conf->r1buf_pool);
24541da177e4SLinus Torvalds 	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
24551da177e4SLinus Torvalds 					  conf->poolinfo);
24561da177e4SLinus Torvalds 	if (!conf->r1buf_pool)
24571da177e4SLinus Torvalds 		return -ENOMEM;
24581da177e4SLinus Torvalds 	conf->next_resync = 0;
24591da177e4SLinus Torvalds 	return 0;
24601da177e4SLinus Torvalds }
24611da177e4SLinus Torvalds 
24621da177e4SLinus Torvalds /*
24631da177e4SLinus Torvalds  * perform a "sync" on one "block"
24641da177e4SLinus Torvalds  *
24651da177e4SLinus Torvalds  * We need to make sure that no normal I/O requests - particularly write
24661da177e4SLinus Torvalds  * requests - conflict with active sync requests.
24671da177e4SLinus Torvalds  *
24681da177e4SLinus Torvalds  * This is achieved by tracking pending requests and a 'barrier' concept
24691da177e4SLinus Torvalds  * that can be installed to exclude normal IO requests.
24701da177e4SLinus Torvalds  */
24711da177e4SLinus Torvalds 
2472fd01b88cSNeilBrown static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
24731da177e4SLinus Torvalds {
2474e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
24759f2c9d12SNeilBrown 	struct r1bio *r1_bio;
24761da177e4SLinus Torvalds 	struct bio *bio;
24771da177e4SLinus Torvalds 	sector_t max_sector, nr_sectors;
24783e198f78SNeilBrown 	int disk = -1;
24791da177e4SLinus Torvalds 	int i;
24803e198f78SNeilBrown 	int wonly = -1;
24813e198f78SNeilBrown 	int write_targets = 0, read_targets = 0;
248257dab0bdSNeilBrown 	sector_t sync_blocks;
2483e3b9703eSNeilBrown 	int still_degraded = 0;
248406f60385SNeilBrown 	int good_sectors = RESYNC_SECTORS;
248506f60385SNeilBrown 	int min_bad = 0; /* number of sectors that are bad in all devices */
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds 	if (!conf->r1buf_pool)
24881da177e4SLinus Torvalds 		if (init_resync(conf))
248957afd89fSNeilBrown 			return 0;
24901da177e4SLinus Torvalds 
249158c0fed4SAndre Noll 	max_sector = mddev->dev_sectors;
24921da177e4SLinus Torvalds 	if (sector_nr >= max_sector) {
2493191ea9b2SNeilBrown 		/* If we aborted, we need to abort the
2494191ea9b2SNeilBrown 		 * sync on the 'current' bitmap chunk (there will
2495191ea9b2SNeilBrown 		 * only be one in raid1 resync).
2496191ea9b2SNeilBrown 		 * We can find the current address in mddev->curr_resync
2497191ea9b2SNeilBrown 		 */
24986a806c51SNeilBrown 		if (mddev->curr_resync < max_sector) /* aborted */
24996a806c51SNeilBrown 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2500191ea9b2SNeilBrown 						&sync_blocks, 1);
25016a806c51SNeilBrown 		else /* completed sync */
2502191ea9b2SNeilBrown 			conf->fullsync = 0;
25036a806c51SNeilBrown 
25046a806c51SNeilBrown 		bitmap_close_sync(mddev->bitmap);
25051da177e4SLinus Torvalds 		close_sync(conf);
25061da177e4SLinus Torvalds 		return 0;
25071da177e4SLinus Torvalds 	}
25081da177e4SLinus Torvalds 
250907d84d10SNeilBrown 	if (mddev->bitmap == NULL &&
251007d84d10SNeilBrown 	    mddev->recovery_cp == MaxSector &&
25116394cca5SNeilBrown 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
251207d84d10SNeilBrown 	    conf->fullsync == 0) {
251307d84d10SNeilBrown 		*skipped = 1;
251407d84d10SNeilBrown 		return max_sector - sector_nr;
251507d84d10SNeilBrown 	}
25166394cca5SNeilBrown 	/* before building a request, check if we can skip these blocks.
25176394cca5SNeilBrown 	 * This call to bitmap_start_sync doesn't actually record anything
25186394cca5SNeilBrown 	 */
2519e3b9703eSNeilBrown 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2520e5de485fSNeilBrown 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2521191ea9b2SNeilBrown 		/* We can skip this block, and probably several more */
2522191ea9b2SNeilBrown 		*skipped = 1;
2523191ea9b2SNeilBrown 		return sync_blocks;
2524191ea9b2SNeilBrown 	}
25251da177e4SLinus Torvalds 	/*
252617999be4SNeilBrown 	 * If there is non-resync activity waiting for a turn,
252717999be4SNeilBrown 	 * and resync is going fast enough,
252817999be4SNeilBrown 	 * then let it through before starting on this new sync request.
25291da177e4SLinus Torvalds 	 */
253017999be4SNeilBrown 	if (!go_faster && conf->nr_waiting)
25311da177e4SLinus Torvalds 		msleep_interruptible(1000);
253217999be4SNeilBrown 
2533b47490c9SNeilBrown 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
25341c4588e9SNeilBrown 	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
253517999be4SNeilBrown 	raise_barrier(conf);
253617999be4SNeilBrown 
253717999be4SNeilBrown 	conf->next_resync = sector_nr;
25381da177e4SLinus Torvalds 
25393e198f78SNeilBrown 	rcu_read_lock();
25403e198f78SNeilBrown 	/*
25413e198f78SNeilBrown 	 * If we get a correctably read error during resync or recovery,
25423e198f78SNeilBrown 	 * we might want to read from a different device.  So we
25433e198f78SNeilBrown 	 * flag all drives that could conceivably be read from for READ,
25443e198f78SNeilBrown 	 * and any others (which will be non-In_sync devices) for WRITE.
25453e198f78SNeilBrown 	 * If a read fails, we try reading from something else for which READ
25463e198f78SNeilBrown 	 * is OK.
25473e198f78SNeilBrown 	 */
25481da177e4SLinus Torvalds 
25491da177e4SLinus Torvalds 	r1_bio->mddev = mddev;
25501da177e4SLinus Torvalds 	r1_bio->sector = sector_nr;
2551191ea9b2SNeilBrown 	r1_bio->state = 0;
25521da177e4SLinus Torvalds 	set_bit(R1BIO_IsSync, &r1_bio->state);
25531da177e4SLinus Torvalds 
25548f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
25553cb03002SNeilBrown 		struct md_rdev *rdev;
25561da177e4SLinus Torvalds 		bio = r1_bio->bios[i];
25572aabaa65SKent Overstreet 		bio_reset(bio);
25581da177e4SLinus Torvalds 
25593e198f78SNeilBrown 		rdev = rcu_dereference(conf->mirrors[i].rdev);
25603e198f78SNeilBrown 		if (rdev == NULL ||
25613e198f78SNeilBrown 		    test_bit(Faulty, &rdev->flags)) {
25628f19ccb2SNeilBrown 			if (i < conf->raid_disks)
2563e3b9703eSNeilBrown 				still_degraded = 1;
25643e198f78SNeilBrown 		} else if (!test_bit(In_sync, &rdev->flags)) {
25651da177e4SLinus Torvalds 			bio->bi_rw = WRITE;
25661da177e4SLinus Torvalds 			bio->bi_end_io = end_sync_write;
25671da177e4SLinus Torvalds 			write_targets ++;
25683e198f78SNeilBrown 		} else {
25693e198f78SNeilBrown 			/* may need to read from here */
257006f60385SNeilBrown 			sector_t first_bad = MaxSector;
257106f60385SNeilBrown 			int bad_sectors;
257206f60385SNeilBrown 
257306f60385SNeilBrown 			if (is_badblock(rdev, sector_nr, good_sectors,
257406f60385SNeilBrown 					&first_bad, &bad_sectors)) {
257506f60385SNeilBrown 				if (first_bad > sector_nr)
257606f60385SNeilBrown 					good_sectors = first_bad - sector_nr;
257706f60385SNeilBrown 				else {
257806f60385SNeilBrown 					bad_sectors -= (sector_nr - first_bad);
257906f60385SNeilBrown 					if (min_bad == 0 ||
258006f60385SNeilBrown 					    min_bad > bad_sectors)
258106f60385SNeilBrown 						min_bad = bad_sectors;
258206f60385SNeilBrown 				}
258306f60385SNeilBrown 			}
258406f60385SNeilBrown 			if (sector_nr < first_bad) {
25853e198f78SNeilBrown 				if (test_bit(WriteMostly, &rdev->flags)) {
25863e198f78SNeilBrown 					if (wonly < 0)
25873e198f78SNeilBrown 						wonly = i;
25883e198f78SNeilBrown 				} else {
25893e198f78SNeilBrown 					if (disk < 0)
25903e198f78SNeilBrown 						disk = i;
25913e198f78SNeilBrown 				}
259206f60385SNeilBrown 				bio->bi_rw = READ;
259306f60385SNeilBrown 				bio->bi_end_io = end_sync_read;
25943e198f78SNeilBrown 				read_targets++;
2595d57368afSAlexander Lyakas 			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2596d57368afSAlexander Lyakas 				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2597d57368afSAlexander Lyakas 				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2598d57368afSAlexander Lyakas 				/*
2599d57368afSAlexander Lyakas 				 * The device is suitable for reading (InSync),
2600d57368afSAlexander Lyakas 				 * but has bad block(s) here. Let's try to correct them,
2601d57368afSAlexander Lyakas 				 * if we are doing resync or repair. Otherwise, leave
2602d57368afSAlexander Lyakas 				 * this device alone for this sync request.
2603d57368afSAlexander Lyakas 				 */
2604d57368afSAlexander Lyakas 				bio->bi_rw = WRITE;
2605d57368afSAlexander Lyakas 				bio->bi_end_io = end_sync_write;
2606d57368afSAlexander Lyakas 				write_targets++;
26073e198f78SNeilBrown 			}
260806f60385SNeilBrown 		}
260906f60385SNeilBrown 		if (bio->bi_end_io) {
26103e198f78SNeilBrown 			atomic_inc(&rdev->nr_pending);
26114f024f37SKent Overstreet 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
26123e198f78SNeilBrown 			bio->bi_bdev = rdev->bdev;
26131da177e4SLinus Torvalds 			bio->bi_private = r1_bio;
26141da177e4SLinus Torvalds 		}
261506f60385SNeilBrown 	}
26163e198f78SNeilBrown 	rcu_read_unlock();
26173e198f78SNeilBrown 	if (disk < 0)
26183e198f78SNeilBrown 		disk = wonly;
26193e198f78SNeilBrown 	r1_bio->read_disk = disk;
2620191ea9b2SNeilBrown 
262106f60385SNeilBrown 	if (read_targets == 0 && min_bad > 0) {
262206f60385SNeilBrown 		/* These sectors are bad on all InSync devices, so we
262306f60385SNeilBrown 		 * need to mark them bad on all write targets
262406f60385SNeilBrown 		 */
262506f60385SNeilBrown 		int ok = 1;
26268f19ccb2SNeilBrown 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
262706f60385SNeilBrown 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2628a42f9d83Smajianpeng 				struct md_rdev *rdev = conf->mirrors[i].rdev;
262906f60385SNeilBrown 				ok = rdev_set_badblocks(rdev, sector_nr,
263006f60385SNeilBrown 							min_bad, 0
263106f60385SNeilBrown 					) && ok;
263206f60385SNeilBrown 			}
263306f60385SNeilBrown 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
263406f60385SNeilBrown 		*skipped = 1;
263506f60385SNeilBrown 		put_buf(r1_bio);
263606f60385SNeilBrown 
263706f60385SNeilBrown 		if (!ok) {
263806f60385SNeilBrown 			/* Cannot record the badblocks, so need to
263906f60385SNeilBrown 			 * abort the resync.
264006f60385SNeilBrown 			 * If there are multiple read targets, could just
264106f60385SNeilBrown 			 * fail the really bad ones ???
264206f60385SNeilBrown 			 */
264306f60385SNeilBrown 			conf->recovery_disabled = mddev->recovery_disabled;
264406f60385SNeilBrown 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
264506f60385SNeilBrown 			return 0;
264606f60385SNeilBrown 		} else
264706f60385SNeilBrown 			return min_bad;
264806f60385SNeilBrown 
264906f60385SNeilBrown 	}
265006f60385SNeilBrown 	if (min_bad > 0 && min_bad < good_sectors) {
265106f60385SNeilBrown 		/* only resync enough to reach the next bad->good
265206f60385SNeilBrown 		 * transition */
265306f60385SNeilBrown 		good_sectors = min_bad;
265406f60385SNeilBrown 	}
265506f60385SNeilBrown 
26563e198f78SNeilBrown 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
26573e198f78SNeilBrown 		/* extra read targets are also write targets */
26583e198f78SNeilBrown 		write_targets += read_targets-1;
26593e198f78SNeilBrown 
26603e198f78SNeilBrown 	if (write_targets == 0 || read_targets == 0) {
26611da177e4SLinus Torvalds 		/* There is nowhere to write, so all non-sync
26621da177e4SLinus Torvalds 		 * drives must be failed - so we are finished
26631da177e4SLinus Torvalds 		 */
2664b7219ccbSNeilBrown 		sector_t rv;
2665b7219ccbSNeilBrown 		if (min_bad > 0)
2666b7219ccbSNeilBrown 			max_sector = sector_nr + min_bad;
2667b7219ccbSNeilBrown 		rv = max_sector - sector_nr;
266857afd89fSNeilBrown 		*skipped = 1;
26691da177e4SLinus Torvalds 		put_buf(r1_bio);
26701da177e4SLinus Torvalds 		return rv;
26711da177e4SLinus Torvalds 	}
26721da177e4SLinus Torvalds 
2673c6207277SNeilBrown 	if (max_sector > mddev->resync_max)
2674c6207277SNeilBrown 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
267506f60385SNeilBrown 	if (max_sector > sector_nr + good_sectors)
267606f60385SNeilBrown 		max_sector = sector_nr + good_sectors;
26771da177e4SLinus Torvalds 	nr_sectors = 0;
2678289e99e8SNeilBrown 	sync_blocks = 0;
26791da177e4SLinus Torvalds 	do {
26801da177e4SLinus Torvalds 		struct page *page;
26811da177e4SLinus Torvalds 		int len = PAGE_SIZE;
26821da177e4SLinus Torvalds 		if (sector_nr + (len>>9) > max_sector)
26831da177e4SLinus Torvalds 			len = (max_sector - sector_nr) << 9;
26841da177e4SLinus Torvalds 		if (len == 0)
26851da177e4SLinus Torvalds 			break;
2686ab7a30c7SNeilBrown 		if (sync_blocks == 0) {
26876a806c51SNeilBrown 			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2688e3b9703eSNeilBrown 					       &sync_blocks, still_degraded) &&
2689e5de485fSNeilBrown 			    !conf->fullsync &&
2690e5de485fSNeilBrown 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2691191ea9b2SNeilBrown 				break;
26929e77c485SEric Sesterhenn 			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
26937571ae88SNeilBrown 			if ((len >> 9) > sync_blocks)
26946a806c51SNeilBrown 				len = sync_blocks<<9;
2695ab7a30c7SNeilBrown 		}
2696191ea9b2SNeilBrown 
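		/* Attach the next preallocated resync page to every bio taking
		 * part in this request; if any bio cannot accept another page,
		 * back the page out of the bios already extended so they all
		 * cover the same range, and submit what we have.
		 */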
26978f19ccb2SNeilBrown 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
26981da177e4SLinus Torvalds 			bio = r1_bio->bios[i];
26991da177e4SLinus Torvalds 			if (bio->bi_end_io) {
2700d11c171eSNeilBrown 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
27011da177e4SLinus Torvalds 				if (bio_add_page(bio, page, len, 0) == 0) {
27021da177e4SLinus Torvalds 					/* stop here */
2703d11c171eSNeilBrown 					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
27041da177e4SLinus Torvalds 					while (i > 0) {
27051da177e4SLinus Torvalds 						i--;
27061da177e4SLinus Torvalds 						bio = r1_bio->bios[i];
27076a806c51SNeilBrown 						if (bio->bi_end_io==NULL)
27086a806c51SNeilBrown 							continue;
27091da177e4SLinus Torvalds 						/* remove last page from this bio */
27101da177e4SLinus Torvalds 						bio->bi_vcnt--;
27114f024f37SKent Overstreet 						bio->bi_iter.bi_size -= len;
27121da177e4SLinus Torvalds 						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
27131da177e4SLinus Torvalds 					}
27141da177e4SLinus Torvalds 					goto bio_full;
27151da177e4SLinus Torvalds 				}
27161da177e4SLinus Torvalds 			}
27171da177e4SLinus Torvalds 		}
27181da177e4SLinus Torvalds 		nr_sectors += len>>9;
27191da177e4SLinus Torvalds 		sector_nr += len>>9;
2720191ea9b2SNeilBrown 		sync_blocks -= (len>>9);
27211da177e4SLinus Torvalds 	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
27221da177e4SLinus Torvalds  bio_full:
27231da177e4SLinus Torvalds 	r1_bio->sectors = nr_sectors;
27241da177e4SLinus Torvalds 
2725d11c171eSNeilBrown 	/* For a user-requested sync, we read all readable devices and do a
2726d11c171eSNeilBrown 	 * compare
2727d11c171eSNeilBrown 	 */
2728d11c171eSNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2729d11c171eSNeilBrown 		atomic_set(&r1_bio->remaining, read_targets);
27302d4f4f33SNeilBrown 		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2731d11c171eSNeilBrown 			bio = r1_bio->bios[i];
2732d11c171eSNeilBrown 			if (bio->bi_end_io == end_sync_read) {
27332d4f4f33SNeilBrown 				read_targets--;
2734ddac7c7eSNeilBrown 				md_sync_acct(bio->bi_bdev, nr_sectors);
27351da177e4SLinus Torvalds 				generic_make_request(bio);
2736d11c171eSNeilBrown 			}
2737d11c171eSNeilBrown 		}
2738d11c171eSNeilBrown 	} else {
2739d11c171eSNeilBrown 		atomic_set(&r1_bio->remaining, 1);
2740d11c171eSNeilBrown 		bio = r1_bio->bios[r1_bio->read_disk];
2741ddac7c7eSNeilBrown 		md_sync_acct(bio->bi_bdev, nr_sectors);
2742d11c171eSNeilBrown 		generic_make_request(bio);
2743d11c171eSNeilBrown 
2744d11c171eSNeilBrown 	}
27451da177e4SLinus Torvalds 	return nr_sectors;
27461da177e4SLinus Torvalds }
27471da177e4SLinus Torvalds 
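/* raid1 keeps a complete copy on every device, so the array size is
 * simply the per-device data size (or the explicitly requested size).
 */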
2748fd01b88cSNeilBrown static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
274980c3a6ceSDan Williams {
275080c3a6ceSDan Williams 	if (sectors)
275180c3a6ceSDan Williams 		return sectors;
275280c3a6ceSDan Williams 
275380c3a6ceSDan Williams 	return mddev->dev_sectors;
275480c3a6ceSDan Williams }
275580c3a6ceSDan Williams 
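/* Build the r1conf for an array: allocate the mirrors array and bio
 * pools, slot each rdev into conf->mirrors (replacement devices go in
 * the upper half), and register the raid1d thread.  Returns ERR_PTR()
 * on failure.
 */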
2756e8096360SNeilBrown static struct r1conf *setup_conf(struct mddev *mddev)
27571da177e4SLinus Torvalds {
2758e8096360SNeilBrown 	struct r1conf *conf;
2759709ae487SNeilBrown 	int i;
27600eaf822cSJonathan Brassow 	struct raid1_info *disk;
27613cb03002SNeilBrown 	struct md_rdev *rdev;
2762709ae487SNeilBrown 	int err = -ENOMEM;
27631da177e4SLinus Torvalds 
2764e8096360SNeilBrown 	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
27651da177e4SLinus Torvalds 	if (!conf)
2766709ae487SNeilBrown 		goto abort;
27671da177e4SLinus Torvalds 
27680eaf822cSJonathan Brassow 	conf->mirrors = kzalloc(sizeof(struct raid1_info)
27698f19ccb2SNeilBrown 				* mddev->raid_disks * 2,
27701da177e4SLinus Torvalds 				 GFP_KERNEL);
27711da177e4SLinus Torvalds 	if (!conf->mirrors)
2772709ae487SNeilBrown 		goto abort;
27731da177e4SLinus Torvalds 
2774ddaf22abSNeilBrown 	conf->tmppage = alloc_page(GFP_KERNEL);
2775ddaf22abSNeilBrown 	if (!conf->tmppage)
2776709ae487SNeilBrown 		goto abort;
2777ddaf22abSNeilBrown 
2778709ae487SNeilBrown 	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
27791da177e4SLinus Torvalds 	if (!conf->poolinfo)
2780709ae487SNeilBrown 		goto abort;
27818f19ccb2SNeilBrown 	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
27821da177e4SLinus Torvalds 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
27831da177e4SLinus Torvalds 					  r1bio_pool_free,
27841da177e4SLinus Torvalds 					  conf->poolinfo);
27851da177e4SLinus Torvalds 	if (!conf->r1bio_pool)
2786709ae487SNeilBrown 		goto abort;
2787709ae487SNeilBrown 
2788ed9bfdf1SNeilBrown 	conf->poolinfo->mddev = mddev;
27891da177e4SLinus Torvalds 
2790c19d5798SNeilBrown 	err = -EINVAL;
2791e7e72bf6SNeil Brown 	spin_lock_init(&conf->device_lock);
2792dafb20faSNeilBrown 	rdev_for_each(rdev, mddev) {
2793aba336bdSNeilBrown 		struct request_queue *q;
2794709ae487SNeilBrown 		int disk_idx = rdev->raid_disk;
27951da177e4SLinus Torvalds 		if (disk_idx >= mddev->raid_disks
27961da177e4SLinus Torvalds 		    || disk_idx < 0)
27971da177e4SLinus Torvalds 			continue;
2798c19d5798SNeilBrown 		if (test_bit(Replacement, &rdev->flags))
279902b898f2SNeilBrown 			disk = conf->mirrors + mddev->raid_disks + disk_idx;
2800c19d5798SNeilBrown 		else
28011da177e4SLinus Torvalds 			disk = conf->mirrors + disk_idx;
28021da177e4SLinus Torvalds 
2803c19d5798SNeilBrown 		if (disk->rdev)
2804c19d5798SNeilBrown 			goto abort;
28051da177e4SLinus Torvalds 		disk->rdev = rdev;
2806aba336bdSNeilBrown 		q = bdev_get_queue(rdev->bdev);
2807aba336bdSNeilBrown 		if (q->merge_bvec_fn)
2808aba336bdSNeilBrown 			mddev->merge_check_needed = 1;
28091da177e4SLinus Torvalds 
28101da177e4SLinus Torvalds 		disk->head_position = 0;
281112cee5a8SShaohua Li 		disk->seq_start = MaxSector;
28121da177e4SLinus Torvalds 	}
28131da177e4SLinus Torvalds 	conf->raid_disks = mddev->raid_disks;
28141da177e4SLinus Torvalds 	conf->mddev = mddev;
28151da177e4SLinus Torvalds 	INIT_LIST_HEAD(&conf->retry_list);
28161da177e4SLinus Torvalds 
28171da177e4SLinus Torvalds 	spin_lock_init(&conf->resync_lock);
281817999be4SNeilBrown 	init_waitqueue_head(&conf->wait_barrier);
28191da177e4SLinus Torvalds 
2820191ea9b2SNeilBrown 	bio_list_init(&conf->pending_bio_list);
282134db0cd6SNeilBrown 	conf->pending_count = 0;
2822d890fa2bSNeilBrown 	conf->recovery_disabled = mddev->recovery_disabled - 1;
2823191ea9b2SNeilBrown 
282479ef3a8aSmajianpeng 	conf->start_next_window = MaxSector;
282579ef3a8aSmajianpeng 	conf->current_window_requests = conf->next_window_requests = 0;
282679ef3a8aSmajianpeng 
2827c19d5798SNeilBrown 	err = -EIO;
28288f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
28291da177e4SLinus Torvalds 
28301da177e4SLinus Torvalds 		disk = conf->mirrors + i;
28311da177e4SLinus Torvalds 
2832c19d5798SNeilBrown 		if (i < conf->raid_disks &&
2833c19d5798SNeilBrown 		    disk[conf->raid_disks].rdev) {
2834c19d5798SNeilBrown 			/* This slot has a replacement. */
2835c19d5798SNeilBrown 			if (!disk->rdev) {
2836c19d5798SNeilBrown 				/* No original, just make the replacement
2837c19d5798SNeilBrown 				 * a recovering spare
2838c19d5798SNeilBrown 				 */
2839c19d5798SNeilBrown 				disk->rdev =
2840c19d5798SNeilBrown 					disk[conf->raid_disks].rdev;
2841c19d5798SNeilBrown 				disk[conf->raid_disks].rdev = NULL;
2842c19d5798SNeilBrown 			} else if (!test_bit(In_sync, &disk->rdev->flags))
2843c19d5798SNeilBrown 				/* Original is not in_sync - bad */
2844c19d5798SNeilBrown 				goto abort;
2845c19d5798SNeilBrown 		}
2846c19d5798SNeilBrown 
28475fd6c1dcSNeilBrown 		if (!disk->rdev ||
28485fd6c1dcSNeilBrown 		    !test_bit(In_sync, &disk->rdev->flags)) {
28491da177e4SLinus Torvalds 			disk->head_position = 0;
28504f0a5e01SJonathan Brassow 			if (disk->rdev &&
28514f0a5e01SJonathan Brassow 			    (disk->rdev->saved_raid_disk < 0))
285217571284SNeilBrown 				conf->fullsync = 1;
2853be4d3280SShaohua Li 		}
28541da177e4SLinus Torvalds 	}
2855709ae487SNeilBrown 
2856709ae487SNeilBrown 	err = -ENOMEM;
28570232605dSNeilBrown 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
2858709ae487SNeilBrown 	if (!conf->thread) {
28591da177e4SLinus Torvalds 		printk(KERN_ERR
28609dd1e2faSNeilBrown 		       "md/raid1:%s: couldn't allocate thread\n",
28611da177e4SLinus Torvalds 		       mdname(mddev));
2862709ae487SNeilBrown 		goto abort;
28631da177e4SLinus Torvalds 	}
2864191ea9b2SNeilBrown 
2865709ae487SNeilBrown 	return conf;
2866709ae487SNeilBrown 
2867709ae487SNeilBrown  abort:
2868709ae487SNeilBrown 	if (conf) {
2869709ae487SNeilBrown 		if (conf->r1bio_pool)
2870709ae487SNeilBrown 			mempool_destroy(conf->r1bio_pool);
2871709ae487SNeilBrown 		kfree(conf->mirrors);
2872709ae487SNeilBrown 		safe_put_page(conf->tmppage);
2873709ae487SNeilBrown 		kfree(conf->poolinfo);
2874709ae487SNeilBrown 		kfree(conf);
2875709ae487SNeilBrown 	}
2876709ae487SNeilBrown 	return ERR_PTR(err);
2877709ae487SNeilBrown }
2878709ae487SNeilBrown 
28795220ea1eSmajianpeng static int stop(struct mddev *mddev);
2880fd01b88cSNeilBrown static int run(struct mddev *mddev)
2881709ae487SNeilBrown {
2882e8096360SNeilBrown 	struct r1conf *conf;
2883709ae487SNeilBrown 	int i;
28843cb03002SNeilBrown 	struct md_rdev *rdev;
28855220ea1eSmajianpeng 	int ret;
28862ff8cc2cSShaohua Li 	bool discard_supported = false;
2887709ae487SNeilBrown 
2888709ae487SNeilBrown 	if (mddev->level != 1) {
28899dd1e2faSNeilBrown 		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2890709ae487SNeilBrown 		       mdname(mddev), mddev->level);
2891709ae487SNeilBrown 		return -EIO;
2892709ae487SNeilBrown 	}
2893709ae487SNeilBrown 	if (mddev->reshape_position != MaxSector) {
28949dd1e2faSNeilBrown 		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2895709ae487SNeilBrown 		       mdname(mddev));
2896709ae487SNeilBrown 		return -EIO;
2897709ae487SNeilBrown 	}
2898709ae487SNeilBrown 	/*
2899709ae487SNeilBrown 	 * copy the already verified devices into our private RAID1
2900709ae487SNeilBrown 	 * bookkeeping area. [whatever we allocate in run(),
2901709ae487SNeilBrown 	 * should be freed in stop()]
2902709ae487SNeilBrown 	 */
2903709ae487SNeilBrown 	if (mddev->private == NULL)
2904709ae487SNeilBrown 		conf = setup_conf(mddev);
2905709ae487SNeilBrown 	else
2906709ae487SNeilBrown 		conf = mddev->private;
2907709ae487SNeilBrown 
2908709ae487SNeilBrown 	if (IS_ERR(conf))
2909709ae487SNeilBrown 		return PTR_ERR(conf);
2910709ae487SNeilBrown 
2911c8dc9c65SJoe Lawrence 	if (mddev->queue)
29125026d7a9SH. Peter Anvin 		blk_queue_max_write_same_sectors(mddev->queue, 0);
29135026d7a9SH. Peter Anvin 
2914dafb20faSNeilBrown 	rdev_for_each(rdev, mddev) {
29151ed7242eSJonathan Brassow 		if (!mddev->gendisk)
29161ed7242eSJonathan Brassow 			continue;
2917709ae487SNeilBrown 		disk_stack_limits(mddev->gendisk, rdev->bdev,
2918709ae487SNeilBrown 				  rdev->data_offset << 9);
29192ff8cc2cSShaohua Li 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
29202ff8cc2cSShaohua Li 			discard_supported = true;
2921709ae487SNeilBrown 	}
2922709ae487SNeilBrown 
2923709ae487SNeilBrown 	mddev->degraded = 0;
2924709ae487SNeilBrown 	for (i=0; i < conf->raid_disks; i++)
2925709ae487SNeilBrown 		if (conf->mirrors[i].rdev == NULL ||
2926709ae487SNeilBrown 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2927709ae487SNeilBrown 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2928709ae487SNeilBrown 			mddev->degraded++;
2929709ae487SNeilBrown 
2930709ae487SNeilBrown 	if (conf->raid_disks - mddev->degraded == 1)
2931709ae487SNeilBrown 		mddev->recovery_cp = MaxSector;
2932709ae487SNeilBrown 
29338c6ac868SAndre Noll 	if (mddev->recovery_cp != MaxSector)
29349dd1e2faSNeilBrown 		printk(KERN_NOTICE "md/raid1:%s: not clean"
29358c6ac868SAndre Noll 		       " -- starting background reconstruction\n",
29368c6ac868SAndre Noll 		       mdname(mddev));
29371da177e4SLinus Torvalds 	printk(KERN_INFO
29389dd1e2faSNeilBrown 		"md/raid1:%s: active with %d out of %d mirrors\n",
29391da177e4SLinus Torvalds 		mdname(mddev), mddev->raid_disks - mddev->degraded,
29401da177e4SLinus Torvalds 		mddev->raid_disks);
2941709ae487SNeilBrown 
29421da177e4SLinus Torvalds 	/*
29431da177e4SLinus Torvalds 	 * Ok, everything is just fine now
29441da177e4SLinus Torvalds 	 */
2945709ae487SNeilBrown 	mddev->thread = conf->thread;
2946709ae487SNeilBrown 	conf->thread = NULL;
2947709ae487SNeilBrown 	mddev->private = conf;
2948709ae487SNeilBrown 
29491f403624SDan Williams 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
29501da177e4SLinus Torvalds 
29511ed7242eSJonathan Brassow 	if (mddev->queue) {
29520d129228SNeilBrown 		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
29530d129228SNeilBrown 		mddev->queue->backing_dev_info.congested_data = mddev;
29546b740b8dSNeilBrown 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
29552ff8cc2cSShaohua Li 
29562ff8cc2cSShaohua Li 		if (discard_supported)
29572ff8cc2cSShaohua Li 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
29582ff8cc2cSShaohua Li 						mddev->queue);
29592ff8cc2cSShaohua Li 		else
29602ff8cc2cSShaohua Li 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
29612ff8cc2cSShaohua Li 						  mddev->queue);
29621ed7242eSJonathan Brassow 	}
29635220ea1eSmajianpeng 
29645220ea1eSmajianpeng 	ret =  md_integrity_register(mddev);
29655220ea1eSmajianpeng 	if (ret)
29665220ea1eSmajianpeng 		stop(mddev);
29675220ea1eSmajianpeng 	return ret;
29681da177e4SLinus Torvalds }
29691da177e4SLinus Torvalds 
2970fd01b88cSNeilBrown static int stop(struct mddev *mddev)
29711da177e4SLinus Torvalds {
2972e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
29734b6d287fSNeilBrown 	struct bitmap *bitmap = mddev->bitmap;
29744b6d287fSNeilBrown 
29754b6d287fSNeilBrown 	/* wait for behind writes to complete */
2976e555190dSNeilBrown 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
29779dd1e2faSNeilBrown 		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
29789dd1e2faSNeilBrown 		       mdname(mddev));
29794b6d287fSNeilBrown 		/* need to kick something here to make sure I/O goes? */
2980e555190dSNeilBrown 		wait_event(bitmap->behind_wait,
2981e555190dSNeilBrown 			   atomic_read(&bitmap->behind_writes) == 0);
29824b6d287fSNeilBrown 	}
29831da177e4SLinus Torvalds 
298407169fd4Smajianpeng 	freeze_array(conf, 0);
298507169fd4Smajianpeng 	unfreeze_array(conf);
2986409c57f3SNeilBrown 
298701f96c0aSNeilBrown 	md_unregister_thread(&mddev->thread);
29881da177e4SLinus Torvalds 	if (conf->r1bio_pool)
29891da177e4SLinus Torvalds 		mempool_destroy(conf->r1bio_pool);
29901da177e4SLinus Torvalds 	kfree(conf->mirrors);
29910fea7ed8SHirokazu Takahashi 	safe_put_page(conf->tmppage);
29921da177e4SLinus Torvalds 	kfree(conf->poolinfo);
29931da177e4SLinus Torvalds 	kfree(conf);
29941da177e4SLinus Torvalds 	mddev->private = NULL;
29951da177e4SLinus Torvalds 	return 0;
29961da177e4SLinus Torvalds }
29971da177e4SLinus Torvalds 
2998fd01b88cSNeilBrown static int raid1_resize(struct mddev *mddev, sector_t sectors)
29991da177e4SLinus Torvalds {
30001da177e4SLinus Torvalds 	/* no resync is happening, and there is enough space
30011da177e4SLinus Torvalds 	 * on all devices, so we can resize.
30021da177e4SLinus Torvalds 	 * We need to make sure resync covers any new space.
30031da177e4SLinus Torvalds 	 * If the array is shrinking we should possibly wait until
30041da177e4SLinus Torvalds 	 * any io in the removed space completes, but it hardly seems
30051da177e4SLinus Torvalds 	 * worth it.
30061da177e4SLinus Torvalds 	 */
3007a4a6125aSNeilBrown 	sector_t newsize = raid1_size(mddev, sectors, 0);
3008a4a6125aSNeilBrown 	if (mddev->external_size &&
3009a4a6125aSNeilBrown 	    mddev->array_sectors > newsize)
3010b522adcdSDan Williams 		return -EINVAL;
3011a4a6125aSNeilBrown 	if (mddev->bitmap) {
3012a4a6125aSNeilBrown 		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3013a4a6125aSNeilBrown 		if (ret)
3014a4a6125aSNeilBrown 			return ret;
3015a4a6125aSNeilBrown 	}
3016a4a6125aSNeilBrown 	md_set_array_sectors(mddev, newsize);
3017f233ea5cSAndre Noll 	set_capacity(mddev->gendisk, mddev->array_sectors);
3018449aad3eSNeilBrown 	revalidate_disk(mddev->gendisk);
3019b522adcdSDan Williams 	if (sectors > mddev->dev_sectors &&
3020b098636cSNeilBrown 	    mddev->recovery_cp > mddev->dev_sectors) {
302158c0fed4SAndre Noll 		mddev->recovery_cp = mddev->dev_sectors;
30221da177e4SLinus Torvalds 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
30231da177e4SLinus Torvalds 	}
3024b522adcdSDan Williams 	mddev->dev_sectors = sectors;
30254b5c7ae8SNeilBrown 	mddev->resync_max_sectors = sectors;
30261da177e4SLinus Torvalds 	return 0;
30271da177e4SLinus Torvalds }
30281da177e4SLinus Torvalds 
3029fd01b88cSNeilBrown static int raid1_reshape(struct mddev *mddev)
30301da177e4SLinus Torvalds {
30311da177e4SLinus Torvalds 	/* We need to:
30321da177e4SLinus Torvalds 	 * 1/ resize the r1bio_pool
30331da177e4SLinus Torvalds 	 * 2/ resize conf->mirrors
30341da177e4SLinus Torvalds 	 *
30351da177e4SLinus Torvalds 	 * We allocate a new r1bio_pool if we can.
30361da177e4SLinus Torvalds 	 * Then raise a device barrier and wait until all IO stops.
30371da177e4SLinus Torvalds 	 * Then resize conf->mirrors and swap in the new r1bio pool.
30386ea9c07cSNeilBrown 	 *
30396ea9c07cSNeilBrown 	 * At the same time, we "pack" the devices so that all the missing
30406ea9c07cSNeilBrown 	 * devices have the higher raid_disk numbers.
30411da177e4SLinus Torvalds 	 */
30421da177e4SLinus Torvalds 	mempool_t *newpool, *oldpool;
30431da177e4SLinus Torvalds 	struct pool_info *newpoolinfo;
30440eaf822cSJonathan Brassow 	struct raid1_info *newmirrors;
3045e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
304663c70c4fSNeilBrown 	int cnt, raid_disks;
3047c04be0aaSNeilBrown 	unsigned long flags;
3048b5470dc5SDan Williams 	int d, d2, err;
30491da177e4SLinus Torvalds 
305063c70c4fSNeilBrown 	/* Cannot change chunk_size, layout, or level */
3051664e7c41SAndre Noll 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
305263c70c4fSNeilBrown 	    mddev->layout != mddev->new_layout ||
305363c70c4fSNeilBrown 	    mddev->level != mddev->new_level) {
3054664e7c41SAndre Noll 		mddev->new_chunk_sectors = mddev->chunk_sectors;
305563c70c4fSNeilBrown 		mddev->new_layout = mddev->layout;
305663c70c4fSNeilBrown 		mddev->new_level = mddev->level;
305763c70c4fSNeilBrown 		return -EINVAL;
305863c70c4fSNeilBrown 	}
305963c70c4fSNeilBrown 
3060b5470dc5SDan Williams 	err = md_allow_write(mddev);
3061b5470dc5SDan Williams 	if (err)
3062b5470dc5SDan Williams 		return err;
30632a2275d6SNeilBrown 
306463c70c4fSNeilBrown 	raid_disks = mddev->raid_disks + mddev->delta_disks;
306563c70c4fSNeilBrown 
30666ea9c07cSNeilBrown 	if (raid_disks < conf->raid_disks) {
30676ea9c07cSNeilBrown 		cnt=0;
30686ea9c07cSNeilBrown 		for (d= 0; d < conf->raid_disks; d++)
30691da177e4SLinus Torvalds 			if (conf->mirrors[d].rdev)
30706ea9c07cSNeilBrown 				cnt++;
30716ea9c07cSNeilBrown 		if (cnt > raid_disks)
30721da177e4SLinus Torvalds 			return -EBUSY;
30736ea9c07cSNeilBrown 	}
30741da177e4SLinus Torvalds 
30751da177e4SLinus Torvalds 	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
30761da177e4SLinus Torvalds 	if (!newpoolinfo)
30771da177e4SLinus Torvalds 		return -ENOMEM;
30781da177e4SLinus Torvalds 	newpoolinfo->mddev = mddev;
30798f19ccb2SNeilBrown 	newpoolinfo->raid_disks = raid_disks * 2;
30801da177e4SLinus Torvalds 
30811da177e4SLinus Torvalds 	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
30821da177e4SLinus Torvalds 				 r1bio_pool_free, newpoolinfo);
30831da177e4SLinus Torvalds 	if (!newpool) {
30841da177e4SLinus Torvalds 		kfree(newpoolinfo);
30851da177e4SLinus Torvalds 		return -ENOMEM;
30861da177e4SLinus Torvalds 	}
30870eaf822cSJonathan Brassow 	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
30888f19ccb2SNeilBrown 			     GFP_KERNEL);
30891da177e4SLinus Torvalds 	if (!newmirrors) {
30901da177e4SLinus Torvalds 		kfree(newpoolinfo);
30911da177e4SLinus Torvalds 		mempool_destroy(newpool);
30921da177e4SLinus Torvalds 		return -ENOMEM;
30931da177e4SLinus Torvalds 	}
30941da177e4SLinus Torvalds 
3095e2d59925SNeilBrown 	freeze_array(conf, 0);
30961da177e4SLinus Torvalds 
30971da177e4SLinus Torvalds 	/* ok, everything is stopped */
30981da177e4SLinus Torvalds 	oldpool = conf->r1bio_pool;
30991da177e4SLinus Torvalds 	conf->r1bio_pool = newpool;
31006ea9c07cSNeilBrown 
3101a88aa786SNeilBrown 	for (d = d2 = 0; d < conf->raid_disks; d++) {
31023cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[d].rdev;
3103a88aa786SNeilBrown 		if (rdev && rdev->raid_disk != d2) {
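			/* Drop the sysfs link for the old slot, and any link
			 * that may already exist under the new name, before
			 * linking the device at its new raid_disk number.
			 */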
310436fad858SNamhyung Kim 			sysfs_unlink_rdev(mddev, rdev);
3105a88aa786SNeilBrown 			rdev->raid_disk = d2;
310636fad858SNamhyung Kim 			sysfs_unlink_rdev(mddev, rdev);
310736fad858SNamhyung Kim 			if (sysfs_link_rdev(mddev, rdev))
3108a88aa786SNeilBrown 				printk(KERN_WARNING
310936fad858SNamhyung Kim 				       "md/raid1:%s: cannot register rd%d\n",
311036fad858SNamhyung Kim 				       mdname(mddev), rdev->raid_disk);
3111a88aa786SNeilBrown 		}
3112a88aa786SNeilBrown 		if (rdev)
3113a88aa786SNeilBrown 			newmirrors[d2++].rdev = rdev;
31146ea9c07cSNeilBrown 	}
31151da177e4SLinus Torvalds 	kfree(conf->mirrors);
31161da177e4SLinus Torvalds 	conf->mirrors = newmirrors;
31171da177e4SLinus Torvalds 	kfree(conf->poolinfo);
31181da177e4SLinus Torvalds 	conf->poolinfo = newpoolinfo;
31191da177e4SLinus Torvalds 
3120c04be0aaSNeilBrown 	spin_lock_irqsave(&conf->device_lock, flags);
31211da177e4SLinus Torvalds 	mddev->degraded += (raid_disks - conf->raid_disks);
3122c04be0aaSNeilBrown 	spin_unlock_irqrestore(&conf->device_lock, flags);
31231da177e4SLinus Torvalds 	conf->raid_disks = mddev->raid_disks = raid_disks;
312463c70c4fSNeilBrown 	mddev->delta_disks = 0;
31251da177e4SLinus Torvalds 
3126e2d59925SNeilBrown 	unfreeze_array(conf);
31271da177e4SLinus Torvalds 
31281da177e4SLinus Torvalds 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
31291da177e4SLinus Torvalds 	md_wakeup_thread(mddev->thread);
31301da177e4SLinus Torvalds 
31311da177e4SLinus Torvalds 	mempool_destroy(oldpool);
31321da177e4SLinus Torvalds 	return 0;
31331da177e4SLinus Torvalds }
31341da177e4SLinus Torvalds 
3135fd01b88cSNeilBrown static void raid1_quiesce(struct mddev *mddev, int state)
313636fa3063SNeilBrown {
3137e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
313836fa3063SNeilBrown 
313936fa3063SNeilBrown 	switch(state) {
31406eef4b21SNeilBrown 	case 2: /* wake for suspend */
31416eef4b21SNeilBrown 		wake_up(&conf->wait_barrier);
31426eef4b21SNeilBrown 		break;
31439e6603daSNeilBrown 	case 1:
314407169fd4Smajianpeng 		freeze_array(conf, 0);
314536fa3063SNeilBrown 		break;
31469e6603daSNeilBrown 	case 0:
314707169fd4Smajianpeng 		unfreeze_array(conf);
314836fa3063SNeilBrown 		break;
314936fa3063SNeilBrown 	}
315036fa3063SNeilBrown }
315136fa3063SNeilBrown 
3152fd01b88cSNeilBrown static void *raid1_takeover(struct mddev *mddev)
3153709ae487SNeilBrown {
3154709ae487SNeilBrown 	/* raid1 can take over:
3155709ae487SNeilBrown 	 *  raid5 with 2 devices, any layout or chunk size
3156709ae487SNeilBrown 	 */
3157709ae487SNeilBrown 	if (mddev->level == 5 && mddev->raid_disks == 2) {
3158e8096360SNeilBrown 		struct r1conf *conf;
3159709ae487SNeilBrown 		mddev->new_level = 1;
3160709ae487SNeilBrown 		mddev->new_layout = 0;
3161709ae487SNeilBrown 		mddev->new_chunk_sectors = 0;
3162709ae487SNeilBrown 		conf = setup_conf(mddev);
3163709ae487SNeilBrown 		if (!IS_ERR(conf))
316407169fd4Smajianpeng 			/* Array must appear to be quiesced */
316507169fd4Smajianpeng 			conf->array_frozen = 1;
3166709ae487SNeilBrown 		return conf;
3167709ae487SNeilBrown 	}
3168709ae487SNeilBrown 	return ERR_PTR(-EINVAL);
3169709ae487SNeilBrown }
31701da177e4SLinus Torvalds 
317184fc4b56SNeilBrown static struct md_personality raid1_personality =
31721da177e4SLinus Torvalds {
31731da177e4SLinus Torvalds 	.name		= "raid1",
31742604b703SNeilBrown 	.level		= 1,
31751da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
31761da177e4SLinus Torvalds 	.make_request	= make_request,
31771da177e4SLinus Torvalds 	.run		= run,
31781da177e4SLinus Torvalds 	.stop		= stop,
31791da177e4SLinus Torvalds 	.status		= status,
31801da177e4SLinus Torvalds 	.error_handler	= error,
31811da177e4SLinus Torvalds 	.hot_add_disk	= raid1_add_disk,
31821da177e4SLinus Torvalds 	.hot_remove_disk= raid1_remove_disk,
31831da177e4SLinus Torvalds 	.spare_active	= raid1_spare_active,
31841da177e4SLinus Torvalds 	.sync_request	= sync_request,
31851da177e4SLinus Torvalds 	.resize		= raid1_resize,
318680c3a6ceSDan Williams 	.size		= raid1_size,
318763c70c4fSNeilBrown 	.check_reshape	= raid1_reshape,
318836fa3063SNeilBrown 	.quiesce	= raid1_quiesce,
3189709ae487SNeilBrown 	.takeover	= raid1_takeover,
31901da177e4SLinus Torvalds };
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds static int __init raid_init(void)
31931da177e4SLinus Torvalds {
31942604b703SNeilBrown 	return register_md_personality(&raid1_personality);
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds 
31971da177e4SLinus Torvalds static void raid_exit(void)
31981da177e4SLinus Torvalds {
31992604b703SNeilBrown 	unregister_md_personality(&raid1_personality);
32001da177e4SLinus Torvalds }
32011da177e4SLinus Torvalds 
32021da177e4SLinus Torvalds module_init(raid_init);
32031da177e4SLinus Torvalds module_exit(raid_exit);
32041da177e4SLinus Torvalds MODULE_LICENSE("GPL");
32050efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
32061da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */
3207d9d166c2SNeilBrown MODULE_ALIAS("md-raid1");
32082604b703SNeilBrown MODULE_ALIAS("md-level-1");
320934db0cd6SNeilBrown 
321034db0cd6SNeilBrown module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3211