xref: /openbmc/linux/drivers/md/raid1.c (revision 4f024f37)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * raid1.c : Multiple Devices driver for Linux
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * RAID-1 management functions.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
111da177e4SLinus Torvalds  *
1296de0e25SJan Engelhardt  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
131da177e4SLinus Torvalds  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
141da177e4SLinus Torvalds  *
15191ea9b2SNeilBrown  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16191ea9b2SNeilBrown  * bitmapped intelligence in resync:
17191ea9b2SNeilBrown  *
18191ea9b2SNeilBrown  *      - bitmap marked during normal i/o
19191ea9b2SNeilBrown  *      - bitmap used to skip nondirty blocks during sync
20191ea9b2SNeilBrown  *
21191ea9b2SNeilBrown  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22191ea9b2SNeilBrown  * - persistent bitmap code
23191ea9b2SNeilBrown  *
241da177e4SLinus Torvalds  * This program is free software; you can redistribute it and/or modify
251da177e4SLinus Torvalds  * it under the terms of the GNU General Public License as published by
261da177e4SLinus Torvalds  * the Free Software Foundation; either version 2, or (at your option)
271da177e4SLinus Torvalds  * any later version.
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  * You should have received a copy of the GNU General Public License
301da177e4SLinus Torvalds  * (for example /usr/src/linux/COPYING); if not, write to the Free
311da177e4SLinus Torvalds  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
321da177e4SLinus Torvalds  */
331da177e4SLinus Torvalds 
345a0e3ad6STejun Heo #include <linux/slab.h>
3525570727SStephen Rothwell #include <linux/delay.h>
36bff61975SNeilBrown #include <linux/blkdev.h>
37056075c7SPaul Gortmaker #include <linux/module.h>
38bff61975SNeilBrown #include <linux/seq_file.h>
398bda470eSChristian Dietrich #include <linux/ratelimit.h>
4043b2e5d8SNeilBrown #include "md.h"
41ef740c37SChristoph Hellwig #include "raid1.h"
42ef740c37SChristoph Hellwig #include "bitmap.h"
43191ea9b2SNeilBrown 
441da177e4SLinus Torvalds /*
451da177e4SLinus Torvalds  * Number of guaranteed r1bios in case of extreme VM load:
461da177e4SLinus Torvalds  */
471da177e4SLinus Torvalds #define	NR_RAID1_BIOS 256
481da177e4SLinus Torvalds 
49473e87ceSJonathan Brassow /* when we get a read error on a read-only array, we redirect to another
50473e87ceSJonathan Brassow  * device without failing the first device, or trying to over-write to
51473e87ceSJonathan Brassow  * correct the read error.  To keep track of bad blocks on a per-bio
52473e87ceSJonathan Brassow  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
53473e87ceSJonathan Brassow  */
54473e87ceSJonathan Brassow #define IO_BLOCKED ((struct bio *)1)
55473e87ceSJonathan Brassow /* When we successfully write to a known bad-block, we need to remove the
56473e87ceSJonathan Brassow  * bad-block marking which must be done from process context.  So we record
57473e87ceSJonathan Brassow  * the success by setting bios[n] to IO_MADE_GOOD
58473e87ceSJonathan Brassow  */
59473e87ceSJonathan Brassow #define IO_MADE_GOOD ((struct bio *)2)
60473e87ceSJonathan Brassow 
61473e87ceSJonathan Brassow #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
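/* For example, put_all_bios() below only calls bio_put() when
 * !BIO_SPECIAL(*bio), so the IO_BLOCKED and IO_MADE_GOOD sentinel values
 * are never handed to bio_put().
 */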
62473e87ceSJonathan Brassow 
6334db0cd6SNeilBrown /* When there are this many requests queued to be written by
6434db0cd6SNeilBrown  * the raid1 thread, we become 'congested' to provide back-pressure
6534db0cd6SNeilBrown  * for writeback.
6634db0cd6SNeilBrown  */
6734db0cd6SNeilBrown static int max_queued_requests = 1024;
681da177e4SLinus Torvalds 
6979ef3a8aSmajianpeng static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
7079ef3a8aSmajianpeng 			  sector_t bi_sector);
71e8096360SNeilBrown static void lower_barrier(struct r1conf *conf);
721da177e4SLinus Torvalds 
73dd0fc66fSAl Viro static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
741da177e4SLinus Torvalds {
751da177e4SLinus Torvalds 	struct pool_info *pi = data;
769f2c9d12SNeilBrown 	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds 	/* allocate an r1bio with room for raid_disks entries in the bios array */
797eaceaccSJens Axboe 	return kzalloc(size, gfp_flags);
801da177e4SLinus Torvalds }
811da177e4SLinus Torvalds 
821da177e4SLinus Torvalds static void r1bio_pool_free(void *r1_bio, void *data)
831da177e4SLinus Torvalds {
841da177e4SLinus Torvalds 	kfree(r1_bio);
851da177e4SLinus Torvalds }
861da177e4SLinus Torvalds 
871da177e4SLinus Torvalds #define RESYNC_BLOCK_SIZE (64*1024)
888e005f7cSmajianpeng #define RESYNC_DEPTH 32
891da177e4SLinus Torvalds #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
901da177e4SLinus Torvalds #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
918e005f7cSmajianpeng #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
928e005f7cSmajianpeng #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
938e005f7cSmajianpeng #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
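/* For example, assuming 4 KiB pages these work out to: RESYNC_SECTORS = 128,
 * RESYNC_PAGES = 16, RESYNC_WINDOW = 2 MiB (RESYNC_WINDOW_SECTORS = 4096)
 * and NEXT_NORMALIO_DISTANCE = 12288 sectors (6 MiB).
 */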
941da177e4SLinus Torvalds 
95dd0fc66fSAl Viro static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
961da177e4SLinus Torvalds {
971da177e4SLinus Torvalds 	struct pool_info *pi = data;
989f2c9d12SNeilBrown 	struct r1bio *r1_bio;
991da177e4SLinus Torvalds 	struct bio *bio;
1001da177e4SLinus Torvalds 	int i, j;
1011da177e4SLinus Torvalds 
1021da177e4SLinus Torvalds 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
1037eaceaccSJens Axboe 	if (!r1_bio)
1041da177e4SLinus Torvalds 		return NULL;
1051da177e4SLinus Torvalds 
1061da177e4SLinus Torvalds 	/*
1071da177e4SLinus Torvalds 	 * Allocate bios : 1 for reading, n-1 for writing
1081da177e4SLinus Torvalds 	 */
1091da177e4SLinus Torvalds 	for (j = pi->raid_disks ; j-- ; ) {
1106746557fSNeilBrown 		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
1111da177e4SLinus Torvalds 		if (!bio)
1121da177e4SLinus Torvalds 			goto out_free_bio;
1131da177e4SLinus Torvalds 		r1_bio->bios[j] = bio;
1141da177e4SLinus Torvalds 	}
1151da177e4SLinus Torvalds 	/*
1161da177e4SLinus Torvalds 	 * Allocate RESYNC_PAGES data pages and attach them to
117d11c171eSNeilBrown 	 * the first bio.
118d11c171eSNeilBrown 	 * If this is a user-requested check/repair, allocate
119d11c171eSNeilBrown 	 * RESYNC_PAGES for each bio.
1201da177e4SLinus Torvalds 	 */
121d11c171eSNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
122d11c171eSNeilBrown 		j = pi->raid_disks;
123d11c171eSNeilBrown 	else
124d11c171eSNeilBrown 		j = 1;
125d11c171eSNeilBrown 	while(j--) {
126d11c171eSNeilBrown 		bio = r1_bio->bios[j];
127a0787606SKent Overstreet 		bio->bi_vcnt = RESYNC_PAGES;
1281da177e4SLinus Torvalds 
129a0787606SKent Overstreet 		if (bio_alloc_pages(bio, gfp_flags))
130a0787606SKent Overstreet 			goto out_free_bio;
131d11c171eSNeilBrown 	}
132d11c171eSNeilBrown 	/* If not a user-requested check/repair, copy the page pointers to all bios */
133d11c171eSNeilBrown 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
134d11c171eSNeilBrown 		for (i=0; i<RESYNC_PAGES ; i++)
135d11c171eSNeilBrown 			for (j=1; j<pi->raid_disks; j++)
136d11c171eSNeilBrown 				r1_bio->bios[j]->bi_io_vec[i].bv_page =
137d11c171eSNeilBrown 					r1_bio->bios[0]->bi_io_vec[i].bv_page;
138d11c171eSNeilBrown 	}
1391da177e4SLinus Torvalds 
1401da177e4SLinus Torvalds 	r1_bio->master_bio = NULL;
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds 	return r1_bio;
1431da177e4SLinus Torvalds 
1441da177e4SLinus Torvalds out_free_bio:
1451da177e4SLinus Torvalds 	while (++j < pi->raid_disks)
1461da177e4SLinus Torvalds 		bio_put(r1_bio->bios[j]);
1471da177e4SLinus Torvalds 	r1bio_pool_free(r1_bio, data);
1481da177e4SLinus Torvalds 	return NULL;
1491da177e4SLinus Torvalds }
1501da177e4SLinus Torvalds 
1511da177e4SLinus Torvalds static void r1buf_pool_free(void *__r1_bio, void *data)
1521da177e4SLinus Torvalds {
1531da177e4SLinus Torvalds 	struct pool_info *pi = data;
154d11c171eSNeilBrown 	int i,j;
1559f2c9d12SNeilBrown 	struct r1bio *r1bio = __r1_bio;
1561da177e4SLinus Torvalds 
157d11c171eSNeilBrown 	for (i = 0; i < RESYNC_PAGES; i++)
158d11c171eSNeilBrown 		for (j = pi->raid_disks; j-- ;) {
159d11c171eSNeilBrown 			if (j == 0 ||
160d11c171eSNeilBrown 			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
161d11c171eSNeilBrown 			    r1bio->bios[0]->bi_io_vec[i].bv_page)
1621345b1d8SNeilBrown 				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
1631da177e4SLinus Torvalds 		}
1641da177e4SLinus Torvalds 	for (i=0 ; i < pi->raid_disks; i++)
1651da177e4SLinus Torvalds 		bio_put(r1bio->bios[i]);
1661da177e4SLinus Torvalds 
1671da177e4SLinus Torvalds 	r1bio_pool_free(r1bio, data);
1681da177e4SLinus Torvalds }
1691da177e4SLinus Torvalds 
170e8096360SNeilBrown static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
1711da177e4SLinus Torvalds {
1721da177e4SLinus Torvalds 	int i;
1731da177e4SLinus Torvalds 
1748f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1751da177e4SLinus Torvalds 		struct bio **bio = r1_bio->bios + i;
1764367af55SNeilBrown 		if (!BIO_SPECIAL(*bio))
1771da177e4SLinus Torvalds 			bio_put(*bio);
1781da177e4SLinus Torvalds 		*bio = NULL;
1791da177e4SLinus Torvalds 	}
1801da177e4SLinus Torvalds }
1811da177e4SLinus Torvalds 
1829f2c9d12SNeilBrown static void free_r1bio(struct r1bio *r1_bio)
1831da177e4SLinus Torvalds {
184e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
1851da177e4SLinus Torvalds 
1861da177e4SLinus Torvalds 	put_all_bios(conf, r1_bio);
1871da177e4SLinus Torvalds 	mempool_free(r1_bio, conf->r1bio_pool);
1881da177e4SLinus Torvalds }
1891da177e4SLinus Torvalds 
1909f2c9d12SNeilBrown static void put_buf(struct r1bio *r1_bio)
1911da177e4SLinus Torvalds {
192e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
1933e198f78SNeilBrown 	int i;
1943e198f78SNeilBrown 
1958f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1963e198f78SNeilBrown 		struct bio *bio = r1_bio->bios[i];
1973e198f78SNeilBrown 		if (bio->bi_end_io)
1983e198f78SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
1993e198f78SNeilBrown 	}
2001da177e4SLinus Torvalds 
2011da177e4SLinus Torvalds 	mempool_free(r1_bio, conf->r1buf_pool);
2021da177e4SLinus Torvalds 
20317999be4SNeilBrown 	lower_barrier(conf);
2041da177e4SLinus Torvalds }
2051da177e4SLinus Torvalds 
2069f2c9d12SNeilBrown static void reschedule_retry(struct r1bio *r1_bio)
2071da177e4SLinus Torvalds {
2081da177e4SLinus Torvalds 	unsigned long flags;
209fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
210e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds 	spin_lock_irqsave(&conf->device_lock, flags);
2131da177e4SLinus Torvalds 	list_add(&r1_bio->retry_list, &conf->retry_list);
214ddaf22abSNeilBrown 	conf->nr_queued ++;
2151da177e4SLinus Torvalds 	spin_unlock_irqrestore(&conf->device_lock, flags);
2161da177e4SLinus Torvalds 
21717999be4SNeilBrown 	wake_up(&conf->wait_barrier);
2181da177e4SLinus Torvalds 	md_wakeup_thread(mddev->thread);
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds 
2211da177e4SLinus Torvalds /*
2221da177e4SLinus Torvalds  * raid_end_bio_io() is called when we have finished servicing a mirrored
2231da177e4SLinus Torvalds  * operation and are ready to return a success/failure code to the buffer
2241da177e4SLinus Torvalds  * cache layer.
2251da177e4SLinus Torvalds  */
2269f2c9d12SNeilBrown static void call_bio_endio(struct r1bio *r1_bio)
227d2eb35acSNeilBrown {
228d2eb35acSNeilBrown 	struct bio *bio = r1_bio->master_bio;
229d2eb35acSNeilBrown 	int done;
230e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
23179ef3a8aSmajianpeng 	sector_t start_next_window = r1_bio->start_next_window;
2324f024f37SKent Overstreet 	sector_t bi_sector = bio->bi_iter.bi_sector;
233d2eb35acSNeilBrown 
234d2eb35acSNeilBrown 	if (bio->bi_phys_segments) {
235d2eb35acSNeilBrown 		unsigned long flags;
236d2eb35acSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
237d2eb35acSNeilBrown 		bio->bi_phys_segments--;
238d2eb35acSNeilBrown 		done = (bio->bi_phys_segments == 0);
239d2eb35acSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
24079ef3a8aSmajianpeng 		/*
24179ef3a8aSmajianpeng 		 * make_request() might be waiting for
24279ef3a8aSmajianpeng 		 * bi_phys_segments to decrease
24379ef3a8aSmajianpeng 		 */
24479ef3a8aSmajianpeng 		wake_up(&conf->wait_barrier);
245d2eb35acSNeilBrown 	} else
246d2eb35acSNeilBrown 		done = 1;
247d2eb35acSNeilBrown 
248d2eb35acSNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
249d2eb35acSNeilBrown 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
250d2eb35acSNeilBrown 	if (done) {
251d2eb35acSNeilBrown 		bio_endio(bio, 0);
252d2eb35acSNeilBrown 		/*
253d2eb35acSNeilBrown 		 * Wake up any possible resync thread that waits for the device
254d2eb35acSNeilBrown 		 * to go idle.
255d2eb35acSNeilBrown 		 */
25679ef3a8aSmajianpeng 		allow_barrier(conf, start_next_window, bi_sector);
257d2eb35acSNeilBrown 	}
258d2eb35acSNeilBrown }
259d2eb35acSNeilBrown 
2609f2c9d12SNeilBrown static void raid_end_bio_io(struct r1bio *r1_bio)
2611da177e4SLinus Torvalds {
2621da177e4SLinus Torvalds 	struct bio *bio = r1_bio->master_bio;
2631da177e4SLinus Torvalds 
2644b6d287fSNeilBrown 	/* if nobody has done the final endio yet, do it now */
2654b6d287fSNeilBrown 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
26636a4e1feSNeilBrown 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
2674b6d287fSNeilBrown 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
2684f024f37SKent Overstreet 			 (unsigned long long) bio->bi_iter.bi_sector,
2694f024f37SKent Overstreet 			 (unsigned long long) bio_end_sector(bio) - 1);
2704b6d287fSNeilBrown 
271d2eb35acSNeilBrown 		call_bio_endio(r1_bio);
2724b6d287fSNeilBrown 	}
2731da177e4SLinus Torvalds 	free_r1bio(r1_bio);
2741da177e4SLinus Torvalds }
2751da177e4SLinus Torvalds 
2761da177e4SLinus Torvalds /*
2771da177e4SLinus Torvalds  * Update disk head position estimator based on IRQ completion info.
2781da177e4SLinus Torvalds  */
2799f2c9d12SNeilBrown static inline void update_head_pos(int disk, struct r1bio *r1_bio)
2801da177e4SLinus Torvalds {
281e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
2821da177e4SLinus Torvalds 
2831da177e4SLinus Torvalds 	conf->mirrors[disk].head_position =
2841da177e4SLinus Torvalds 		r1_bio->sector + (r1_bio->sectors);
2851da177e4SLinus Torvalds }
2861da177e4SLinus Torvalds 
287ba3ae3beSNamhyung Kim /*
288ba3ae3beSNamhyung Kim  * Find the disk number which triggered the given bio
289ba3ae3beSNamhyung Kim  */
2909f2c9d12SNeilBrown static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
291ba3ae3beSNamhyung Kim {
292ba3ae3beSNamhyung Kim 	int mirror;
29330194636SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
29430194636SNeilBrown 	int raid_disks = conf->raid_disks;
295ba3ae3beSNamhyung Kim 
2968f19ccb2SNeilBrown 	for (mirror = 0; mirror < raid_disks * 2; mirror++)
297ba3ae3beSNamhyung Kim 		if (r1_bio->bios[mirror] == bio)
298ba3ae3beSNamhyung Kim 			break;
299ba3ae3beSNamhyung Kim 
3008f19ccb2SNeilBrown 	BUG_ON(mirror == raid_disks * 2);
301ba3ae3beSNamhyung Kim 	update_head_pos(mirror, r1_bio);
302ba3ae3beSNamhyung Kim 
303ba3ae3beSNamhyung Kim 	return mirror;
304ba3ae3beSNamhyung Kim }
305ba3ae3beSNamhyung Kim 
3066712ecf8SNeilBrown static void raid1_end_read_request(struct bio *bio, int error)
3071da177e4SLinus Torvalds {
3081da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
3099f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
3101da177e4SLinus Torvalds 	int mirror;
311e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
3121da177e4SLinus Torvalds 
3131da177e4SLinus Torvalds 	mirror = r1_bio->read_disk;
3141da177e4SLinus Torvalds 	/*
3151da177e4SLinus Torvalds 	 * this branch is our 'one mirror IO has finished' event handler:
3161da177e4SLinus Torvalds 	 */
317ddaf22abSNeilBrown 	update_head_pos(mirror, r1_bio);
318ddaf22abSNeilBrown 
319220946c9SNeilBrown 	if (uptodate)
3201da177e4SLinus Torvalds 		set_bit(R1BIO_Uptodate, &r1_bio->state);
321dd00a99eSNeilBrown 	else {
322dd00a99eSNeilBrown 		/* If all other devices have failed, we want to return
323dd00a99eSNeilBrown 		 * the error upwards rather than fail the last device.
324dd00a99eSNeilBrown 		 * Here we redefine "uptodate" to mean "Don't want to retry"
325dd00a99eSNeilBrown 		 */
326dd00a99eSNeilBrown 		unsigned long flags;
327dd00a99eSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
328dd00a99eSNeilBrown 		if (r1_bio->mddev->degraded == conf->raid_disks ||
329dd00a99eSNeilBrown 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
330dd00a99eSNeilBrown 		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
331dd00a99eSNeilBrown 			uptodate = 1;
332dd00a99eSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
333dd00a99eSNeilBrown 	}
3341da177e4SLinus Torvalds 
3357ad4d4a6SNeilBrown 	if (uptodate) {
3361da177e4SLinus Torvalds 		raid_end_bio_io(r1_bio);
3377ad4d4a6SNeilBrown 		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
3387ad4d4a6SNeilBrown 	} else {
3391da177e4SLinus Torvalds 		/*
3401da177e4SLinus Torvalds 		 * oops, read error:
3411da177e4SLinus Torvalds 		 */
3421da177e4SLinus Torvalds 		char b[BDEVNAME_SIZE];
3438bda470eSChristian Dietrich 		printk_ratelimited(
3448bda470eSChristian Dietrich 			KERN_ERR "md/raid1:%s: %s: "
3458bda470eSChristian Dietrich 			"rescheduling sector %llu\n",
3469dd1e2faSNeilBrown 			mdname(conf->mddev),
3478bda470eSChristian Dietrich 			bdevname(conf->mirrors[mirror].rdev->bdev,
3488bda470eSChristian Dietrich 				 b),
3498bda470eSChristian Dietrich 			(unsigned long long)r1_bio->sector);
350d2eb35acSNeilBrown 		set_bit(R1BIO_ReadError, &r1_bio->state);
3511da177e4SLinus Torvalds 		reschedule_retry(r1_bio);
3527ad4d4a6SNeilBrown 		/* don't drop the reference on read_disk yet */
3531da177e4SLinus Torvalds 	}
3541da177e4SLinus Torvalds }
3551da177e4SLinus Torvalds 
3569f2c9d12SNeilBrown static void close_write(struct r1bio *r1_bio)
3574e78064fSNeilBrown {
3584e78064fSNeilBrown 	/* it really is the end of this request */
3594e78064fSNeilBrown 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
3604e78064fSNeilBrown 		/* free extra copy of the data pages */
361af6d7b76SNeilBrown 		int i = r1_bio->behind_page_count;
3624e78064fSNeilBrown 		while (i--)
3632ca68f5eSNeilBrown 			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
3642ca68f5eSNeilBrown 		kfree(r1_bio->behind_bvecs);
3652ca68f5eSNeilBrown 		r1_bio->behind_bvecs = NULL;
3664e78064fSNeilBrown 	}
3674e78064fSNeilBrown 	/* clear the bitmap if all writes complete successfully */
3684e78064fSNeilBrown 	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
3694e78064fSNeilBrown 			r1_bio->sectors,
3704e78064fSNeilBrown 			!test_bit(R1BIO_Degraded, &r1_bio->state),
371af6d7b76SNeilBrown 			test_bit(R1BIO_BehindIO, &r1_bio->state));
3724e78064fSNeilBrown 	md_write_end(r1_bio->mddev);
373cd5ff9a1SNeilBrown }
374cd5ff9a1SNeilBrown 
3759f2c9d12SNeilBrown static void r1_bio_write_done(struct r1bio *r1_bio)
376cd5ff9a1SNeilBrown {
377cd5ff9a1SNeilBrown 	if (!atomic_dec_and_test(&r1_bio->remaining))
378cd5ff9a1SNeilBrown 		return;
379cd5ff9a1SNeilBrown 
380cd5ff9a1SNeilBrown 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
381cd5ff9a1SNeilBrown 		reschedule_retry(r1_bio);
382cd5ff9a1SNeilBrown 	else {
383cd5ff9a1SNeilBrown 		close_write(r1_bio);
3844367af55SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
3854367af55SNeilBrown 			reschedule_retry(r1_bio);
3864367af55SNeilBrown 		else
3874e78064fSNeilBrown 			raid_end_bio_io(r1_bio);
3884e78064fSNeilBrown 	}
3894e78064fSNeilBrown }
3904e78064fSNeilBrown 
3916712ecf8SNeilBrown static void raid1_end_write_request(struct bio *bio, int error)
3921da177e4SLinus Torvalds {
3931da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
3949f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
395a9701a30SNeilBrown 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
396e8096360SNeilBrown 	struct r1conf *conf = r1_bio->mddev->private;
39704b857f7SNeilBrown 	struct bio *to_put = NULL;
3981da177e4SLinus Torvalds 
399ba3ae3beSNamhyung Kim 	mirror = find_bio_disk(r1_bio, bio);
4001da177e4SLinus Torvalds 
4011da177e4SLinus Torvalds 	/*
402e9c7469bSTejun Heo 	 * 'one mirror IO has finished' event handler:
4031da177e4SLinus Torvalds 	 */
404191ea9b2SNeilBrown 	if (!uptodate) {
405cd5ff9a1SNeilBrown 		set_bit(WriteErrorSeen,
406cd5ff9a1SNeilBrown 			&conf->mirrors[mirror].rdev->flags);
40719d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
40819d67169SNeilBrown 				      &conf->mirrors[mirror].rdev->flags))
40919d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
41019d67169SNeilBrown 				conf->mddev->recovery);
41119d67169SNeilBrown 
412cd5ff9a1SNeilBrown 		set_bit(R1BIO_WriteError, &r1_bio->state);
4134367af55SNeilBrown 	} else {
4141da177e4SLinus Torvalds 		/*
415e9c7469bSTejun Heo 		 * Set R1BIO_Uptodate in our master bio, so that we
416e9c7469bSTejun Heo 		 * will return a good error code to the higher
417e9c7469bSTejun Heo 		 * levels even if IO on some other mirrored buffer
418e9c7469bSTejun Heo 		 * fails.
4191da177e4SLinus Torvalds 		 *
420e9c7469bSTejun Heo 		 * The 'master' represents the composite IO operation
421e9c7469bSTejun Heo 		 * to user-side. So if something waits for IO, then it
422e9c7469bSTejun Heo 		 * will wait for the 'master' bio.
4231da177e4SLinus Torvalds 		 */
4244367af55SNeilBrown 		sector_t first_bad;
4254367af55SNeilBrown 		int bad_sectors;
4264367af55SNeilBrown 
427cd5ff9a1SNeilBrown 		r1_bio->bios[mirror] = NULL;
428cd5ff9a1SNeilBrown 		to_put = bio;
4293056e3aeSAlex Lyakas 		/*
4303056e3aeSAlex Lyakas 		 * Do not set R1BIO_Uptodate if the current device is
4313056e3aeSAlex Lyakas 		 * rebuilding or Faulty. This is because we cannot use
4323056e3aeSAlex Lyakas 		 * such a device for properly reading the data back (we could
4333056e3aeSAlex Lyakas 		 * potentially use it, if the current write fell before
4343056e3aeSAlex Lyakas 		 * rdev->recovery_offset, but for simplicity we don't
4353056e3aeSAlex Lyakas 		 * check this here).
4363056e3aeSAlex Lyakas 		 */
4373056e3aeSAlex Lyakas 		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
4383056e3aeSAlex Lyakas 		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
4391da177e4SLinus Torvalds 			set_bit(R1BIO_Uptodate, &r1_bio->state);
4401da177e4SLinus Torvalds 
4414367af55SNeilBrown 		/* Maybe we can clear some bad blocks. */
4424367af55SNeilBrown 		if (is_badblock(conf->mirrors[mirror].rdev,
4434367af55SNeilBrown 				r1_bio->sector, r1_bio->sectors,
4444367af55SNeilBrown 				&first_bad, &bad_sectors)) {
4454367af55SNeilBrown 			r1_bio->bios[mirror] = IO_MADE_GOOD;
4464367af55SNeilBrown 			set_bit(R1BIO_MadeGood, &r1_bio->state);
4474367af55SNeilBrown 		}
4484367af55SNeilBrown 	}
4494367af55SNeilBrown 
4504b6d287fSNeilBrown 	if (behind) {
4514b6d287fSNeilBrown 		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
4524b6d287fSNeilBrown 			atomic_dec(&r1_bio->behind_remaining);
4534b6d287fSNeilBrown 
454e9c7469bSTejun Heo 		/*
455e9c7469bSTejun Heo 		 * In behind mode, we ACK the master bio once the I/O
456e9c7469bSTejun Heo 		 * has safely reached all non-writemostly
457e9c7469bSTejun Heo 		 * disks. Setting the Returned bit ensures that this
458e9c7469bSTejun Heo 		 * gets done only once -- we don't ever want to return
459e9c7469bSTejun Heo 		 * -EIO here, instead we'll wait
460e9c7469bSTejun Heo 		 */
4614b6d287fSNeilBrown 		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
4624b6d287fSNeilBrown 		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
4634b6d287fSNeilBrown 			/* Maybe we can return now */
4644b6d287fSNeilBrown 			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
4654b6d287fSNeilBrown 				struct bio *mbio = r1_bio->master_bio;
46636a4e1feSNeilBrown 				pr_debug("raid1: behind end write sectors"
46736a4e1feSNeilBrown 					 " %llu-%llu\n",
4684f024f37SKent Overstreet 					 (unsigned long long) mbio->bi_iter.bi_sector,
4694f024f37SKent Overstreet 					 (unsigned long long) bio_end_sector(mbio) - 1);
470d2eb35acSNeilBrown 				call_bio_endio(r1_bio);
4714b6d287fSNeilBrown 			}
4724b6d287fSNeilBrown 		}
4734b6d287fSNeilBrown 	}
4744367af55SNeilBrown 	if (r1_bio->bios[mirror] == NULL)
4754367af55SNeilBrown 		rdev_dec_pending(conf->mirrors[mirror].rdev,
4764367af55SNeilBrown 				 conf->mddev);
477e9c7469bSTejun Heo 
4781da177e4SLinus Torvalds 	/*
4791da177e4SLinus Torvalds 	 * Let's see if all mirrored write operations have finished
4801da177e4SLinus Torvalds 	 * already.
4811da177e4SLinus Torvalds 	 */
482af6d7b76SNeilBrown 	r1_bio_write_done(r1_bio);
483c70810b3SNeilBrown 
48404b857f7SNeilBrown 	if (to_put)
48504b857f7SNeilBrown 		bio_put(to_put);
4861da177e4SLinus Torvalds }
4871da177e4SLinus Torvalds 
4881da177e4SLinus Torvalds 
4891da177e4SLinus Torvalds /*
4901da177e4SLinus Torvalds  * This routine returns the disk from which the requested read should
4911da177e4SLinus Torvalds  * be done. There is a per-array 'next expected sequential IO' sector
4921da177e4SLinus Torvalds  * number - if this matches on the next IO then we use the last disk.
4931da177e4SLinus Torvalds  * There is also a per-disk 'last known head position' sector that is
4941da177e4SLinus Torvalds  * maintained from IRQ contexts, both the normal and the resync IO
4951da177e4SLinus Torvalds  * completion handlers update this position correctly. If there is no
4961da177e4SLinus Torvalds  * perfect sequential match then we pick the disk whose head is closest.
4971da177e4SLinus Torvalds  *
4981da177e4SLinus Torvalds  * If there are 2 mirrors in the same 2 devices, performance degrades
4991da177e4SLinus Torvalds  * because position is mirror, not device based.
5001da177e4SLinus Torvalds  *
5011da177e4SLinus Torvalds  * The rdev for the device selected will have nr_pending incremented.
5021da177e4SLinus Torvalds  */
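/* In outline, the selection below proceeds roughly as follows: skip failed or
 * not-yet-recovered devices, fall back to a write-mostly device only as a
 * last resort, prefer a disk already serving this sequential stream, then an
 * idle disk, and finally pick either the least-loaded disk (if any disk is
 * non-rotational) or the disk with the closest head position.
 */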
503e8096360SNeilBrown static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
5041da177e4SLinus Torvalds {
505af3a2cd6SNeilBrown 	const sector_t this_sector = r1_bio->sector;
506d2eb35acSNeilBrown 	int sectors;
507d2eb35acSNeilBrown 	int best_good_sectors;
5089dedf603SShaohua Li 	int best_disk, best_dist_disk, best_pending_disk;
5099dedf603SShaohua Li 	int has_nonrot_disk;
510be4d3280SShaohua Li 	int disk;
51176073054SNeilBrown 	sector_t best_dist;
5129dedf603SShaohua Li 	unsigned int min_pending;
5133cb03002SNeilBrown 	struct md_rdev *rdev;
514f3ac8bf7SNeilBrown 	int choose_first;
51512cee5a8SShaohua Li 	int choose_next_idle;
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds 	rcu_read_lock();
5181da177e4SLinus Torvalds 	/*
5198ddf9efeSNeilBrown 	 * Check if we can balance. We can balance on the whole
5201da177e4SLinus Torvalds 	 * device if no resync is going on, or below the resync window.
5211da177e4SLinus Torvalds 	 * We take the first readable disk when above the resync window.
5221da177e4SLinus Torvalds 	 */
5231da177e4SLinus Torvalds  retry:
524d2eb35acSNeilBrown 	sectors = r1_bio->sectors;
52576073054SNeilBrown 	best_disk = -1;
5269dedf603SShaohua Li 	best_dist_disk = -1;
52776073054SNeilBrown 	best_dist = MaxSector;
5289dedf603SShaohua Li 	best_pending_disk = -1;
5299dedf603SShaohua Li 	min_pending = UINT_MAX;
530d2eb35acSNeilBrown 	best_good_sectors = 0;
5319dedf603SShaohua Li 	has_nonrot_disk = 0;
53212cee5a8SShaohua Li 	choose_next_idle = 0;
533d2eb35acSNeilBrown 
5341da177e4SLinus Torvalds 	if (conf->mddev->recovery_cp < MaxSector &&
535be4d3280SShaohua Li 	    (this_sector + sectors >= conf->next_resync))
536f3ac8bf7SNeilBrown 		choose_first = 1;
537be4d3280SShaohua Li 	else
538f3ac8bf7SNeilBrown 		choose_first = 0;
5391da177e4SLinus Torvalds 
540be4d3280SShaohua Li 	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
54176073054SNeilBrown 		sector_t dist;
542d2eb35acSNeilBrown 		sector_t first_bad;
543d2eb35acSNeilBrown 		int bad_sectors;
5449dedf603SShaohua Li 		unsigned int pending;
54512cee5a8SShaohua Li 		bool nonrot;
546d2eb35acSNeilBrown 
547f3ac8bf7SNeilBrown 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
548f3ac8bf7SNeilBrown 		if (r1_bio->bios[disk] == IO_BLOCKED
549f3ac8bf7SNeilBrown 		    || rdev == NULL
5506b740b8dSNeilBrown 		    || test_bit(Unmerged, &rdev->flags)
55176073054SNeilBrown 		    || test_bit(Faulty, &rdev->flags))
552f3ac8bf7SNeilBrown 			continue;
55376073054SNeilBrown 		if (!test_bit(In_sync, &rdev->flags) &&
55476073054SNeilBrown 		    rdev->recovery_offset < this_sector + sectors)
55576073054SNeilBrown 			continue;
55676073054SNeilBrown 		if (test_bit(WriteMostly, &rdev->flags)) {
55776073054SNeilBrown 			/* Don't balance among write-mostly, just
55876073054SNeilBrown 			 * use the first as a last resort */
559307729c8SNeilBrown 			if (best_disk < 0) {
560307729c8SNeilBrown 				if (is_badblock(rdev, this_sector, sectors,
561307729c8SNeilBrown 						&first_bad, &bad_sectors)) {
562307729c8SNeilBrown 					if (first_bad < this_sector)
563307729c8SNeilBrown 						/* Cannot use this */
564307729c8SNeilBrown 						continue;
565307729c8SNeilBrown 					best_good_sectors = first_bad - this_sector;
566307729c8SNeilBrown 				} else
567307729c8SNeilBrown 					best_good_sectors = sectors;
56876073054SNeilBrown 				best_disk = disk;
569307729c8SNeilBrown 			}
57076073054SNeilBrown 			continue;
5718ddf9efeSNeilBrown 		}
57276073054SNeilBrown 		/* This is a reasonable device to use.  It might
57376073054SNeilBrown 		 * even be best.
5741da177e4SLinus Torvalds 		 */
575d2eb35acSNeilBrown 		if (is_badblock(rdev, this_sector, sectors,
576d2eb35acSNeilBrown 				&first_bad, &bad_sectors)) {
577d2eb35acSNeilBrown 			if (best_dist < MaxSector)
578d2eb35acSNeilBrown 				/* already have a better device */
579d2eb35acSNeilBrown 				continue;
580d2eb35acSNeilBrown 			if (first_bad <= this_sector) {
581d2eb35acSNeilBrown 				/* cannot read here. If this is the 'primary'
582d2eb35acSNeilBrown 				 * device, then we must not read beyond
583d2eb35acSNeilBrown 				 * bad_sectors from another device.
584d2eb35acSNeilBrown 				 */
585d2eb35acSNeilBrown 				bad_sectors -= (this_sector - first_bad);
586d2eb35acSNeilBrown 				if (choose_first && sectors > bad_sectors)
587d2eb35acSNeilBrown 					sectors = bad_sectors;
588d2eb35acSNeilBrown 				if (best_good_sectors > sectors)
589d2eb35acSNeilBrown 					best_good_sectors = sectors;
590d2eb35acSNeilBrown 
591d2eb35acSNeilBrown 			} else {
592d2eb35acSNeilBrown 				sector_t good_sectors = first_bad - this_sector;
593d2eb35acSNeilBrown 				if (good_sectors > best_good_sectors) {
594d2eb35acSNeilBrown 					best_good_sectors = good_sectors;
595d2eb35acSNeilBrown 					best_disk = disk;
596d2eb35acSNeilBrown 				}
597d2eb35acSNeilBrown 				if (choose_first)
598d2eb35acSNeilBrown 					break;
599d2eb35acSNeilBrown 			}
600d2eb35acSNeilBrown 			continue;
601d2eb35acSNeilBrown 		} else
602d2eb35acSNeilBrown 			best_good_sectors = sectors;
603d2eb35acSNeilBrown 
60412cee5a8SShaohua Li 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
60512cee5a8SShaohua Li 		has_nonrot_disk |= nonrot;
6069dedf603SShaohua Li 		pending = atomic_read(&rdev->nr_pending);
60776073054SNeilBrown 		dist = abs(this_sector - conf->mirrors[disk].head_position);
60812cee5a8SShaohua Li 		if (choose_first) {
60976073054SNeilBrown 			best_disk = disk;
6101da177e4SLinus Torvalds 			break;
6111da177e4SLinus Torvalds 		}
61212cee5a8SShaohua Li 		/* Don't change to another disk for sequential reads */
61312cee5a8SShaohua Li 		if (conf->mirrors[disk].next_seq_sect == this_sector
61412cee5a8SShaohua Li 		    || dist == 0) {
61512cee5a8SShaohua Li 			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
61612cee5a8SShaohua Li 			struct raid1_info *mirror = &conf->mirrors[disk];
61712cee5a8SShaohua Li 
61812cee5a8SShaohua Li 			best_disk = disk;
61912cee5a8SShaohua Li 			/*
62012cee5a8SShaohua Li 			 * If the buffered sequential IO size exceeds the optimal
62112cee5a8SShaohua Li 			 * iosize, check whether there is an idle disk and, if so,
62212cee5a8SShaohua Li 			 * choose it. read_balance could already have chosen an
62312cee5a8SShaohua Li 			 * idle disk before noticing that the IO is sequential on
62412cee5a8SShaohua Li 			 * this disk. That doesn't matter: this disk will go idle,
62512cee5a8SShaohua Li 			 * and next time it will be used once the first disk's IO
62612cee5a8SShaohua Li 			 * size exceeds the optimal iosize. This way the first
62712cee5a8SShaohua Li 			 * disk's iosize is at least the optimal iosize. The
62812cee5a8SShaohua Li 			 * second disk's iosize might be small, but that is not a
62912cee5a8SShaohua Li 			 * big deal since when the second disk starts IO, the
63012cee5a8SShaohua Li 			 * first disk is likely still busy.
63112cee5a8SShaohua Li 			 */
63212cee5a8SShaohua Li 			if (nonrot && opt_iosize > 0 &&
63312cee5a8SShaohua Li 			    mirror->seq_start != MaxSector &&
63412cee5a8SShaohua Li 			    mirror->next_seq_sect > opt_iosize &&
63512cee5a8SShaohua Li 			    mirror->next_seq_sect - opt_iosize >=
63612cee5a8SShaohua Li 			    mirror->seq_start) {
63712cee5a8SShaohua Li 				choose_next_idle = 1;
63812cee5a8SShaohua Li 				continue;
63912cee5a8SShaohua Li 			}
64012cee5a8SShaohua Li 			break;
64112cee5a8SShaohua Li 		}
64212cee5a8SShaohua Li 		/* If device is idle, use it */
64312cee5a8SShaohua Li 		if (pending == 0) {
64412cee5a8SShaohua Li 			best_disk = disk;
64512cee5a8SShaohua Li 			break;
64612cee5a8SShaohua Li 		}
64712cee5a8SShaohua Li 
64812cee5a8SShaohua Li 		if (choose_next_idle)
64912cee5a8SShaohua Li 			continue;
6509dedf603SShaohua Li 
6519dedf603SShaohua Li 		if (min_pending > pending) {
6529dedf603SShaohua Li 			min_pending = pending;
6539dedf603SShaohua Li 			best_pending_disk = disk;
6549dedf603SShaohua Li 		}
6559dedf603SShaohua Li 
65676073054SNeilBrown 		if (dist < best_dist) {
65776073054SNeilBrown 			best_dist = dist;
6589dedf603SShaohua Li 			best_dist_disk = disk;
6591da177e4SLinus Torvalds 		}
660f3ac8bf7SNeilBrown 	}
6611da177e4SLinus Torvalds 
6629dedf603SShaohua Li 	/*
6639dedf603SShaohua Li 	 * If all disks are rotational, choose the closest disk. If any disk is
6649dedf603SShaohua Li 	 * non-rotational, choose the disk with the fewest pending requests even
6659dedf603SShaohua Li 	 * if that disk is rotational, which may or may not be optimal for arrays
6669dedf603SShaohua Li 	 * with mixed rotational/non-rotational disks depending on the workload.
6679dedf603SShaohua Li 	 */
6689dedf603SShaohua Li 	if (best_disk == -1) {
6699dedf603SShaohua Li 		if (has_nonrot_disk)
6709dedf603SShaohua Li 			best_disk = best_pending_disk;
6719dedf603SShaohua Li 		else
6729dedf603SShaohua Li 			best_disk = best_dist_disk;
6739dedf603SShaohua Li 	}
6749dedf603SShaohua Li 
67576073054SNeilBrown 	if (best_disk >= 0) {
67676073054SNeilBrown 		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
6778ddf9efeSNeilBrown 		if (!rdev)
6788ddf9efeSNeilBrown 			goto retry;
6798ddf9efeSNeilBrown 		atomic_inc(&rdev->nr_pending);
68076073054SNeilBrown 		if (test_bit(Faulty, &rdev->flags)) {
6811da177e4SLinus Torvalds 			/* cannot risk returning a device that failed
6821da177e4SLinus Torvalds 			 * before we inc'ed nr_pending
6831da177e4SLinus Torvalds 			 */
68403c902e1SNeilBrown 			rdev_dec_pending(rdev, conf->mddev);
6851da177e4SLinus Torvalds 			goto retry;
6861da177e4SLinus Torvalds 		}
687d2eb35acSNeilBrown 		sectors = best_good_sectors;
68812cee5a8SShaohua Li 
68912cee5a8SShaohua Li 		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
69012cee5a8SShaohua Li 			conf->mirrors[best_disk].seq_start = this_sector;
69112cee5a8SShaohua Li 
692be4d3280SShaohua Li 		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
6931da177e4SLinus Torvalds 	}
6941da177e4SLinus Torvalds 	rcu_read_unlock();
695d2eb35acSNeilBrown 	*max_sectors = sectors;
6961da177e4SLinus Torvalds 
69776073054SNeilBrown 	return best_disk;
6981da177e4SLinus Torvalds }
6991da177e4SLinus Torvalds 
7006b740b8dSNeilBrown static int raid1_mergeable_bvec(struct request_queue *q,
7016b740b8dSNeilBrown 				struct bvec_merge_data *bvm,
7026b740b8dSNeilBrown 				struct bio_vec *biovec)
7036b740b8dSNeilBrown {
7046b740b8dSNeilBrown 	struct mddev *mddev = q->queuedata;
7056b740b8dSNeilBrown 	struct r1conf *conf = mddev->private;
7066b740b8dSNeilBrown 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
7076b740b8dSNeilBrown 	int max = biovec->bv_len;
7086b740b8dSNeilBrown 
7096b740b8dSNeilBrown 	if (mddev->merge_check_needed) {
7106b740b8dSNeilBrown 		int disk;
7116b740b8dSNeilBrown 		rcu_read_lock();
7126b740b8dSNeilBrown 		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
7136b740b8dSNeilBrown 			struct md_rdev *rdev = rcu_dereference(
7146b740b8dSNeilBrown 				conf->mirrors[disk].rdev);
7156b740b8dSNeilBrown 			if (rdev && !test_bit(Faulty, &rdev->flags)) {
7166b740b8dSNeilBrown 				struct request_queue *q =
7176b740b8dSNeilBrown 					bdev_get_queue(rdev->bdev);
7186b740b8dSNeilBrown 				if (q->merge_bvec_fn) {
7196b740b8dSNeilBrown 					bvm->bi_sector = sector +
7206b740b8dSNeilBrown 						rdev->data_offset;
7216b740b8dSNeilBrown 					bvm->bi_bdev = rdev->bdev;
7226b740b8dSNeilBrown 					max = min(max, q->merge_bvec_fn(
7236b740b8dSNeilBrown 							  q, bvm, biovec));
7246b740b8dSNeilBrown 				}
7256b740b8dSNeilBrown 			}
7266b740b8dSNeilBrown 		}
7276b740b8dSNeilBrown 		rcu_read_unlock();
7286b740b8dSNeilBrown 	}
7296b740b8dSNeilBrown 	return max;
7306b740b8dSNeilBrown 
7316b740b8dSNeilBrown }
7326b740b8dSNeilBrown 
733fd01b88cSNeilBrown int md_raid1_congested(struct mddev *mddev, int bits)
7340d129228SNeilBrown {
735e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
7360d129228SNeilBrown 	int i, ret = 0;
7370d129228SNeilBrown 
73834db0cd6SNeilBrown 	if ((bits & (1 << BDI_async_congested)) &&
73934db0cd6SNeilBrown 	    conf->pending_count >= max_queued_requests)
74034db0cd6SNeilBrown 		return 1;
74134db0cd6SNeilBrown 
7420d129228SNeilBrown 	rcu_read_lock();
743f53e29fcSNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
7443cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
7450d129228SNeilBrown 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
746165125e1SJens Axboe 			struct request_queue *q = bdev_get_queue(rdev->bdev);
7470d129228SNeilBrown 
7481ed7242eSJonathan Brassow 			BUG_ON(!q);
7491ed7242eSJonathan Brassow 
7500d129228SNeilBrown 			/* Note the '|| 1' - when read_balance prefers
7510d129228SNeilBrown 			 * non-congested targets, it can be removed
7520d129228SNeilBrown 			 */
75391a9e99dSAlexander Beregalov 			if ((bits & (1<<BDI_async_congested)) || 1)
7540d129228SNeilBrown 				ret |= bdi_congested(&q->backing_dev_info, bits);
7550d129228SNeilBrown 			else
7560d129228SNeilBrown 				ret &= bdi_congested(&q->backing_dev_info, bits);
7570d129228SNeilBrown 		}
7580d129228SNeilBrown 	}
7590d129228SNeilBrown 	rcu_read_unlock();
7600d129228SNeilBrown 	return ret;
7610d129228SNeilBrown }
7621ed7242eSJonathan Brassow EXPORT_SYMBOL_GPL(md_raid1_congested);
7630d129228SNeilBrown 
7641ed7242eSJonathan Brassow static int raid1_congested(void *data, int bits)
7651ed7242eSJonathan Brassow {
766fd01b88cSNeilBrown 	struct mddev *mddev = data;
7671ed7242eSJonathan Brassow 
7681ed7242eSJonathan Brassow 	return mddev_congested(mddev, bits) ||
7691ed7242eSJonathan Brassow 		md_raid1_congested(mddev, bits);
7701ed7242eSJonathan Brassow }
7710d129228SNeilBrown 
772e8096360SNeilBrown static void flush_pending_writes(struct r1conf *conf)
773a35e63efSNeilBrown {
774a35e63efSNeilBrown 	/* Any writes that have been queued but are awaiting
775a35e63efSNeilBrown 	 * bitmap updates get flushed here.
776a35e63efSNeilBrown 	 */
777a35e63efSNeilBrown 	spin_lock_irq(&conf->device_lock);
778a35e63efSNeilBrown 
779a35e63efSNeilBrown 	if (conf->pending_bio_list.head) {
780a35e63efSNeilBrown 		struct bio *bio;
781a35e63efSNeilBrown 		bio = bio_list_get(&conf->pending_bio_list);
78234db0cd6SNeilBrown 		conf->pending_count = 0;
783a35e63efSNeilBrown 		spin_unlock_irq(&conf->device_lock);
784a35e63efSNeilBrown 		/* flush any pending bitmap writes to
785a35e63efSNeilBrown 		 * disk before proceeding w/ I/O */
786a35e63efSNeilBrown 		bitmap_unplug(conf->mddev->bitmap);
78734db0cd6SNeilBrown 		wake_up(&conf->wait_barrier);
788a35e63efSNeilBrown 
789a35e63efSNeilBrown 		while (bio) { /* submit pending writes */
790a35e63efSNeilBrown 			struct bio *next = bio->bi_next;
791a35e63efSNeilBrown 			bio->bi_next = NULL;
7922ff8cc2cSShaohua Li 			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
7932ff8cc2cSShaohua Li 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
7942ff8cc2cSShaohua Li 				/* Just ignore it */
7952ff8cc2cSShaohua Li 				bio_endio(bio, 0);
7962ff8cc2cSShaohua Li 			else
797a35e63efSNeilBrown 				generic_make_request(bio);
798a35e63efSNeilBrown 			bio = next;
799a35e63efSNeilBrown 		}
800a35e63efSNeilBrown 	} else
801a35e63efSNeilBrown 		spin_unlock_irq(&conf->device_lock);
8027eaceaccSJens Axboe }
8037eaceaccSJens Axboe 
80417999be4SNeilBrown /* Barriers....
80517999be4SNeilBrown  * Sometimes we need to suspend IO while we do something else,
80617999be4SNeilBrown  * either some resync/recovery, or reconfigure the array.
80717999be4SNeilBrown  * To do this we raise a 'barrier'.
80817999be4SNeilBrown  * The 'barrier' is a counter that can be raised multiple times
80917999be4SNeilBrown  * to count how many activities are happening which preclude
81017999be4SNeilBrown  * normal IO.
81117999be4SNeilBrown  * We can only raise the barrier if there is no pending IO.
81217999be4SNeilBrown  * i.e. if nr_pending == 0.
81317999be4SNeilBrown  * We choose only to raise the barrier if no-one is waiting for the
81417999be4SNeilBrown  * barrier to go down.  This means that as soon as an IO request
81517999be4SNeilBrown  * is ready, no other operations which require a barrier will start
81617999be4SNeilBrown  * until the IO request has had a chance.
81717999be4SNeilBrown  *
81817999be4SNeilBrown  * So: regular IO calls 'wait_barrier'.  When that returns there
81917999be4SNeilBrown  *    is no background IO happening.  It must arrange to call
82017999be4SNeilBrown  *    allow_barrier when it has finished its IO.
82117999be4SNeilBrown  * Background IO calls must call raise_barrier.  Once that returns
82217999be4SNeilBrown  *    there is no normal IO happening.  It must arrange to call
82317999be4SNeilBrown  *    lower_barrier when the particular background IO completes.
8241da177e4SLinus Torvalds  */
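/* A minimal usage sketch (illustrative, not verbatim from this file):
 * resync/recovery code brackets each window with
 *	raise_barrier(conf); ... lower_barrier(conf);
 * while normal IO brackets each request with
 *	start_next_window = wait_barrier(conf, bio);
 *	... submit the IO ...
 *	allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
 */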
825e8096360SNeilBrown static void raise_barrier(struct r1conf *conf)
8261da177e4SLinus Torvalds {
8271da177e4SLinus Torvalds 	spin_lock_irq(&conf->resync_lock);
8281da177e4SLinus Torvalds 
82917999be4SNeilBrown 	/* Wait until no block IO is waiting */
83017999be4SNeilBrown 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
831eed8c02eSLukas Czerner 			    conf->resync_lock);
83217999be4SNeilBrown 
83317999be4SNeilBrown 	/* block any new IO from starting */
83417999be4SNeilBrown 	conf->barrier++;
83517999be4SNeilBrown 
83679ef3a8aSmajianpeng 	/* For these conditions we must wait:
83779ef3a8aSmajianpeng 	 * A: while the array is frozen
83879ef3a8aSmajianpeng 	 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
83979ef3a8aSmajianpeng 	 *    the maximum count allowed.
84079ef3a8aSmajianpeng 	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning the
84179ef3a8aSmajianpeng 	 *    next resync will reach into the window which normal bios are
84279ef3a8aSmajianpeng 	 *    handling.
84379ef3a8aSmajianpeng 	 */
84417999be4SNeilBrown 	wait_event_lock_irq(conf->wait_barrier,
845b364e3d0Smajianpeng 			    !conf->array_frozen &&
84679ef3a8aSmajianpeng 			    conf->barrier < RESYNC_DEPTH &&
84779ef3a8aSmajianpeng 			    (conf->start_next_window >=
84879ef3a8aSmajianpeng 			     conf->next_resync + RESYNC_SECTORS),
849eed8c02eSLukas Czerner 			    conf->resync_lock);
85017999be4SNeilBrown 
8511da177e4SLinus Torvalds 	spin_unlock_irq(&conf->resync_lock);
8521da177e4SLinus Torvalds }
8531da177e4SLinus Torvalds 
854e8096360SNeilBrown static void lower_barrier(struct r1conf *conf)
85517999be4SNeilBrown {
85617999be4SNeilBrown 	unsigned long flags;
857709ae487SNeilBrown 	BUG_ON(conf->barrier <= 0);
85817999be4SNeilBrown 	spin_lock_irqsave(&conf->resync_lock, flags);
85917999be4SNeilBrown 	conf->barrier--;
86017999be4SNeilBrown 	spin_unlock_irqrestore(&conf->resync_lock, flags);
86117999be4SNeilBrown 	wake_up(&conf->wait_barrier);
86217999be4SNeilBrown }
86317999be4SNeilBrown 
86479ef3a8aSmajianpeng static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
86517999be4SNeilBrown {
86679ef3a8aSmajianpeng 	bool wait = false;
86779ef3a8aSmajianpeng 
86879ef3a8aSmajianpeng 	if (conf->array_frozen || !bio)
86979ef3a8aSmajianpeng 		wait = true;
87079ef3a8aSmajianpeng 	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
87179ef3a8aSmajianpeng 		if (conf->next_resync < RESYNC_WINDOW_SECTORS)
87279ef3a8aSmajianpeng 			wait = true;
87379ef3a8aSmajianpeng 		else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
87479ef3a8aSmajianpeng 				>= bio_end_sector(bio)) ||
87579ef3a8aSmajianpeng 			 (conf->next_resync + NEXT_NORMALIO_DISTANCE
8764f024f37SKent Overstreet 				<= bio->bi_iter.bi_sector))
87779ef3a8aSmajianpeng 			wait = false;
87879ef3a8aSmajianpeng 		else
87979ef3a8aSmajianpeng 			wait = true;
88079ef3a8aSmajianpeng 	}
88179ef3a8aSmajianpeng 
88279ef3a8aSmajianpeng 	return wait;
88379ef3a8aSmajianpeng }
88479ef3a8aSmajianpeng 
88579ef3a8aSmajianpeng static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
88679ef3a8aSmajianpeng {
88779ef3a8aSmajianpeng 	sector_t sector = 0;
88879ef3a8aSmajianpeng 
88917999be4SNeilBrown 	spin_lock_irq(&conf->resync_lock);
89079ef3a8aSmajianpeng 	if (need_to_wait_for_sync(conf, bio)) {
89117999be4SNeilBrown 		conf->nr_waiting++;
892d6b42dcbSNeilBrown 		/* Wait for the barrier to drop.
893d6b42dcbSNeilBrown 		 * However if there are already pending
894d6b42dcbSNeilBrown 		 * requests (preventing the barrier from
895d6b42dcbSNeilBrown 		 * rising completely), and the
896d6b42dcbSNeilBrown 		 * pre-process bio queue isn't empty,
897d6b42dcbSNeilBrown 		 * then don't wait, as we need to empty
898d6b42dcbSNeilBrown 		 * that queue to get the nr_pending
899d6b42dcbSNeilBrown 		 * count down.
900d6b42dcbSNeilBrown 		 */
901d6b42dcbSNeilBrown 		wait_event_lock_irq(conf->wait_barrier,
902b364e3d0Smajianpeng 				    !conf->array_frozen &&
903b364e3d0Smajianpeng 				    (!conf->barrier ||
90479ef3a8aSmajianpeng 				    ((conf->start_next_window <
90579ef3a8aSmajianpeng 				      conf->next_resync + RESYNC_SECTORS) &&
906d6b42dcbSNeilBrown 				     current->bio_list &&
907b364e3d0Smajianpeng 				     !bio_list_empty(current->bio_list))),
908eed8c02eSLukas Czerner 				    conf->resync_lock);
90917999be4SNeilBrown 		conf->nr_waiting--;
91017999be4SNeilBrown 	}
91179ef3a8aSmajianpeng 
91279ef3a8aSmajianpeng 	if (bio && bio_data_dir(bio) == WRITE) {
91379ef3a8aSmajianpeng 		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
9144f024f37SKent Overstreet 		    <= bio->bi_iter.bi_sector) {
91579ef3a8aSmajianpeng 			if (conf->start_next_window == MaxSector)
91679ef3a8aSmajianpeng 				conf->start_next_window =
91779ef3a8aSmajianpeng 					conf->next_resync +
91879ef3a8aSmajianpeng 					NEXT_NORMALIO_DISTANCE;
91979ef3a8aSmajianpeng 
92079ef3a8aSmajianpeng 			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
9214f024f37SKent Overstreet 			    <= bio->bi_iter.bi_sector)
92279ef3a8aSmajianpeng 				conf->next_window_requests++;
92379ef3a8aSmajianpeng 			else
92479ef3a8aSmajianpeng 				conf->current_window_requests++;
92579ef3a8aSmajianpeng 		}
9264f024f37SKent Overstreet 		if (bio->bi_iter.bi_sector >= conf->start_next_window)
92779ef3a8aSmajianpeng 			sector = conf->start_next_window;
92817999be4SNeilBrown 	}
92917999be4SNeilBrown 
93079ef3a8aSmajianpeng 	conf->nr_pending++;
93179ef3a8aSmajianpeng 	spin_unlock_irq(&conf->resync_lock);
93279ef3a8aSmajianpeng 	return sector;
93379ef3a8aSmajianpeng }
93479ef3a8aSmajianpeng 
93579ef3a8aSmajianpeng static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
93679ef3a8aSmajianpeng 			  sector_t bi_sector)
93717999be4SNeilBrown {
93817999be4SNeilBrown 	unsigned long flags;
93979ef3a8aSmajianpeng 
94017999be4SNeilBrown 	spin_lock_irqsave(&conf->resync_lock, flags);
94117999be4SNeilBrown 	conf->nr_pending--;
94279ef3a8aSmajianpeng 	if (start_next_window) {
94379ef3a8aSmajianpeng 		if (start_next_window == conf->start_next_window) {
94479ef3a8aSmajianpeng 			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
94579ef3a8aSmajianpeng 			    <= bi_sector)
94679ef3a8aSmajianpeng 				conf->next_window_requests--;
94779ef3a8aSmajianpeng 			else
94879ef3a8aSmajianpeng 				conf->current_window_requests--;
94979ef3a8aSmajianpeng 		} else
95079ef3a8aSmajianpeng 			conf->current_window_requests--;
95179ef3a8aSmajianpeng 
95279ef3a8aSmajianpeng 		if (!conf->current_window_requests) {
95379ef3a8aSmajianpeng 			if (conf->next_window_requests) {
95479ef3a8aSmajianpeng 				conf->current_window_requests =
95579ef3a8aSmajianpeng 					conf->next_window_requests;
95679ef3a8aSmajianpeng 				conf->next_window_requests = 0;
95779ef3a8aSmajianpeng 				conf->start_next_window +=
95879ef3a8aSmajianpeng 					NEXT_NORMALIO_DISTANCE;
95979ef3a8aSmajianpeng 			} else
96079ef3a8aSmajianpeng 				conf->start_next_window = MaxSector;
96179ef3a8aSmajianpeng 		}
96279ef3a8aSmajianpeng 	}
96317999be4SNeilBrown 	spin_unlock_irqrestore(&conf->resync_lock, flags);
96417999be4SNeilBrown 	wake_up(&conf->wait_barrier);
96517999be4SNeilBrown }
96617999be4SNeilBrown 
967e2d59925SNeilBrown static void freeze_array(struct r1conf *conf, int extra)
968ddaf22abSNeilBrown {
969ddaf22abSNeilBrown 	/* stop syncio and normal IO and wait for everything to
970ddaf22abSNeilBrown 	 * go quiet.
971b364e3d0Smajianpeng 	 * We wait until nr_pending matches nr_queued+extra
9721c830532SNeilBrown 	 * This is called in the context of one normal IO request
9731c830532SNeilBrown 	 * that has failed. Thus any sync request that might be pending
9741c830532SNeilBrown 	 * will be blocked by nr_pending, and we need to wait for
9751c830532SNeilBrown 	 * pending IO requests to complete or be queued for re-try.
976e2d59925SNeilBrown 	 * Thus the number queued (nr_queued) plus this request (extra)
9771c830532SNeilBrown 	 * must match the number of pending IOs (nr_pending) before
9781c830532SNeilBrown 	 * we continue.
979ddaf22abSNeilBrown 	 */
980ddaf22abSNeilBrown 	spin_lock_irq(&conf->resync_lock);
981b364e3d0Smajianpeng 	conf->array_frozen = 1;
982eed8c02eSLukas Czerner 	wait_event_lock_irq_cmd(conf->wait_barrier,
983e2d59925SNeilBrown 				conf->nr_pending == conf->nr_queued+extra,
984ddaf22abSNeilBrown 				conf->resync_lock,
985c3b328acSNeilBrown 				flush_pending_writes(conf));
986ddaf22abSNeilBrown 	spin_unlock_irq(&conf->resync_lock);
987ddaf22abSNeilBrown }
988e8096360SNeilBrown static void unfreeze_array(struct r1conf *conf)
989ddaf22abSNeilBrown {
990ddaf22abSNeilBrown 	/* reverse the effect of the freeze */
991ddaf22abSNeilBrown 	spin_lock_irq(&conf->resync_lock);
992b364e3d0Smajianpeng 	conf->array_frozen = 0;
993ddaf22abSNeilBrown 	wake_up(&conf->wait_barrier);
994ddaf22abSNeilBrown 	spin_unlock_irq(&conf->resync_lock);
995ddaf22abSNeilBrown }
996ddaf22abSNeilBrown 
99717999be4SNeilBrown 
9984e78064fSNeilBrown /* duplicate the data pages for behind I/O
9994e78064fSNeilBrown  */
10009f2c9d12SNeilBrown static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
10014b6d287fSNeilBrown {
10024b6d287fSNeilBrown 	int i;
10034b6d287fSNeilBrown 	struct bio_vec *bvec;
10042ca68f5eSNeilBrown 	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
10054b6d287fSNeilBrown 					GFP_NOIO);
10062ca68f5eSNeilBrown 	if (unlikely(!bvecs))
1007af6d7b76SNeilBrown 		return;
10084b6d287fSNeilBrown 
1009cb34e057SKent Overstreet 	bio_for_each_segment_all(bvec, bio, i) {
10102ca68f5eSNeilBrown 		bvecs[i] = *bvec;
10112ca68f5eSNeilBrown 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
10122ca68f5eSNeilBrown 		if (unlikely(!bvecs[i].bv_page))
10134b6d287fSNeilBrown 			goto do_sync_io;
10142ca68f5eSNeilBrown 		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
10154b6d287fSNeilBrown 		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
10162ca68f5eSNeilBrown 		kunmap(bvecs[i].bv_page);
10174b6d287fSNeilBrown 		kunmap(bvec->bv_page);
10184b6d287fSNeilBrown 	}
10192ca68f5eSNeilBrown 	r1_bio->behind_bvecs = bvecs;
1020af6d7b76SNeilBrown 	r1_bio->behind_page_count = bio->bi_vcnt;
1021af6d7b76SNeilBrown 	set_bit(R1BIO_BehindIO, &r1_bio->state);
1022af6d7b76SNeilBrown 	return;
10234b6d287fSNeilBrown 
10244b6d287fSNeilBrown do_sync_io:
1025af6d7b76SNeilBrown 	for (i = 0; i < bio->bi_vcnt; i++)
10262ca68f5eSNeilBrown 		if (bvecs[i].bv_page)
10272ca68f5eSNeilBrown 			put_page(bvecs[i].bv_page);
10282ca68f5eSNeilBrown 	kfree(bvecs);
10294f024f37SKent Overstreet 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
10304f024f37SKent Overstreet 		 bio->bi_iter.bi_size);
10314b6d287fSNeilBrown }
10324b6d287fSNeilBrown 
1033f54a9d0eSNeilBrown struct raid1_plug_cb {
1034f54a9d0eSNeilBrown 	struct blk_plug_cb	cb;
1035f54a9d0eSNeilBrown 	struct bio_list		pending;
1036f54a9d0eSNeilBrown 	int			pending_cnt;
1037f54a9d0eSNeilBrown };
1038f54a9d0eSNeilBrown 
1039f54a9d0eSNeilBrown static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1040f54a9d0eSNeilBrown {
1041f54a9d0eSNeilBrown 	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1042f54a9d0eSNeilBrown 						  cb);
1043f54a9d0eSNeilBrown 	struct mddev *mddev = plug->cb.data;
1044f54a9d0eSNeilBrown 	struct r1conf *conf = mddev->private;
1045f54a9d0eSNeilBrown 	struct bio *bio;
1046f54a9d0eSNeilBrown 
1047874807a8SNeilBrown 	if (from_schedule || current->bio_list) {
1048f54a9d0eSNeilBrown 		spin_lock_irq(&conf->device_lock);
1049f54a9d0eSNeilBrown 		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1050f54a9d0eSNeilBrown 		conf->pending_count += plug->pending_cnt;
1051f54a9d0eSNeilBrown 		spin_unlock_irq(&conf->device_lock);
1052ee0b0244SNeilBrown 		wake_up(&conf->wait_barrier);
1053f54a9d0eSNeilBrown 		md_wakeup_thread(mddev->thread);
1054f54a9d0eSNeilBrown 		kfree(plug);
1055f54a9d0eSNeilBrown 		return;
1056f54a9d0eSNeilBrown 	}
1057f54a9d0eSNeilBrown 
1058f54a9d0eSNeilBrown 	/* we aren't scheduling, so we can do the write-out directly. */
1059f54a9d0eSNeilBrown 	bio = bio_list_get(&plug->pending);
1060f54a9d0eSNeilBrown 	bitmap_unplug(mddev->bitmap);
1061f54a9d0eSNeilBrown 	wake_up(&conf->wait_barrier);
1062f54a9d0eSNeilBrown 
1063f54a9d0eSNeilBrown 	while (bio) { /* submit pending writes */
1064f54a9d0eSNeilBrown 		struct bio *next = bio->bi_next;
1065f54a9d0eSNeilBrown 		bio->bi_next = NULL;
106632f9f570SShaohua Li 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
106732f9f570SShaohua Li 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
106832f9f570SShaohua Li 			/* Just ignore it */
106932f9f570SShaohua Li 			bio_endio(bio, 0);
107032f9f570SShaohua Li 		else
1071f54a9d0eSNeilBrown 			generic_make_request(bio);
1072f54a9d0eSNeilBrown 		bio = next;
1073f54a9d0eSNeilBrown 	}
1074f54a9d0eSNeilBrown 	kfree(plug);
1075f54a9d0eSNeilBrown }
1076f54a9d0eSNeilBrown 
1077b4fdcb02SLinus Torvalds static void make_request(struct mddev *mddev, struct bio * bio)
10781da177e4SLinus Torvalds {
1079e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
10800eaf822cSJonathan Brassow 	struct raid1_info *mirror;
10819f2c9d12SNeilBrown 	struct r1bio *r1_bio;
10821da177e4SLinus Torvalds 	struct bio *read_bio;
10831f68f0c4SNeilBrown 	int i, disks;
108484255d10SNeilBrown 	struct bitmap *bitmap;
1085191ea9b2SNeilBrown 	unsigned long flags;
1086a362357bSJens Axboe 	const int rw = bio_data_dir(bio);
10872c7d46ecSNeilBrown 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1088e9c7469bSTejun Heo 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
10892ff8cc2cSShaohua Li 	const unsigned long do_discard = (bio->bi_rw
10902ff8cc2cSShaohua Li 					  & (REQ_DISCARD | REQ_SECURE));
1091c8dc9c65SJoe Lawrence 	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
10923cb03002SNeilBrown 	struct md_rdev *blocked_rdev;
1093f54a9d0eSNeilBrown 	struct blk_plug_cb *cb;
1094f54a9d0eSNeilBrown 	struct raid1_plug_cb *plug = NULL;
10951f68f0c4SNeilBrown 	int first_clone;
10961f68f0c4SNeilBrown 	int sectors_handled;
10971f68f0c4SNeilBrown 	int max_sectors;
109879ef3a8aSmajianpeng 	sector_t start_next_window;
1099191ea9b2SNeilBrown 
11001da177e4SLinus Torvalds 	/*
11011da177e4SLinus Torvalds 	 * Register the new request and wait if the reconstruction
11021da177e4SLinus Torvalds 	 * thread has put up a barrier for new requests.
11031da177e4SLinus Torvalds 	 * Continue immediately if no resync is active currently.
11041da177e4SLinus Torvalds 	 */
110562de608dSNeilBrown 
11063d310eb7SNeilBrown 	md_write_start(mddev, bio); /* wait on superblock update early */
11073d310eb7SNeilBrown 
11086eef4b21SNeilBrown 	if (bio_data_dir(bio) == WRITE &&
1109f73a1c7dSKent Overstreet 	    bio_end_sector(bio) > mddev->suspend_lo &&
11104f024f37SKent Overstreet 	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
11116eef4b21SNeilBrown 		/* As the suspend_* range is controlled by
11126eef4b21SNeilBrown 		 * userspace, we want an interruptible
11136eef4b21SNeilBrown 		 * wait.
11146eef4b21SNeilBrown 		 */
11156eef4b21SNeilBrown 		DEFINE_WAIT(w);
11166eef4b21SNeilBrown 		for (;;) {
11176eef4b21SNeilBrown 			flush_signals(current);
11186eef4b21SNeilBrown 			prepare_to_wait(&conf->wait_barrier,
11196eef4b21SNeilBrown 					&w, TASK_INTERRUPTIBLE);
1120f73a1c7dSKent Overstreet 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
11214f024f37SKent Overstreet 			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
11226eef4b21SNeilBrown 				break;
11236eef4b21SNeilBrown 			schedule();
11246eef4b21SNeilBrown 		}
11256eef4b21SNeilBrown 		finish_wait(&conf->wait_barrier, &w);
11266eef4b21SNeilBrown 	}
112762de608dSNeilBrown 
112879ef3a8aSmajianpeng 	start_next_window = wait_barrier(conf, bio);
11291da177e4SLinus Torvalds 
113084255d10SNeilBrown 	bitmap = mddev->bitmap;
113184255d10SNeilBrown 
11321da177e4SLinus Torvalds 	/*
11331da177e4SLinus Torvalds 	 * make_request() can abort the operation when READA is being
11341da177e4SLinus Torvalds 	 * used and no empty request is available.
11351da177e4SLinus Torvalds 	 *
11361da177e4SLinus Torvalds 	 */
11371da177e4SLinus Torvalds 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
11381da177e4SLinus Torvalds 
11391da177e4SLinus Torvalds 	r1_bio->master_bio = bio;
1140aa8b57aaSKent Overstreet 	r1_bio->sectors = bio_sectors(bio);
1141191ea9b2SNeilBrown 	r1_bio->state = 0;
11421da177e4SLinus Torvalds 	r1_bio->mddev = mddev;
11434f024f37SKent Overstreet 	r1_bio->sector = bio->bi_iter.bi_sector;
11441da177e4SLinus Torvalds 
1145d2eb35acSNeilBrown 	/* We might need to issue multiple reads to different
1146d2eb35acSNeilBrown 	 * devices if there are bad blocks around, so we keep
1147d2eb35acSNeilBrown 	 * track of the number of reads in bio->bi_phys_segments.
1148d2eb35acSNeilBrown 	 * If this is 0, there is only one r1_bio and no locking
1149d2eb35acSNeilBrown 	 * will be needed when requests complete.  If it is
1150d2eb35acSNeilBrown 	 * non-zero, then it is the number of not-completed requests.
1151d2eb35acSNeilBrown 	 */
1152d2eb35acSNeilBrown 	bio->bi_phys_segments = 0;
1153d2eb35acSNeilBrown 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1154d2eb35acSNeilBrown 
1155a362357bSJens Axboe 	if (rw == READ) {
11561da177e4SLinus Torvalds 		/*
11571da177e4SLinus Torvalds 		 * read balancing logic:
11581da177e4SLinus Torvalds 		 */
1159d2eb35acSNeilBrown 		int rdisk;
1160d2eb35acSNeilBrown 
1161d2eb35acSNeilBrown read_again:
1162d2eb35acSNeilBrown 		rdisk = read_balance(conf, r1_bio, &max_sectors);
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 		if (rdisk < 0) {
11651da177e4SLinus Torvalds 			/* couldn't find anywhere to read from */
11661da177e4SLinus Torvalds 			raid_end_bio_io(r1_bio);
11675a7bbad2SChristoph Hellwig 			return;
11681da177e4SLinus Torvalds 		}
11691da177e4SLinus Torvalds 		mirror = conf->mirrors + rdisk;
11701da177e4SLinus Torvalds 
1171e555190dSNeilBrown 		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1172e555190dSNeilBrown 		    bitmap) {
1173e555190dSNeilBrown 			/* Reading from a write-mostly device must
1174e555190dSNeilBrown 			 * take care not to over-take any writes
1175e555190dSNeilBrown 			 * that are 'behind'
1176e555190dSNeilBrown 			 */
1177e555190dSNeilBrown 			wait_event(bitmap->behind_wait,
1178e555190dSNeilBrown 				   atomic_read(&bitmap->behind_writes) == 0);
1179e555190dSNeilBrown 		}
11801da177e4SLinus Torvalds 		r1_bio->read_disk = rdisk;
11811da177e4SLinus Torvalds 
1182a167f663SNeilBrown 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
11834f024f37SKent Overstreet 		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1184d2eb35acSNeilBrown 			 max_sectors);
11851da177e4SLinus Torvalds 
11861da177e4SLinus Torvalds 		r1_bio->bios[rdisk] = read_bio;
11871da177e4SLinus Torvalds 
11884f024f37SKent Overstreet 		read_bio->bi_iter.bi_sector = r1_bio->sector +
11894f024f37SKent Overstreet 			mirror->rdev->data_offset;
11901da177e4SLinus Torvalds 		read_bio->bi_bdev = mirror->rdev->bdev;
11911da177e4SLinus Torvalds 		read_bio->bi_end_io = raid1_end_read_request;
11927b6d91daSChristoph Hellwig 		read_bio->bi_rw = READ | do_sync;
11931da177e4SLinus Torvalds 		read_bio->bi_private = r1_bio;
11941da177e4SLinus Torvalds 
1195d2eb35acSNeilBrown 		if (max_sectors < r1_bio->sectors) {
1196d2eb35acSNeilBrown 			/* could not read all from this device, so we will
1197d2eb35acSNeilBrown 			 * need another r1_bio.
1198d2eb35acSNeilBrown 			 */
1199d2eb35acSNeilBrown 
1200d2eb35acSNeilBrown 			sectors_handled = (r1_bio->sector + max_sectors
12014f024f37SKent Overstreet 					   - bio->bi_iter.bi_sector);
1202d2eb35acSNeilBrown 			r1_bio->sectors = max_sectors;
1203d2eb35acSNeilBrown 			spin_lock_irq(&conf->device_lock);
1204d2eb35acSNeilBrown 			if (bio->bi_phys_segments == 0)
1205d2eb35acSNeilBrown 				bio->bi_phys_segments = 2;
1206d2eb35acSNeilBrown 			else
1207d2eb35acSNeilBrown 				bio->bi_phys_segments++;
1208d2eb35acSNeilBrown 			spin_unlock_irq(&conf->device_lock);
1209d2eb35acSNeilBrown 			/* Cannot call generic_make_request directly
1210d2eb35acSNeilBrown 			 * as that will be queued in __make_request
1211d2eb35acSNeilBrown 			 * and subsequent mempool_alloc might block waiting
1212d2eb35acSNeilBrown 			 * for it.  So hand bio over to raid1d.
1213d2eb35acSNeilBrown 			 */
1214d2eb35acSNeilBrown 			reschedule_retry(r1_bio);
1215d2eb35acSNeilBrown 
1216d2eb35acSNeilBrown 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1217d2eb35acSNeilBrown 
1218d2eb35acSNeilBrown 			r1_bio->master_bio = bio;
1219aa8b57aaSKent Overstreet 			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1220d2eb35acSNeilBrown 			r1_bio->state = 0;
1221d2eb35acSNeilBrown 			r1_bio->mddev = mddev;
12224f024f37SKent Overstreet 			r1_bio->sector = bio->bi_iter.bi_sector +
12234f024f37SKent Overstreet 				sectors_handled;
1224d2eb35acSNeilBrown 			goto read_again;
1225d2eb35acSNeilBrown 		} else
12261da177e4SLinus Torvalds 			generic_make_request(read_bio);
12275a7bbad2SChristoph Hellwig 		return;
12281da177e4SLinus Torvalds 	}
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds 	/*
12311da177e4SLinus Torvalds 	 * WRITE:
12321da177e4SLinus Torvalds 	 */
123334db0cd6SNeilBrown 	if (conf->pending_count >= max_queued_requests) {
123434db0cd6SNeilBrown 		md_wakeup_thread(mddev->thread);
123534db0cd6SNeilBrown 		wait_event(conf->wait_barrier,
123634db0cd6SNeilBrown 			   conf->pending_count < max_queued_requests);
123734db0cd6SNeilBrown 	}
12381f68f0c4SNeilBrown 	/* first select target devices under rcu_lock and
12391da177e4SLinus Torvalds 	 * inc refcount on their rdev.  Record them by setting
12401da177e4SLinus Torvalds 	 * bios[x] to bio
12411f68f0c4SNeilBrown 	 * If there are known/acknowledged bad blocks on any device on
12421f68f0c4SNeilBrown 	 * which we have seen a write error, we want to avoid writing those
12431f68f0c4SNeilBrown 	 * blocks.
12441f68f0c4SNeilBrown 	 * This potentially requires several writes to write around
12451f68f0c4SNeilBrown 	 * the bad blocks.  Each set of writes gets its own r1bio
12461f68f0c4SNeilBrown 	 * with a set of bios attached.
12471da177e4SLinus Torvalds 	 */
1248c3b328acSNeilBrown 
12498f19ccb2SNeilBrown 	disks = conf->raid_disks * 2;
12506bfe0b49SDan Williams  retry_write:
125179ef3a8aSmajianpeng 	r1_bio->start_next_window = start_next_window;
12526bfe0b49SDan Williams 	blocked_rdev = NULL;
12531da177e4SLinus Torvalds 	rcu_read_lock();
12541f68f0c4SNeilBrown 	max_sectors = r1_bio->sectors;
12551da177e4SLinus Torvalds 	for (i = 0;  i < disks; i++) {
12563cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
12576bfe0b49SDan Williams 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
12586bfe0b49SDan Williams 			atomic_inc(&rdev->nr_pending);
12596bfe0b49SDan Williams 			blocked_rdev = rdev;
12606bfe0b49SDan Williams 			break;
12616bfe0b49SDan Williams 		}
12621da177e4SLinus Torvalds 		r1_bio->bios[i] = NULL;
12636b740b8dSNeilBrown 		if (!rdev || test_bit(Faulty, &rdev->flags)
12646b740b8dSNeilBrown 		    || test_bit(Unmerged, &rdev->flags)) {
12658f19ccb2SNeilBrown 			if (i < conf->raid_disks)
12661f68f0c4SNeilBrown 				set_bit(R1BIO_Degraded, &r1_bio->state);
12671f68f0c4SNeilBrown 			continue;
1268964147d5SNeilBrown 		}
12691f68f0c4SNeilBrown 
12701f68f0c4SNeilBrown 		atomic_inc(&rdev->nr_pending);
12711f68f0c4SNeilBrown 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
12721f68f0c4SNeilBrown 			sector_t first_bad;
12731f68f0c4SNeilBrown 			int bad_sectors;
12741f68f0c4SNeilBrown 			int is_bad;
12751f68f0c4SNeilBrown 
12761f68f0c4SNeilBrown 			is_bad = is_badblock(rdev, r1_bio->sector,
12771f68f0c4SNeilBrown 					     max_sectors,
12781f68f0c4SNeilBrown 					     &first_bad, &bad_sectors);
12791f68f0c4SNeilBrown 			if (is_bad < 0) {
12801f68f0c4SNeilBrown 				/* mustn't write here until the bad block is
12811f68f0c4SNeilBrown 				 * acknowledged*/
12811f68f0c4SNeilBrown 				 * acknowledged */
12831f68f0c4SNeilBrown 				blocked_rdev = rdev;
12841f68f0c4SNeilBrown 				break;
12851f68f0c4SNeilBrown 			}
12861f68f0c4SNeilBrown 			if (is_bad && first_bad <= r1_bio->sector) {
12871f68f0c4SNeilBrown 				/* Cannot write here at all */
12881f68f0c4SNeilBrown 				bad_sectors -= (r1_bio->sector - first_bad);
12891f68f0c4SNeilBrown 				if (bad_sectors < max_sectors)
12901f68f0c4SNeilBrown 					/* mustn't write more than bad_sectors
12911f68f0c4SNeilBrown 					 * to other devices yet
12921f68f0c4SNeilBrown 					 */
12931f68f0c4SNeilBrown 					max_sectors = bad_sectors;
12941f68f0c4SNeilBrown 				rdev_dec_pending(rdev, mddev);
12951f68f0c4SNeilBrown 				/* We don't set R1BIO_Degraded as that
12961f68f0c4SNeilBrown 				 * only applies if the disk is
12971f68f0c4SNeilBrown 				 * missing, so it might be re-added,
12981f68f0c4SNeilBrown 				 * and we want to know to recover this
12991f68f0c4SNeilBrown 				 * chunk.
13001f68f0c4SNeilBrown 				 * In this case the device is here,
13011f68f0c4SNeilBrown 				 * and the fact that this chunk is not
13021f68f0c4SNeilBrown 				 * in-sync is recorded in the bad
13031f68f0c4SNeilBrown 				 * block log
13041f68f0c4SNeilBrown 				 */
13051f68f0c4SNeilBrown 				continue;
13061f68f0c4SNeilBrown 			}
13071f68f0c4SNeilBrown 			if (is_bad) {
13081f68f0c4SNeilBrown 				int good_sectors = first_bad - r1_bio->sector;
13091f68f0c4SNeilBrown 				if (good_sectors < max_sectors)
13101f68f0c4SNeilBrown 					max_sectors = good_sectors;
13111f68f0c4SNeilBrown 			}
13121f68f0c4SNeilBrown 		}
13131f68f0c4SNeilBrown 		r1_bio->bios[i] = bio;
13141da177e4SLinus Torvalds 	}
13151da177e4SLinus Torvalds 	rcu_read_unlock();
13161da177e4SLinus Torvalds 
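	/*
	 * At this point r1_bio->bios[i] is set for each mirror we intend
	 * to write to, max_sectors has been clipped so that the write
	 * avoids any known bad blocks, and blocked_rdev is non-NULL if a
	 * device must become unblocked before we can retry.
	 */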
13176bfe0b49SDan Williams 	if (unlikely(blocked_rdev)) {
13186bfe0b49SDan Williams 		/* Wait for this device to become unblocked */
13196bfe0b49SDan Williams 		int j;
132079ef3a8aSmajianpeng 		sector_t old = start_next_window;
13216bfe0b49SDan Williams 
13226bfe0b49SDan Williams 		for (j = 0; j < i; j++)
13236bfe0b49SDan Williams 			if (r1_bio->bios[j])
13246bfe0b49SDan Williams 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
13251f68f0c4SNeilBrown 		r1_bio->state = 0;
13264f024f37SKent Overstreet 		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
13276bfe0b49SDan Williams 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
132879ef3a8aSmajianpeng 		start_next_window = wait_barrier(conf, bio);
132979ef3a8aSmajianpeng 		/*
133079ef3a8aSmajianpeng 		 * We must make sure the multiple r1bios of this bio have
133179ef3a8aSmajianpeng 		 * the same value of bi_phys_segments
133279ef3a8aSmajianpeng 		 */
133379ef3a8aSmajianpeng 		if (bio->bi_phys_segments && old &&
133479ef3a8aSmajianpeng 		    old != start_next_window)
133579ef3a8aSmajianpeng 			/* Wait for the former r1bio(s) to complete */
133679ef3a8aSmajianpeng 			wait_event(conf->wait_barrier,
133779ef3a8aSmajianpeng 				   bio->bi_phys_segments == 1);
13386bfe0b49SDan Williams 		goto retry_write;
13396bfe0b49SDan Williams 	}
13406bfe0b49SDan Williams 
13411f68f0c4SNeilBrown 	if (max_sectors < r1_bio->sectors) {
13421f68f0c4SNeilBrown 		/* We are splitting this write into multiple parts, so
13431f68f0c4SNeilBrown 		 * we need to prepare for allocating another r1_bio.
13441f68f0c4SNeilBrown 		 */
13451f68f0c4SNeilBrown 		r1_bio->sectors = max_sectors;
13461f68f0c4SNeilBrown 		spin_lock_irq(&conf->device_lock);
13471f68f0c4SNeilBrown 		if (bio->bi_phys_segments == 0)
13481f68f0c4SNeilBrown 			bio->bi_phys_segments = 2;
13491f68f0c4SNeilBrown 		else
13501f68f0c4SNeilBrown 			bio->bi_phys_segments++;
13511f68f0c4SNeilBrown 		spin_unlock_irq(&conf->device_lock);
1352191ea9b2SNeilBrown 	}
13534f024f37SKent Overstreet 	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
13544b6d287fSNeilBrown 
13554e78064fSNeilBrown 	atomic_set(&r1_bio->remaining, 1);
13564b6d287fSNeilBrown 	atomic_set(&r1_bio->behind_remaining, 0);
1357191ea9b2SNeilBrown 
13581f68f0c4SNeilBrown 	first_clone = 1;
13591da177e4SLinus Torvalds 	for (i = 0; i < disks; i++) {
13601da177e4SLinus Torvalds 		struct bio *mbio;
13611da177e4SLinus Torvalds 		if (!r1_bio->bios[i])
13621da177e4SLinus Torvalds 			continue;
13631da177e4SLinus Torvalds 
1364a167f663SNeilBrown 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
13654f024f37SKent Overstreet 		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
13661da177e4SLinus Torvalds 
13671f68f0c4SNeilBrown 		if (first_clone) {
13681f68f0c4SNeilBrown 			/* do behind I/O ?
13691f68f0c4SNeilBrown 			 * Not if there are too many, or cannot
13701f68f0c4SNeilBrown 			 * allocate memory, or a reader on WriteMostly
13711f68f0c4SNeilBrown 			 * is waiting for behind writes to flush */
13721f68f0c4SNeilBrown 			if (bitmap &&
13731f68f0c4SNeilBrown 			    (atomic_read(&bitmap->behind_writes)
13741f68f0c4SNeilBrown 			     < mddev->bitmap_info.max_write_behind) &&
13751f68f0c4SNeilBrown 			    !waitqueue_active(&bitmap->behind_wait))
13761f68f0c4SNeilBrown 				alloc_behind_pages(mbio, r1_bio);
13771da177e4SLinus Torvalds 
13781f68f0c4SNeilBrown 			bitmap_startwrite(bitmap, r1_bio->sector,
13791f68f0c4SNeilBrown 					  r1_bio->sectors,
13801f68f0c4SNeilBrown 					  test_bit(R1BIO_BehindIO,
13811f68f0c4SNeilBrown 						   &r1_bio->state));
13821f68f0c4SNeilBrown 			first_clone = 0;
13831f68f0c4SNeilBrown 		}
13842ca68f5eSNeilBrown 		if (r1_bio->behind_bvecs) {
13854b6d287fSNeilBrown 			struct bio_vec *bvec;
13864b6d287fSNeilBrown 			int j;
13874b6d287fSNeilBrown 
1388cb34e057SKent Overstreet 			/*
1389cb34e057SKent Overstreet 			 * We trimmed the bio, so _all is legit
13904b6d287fSNeilBrown 			 */
1391d74c6d51SKent Overstreet 			bio_for_each_segment_all(bvec, mbio, j)
13922ca68f5eSNeilBrown 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
13934b6d287fSNeilBrown 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
13944b6d287fSNeilBrown 				atomic_inc(&r1_bio->behind_remaining);
13954b6d287fSNeilBrown 		}
13964b6d287fSNeilBrown 
13971f68f0c4SNeilBrown 		r1_bio->bios[i] = mbio;
13981f68f0c4SNeilBrown 
13994f024f37SKent Overstreet 		mbio->bi_iter.bi_sector	= (r1_bio->sector +
14001f68f0c4SNeilBrown 				   conf->mirrors[i].rdev->data_offset);
14011f68f0c4SNeilBrown 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
14021f68f0c4SNeilBrown 		mbio->bi_end_io	= raid1_end_write_request;
1403c8dc9c65SJoe Lawrence 		mbio->bi_rw =
1404c8dc9c65SJoe Lawrence 			WRITE | do_flush_fua | do_sync | do_discard | do_same;
14051f68f0c4SNeilBrown 		mbio->bi_private = r1_bio;
14061f68f0c4SNeilBrown 
14071da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
1408f54a9d0eSNeilBrown 
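		/*
		 * Queue the clone for write-out: on the per-task plug list
		 * when a plug is active (flushed later by raid1_unplug),
		 * otherwise on conf->pending_bio_list for raid1d.
		 */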
1409f54a9d0eSNeilBrown 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1410f54a9d0eSNeilBrown 		if (cb)
1411f54a9d0eSNeilBrown 			plug = container_of(cb, struct raid1_plug_cb, cb);
1412f54a9d0eSNeilBrown 		else
1413f54a9d0eSNeilBrown 			plug = NULL;
1414191ea9b2SNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
1415f54a9d0eSNeilBrown 		if (plug) {
1416f54a9d0eSNeilBrown 			bio_list_add(&plug->pending, mbio);
1417f54a9d0eSNeilBrown 			plug->pending_cnt++;
1418f54a9d0eSNeilBrown 		} else {
14194e78064fSNeilBrown 			bio_list_add(&conf->pending_bio_list, mbio);
142034db0cd6SNeilBrown 			conf->pending_count++;
1421f54a9d0eSNeilBrown 		}
1422191ea9b2SNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
1423f54a9d0eSNeilBrown 		if (!plug)
1424b357f04aSNeilBrown 			md_wakeup_thread(mddev->thread);
14254e78064fSNeilBrown 	}
1426079fa166SNeilBrown 	/* Mustn't call r1_bio_write_done before this next test,
1427079fa166SNeilBrown 	 * as it could result in the bio being freed.
1428079fa166SNeilBrown 	 */
1429aa8b57aaSKent Overstreet 	if (sectors_handled < bio_sectors(bio)) {
1430079fa166SNeilBrown 		r1_bio_write_done(r1_bio);
14311f68f0c4SNeilBrown 		/* We need another r1_bio.  It has already been counted
14321f68f0c4SNeilBrown 		 * in bio->bi_phys_segments
14331f68f0c4SNeilBrown 		 */
14341f68f0c4SNeilBrown 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
14351f68f0c4SNeilBrown 		r1_bio->master_bio = bio;
1436aa8b57aaSKent Overstreet 		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
14371f68f0c4SNeilBrown 		r1_bio->state = 0;
14381f68f0c4SNeilBrown 		r1_bio->mddev = mddev;
14394f024f37SKent Overstreet 		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
14401f68f0c4SNeilBrown 		goto retry_write;
14411f68f0c4SNeilBrown 	}
14421f68f0c4SNeilBrown 
1443079fa166SNeilBrown 	r1_bio_write_done(r1_bio);
1444079fa166SNeilBrown 
1445079fa166SNeilBrown 	/* In case raid1d snuck in to freeze_array */
1446079fa166SNeilBrown 	wake_up(&conf->wait_barrier);
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
1449fd01b88cSNeilBrown static void status(struct seq_file *seq, struct mddev *mddev)
14501da177e4SLinus Torvalds {
1451e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
14521da177e4SLinus Torvalds 	int i;
14531da177e4SLinus Torvalds 
14541da177e4SLinus Torvalds 	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
145511ce99e6SNeilBrown 		   conf->raid_disks - mddev->degraded);
1456ddac7c7eSNeilBrown 	rcu_read_lock();
1457ddac7c7eSNeilBrown 	for (i = 0; i < conf->raid_disks; i++) {
14583cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
14591da177e4SLinus Torvalds 		seq_printf(seq, "%s",
1460ddac7c7eSNeilBrown 			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1461ddac7c7eSNeilBrown 	}
1462ddac7c7eSNeilBrown 	rcu_read_unlock();
14631da177e4SLinus Torvalds 	seq_printf(seq, "]");
14641da177e4SLinus Torvalds }
14651da177e4SLinus Torvalds 
14661da177e4SLinus Torvalds 
1467fd01b88cSNeilBrown static void error(struct mddev *mddev, struct md_rdev *rdev)
14681da177e4SLinus Torvalds {
14691da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1470e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds 	/*
14731da177e4SLinus Torvalds 	 * If it is not operational, then we have already marked it as dead
14741da177e4SLinus Torvalds 	 * else if it is the last working disk, ignore the error, let the
14751da177e4SLinus Torvalds 	 * next level up know.
14761da177e4SLinus Torvalds 	 * else mark the drive as failed
14771da177e4SLinus Torvalds 	 */
1478b2d444d7SNeilBrown 	if (test_bit(In_sync, &rdev->flags)
14794044ba58SNeilBrown 	    && (conf->raid_disks - mddev->degraded) == 1) {
14801da177e4SLinus Torvalds 		/*
14811da177e4SLinus Torvalds 		 * Don't fail the drive, act as though we were just a
14824044ba58SNeilBrown 		 * normal single drive.
14834044ba58SNeilBrown 		 * However don't try a recovery from this drive as
14844044ba58SNeilBrown 		 * it is very likely to fail.
14851da177e4SLinus Torvalds 		 */
14865389042fSNeilBrown 		conf->recovery_disabled = mddev->recovery_disabled;
14871da177e4SLinus Torvalds 		return;
14884044ba58SNeilBrown 	}
1489de393cdeSNeilBrown 	set_bit(Blocked, &rdev->flags);
1490c04be0aaSNeilBrown 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1491c04be0aaSNeilBrown 		unsigned long flags;
1492c04be0aaSNeilBrown 		spin_lock_irqsave(&conf->device_lock, flags);
14931da177e4SLinus Torvalds 		mddev->degraded++;
1494dd00a99eSNeilBrown 		set_bit(Faulty, &rdev->flags);
1495c04be0aaSNeilBrown 		spin_unlock_irqrestore(&conf->device_lock, flags);
14961da177e4SLinus Torvalds 		/*
14971da177e4SLinus Torvalds 		 * if recovery is running, make sure it aborts.
14981da177e4SLinus Torvalds 		 */
1499dfc70645SNeilBrown 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1500dd00a99eSNeilBrown 	} else
1501b2d444d7SNeilBrown 		set_bit(Faulty, &rdev->flags);
1502850b2b42SNeilBrown 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1503067032bcSJoe Perches 	printk(KERN_ALERT
1504067032bcSJoe Perches 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
1505067032bcSJoe Perches 	       "md/raid1:%s: Operation continuing on %d devices.\n",
15069dd1e2faSNeilBrown 	       mdname(mddev), bdevname(rdev->bdev, b),
15079dd1e2faSNeilBrown 	       mdname(mddev), conf->raid_disks - mddev->degraded);
15081da177e4SLinus Torvalds }
15091da177e4SLinus Torvalds 
1510e8096360SNeilBrown static void print_conf(struct r1conf *conf)
15111da177e4SLinus Torvalds {
15121da177e4SLinus Torvalds 	int i;
15131da177e4SLinus Torvalds 
15149dd1e2faSNeilBrown 	printk(KERN_DEBUG "RAID1 conf printout:\n");
15151da177e4SLinus Torvalds 	if (!conf) {
15169dd1e2faSNeilBrown 		printk(KERN_DEBUG "(!conf)\n");
15171da177e4SLinus Torvalds 		return;
15181da177e4SLinus Torvalds 	}
15199dd1e2faSNeilBrown 	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
15201da177e4SLinus Torvalds 		conf->raid_disks);
15211da177e4SLinus Torvalds 
1522ddac7c7eSNeilBrown 	rcu_read_lock();
15231da177e4SLinus Torvalds 	for (i = 0; i < conf->raid_disks; i++) {
15241da177e4SLinus Torvalds 		char b[BDEVNAME_SIZE];
15253cb03002SNeilBrown 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1526ddac7c7eSNeilBrown 		if (rdev)
15279dd1e2faSNeilBrown 			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1528ddac7c7eSNeilBrown 			       i, !test_bit(In_sync, &rdev->flags),
1529ddac7c7eSNeilBrown 			       !test_bit(Faulty, &rdev->flags),
1530ddac7c7eSNeilBrown 			       bdevname(rdev->bdev,b));
15311da177e4SLinus Torvalds 	}
1532ddac7c7eSNeilBrown 	rcu_read_unlock();
15331da177e4SLinus Torvalds }
15341da177e4SLinus Torvalds 
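/*
 * Called once a resync/recovery pass has finished: drain the barrier,
 * release the resync buffer pool and reset the resync window state.
 */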
1535e8096360SNeilBrown static void close_sync(struct r1conf *conf)
15361da177e4SLinus Torvalds {
153779ef3a8aSmajianpeng 	wait_barrier(conf, NULL);
153879ef3a8aSmajianpeng 	allow_barrier(conf, 0, 0);
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds 	mempool_destroy(conf->r1buf_pool);
15411da177e4SLinus Torvalds 	conf->r1buf_pool = NULL;
154279ef3a8aSmajianpeng 
154379ef3a8aSmajianpeng 	conf->next_resync = 0;
154479ef3a8aSmajianpeng 	conf->start_next_window = MaxSector;
15451da177e4SLinus Torvalds }
15461da177e4SLinus Torvalds 
1547fd01b88cSNeilBrown static int raid1_spare_active(struct mddev *mddev)
15481da177e4SLinus Torvalds {
15491da177e4SLinus Torvalds 	int i;
1550e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
15516b965620SNeilBrown 	int count = 0;
15526b965620SNeilBrown 	unsigned long flags;
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	/*
15551da177e4SLinus Torvalds 	 * Find all failed disks within the RAID1 configuration
1556ddac7c7eSNeilBrown 	 * and mark them readable.
1557ddac7c7eSNeilBrown 	 * Called under mddev lock, so rcu protection not needed.
15581da177e4SLinus Torvalds 	 */
15591da177e4SLinus Torvalds 	for (i = 0; i < conf->raid_disks; i++) {
15603cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[i].rdev;
15618c7a2c2bSNeilBrown 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
15628c7a2c2bSNeilBrown 		if (repl
15638c7a2c2bSNeilBrown 		    && repl->recovery_offset == MaxSector
15648c7a2c2bSNeilBrown 		    && !test_bit(Faulty, &repl->flags)
15658c7a2c2bSNeilBrown 		    && !test_and_set_bit(In_sync, &repl->flags)) {
15668c7a2c2bSNeilBrown 			/* replacement has just become active */
15678c7a2c2bSNeilBrown 			if (!rdev ||
15688c7a2c2bSNeilBrown 			    !test_and_clear_bit(In_sync, &rdev->flags))
15698c7a2c2bSNeilBrown 				count++;
15708c7a2c2bSNeilBrown 			if (rdev) {
15718c7a2c2bSNeilBrown 				/* Replaced device not technically
15728c7a2c2bSNeilBrown 				 * faulty, but we need to be sure
15738c7a2c2bSNeilBrown 				 * it gets removed and never re-added
15748c7a2c2bSNeilBrown 				 */
15758c7a2c2bSNeilBrown 				set_bit(Faulty, &rdev->flags);
15768c7a2c2bSNeilBrown 				sysfs_notify_dirent_safe(
15778c7a2c2bSNeilBrown 					rdev->sysfs_state);
15788c7a2c2bSNeilBrown 			}
15798c7a2c2bSNeilBrown 		}
1580ddac7c7eSNeilBrown 		if (rdev
158161e4947cSLukasz Dorau 		    && rdev->recovery_offset == MaxSector
1582ddac7c7eSNeilBrown 		    && !test_bit(Faulty, &rdev->flags)
1583c04be0aaSNeilBrown 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
15846b965620SNeilBrown 			count++;
1585654e8b5aSJonathan Brassow 			sysfs_notify_dirent_safe(rdev->sysfs_state);
15861da177e4SLinus Torvalds 		}
15871da177e4SLinus Torvalds 	}
15886b965620SNeilBrown 	spin_lock_irqsave(&conf->device_lock, flags);
15896b965620SNeilBrown 	mddev->degraded -= count;
15906b965620SNeilBrown 	spin_unlock_irqrestore(&conf->device_lock, flags);
15911da177e4SLinus Torvalds 
15921da177e4SLinus Torvalds 	print_conf(conf);
15936b965620SNeilBrown 	return count;
15941da177e4SLinus Torvalds }
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds 
1597fd01b88cSNeilBrown static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
15981da177e4SLinus Torvalds {
1599e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1600199050eaSNeil Brown 	int err = -EEXIST;
160141158c7eSNeilBrown 	int mirror = 0;
16020eaf822cSJonathan Brassow 	struct raid1_info *p;
16036c2fce2eSNeil Brown 	int first = 0;
160430194636SNeilBrown 	int last = conf->raid_disks - 1;
16056b740b8dSNeilBrown 	struct request_queue *q = bdev_get_queue(rdev->bdev);
16061da177e4SLinus Torvalds 
16075389042fSNeilBrown 	if (mddev->recovery_disabled == conf->recovery_disabled)
16085389042fSNeilBrown 		return -EBUSY;
16095389042fSNeilBrown 
16106c2fce2eSNeil Brown 	if (rdev->raid_disk >= 0)
16116c2fce2eSNeil Brown 		first = last = rdev->raid_disk;
16126c2fce2eSNeil Brown 
16136b740b8dSNeilBrown 	if (q->merge_bvec_fn) {
16146b740b8dSNeilBrown 		set_bit(Unmerged, &rdev->flags);
16156b740b8dSNeilBrown 		mddev->merge_check_needed = 1;
16166b740b8dSNeilBrown 	}
16176b740b8dSNeilBrown 
16187ef449d1SNeilBrown 	for (mirror = first; mirror <= last; mirror++) {
16197ef449d1SNeilBrown 		p = conf->mirrors+mirror;
16207ef449d1SNeilBrown 		if (!p->rdev) {
16211da177e4SLinus Torvalds 
16229092c02dSJonathan Brassow 			if (mddev->gendisk)
16238f6c2e4bSMartin K. Petersen 				disk_stack_limits(mddev->gendisk, rdev->bdev,
16248f6c2e4bSMartin K. Petersen 						  rdev->data_offset << 9);
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 			p->head_position = 0;
16271da177e4SLinus Torvalds 			rdev->raid_disk = mirror;
1628199050eaSNeil Brown 			err = 0;
16296aea114aSNeilBrown 			/* As all devices are equivalent, we don't need a full recovery
16306aea114aSNeilBrown 			 * if this drive was recently part of the array
16316aea114aSNeilBrown 			 */
16326aea114aSNeilBrown 			if (rdev->saved_raid_disk < 0)
163341158c7eSNeilBrown 				conf->fullsync = 1;
1634d6065f7bSSuzanne Wood 			rcu_assign_pointer(p->rdev, rdev);
16351da177e4SLinus Torvalds 			break;
16361da177e4SLinus Torvalds 		}
16377ef449d1SNeilBrown 		if (test_bit(WantReplacement, &p->rdev->flags) &&
16387ef449d1SNeilBrown 		    p[conf->raid_disks].rdev == NULL) {
16397ef449d1SNeilBrown 			/* Add this device as a replacement */
16407ef449d1SNeilBrown 			clear_bit(In_sync, &rdev->flags);
16417ef449d1SNeilBrown 			set_bit(Replacement, &rdev->flags);
16427ef449d1SNeilBrown 			rdev->raid_disk = mirror;
16437ef449d1SNeilBrown 			err = 0;
16447ef449d1SNeilBrown 			conf->fullsync = 1;
16457ef449d1SNeilBrown 			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
16467ef449d1SNeilBrown 			break;
16477ef449d1SNeilBrown 		}
16487ef449d1SNeilBrown 	}
16496b740b8dSNeilBrown 	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
16506b740b8dSNeilBrown 		/* Some requests might not have seen this new
16516b740b8dSNeilBrown 		 * merge_bvec_fn.  We must wait for them to complete
16526b740b8dSNeilBrown 		 * before merging the device fully.
16536b740b8dSNeilBrown 		 * First we make sure any code which has tested
16546b740b8dSNeilBrown 		 * our function has submitted the request, then
16556b740b8dSNeilBrown 		 * we wait for all outstanding requests to complete.
16566b740b8dSNeilBrown 		 */
16576b740b8dSNeilBrown 		synchronize_sched();
1658e2d59925SNeilBrown 		freeze_array(conf, 0);
1659e2d59925SNeilBrown 		unfreeze_array(conf);
16606b740b8dSNeilBrown 		clear_bit(Unmerged, &rdev->flags);
16616b740b8dSNeilBrown 	}
1662ac5e7113SAndre Noll 	md_integrity_add_rdev(rdev, mddev);
16639092c02dSJonathan Brassow 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
16642ff8cc2cSShaohua Li 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
16651da177e4SLinus Torvalds 	print_conf(conf);
1666199050eaSNeil Brown 	return err;
16671da177e4SLinus Torvalds }
16681da177e4SLinus Torvalds 
1669b8321b68SNeilBrown static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
16701da177e4SLinus Torvalds {
1671e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
16721da177e4SLinus Torvalds 	int err = 0;
1673b8321b68SNeilBrown 	int number = rdev->raid_disk;
16740eaf822cSJonathan Brassow 	struct raid1_info *p = conf->mirrors + number;
16751da177e4SLinus Torvalds 
1676b014f14cSNeilBrown 	if (rdev != p->rdev)
1677b014f14cSNeilBrown 		p = conf->mirrors + conf->raid_disks + number;
1678b014f14cSNeilBrown 
16791da177e4SLinus Torvalds 	print_conf(conf);
1680b8321b68SNeilBrown 	if (rdev == p->rdev) {
1681b2d444d7SNeilBrown 		if (test_bit(In_sync, &rdev->flags) ||
16821da177e4SLinus Torvalds 		    atomic_read(&rdev->nr_pending)) {
16831da177e4SLinus Torvalds 			err = -EBUSY;
16841da177e4SLinus Torvalds 			goto abort;
16851da177e4SLinus Torvalds 		}
1686046abeedSNeilBrown 		/* Only remove non-faulty devices if recovery
1687dfc70645SNeilBrown 		 * is not possible.
1688dfc70645SNeilBrown 		 */
1689dfc70645SNeilBrown 		if (!test_bit(Faulty, &rdev->flags) &&
16905389042fSNeilBrown 		    mddev->recovery_disabled != conf->recovery_disabled &&
1691dfc70645SNeilBrown 		    mddev->degraded < conf->raid_disks) {
1692dfc70645SNeilBrown 			err = -EBUSY;
1693dfc70645SNeilBrown 			goto abort;
1694dfc70645SNeilBrown 		}
16951da177e4SLinus Torvalds 		p->rdev = NULL;
1696fbd568a3SPaul E. McKenney 		synchronize_rcu();
16971da177e4SLinus Torvalds 		if (atomic_read(&rdev->nr_pending)) {
16981da177e4SLinus Torvalds 			/* lost the race, try later */
16991da177e4SLinus Torvalds 			err = -EBUSY;
17001da177e4SLinus Torvalds 			p->rdev = rdev;
1701ac5e7113SAndre Noll 			goto abort;
17028c7a2c2bSNeilBrown 		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
17038c7a2c2bSNeilBrown 			/* We just removed a device that is being replaced.
17048c7a2c2bSNeilBrown 			 * Move down the replacement.  We drain all IO before
17058c7a2c2bSNeilBrown 			 * doing this to avoid confusion.
17068c7a2c2bSNeilBrown 			 */
17078c7a2c2bSNeilBrown 			struct md_rdev *repl =
17088c7a2c2bSNeilBrown 				conf->mirrors[conf->raid_disks + number].rdev;
1709e2d59925SNeilBrown 			freeze_array(conf, 0);
17108c7a2c2bSNeilBrown 			clear_bit(Replacement, &repl->flags);
17118c7a2c2bSNeilBrown 			p->rdev = repl;
17128c7a2c2bSNeilBrown 			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1713e2d59925SNeilBrown 			unfreeze_array(conf);
1714b014f14cSNeilBrown 			clear_bit(WantReplacement, &rdev->flags);
17158c7a2c2bSNeilBrown 		} else
17168c7a2c2bSNeilBrown 			clear_bit(WantReplacement, &rdev->flags);
1717a91a2785SMartin K. Petersen 		err = md_integrity_register(mddev);
17181da177e4SLinus Torvalds 	}
17191da177e4SLinus Torvalds abort:
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds 	print_conf(conf);
17221da177e4SLinus Torvalds 	return err;
17231da177e4SLinus Torvalds }
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 
17266712ecf8SNeilBrown static void end_sync_read(struct bio *bio, int error)
17271da177e4SLinus Torvalds {
17289f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
17291da177e4SLinus Torvalds 
17300fc280f6SNeilBrown 	update_head_pos(r1_bio->read_disk, r1_bio);
1731ba3ae3beSNamhyung Kim 
17321da177e4SLinus Torvalds 	/*
17331da177e4SLinus Torvalds 	 * we have read a block, now it needs to be re-written,
17341da177e4SLinus Torvalds 	 * or re-read if the read failed.
17351da177e4SLinus Torvalds 	 * We don't do much here, just schedule handling by raid1d
17361da177e4SLinus Torvalds 	 */
173769382e85SNeilBrown 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
17381da177e4SLinus Torvalds 		set_bit(R1BIO_Uptodate, &r1_bio->state);
1739d11c171eSNeilBrown 
1740d11c171eSNeilBrown 	if (atomic_dec_and_test(&r1_bio->remaining))
17411da177e4SLinus Torvalds 		reschedule_retry(r1_bio);
17421da177e4SLinus Torvalds }
17431da177e4SLinus Torvalds 
17446712ecf8SNeilBrown static void end_sync_write(struct bio *bio, int error)
17451da177e4SLinus Torvalds {
17461da177e4SLinus Torvalds 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
17479f2c9d12SNeilBrown 	struct r1bio *r1_bio = bio->bi_private;
1748fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1749e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
17501da177e4SLinus Torvalds 	int mirror=0;
17514367af55SNeilBrown 	sector_t first_bad;
17524367af55SNeilBrown 	int bad_sectors;
17531da177e4SLinus Torvalds 
1754ba3ae3beSNamhyung Kim 	mirror = find_bio_disk(r1_bio, bio);
1755ba3ae3beSNamhyung Kim 
17566b1117d5SNeilBrown 	if (!uptodate) {
175757dab0bdSNeilBrown 		sector_t sync_blocks = 0;
17586b1117d5SNeilBrown 		sector_t s = r1_bio->sector;
17596b1117d5SNeilBrown 		long sectors_to_go = r1_bio->sectors;
17606b1117d5SNeilBrown 		/* make sure these bits doesn't get cleared. */
17616b1117d5SNeilBrown 		/* make sure these bits don't get cleared. */
17625e3db645SNeilBrown 			bitmap_end_sync(mddev->bitmap, s,
17636b1117d5SNeilBrown 					&sync_blocks, 1);
17646b1117d5SNeilBrown 			s += sync_blocks;
17656b1117d5SNeilBrown 			sectors_to_go -= sync_blocks;
17666b1117d5SNeilBrown 		} while (sectors_to_go > 0);
1767d8f05d29SNeilBrown 		set_bit(WriteErrorSeen,
1768d8f05d29SNeilBrown 			&conf->mirrors[mirror].rdev->flags);
176919d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
177019d67169SNeilBrown 				      &conf->mirrors[mirror].rdev->flags))
177119d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
177219d67169SNeilBrown 				mddev->recovery);
1773d8f05d29SNeilBrown 		set_bit(R1BIO_WriteError, &r1_bio->state);
17744367af55SNeilBrown 	} else if (is_badblock(conf->mirrors[mirror].rdev,
17754367af55SNeilBrown 			       r1_bio->sector,
17764367af55SNeilBrown 			       r1_bio->sectors,
17773a9f28a5SNeilBrown 			       &first_bad, &bad_sectors) &&
17783a9f28a5SNeilBrown 		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
17793a9f28a5SNeilBrown 				r1_bio->sector,
17803a9f28a5SNeilBrown 				r1_bio->sectors,
17813a9f28a5SNeilBrown 				&first_bad, &bad_sectors)
17823a9f28a5SNeilBrown 		)
17834367af55SNeilBrown 		set_bit(R1BIO_MadeGood, &r1_bio->state);
1784e3b9703eSNeilBrown 
17851da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
17864367af55SNeilBrown 		int s = r1_bio->sectors;
1787d8f05d29SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1788d8f05d29SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
17894367af55SNeilBrown 			reschedule_retry(r1_bio);
17904367af55SNeilBrown 		else {
17911da177e4SLinus Torvalds 			put_buf(r1_bio);
179273d5c38aSNeilBrown 			md_done_sync(mddev, s, uptodate);
17931da177e4SLinus Torvalds 		}
17941da177e4SLinus Torvalds 	}
17954367af55SNeilBrown }
17961da177e4SLinus Torvalds 
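/*
 * Synchronously read or write one range of a single device.  Returns 1
 * on success.  On failure the range is recorded as bad (or the device
 * is failed if that is not possible); a failed write also sets
 * WriteErrorSeen and requests a replacement device.
 */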
17973cb03002SNeilBrown static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1798d8f05d29SNeilBrown 			    int sectors, struct page *page, int rw)
1799d8f05d29SNeilBrown {
1800d8f05d29SNeilBrown 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1801d8f05d29SNeilBrown 		/* success */
1802d8f05d29SNeilBrown 		return 1;
180319d67169SNeilBrown 	if (rw == WRITE) {
1804d8f05d29SNeilBrown 		set_bit(WriteErrorSeen, &rdev->flags);
180519d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
180619d67169SNeilBrown 				      &rdev->flags))
180719d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
180819d67169SNeilBrown 				rdev->mddev->recovery);
180919d67169SNeilBrown 	}
1810d8f05d29SNeilBrown 	/* need to record an error - either for the block or the device */
1811d8f05d29SNeilBrown 	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1812d8f05d29SNeilBrown 		md_error(rdev->mddev, rdev);
1813d8f05d29SNeilBrown 	return 0;
1814d8f05d29SNeilBrown }
1815d8f05d29SNeilBrown 
18169f2c9d12SNeilBrown static int fix_sync_read_error(struct r1bio *r1_bio)
18171da177e4SLinus Torvalds {
1818a68e5870SNeilBrown 	/* Try some synchronous reads of other devices to get
181969382e85SNeilBrown 	 * good data, much like with normal read errors.  Only
1820ddac7c7eSNeilBrown 	 * read into the pages we already have so we don't
182169382e85SNeilBrown 	 * need to re-issue the read request.
182269382e85SNeilBrown 	 * We don't need to freeze the array, because being in an
182369382e85SNeilBrown 	 * active sync request, there is no normal IO, and
182469382e85SNeilBrown 	 * no overlapping syncs.
182506f60385SNeilBrown 	 * We don't need to check is_badblock() again as we
182606f60385SNeilBrown 	 * made sure that anything with a bad block in range
182706f60385SNeilBrown 	 * will have bi_end_io clear.
18281da177e4SLinus Torvalds 	 */
1829fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1830e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1831a68e5870SNeilBrown 	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
183269382e85SNeilBrown 	sector_t sect = r1_bio->sector;
183369382e85SNeilBrown 	int sectors = r1_bio->sectors;
183469382e85SNeilBrown 	int idx = 0;
183569382e85SNeilBrown 
183669382e85SNeilBrown 	while(sectors) {
183769382e85SNeilBrown 		int s = sectors;
183869382e85SNeilBrown 		int d = r1_bio->read_disk;
183969382e85SNeilBrown 		int success = 0;
18403cb03002SNeilBrown 		struct md_rdev *rdev;
184178d7f5f7SNeilBrown 		int start;
184269382e85SNeilBrown 
184369382e85SNeilBrown 		if (s > (PAGE_SIZE>>9))
184469382e85SNeilBrown 			s = PAGE_SIZE >> 9;
184569382e85SNeilBrown 		do {
184669382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1847ddac7c7eSNeilBrown 				/* No rcu protection needed here devices
1848ddac7c7eSNeilBrown 				 * can only be removed when no resync is
1849ddac7c7eSNeilBrown 				 * active, and resync is currently active
1850ddac7c7eSNeilBrown 				 */
185169382e85SNeilBrown 				rdev = conf->mirrors[d].rdev;
18529d3d8011SNamhyung Kim 				if (sync_page_io(rdev, sect, s<<9,
185369382e85SNeilBrown 						 bio->bi_io_vec[idx].bv_page,
1854ccebd4c4SJonathan Brassow 						 READ, false)) {
185569382e85SNeilBrown 					success = 1;
185669382e85SNeilBrown 					break;
185769382e85SNeilBrown 				}
185869382e85SNeilBrown 			}
185969382e85SNeilBrown 			d++;
18608f19ccb2SNeilBrown 			if (d == conf->raid_disks * 2)
186169382e85SNeilBrown 				d = 0;
186269382e85SNeilBrown 		} while (!success && d != r1_bio->read_disk);
186369382e85SNeilBrown 
186478d7f5f7SNeilBrown 		if (!success) {
186578d7f5f7SNeilBrown 			char b[BDEVNAME_SIZE];
18663a9f28a5SNeilBrown 			int abort = 0;
18673a9f28a5SNeilBrown 			/* Cannot read from anywhere, this block is lost.
18683a9f28a5SNeilBrown 			 * Record a bad block on each device.  If that doesn't
18693a9f28a5SNeilBrown 			 * work just disable and interrupt the recovery.
18703a9f28a5SNeilBrown 			 * Don't fail devices as that won't really help.
18713a9f28a5SNeilBrown 			 */
187278d7f5f7SNeilBrown 			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
187378d7f5f7SNeilBrown 			       " for block %llu\n",
187478d7f5f7SNeilBrown 			       mdname(mddev),
187578d7f5f7SNeilBrown 			       bdevname(bio->bi_bdev, b),
187678d7f5f7SNeilBrown 			       (unsigned long long)r1_bio->sector);
18778f19ccb2SNeilBrown 			for (d = 0; d < conf->raid_disks * 2; d++) {
18783a9f28a5SNeilBrown 				rdev = conf->mirrors[d].rdev;
18793a9f28a5SNeilBrown 				if (!rdev || test_bit(Faulty, &rdev->flags))
18803a9f28a5SNeilBrown 					continue;
18813a9f28a5SNeilBrown 				if (!rdev_set_badblocks(rdev, sect, s, 0))
18823a9f28a5SNeilBrown 					abort = 1;
18833a9f28a5SNeilBrown 			}
18843a9f28a5SNeilBrown 			if (abort) {
1885d890fa2bSNeilBrown 				conf->recovery_disabled =
1886d890fa2bSNeilBrown 					mddev->recovery_disabled;
18873a9f28a5SNeilBrown 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
188878d7f5f7SNeilBrown 				md_done_sync(mddev, r1_bio->sectors, 0);
188978d7f5f7SNeilBrown 				put_buf(r1_bio);
189078d7f5f7SNeilBrown 				return 0;
189178d7f5f7SNeilBrown 			}
18923a9f28a5SNeilBrown 			/* Try next page */
18933a9f28a5SNeilBrown 			sectors -= s;
18943a9f28a5SNeilBrown 			sect += s;
18953a9f28a5SNeilBrown 			idx++;
18963a9f28a5SNeilBrown 			continue;
18973a9f28a5SNeilBrown 		}
189878d7f5f7SNeilBrown 
189978d7f5f7SNeilBrown 		start = d;
190069382e85SNeilBrown 		/* write it back and re-read */
190169382e85SNeilBrown 		while (d != r1_bio->read_disk) {
190269382e85SNeilBrown 			if (d == 0)
19038f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
190469382e85SNeilBrown 			d--;
190569382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
190669382e85SNeilBrown 				continue;
190769382e85SNeilBrown 			rdev = conf->mirrors[d].rdev;
1908d8f05d29SNeilBrown 			if (r1_sync_page_io(rdev, sect, s,
190969382e85SNeilBrown 					    bio->bi_io_vec[idx].bv_page,
1910d8f05d29SNeilBrown 					    WRITE) == 0) {
191178d7f5f7SNeilBrown 				r1_bio->bios[d]->bi_end_io = NULL;
191278d7f5f7SNeilBrown 				rdev_dec_pending(rdev, mddev);
19139d3d8011SNamhyung Kim 			}
1914097426f6SNeilBrown 		}
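		/*
		 * Re-read the same range from the devices just written and
		 * credit any successfully corrected sectors.
		 */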
1915097426f6SNeilBrown 		d = start;
1916097426f6SNeilBrown 		while (d != r1_bio->read_disk) {
1917097426f6SNeilBrown 			if (d == 0)
19188f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
1919097426f6SNeilBrown 			d--;
1920097426f6SNeilBrown 			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1921097426f6SNeilBrown 				continue;
1922097426f6SNeilBrown 			rdev = conf->mirrors[d].rdev;
1923d8f05d29SNeilBrown 			if (r1_sync_page_io(rdev, sect, s,
192469382e85SNeilBrown 					    bio->bi_io_vec[idx].bv_page,
1925d8f05d29SNeilBrown 					    READ) != 0)
19269d3d8011SNamhyung Kim 				atomic_add(s, &rdev->corrected_errors);
192769382e85SNeilBrown 		}
192869382e85SNeilBrown 		sectors -= s;
192969382e85SNeilBrown 		sect += s;
193069382e85SNeilBrown 		idx ++;
193169382e85SNeilBrown 	}
193278d7f5f7SNeilBrown 	set_bit(R1BIO_Uptodate, &r1_bio->state);
19337ca78d57SNeilBrown 	set_bit(BIO_UPTODATE, &bio->bi_flags);
1934a68e5870SNeilBrown 	return 1;
193569382e85SNeilBrown }
1936d11c171eSNeilBrown 
19379f2c9d12SNeilBrown static int process_checks(struct r1bio *r1_bio)
1938a68e5870SNeilBrown {
1939a68e5870SNeilBrown 	/* We have read all readable devices.  If we haven't
1940a68e5870SNeilBrown 	 * got the block, then there is no hope left.
1941a68e5870SNeilBrown 	 * If we have, then we want to do a comparison
1942a68e5870SNeilBrown 	 * and skip the write if everything is the same.
1943a68e5870SNeilBrown 	 * If any blocks failed to read, then we need to
1944a68e5870SNeilBrown 	 * attempt an over-write
1945a68e5870SNeilBrown 	 */
1946fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1947e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
1948a68e5870SNeilBrown 	int primary;
1949a68e5870SNeilBrown 	int i;
1950f4380a91Smajianpeng 	int vcnt;
1951a68e5870SNeilBrown 
195230bc9b53SNeilBrown 	/* Fix variable parts of all bios */
195330bc9b53SNeilBrown 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
195430bc9b53SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
195530bc9b53SNeilBrown 		int j;
195630bc9b53SNeilBrown 		int size;
195730bc9b53SNeilBrown 		struct bio *b = r1_bio->bios[i];
195830bc9b53SNeilBrown 		if (b->bi_end_io != end_sync_read)
195930bc9b53SNeilBrown 			continue;
196030bc9b53SNeilBrown 		/* fixup the bio for reuse */
196130bc9b53SNeilBrown 		bio_reset(b);
196230bc9b53SNeilBrown 		b->bi_vcnt = vcnt;
19634f024f37SKent Overstreet 		b->bi_iter.bi_size = r1_bio->sectors << 9;
19644f024f37SKent Overstreet 		b->bi_iter.bi_sector = r1_bio->sector +
196530bc9b53SNeilBrown 			conf->mirrors[i].rdev->data_offset;
196630bc9b53SNeilBrown 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
196730bc9b53SNeilBrown 		b->bi_end_io = end_sync_read;
196830bc9b53SNeilBrown 		b->bi_private = r1_bio;
196930bc9b53SNeilBrown 
19704f024f37SKent Overstreet 		size = b->bi_iter.bi_size;
197130bc9b53SNeilBrown 		for (j = 0; j < vcnt ; j++) {
197230bc9b53SNeilBrown 			struct bio_vec *bi;
197330bc9b53SNeilBrown 			bi = &b->bi_io_vec[j];
197430bc9b53SNeilBrown 			bi->bv_offset = 0;
197530bc9b53SNeilBrown 			if (size > PAGE_SIZE)
197630bc9b53SNeilBrown 				bi->bv_len = PAGE_SIZE;
197730bc9b53SNeilBrown 			else
197830bc9b53SNeilBrown 				bi->bv_len = size;
197930bc9b53SNeilBrown 			size -= PAGE_SIZE;
198030bc9b53SNeilBrown 		}
198130bc9b53SNeilBrown 	}
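	/* pick the first mirror that read successfully as the reference copy */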
19828f19ccb2SNeilBrown 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
1983a68e5870SNeilBrown 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1984a68e5870SNeilBrown 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1985a68e5870SNeilBrown 			r1_bio->bios[primary]->bi_end_io = NULL;
1986a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1987a68e5870SNeilBrown 			break;
1988a68e5870SNeilBrown 		}
1989a68e5870SNeilBrown 	r1_bio->read_disk = primary;
19908f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
1991a68e5870SNeilBrown 		int j;
1992a68e5870SNeilBrown 		struct bio *pbio = r1_bio->bios[primary];
1993a68e5870SNeilBrown 		struct bio *sbio = r1_bio->bios[i];
199478d7f5f7SNeilBrown 
19952aabaa65SKent Overstreet 		if (sbio->bi_end_io != end_sync_read)
199678d7f5f7SNeilBrown 			continue;
1997a68e5870SNeilBrown 
1998a68e5870SNeilBrown 		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1999a68e5870SNeilBrown 			for (j = vcnt; j-- ; ) {
2000a68e5870SNeilBrown 				struct page *p, *s;
2001a68e5870SNeilBrown 				p = pbio->bi_io_vec[j].bv_page;
2002a68e5870SNeilBrown 				s = sbio->bi_io_vec[j].bv_page;
2003a68e5870SNeilBrown 				if (memcmp(page_address(p),
2004a68e5870SNeilBrown 					   page_address(s),
20055020ad7dSNeilBrown 					   sbio->bi_io_vec[j].bv_len))
2006a68e5870SNeilBrown 					break;
2007a68e5870SNeilBrown 			}
2008a68e5870SNeilBrown 		} else
2009a68e5870SNeilBrown 			j = 0;
2010a68e5870SNeilBrown 		if (j >= 0)
20117f7583d4SJianpeng Ma 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2012a68e5870SNeilBrown 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2013a68e5870SNeilBrown 			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
201478d7f5f7SNeilBrown 			/* No need to write to this device. */
2015a68e5870SNeilBrown 			sbio->bi_end_io = NULL;
2016a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
201778d7f5f7SNeilBrown 			continue;
201878d7f5f7SNeilBrown 		}
2019d3b45c2aSKent Overstreet 
2020d3b45c2aSKent Overstreet 		bio_copy_data(sbio, pbio);
2021a68e5870SNeilBrown 	}
2022a68e5870SNeilBrown 	return 0;
2023a68e5870SNeilBrown }
2024a68e5870SNeilBrown 
20259f2c9d12SNeilBrown static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2026a68e5870SNeilBrown {
2027e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2028a68e5870SNeilBrown 	int i;
20298f19ccb2SNeilBrown 	int disks = conf->raid_disks * 2;
2030a68e5870SNeilBrown 	struct bio *bio, *wbio;
2031a68e5870SNeilBrown 
2032a68e5870SNeilBrown 	bio = r1_bio->bios[r1_bio->read_disk];
2033a68e5870SNeilBrown 
2034a68e5870SNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2035a68e5870SNeilBrown 		/* ouch - failed to read all of that. */
2036a68e5870SNeilBrown 		if (!fix_sync_read_error(r1_bio))
2037a68e5870SNeilBrown 			return;
20387ca78d57SNeilBrown 
20397ca78d57SNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
20407ca78d57SNeilBrown 		if (process_checks(r1_bio) < 0)
20417ca78d57SNeilBrown 			return;
2042d11c171eSNeilBrown 	/*
2043d11c171eSNeilBrown 	 * schedule writes
2044d11c171eSNeilBrown 	 */
20451da177e4SLinus Torvalds 	atomic_set(&r1_bio->remaining, 1);
20461da177e4SLinus Torvalds 	for (i = 0; i < disks ; i++) {
20471da177e4SLinus Torvalds 		wbio = r1_bio->bios[i];
20483e198f78SNeilBrown 		if (wbio->bi_end_io == NULL ||
20493e198f78SNeilBrown 		    (wbio->bi_end_io == end_sync_read &&
20503e198f78SNeilBrown 		     (i == r1_bio->read_disk ||
20513e198f78SNeilBrown 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
20521da177e4SLinus Torvalds 			continue;
20531da177e4SLinus Torvalds 
20543e198f78SNeilBrown 		wbio->bi_rw = WRITE;
20553e198f78SNeilBrown 		wbio->bi_end_io = end_sync_write;
20561da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
2057aa8b57aaSKent Overstreet 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2058191ea9b2SNeilBrown 
20591da177e4SLinus Torvalds 		generic_make_request(wbio);
20601da177e4SLinus Torvalds 	}
20611da177e4SLinus Torvalds 
20621da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
2063191ea9b2SNeilBrown 		/* if we're here, all write(s) have completed, so clean up */
206458e94ae1SNeilBrown 		int s = r1_bio->sectors;
206558e94ae1SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
206658e94ae1SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
206758e94ae1SNeilBrown 			reschedule_retry(r1_bio);
206858e94ae1SNeilBrown 		else {
20691da177e4SLinus Torvalds 			put_buf(r1_bio);
207058e94ae1SNeilBrown 			md_done_sync(mddev, s, 1);
207158e94ae1SNeilBrown 		}
20721da177e4SLinus Torvalds 	}
20731da177e4SLinus Torvalds }
20741da177e4SLinus Torvalds 
20751da177e4SLinus Torvalds /*
20761da177e4SLinus Torvalds  * This is a kernel thread which:
20771da177e4SLinus Torvalds  *
20781da177e4SLinus Torvalds  *	1.	Retries failed read operations on working mirrors.
20791da177e4SLinus Torvalds  *	2.	Updates the raid superblock when problems are encountered.
2080d2eb35acSNeilBrown  *	3.	Performs writes following reads for array synchronising.
20811da177e4SLinus Torvalds  */
20821da177e4SLinus Torvalds 
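/*
 * Repair a read error on an active array: find a mirror that can still
 * supply the data, write it back over the failing range on the other
 * in-sync devices, then re-read to confirm the correction.  If no copy
 * can be read, the range is marked bad (or the device is failed).
 */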
2083e8096360SNeilBrown static void fix_read_error(struct r1conf *conf, int read_disk,
2084867868fbSNeilBrown 			   sector_t sect, int sectors)
2085867868fbSNeilBrown {
2086fd01b88cSNeilBrown 	struct mddev *mddev = conf->mddev;
2087867868fbSNeilBrown 	while(sectors) {
2088867868fbSNeilBrown 		int s = sectors;
2089867868fbSNeilBrown 		int d = read_disk;
2090867868fbSNeilBrown 		int success = 0;
2091867868fbSNeilBrown 		int start;
20923cb03002SNeilBrown 		struct md_rdev *rdev;
2093867868fbSNeilBrown 
2094867868fbSNeilBrown 		if (s > (PAGE_SIZE>>9))
2095867868fbSNeilBrown 			s = PAGE_SIZE >> 9;
2096867868fbSNeilBrown 
2097867868fbSNeilBrown 		do {
2098867868fbSNeilBrown 			/* Note: no rcu protection needed here
2099867868fbSNeilBrown 			 * as this is synchronous in the raid1d thread
2100867868fbSNeilBrown 			 * which is the thread that might remove
2101867868fbSNeilBrown 			 * a device.  If raid1d ever becomes multi-threaded....
2102867868fbSNeilBrown 			 */
2103d2eb35acSNeilBrown 			sector_t first_bad;
2104d2eb35acSNeilBrown 			int bad_sectors;
2105d2eb35acSNeilBrown 
2106867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2107867868fbSNeilBrown 			if (rdev &&
2108da8840a7Smajianpeng 			    (test_bit(In_sync, &rdev->flags) ||
2109da8840a7Smajianpeng 			     (!test_bit(Faulty, &rdev->flags) &&
2110da8840a7Smajianpeng 			      rdev->recovery_offset >= sect + s)) &&
2111d2eb35acSNeilBrown 			    is_badblock(rdev, sect, s,
2112d2eb35acSNeilBrown 					&first_bad, &bad_sectors) == 0 &&
2113ccebd4c4SJonathan Brassow 			    sync_page_io(rdev, sect, s<<9,
2114ccebd4c4SJonathan Brassow 					 conf->tmppage, READ, false))
2115867868fbSNeilBrown 				success = 1;
2116867868fbSNeilBrown 			else {
2117867868fbSNeilBrown 				d++;
21188f19ccb2SNeilBrown 				if (d == conf->raid_disks * 2)
2119867868fbSNeilBrown 					d = 0;
2120867868fbSNeilBrown 			}
2121867868fbSNeilBrown 		} while (!success && d != read_disk);
2122867868fbSNeilBrown 
2123867868fbSNeilBrown 		if (!success) {
2124d8f05d29SNeilBrown 			/* Cannot read from anywhere - mark it bad */
21253cb03002SNeilBrown 			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2126d8f05d29SNeilBrown 			if (!rdev_set_badblocks(rdev, sect, s, 0))
2127d8f05d29SNeilBrown 				md_error(mddev, rdev);
2128867868fbSNeilBrown 			break;
2129867868fbSNeilBrown 		}
2130867868fbSNeilBrown 		/* write it back and re-read */
2131867868fbSNeilBrown 		start = d;
2132867868fbSNeilBrown 		while (d != read_disk) {
2133867868fbSNeilBrown 			if (d==0)
21348f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
2135867868fbSNeilBrown 			d--;
2136867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2137867868fbSNeilBrown 			if (rdev &&
2138d8f05d29SNeilBrown 			    test_bit(In_sync, &rdev->flags))
2139d8f05d29SNeilBrown 				r1_sync_page_io(rdev, sect, s,
2140d8f05d29SNeilBrown 						conf->tmppage, WRITE);
2141867868fbSNeilBrown 		}
2142867868fbSNeilBrown 		d = start;
2143867868fbSNeilBrown 		while (d != read_disk) {
2144867868fbSNeilBrown 			char b[BDEVNAME_SIZE];
2145867868fbSNeilBrown 			if (d==0)
21468f19ccb2SNeilBrown 				d = conf->raid_disks * 2;
2147867868fbSNeilBrown 			d--;
2148867868fbSNeilBrown 			rdev = conf->mirrors[d].rdev;
2149867868fbSNeilBrown 			if (rdev &&
2150867868fbSNeilBrown 			    test_bit(In_sync, &rdev->flags)) {
2151d8f05d29SNeilBrown 				if (r1_sync_page_io(rdev, sect, s,
2152d8f05d29SNeilBrown 						    conf->tmppage, READ)) {
2153867868fbSNeilBrown 					atomic_add(s, &rdev->corrected_errors);
2154867868fbSNeilBrown 					printk(KERN_INFO
21559dd1e2faSNeilBrown 					       "md/raid1:%s: read error corrected "
2156867868fbSNeilBrown 					       "(%d sectors at %llu on %s)\n",
2157867868fbSNeilBrown 					       mdname(mddev), s,
2158969b755aSRandy Dunlap 					       (unsigned long long)(sect +
2159969b755aSRandy Dunlap 					           rdev->data_offset),
2160867868fbSNeilBrown 					       bdevname(rdev->bdev, b));
2161867868fbSNeilBrown 				}
2162867868fbSNeilBrown 			}
2163867868fbSNeilBrown 		}
2164867868fbSNeilBrown 		sectors -= s;
2165867868fbSNeilBrown 		sect += s;
2166867868fbSNeilBrown 	}
2167867868fbSNeilBrown }
2168867868fbSNeilBrown 
21699f2c9d12SNeilBrown static int narrow_write_error(struct r1bio *r1_bio, int i)
2170cd5ff9a1SNeilBrown {
2171fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
2172e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
21733cb03002SNeilBrown 	struct md_rdev *rdev = conf->mirrors[i].rdev;
2174cd5ff9a1SNeilBrown 
2175cd5ff9a1SNeilBrown 	/* bio has the data to be written to device 'i' where
2176cd5ff9a1SNeilBrown 	 * we just recently had a write error.
2177cd5ff9a1SNeilBrown 	 * We repeatedly clone the bio and trim down to one block,
2178cd5ff9a1SNeilBrown 	 * then try the write.  Where the write fails we record
2179cd5ff9a1SNeilBrown 	 * a bad block.
2180cd5ff9a1SNeilBrown 	 * It is conceivable that the bio doesn't exactly align with
2181cd5ff9a1SNeilBrown 	 * blocks.  We must handle this somehow.
2182cd5ff9a1SNeilBrown 	 *
2183cd5ff9a1SNeilBrown 	 * We currently own a reference on the rdev.
2184cd5ff9a1SNeilBrown 	 */
2185cd5ff9a1SNeilBrown 
2186cd5ff9a1SNeilBrown 	int block_sectors;
2187cd5ff9a1SNeilBrown 	sector_t sector;
2188cd5ff9a1SNeilBrown 	int sectors;
2189cd5ff9a1SNeilBrown 	int sect_to_write = r1_bio->sectors;
2190cd5ff9a1SNeilBrown 	int ok = 1;
2191cd5ff9a1SNeilBrown 
2192cd5ff9a1SNeilBrown 	if (rdev->badblocks.shift < 0)
2193cd5ff9a1SNeilBrown 		return 0;
2194cd5ff9a1SNeilBrown 
2195cd5ff9a1SNeilBrown 	block_sectors = 1 << rdev->badblocks.shift;
2196cd5ff9a1SNeilBrown 	sector = r1_bio->sector;
2197cd5ff9a1SNeilBrown 	sectors = ((sector + block_sectors)
2198cd5ff9a1SNeilBrown 		   & ~(sector_t)(block_sectors - 1))
2199cd5ff9a1SNeilBrown 		- sector;
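	/*
	 * Illustrative example (hypothetical numbers, not from the source):
	 * with rdev->badblocks.shift == 3, block_sectors is 8.  If
	 * r1_bio->sector is 1029, then ((1029 + 8) & ~7) - 1029 == 3, so the
	 * first write covers just the 3 sectors up to the next 8-sector
	 * boundary; afterwards 'sectors' is reset to block_sectors at the
	 * bottom of the loop and whole blocks are written one at a time.
	 */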
2200cd5ff9a1SNeilBrown 
2201cd5ff9a1SNeilBrown 	while (sect_to_write) {
2202cd5ff9a1SNeilBrown 		struct bio *wbio;
2203cd5ff9a1SNeilBrown 		if (sectors > sect_to_write)
2204cd5ff9a1SNeilBrown 			sectors = sect_to_write;
2205cd5ff9a1SNeilBrown 		/* Write at 'sector' for 'sectors'*/
2206cd5ff9a1SNeilBrown 
2207b783863fSKent Overstreet 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2208b783863fSKent Overstreet 			unsigned vcnt = r1_bio->behind_page_count;
2209b783863fSKent Overstreet 			struct bio_vec *vec = r1_bio->behind_bvecs;
2210b783863fSKent Overstreet 
2211b783863fSKent Overstreet 			while (!vec->bv_page) {
2212b783863fSKent Overstreet 				vec++;
2213b783863fSKent Overstreet 				vcnt--;
2214b783863fSKent Overstreet 			}
2215b783863fSKent Overstreet 
2216cd5ff9a1SNeilBrown 			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2217cd5ff9a1SNeilBrown 			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2218b783863fSKent Overstreet 
2219cd5ff9a1SNeilBrown 			wbio->bi_vcnt = vcnt;
2220b783863fSKent Overstreet 		} else {
2221b783863fSKent Overstreet 			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2222b783863fSKent Overstreet 		}
2223b783863fSKent Overstreet 
2224b783863fSKent Overstreet 		wbio->bi_rw = WRITE;
22254f024f37SKent Overstreet 		wbio->bi_iter.bi_sector = r1_bio->sector;
22264f024f37SKent Overstreet 		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2227cd5ff9a1SNeilBrown 
22286678d83fSKent Overstreet 		bio_trim(wbio, sector - r1_bio->sector, sectors);
22294f024f37SKent Overstreet 		wbio->bi_iter.bi_sector += rdev->data_offset;
2230cd5ff9a1SNeilBrown 		wbio->bi_bdev = rdev->bdev;
2231cd5ff9a1SNeilBrown 		if (submit_bio_wait(WRITE, wbio) < 0)
2232cd5ff9a1SNeilBrown 			/* failure! */
2233cd5ff9a1SNeilBrown 			ok = rdev_set_badblocks(rdev, sector,
2234cd5ff9a1SNeilBrown 						sectors, 0)
2235cd5ff9a1SNeilBrown 				&& ok;
2236cd5ff9a1SNeilBrown 
2237cd5ff9a1SNeilBrown 		bio_put(wbio);
2238cd5ff9a1SNeilBrown 		sect_to_write -= sectors;
2239cd5ff9a1SNeilBrown 		sector += sectors;
2240cd5ff9a1SNeilBrown 		sectors = block_sectors;
2241cd5ff9a1SNeilBrown 	}
2242cd5ff9a1SNeilBrown 	return ok;
2243cd5ff9a1SNeilBrown }
2244cd5ff9a1SNeilBrown 
2245e8096360SNeilBrown static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
224662096bceSNeilBrown {
224762096bceSNeilBrown 	int m;
224862096bceSNeilBrown 	int s = r1_bio->sectors;
22498f19ccb2SNeilBrown 	for (m = 0; m < conf->raid_disks * 2 ; m++) {
22503cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[m].rdev;
225162096bceSNeilBrown 		struct bio *bio = r1_bio->bios[m];
225262096bceSNeilBrown 		if (bio->bi_end_io == NULL)
225362096bceSNeilBrown 			continue;
225462096bceSNeilBrown 		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
225562096bceSNeilBrown 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2256c6563a8cSNeilBrown 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
225762096bceSNeilBrown 		}
225862096bceSNeilBrown 		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
225962096bceSNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
226062096bceSNeilBrown 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
226162096bceSNeilBrown 				md_error(conf->mddev, rdev);
226262096bceSNeilBrown 		}
226362096bceSNeilBrown 	}
226462096bceSNeilBrown 	put_buf(r1_bio);
226562096bceSNeilBrown 	md_done_sync(conf->mddev, s, 1);
226662096bceSNeilBrown }
226762096bceSNeilBrown 
2268e8096360SNeilBrown static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
226962096bceSNeilBrown {
227062096bceSNeilBrown 	int m;
22718f19ccb2SNeilBrown 	for (m = 0; m < conf->raid_disks * 2 ; m++)
227262096bceSNeilBrown 		if (r1_bio->bios[m] == IO_MADE_GOOD) {
22733cb03002SNeilBrown 			struct md_rdev *rdev = conf->mirrors[m].rdev;
227462096bceSNeilBrown 			rdev_clear_badblocks(rdev,
227562096bceSNeilBrown 					     r1_bio->sector,
2276c6563a8cSNeilBrown 					     r1_bio->sectors, 0);
227762096bceSNeilBrown 			rdev_dec_pending(rdev, conf->mddev);
227862096bceSNeilBrown 		} else if (r1_bio->bios[m] != NULL) {
227962096bceSNeilBrown 			/* This drive got a write error.  We need to
228062096bceSNeilBrown 			 * narrow down and record precise write
228162096bceSNeilBrown 			 * errors.
228262096bceSNeilBrown 			 */
228362096bceSNeilBrown 			if (!narrow_write_error(r1_bio, m)) {
228462096bceSNeilBrown 				md_error(conf->mddev,
228562096bceSNeilBrown 					 conf->mirrors[m].rdev);
228662096bceSNeilBrown 				/* an I/O failed, we can't clear the bitmap */
228762096bceSNeilBrown 				set_bit(R1BIO_Degraded, &r1_bio->state);
228862096bceSNeilBrown 			}
228962096bceSNeilBrown 			rdev_dec_pending(conf->mirrors[m].rdev,
229062096bceSNeilBrown 					 conf->mddev);
229162096bceSNeilBrown 		}
229262096bceSNeilBrown 	if (test_bit(R1BIO_WriteError, &r1_bio->state))
229362096bceSNeilBrown 		close_write(r1_bio);
229462096bceSNeilBrown 	raid_end_bio_io(r1_bio);
229562096bceSNeilBrown }
229662096bceSNeilBrown 
2297e8096360SNeilBrown static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
229862096bceSNeilBrown {
229962096bceSNeilBrown 	int disk;
230062096bceSNeilBrown 	int max_sectors;
2301fd01b88cSNeilBrown 	struct mddev *mddev = conf->mddev;
230262096bceSNeilBrown 	struct bio *bio;
230362096bceSNeilBrown 	char b[BDEVNAME_SIZE];
23043cb03002SNeilBrown 	struct md_rdev *rdev;
230562096bceSNeilBrown 
230662096bceSNeilBrown 	clear_bit(R1BIO_ReadError, &r1_bio->state);
230762096bceSNeilBrown 	/* we got a read error. Maybe the drive is bad.  Maybe just
230862096bceSNeilBrown 	 * the block and we can fix it.
230962096bceSNeilBrown 	 * We freeze all other IO, and try reading the block from
231062096bceSNeilBrown 	 * other devices.  When we find one, we re-write
231162096bceSNeilBrown 	 * and check that this fixes the read error.
231262096bceSNeilBrown 	 * This is all done synchronously while the array is
231362096bceSNeilBrown 	 * frozen
231462096bceSNeilBrown 	 */
231562096bceSNeilBrown 	if (mddev->ro == 0) {
2316e2d59925SNeilBrown 		freeze_array(conf, 1);
231762096bceSNeilBrown 		fix_read_error(conf, r1_bio->read_disk,
231862096bceSNeilBrown 			       r1_bio->sector, r1_bio->sectors);
231962096bceSNeilBrown 		unfreeze_array(conf);
232062096bceSNeilBrown 	} else
232162096bceSNeilBrown 		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
23227ad4d4a6SNeilBrown 	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
232362096bceSNeilBrown 
232462096bceSNeilBrown 	bio = r1_bio->bios[r1_bio->read_disk];
232562096bceSNeilBrown 	bdevname(bio->bi_bdev, b);
232662096bceSNeilBrown read_more:
232762096bceSNeilBrown 	disk = read_balance(conf, r1_bio, &max_sectors);
232862096bceSNeilBrown 	if (disk == -1) {
232962096bceSNeilBrown 		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
233062096bceSNeilBrown 		       " read error for block %llu\n",
233162096bceSNeilBrown 		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
233262096bceSNeilBrown 		raid_end_bio_io(r1_bio);
233362096bceSNeilBrown 	} else {
233462096bceSNeilBrown 		const unsigned long do_sync
233562096bceSNeilBrown 			= r1_bio->master_bio->bi_rw & REQ_SYNC;
233662096bceSNeilBrown 		if (bio) {
233762096bceSNeilBrown 			r1_bio->bios[r1_bio->read_disk] =
233862096bceSNeilBrown 				mddev->ro ? IO_BLOCKED : NULL;
233962096bceSNeilBrown 			bio_put(bio);
234062096bceSNeilBrown 		}
234162096bceSNeilBrown 		r1_bio->read_disk = disk;
234262096bceSNeilBrown 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
23434f024f37SKent Overstreet 		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
23444f024f37SKent Overstreet 			 max_sectors);
234562096bceSNeilBrown 		r1_bio->bios[r1_bio->read_disk] = bio;
234662096bceSNeilBrown 		rdev = conf->mirrors[disk].rdev;
234762096bceSNeilBrown 		printk_ratelimited(KERN_ERR
234862096bceSNeilBrown 				   "md/raid1:%s: redirecting sector %llu"
234962096bceSNeilBrown 				   " to other mirror: %s\n",
235062096bceSNeilBrown 				   mdname(mddev),
235162096bceSNeilBrown 				   (unsigned long long)r1_bio->sector,
235262096bceSNeilBrown 				   bdevname(rdev->bdev, b));
23534f024f37SKent Overstreet 		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
235462096bceSNeilBrown 		bio->bi_bdev = rdev->bdev;
235562096bceSNeilBrown 		bio->bi_end_io = raid1_end_read_request;
235662096bceSNeilBrown 		bio->bi_rw = READ | do_sync;
235762096bceSNeilBrown 		bio->bi_private = r1_bio;
235862096bceSNeilBrown 		if (max_sectors < r1_bio->sectors) {
235962096bceSNeilBrown 			/* Drat - have to split this up more */
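			/*
			 * Worked example (hypothetical numbers): if the master
			 * bio starts at sector 1000 and spans 64 sectors, but
			 * read_balance() only found 16 usable sectors starting
			 * at r1_bio->sector == 1008, then sectors_handled is
			 * 1008 + 16 - 1000 == 24.  The clone submitted below
			 * covers sectors 1008-1023, and a fresh r1_bio is set
			 * up for the remaining 40 sectors starting at 1024
			 * before jumping back to read_more.
			 */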
236062096bceSNeilBrown 			struct bio *mbio = r1_bio->master_bio;
236162096bceSNeilBrown 			int sectors_handled = (r1_bio->sector + max_sectors
23624f024f37SKent Overstreet 					       - mbio->bi_iter.bi_sector);
236362096bceSNeilBrown 			r1_bio->sectors = max_sectors;
236462096bceSNeilBrown 			spin_lock_irq(&conf->device_lock);
236562096bceSNeilBrown 			if (mbio->bi_phys_segments == 0)
236662096bceSNeilBrown 				mbio->bi_phys_segments = 2;
236762096bceSNeilBrown 			else
236862096bceSNeilBrown 				mbio->bi_phys_segments++;
236962096bceSNeilBrown 			spin_unlock_irq(&conf->device_lock);
237062096bceSNeilBrown 			generic_make_request(bio);
237162096bceSNeilBrown 			bio = NULL;
237262096bceSNeilBrown 
237362096bceSNeilBrown 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
237462096bceSNeilBrown 
237562096bceSNeilBrown 			r1_bio->master_bio = mbio;
2376aa8b57aaSKent Overstreet 			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
237762096bceSNeilBrown 			r1_bio->state = 0;
237862096bceSNeilBrown 			set_bit(R1BIO_ReadError, &r1_bio->state);
237962096bceSNeilBrown 			r1_bio->mddev = mddev;
23804f024f37SKent Overstreet 			r1_bio->sector = mbio->bi_iter.bi_sector +
23814f024f37SKent Overstreet 				sectors_handled;
238262096bceSNeilBrown 
238362096bceSNeilBrown 			goto read_more;
238462096bceSNeilBrown 		} else
238562096bceSNeilBrown 			generic_make_request(bio);
238662096bceSNeilBrown 	}
238762096bceSNeilBrown }
238862096bceSNeilBrown 
23894ed8731dSShaohua Li static void raid1d(struct md_thread *thread)
23901da177e4SLinus Torvalds {
23914ed8731dSShaohua Li 	struct mddev *mddev = thread->mddev;
23929f2c9d12SNeilBrown 	struct r1bio *r1_bio;
23931da177e4SLinus Torvalds 	unsigned long flags;
2394e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
23951da177e4SLinus Torvalds 	struct list_head *head = &conf->retry_list;
2396e1dfa0a2SNeilBrown 	struct blk_plug plug;
23971da177e4SLinus Torvalds 
23981da177e4SLinus Torvalds 	md_check_recovery(mddev);
23991da177e4SLinus Torvalds 
2400e1dfa0a2SNeilBrown 	blk_start_plug(&plug);
24011da177e4SLinus Torvalds 	for (;;) {
2402a35e63efSNeilBrown 
24037eaceaccSJens Axboe 		flush_pending_writes(conf);
2404a35e63efSNeilBrown 
24051da177e4SLinus Torvalds 		spin_lock_irqsave(&conf->device_lock, flags);
2406a35e63efSNeilBrown 		if (list_empty(head)) {
2407191ea9b2SNeilBrown 			spin_unlock_irqrestore(&conf->device_lock, flags);
24081da177e4SLinus Torvalds 			break;
2409a35e63efSNeilBrown 		}
24109f2c9d12SNeilBrown 		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
24111da177e4SLinus Torvalds 		list_del(head->prev);
2412ddaf22abSNeilBrown 		conf->nr_queued--;
24131da177e4SLinus Torvalds 		spin_unlock_irqrestore(&conf->device_lock, flags);
24141da177e4SLinus Torvalds 
24151da177e4SLinus Torvalds 		mddev = r1_bio->mddev;
2416070ec55dSNeilBrown 		conf = mddev->private;
24174367af55SNeilBrown 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2418d8f05d29SNeilBrown 			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
241962096bceSNeilBrown 			    test_bit(R1BIO_WriteError, &r1_bio->state))
242062096bceSNeilBrown 				handle_sync_write_finished(conf, r1_bio);
242162096bceSNeilBrown 			else
24221da177e4SLinus Torvalds 				sync_request_write(mddev, r1_bio);
2423cd5ff9a1SNeilBrown 		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
242462096bceSNeilBrown 			   test_bit(R1BIO_WriteError, &r1_bio->state))
242562096bceSNeilBrown 			handle_write_finished(conf, r1_bio);
242662096bceSNeilBrown 		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
242762096bceSNeilBrown 			handle_read_error(conf, r1_bio);
2428d2eb35acSNeilBrown 		else
2429d2eb35acSNeilBrown 			/* just a partial read to be scheduled from separate
2430d2eb35acSNeilBrown 			 * context
2431d2eb35acSNeilBrown 			 */
2432d2eb35acSNeilBrown 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
243362096bceSNeilBrown 
24341d9d5241SNeilBrown 		cond_resched();
2435de393cdeSNeilBrown 		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2436de393cdeSNeilBrown 			md_check_recovery(mddev);
24371da177e4SLinus Torvalds 	}
2438e1dfa0a2SNeilBrown 	blk_finish_plug(&plug);
24391da177e4SLinus Torvalds }
24401da177e4SLinus Torvalds 
24411da177e4SLinus Torvalds 
2442e8096360SNeilBrown static int init_resync(struct r1conf *conf)
24431da177e4SLinus Torvalds {
24441da177e4SLinus Torvalds 	int buffs;
24451da177e4SLinus Torvalds 
24461da177e4SLinus Torvalds 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
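	/*
	 * Illustrative only: assuming the common definitions of these macros
	 * earlier in this file (a 2 MiB RESYNC_WINDOW and a 64 KiB
	 * RESYNC_BLOCK_SIZE), this pre-allocates 32 r1buf entries in the
	 * mempool created below.
	 */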
24479e77c485SEric Sesterhenn 	BUG_ON(conf->r1buf_pool);
24481da177e4SLinus Torvalds 	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
24491da177e4SLinus Torvalds 					  conf->poolinfo);
24501da177e4SLinus Torvalds 	if (!conf->r1buf_pool)
24511da177e4SLinus Torvalds 		return -ENOMEM;
24521da177e4SLinus Torvalds 	conf->next_resync = 0;
24531da177e4SLinus Torvalds 	return 0;
24541da177e4SLinus Torvalds }
24551da177e4SLinus Torvalds 
24561da177e4SLinus Torvalds /*
24571da177e4SLinus Torvalds  * perform a "sync" on one "block"
24581da177e4SLinus Torvalds  *
24591da177e4SLinus Torvalds  * We need to make sure that no normal I/O request - particularly write
24601da177e4SLinus Torvalds  * requests - conflict with active sync requests.
24601da177e4SLinus Torvalds  * requests - conflicts with active sync requests.
24621da177e4SLinus Torvalds  * This is achieved by tracking pending requests and a 'barrier' concept
24631da177e4SLinus Torvalds  * that can be installed to exclude normal IO requests.
24641da177e4SLinus Torvalds  */
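/*
 * Rough sketch of that interplay (illustrative only; the real locking and
 * resync-window handling live in raise_barrier(), lower_barrier(),
 * wait_barrier() and allow_barrier() earlier in this file):
 *
 *     resync path                        normal I/O path
 *     -----------                        ---------------
 *     raise_barrier(conf);               wait_barrier(conf, ...);
 *     ...submit resync bios...           ...submit the bio...
 *     lower_barrier(conf);               allow_barrier(conf, ...);
 *
 * Roughly: wait_barrier() makes normal I/O wait while a barrier is raised,
 * and raise_barrier() waits for already-pending normal I/O to drain first.
 */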
24651da177e4SLinus Torvalds 
2466fd01b88cSNeilBrown static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
24671da177e4SLinus Torvalds {
2468e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
24699f2c9d12SNeilBrown 	struct r1bio *r1_bio;
24701da177e4SLinus Torvalds 	struct bio *bio;
24711da177e4SLinus Torvalds 	sector_t max_sector, nr_sectors;
24723e198f78SNeilBrown 	int disk = -1;
24731da177e4SLinus Torvalds 	int i;
24743e198f78SNeilBrown 	int wonly = -1;
24753e198f78SNeilBrown 	int write_targets = 0, read_targets = 0;
247657dab0bdSNeilBrown 	sector_t sync_blocks;
2477e3b9703eSNeilBrown 	int still_degraded = 0;
247806f60385SNeilBrown 	int good_sectors = RESYNC_SECTORS;
247906f60385SNeilBrown 	int min_bad = 0; /* number of sectors that are bad in all devices */
24801da177e4SLinus Torvalds 
24811da177e4SLinus Torvalds 	if (!conf->r1buf_pool)
24821da177e4SLinus Torvalds 		if (init_resync(conf))
248357afd89fSNeilBrown 			return 0;
24841da177e4SLinus Torvalds 
248558c0fed4SAndre Noll 	max_sector = mddev->dev_sectors;
24861da177e4SLinus Torvalds 	if (sector_nr >= max_sector) {
2487191ea9b2SNeilBrown 		/* If we aborted, we need to abort the
2488191ea9b2SNeilBrown 		 * sync on the 'current' bitmap chunk (there will
2489191ea9b2SNeilBrown 	 * only be one in raid1 resync).
2490191ea9b2SNeilBrown 	 * We can find the current address in mddev->curr_resync
2491191ea9b2SNeilBrown 		 */
24926a806c51SNeilBrown 		if (mddev->curr_resync < max_sector) /* aborted */
24936a806c51SNeilBrown 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2494191ea9b2SNeilBrown 						&sync_blocks, 1);
24956a806c51SNeilBrown 		else /* completed sync */
2496191ea9b2SNeilBrown 			conf->fullsync = 0;
24976a806c51SNeilBrown 
24986a806c51SNeilBrown 		bitmap_close_sync(mddev->bitmap);
24991da177e4SLinus Torvalds 		close_sync(conf);
25001da177e4SLinus Torvalds 		return 0;
25011da177e4SLinus Torvalds 	}
25021da177e4SLinus Torvalds 
250307d84d10SNeilBrown 	if (mddev->bitmap == NULL &&
250407d84d10SNeilBrown 	    mddev->recovery_cp == MaxSector &&
25056394cca5SNeilBrown 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
250607d84d10SNeilBrown 	    conf->fullsync == 0) {
250707d84d10SNeilBrown 		*skipped = 1;
250807d84d10SNeilBrown 		return max_sector - sector_nr;
250907d84d10SNeilBrown 	}
25106394cca5SNeilBrown 	/* before building a request, check if we can skip these blocks..
25116394cca5SNeilBrown 	 * This call to bitmap_start_sync doesn't actually record anything
25126394cca5SNeilBrown 	 */
2513e3b9703eSNeilBrown 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2514e5de485fSNeilBrown 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2515191ea9b2SNeilBrown 		/* We can skip this block, and probably several more */
2516191ea9b2SNeilBrown 		*skipped = 1;
2517191ea9b2SNeilBrown 		return sync_blocks;
2518191ea9b2SNeilBrown 	}
25191da177e4SLinus Torvalds 	/*
252017999be4SNeilBrown 	 * If there is non-resync activity waiting for a turn,
252117999be4SNeilBrown 	 * and resync is going fast enough,
252217999be4SNeilBrown 	 * then let it through before starting on this new sync request.
25231da177e4SLinus Torvalds 	 */
252417999be4SNeilBrown 	if (!go_faster && conf->nr_waiting)
25251da177e4SLinus Torvalds 		msleep_interruptible(1000);
252617999be4SNeilBrown 
2527b47490c9SNeilBrown 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
25281c4588e9SNeilBrown 	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
252917999be4SNeilBrown 	raise_barrier(conf);
253017999be4SNeilBrown 
253117999be4SNeilBrown 	conf->next_resync = sector_nr;
25321da177e4SLinus Torvalds 
25333e198f78SNeilBrown 	rcu_read_lock();
25343e198f78SNeilBrown 	/*
25353e198f78SNeilBrown 	 * If we get a correctable read error during resync or recovery,
25363e198f78SNeilBrown 	 * we might want to read from a different device.  So we
25373e198f78SNeilBrown 	 * flag all drives that could conceivably be read from for READ,
25383e198f78SNeilBrown 	 * and any others (which will be non-In_sync devices) for WRITE.
25393e198f78SNeilBrown 	 * If a read fails, we try reading from something else for which READ
25403e198f78SNeilBrown 	 * is OK.
25413e198f78SNeilBrown 	 */
25421da177e4SLinus Torvalds 
25431da177e4SLinus Torvalds 	r1_bio->mddev = mddev;
25441da177e4SLinus Torvalds 	r1_bio->sector = sector_nr;
2545191ea9b2SNeilBrown 	r1_bio->state = 0;
25461da177e4SLinus Torvalds 	set_bit(R1BIO_IsSync, &r1_bio->state);
25471da177e4SLinus Torvalds 
25488f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
25493cb03002SNeilBrown 		struct md_rdev *rdev;
25501da177e4SLinus Torvalds 		bio = r1_bio->bios[i];
25512aabaa65SKent Overstreet 		bio_reset(bio);
25521da177e4SLinus Torvalds 
25533e198f78SNeilBrown 		rdev = rcu_dereference(conf->mirrors[i].rdev);
25543e198f78SNeilBrown 		if (rdev == NULL ||
25553e198f78SNeilBrown 		    test_bit(Faulty, &rdev->flags)) {
25568f19ccb2SNeilBrown 			if (i < conf->raid_disks)
2557e3b9703eSNeilBrown 				still_degraded = 1;
25583e198f78SNeilBrown 		} else if (!test_bit(In_sync, &rdev->flags)) {
25591da177e4SLinus Torvalds 			bio->bi_rw = WRITE;
25601da177e4SLinus Torvalds 			bio->bi_end_io = end_sync_write;
25611da177e4SLinus Torvalds 			write_targets ++;
25623e198f78SNeilBrown 		} else {
25633e198f78SNeilBrown 			/* may need to read from here */
256406f60385SNeilBrown 			sector_t first_bad = MaxSector;
256506f60385SNeilBrown 			int bad_sectors;
256606f60385SNeilBrown 
256706f60385SNeilBrown 			if (is_badblock(rdev, sector_nr, good_sectors,
256806f60385SNeilBrown 					&first_bad, &bad_sectors)) {
256906f60385SNeilBrown 				if (first_bad > sector_nr)
257006f60385SNeilBrown 					good_sectors = first_bad - sector_nr;
257106f60385SNeilBrown 				else {
257206f60385SNeilBrown 					bad_sectors -= (sector_nr - first_bad);
257306f60385SNeilBrown 					if (min_bad == 0 ||
257406f60385SNeilBrown 					    min_bad > bad_sectors)
257506f60385SNeilBrown 						min_bad = bad_sectors;
257606f60385SNeilBrown 				}
257706f60385SNeilBrown 			}
257806f60385SNeilBrown 			if (sector_nr < first_bad) {
25793e198f78SNeilBrown 				if (test_bit(WriteMostly, &rdev->flags)) {
25803e198f78SNeilBrown 					if (wonly < 0)
25813e198f78SNeilBrown 						wonly = i;
25823e198f78SNeilBrown 				} else {
25833e198f78SNeilBrown 					if (disk < 0)
25843e198f78SNeilBrown 						disk = i;
25853e198f78SNeilBrown 				}
258606f60385SNeilBrown 				bio->bi_rw = READ;
258706f60385SNeilBrown 				bio->bi_end_io = end_sync_read;
25883e198f78SNeilBrown 				read_targets++;
2589d57368afSAlexander Lyakas 			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2590d57368afSAlexander Lyakas 				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2591d57368afSAlexander Lyakas 				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2592d57368afSAlexander Lyakas 				/*
2593d57368afSAlexander Lyakas 				 * The device is suitable for reading (InSync),
2594d57368afSAlexander Lyakas 				 * but has bad block(s) here. Let's try to correct them,
2595d57368afSAlexander Lyakas 				 * if we are doing resync or repair. Otherwise, leave
2596d57368afSAlexander Lyakas 				 * this device alone for this sync request.
2597d57368afSAlexander Lyakas 				 */
2598d57368afSAlexander Lyakas 				bio->bi_rw = WRITE;
2599d57368afSAlexander Lyakas 				bio->bi_end_io = end_sync_write;
2600d57368afSAlexander Lyakas 				write_targets++;
26013e198f78SNeilBrown 			}
260206f60385SNeilBrown 		}
260306f60385SNeilBrown 		if (bio->bi_end_io) {
26043e198f78SNeilBrown 			atomic_inc(&rdev->nr_pending);
26054f024f37SKent Overstreet 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
26063e198f78SNeilBrown 			bio->bi_bdev = rdev->bdev;
26071da177e4SLinus Torvalds 			bio->bi_private = r1_bio;
26081da177e4SLinus Torvalds 		}
260906f60385SNeilBrown 	}
26103e198f78SNeilBrown 	rcu_read_unlock();
26113e198f78SNeilBrown 	if (disk < 0)
26123e198f78SNeilBrown 		disk = wonly;
26133e198f78SNeilBrown 	r1_bio->read_disk = disk;
2614191ea9b2SNeilBrown 
261506f60385SNeilBrown 	if (read_targets == 0 && min_bad > 0) {
261606f60385SNeilBrown 		/* These sectors are bad on all InSync devices, so we
261706f60385SNeilBrown 		 * need to mark them bad on all write targets
261806f60385SNeilBrown 		 */
261906f60385SNeilBrown 		int ok = 1;
26208f19ccb2SNeilBrown 		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
262106f60385SNeilBrown 			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2622a42f9d83Smajianpeng 				struct md_rdev *rdev = conf->mirrors[i].rdev;
262306f60385SNeilBrown 				ok = rdev_set_badblocks(rdev, sector_nr,
262406f60385SNeilBrown 							min_bad, 0
262506f60385SNeilBrown 					) && ok;
262606f60385SNeilBrown 			}
262706f60385SNeilBrown 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
262806f60385SNeilBrown 		*skipped = 1;
262906f60385SNeilBrown 		put_buf(r1_bio);
263006f60385SNeilBrown 
263106f60385SNeilBrown 		if (!ok) {
263206f60385SNeilBrown 			/* Cannot record the badblocks, so need to
263306f60385SNeilBrown 			 * abort the resync.
263406f60385SNeilBrown 			 * If there are multiple read targets, could just
263506f60385SNeilBrown 			 * fail the really bad ones ???
263606f60385SNeilBrown 			 */
263706f60385SNeilBrown 			conf->recovery_disabled = mddev->recovery_disabled;
263806f60385SNeilBrown 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
263906f60385SNeilBrown 			return 0;
264006f60385SNeilBrown 		} else
264106f60385SNeilBrown 			return min_bad;
264206f60385SNeilBrown 
264306f60385SNeilBrown 	}
264406f60385SNeilBrown 	if (min_bad > 0 && min_bad < good_sectors) {
264506f60385SNeilBrown 		/* only resync enough to reach the next bad->good
264606f60385SNeilBrown 		 * transition */
264706f60385SNeilBrown 		good_sectors = min_bad;
264806f60385SNeilBrown 	}
264906f60385SNeilBrown 
26503e198f78SNeilBrown 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
26513e198f78SNeilBrown 		/* extra read targets are also write targets */
26523e198f78SNeilBrown 		write_targets += read_targets-1;
26533e198f78SNeilBrown 
26543e198f78SNeilBrown 	if (write_targets == 0 || read_targets == 0) {
26551da177e4SLinus Torvalds 		/* There is nowhere to write, so all non-sync
26561da177e4SLinus Torvalds 		 * drives must be failed - so we are finished
26571da177e4SLinus Torvalds 		 */
2658b7219ccbSNeilBrown 		sector_t rv;
2659b7219ccbSNeilBrown 		if (min_bad > 0)
2660b7219ccbSNeilBrown 			max_sector = sector_nr + min_bad;
2661b7219ccbSNeilBrown 		rv = max_sector - sector_nr;
266257afd89fSNeilBrown 		*skipped = 1;
26631da177e4SLinus Torvalds 		put_buf(r1_bio);
26641da177e4SLinus Torvalds 		return rv;
26651da177e4SLinus Torvalds 	}
26661da177e4SLinus Torvalds 
2667c6207277SNeilBrown 	if (max_sector > mddev->resync_max)
2668c6207277SNeilBrown 		max_sector = mddev->resync_max; /* Don't do IO beyond here */
266906f60385SNeilBrown 	if (max_sector > sector_nr + good_sectors)
267006f60385SNeilBrown 		max_sector = sector_nr + good_sectors;
26711da177e4SLinus Torvalds 	nr_sectors = 0;
2672289e99e8SNeilBrown 	sync_blocks = 0;
26731da177e4SLinus Torvalds 	do {
26741da177e4SLinus Torvalds 		struct page *page;
26751da177e4SLinus Torvalds 		int len = PAGE_SIZE;
26761da177e4SLinus Torvalds 		if (sector_nr + (len>>9) > max_sector)
26771da177e4SLinus Torvalds 			len = (max_sector - sector_nr) << 9;
26781da177e4SLinus Torvalds 		if (len == 0)
26791da177e4SLinus Torvalds 			break;
2680ab7a30c7SNeilBrown 		if (sync_blocks == 0) {
26816a806c51SNeilBrown 			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2682e3b9703eSNeilBrown 					       &sync_blocks, still_degraded) &&
2683e5de485fSNeilBrown 			    !conf->fullsync &&
2684e5de485fSNeilBrown 			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2685191ea9b2SNeilBrown 				break;
26869e77c485SEric Sesterhenn 			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
26877571ae88SNeilBrown 			if ((len >> 9) > sync_blocks)
26886a806c51SNeilBrown 				len = sync_blocks<<9;
2689ab7a30c7SNeilBrown 		}
2690191ea9b2SNeilBrown 
26918f19ccb2SNeilBrown 		for (i = 0 ; i < conf->raid_disks * 2; i++) {
26921da177e4SLinus Torvalds 			bio = r1_bio->bios[i];
26931da177e4SLinus Torvalds 			if (bio->bi_end_io) {
2694d11c171eSNeilBrown 				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
26951da177e4SLinus Torvalds 				if (bio_add_page(bio, page, len, 0) == 0) {
26961da177e4SLinus Torvalds 					/* stop here */
2697d11c171eSNeilBrown 					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
26981da177e4SLinus Torvalds 					while (i > 0) {
26991da177e4SLinus Torvalds 						i--;
27001da177e4SLinus Torvalds 						bio = r1_bio->bios[i];
27016a806c51SNeilBrown 						if (bio->bi_end_io == NULL)
27026a806c51SNeilBrown 							continue;
27031da177e4SLinus Torvalds 						/* remove last page from this bio */
27041da177e4SLinus Torvalds 						bio->bi_vcnt--;
27054f024f37SKent Overstreet 						bio->bi_iter.bi_size -= len;
27061da177e4SLinus Torvalds 						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
27071da177e4SLinus Torvalds 					}
27081da177e4SLinus Torvalds 					goto bio_full;
27091da177e4SLinus Torvalds 				}
27101da177e4SLinus Torvalds 			}
27111da177e4SLinus Torvalds 		}
27121da177e4SLinus Torvalds 		nr_sectors += len>>9;
27131da177e4SLinus Torvalds 		sector_nr += len>>9;
2714191ea9b2SNeilBrown 		sync_blocks -= (len>>9);
27151da177e4SLinus Torvalds 	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
27161da177e4SLinus Torvalds  bio_full:
27171da177e4SLinus Torvalds 	r1_bio->sectors = nr_sectors;
27181da177e4SLinus Torvalds 
2719d11c171eSNeilBrown 	/* For a user-requested sync, we read all readable devices and do a
2720d11c171eSNeilBrown 	 * compare
2721d11c171eSNeilBrown 	 */
2722d11c171eSNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2723d11c171eSNeilBrown 		atomic_set(&r1_bio->remaining, read_targets);
27242d4f4f33SNeilBrown 		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2725d11c171eSNeilBrown 			bio = r1_bio->bios[i];
2726d11c171eSNeilBrown 			if (bio->bi_end_io == end_sync_read) {
27272d4f4f33SNeilBrown 				read_targets--;
2728ddac7c7eSNeilBrown 				md_sync_acct(bio->bi_bdev, nr_sectors);
27291da177e4SLinus Torvalds 				generic_make_request(bio);
2730d11c171eSNeilBrown 			}
2731d11c171eSNeilBrown 		}
2732d11c171eSNeilBrown 	} else {
2733d11c171eSNeilBrown 		atomic_set(&r1_bio->remaining, 1);
2734d11c171eSNeilBrown 		bio = r1_bio->bios[r1_bio->read_disk];
2735ddac7c7eSNeilBrown 		md_sync_acct(bio->bi_bdev, nr_sectors);
2736d11c171eSNeilBrown 		generic_make_request(bio);
2737d11c171eSNeilBrown 
2738d11c171eSNeilBrown 	}
27391da177e4SLinus Torvalds 	return nr_sectors;
27401da177e4SLinus Torvalds }
27411da177e4SLinus Torvalds 
2742fd01b88cSNeilBrown static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
274380c3a6ceSDan Williams {
274480c3a6ceSDan Williams 	if (sectors)
274580c3a6ceSDan Williams 		return sectors;
274680c3a6ceSDan Williams 
274780c3a6ceSDan Williams 	return mddev->dev_sectors;
274880c3a6ceSDan Williams }
274980c3a6ceSDan Williams 
2750e8096360SNeilBrown static struct r1conf *setup_conf(struct mddev *mddev)
27511da177e4SLinus Torvalds {
2752e8096360SNeilBrown 	struct r1conf *conf;
2753709ae487SNeilBrown 	int i;
27540eaf822cSJonathan Brassow 	struct raid1_info *disk;
27553cb03002SNeilBrown 	struct md_rdev *rdev;
2756709ae487SNeilBrown 	int err = -ENOMEM;
27571da177e4SLinus Torvalds 
2758e8096360SNeilBrown 	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
27591da177e4SLinus Torvalds 	if (!conf)
2760709ae487SNeilBrown 		goto abort;
27611da177e4SLinus Torvalds 
27620eaf822cSJonathan Brassow 	conf->mirrors = kzalloc(sizeof(struct raid1_info)
27638f19ccb2SNeilBrown 				* mddev->raid_disks * 2,
27641da177e4SLinus Torvalds 				 GFP_KERNEL);
27651da177e4SLinus Torvalds 	if (!conf->mirrors)
2766709ae487SNeilBrown 		goto abort;
27671da177e4SLinus Torvalds 
2768ddaf22abSNeilBrown 	conf->tmppage = alloc_page(GFP_KERNEL);
2769ddaf22abSNeilBrown 	if (!conf->tmppage)
2770709ae487SNeilBrown 		goto abort;
2771ddaf22abSNeilBrown 
2772709ae487SNeilBrown 	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
27731da177e4SLinus Torvalds 	if (!conf->poolinfo)
2774709ae487SNeilBrown 		goto abort;
27758f19ccb2SNeilBrown 	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
27761da177e4SLinus Torvalds 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
27771da177e4SLinus Torvalds 					  r1bio_pool_free,
27781da177e4SLinus Torvalds 					  conf->poolinfo);
27791da177e4SLinus Torvalds 	if (!conf->r1bio_pool)
2780709ae487SNeilBrown 		goto abort;
2781709ae487SNeilBrown 
2782ed9bfdf1SNeilBrown 	conf->poolinfo->mddev = mddev;
27831da177e4SLinus Torvalds 
2784c19d5798SNeilBrown 	err = -EINVAL;
2785e7e72bf6SNeil Brown 	spin_lock_init(&conf->device_lock);
2786dafb20faSNeilBrown 	rdev_for_each(rdev, mddev) {
2787aba336bdSNeilBrown 		struct request_queue *q;
2788709ae487SNeilBrown 		int disk_idx = rdev->raid_disk;
27891da177e4SLinus Torvalds 		if (disk_idx >= mddev->raid_disks
27901da177e4SLinus Torvalds 		    || disk_idx < 0)
27911da177e4SLinus Torvalds 			continue;
2792c19d5798SNeilBrown 		if (test_bit(Replacement, &rdev->flags))
279302b898f2SNeilBrown 			disk = conf->mirrors + mddev->raid_disks + disk_idx;
2794c19d5798SNeilBrown 		else
27951da177e4SLinus Torvalds 			disk = conf->mirrors + disk_idx;
27961da177e4SLinus Torvalds 
2797c19d5798SNeilBrown 		if (disk->rdev)
2798c19d5798SNeilBrown 			goto abort;
27991da177e4SLinus Torvalds 		disk->rdev = rdev;
2800aba336bdSNeilBrown 		q = bdev_get_queue(rdev->bdev);
2801aba336bdSNeilBrown 		if (q->merge_bvec_fn)
2802aba336bdSNeilBrown 			mddev->merge_check_needed = 1;
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds 		disk->head_position = 0;
280512cee5a8SShaohua Li 		disk->seq_start = MaxSector;
28061da177e4SLinus Torvalds 	}
28071da177e4SLinus Torvalds 	conf->raid_disks = mddev->raid_disks;
28081da177e4SLinus Torvalds 	conf->mddev = mddev;
28091da177e4SLinus Torvalds 	INIT_LIST_HEAD(&conf->retry_list);
28101da177e4SLinus Torvalds 
28111da177e4SLinus Torvalds 	spin_lock_init(&conf->resync_lock);
281217999be4SNeilBrown 	init_waitqueue_head(&conf->wait_barrier);
28131da177e4SLinus Torvalds 
2814191ea9b2SNeilBrown 	bio_list_init(&conf->pending_bio_list);
281534db0cd6SNeilBrown 	conf->pending_count = 0;
2816d890fa2bSNeilBrown 	conf->recovery_disabled = mddev->recovery_disabled - 1;
2817191ea9b2SNeilBrown 
281879ef3a8aSmajianpeng 	conf->start_next_window = MaxSector;
281979ef3a8aSmajianpeng 	conf->current_window_requests = conf->next_window_requests = 0;
282079ef3a8aSmajianpeng 
2821c19d5798SNeilBrown 	err = -EIO;
28228f19ccb2SNeilBrown 	for (i = 0; i < conf->raid_disks * 2; i++) {
28231da177e4SLinus Torvalds 
28241da177e4SLinus Torvalds 		disk = conf->mirrors + i;
28251da177e4SLinus Torvalds 
2826c19d5798SNeilBrown 		if (i < conf->raid_disks &&
2827c19d5798SNeilBrown 		    disk[conf->raid_disks].rdev) {
2828c19d5798SNeilBrown 			/* This slot has a replacement. */
2829c19d5798SNeilBrown 			if (!disk->rdev) {
2830c19d5798SNeilBrown 				/* No original, just make the replacement
2831c19d5798SNeilBrown 				 * a recovering spare
2832c19d5798SNeilBrown 				 */
2833c19d5798SNeilBrown 				disk->rdev =
2834c19d5798SNeilBrown 					disk[conf->raid_disks].rdev;
2835c19d5798SNeilBrown 				disk[conf->raid_disks].rdev = NULL;
2836c19d5798SNeilBrown 			} else if (!test_bit(In_sync, &disk->rdev->flags))
2837c19d5798SNeilBrown 				/* Original is not in_sync - bad */
2838c19d5798SNeilBrown 				goto abort;
2839c19d5798SNeilBrown 		}
2840c19d5798SNeilBrown 
28415fd6c1dcSNeilBrown 		if (!disk->rdev ||
28425fd6c1dcSNeilBrown 		    !test_bit(In_sync, &disk->rdev->flags)) {
28431da177e4SLinus Torvalds 			disk->head_position = 0;
28444f0a5e01SJonathan Brassow 			if (disk->rdev &&
28454f0a5e01SJonathan Brassow 			    (disk->rdev->saved_raid_disk < 0))
284617571284SNeilBrown 				conf->fullsync = 1;
2847be4d3280SShaohua Li 		}
28481da177e4SLinus Torvalds 	}
2849709ae487SNeilBrown 
2850709ae487SNeilBrown 	err = -ENOMEM;
28510232605dSNeilBrown 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
2852709ae487SNeilBrown 	if (!conf->thread) {
28531da177e4SLinus Torvalds 		printk(KERN_ERR
28549dd1e2faSNeilBrown 		       "md/raid1:%s: couldn't allocate thread\n",
28551da177e4SLinus Torvalds 		       mdname(mddev));
2856709ae487SNeilBrown 		goto abort;
28571da177e4SLinus Torvalds 	}
2858191ea9b2SNeilBrown 
2859709ae487SNeilBrown 	return conf;
2860709ae487SNeilBrown 
2861709ae487SNeilBrown  abort:
2862709ae487SNeilBrown 	if (conf) {
2863709ae487SNeilBrown 		if (conf->r1bio_pool)
2864709ae487SNeilBrown 			mempool_destroy(conf->r1bio_pool);
2865709ae487SNeilBrown 		kfree(conf->mirrors);
2866709ae487SNeilBrown 		safe_put_page(conf->tmppage);
2867709ae487SNeilBrown 		kfree(conf->poolinfo);
2868709ae487SNeilBrown 		kfree(conf);
2869709ae487SNeilBrown 	}
2870709ae487SNeilBrown 	return ERR_PTR(err);
2871709ae487SNeilBrown }
2872709ae487SNeilBrown 
28735220ea1eSmajianpeng static int stop(struct mddev *mddev);
2874fd01b88cSNeilBrown static int run(struct mddev *mddev)
2875709ae487SNeilBrown {
2876e8096360SNeilBrown 	struct r1conf *conf;
2877709ae487SNeilBrown 	int i;
28783cb03002SNeilBrown 	struct md_rdev *rdev;
28795220ea1eSmajianpeng 	int ret;
28802ff8cc2cSShaohua Li 	bool discard_supported = false;
2881709ae487SNeilBrown 
2882709ae487SNeilBrown 	if (mddev->level != 1) {
28839dd1e2faSNeilBrown 		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2884709ae487SNeilBrown 		       mdname(mddev), mddev->level);
2885709ae487SNeilBrown 		return -EIO;
2886709ae487SNeilBrown 	}
2887709ae487SNeilBrown 	if (mddev->reshape_position != MaxSector) {
28889dd1e2faSNeilBrown 		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2889709ae487SNeilBrown 		       mdname(mddev));
2890709ae487SNeilBrown 		return -EIO;
2891709ae487SNeilBrown 	}
2892709ae487SNeilBrown 	/*
2893709ae487SNeilBrown 	 * copy the already verified devices into our private RAID1
2894709ae487SNeilBrown 	 * bookkeeping area. [whatever we allocate in run(),
2895709ae487SNeilBrown 	 * should be freed in stop()]
2896709ae487SNeilBrown 	 */
2897709ae487SNeilBrown 	if (mddev->private == NULL)
2898709ae487SNeilBrown 		conf = setup_conf(mddev);
2899709ae487SNeilBrown 	else
2900709ae487SNeilBrown 		conf = mddev->private;
2901709ae487SNeilBrown 
2902709ae487SNeilBrown 	if (IS_ERR(conf))
2903709ae487SNeilBrown 		return PTR_ERR(conf);
2904709ae487SNeilBrown 
2905c8dc9c65SJoe Lawrence 	if (mddev->queue)
29065026d7a9SH. Peter Anvin 		blk_queue_max_write_same_sectors(mddev->queue, 0);
29075026d7a9SH. Peter Anvin 
2908dafb20faSNeilBrown 	rdev_for_each(rdev, mddev) {
29091ed7242eSJonathan Brassow 		if (!mddev->gendisk)
29101ed7242eSJonathan Brassow 			continue;
2911709ae487SNeilBrown 		disk_stack_limits(mddev->gendisk, rdev->bdev,
2912709ae487SNeilBrown 				  rdev->data_offset << 9);
29132ff8cc2cSShaohua Li 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
29142ff8cc2cSShaohua Li 			discard_supported = true;
2915709ae487SNeilBrown 	}
2916709ae487SNeilBrown 
2917709ae487SNeilBrown 	mddev->degraded = 0;
2918709ae487SNeilBrown 	for (i=0; i < conf->raid_disks; i++)
2919709ae487SNeilBrown 		if (conf->mirrors[i].rdev == NULL ||
2920709ae487SNeilBrown 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2921709ae487SNeilBrown 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2922709ae487SNeilBrown 			mddev->degraded++;
2923709ae487SNeilBrown 
2924709ae487SNeilBrown 	if (conf->raid_disks - mddev->degraded == 1)
2925709ae487SNeilBrown 		mddev->recovery_cp = MaxSector;
2926709ae487SNeilBrown 
29278c6ac868SAndre Noll 	if (mddev->recovery_cp != MaxSector)
29289dd1e2faSNeilBrown 		printk(KERN_NOTICE "md/raid1:%s: not clean"
29298c6ac868SAndre Noll 		       " -- starting background reconstruction\n",
29308c6ac868SAndre Noll 		       mdname(mddev));
29311da177e4SLinus Torvalds 	printk(KERN_INFO
29329dd1e2faSNeilBrown 		"md/raid1:%s: active with %d out of %d mirrors\n",
29331da177e4SLinus Torvalds 		mdname(mddev), mddev->raid_disks - mddev->degraded,
29341da177e4SLinus Torvalds 		mddev->raid_disks);
2935709ae487SNeilBrown 
29361da177e4SLinus Torvalds 	/*
29371da177e4SLinus Torvalds 	 * Ok, everything is just fine now
29381da177e4SLinus Torvalds 	 */
2939709ae487SNeilBrown 	mddev->thread = conf->thread;
2940709ae487SNeilBrown 	conf->thread = NULL;
2941709ae487SNeilBrown 	mddev->private = conf;
2942709ae487SNeilBrown 
29431f403624SDan Williams 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
29441da177e4SLinus Torvalds 
29451ed7242eSJonathan Brassow 	if (mddev->queue) {
29460d129228SNeilBrown 		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
29470d129228SNeilBrown 		mddev->queue->backing_dev_info.congested_data = mddev;
29486b740b8dSNeilBrown 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
29492ff8cc2cSShaohua Li 
29502ff8cc2cSShaohua Li 		if (discard_supported)
29512ff8cc2cSShaohua Li 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
29522ff8cc2cSShaohua Li 						mddev->queue);
29532ff8cc2cSShaohua Li 		else
29542ff8cc2cSShaohua Li 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
29552ff8cc2cSShaohua Li 						  mddev->queue);
29561ed7242eSJonathan Brassow 	}
29575220ea1eSmajianpeng 
29585220ea1eSmajianpeng 	ret =  md_integrity_register(mddev);
29595220ea1eSmajianpeng 	if (ret)
29605220ea1eSmajianpeng 		stop(mddev);
29615220ea1eSmajianpeng 	return ret;
29621da177e4SLinus Torvalds }
29631da177e4SLinus Torvalds 
2964fd01b88cSNeilBrown static int stop(struct mddev *mddev)
29651da177e4SLinus Torvalds {
2966e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
29674b6d287fSNeilBrown 	struct bitmap *bitmap = mddev->bitmap;
29684b6d287fSNeilBrown 
29694b6d287fSNeilBrown 	/* wait for behind writes to complete */
2970e555190dSNeilBrown 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
29719dd1e2faSNeilBrown 		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
29729dd1e2faSNeilBrown 		       mdname(mddev));
29734b6d287fSNeilBrown 		/* need to kick something here to make sure I/O goes? */
2974e555190dSNeilBrown 		wait_event(bitmap->behind_wait,
2975e555190dSNeilBrown 			   atomic_read(&bitmap->behind_writes) == 0);
29764b6d287fSNeilBrown 	}
29771da177e4SLinus Torvalds 
297807169fd4Smajianpeng 	freeze_array(conf, 0);
297907169fd4Smajianpeng 	unfreeze_array(conf);
2980409c57f3SNeilBrown 
298101f96c0aSNeilBrown 	md_unregister_thread(&mddev->thread);
29821da177e4SLinus Torvalds 	if (conf->r1bio_pool)
29831da177e4SLinus Torvalds 		mempool_destroy(conf->r1bio_pool);
29841da177e4SLinus Torvalds 	kfree(conf->mirrors);
29850fea7ed8SHirokazu Takahashi 	safe_put_page(conf->tmppage);
29861da177e4SLinus Torvalds 	kfree(conf->poolinfo);
29871da177e4SLinus Torvalds 	kfree(conf);
29881da177e4SLinus Torvalds 	mddev->private = NULL;
29891da177e4SLinus Torvalds 	return 0;
29901da177e4SLinus Torvalds }
29911da177e4SLinus Torvalds 
2992fd01b88cSNeilBrown static int raid1_resize(struct mddev *mddev, sector_t sectors)
29931da177e4SLinus Torvalds {
29941da177e4SLinus Torvalds 	/* no resync is happening, and there is enough space
29951da177e4SLinus Torvalds 	 * on all devices, so we can resize.
29961da177e4SLinus Torvalds 	 * We need to make sure resync covers any new space.
29971da177e4SLinus Torvalds 	 * If the array is shrinking we should possibly wait until
29981da177e4SLinus Torvalds 	 * any io in the removed space completes, but it hardly seems
29991da177e4SLinus Torvalds 	 * worth it.
30001da177e4SLinus Torvalds 	 */
3001a4a6125aSNeilBrown 	sector_t newsize = raid1_size(mddev, sectors, 0);
3002a4a6125aSNeilBrown 	if (mddev->external_size &&
3003a4a6125aSNeilBrown 	    mddev->array_sectors > newsize)
3004b522adcdSDan Williams 		return -EINVAL;
3005a4a6125aSNeilBrown 	if (mddev->bitmap) {
3006a4a6125aSNeilBrown 		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3007a4a6125aSNeilBrown 		if (ret)
3008a4a6125aSNeilBrown 			return ret;
3009a4a6125aSNeilBrown 	}
3010a4a6125aSNeilBrown 	md_set_array_sectors(mddev, newsize);
3011f233ea5cSAndre Noll 	set_capacity(mddev->gendisk, mddev->array_sectors);
3012449aad3eSNeilBrown 	revalidate_disk(mddev->gendisk);
3013b522adcdSDan Williams 	if (sectors > mddev->dev_sectors &&
3014b098636cSNeilBrown 	    mddev->recovery_cp > mddev->dev_sectors) {
301558c0fed4SAndre Noll 		mddev->recovery_cp = mddev->dev_sectors;
30161da177e4SLinus Torvalds 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
30171da177e4SLinus Torvalds 	}
3018b522adcdSDan Williams 	mddev->dev_sectors = sectors;
30194b5c7ae8SNeilBrown 	mddev->resync_max_sectors = sectors;
30201da177e4SLinus Torvalds 	return 0;
30211da177e4SLinus Torvalds }
30221da177e4SLinus Torvalds 
3023fd01b88cSNeilBrown static int raid1_reshape(struct mddev *mddev)
30241da177e4SLinus Torvalds {
30251da177e4SLinus Torvalds 	/* We need to:
30261da177e4SLinus Torvalds 	 * 1/ resize the r1bio_pool
30271da177e4SLinus Torvalds 	 * 2/ resize conf->mirrors
30281da177e4SLinus Torvalds 	 *
30291da177e4SLinus Torvalds 	 * We allocate a new r1bio_pool if we can.
30301da177e4SLinus Torvalds 	 * Then raise a device barrier and wait until all IO stops.
30311da177e4SLinus Torvalds 	 * Then resize conf->mirrors and swap in the new r1bio pool.
30326ea9c07cSNeilBrown 	 *
30336ea9c07cSNeilBrown 	 * At the same time, we "pack" the devices so that all the missing
30346ea9c07cSNeilBrown 	 * devices have the higher raid_disk numbers.
30351da177e4SLinus Torvalds 	 */
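	/*
	 * Packing example (illustrative): if raid_disks shrinks from 4 to 3
	 * and devices currently sit in slots 0, 2 and 3, the copy loop below
	 * moves them into newmirrors[] slots 0, 1 and 2, leaving the unused
	 * slot with the highest number.
	 */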
30361da177e4SLinus Torvalds 	mempool_t *newpool, *oldpool;
30371da177e4SLinus Torvalds 	struct pool_info *newpoolinfo;
30380eaf822cSJonathan Brassow 	struct raid1_info *newmirrors;
3039e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
304063c70c4fSNeilBrown 	int cnt, raid_disks;
3041c04be0aaSNeilBrown 	unsigned long flags;
3042b5470dc5SDan Williams 	int d, d2, err;
30431da177e4SLinus Torvalds 
304463c70c4fSNeilBrown 	/* Cannot change chunk_size, layout, or level */
3045664e7c41SAndre Noll 	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
304663c70c4fSNeilBrown 	    mddev->layout != mddev->new_layout ||
304763c70c4fSNeilBrown 	    mddev->level != mddev->new_level) {
3048664e7c41SAndre Noll 		mddev->new_chunk_sectors = mddev->chunk_sectors;
304963c70c4fSNeilBrown 		mddev->new_layout = mddev->layout;
305063c70c4fSNeilBrown 		mddev->new_level = mddev->level;
305163c70c4fSNeilBrown 		return -EINVAL;
305263c70c4fSNeilBrown 	}
305363c70c4fSNeilBrown 
3054b5470dc5SDan Williams 	err = md_allow_write(mddev);
3055b5470dc5SDan Williams 	if (err)
3056b5470dc5SDan Williams 		return err;
30572a2275d6SNeilBrown 
305863c70c4fSNeilBrown 	raid_disks = mddev->raid_disks + mddev->delta_disks;
305963c70c4fSNeilBrown 
30606ea9c07cSNeilBrown 	if (raid_disks < conf->raid_disks) {
30616ea9c07cSNeilBrown 		cnt=0;
30626ea9c07cSNeilBrown 		for (d= 0; d < conf->raid_disks; d++)
30631da177e4SLinus Torvalds 			if (conf->mirrors[d].rdev)
30646ea9c07cSNeilBrown 				cnt++;
30656ea9c07cSNeilBrown 		if (cnt > raid_disks)
30661da177e4SLinus Torvalds 			return -EBUSY;
30676ea9c07cSNeilBrown 	}
30681da177e4SLinus Torvalds 
30691da177e4SLinus Torvalds 	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
30701da177e4SLinus Torvalds 	if (!newpoolinfo)
30711da177e4SLinus Torvalds 		return -ENOMEM;
30721da177e4SLinus Torvalds 	newpoolinfo->mddev = mddev;
30738f19ccb2SNeilBrown 	newpoolinfo->raid_disks = raid_disks * 2;
30741da177e4SLinus Torvalds 
30751da177e4SLinus Torvalds 	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
30761da177e4SLinus Torvalds 				 r1bio_pool_free, newpoolinfo);
30771da177e4SLinus Torvalds 	if (!newpool) {
30781da177e4SLinus Torvalds 		kfree(newpoolinfo);
30791da177e4SLinus Torvalds 		return -ENOMEM;
30801da177e4SLinus Torvalds 	}
30810eaf822cSJonathan Brassow 	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
30828f19ccb2SNeilBrown 			     GFP_KERNEL);
30831da177e4SLinus Torvalds 	if (!newmirrors) {
30841da177e4SLinus Torvalds 		kfree(newpoolinfo);
30851da177e4SLinus Torvalds 		mempool_destroy(newpool);
30861da177e4SLinus Torvalds 		return -ENOMEM;
30871da177e4SLinus Torvalds 	}
30881da177e4SLinus Torvalds 
3089e2d59925SNeilBrown 	freeze_array(conf, 0);
30901da177e4SLinus Torvalds 
30911da177e4SLinus Torvalds 	/* ok, everything is stopped */
30921da177e4SLinus Torvalds 	oldpool = conf->r1bio_pool;
30931da177e4SLinus Torvalds 	conf->r1bio_pool = newpool;
30946ea9c07cSNeilBrown 
3095a88aa786SNeilBrown 	for (d = d2 = 0; d < conf->raid_disks; d++) {
30963cb03002SNeilBrown 		struct md_rdev *rdev = conf->mirrors[d].rdev;
3097a88aa786SNeilBrown 		if (rdev && rdev->raid_disk != d2) {
309836fad858SNamhyung Kim 			sysfs_unlink_rdev(mddev, rdev);
3099a88aa786SNeilBrown 			rdev->raid_disk = d2;
310036fad858SNamhyung Kim 			sysfs_unlink_rdev(mddev, rdev);
310136fad858SNamhyung Kim 			if (sysfs_link_rdev(mddev, rdev))
3102a88aa786SNeilBrown 				printk(KERN_WARNING
310336fad858SNamhyung Kim 				       "md/raid1:%s: cannot register rd%d\n",
310436fad858SNamhyung Kim 				       mdname(mddev), rdev->raid_disk);
3105a88aa786SNeilBrown 		}
3106a88aa786SNeilBrown 		if (rdev)
3107a88aa786SNeilBrown 			newmirrors[d2++].rdev = rdev;
31086ea9c07cSNeilBrown 	}
31091da177e4SLinus Torvalds 	kfree(conf->mirrors);
31101da177e4SLinus Torvalds 	conf->mirrors = newmirrors;
31111da177e4SLinus Torvalds 	kfree(conf->poolinfo);
31121da177e4SLinus Torvalds 	conf->poolinfo = newpoolinfo;
31131da177e4SLinus Torvalds 
3114c04be0aaSNeilBrown 	spin_lock_irqsave(&conf->device_lock, flags);
31151da177e4SLinus Torvalds 	mddev->degraded += (raid_disks - conf->raid_disks);
3116c04be0aaSNeilBrown 	spin_unlock_irqrestore(&conf->device_lock, flags);
31171da177e4SLinus Torvalds 	conf->raid_disks = mddev->raid_disks = raid_disks;
311863c70c4fSNeilBrown 	mddev->delta_disks = 0;
31191da177e4SLinus Torvalds 
3120e2d59925SNeilBrown 	unfreeze_array(conf);
31211da177e4SLinus Torvalds 
31221da177e4SLinus Torvalds 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
31231da177e4SLinus Torvalds 	md_wakeup_thread(mddev->thread);
31241da177e4SLinus Torvalds 
31251da177e4SLinus Torvalds 	mempool_destroy(oldpool);
31261da177e4SLinus Torvalds 	return 0;
31271da177e4SLinus Torvalds }
31281da177e4SLinus Torvalds 
3129fd01b88cSNeilBrown static void raid1_quiesce(struct mddev *mddev, int state)
313036fa3063SNeilBrown {
3131e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
313236fa3063SNeilBrown 
313336fa3063SNeilBrown 	switch(state) {
31346eef4b21SNeilBrown 	case 2: /* wake for suspend */
31356eef4b21SNeilBrown 		wake_up(&conf->wait_barrier);
31366eef4b21SNeilBrown 		break;
31379e6603daSNeilBrown 	case 1:
313807169fd4Smajianpeng 		freeze_array(conf, 0);
313936fa3063SNeilBrown 		break;
31409e6603daSNeilBrown 	case 0:
314107169fd4Smajianpeng 		unfreeze_array(conf);
314236fa3063SNeilBrown 		break;
314336fa3063SNeilBrown 	}
314436fa3063SNeilBrown }
314536fa3063SNeilBrown 
3146fd01b88cSNeilBrown static void *raid1_takeover(struct mddev *mddev)
3147709ae487SNeilBrown {
3148709ae487SNeilBrown 	/* raid1 can take over:
3149709ae487SNeilBrown 	 *  raid5 with 2 devices, any layout or chunk size
3150709ae487SNeilBrown 	 */
3151709ae487SNeilBrown 	if (mddev->level == 5 && mddev->raid_disks == 2) {
3152e8096360SNeilBrown 		struct r1conf *conf;
3153709ae487SNeilBrown 		mddev->new_level = 1;
3154709ae487SNeilBrown 		mddev->new_layout = 0;
3155709ae487SNeilBrown 		mddev->new_chunk_sectors = 0;
3156709ae487SNeilBrown 		conf = setup_conf(mddev);
3157709ae487SNeilBrown 		if (!IS_ERR(conf))
315807169fd4Smajianpeng 			/* Array must appear to be quiesced */
315907169fd4Smajianpeng 			conf->array_frozen = 1;
3160709ae487SNeilBrown 		return conf;
3161709ae487SNeilBrown 	}
3162709ae487SNeilBrown 	return ERR_PTR(-EINVAL);
3163709ae487SNeilBrown }
31641da177e4SLinus Torvalds 
316584fc4b56SNeilBrown static struct md_personality raid1_personality =
31661da177e4SLinus Torvalds {
31671da177e4SLinus Torvalds 	.name		= "raid1",
31682604b703SNeilBrown 	.level		= 1,
31691da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
31701da177e4SLinus Torvalds 	.make_request	= make_request,
31711da177e4SLinus Torvalds 	.run		= run,
31721da177e4SLinus Torvalds 	.stop		= stop,
31731da177e4SLinus Torvalds 	.status		= status,
31741da177e4SLinus Torvalds 	.error_handler	= error,
31751da177e4SLinus Torvalds 	.hot_add_disk	= raid1_add_disk,
31761da177e4SLinus Torvalds 	.hot_remove_disk= raid1_remove_disk,
31771da177e4SLinus Torvalds 	.spare_active	= raid1_spare_active,
31781da177e4SLinus Torvalds 	.sync_request	= sync_request,
31791da177e4SLinus Torvalds 	.resize		= raid1_resize,
318080c3a6ceSDan Williams 	.size		= raid1_size,
318163c70c4fSNeilBrown 	.check_reshape	= raid1_reshape,
318236fa3063SNeilBrown 	.quiesce	= raid1_quiesce,
3183709ae487SNeilBrown 	.takeover	= raid1_takeover,
31841da177e4SLinus Torvalds };
31851da177e4SLinus Torvalds 
31861da177e4SLinus Torvalds static int __init raid_init(void)
31871da177e4SLinus Torvalds {
31882604b703SNeilBrown 	return register_md_personality(&raid1_personality);
31891da177e4SLinus Torvalds }
31901da177e4SLinus Torvalds 
31911da177e4SLinus Torvalds static void raid_exit(void)
31921da177e4SLinus Torvalds {
31932604b703SNeilBrown 	unregister_md_personality(&raid1_personality);
31941da177e4SLinus Torvalds }
31951da177e4SLinus Torvalds 
31961da177e4SLinus Torvalds module_init(raid_init);
31971da177e4SLinus Torvalds module_exit(raid_exit);
31981da177e4SLinus Torvalds MODULE_LICENSE("GPL");
31990efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
32001da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */
3201d9d166c2SNeilBrown MODULE_ALIAS("md-raid1");
32022604b703SNeilBrown MODULE_ALIAS("md-level-1");
320334db0cd6SNeilBrown 
320434db0cd6SNeilBrown module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3205