/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *	- bitmap marked during normal i/o
 *	- bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/sched/signal.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL))

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/*
 * for a resync bio, the r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}
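/*
 * Illustrative sketch (not part of the driver): how a resync completion
 * handler can get back to both the page set and the owning r1bio from
 * nothing but the bio itself.  The function name is hypothetical.
 *
 *	static void example_resync_endio(struct bio *bio)
 *	{
 *		struct resync_pages *rp = get_resync_pages(bio);
 *		struct r1bio *r1_bio = get_resync_r1bio(bio);
 *
 *		// rp->raid_bio == r1_bio: both hang off bio->bi_private
 *	}
 */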
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
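/*
 * Worked numbers (illustrative), assuming RESYNC_BLOCK_SIZE is 64K as
 * defined in raid1-10.c and a 512-byte sector:
 *
 *	RESYNC_SECTORS                = 64K >> 9   = 128 sectors
 *	RESYNC_WINDOW                 = 64K * 32   = 2MB
 *	RESYNC_WINDOW_SECTORS         = 2MB >> 9   = 4096 sectors
 *	CLUSTER_RESYNC_WINDOW         = 16 * 2MB   = 32MB
 *	CLUSTER_RESYNC_WINDOW_SECTORS = 32MB >> 9  = 65536 sectors
 */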
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
		      gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}
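/*
 * Illustrative note (assumption flagged): with BARRIER_UNIT_SECTOR_BITS
 * being 17, as declared alongside the other BARRIER_* constants,
 * sector_to_idx() hashes the 64MB barrier unit a sector belongs to into
 * one of BARRIER_BUCKETS_NR buckets, roughly:
 *
 *	idx = hash_long(sector >> 17, BARRIER_BUCKETS_NR_BITS);
 *
 * so all I/O within one barrier unit (1 << 17 sectors of 512 bytes, i.e.
 * 64MB) lands in the same nr_pending/nr_waiting/nr_queued/barrier slot.
 */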
/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}
static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen,	&rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * We need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);
			else {
				/* Finished with this branch */
				r1_bio->bios[mirror] = NULL;
				to_put = bio;
			}
		} else
			set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		/* we release the behind master bio when all writes are done */
		if (r1_bio->behind_master_bio == bio)
			to_put = NULL;

		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}
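/*
 * Summary sketch (not part of the driver) of the decision tree above for
 * one completed mirror write:
 *
 *	error + FailFast (not WriteMostly): md_error(); if the rdev became
 *	    Faulty the branch is simply dropped, otherwise it was the last
 *	    device and the write is retried without MD_FAILFAST;
 *	error, otherwise: R1BIO_WriteError, raid1d retries the write;
 *	success on an In_sync, non-Faulty device: R1BIO_Uptodate;
 *	success over a known bad block: IO_MADE_GOOD, so raid1d can clear
 *	    the bad-block record from process context.
 */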
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
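/*
 * Worked example (illustrative only): with BARRIER_UNIT_SECTOR_SIZE of
 * 1 << 17 = 131072 sectors, a request starting at sector 131000 for 500
 * sectors gets
 *
 *	len = round_up(131001, 131072) - 131000 = 131072 - 131000 = 72
 *
 * so only the 72 sectors up to the barrier unit boundary are handled now;
 * the caller must split off and separately handle the remaining 428
 * sectors, keeping every r1bio inside a single barrier bucket.
 */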
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
		    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device.
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else {
			if ((sectors > best_good_sectors) && (best_disk >= 0))
				best_disk = -1;
			best_good_sectors = sectors;
		}

		if (best_disk >= 0)
			/* At least two disks to choose from so failfast is OK */
			set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If the buffered sequential IO size exceeds the
			 * optimal iosize, check if there is an idle disk.
			 * If yes, choose the idle disk. read_balance could
			 * already have chosen an idle disk before noticing
			 * this is a sequential IO on the current disk. That
			 * doesn't matter because the current disk will go
			 * idle and be picked again once the first disk's
			 * buffered IO size exceeds the optimal iosize. This
			 * way the first disk's iosize is at least optimal;
			 * the second disk's iosize might be small, but that
			 * is not a big deal since by the time the second
			 * disk starts IO, the first disk is likely still
			 * busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk
	 * is non-rotational, choose the disk with the fewest pending requests
	 * even if that disk is rotational, which might or might not be optimal
	 * for arrays with mixed rotational/non-rotational disks, depending on
	 * the workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
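/*
 * Illustrative walk-through (not part of the driver): on a two-disk array
 * with no resync in progress, two readers behave roughly like this under
 * the policy above:
 *
 *	- a strictly sequential reader keeps matching
 *	  next_seq_sect == this_sector on the disk it last used, so it
 *	  sticks to that disk and preserves the drive's readahead;
 *	- a random reader misses that test and lands on the disk with the
 *	  closest head position (all-rotational case) or the fewest
 *	  pending requests (when any non-rotational disk is present).
 */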
static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_bdev;
		bio->bi_next = NULL;
		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		flush_bio_list(conf, bio);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening; it must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
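/*
 * Illustrative pairing sketch (not part of the driver): the rules above
 * boil down to two bracketed disciplines, shown here with hypothetical
 * callers:
 *
 *	// regular I/O path
 *	wait_barrier(conf, bio->bi_iter.bi_sector);
 *	... issue the normal read/write ...
 *	allow_barrier(conf, bio->bi_iter.bi_sector);	// on completion
 *
 *	// resync/recovery path
 *	raise_barrier(conf, sector_nr);
 *	... issue the sync I/O for this barrier unit ...
 *	lower_barrier(conf, sector_nr);			// on completion
 *
 * Both sides pass a sector, so the exclusion is per barrier bucket, not
 * array-wide.
 */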
static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the max
	 *    resync count allowed on the current I/O barrier bucket has
	 *    been reached.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->nr_pending[idx]) &&
			    atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
			    conf->resync_lock);

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}
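/*
 * Illustrative interleaving (not part of the driver) of why the
 * smp_mb__after_atomic() pairing in raise_barrier()/_wait_barrier()
 * matters.  Each side increments its counter before reading the other's:
 *
 *	raise_barrier()                 _wait_barrier()
 *	---------------                 ---------------
 *	inc barrier[idx]                inc nr_pending[idx]
 *	smp_mb__after_atomic()          smp_mb__after_atomic()
 *	read nr_pending[idx]            read barrier[idx]
 *
 * With both fences in place, at least one side must observe the other
 * side's increment, so either the sync I/O waits or the regular I/O
 * waits; they can never both proceed in the same bucket.
 */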
static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in the same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until the array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at the same time
	 * here. If, while we check conf->barrier[idx], the array is frozen
	 * (conf->array_frozen is 1) and conf->barrier[idx] is 0, it is safe
	 * to return and let the I/O continue. Because the array is frozen,
	 * all I/O returned here will eventually complete or be queued, so
	 * no race will happen. See the code comment in freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for the barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at the same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in the same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, a memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void wait_all_barriers(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}
static void allow_all_barriers(struct r1conf *conf)
{
	int idx;

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}
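/*
 * Illustrative usage sketch (not part of the driver): management paths
 * bracket array-wide quiescing as below, with 'extra' describing how many
 * in-flight requests the caller itself still accounts for:
 *
 *	freeze_array(conf, 0);		// e.g. reshape/quiesce: caller has
 *	... reconfigure the array ...	// nothing of its own in flight
 *	unfreeze_array(conf);
 *
 *	freeze_array(conf, 1);		// e.g. handle_read_error(): the
 *	... fix up the read error ...	// failed read itself is still pending
 *	unfreeze_array(conf);
 */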
static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will
	 * either complete or be queued. When everything goes quiet, there
	 * are only queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception:
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For the normal
	 * I/O context, extra is 1; in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}

static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
					   struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i = 0;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
	if (!behind_bio)
		goto fail;

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio))
		goto skip_copy;

	while (i < vcnt && size) {
		struct page *page;
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))
			goto free_pages;

		bio_add_page(behind_bio, page, len, 0);

		size -= len;
		i++;
	}

	bio_copy_data(behind_bio, bio);
skip_copy:
	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	return behind_bio;

free_pages:
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
fail:
	return behind_bio;
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};
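/*
 * Illustrative note (assumption: the write path later in this file uses
 * blk_check_plugged() the same way other md personalities do): each
 * plugged task gets one raid1_plug_cb, writes are batched onto
 * plug->pending, and raid1_unplug() below decides whether to flush them
 * directly or hand them to the raid1d thread.  Roughly:
 *
 *	struct blk_plug_cb *cb = blk_check_plugged(raid1_unplug, mddev,
 *						   sizeof(struct raid1_plug_cb));
 *	struct raid1_plug_cb *plug = cb ?
 *		container_of(cb, struct raid1_plug_cb, cb) : NULL;
 *	if (plug)
 *		bio_list_add(&plug->pending, mbio);
 */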
1151874807a8SNeilBrown if (from_schedule || current->bio_list) { 1152f54a9d0eSNeilBrown spin_lock_irq(&conf->device_lock); 1153f54a9d0eSNeilBrown bio_list_merge(&conf->pending_bio_list, &plug->pending); 1154f54a9d0eSNeilBrown conf->pending_count += plug->pending_cnt; 1155f54a9d0eSNeilBrown spin_unlock_irq(&conf->device_lock); 1156ee0b0244SNeilBrown wake_up(&conf->wait_barrier); 1157f54a9d0eSNeilBrown md_wakeup_thread(mddev->thread); 1158f54a9d0eSNeilBrown kfree(plug); 1159f54a9d0eSNeilBrown return; 1160f54a9d0eSNeilBrown } 1161f54a9d0eSNeilBrown 1162f54a9d0eSNeilBrown /* we aren't scheduling, so we can do the write-out directly. */ 1163f54a9d0eSNeilBrown bio = bio_list_get(&plug->pending); 1164673ca68dSNeilBrown flush_bio_list(conf, bio); 1165f54a9d0eSNeilBrown kfree(plug); 1166f54a9d0eSNeilBrown } 1167f54a9d0eSNeilBrown 1168689389a0SNeilBrown static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) 1169689389a0SNeilBrown { 1170689389a0SNeilBrown r1_bio->master_bio = bio; 1171689389a0SNeilBrown r1_bio->sectors = bio_sectors(bio); 1172689389a0SNeilBrown r1_bio->state = 0; 1173689389a0SNeilBrown r1_bio->mddev = mddev; 1174689389a0SNeilBrown r1_bio->sector = bio->bi_iter.bi_sector; 1175689389a0SNeilBrown } 1176689389a0SNeilBrown 1177fd76863eScolyli@suse.de static inline struct r1bio * 1178689389a0SNeilBrown alloc_r1bio(struct mddev *mddev, struct bio *bio) 1179fd76863eScolyli@suse.de { 1180fd76863eScolyli@suse.de struct r1conf *conf = mddev->private; 1181fd76863eScolyli@suse.de struct r1bio *r1_bio; 1182fd76863eScolyli@suse.de 1183fd76863eScolyli@suse.de r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 1184689389a0SNeilBrown /* Ensure no bio records IO_BLOCKED */ 1185689389a0SNeilBrown memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); 1186689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1187fd76863eScolyli@suse.de return r1_bio; 1188fd76863eScolyli@suse.de } 1189fd76863eScolyli@suse.de 1190c230e7e5SNeilBrown static void raid1_read_request(struct mddev *mddev, struct bio *bio, 1191689389a0SNeilBrown int max_read_sectors, struct r1bio *r1_bio) 11921da177e4SLinus Torvalds { 1193e8096360SNeilBrown struct r1conf *conf = mddev->private; 11940eaf822cSJonathan Brassow struct raid1_info *mirror; 11951da177e4SLinus Torvalds struct bio *read_bio; 11963b046a97SRobert LeBlanc struct bitmap *bitmap = mddev->bitmap; 1197796a5cf0SMike Christie const int op = bio_op(bio); 11981eff9d32SJens Axboe const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 11991f68f0c4SNeilBrown int max_sectors; 1200d2eb35acSNeilBrown int rdisk; 1201689389a0SNeilBrown bool print_msg = !!r1_bio; 1202689389a0SNeilBrown char b[BDEVNAME_SIZE]; 1203689389a0SNeilBrown 1204689389a0SNeilBrown /* 1205689389a0SNeilBrown * If r1_bio is set, we are blocking the raid1d thread 1206689389a0SNeilBrown * so there is a tiny risk of deadlock. So ask for 1207689389a0SNeilBrown * emergency memory if needed. 1208689389a0SNeilBrown */ 1209689389a0SNeilBrown gfp_t gfp = r1_bio ? 
(GFP_NOIO | __GFP_HIGH) : GFP_NOIO; 1210689389a0SNeilBrown 1211689389a0SNeilBrown if (print_msg) { 1212689389a0SNeilBrown /* Need to get the block device name carefully */ 1213689389a0SNeilBrown struct md_rdev *rdev; 1214689389a0SNeilBrown rcu_read_lock(); 1215689389a0SNeilBrown rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); 1216689389a0SNeilBrown if (rdev) 1217689389a0SNeilBrown bdevname(rdev->bdev, b); 1218689389a0SNeilBrown else 1219689389a0SNeilBrown strcpy(b, "???"); 1220689389a0SNeilBrown rcu_read_unlock(); 1221689389a0SNeilBrown } 1222d2eb35acSNeilBrown 1223fd76863eScolyli@suse.de /* 1224fd76863eScolyli@suse.de * Still need barrier for READ in case that whole 1225fd76863eScolyli@suse.de * array is frozen. 1226fd76863eScolyli@suse.de */ 1227fd76863eScolyli@suse.de wait_read_barrier(conf, bio->bi_iter.bi_sector); 12283b046a97SRobert LeBlanc 1229689389a0SNeilBrown if (!r1_bio) 1230689389a0SNeilBrown r1_bio = alloc_r1bio(mddev, bio); 1231689389a0SNeilBrown else 1232689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1233c230e7e5SNeilBrown r1_bio->sectors = max_read_sectors; 1234fd76863eScolyli@suse.de 1235fd76863eScolyli@suse.de /* 1236fd76863eScolyli@suse.de * make_request() can abort the operation when read-ahead is being 1237fd76863eScolyli@suse.de * used and no empty request is available. 1238fd76863eScolyli@suse.de */ 1239d2eb35acSNeilBrown rdisk = read_balance(conf, r1_bio, &max_sectors); 12401da177e4SLinus Torvalds 12411da177e4SLinus Torvalds if (rdisk < 0) { 12421da177e4SLinus Torvalds /* couldn't find anywhere to read from */ 1243689389a0SNeilBrown if (print_msg) { 1244689389a0SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 1245689389a0SNeilBrown mdname(mddev), 1246689389a0SNeilBrown b, 1247689389a0SNeilBrown (unsigned long long)r1_bio->sector); 1248689389a0SNeilBrown } 12491da177e4SLinus Torvalds raid_end_bio_io(r1_bio); 12505a7bbad2SChristoph Hellwig return; 12511da177e4SLinus Torvalds } 12521da177e4SLinus Torvalds mirror = conf->mirrors + rdisk; 12531da177e4SLinus Torvalds 1254689389a0SNeilBrown if (print_msg) 1255689389a0SNeilBrown pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", 1256689389a0SNeilBrown mdname(mddev), 1257689389a0SNeilBrown (unsigned long long)r1_bio->sector, 1258689389a0SNeilBrown bdevname(mirror->rdev->bdev, b)); 1259689389a0SNeilBrown 1260e555190dSNeilBrown if (test_bit(WriteMostly, &mirror->rdev->flags) && 1261e555190dSNeilBrown bitmap) { 12623b046a97SRobert LeBlanc /* 12633b046a97SRobert LeBlanc * Reading from a write-mostly device must take care not to 12643b046a97SRobert LeBlanc * over-take any writes that are 'behind' 1265e555190dSNeilBrown */ 1266578b54adSNeilBrown raid1_log(mddev, "wait behind writes"); 1267e555190dSNeilBrown wait_event(bitmap->behind_wait, 1268e555190dSNeilBrown atomic_read(&bitmap->behind_writes) == 0); 1269e555190dSNeilBrown } 1270c230e7e5SNeilBrown 1271c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1272c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1273689389a0SNeilBrown gfp, conf->bio_split); 1274c230e7e5SNeilBrown bio_chain(split, bio); 1275c230e7e5SNeilBrown generic_make_request(bio); 1276c230e7e5SNeilBrown bio = split; 1277c230e7e5SNeilBrown r1_bio->master_bio = bio; 1278c230e7e5SNeilBrown r1_bio->sectors = max_sectors; 1279c230e7e5SNeilBrown } 1280c230e7e5SNeilBrown 12811da177e4SLinus Torvalds r1_bio->read_disk = rdisk; 12821da177e4SLinus Torvalds 1283689389a0SNeilBrown read_bio = 
bio_clone_fast(bio, gfp, mddev->bio_set); 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds r1_bio->bios[rdisk] = read_bio; 12861da177e4SLinus Torvalds 12874f024f37SKent Overstreet read_bio->bi_iter.bi_sector = r1_bio->sector + 12884f024f37SKent Overstreet mirror->rdev->data_offset; 12891da177e4SLinus Torvalds read_bio->bi_bdev = mirror->rdev->bdev; 12901da177e4SLinus Torvalds read_bio->bi_end_io = raid1_end_read_request; 1291796a5cf0SMike Christie bio_set_op_attrs(read_bio, op, do_sync); 12922e52d449SNeilBrown if (test_bit(FailFast, &mirror->rdev->flags) && 12932e52d449SNeilBrown test_bit(R1BIO_FailFast, &r1_bio->state)) 12942e52d449SNeilBrown read_bio->bi_opf |= MD_FAILFAST; 12951da177e4SLinus Torvalds read_bio->bi_private = r1_bio; 12961da177e4SLinus Torvalds 1297109e3765SNeilBrown if (mddev->gendisk) 1298109e3765SNeilBrown trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1299109e3765SNeilBrown read_bio, disk_devt(mddev->gendisk), 1300109e3765SNeilBrown r1_bio->sector); 1301109e3765SNeilBrown 13021da177e4SLinus Torvalds generic_make_request(read_bio); 13031da177e4SLinus Torvalds } 13041da177e4SLinus Torvalds 1305c230e7e5SNeilBrown static void raid1_write_request(struct mddev *mddev, struct bio *bio, 1306c230e7e5SNeilBrown int max_write_sectors) 13073b046a97SRobert LeBlanc { 13083b046a97SRobert LeBlanc struct r1conf *conf = mddev->private; 1309fd76863eScolyli@suse.de struct r1bio *r1_bio; 13103b046a97SRobert LeBlanc int i, disks; 13113b046a97SRobert LeBlanc struct bitmap *bitmap = mddev->bitmap; 13123b046a97SRobert LeBlanc unsigned long flags; 13133b046a97SRobert LeBlanc struct md_rdev *blocked_rdev; 13143b046a97SRobert LeBlanc struct blk_plug_cb *cb; 13153b046a97SRobert LeBlanc struct raid1_plug_cb *plug = NULL; 13163b046a97SRobert LeBlanc int first_clone; 13173b046a97SRobert LeBlanc int max_sectors; 13183b046a97SRobert LeBlanc 13191da177e4SLinus Torvalds /* 13203b046a97SRobert LeBlanc * Register the new request and wait if the reconstruction 13213b046a97SRobert LeBlanc * thread has put up a bar for new requests. 13223b046a97SRobert LeBlanc * Continue immediately if no resync is active currently. 13231da177e4SLinus Torvalds */ 13243b046a97SRobert LeBlanc 13253b046a97SRobert LeBlanc 13263b046a97SRobert LeBlanc if ((bio_end_sector(bio) > mddev->suspend_lo && 13273b046a97SRobert LeBlanc bio->bi_iter.bi_sector < mddev->suspend_hi) || 13283b046a97SRobert LeBlanc (mddev_is_clustered(mddev) && 13293b046a97SRobert LeBlanc md_cluster_ops->area_resyncing(mddev, WRITE, 13303b046a97SRobert LeBlanc bio->bi_iter.bi_sector, bio_end_sector(bio)))) { 13313b046a97SRobert LeBlanc 13323b046a97SRobert LeBlanc /* 13333b046a97SRobert LeBlanc * As the suspend_* range is controlled by userspace, we want 13343b046a97SRobert LeBlanc * an interruptible wait. 
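		 * Signals are blocked around schedule() below, otherwise a
		 * pending signal would make schedule() return immediately
		 * and turn this wait into a busy loop.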
13353b046a97SRobert LeBlanc 	 */
13363b046a97SRobert LeBlanc 	DEFINE_WAIT(w);
13373b046a97SRobert LeBlanc 	for (;;) {
1338f9c79bc0SMikulas Patocka 		sigset_t full, old;
13393b046a97SRobert LeBlanc 		prepare_to_wait(&conf->wait_barrier,
13403b046a97SRobert LeBlanc 				&w, TASK_INTERRUPTIBLE);
13413b046a97SRobert LeBlanc 		if (bio_end_sector(bio) <= mddev->suspend_lo ||
13423b046a97SRobert LeBlanc 		    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
13433b046a97SRobert LeBlanc 		    (mddev_is_clustered(mddev) &&
13443b046a97SRobert LeBlanc 		     !md_cluster_ops->area_resyncing(mddev, WRITE,
13453b046a97SRobert LeBlanc 			     bio->bi_iter.bi_sector,
13463b046a97SRobert LeBlanc 			     bio_end_sector(bio))))
13473b046a97SRobert LeBlanc 			break;
1348f9c79bc0SMikulas Patocka 		sigfillset(&full);
1349f9c79bc0SMikulas Patocka 		sigprocmask(SIG_BLOCK, &full, &old);
13503b046a97SRobert LeBlanc 		schedule();
1351f9c79bc0SMikulas Patocka 		sigprocmask(SIG_SETMASK, &old, NULL);
13523b046a97SRobert LeBlanc 	}
13533b046a97SRobert LeBlanc 	finish_wait(&conf->wait_barrier, &w);
13543b046a97SRobert LeBlanc 	}
1355fd76863eScolyli@suse.de 	wait_barrier(conf, bio->bi_iter.bi_sector);
1356fd76863eScolyli@suse.de
1357689389a0SNeilBrown 	r1_bio = alloc_r1bio(mddev, bio);
1358c230e7e5SNeilBrown 	r1_bio->sectors = max_write_sectors;
13593b046a97SRobert LeBlanc
136034db0cd6SNeilBrown 	if (conf->pending_count >= max_queued_requests) {
136134db0cd6SNeilBrown 		md_wakeup_thread(mddev->thread);
1362578b54adSNeilBrown 		raid1_log(mddev, "wait queued");
136334db0cd6SNeilBrown 		wait_event(conf->wait_barrier,
136434db0cd6SNeilBrown 			   conf->pending_count < max_queued_requests);
136534db0cd6SNeilBrown 	}
13661f68f0c4SNeilBrown 	/* first select target devices under rcu_lock and
13671da177e4SLinus Torvalds 	 * inc refcount on their rdev.  Record them by setting
13681da177e4SLinus Torvalds 	 * bios[x] to bio
13691f68f0c4SNeilBrown 	 * If there are known/acknowledged bad blocks on any device on
13701f68f0c4SNeilBrown 	 * which we have seen a write error, we want to avoid writing those
13711f68f0c4SNeilBrown 	 * blocks.
13721f68f0c4SNeilBrown 	 * This potentially requires several writes to write around
13731f68f0c4SNeilBrown 	 * the bad blocks. Each set of writes gets its own r1bio
13741f68f0c4SNeilBrown 	 * with a set of bios attached.
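	 * For example, if a 64-sector write finds an acknowledged bad
	 * block 16 sectors in on one mirror, max_sectors is clipped to
	 * 16, the bio is split below, and the remainder is resubmitted
	 * as a fresh request.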
13751da177e4SLinus Torvalds */ 1376c3b328acSNeilBrown 13778f19ccb2SNeilBrown disks = conf->raid_disks * 2; 13786bfe0b49SDan Williams retry_write: 13796bfe0b49SDan Williams blocked_rdev = NULL; 13801da177e4SLinus Torvalds rcu_read_lock(); 13811f68f0c4SNeilBrown max_sectors = r1_bio->sectors; 13821da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 13833cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 13846bfe0b49SDan Williams if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 13856bfe0b49SDan Williams atomic_inc(&rdev->nr_pending); 13866bfe0b49SDan Williams blocked_rdev = rdev; 13876bfe0b49SDan Williams break; 13886bfe0b49SDan Williams } 13891da177e4SLinus Torvalds r1_bio->bios[i] = NULL; 13908ae12666SKent Overstreet if (!rdev || test_bit(Faulty, &rdev->flags)) { 13918f19ccb2SNeilBrown if (i < conf->raid_disks) 13921f68f0c4SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 13931f68f0c4SNeilBrown continue; 1394964147d5SNeilBrown } 13951f68f0c4SNeilBrown 13961f68f0c4SNeilBrown atomic_inc(&rdev->nr_pending); 13971f68f0c4SNeilBrown if (test_bit(WriteErrorSeen, &rdev->flags)) { 13981f68f0c4SNeilBrown sector_t first_bad; 13991f68f0c4SNeilBrown int bad_sectors; 14001f68f0c4SNeilBrown int is_bad; 14011f68f0c4SNeilBrown 14023b046a97SRobert LeBlanc is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, 14031f68f0c4SNeilBrown &first_bad, &bad_sectors); 14041f68f0c4SNeilBrown if (is_bad < 0) { 14051f68f0c4SNeilBrown /* mustn't write here until the bad block is 14061f68f0c4SNeilBrown * acknowledged*/ 14071f68f0c4SNeilBrown set_bit(BlockedBadBlocks, &rdev->flags); 14081f68f0c4SNeilBrown blocked_rdev = rdev; 14091f68f0c4SNeilBrown break; 14101f68f0c4SNeilBrown } 14111f68f0c4SNeilBrown if (is_bad && first_bad <= r1_bio->sector) { 14121f68f0c4SNeilBrown /* Cannot write here at all */ 14131f68f0c4SNeilBrown bad_sectors -= (r1_bio->sector - first_bad); 14141f68f0c4SNeilBrown if (bad_sectors < max_sectors) 14151f68f0c4SNeilBrown /* mustn't write more than bad_sectors 14161f68f0c4SNeilBrown * to other devices yet 14171f68f0c4SNeilBrown */ 14181f68f0c4SNeilBrown max_sectors = bad_sectors; 14191f68f0c4SNeilBrown rdev_dec_pending(rdev, mddev); 14201f68f0c4SNeilBrown /* We don't set R1BIO_Degraded as that 14211f68f0c4SNeilBrown * only applies if the disk is 14221f68f0c4SNeilBrown * missing, so it might be re-added, 14231f68f0c4SNeilBrown * and we want to know to recover this 14241f68f0c4SNeilBrown * chunk. 
14251f68f0c4SNeilBrown * In this case the device is here, 14261f68f0c4SNeilBrown * and the fact that this chunk is not 14271f68f0c4SNeilBrown * in-sync is recorded in the bad 14281f68f0c4SNeilBrown * block log 14291f68f0c4SNeilBrown */ 14301f68f0c4SNeilBrown continue; 14311f68f0c4SNeilBrown } 14321f68f0c4SNeilBrown if (is_bad) { 14331f68f0c4SNeilBrown int good_sectors = first_bad - r1_bio->sector; 14341f68f0c4SNeilBrown if (good_sectors < max_sectors) 14351f68f0c4SNeilBrown max_sectors = good_sectors; 14361f68f0c4SNeilBrown } 14371f68f0c4SNeilBrown } 14381f68f0c4SNeilBrown r1_bio->bios[i] = bio; 14391da177e4SLinus Torvalds } 14401da177e4SLinus Torvalds rcu_read_unlock(); 14411da177e4SLinus Torvalds 14426bfe0b49SDan Williams if (unlikely(blocked_rdev)) { 14436bfe0b49SDan Williams /* Wait for this device to become unblocked */ 14446bfe0b49SDan Williams int j; 14456bfe0b49SDan Williams 14466bfe0b49SDan Williams for (j = 0; j < i; j++) 14476bfe0b49SDan Williams if (r1_bio->bios[j]) 14486bfe0b49SDan Williams rdev_dec_pending(conf->mirrors[j].rdev, mddev); 14491f68f0c4SNeilBrown r1_bio->state = 0; 1450fd76863eScolyli@suse.de allow_barrier(conf, bio->bi_iter.bi_sector); 1451578b54adSNeilBrown raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 14526bfe0b49SDan Williams md_wait_for_blocked_rdev(blocked_rdev, mddev); 1453fd76863eScolyli@suse.de wait_barrier(conf, bio->bi_iter.bi_sector); 14546bfe0b49SDan Williams goto retry_write; 14556bfe0b49SDan Williams } 14566bfe0b49SDan Williams 1457c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1458c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1459c230e7e5SNeilBrown GFP_NOIO, conf->bio_split); 1460c230e7e5SNeilBrown bio_chain(split, bio); 1461c230e7e5SNeilBrown generic_make_request(bio); 1462c230e7e5SNeilBrown bio = split; 1463c230e7e5SNeilBrown r1_bio->master_bio = bio; 14641f68f0c4SNeilBrown r1_bio->sectors = max_sectors; 1465191ea9b2SNeilBrown } 14664b6d287fSNeilBrown 14674e78064fSNeilBrown atomic_set(&r1_bio->remaining, 1); 14684b6d287fSNeilBrown atomic_set(&r1_bio->behind_remaining, 0); 1469191ea9b2SNeilBrown 14701f68f0c4SNeilBrown first_clone = 1; 1471d8c84c4fSMing Lei 14721da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 14738e58e327SMing Lei struct bio *mbio = NULL; 14741da177e4SLinus Torvalds if (!r1_bio->bios[i]) 14751da177e4SLinus Torvalds continue; 14761da177e4SLinus Torvalds 14771da177e4SLinus Torvalds 14781f68f0c4SNeilBrown if (first_clone) { 14791f68f0c4SNeilBrown /* do behind I/O ? 
14801f68f0c4SNeilBrown * Not if there are too many, or cannot 14811f68f0c4SNeilBrown * allocate memory, or a reader on WriteMostly 14821f68f0c4SNeilBrown * is waiting for behind writes to flush */ 14831f68f0c4SNeilBrown if (bitmap && 14841f68f0c4SNeilBrown (atomic_read(&bitmap->behind_writes) 14851f68f0c4SNeilBrown < mddev->bitmap_info.max_write_behind) && 14868e58e327SMing Lei !waitqueue_active(&bitmap->behind_wait)) { 1487cb83efcfSNeilBrown mbio = alloc_behind_master_bio(r1_bio, bio); 14888e58e327SMing Lei } 14891da177e4SLinus Torvalds 14901f68f0c4SNeilBrown bitmap_startwrite(bitmap, r1_bio->sector, 14911f68f0c4SNeilBrown r1_bio->sectors, 14921f68f0c4SNeilBrown test_bit(R1BIO_BehindIO, 14931f68f0c4SNeilBrown &r1_bio->state)); 14941f68f0c4SNeilBrown first_clone = 0; 14951f68f0c4SNeilBrown } 14968e58e327SMing Lei 14978e58e327SMing Lei if (!mbio) { 1498841c1316SMing Lei if (r1_bio->behind_master_bio) 1499841c1316SMing Lei mbio = bio_clone_fast(r1_bio->behind_master_bio, 1500841c1316SMing Lei GFP_NOIO, 1501841c1316SMing Lei mddev->bio_set); 1502c230e7e5SNeilBrown else 1503d7a10308SMing Lei mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 15041ec49223SShaohua Li } 15058e58e327SMing Lei 1506841c1316SMing Lei if (r1_bio->behind_master_bio) { 15074b6d287fSNeilBrown if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 15084b6d287fSNeilBrown atomic_inc(&r1_bio->behind_remaining); 15094b6d287fSNeilBrown } 15104b6d287fSNeilBrown 15111f68f0c4SNeilBrown r1_bio->bios[i] = mbio; 15121f68f0c4SNeilBrown 15134f024f37SKent Overstreet mbio->bi_iter.bi_sector = (r1_bio->sector + 15141f68f0c4SNeilBrown conf->mirrors[i].rdev->data_offset); 1515109e3765SNeilBrown mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 15161f68f0c4SNeilBrown mbio->bi_end_io = raid1_end_write_request; 1517a682e003SLinus Torvalds mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); 1518212e7eb7SNeilBrown if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && 1519212e7eb7SNeilBrown !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && 1520212e7eb7SNeilBrown conf->raid_disks - mddev->degraded > 1) 1521212e7eb7SNeilBrown mbio->bi_opf |= MD_FAILFAST; 15221f68f0c4SNeilBrown mbio->bi_private = r1_bio; 15231f68f0c4SNeilBrown 15241da177e4SLinus Torvalds atomic_inc(&r1_bio->remaining); 1525f54a9d0eSNeilBrown 1526109e3765SNeilBrown if (mddev->gendisk) 1527109e3765SNeilBrown trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1528109e3765SNeilBrown mbio, disk_devt(mddev->gendisk), 1529109e3765SNeilBrown r1_bio->sector); 1530109e3765SNeilBrown /* flush_pending_writes() needs access to the rdev so...*/ 1531109e3765SNeilBrown mbio->bi_bdev = (void*)conf->mirrors[i].rdev; 1532109e3765SNeilBrown 1533f54a9d0eSNeilBrown cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); 1534f54a9d0eSNeilBrown if (cb) 1535f54a9d0eSNeilBrown plug = container_of(cb, struct raid1_plug_cb, cb); 1536f54a9d0eSNeilBrown else 1537f54a9d0eSNeilBrown plug = NULL; 1538f54a9d0eSNeilBrown if (plug) { 1539f54a9d0eSNeilBrown bio_list_add(&plug->pending, mbio); 1540f54a9d0eSNeilBrown plug->pending_cnt++; 1541f54a9d0eSNeilBrown } else { 154223b245c0SShaohua Li spin_lock_irqsave(&conf->device_lock, flags); 15434e78064fSNeilBrown bio_list_add(&conf->pending_bio_list, mbio); 154434db0cd6SNeilBrown conf->pending_count++; 1545191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 1546b357f04aSNeilBrown md_wakeup_thread(mddev->thread); 15474e78064fSNeilBrown } 154823b245c0SShaohua Li } 15491f68f0c4SNeilBrown 1550079fa166SNeilBrown 
r1_bio_write_done(r1_bio); 1551079fa166SNeilBrown 1552079fa166SNeilBrown /* In case raid1d snuck in to freeze_array */ 1553079fa166SNeilBrown wake_up(&conf->wait_barrier); 15541da177e4SLinus Torvalds } 15551da177e4SLinus Torvalds 1556cc27b0c7SNeilBrown static bool raid1_make_request(struct mddev *mddev, struct bio *bio) 15573b046a97SRobert LeBlanc { 1558fd76863eScolyli@suse.de sector_t sectors; 15593b046a97SRobert LeBlanc 1560aff8da09SShaohua Li if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1561aff8da09SShaohua Li md_flush_request(mddev, bio); 1562cc27b0c7SNeilBrown return true; 1563aff8da09SShaohua Li } 15643b046a97SRobert LeBlanc 1565c230e7e5SNeilBrown /* 1566c230e7e5SNeilBrown * There is a limit to the maximum size, but 1567c230e7e5SNeilBrown * the read/write handler might find a lower limit 1568c230e7e5SNeilBrown * due to bad blocks. To avoid multiple splits, 1569c230e7e5SNeilBrown * we pass the maximum number of sectors down 1570c230e7e5SNeilBrown * and let the lower level perform the split. 1571c230e7e5SNeilBrown */ 1572fd76863eScolyli@suse.de sectors = align_to_barrier_unit_end( 1573fd76863eScolyli@suse.de bio->bi_iter.bi_sector, bio_sectors(bio)); 15743b046a97SRobert LeBlanc 1575c230e7e5SNeilBrown if (bio_data_dir(bio) == READ) 1576689389a0SNeilBrown raid1_read_request(mddev, bio, sectors, NULL); 1577cc27b0c7SNeilBrown else { 1578cc27b0c7SNeilBrown if (!md_write_start(mddev,bio)) 1579cc27b0c7SNeilBrown return false; 1580c230e7e5SNeilBrown raid1_write_request(mddev, bio, sectors); 15813b046a97SRobert LeBlanc } 1582cc27b0c7SNeilBrown return true; 1583cc27b0c7SNeilBrown } 15843b046a97SRobert LeBlanc 1585849674e4SShaohua Li static void raid1_status(struct seq_file *seq, struct mddev *mddev) 15861da177e4SLinus Torvalds { 1587e8096360SNeilBrown struct r1conf *conf = mddev->private; 15881da177e4SLinus Torvalds int i; 15891da177e4SLinus Torvalds 15901da177e4SLinus Torvalds seq_printf(seq, " [%d/%d] [", conf->raid_disks, 159111ce99e6SNeilBrown conf->raid_disks - mddev->degraded); 1592ddac7c7eSNeilBrown rcu_read_lock(); 1593ddac7c7eSNeilBrown for (i = 0; i < conf->raid_disks; i++) { 15943cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 15951da177e4SLinus Torvalds seq_printf(seq, "%s", 1596ddac7c7eSNeilBrown rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1597ddac7c7eSNeilBrown } 1598ddac7c7eSNeilBrown rcu_read_unlock(); 15991da177e4SLinus Torvalds seq_printf(seq, "]"); 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 1602849674e4SShaohua Li static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) 16031da177e4SLinus Torvalds { 16041da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 1605e8096360SNeilBrown struct r1conf *conf = mddev->private; 1606423f04d6SNeilBrown unsigned long flags; 16071da177e4SLinus Torvalds 16081da177e4SLinus Torvalds /* 16091da177e4SLinus Torvalds * If it is not operational, then we have already marked it as dead 16101da177e4SLinus Torvalds * else if it is the last working disks, ignore the error, let the 16111da177e4SLinus Torvalds * next level up know. 16121da177e4SLinus Torvalds * else mark the drive as failed 16131da177e4SLinus Torvalds */ 16142e52d449SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 1615b2d444d7SNeilBrown if (test_bit(In_sync, &rdev->flags) 16164044ba58SNeilBrown && (conf->raid_disks - mddev->degraded) == 1) { 16171da177e4SLinus Torvalds /* 16181da177e4SLinus Torvalds * Don't fail the drive, act as though we were just a 16194044ba58SNeilBrown * normal single drive. 
16204044ba58SNeilBrown * However don't try a recovery from this drive as 16214044ba58SNeilBrown * it is very likely to fail. 16221da177e4SLinus Torvalds */ 16235389042fSNeilBrown conf->recovery_disabled = mddev->recovery_disabled; 16242e52d449SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16251da177e4SLinus Torvalds return; 16264044ba58SNeilBrown } 1627de393cdeSNeilBrown set_bit(Blocked, &rdev->flags); 1628423f04d6SNeilBrown if (test_and_clear_bit(In_sync, &rdev->flags)) { 16291da177e4SLinus Torvalds mddev->degraded++; 1630dd00a99eSNeilBrown set_bit(Faulty, &rdev->flags); 16312446dba0SNeilBrown } else 16322446dba0SNeilBrown set_bit(Faulty, &rdev->flags); 1633423f04d6SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16341da177e4SLinus Torvalds /* 16351da177e4SLinus Torvalds * if recovery is running, make sure it aborts. 16361da177e4SLinus Torvalds */ 1637dfc70645SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 16382953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0, 16392953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 16401d41c216SNeilBrown pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" 1641067032bcSJoe Perches "md/raid1:%s: Operation continuing on %d devices.\n", 16429dd1e2faSNeilBrown mdname(mddev), bdevname(rdev->bdev, b), 16439dd1e2faSNeilBrown mdname(mddev), conf->raid_disks - mddev->degraded); 16441da177e4SLinus Torvalds } 16451da177e4SLinus Torvalds 1646e8096360SNeilBrown static void print_conf(struct r1conf *conf) 16471da177e4SLinus Torvalds { 16481da177e4SLinus Torvalds int i; 16491da177e4SLinus Torvalds 16501d41c216SNeilBrown pr_debug("RAID1 conf printout:\n"); 16511da177e4SLinus Torvalds if (!conf) { 16521d41c216SNeilBrown pr_debug("(!conf)\n"); 16531da177e4SLinus Torvalds return; 16541da177e4SLinus Torvalds } 16551d41c216SNeilBrown pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 16561da177e4SLinus Torvalds conf->raid_disks); 16571da177e4SLinus Torvalds 1658ddac7c7eSNeilBrown rcu_read_lock(); 16591da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 16601da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 16613cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1662ddac7c7eSNeilBrown if (rdev) 16631d41c216SNeilBrown pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1664ddac7c7eSNeilBrown i, !test_bit(In_sync, &rdev->flags), 1665ddac7c7eSNeilBrown !test_bit(Faulty, &rdev->flags), 1666ddac7c7eSNeilBrown bdevname(rdev->bdev,b)); 16671da177e4SLinus Torvalds } 1668ddac7c7eSNeilBrown rcu_read_unlock(); 16691da177e4SLinus Torvalds } 16701da177e4SLinus Torvalds 1671e8096360SNeilBrown static void close_sync(struct r1conf *conf) 16721da177e4SLinus Torvalds { 1673fd76863eScolyli@suse.de wait_all_barriers(conf); 1674fd76863eScolyli@suse.de allow_all_barriers(conf); 16751da177e4SLinus Torvalds 16761da177e4SLinus Torvalds mempool_destroy(conf->r1buf_pool); 16771da177e4SLinus Torvalds conf->r1buf_pool = NULL; 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 1680fd01b88cSNeilBrown static int raid1_spare_active(struct mddev *mddev) 16811da177e4SLinus Torvalds { 16821da177e4SLinus Torvalds int i; 1683e8096360SNeilBrown struct r1conf *conf = mddev->private; 16846b965620SNeilBrown int count = 0; 16856b965620SNeilBrown unsigned long flags; 16861da177e4SLinus Torvalds 16871da177e4SLinus Torvalds /* 16881da177e4SLinus Torvalds * Find all failed disks within the RAID1 configuration 1689ddac7c7eSNeilBrown * and mark them readable. 
1690ddac7c7eSNeilBrown * Called under mddev lock, so rcu protection not needed. 1691423f04d6SNeilBrown * device_lock used to avoid races with raid1_end_read_request 1692423f04d6SNeilBrown * which expects 'In_sync' flags and ->degraded to be consistent. 16931da177e4SLinus Torvalds */ 1694423f04d6SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 16951da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 16963cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev; 16978c7a2c2bSNeilBrown struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 16988c7a2c2bSNeilBrown if (repl 16991aee41f6SGoldwyn Rodrigues && !test_bit(Candidate, &repl->flags) 17008c7a2c2bSNeilBrown && repl->recovery_offset == MaxSector 17018c7a2c2bSNeilBrown && !test_bit(Faulty, &repl->flags) 17028c7a2c2bSNeilBrown && !test_and_set_bit(In_sync, &repl->flags)) { 17038c7a2c2bSNeilBrown /* replacement has just become active */ 17048c7a2c2bSNeilBrown if (!rdev || 17058c7a2c2bSNeilBrown !test_and_clear_bit(In_sync, &rdev->flags)) 17068c7a2c2bSNeilBrown count++; 17078c7a2c2bSNeilBrown if (rdev) { 17088c7a2c2bSNeilBrown /* Replaced device not technically 17098c7a2c2bSNeilBrown * faulty, but we need to be sure 17108c7a2c2bSNeilBrown * it gets removed and never re-added 17118c7a2c2bSNeilBrown */ 17128c7a2c2bSNeilBrown set_bit(Faulty, &rdev->flags); 17138c7a2c2bSNeilBrown sysfs_notify_dirent_safe( 17148c7a2c2bSNeilBrown rdev->sysfs_state); 17158c7a2c2bSNeilBrown } 17168c7a2c2bSNeilBrown } 1717ddac7c7eSNeilBrown if (rdev 171861e4947cSLukasz Dorau && rdev->recovery_offset == MaxSector 1719ddac7c7eSNeilBrown && !test_bit(Faulty, &rdev->flags) 1720c04be0aaSNeilBrown && !test_and_set_bit(In_sync, &rdev->flags)) { 17216b965620SNeilBrown count++; 1722654e8b5aSJonathan Brassow sysfs_notify_dirent_safe(rdev->sysfs_state); 17231da177e4SLinus Torvalds } 17241da177e4SLinus Torvalds } 17256b965620SNeilBrown mddev->degraded -= count; 17266b965620SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 17271da177e4SLinus Torvalds 17281da177e4SLinus Torvalds print_conf(conf); 17296b965620SNeilBrown return count; 17301da177e4SLinus Torvalds } 17311da177e4SLinus Torvalds 1732fd01b88cSNeilBrown static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) 17331da177e4SLinus Torvalds { 1734e8096360SNeilBrown struct r1conf *conf = mddev->private; 1735199050eaSNeil Brown int err = -EEXIST; 173641158c7eSNeilBrown int mirror = 0; 17370eaf822cSJonathan Brassow struct raid1_info *p; 17386c2fce2eSNeil Brown int first = 0; 173930194636SNeilBrown int last = conf->raid_disks - 1; 17401da177e4SLinus Torvalds 17415389042fSNeilBrown if (mddev->recovery_disabled == conf->recovery_disabled) 17425389042fSNeilBrown return -EBUSY; 17435389042fSNeilBrown 17441501efadSDan Williams if (md_integrity_add_rdev(rdev, mddev)) 17451501efadSDan Williams return -ENXIO; 17461501efadSDan Williams 17476c2fce2eSNeil Brown if (rdev->raid_disk >= 0) 17486c2fce2eSNeil Brown first = last = rdev->raid_disk; 17496c2fce2eSNeil Brown 175070bcecdbSGoldwyn Rodrigues /* 175170bcecdbSGoldwyn Rodrigues * find the disk ... but prefer rdev->saved_raid_disk 175270bcecdbSGoldwyn Rodrigues * if possible. 
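	 * A device re-added after a transient failure usually regains its
	 * old slot this way; conf->fullsync then stays clear below, so the
	 * bitmap can limit recovery to blocks written while it was missing.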
175370bcecdbSGoldwyn Rodrigues */ 175470bcecdbSGoldwyn Rodrigues if (rdev->saved_raid_disk >= 0 && 175570bcecdbSGoldwyn Rodrigues rdev->saved_raid_disk >= first && 175670bcecdbSGoldwyn Rodrigues conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 175770bcecdbSGoldwyn Rodrigues first = last = rdev->saved_raid_disk; 175870bcecdbSGoldwyn Rodrigues 17597ef449d1SNeilBrown for (mirror = first; mirror <= last; mirror++) { 17607ef449d1SNeilBrown p = conf->mirrors+mirror; 17617ef449d1SNeilBrown if (!p->rdev) { 17621da177e4SLinus Torvalds 17639092c02dSJonathan Brassow if (mddev->gendisk) 17648f6c2e4bSMartin K. Petersen disk_stack_limits(mddev->gendisk, rdev->bdev, 17658f6c2e4bSMartin K. Petersen rdev->data_offset << 9); 17661da177e4SLinus Torvalds 17671da177e4SLinus Torvalds p->head_position = 0; 17681da177e4SLinus Torvalds rdev->raid_disk = mirror; 1769199050eaSNeil Brown err = 0; 17706aea114aSNeilBrown /* As all devices are equivalent, we don't need a full recovery 17716aea114aSNeilBrown * if this was recently any drive of the array 17726aea114aSNeilBrown */ 17736aea114aSNeilBrown if (rdev->saved_raid_disk < 0) 177441158c7eSNeilBrown conf->fullsync = 1; 1775d6065f7bSSuzanne Wood rcu_assign_pointer(p->rdev, rdev); 17761da177e4SLinus Torvalds break; 17771da177e4SLinus Torvalds } 17787ef449d1SNeilBrown if (test_bit(WantReplacement, &p->rdev->flags) && 17797ef449d1SNeilBrown p[conf->raid_disks].rdev == NULL) { 17807ef449d1SNeilBrown /* Add this device as a replacement */ 17817ef449d1SNeilBrown clear_bit(In_sync, &rdev->flags); 17827ef449d1SNeilBrown set_bit(Replacement, &rdev->flags); 17837ef449d1SNeilBrown rdev->raid_disk = mirror; 17847ef449d1SNeilBrown err = 0; 17857ef449d1SNeilBrown conf->fullsync = 1; 17867ef449d1SNeilBrown rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); 17877ef449d1SNeilBrown break; 17887ef449d1SNeilBrown } 17897ef449d1SNeilBrown } 17909092c02dSJonathan Brassow if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 17912ff8cc2cSShaohua Li queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 17921da177e4SLinus Torvalds print_conf(conf); 1793199050eaSNeil Brown return err; 17941da177e4SLinus Torvalds } 17951da177e4SLinus Torvalds 1796b8321b68SNeilBrown static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 17971da177e4SLinus Torvalds { 1798e8096360SNeilBrown struct r1conf *conf = mddev->private; 17991da177e4SLinus Torvalds int err = 0; 1800b8321b68SNeilBrown int number = rdev->raid_disk; 18010eaf822cSJonathan Brassow struct raid1_info *p = conf->mirrors + number; 18021da177e4SLinus Torvalds 1803b014f14cSNeilBrown if (rdev != p->rdev) 1804b014f14cSNeilBrown p = conf->mirrors + conf->raid_disks + number; 1805b014f14cSNeilBrown 18061da177e4SLinus Torvalds print_conf(conf); 1807b8321b68SNeilBrown if (rdev == p->rdev) { 1808b2d444d7SNeilBrown if (test_bit(In_sync, &rdev->flags) || 18091da177e4SLinus Torvalds atomic_read(&rdev->nr_pending)) { 18101da177e4SLinus Torvalds err = -EBUSY; 18111da177e4SLinus Torvalds goto abort; 18121da177e4SLinus Torvalds } 1813046abeedSNeilBrown /* Only remove non-faulty devices if recovery 1814dfc70645SNeilBrown * is not possible. 
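		 * Removing a working device here would throw away
		 * redundancy that a later rebuild may still need.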
1815dfc70645SNeilBrown */ 1816dfc70645SNeilBrown if (!test_bit(Faulty, &rdev->flags) && 18175389042fSNeilBrown mddev->recovery_disabled != conf->recovery_disabled && 1818dfc70645SNeilBrown mddev->degraded < conf->raid_disks) { 1819dfc70645SNeilBrown err = -EBUSY; 1820dfc70645SNeilBrown goto abort; 1821dfc70645SNeilBrown } 18221da177e4SLinus Torvalds p->rdev = NULL; 1823d787be40SNeilBrown if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1824fbd568a3SPaul E. McKenney synchronize_rcu(); 18251da177e4SLinus Torvalds if (atomic_read(&rdev->nr_pending)) { 18261da177e4SLinus Torvalds /* lost the race, try later */ 18271da177e4SLinus Torvalds err = -EBUSY; 18281da177e4SLinus Torvalds p->rdev = rdev; 1829ac5e7113SAndre Noll goto abort; 1830d787be40SNeilBrown } 1831d787be40SNeilBrown } 1832d787be40SNeilBrown if (conf->mirrors[conf->raid_disks + number].rdev) { 18338c7a2c2bSNeilBrown /* We just removed a device that is being replaced. 18348c7a2c2bSNeilBrown * Move down the replacement. We drain all IO before 18358c7a2c2bSNeilBrown * doing this to avoid confusion. 18368c7a2c2bSNeilBrown */ 18378c7a2c2bSNeilBrown struct md_rdev *repl = 18388c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev; 1839e2d59925SNeilBrown freeze_array(conf, 0); 18408c7a2c2bSNeilBrown clear_bit(Replacement, &repl->flags); 18418c7a2c2bSNeilBrown p->rdev = repl; 18428c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev = NULL; 1843e2d59925SNeilBrown unfreeze_array(conf); 1844e5bc9c3cSGuoqing Jiang } 1845e5bc9c3cSGuoqing Jiang 18468c7a2c2bSNeilBrown clear_bit(WantReplacement, &rdev->flags); 1847a91a2785SMartin K. Petersen err = md_integrity_register(mddev); 18481da177e4SLinus Torvalds } 18491da177e4SLinus Torvalds abort: 18501da177e4SLinus Torvalds 18511da177e4SLinus Torvalds print_conf(conf); 18521da177e4SLinus Torvalds return err; 18531da177e4SLinus Torvalds } 18541da177e4SLinus Torvalds 18554246a0b6SChristoph Hellwig static void end_sync_read(struct bio *bio) 18561da177e4SLinus Torvalds { 185798d30c58SMing Lei struct r1bio *r1_bio = get_resync_r1bio(bio); 18581da177e4SLinus Torvalds 18590fc280f6SNeilBrown update_head_pos(r1_bio->read_disk, r1_bio); 1860ba3ae3beSNamhyung Kim 18611da177e4SLinus Torvalds /* 18621da177e4SLinus Torvalds * we have read a block, now it needs to be re-written, 18631da177e4SLinus Torvalds * or re-read if the read failed. 
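	 * (The re-write or re-read itself is driven later from
	 * sync_request_write() and fix_sync_read_error().)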
18641da177e4SLinus Torvalds 	 * We don't do much here, just schedule handling by raid1d
18651da177e4SLinus Torvalds 	 */
18664e4cbee9SChristoph Hellwig 	if (!bio->bi_status)
18671da177e4SLinus Torvalds 		set_bit(R1BIO_Uptodate, &r1_bio->state);
1868d11c171eSNeilBrown
1869d11c171eSNeilBrown 	if (atomic_dec_and_test(&r1_bio->remaining))
18701da177e4SLinus Torvalds 		reschedule_retry(r1_bio);
18711da177e4SLinus Torvalds }
18721da177e4SLinus Torvalds
18734246a0b6SChristoph Hellwig static void end_sync_write(struct bio *bio)
18741da177e4SLinus Torvalds {
18754e4cbee9SChristoph Hellwig 	int uptodate = !bio->bi_status;
187698d30c58SMing Lei 	struct r1bio *r1_bio = get_resync_r1bio(bio);
1877fd01b88cSNeilBrown 	struct mddev *mddev = r1_bio->mddev;
1878e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
18794367af55SNeilBrown 	sector_t first_bad;
18804367af55SNeilBrown 	int bad_sectors;
1881854abd75SNeilBrown 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1882ba3ae3beSNamhyung Kim
18836b1117d5SNeilBrown 	if (!uptodate) {
188457dab0bdSNeilBrown 		sector_t sync_blocks = 0;
18856b1117d5SNeilBrown 		sector_t s = r1_bio->sector;
18866b1117d5SNeilBrown 		long sectors_to_go = r1_bio->sectors;
18876b1117d5SNeilBrown 		/* make sure these bits don't get cleared. */
18886b1117d5SNeilBrown 		do {
18895e3db645SNeilBrown 			bitmap_end_sync(mddev->bitmap, s,
18906b1117d5SNeilBrown 					&sync_blocks, 1);
18916b1117d5SNeilBrown 			s += sync_blocks;
18926b1117d5SNeilBrown 			sectors_to_go -= sync_blocks;
18936b1117d5SNeilBrown 		} while (sectors_to_go > 0);
1894854abd75SNeilBrown 		set_bit(WriteErrorSeen, &rdev->flags);
1895854abd75SNeilBrown 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
189619d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
189719d67169SNeilBrown 				mddev->recovery);
1898d8f05d29SNeilBrown 		set_bit(R1BIO_WriteError, &r1_bio->state);
1899854abd75SNeilBrown 	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
19003a9f28a5SNeilBrown 			       &first_bad, &bad_sectors) &&
19013a9f28a5SNeilBrown 		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
19023a9f28a5SNeilBrown 				r1_bio->sector,
19033a9f28a5SNeilBrown 				r1_bio->sectors,
19043a9f28a5SNeilBrown 				&first_bad, &bad_sectors)
19053a9f28a5SNeilBrown 		)
19064367af55SNeilBrown 		set_bit(R1BIO_MadeGood, &r1_bio->state);
1907e3b9703eSNeilBrown
19081da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
19094367af55SNeilBrown 		int s = r1_bio->sectors;
1910d8f05d29SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1911d8f05d29SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
19124367af55SNeilBrown 			reschedule_retry(r1_bio);
19134367af55SNeilBrown 		else {
19141da177e4SLinus Torvalds 			put_buf(r1_bio);
191573d5c38aSNeilBrown 			md_done_sync(mddev, s, uptodate);
19161da177e4SLinus Torvalds 		}
19171da177e4SLinus Torvalds 	}
19184367af55SNeilBrown }
19191da177e4SLinus Torvalds
19203cb03002SNeilBrown static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1921d8f05d29SNeilBrown 			   int sectors, struct page *page, int rw)
1922d8f05d29SNeilBrown {
1923796a5cf0SMike Christie 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1924d8f05d29SNeilBrown 		/* success */
1925d8f05d29SNeilBrown 		return 1;
192619d67169SNeilBrown 	if (rw == WRITE) {
1927d8f05d29SNeilBrown 		set_bit(WriteErrorSeen, &rdev->flags);
192819d67169SNeilBrown 		if (!test_and_set_bit(WantReplacement,
192919d67169SNeilBrown 				      &rdev->flags))
193019d67169SNeilBrown 			set_bit(MD_RECOVERY_NEEDED, &
193119d67169SNeilBrown 				rdev->mddev->recovery);
193219d67169SNeilBrown 	}
1933d8f05d29SNeilBrown 	/* need to
record an error - either for the block or the device */ 1934d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1935d8f05d29SNeilBrown md_error(rdev->mddev, rdev); 1936d8f05d29SNeilBrown return 0; 1937d8f05d29SNeilBrown } 1938d8f05d29SNeilBrown 19399f2c9d12SNeilBrown static int fix_sync_read_error(struct r1bio *r1_bio) 19401da177e4SLinus Torvalds { 1941a68e5870SNeilBrown /* Try some synchronous reads of other devices to get 194269382e85SNeilBrown * good data, much like with normal read errors. Only 1943ddac7c7eSNeilBrown * read into the pages we already have so we don't 194469382e85SNeilBrown * need to re-issue the read request. 194569382e85SNeilBrown * We don't need to freeze the array, because being in an 194669382e85SNeilBrown * active sync request, there is no normal IO, and 194769382e85SNeilBrown * no overlapping syncs. 194806f60385SNeilBrown * We don't need to check is_badblock() again as we 194906f60385SNeilBrown * made sure that anything with a bad block in range 195006f60385SNeilBrown * will have bi_end_io clear. 19511da177e4SLinus Torvalds */ 1952fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 1953e8096360SNeilBrown struct r1conf *conf = mddev->private; 1954a68e5870SNeilBrown struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 195544cf0f4dSMing Lei struct page **pages = get_resync_pages(bio)->pages; 195669382e85SNeilBrown sector_t sect = r1_bio->sector; 195769382e85SNeilBrown int sectors = r1_bio->sectors; 195869382e85SNeilBrown int idx = 0; 19592e52d449SNeilBrown struct md_rdev *rdev; 19602e52d449SNeilBrown 19612e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev; 19622e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags)) { 19632e52d449SNeilBrown /* Don't try recovering from here - just fail it 19642e52d449SNeilBrown * ... 
unless it is the last working device of course */
19652e52d449SNeilBrown 		md_error(mddev, rdev);
19662e52d449SNeilBrown 		if (test_bit(Faulty, &rdev->flags))
19672e52d449SNeilBrown 			/* Don't try to read from here, but make sure
19682e52d449SNeilBrown 			 * put_buf does its thing
19692e52d449SNeilBrown 			 */
19702e52d449SNeilBrown 			bio->bi_end_io = end_sync_write;
19712e52d449SNeilBrown 	}
197269382e85SNeilBrown
197369382e85SNeilBrown 	while(sectors) {
197469382e85SNeilBrown 		int s = sectors;
197569382e85SNeilBrown 		int d = r1_bio->read_disk;
197669382e85SNeilBrown 		int success = 0;
197778d7f5f7SNeilBrown 		int start;
197869382e85SNeilBrown
197969382e85SNeilBrown 		if (s > (PAGE_SIZE>>9))
198069382e85SNeilBrown 			s = PAGE_SIZE >> 9;
198169382e85SNeilBrown 		do {
198269382e85SNeilBrown 			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1983ddac7c7eSNeilBrown 				/* No rcu protection needed here; devices
1984ddac7c7eSNeilBrown 				 * can only be removed when no resync is
1985ddac7c7eSNeilBrown 				 * active, and resync is currently active
1986ddac7c7eSNeilBrown 				 */
198769382e85SNeilBrown 				rdev = conf->mirrors[d].rdev;
19889d3d8011SNamhyung Kim 				if (sync_page_io(rdev, sect, s<<9,
198944cf0f4dSMing Lei 						 pages[idx],
1990796a5cf0SMike Christie 						 REQ_OP_READ, 0, false)) {
199169382e85SNeilBrown 					success = 1;
199269382e85SNeilBrown 					break;
199369382e85SNeilBrown 				}
199469382e85SNeilBrown 			}
199569382e85SNeilBrown 			d++;
19968f19ccb2SNeilBrown 			if (d == conf->raid_disks * 2)
199769382e85SNeilBrown 				d = 0;
199869382e85SNeilBrown 		} while (!success && d != r1_bio->read_disk);
199969382e85SNeilBrown
200078d7f5f7SNeilBrown 		if (!success) {
200178d7f5f7SNeilBrown 			char b[BDEVNAME_SIZE];
20023a9f28a5SNeilBrown 			int abort = 0;
20033a9f28a5SNeilBrown 			/* Cannot read from anywhere, this block is lost.
20043a9f28a5SNeilBrown 			 * Record a bad block on each device.  If that doesn't
20053a9f28a5SNeilBrown 			 * work just disable and interrupt the recovery.
20063a9f28a5SNeilBrown 			 * Don't fail devices as that won't really help.
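			 * Recording the bad range on every mirror confines
			 * the damage to this block; the resync is aborted
			 * only when even that bookkeeping fails.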
20073a9f28a5SNeilBrown */ 20081d41c216SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 200978d7f5f7SNeilBrown mdname(mddev), 201078d7f5f7SNeilBrown bdevname(bio->bi_bdev, b), 201178d7f5f7SNeilBrown (unsigned long long)r1_bio->sector); 20128f19ccb2SNeilBrown for (d = 0; d < conf->raid_disks * 2; d++) { 20133a9f28a5SNeilBrown rdev = conf->mirrors[d].rdev; 20143a9f28a5SNeilBrown if (!rdev || test_bit(Faulty, &rdev->flags)) 20153a9f28a5SNeilBrown continue; 20163a9f28a5SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 20173a9f28a5SNeilBrown abort = 1; 20183a9f28a5SNeilBrown } 20193a9f28a5SNeilBrown if (abort) { 2020d890fa2bSNeilBrown conf->recovery_disabled = 2021d890fa2bSNeilBrown mddev->recovery_disabled; 20223a9f28a5SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 202378d7f5f7SNeilBrown md_done_sync(mddev, r1_bio->sectors, 0); 202478d7f5f7SNeilBrown put_buf(r1_bio); 202578d7f5f7SNeilBrown return 0; 202678d7f5f7SNeilBrown } 20273a9f28a5SNeilBrown /* Try next page */ 20283a9f28a5SNeilBrown sectors -= s; 20293a9f28a5SNeilBrown sect += s; 20303a9f28a5SNeilBrown idx++; 20313a9f28a5SNeilBrown continue; 20323a9f28a5SNeilBrown } 203378d7f5f7SNeilBrown 203478d7f5f7SNeilBrown start = d; 203569382e85SNeilBrown /* write it back and re-read */ 203669382e85SNeilBrown while (d != r1_bio->read_disk) { 203769382e85SNeilBrown if (d == 0) 20388f19ccb2SNeilBrown d = conf->raid_disks * 2; 203969382e85SNeilBrown d--; 204069382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 204169382e85SNeilBrown continue; 204269382e85SNeilBrown rdev = conf->mirrors[d].rdev; 2043d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 204444cf0f4dSMing Lei pages[idx], 2045d8f05d29SNeilBrown WRITE) == 0) { 204678d7f5f7SNeilBrown r1_bio->bios[d]->bi_end_io = NULL; 204778d7f5f7SNeilBrown rdev_dec_pending(rdev, mddev); 20489d3d8011SNamhyung Kim } 2049097426f6SNeilBrown } 2050097426f6SNeilBrown d = start; 2051097426f6SNeilBrown while (d != r1_bio->read_disk) { 2052097426f6SNeilBrown if (d == 0) 20538f19ccb2SNeilBrown d = conf->raid_disks * 2; 2054097426f6SNeilBrown d--; 2055097426f6SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 2056097426f6SNeilBrown continue; 2057097426f6SNeilBrown rdev = conf->mirrors[d].rdev; 2058d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 205944cf0f4dSMing Lei pages[idx], 2060d8f05d29SNeilBrown READ) != 0) 20619d3d8011SNamhyung Kim atomic_add(s, &rdev->corrected_errors); 206269382e85SNeilBrown } 206369382e85SNeilBrown sectors -= s; 206469382e85SNeilBrown sect += s; 206569382e85SNeilBrown idx ++; 206669382e85SNeilBrown } 206778d7f5f7SNeilBrown set_bit(R1BIO_Uptodate, &r1_bio->state); 20684e4cbee9SChristoph Hellwig bio->bi_status = 0; 2069a68e5870SNeilBrown return 1; 207069382e85SNeilBrown } 2071d11c171eSNeilBrown 2072c95e6385SNeilBrown static void process_checks(struct r1bio *r1_bio) 2073a68e5870SNeilBrown { 2074a68e5870SNeilBrown /* We have read all readable devices. If we haven't 2075a68e5870SNeilBrown * got the block, then there is no hope left. 2076a68e5870SNeilBrown * If we have, then we want to do a comparison 2077a68e5870SNeilBrown * and skip the write if everything is the same. 
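	 * (Only bios whose resync read completed take part in the check;
	 * the first mirror that read successfully becomes the primary.)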
2078a68e5870SNeilBrown * If any blocks failed to read, then we need to 2079a68e5870SNeilBrown * attempt an over-write 2080a68e5870SNeilBrown */ 2081fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 2082e8096360SNeilBrown struct r1conf *conf = mddev->private; 2083a68e5870SNeilBrown int primary; 2084a68e5870SNeilBrown int i; 2085f4380a91Smajianpeng int vcnt; 2086a68e5870SNeilBrown 208730bc9b53SNeilBrown /* Fix variable parts of all bios */ 208830bc9b53SNeilBrown vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 208930bc9b53SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 20904e4cbee9SChristoph Hellwig blk_status_t status; 209130bc9b53SNeilBrown struct bio *b = r1_bio->bios[i]; 209298d30c58SMing Lei struct resync_pages *rp = get_resync_pages(b); 209330bc9b53SNeilBrown if (b->bi_end_io != end_sync_read) 209430bc9b53SNeilBrown continue; 20954246a0b6SChristoph Hellwig /* fixup the bio for reuse, but preserve errno */ 20964e4cbee9SChristoph Hellwig status = b->bi_status; 209730bc9b53SNeilBrown bio_reset(b); 20984e4cbee9SChristoph Hellwig b->bi_status = status; 20994f024f37SKent Overstreet b->bi_iter.bi_sector = r1_bio->sector + 210030bc9b53SNeilBrown conf->mirrors[i].rdev->data_offset; 210130bc9b53SNeilBrown b->bi_bdev = conf->mirrors[i].rdev->bdev; 210230bc9b53SNeilBrown b->bi_end_io = end_sync_read; 210398d30c58SMing Lei rp->raid_bio = r1_bio; 210498d30c58SMing Lei b->bi_private = rp; 210530bc9b53SNeilBrown 2106fb0eb5dfSMing Lei /* initialize bvec table again */ 2107fb0eb5dfSMing Lei md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); 210830bc9b53SNeilBrown } 21098f19ccb2SNeilBrown for (primary = 0; primary < conf->raid_disks * 2; primary++) 2110a68e5870SNeilBrown if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 21114e4cbee9SChristoph Hellwig !r1_bio->bios[primary]->bi_status) { 2112a68e5870SNeilBrown r1_bio->bios[primary]->bi_end_io = NULL; 2113a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2114a68e5870SNeilBrown break; 2115a68e5870SNeilBrown } 2116a68e5870SNeilBrown r1_bio->read_disk = primary; 21178f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 2118a68e5870SNeilBrown int j; 2119a68e5870SNeilBrown struct bio *pbio = r1_bio->bios[primary]; 2120a68e5870SNeilBrown struct bio *sbio = r1_bio->bios[i]; 21214e4cbee9SChristoph Hellwig blk_status_t status = sbio->bi_status; 212244cf0f4dSMing Lei struct page **ppages = get_resync_pages(pbio)->pages; 212344cf0f4dSMing Lei struct page **spages = get_resync_pages(sbio)->pages; 212460928a91SMing Lei struct bio_vec *bi; 21258fc04e6eSMing Lei int page_len[RESYNC_PAGES] = { 0 }; 212678d7f5f7SNeilBrown 21272aabaa65SKent Overstreet if (sbio->bi_end_io != end_sync_read) 212878d7f5f7SNeilBrown continue; 21294246a0b6SChristoph Hellwig /* Now we can 'fixup' the error value */ 21304e4cbee9SChristoph Hellwig sbio->bi_status = 0; 2131a68e5870SNeilBrown 213260928a91SMing Lei bio_for_each_segment_all(bi, sbio, j) 213360928a91SMing Lei page_len[j] = bi->bv_len; 213460928a91SMing Lei 21354e4cbee9SChristoph Hellwig if (!status) { 2136a68e5870SNeilBrown for (j = vcnt; j-- ; ) { 213744cf0f4dSMing Lei if (memcmp(page_address(ppages[j]), 213844cf0f4dSMing Lei page_address(spages[j]), 213960928a91SMing Lei page_len[j])) 2140a68e5870SNeilBrown break; 2141a68e5870SNeilBrown } 2142a68e5870SNeilBrown } else 2143a68e5870SNeilBrown j = 0; 2144a68e5870SNeilBrown if (j >= 0) 21457f7583d4SJianpeng Ma atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2146a68e5870SNeilBrown if (j < 0 || 
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
21474e4cbee9SChristoph Hellwig 			      && !status)) {
214878d7f5f7SNeilBrown 			/* No need to write to this device. */
2149a68e5870SNeilBrown 			sbio->bi_end_io = NULL;
2150a68e5870SNeilBrown 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
215178d7f5f7SNeilBrown 			continue;
215278d7f5f7SNeilBrown 		}
2153d3b45c2aSKent Overstreet
2154d3b45c2aSKent Overstreet 		bio_copy_data(sbio, pbio);
2155a68e5870SNeilBrown 	}
2156a68e5870SNeilBrown }
2157a68e5870SNeilBrown
21589f2c9d12SNeilBrown static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2159a68e5870SNeilBrown {
2160e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
2161a68e5870SNeilBrown 	int i;
21628f19ccb2SNeilBrown 	int disks = conf->raid_disks * 2;
2163037d2ff6SGuoqing Jiang 	struct bio *wbio;
2164a68e5870SNeilBrown
2165a68e5870SNeilBrown 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2166a68e5870SNeilBrown 		/* ouch - failed to read all of that. */
2167a68e5870SNeilBrown 		if (!fix_sync_read_error(r1_bio))
2168a68e5870SNeilBrown 			return;
21697ca78d57SNeilBrown
21707ca78d57SNeilBrown 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2171c95e6385SNeilBrown 		process_checks(r1_bio);
2172c95e6385SNeilBrown
2173d11c171eSNeilBrown 	/*
2174d11c171eSNeilBrown 	 * schedule writes
2175d11c171eSNeilBrown 	 */
21761da177e4SLinus Torvalds 	atomic_set(&r1_bio->remaining, 1);
21771da177e4SLinus Torvalds 	for (i = 0; i < disks ; i++) {
21781da177e4SLinus Torvalds 		wbio = r1_bio->bios[i];
21793e198f78SNeilBrown 		if (wbio->bi_end_io == NULL ||
21803e198f78SNeilBrown 		    (wbio->bi_end_io == end_sync_read &&
21813e198f78SNeilBrown 		     (i == r1_bio->read_disk ||
21823e198f78SNeilBrown 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
21831da177e4SLinus Torvalds 			continue;
21840c9d5b12SNeilBrown 		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
21850c9d5b12SNeilBrown 			continue;
21861da177e4SLinus Torvalds
2187796a5cf0SMike Christie 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2188212e7eb7SNeilBrown 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2189212e7eb7SNeilBrown 			wbio->bi_opf |= MD_FAILFAST;
2190212e7eb7SNeilBrown
21913e198f78SNeilBrown 		wbio->bi_end_io = end_sync_write;
21921da177e4SLinus Torvalds 		atomic_inc(&r1_bio->remaining);
2193aa8b57aaSKent Overstreet 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2194191ea9b2SNeilBrown
21951da177e4SLinus Torvalds 		generic_make_request(wbio);
21961da177e4SLinus Torvalds 	}
21971da177e4SLinus Torvalds
21981da177e4SLinus Torvalds 	if (atomic_dec_and_test(&r1_bio->remaining)) {
2199191ea9b2SNeilBrown 		/* if we're here, all write(s) have completed, so clean up */
220058e94ae1SNeilBrown 		int s = r1_bio->sectors;
220158e94ae1SNeilBrown 		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
220258e94ae1SNeilBrown 		    test_bit(R1BIO_WriteError, &r1_bio->state))
220358e94ae1SNeilBrown 			reschedule_retry(r1_bio);
220458e94ae1SNeilBrown 		else {
22051da177e4SLinus Torvalds 			put_buf(r1_bio);
220658e94ae1SNeilBrown 			md_done_sync(mddev, s, 1);
220758e94ae1SNeilBrown 		}
22081da177e4SLinus Torvalds 	}
22091da177e4SLinus Torvalds }
22101da177e4SLinus Torvalds
22111da177e4SLinus Torvalds /*
22121da177e4SLinus Torvalds  * This is a kernel thread which:
22131da177e4SLinus Torvalds  *
22141da177e4SLinus Torvalds  *	1.	Retries failed read operations on working mirrors.
22151da177e4SLinus Torvalds  *	2.	Updates the raid superblock when problems are encountered.
2216d2eb35acSNeilBrown  *	3.	Performs writes following reads for array synchronising.
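 *
 * (raid1d() drives these through helpers such as sync_request_write()
 * and handle_write_finished() in this file.)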
22171da177e4SLinus Torvalds */ 22181da177e4SLinus Torvalds 2219e8096360SNeilBrown static void fix_read_error(struct r1conf *conf, int read_disk, 2220867868fbSNeilBrown sector_t sect, int sectors) 2221867868fbSNeilBrown { 2222fd01b88cSNeilBrown struct mddev *mddev = conf->mddev; 2223867868fbSNeilBrown while(sectors) { 2224867868fbSNeilBrown int s = sectors; 2225867868fbSNeilBrown int d = read_disk; 2226867868fbSNeilBrown int success = 0; 2227867868fbSNeilBrown int start; 22283cb03002SNeilBrown struct md_rdev *rdev; 2229867868fbSNeilBrown 2230867868fbSNeilBrown if (s > (PAGE_SIZE>>9)) 2231867868fbSNeilBrown s = PAGE_SIZE >> 9; 2232867868fbSNeilBrown 2233867868fbSNeilBrown do { 2234d2eb35acSNeilBrown sector_t first_bad; 2235d2eb35acSNeilBrown int bad_sectors; 2236d2eb35acSNeilBrown 2237707a6a42SNeilBrown rcu_read_lock(); 2238707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2239867868fbSNeilBrown if (rdev && 2240da8840a7Smajianpeng (test_bit(In_sync, &rdev->flags) || 2241da8840a7Smajianpeng (!test_bit(Faulty, &rdev->flags) && 2242da8840a7Smajianpeng rdev->recovery_offset >= sect + s)) && 2243d2eb35acSNeilBrown is_badblock(rdev, sect, s, 2244707a6a42SNeilBrown &first_bad, &bad_sectors) == 0) { 2245707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2246707a6a42SNeilBrown rcu_read_unlock(); 2247707a6a42SNeilBrown if (sync_page_io(rdev, sect, s<<9, 2248796a5cf0SMike Christie conf->tmppage, REQ_OP_READ, 0, false)) 2249867868fbSNeilBrown success = 1; 2250707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2251707a6a42SNeilBrown if (success) 2252707a6a42SNeilBrown break; 2253707a6a42SNeilBrown } else 2254707a6a42SNeilBrown rcu_read_unlock(); 2255867868fbSNeilBrown d++; 22568f19ccb2SNeilBrown if (d == conf->raid_disks * 2) 2257867868fbSNeilBrown d = 0; 2258867868fbSNeilBrown } while (!success && d != read_disk); 2259867868fbSNeilBrown 2260867868fbSNeilBrown if (!success) { 2261d8f05d29SNeilBrown /* Cannot read from anywhere - mark it bad */ 22623cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[read_disk].rdev; 2263d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 2264d8f05d29SNeilBrown md_error(mddev, rdev); 2265867868fbSNeilBrown break; 2266867868fbSNeilBrown } 2267867868fbSNeilBrown /* write it back and re-read */ 2268867868fbSNeilBrown start = d; 2269867868fbSNeilBrown while (d != read_disk) { 2270867868fbSNeilBrown if (d==0) 22718f19ccb2SNeilBrown d = conf->raid_disks * 2; 2272867868fbSNeilBrown d--; 2273707a6a42SNeilBrown rcu_read_lock(); 2274707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2275867868fbSNeilBrown if (rdev && 2276707a6a42SNeilBrown !test_bit(Faulty, &rdev->flags)) { 2277707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2278707a6a42SNeilBrown rcu_read_unlock(); 2279d8f05d29SNeilBrown r1_sync_page_io(rdev, sect, s, 2280d8f05d29SNeilBrown conf->tmppage, WRITE); 2281707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2282707a6a42SNeilBrown } else 2283707a6a42SNeilBrown rcu_read_unlock(); 2284867868fbSNeilBrown } 2285867868fbSNeilBrown d = start; 2286867868fbSNeilBrown while (d != read_disk) { 2287867868fbSNeilBrown char b[BDEVNAME_SIZE]; 2288867868fbSNeilBrown if (d==0) 22898f19ccb2SNeilBrown d = conf->raid_disks * 2; 2290867868fbSNeilBrown d--; 2291707a6a42SNeilBrown rcu_read_lock(); 2292707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2293867868fbSNeilBrown if (rdev && 2294b8cb6b4cSNeilBrown !test_bit(Faulty, &rdev->flags)) { 2295707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2296707a6a42SNeilBrown rcu_read_unlock(); 
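				/*
				 * Re-read the range just rewritten; a clean
				 * read confirms the repair and is counted in
				 * corrected_errors below.
				 */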
2297d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 2298d8f05d29SNeilBrown conf->tmppage, READ)) { 2299867868fbSNeilBrown atomic_add(s, &rdev->corrected_errors); 23001d41c216SNeilBrown pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n", 2301867868fbSNeilBrown mdname(mddev), s, 2302969b755aSRandy Dunlap (unsigned long long)(sect + 2303969b755aSRandy Dunlap rdev->data_offset), 2304867868fbSNeilBrown bdevname(rdev->bdev, b)); 2305867868fbSNeilBrown } 2306707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2307707a6a42SNeilBrown } else 2308707a6a42SNeilBrown rcu_read_unlock(); 2309867868fbSNeilBrown } 2310867868fbSNeilBrown sectors -= s; 2311867868fbSNeilBrown sect += s; 2312867868fbSNeilBrown } 2313867868fbSNeilBrown } 2314867868fbSNeilBrown 23159f2c9d12SNeilBrown static int narrow_write_error(struct r1bio *r1_bio, int i) 2316cd5ff9a1SNeilBrown { 2317fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 2318e8096360SNeilBrown struct r1conf *conf = mddev->private; 23193cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev; 2320cd5ff9a1SNeilBrown 2321cd5ff9a1SNeilBrown /* bio has the data to be written to device 'i' where 2322cd5ff9a1SNeilBrown * we just recently had a write error. 2323cd5ff9a1SNeilBrown * We repeatedly clone the bio and trim down to one block, 2324cd5ff9a1SNeilBrown * then try the write. Where the write fails we record 2325cd5ff9a1SNeilBrown * a bad block. 2326cd5ff9a1SNeilBrown * It is conceivable that the bio doesn't exactly align with 2327cd5ff9a1SNeilBrown * blocks. We must handle this somehow. 2328cd5ff9a1SNeilBrown * 2329cd5ff9a1SNeilBrown * We currently own a reference on the rdev. 2330cd5ff9a1SNeilBrown */ 2331cd5ff9a1SNeilBrown 2332cd5ff9a1SNeilBrown int block_sectors; 2333cd5ff9a1SNeilBrown sector_t sector; 2334cd5ff9a1SNeilBrown int sectors; 2335cd5ff9a1SNeilBrown int sect_to_write = r1_bio->sectors; 2336cd5ff9a1SNeilBrown int ok = 1; 2337cd5ff9a1SNeilBrown 2338cd5ff9a1SNeilBrown if (rdev->badblocks.shift < 0) 2339cd5ff9a1SNeilBrown return 0; 2340cd5ff9a1SNeilBrown 2341ab713cdcSNate Dailey block_sectors = roundup(1 << rdev->badblocks.shift, 2342ab713cdcSNate Dailey bdev_logical_block_size(rdev->bdev) >> 9); 2343cd5ff9a1SNeilBrown sector = r1_bio->sector; 2344cd5ff9a1SNeilBrown sectors = ((sector + block_sectors) 2345cd5ff9a1SNeilBrown & ~(sector_t)(block_sectors - 1)) 2346cd5ff9a1SNeilBrown - sector; 2347cd5ff9a1SNeilBrown 2348cd5ff9a1SNeilBrown while (sect_to_write) { 2349cd5ff9a1SNeilBrown struct bio *wbio; 2350cd5ff9a1SNeilBrown if (sectors > sect_to_write) 2351cd5ff9a1SNeilBrown sectors = sect_to_write; 2352cd5ff9a1SNeilBrown /* Write at 'sector' for 'sectors'*/ 2353cd5ff9a1SNeilBrown 2354b783863fSKent Overstreet if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 2355841c1316SMing Lei wbio = bio_clone_fast(r1_bio->behind_master_bio, 2356841c1316SMing Lei GFP_NOIO, 2357841c1316SMing Lei mddev->bio_set); 2358841c1316SMing Lei /* We really need a _all clone */ 2359841c1316SMing Lei wbio->bi_iter = (struct bvec_iter){ 0 }; 2360b783863fSKent Overstreet } else { 2361d7a10308SMing Lei wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2362d7a10308SMing Lei mddev->bio_set); 2363b783863fSKent Overstreet } 2364b783863fSKent Overstreet 2365796a5cf0SMike Christie bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 23664f024f37SKent Overstreet wbio->bi_iter.bi_sector = r1_bio->sector; 23674f024f37SKent Overstreet wbio->bi_iter.bi_size = r1_bio->sectors << 9; 2368cd5ff9a1SNeilBrown 23696678d83fSKent Overstreet bio_trim(wbio, sector - r1_bio->sector, 
sectors); 23704f024f37SKent Overstreet wbio->bi_iter.bi_sector += rdev->data_offset; 2371cd5ff9a1SNeilBrown wbio->bi_bdev = rdev->bdev; 23724e49ea4aSMike Christie 23734e49ea4aSMike Christie if (submit_bio_wait(wbio) < 0) 2374cd5ff9a1SNeilBrown /* failure! */ 2375cd5ff9a1SNeilBrown ok = rdev_set_badblocks(rdev, sector, 2376cd5ff9a1SNeilBrown sectors, 0) 2377cd5ff9a1SNeilBrown && ok; 2378cd5ff9a1SNeilBrown 2379cd5ff9a1SNeilBrown bio_put(wbio); 2380cd5ff9a1SNeilBrown sect_to_write -= sectors; 2381cd5ff9a1SNeilBrown sector += sectors; 2382cd5ff9a1SNeilBrown sectors = block_sectors; 2383cd5ff9a1SNeilBrown } 2384cd5ff9a1SNeilBrown return ok; 2385cd5ff9a1SNeilBrown } 2386cd5ff9a1SNeilBrown 2387e8096360SNeilBrown static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 238862096bceSNeilBrown { 238962096bceSNeilBrown int m; 239062096bceSNeilBrown int s = r1_bio->sectors; 23918f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++) { 23923cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev; 239362096bceSNeilBrown struct bio *bio = r1_bio->bios[m]; 239462096bceSNeilBrown if (bio->bi_end_io == NULL) 239562096bceSNeilBrown continue; 23964e4cbee9SChristoph Hellwig if (!bio->bi_status && 239762096bceSNeilBrown test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2398c6563a8cSNeilBrown rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 239962096bceSNeilBrown } 24004e4cbee9SChristoph Hellwig if (bio->bi_status && 240162096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) { 240262096bceSNeilBrown if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 240362096bceSNeilBrown md_error(conf->mddev, rdev); 240462096bceSNeilBrown } 240562096bceSNeilBrown } 240662096bceSNeilBrown put_buf(r1_bio); 240762096bceSNeilBrown md_done_sync(conf->mddev, s, 1); 240862096bceSNeilBrown } 240962096bceSNeilBrown 2410e8096360SNeilBrown static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 241162096bceSNeilBrown { 2412fd76863eScolyli@suse.de int m, idx; 241355ce74d4SNeilBrown bool fail = false; 2414fd76863eScolyli@suse.de 24158f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++) 241662096bceSNeilBrown if (r1_bio->bios[m] == IO_MADE_GOOD) { 24173cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev; 241862096bceSNeilBrown rdev_clear_badblocks(rdev, 241962096bceSNeilBrown r1_bio->sector, 2420c6563a8cSNeilBrown r1_bio->sectors, 0); 242162096bceSNeilBrown rdev_dec_pending(rdev, conf->mddev); 242262096bceSNeilBrown } else if (r1_bio->bios[m] != NULL) { 242362096bceSNeilBrown /* This drive got a write error. We need to 242462096bceSNeilBrown * narrow down and record precise write 242562096bceSNeilBrown * errors. 
242662096bceSNeilBrown */ 242755ce74d4SNeilBrown fail = true; 242862096bceSNeilBrown if (!narrow_write_error(r1_bio, m)) { 242962096bceSNeilBrown md_error(conf->mddev, 243062096bceSNeilBrown conf->mirrors[m].rdev); 243162096bceSNeilBrown /* an I/O failed, we can't clear the bitmap */ 243262096bceSNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 243362096bceSNeilBrown } 243462096bceSNeilBrown rdev_dec_pending(conf->mirrors[m].rdev, 243562096bceSNeilBrown conf->mddev); 243662096bceSNeilBrown } 243755ce74d4SNeilBrown if (fail) { 243855ce74d4SNeilBrown spin_lock_irq(&conf->device_lock); 243955ce74d4SNeilBrown list_add(&r1_bio->retry_list, &conf->bio_end_io_list); 2440fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2441824e47daScolyli@suse.de atomic_inc(&conf->nr_queued[idx]); 244255ce74d4SNeilBrown spin_unlock_irq(&conf->device_lock); 2443824e47daScolyli@suse.de /* 2444824e47daScolyli@suse.de * In case freeze_array() is waiting for the condition 2445824e47daScolyli@suse.de * get_unqueued_pending() == extra to become true. 2446824e47daScolyli@suse.de */ 2447824e47daScolyli@suse.de wake_up(&conf->wait_barrier); 244855ce74d4SNeilBrown md_wakeup_thread(conf->mddev->thread); 2449bd8688a1SNeilBrown } else { 2450bd8688a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2451bd8688a1SNeilBrown close_write(r1_bio); 245262096bceSNeilBrown raid_end_bio_io(r1_bio); 245362096bceSNeilBrown } 2454bd8688a1SNeilBrown } 245562096bceSNeilBrown 2456e8096360SNeilBrown static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) 245762096bceSNeilBrown { 2458fd01b88cSNeilBrown struct mddev *mddev = conf->mddev; 245962096bceSNeilBrown struct bio *bio; 24603cb03002SNeilBrown struct md_rdev *rdev; 2461109e3765SNeilBrown dev_t bio_dev; 2462109e3765SNeilBrown sector_t bio_sector; 246362096bceSNeilBrown 246462096bceSNeilBrown clear_bit(R1BIO_ReadError, &r1_bio->state); 246562096bceSNeilBrown /* we got a read error. Maybe the drive is bad. Maybe just 246662096bceSNeilBrown * the block, and we can fix it. 246762096bceSNeilBrown * We freeze all other IO, and try reading the block from 246862096bceSNeilBrown * other devices. When we find one, we re-write 246962096bceSNeilBrown * and check whether that fixes the read error.
247062096bceSNeilBrown * This is all done synchronously while the array is 247162096bceSNeilBrown * frozen 247262096bceSNeilBrown */ 24737449f699STomasz Majchrzak 24747449f699STomasz Majchrzak bio = r1_bio->bios[r1_bio->read_disk]; 2475109e3765SNeilBrown bio_dev = bio->bi_bdev->bd_dev; 2476109e3765SNeilBrown bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; 24777449f699STomasz Majchrzak bio_put(bio); 24787449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = NULL; 24797449f699STomasz Majchrzak 24802e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev; 24812e52d449SNeilBrown if (mddev->ro == 0 24822e52d449SNeilBrown && !test_bit(FailFast, &rdev->flags)) { 2483e2d59925SNeilBrown freeze_array(conf, 1); 248462096bceSNeilBrown fix_read_error(conf, r1_bio->read_disk, 248562096bceSNeilBrown r1_bio->sector, r1_bio->sectors); 248662096bceSNeilBrown unfreeze_array(conf); 24877449f699STomasz Majchrzak } else { 24887449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; 24897449f699STomasz Majchrzak } 24907449f699STomasz Majchrzak 24912e52d449SNeilBrown rdev_dec_pending(rdev, conf->mddev); 2492689389a0SNeilBrown allow_barrier(conf, r1_bio->sector); 2493689389a0SNeilBrown bio = r1_bio->master_bio; 249462096bceSNeilBrown 2495689389a0SNeilBrown /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */ 2496689389a0SNeilBrown r1_bio->state = 0; 2497689389a0SNeilBrown raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); 2498109e3765SNeilBrown } 249962096bceSNeilBrown 25004ed8731dSShaohua Li static void raid1d(struct md_thread *thread) 25011da177e4SLinus Torvalds { 25024ed8731dSShaohua Li struct mddev *mddev = thread->mddev; 25039f2c9d12SNeilBrown struct r1bio *r1_bio; 25041da177e4SLinus Torvalds unsigned long flags; 2505e8096360SNeilBrown struct r1conf *conf = mddev->private; 25061da177e4SLinus Torvalds struct list_head *head = &conf->retry_list; 2507e1dfa0a2SNeilBrown struct blk_plug plug; 2508fd76863eScolyli@suse.de int idx; 25091da177e4SLinus Torvalds 25101da177e4SLinus Torvalds md_check_recovery(mddev); 25111da177e4SLinus Torvalds 251255ce74d4SNeilBrown if (!list_empty_careful(&conf->bio_end_io_list) && 25132953079cSShaohua Li !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 251455ce74d4SNeilBrown LIST_HEAD(tmp); 251555ce74d4SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 2516fd76863eScolyli@suse.de if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 2517fd76863eScolyli@suse.de list_splice_init(&conf->bio_end_io_list, &tmp); 251855ce74d4SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 251955ce74d4SNeilBrown while (!list_empty(&tmp)) { 2520a452744bSMikulas Patocka r1_bio = list_first_entry(&tmp, struct r1bio, 2521a452744bSMikulas Patocka retry_list); 252255ce74d4SNeilBrown list_del(&r1_bio->retry_list); 2523fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2524824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]); 2525bd8688a1SNeilBrown if (mddev->degraded) 2526bd8688a1SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 2527bd8688a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2528bd8688a1SNeilBrown close_write(r1_bio); 252955ce74d4SNeilBrown raid_end_bio_io(r1_bio); 253055ce74d4SNeilBrown } 253155ce74d4SNeilBrown } 253255ce74d4SNeilBrown 2533e1dfa0a2SNeilBrown blk_start_plug(&plug); 25341da177e4SLinus Torvalds for (;;) { 2535a35e63efSNeilBrown 25367eaceaccSJens Axboe flush_pending_writes(conf); 2537a35e63efSNeilBrown 25381da177e4SLinus Torvalds 
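/* Take the next r1_bio off the retry list under device_lock, then dispatch it below according to its state bits. */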
spin_lock_irqsave(&conf->device_lock, flags); 2539a35e63efSNeilBrown if (list_empty(head)) { 2540191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 25411da177e4SLinus Torvalds break; 2542a35e63efSNeilBrown } 25439f2c9d12SNeilBrown r1_bio = list_entry(head->prev, struct r1bio, retry_list); 25441da177e4SLinus Torvalds list_del(head->prev); 2545fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2546824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]); 25471da177e4SLinus Torvalds spin_unlock_irqrestore(&conf->device_lock, flags); 25481da177e4SLinus Torvalds 25491da177e4SLinus Torvalds mddev = r1_bio->mddev; 2550070ec55dSNeilBrown conf = mddev->private; 25514367af55SNeilBrown if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 2552d8f05d29SNeilBrown if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 255362096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) 255462096bceSNeilBrown handle_sync_write_finished(conf, r1_bio); 255562096bceSNeilBrown else 25561da177e4SLinus Torvalds sync_request_write(mddev, r1_bio); 2557cd5ff9a1SNeilBrown } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 255862096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) 255962096bceSNeilBrown handle_write_finished(conf, r1_bio); 256062096bceSNeilBrown else if (test_bit(R1BIO_ReadError, &r1_bio->state)) 256162096bceSNeilBrown handle_read_error(conf, r1_bio); 2562d2eb35acSNeilBrown else 2563c230e7e5SNeilBrown WARN_ON_ONCE(1); 256462096bceSNeilBrown 25651d9d5241SNeilBrown cond_resched(); 25662953079cSShaohua Li if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) 2567de393cdeSNeilBrown md_check_recovery(mddev); 25681da177e4SLinus Torvalds } 2569e1dfa0a2SNeilBrown blk_finish_plug(&plug); 25701da177e4SLinus Torvalds } 25711da177e4SLinus Torvalds 2572e8096360SNeilBrown static int init_resync(struct r1conf *conf) 25731da177e4SLinus Torvalds { 25741da177e4SLinus Torvalds int buffs; 25751da177e4SLinus Torvalds 25761da177e4SLinus Torvalds buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 25779e77c485SEric Sesterhenn BUG_ON(conf->r1buf_pool); 25781da177e4SLinus Torvalds conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 25791da177e4SLinus Torvalds conf->poolinfo); 25801da177e4SLinus Torvalds if (!conf->r1buf_pool) 25811da177e4SLinus Torvalds return -ENOMEM; 25821da177e4SLinus Torvalds return 0; 25831da177e4SLinus Torvalds } 25841da177e4SLinus Torvalds 25851da177e4SLinus Torvalds /* 25861da177e4SLinus Torvalds * perform a "sync" on one "block" 25871da177e4SLinus Torvalds * 25881da177e4SLinus Torvalds * We need to make sure that no normal I/O requests - particularly write 25891da177e4SLinus Torvalds * requests - conflict with active sync requests. 25901da177e4SLinus Torvalds * 25911da177e4SLinus Torvalds * This is achieved by tracking pending requests and a 'barrier' concept 25921da177e4SLinus Torvalds * that can be installed to exclude normal IO requests.
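 * The barrier is kept per sector_to_idx() bucket, so a sync request only needs to exclude normal I/O within its own barrier unit.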
25931da177e4SLinus Torvalds */ 25941da177e4SLinus Torvalds 2595849674e4SShaohua Li static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, 2596849674e4SShaohua Li int *skipped) 25971da177e4SLinus Torvalds { 2598e8096360SNeilBrown struct r1conf *conf = mddev->private; 25999f2c9d12SNeilBrown struct r1bio *r1_bio; 26001da177e4SLinus Torvalds struct bio *bio; 26011da177e4SLinus Torvalds sector_t max_sector, nr_sectors; 26023e198f78SNeilBrown int disk = -1; 26031da177e4SLinus Torvalds int i; 26043e198f78SNeilBrown int wonly = -1; 26053e198f78SNeilBrown int write_targets = 0, read_targets = 0; 260657dab0bdSNeilBrown sector_t sync_blocks; 2607e3b9703eSNeilBrown int still_degraded = 0; 260806f60385SNeilBrown int good_sectors = RESYNC_SECTORS; 260906f60385SNeilBrown int min_bad = 0; /* number of sectors that are bad in all devices */ 2610fd76863eScolyli@suse.de int idx = sector_to_idx(sector_nr); 2611022e510fSMing Lei int page_idx = 0; 26121da177e4SLinus Torvalds 26131da177e4SLinus Torvalds if (!conf->r1buf_pool) 26141da177e4SLinus Torvalds if (init_resync(conf)) 261557afd89fSNeilBrown return 0; 26161da177e4SLinus Torvalds 261758c0fed4SAndre Noll max_sector = mddev->dev_sectors; 26181da177e4SLinus Torvalds if (sector_nr >= max_sector) { 2619191ea9b2SNeilBrown /* If we aborted, we need to abort the 2620191ea9b2SNeilBrown * sync on the 'current' bitmap chunk (there will 2621191ea9b2SNeilBrown * only be one in raid1 resync). 2622191ea9b2SNeilBrown * We can find the current address in mddev->curr_resync 2623191ea9b2SNeilBrown */ 26246a806c51SNeilBrown if (mddev->curr_resync < max_sector) /* aborted */ 26256a806c51SNeilBrown bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2626191ea9b2SNeilBrown &sync_blocks, 1); 26276a806c51SNeilBrown else /* completed sync */ 2628191ea9b2SNeilBrown conf->fullsync = 0; 26296a806c51SNeilBrown 26306a806c51SNeilBrown bitmap_close_sync(mddev->bitmap); 26311da177e4SLinus Torvalds close_sync(conf); 2632c40f341fSGoldwyn Rodrigues 2633c40f341fSGoldwyn Rodrigues if (mddev_is_clustered(mddev)) { 2634c40f341fSGoldwyn Rodrigues conf->cluster_sync_low = 0; 2635c40f341fSGoldwyn Rodrigues conf->cluster_sync_high = 0; 2636c40f341fSGoldwyn Rodrigues } 26371da177e4SLinus Torvalds return 0; 26381da177e4SLinus Torvalds } 26391da177e4SLinus Torvalds 264007d84d10SNeilBrown if (mddev->bitmap == NULL && 264107d84d10SNeilBrown mddev->recovery_cp == MaxSector && 26426394cca5SNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 264307d84d10SNeilBrown conf->fullsync == 0) { 264407d84d10SNeilBrown *skipped = 1; 264507d84d10SNeilBrown return max_sector - sector_nr; 264607d84d10SNeilBrown } 26476394cca5SNeilBrown /* before building a request, check if we can skip these blocks... 26486394cca5SNeilBrown * This call to bitmap_start_sync doesn't actually record anything 26496394cca5SNeilBrown */ 2650e3b9703eSNeilBrown if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 2651e5de485fSNeilBrown !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2652191ea9b2SNeilBrown /* We can skip this block, and probably several more */ 2653191ea9b2SNeilBrown *skipped = 1; 2654191ea9b2SNeilBrown return sync_blocks; 2655191ea9b2SNeilBrown } 265617999be4SNeilBrown 26577ac50447STomasz Majchrzak /* 26587ac50447STomasz Majchrzak * If there is non-resync activity waiting for a turn, then let it 26597ac50447STomasz Majchrzak * through before starting on this new sync request.
26607ac50447STomasz Majchrzak */ 2661824e47daScolyli@suse.de if (atomic_read(&conf->nr_waiting[idx])) 26627ac50447STomasz Majchrzak schedule_timeout_uninterruptible(1); 26637ac50447STomasz Majchrzak 2664c40f341fSGoldwyn Rodrigues /* we are incrementing sector_nr below. To be safe, we check against 2665c40f341fSGoldwyn Rodrigues * sector_nr + two times RESYNC_SECTORS 2666c40f341fSGoldwyn Rodrigues */ 2667c40f341fSGoldwyn Rodrigues 2668c40f341fSGoldwyn Rodrigues bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2669c40f341fSGoldwyn Rodrigues mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 26701c4588e9SNeilBrown r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 267117999be4SNeilBrown 2672c2fd4c94SNeilBrown raise_barrier(conf, sector_nr); 26731da177e4SLinus Torvalds 26743e198f78SNeilBrown rcu_read_lock(); 26753e198f78SNeilBrown /* 26763e198f78SNeilBrown * If we get a correctable read error during resync or recovery, 26773e198f78SNeilBrown * we might want to read from a different device. So we 26783e198f78SNeilBrown * flag all drives that could conceivably be read from for READ, 26793e198f78SNeilBrown * and any others (which will be non-In_sync devices) for WRITE. 26803e198f78SNeilBrown * If a read fails, we try reading from something else for which READ 26813e198f78SNeilBrown * is OK. 26823e198f78SNeilBrown */ 26831da177e4SLinus Torvalds 26841da177e4SLinus Torvalds r1_bio->mddev = mddev; 26851da177e4SLinus Torvalds r1_bio->sector = sector_nr; 2686191ea9b2SNeilBrown r1_bio->state = 0; 26871da177e4SLinus Torvalds set_bit(R1BIO_IsSync, &r1_bio->state); 2688fd76863eScolyli@suse.de /* make sure good_sectors won't go across barrier unit boundary */ 2689fd76863eScolyli@suse.de good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors); 26901da177e4SLinus Torvalds 26918f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 26923cb03002SNeilBrown struct md_rdev *rdev; 26931da177e4SLinus Torvalds bio = r1_bio->bios[i]; 26941da177e4SLinus Torvalds 26953e198f78SNeilBrown rdev = rcu_dereference(conf->mirrors[i].rdev); 26963e198f78SNeilBrown if (rdev == NULL || 26973e198f78SNeilBrown test_bit(Faulty, &rdev->flags)) { 26988f19ccb2SNeilBrown if (i < conf->raid_disks) 2699e3b9703eSNeilBrown still_degraded = 1; 27003e198f78SNeilBrown } else if (!test_bit(In_sync, &rdev->flags)) { 2701796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 27021da177e4SLinus Torvalds bio->bi_end_io = end_sync_write; 27031da177e4SLinus Torvalds write_targets ++; 27043e198f78SNeilBrown } else { 27053e198f78SNeilBrown /* may need to read from here */ 270606f60385SNeilBrown sector_t first_bad = MaxSector; 270706f60385SNeilBrown int bad_sectors; 270806f60385SNeilBrown 270906f60385SNeilBrown if (is_badblock(rdev, sector_nr, good_sectors, 271006f60385SNeilBrown &first_bad, &bad_sectors)) { 271106f60385SNeilBrown if (first_bad > sector_nr) 271206f60385SNeilBrown good_sectors = first_bad - sector_nr; 271306f60385SNeilBrown else { 271406f60385SNeilBrown bad_sectors -= (sector_nr - first_bad); 271506f60385SNeilBrown if (min_bad == 0 || 271606f60385SNeilBrown min_bad > bad_sectors) 271706f60385SNeilBrown min_bad = bad_sectors; 271806f60385SNeilBrown } 271906f60385SNeilBrown } 272006f60385SNeilBrown if (sector_nr < first_bad) { 27213e198f78SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) { 27223e198f78SNeilBrown if (wonly < 0) 27233e198f78SNeilBrown wonly = i; 27243e198f78SNeilBrown } else { 27253e198f78SNeilBrown if (disk < 0) 27263e198f78SNeilBrown disk = i;
27273e198f78SNeilBrown } 2728796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_READ, 0); 272906f60385SNeilBrown bio->bi_end_io = end_sync_read; 27303e198f78SNeilBrown read_targets++; 2731d57368afSAlexander Lyakas } else if (!test_bit(WriteErrorSeen, &rdev->flags) && 2732d57368afSAlexander Lyakas test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2733d57368afSAlexander Lyakas !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 2734d57368afSAlexander Lyakas /* 2735d57368afSAlexander Lyakas * The device is suitable for reading (InSync), 2736d57368afSAlexander Lyakas * but has bad block(s) here. Let's try to correct them, 2737d57368afSAlexander Lyakas * if we are doing resync or repair. Otherwise, leave 2738d57368afSAlexander Lyakas * this device alone for this sync request. 2739d57368afSAlexander Lyakas */ 2740796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2741d57368afSAlexander Lyakas bio->bi_end_io = end_sync_write; 2742d57368afSAlexander Lyakas write_targets++; 27433e198f78SNeilBrown } 274406f60385SNeilBrown } 274506f60385SNeilBrown if (bio->bi_end_io) { 27463e198f78SNeilBrown atomic_inc(&rdev->nr_pending); 27474f024f37SKent Overstreet bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 27483e198f78SNeilBrown bio->bi_bdev = rdev->bdev; 27492e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags)) 27502e52d449SNeilBrown bio->bi_opf |= MD_FAILFAST; 27511da177e4SLinus Torvalds } 275206f60385SNeilBrown } 27533e198f78SNeilBrown rcu_read_unlock(); 27543e198f78SNeilBrown if (disk < 0) 27553e198f78SNeilBrown disk = wonly; 27563e198f78SNeilBrown r1_bio->read_disk = disk; 2757191ea9b2SNeilBrown 275806f60385SNeilBrown if (read_targets == 0 && min_bad > 0) { 275906f60385SNeilBrown /* These sectors are bad on all InSync devices, so we 276006f60385SNeilBrown * need to mark them bad on all write targets 276106f60385SNeilBrown */ 276206f60385SNeilBrown int ok = 1; 27638f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2 ; i++) 276406f60385SNeilBrown if (r1_bio->bios[i]->bi_end_io == end_sync_write) { 2765a42f9d83Smajianpeng struct md_rdev *rdev = conf->mirrors[i].rdev; 276606f60385SNeilBrown ok = rdev_set_badblocks(rdev, sector_nr, 276706f60385SNeilBrown min_bad, 0 276806f60385SNeilBrown ) && ok; 276906f60385SNeilBrown } 27702953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 277106f60385SNeilBrown *skipped = 1; 277206f60385SNeilBrown put_buf(r1_bio); 277306f60385SNeilBrown 277406f60385SNeilBrown if (!ok) { 277506f60385SNeilBrown /* Cannot record the badblocks, so need to 277606f60385SNeilBrown * abort the resync. 277706f60385SNeilBrown * If there are multiple read targets, could just 277806f60385SNeilBrown * fail the really bad ones ??? 
277906f60385SNeilBrown */ 278006f60385SNeilBrown conf->recovery_disabled = mddev->recovery_disabled; 278106f60385SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 278206f60385SNeilBrown return 0; 278306f60385SNeilBrown } else 278406f60385SNeilBrown return min_bad; 278506f60385SNeilBrown 278606f60385SNeilBrown } 278706f60385SNeilBrown if (min_bad > 0 && min_bad < good_sectors) { 278806f60385SNeilBrown /* only resync enough to reach the next bad->good 278906f60385SNeilBrown * transition */ 279006f60385SNeilBrown good_sectors = min_bad; 279106f60385SNeilBrown } 279206f60385SNeilBrown 27933e198f78SNeilBrown if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 27943e198f78SNeilBrown /* extra read targets are also write targets */ 27953e198f78SNeilBrown write_targets += read_targets-1; 27963e198f78SNeilBrown 27973e198f78SNeilBrown if (write_targets == 0 || read_targets == 0) { 27981da177e4SLinus Torvalds /* There is nowhere to write, so all non-sync 27991da177e4SLinus Torvalds * drives must be failed - so we are finished 28001da177e4SLinus Torvalds */ 2801b7219ccbSNeilBrown sector_t rv; 2802b7219ccbSNeilBrown if (min_bad > 0) 2803b7219ccbSNeilBrown max_sector = sector_nr + min_bad; 2804b7219ccbSNeilBrown rv = max_sector - sector_nr; 280557afd89fSNeilBrown *skipped = 1; 28061da177e4SLinus Torvalds put_buf(r1_bio); 28071da177e4SLinus Torvalds return rv; 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds 2810c6207277SNeilBrown if (max_sector > mddev->resync_max) 2811c6207277SNeilBrown max_sector = mddev->resync_max; /* Don't do IO beyond here */ 281206f60385SNeilBrown if (max_sector > sector_nr + good_sectors) 281306f60385SNeilBrown max_sector = sector_nr + good_sectors; 28141da177e4SLinus Torvalds nr_sectors = 0; 2815289e99e8SNeilBrown sync_blocks = 0; 28161da177e4SLinus Torvalds do { 28171da177e4SLinus Torvalds struct page *page; 28181da177e4SLinus Torvalds int len = PAGE_SIZE; 28191da177e4SLinus Torvalds if (sector_nr + (len>>9) > max_sector) 28201da177e4SLinus Torvalds len = (max_sector - sector_nr) << 9; 28211da177e4SLinus Torvalds if (len == 0) 28221da177e4SLinus Torvalds break; 2823ab7a30c7SNeilBrown if (sync_blocks == 0) { 28246a806c51SNeilBrown if (!bitmap_start_sync(mddev->bitmap, sector_nr, 2825e3b9703eSNeilBrown &sync_blocks, still_degraded) && 2826e5de485fSNeilBrown !conf->fullsync && 2827e5de485fSNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2828191ea9b2SNeilBrown break; 28297571ae88SNeilBrown if ((len >> 9) > sync_blocks) 28306a806c51SNeilBrown len = sync_blocks<<9; 2831ab7a30c7SNeilBrown } 2832191ea9b2SNeilBrown 28338f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2; i++) { 283498d30c58SMing Lei struct resync_pages *rp; 283598d30c58SMing Lei 28361da177e4SLinus Torvalds bio = r1_bio->bios[i]; 283798d30c58SMing Lei rp = get_resync_pages(bio); 28381da177e4SLinus Torvalds if (bio->bi_end_io) { 2839022e510fSMing Lei page = resync_fetch_page(rp, page_idx); 2840c85ba149SMing Lei 2841c85ba149SMing Lei /* 2842c85ba149SMing Lei * won't fail because the vec table is big 2843c85ba149SMing Lei * enough to hold all these pages 2844c85ba149SMing Lei */ 2845c85ba149SMing Lei bio_add_page(bio, page, len, 0); 28461da177e4SLinus Torvalds } 28471da177e4SLinus Torvalds } 28481da177e4SLinus Torvalds nr_sectors += len>>9; 28491da177e4SLinus Torvalds sector_nr += len>>9; 2850191ea9b2SNeilBrown sync_blocks -= (len>>9); 2851022e510fSMing Lei } while (++page_idx < RESYNC_PAGES); 285298d30c58SMing Lei 28531da177e4SLinus Torvalds r1_bio->sectors = nr_sectors; 
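/* For clustered MD, advance the resync window (cluster_sync_low is the last completed position, cluster_sync_high is that plus CLUSTER_RESYNC_WINDOW_SECTORS) and broadcast it so other nodes know which range is being resynced. */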
28541da177e4SLinus Torvalds 2855c40f341fSGoldwyn Rodrigues if (mddev_is_clustered(mddev) && 2856c40f341fSGoldwyn Rodrigues conf->cluster_sync_high < sector_nr + nr_sectors) { 2857c40f341fSGoldwyn Rodrigues conf->cluster_sync_low = mddev->curr_resync_completed; 2858c40f341fSGoldwyn Rodrigues conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; 2859c40f341fSGoldwyn Rodrigues /* Send resync message */ 2860c40f341fSGoldwyn Rodrigues md_cluster_ops->resync_info_update(mddev, 2861c40f341fSGoldwyn Rodrigues conf->cluster_sync_low, 2862c40f341fSGoldwyn Rodrigues conf->cluster_sync_high); 2863c40f341fSGoldwyn Rodrigues } 2864c40f341fSGoldwyn Rodrigues 2865d11c171eSNeilBrown /* For a user-requested sync, we read all readable devices and do a 2866d11c171eSNeilBrown * compare 2867d11c171eSNeilBrown */ 2868d11c171eSNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2869d11c171eSNeilBrown atomic_set(&r1_bio->remaining, read_targets); 28702d4f4f33SNeilBrown for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { 2871d11c171eSNeilBrown bio = r1_bio->bios[i]; 2872d11c171eSNeilBrown if (bio->bi_end_io == end_sync_read) { 28732d4f4f33SNeilBrown read_targets--; 2874ddac7c7eSNeilBrown md_sync_acct(bio->bi_bdev, nr_sectors); 28752e52d449SNeilBrown if (read_targets == 1) 28762e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST; 28771da177e4SLinus Torvalds generic_make_request(bio); 2878d11c171eSNeilBrown } 2879d11c171eSNeilBrown } 2880d11c171eSNeilBrown } else { 2881d11c171eSNeilBrown atomic_set(&r1_bio->remaining, 1); 2882d11c171eSNeilBrown bio = r1_bio->bios[r1_bio->read_disk]; 2883ddac7c7eSNeilBrown md_sync_acct(bio->bi_bdev, nr_sectors); 28842e52d449SNeilBrown if (read_targets == 1) 28852e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST; 2886d11c171eSNeilBrown generic_make_request(bio); 2887d11c171eSNeilBrown 2888d11c171eSNeilBrown } 28891da177e4SLinus Torvalds return nr_sectors; 28901da177e4SLinus Torvalds } 28911da177e4SLinus Torvalds 2892fd01b88cSNeilBrown static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) 289380c3a6ceSDan Williams { 289480c3a6ceSDan Williams if (sectors) 289580c3a6ceSDan Williams return sectors; 289680c3a6ceSDan Williams 289780c3a6ceSDan Williams return mddev->dev_sectors; 289880c3a6ceSDan Williams } 289980c3a6ceSDan Williams 2900e8096360SNeilBrown static struct r1conf *setup_conf(struct mddev *mddev) 29011da177e4SLinus Torvalds { 2902e8096360SNeilBrown struct r1conf *conf; 2903709ae487SNeilBrown int i; 29040eaf822cSJonathan Brassow struct raid1_info *disk; 29053cb03002SNeilBrown struct md_rdev *rdev; 2906709ae487SNeilBrown int err = -ENOMEM; 29071da177e4SLinus Torvalds 2908e8096360SNeilBrown conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); 29091da177e4SLinus Torvalds if (!conf) 2910709ae487SNeilBrown goto abort; 29111da177e4SLinus Torvalds 2912fd76863eScolyli@suse.de conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, 2913824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2914fd76863eScolyli@suse.de if (!conf->nr_pending) 2915fd76863eScolyli@suse.de goto abort; 2916fd76863eScolyli@suse.de 2917fd76863eScolyli@suse.de conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, 2918824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2919fd76863eScolyli@suse.de if (!conf->nr_waiting) 2920fd76863eScolyli@suse.de goto abort; 2921fd76863eScolyli@suse.de 2922fd76863eScolyli@suse.de conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, 2923824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2924fd76863eScolyli@suse.de if (!conf->nr_queued) 
2925fd76863eScolyli@suse.de goto abort; 2926fd76863eScolyli@suse.de 2927fd76863eScolyli@suse.de conf->barrier = kcalloc(BARRIER_BUCKETS_NR, 2928824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2929fd76863eScolyli@suse.de if (!conf->barrier) 2930fd76863eScolyli@suse.de goto abort; 2931fd76863eScolyli@suse.de 29320eaf822cSJonathan Brassow conf->mirrors = kzalloc(sizeof(struct raid1_info) 29338f19ccb2SNeilBrown * mddev->raid_disks * 2, 29341da177e4SLinus Torvalds GFP_KERNEL); 29351da177e4SLinus Torvalds if (!conf->mirrors) 2936709ae487SNeilBrown goto abort; 29371da177e4SLinus Torvalds 2938ddaf22abSNeilBrown conf->tmppage = alloc_page(GFP_KERNEL); 2939ddaf22abSNeilBrown if (!conf->tmppage) 2940709ae487SNeilBrown goto abort; 2941ddaf22abSNeilBrown 2942709ae487SNeilBrown conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 29431da177e4SLinus Torvalds if (!conf->poolinfo) 2944709ae487SNeilBrown goto abort; 29458f19ccb2SNeilBrown conf->poolinfo->raid_disks = mddev->raid_disks * 2; 29461da177e4SLinus Torvalds conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 29471da177e4SLinus Torvalds r1bio_pool_free, 29481da177e4SLinus Torvalds conf->poolinfo); 29491da177e4SLinus Torvalds if (!conf->r1bio_pool) 2950709ae487SNeilBrown goto abort; 2951709ae487SNeilBrown 2952011067b0SNeilBrown conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0); 2953c230e7e5SNeilBrown if (!conf->bio_split) 2954c230e7e5SNeilBrown goto abort; 2955c230e7e5SNeilBrown 2956ed9bfdf1SNeilBrown conf->poolinfo->mddev = mddev; 29571da177e4SLinus Torvalds 2958c19d5798SNeilBrown err = -EINVAL; 2959e7e72bf6SNeil Brown spin_lock_init(&conf->device_lock); 2960dafb20faSNeilBrown rdev_for_each(rdev, mddev) { 2961709ae487SNeilBrown int disk_idx = rdev->raid_disk; 29621da177e4SLinus Torvalds if (disk_idx >= mddev->raid_disks 29631da177e4SLinus Torvalds || disk_idx < 0) 29641da177e4SLinus Torvalds continue; 2965c19d5798SNeilBrown if (test_bit(Replacement, &rdev->flags)) 296602b898f2SNeilBrown disk = conf->mirrors + mddev->raid_disks + disk_idx; 2967c19d5798SNeilBrown else 29681da177e4SLinus Torvalds disk = conf->mirrors + disk_idx; 29691da177e4SLinus Torvalds 2970c19d5798SNeilBrown if (disk->rdev) 2971c19d5798SNeilBrown goto abort; 29721da177e4SLinus Torvalds disk->rdev = rdev; 29731da177e4SLinus Torvalds disk->head_position = 0; 297412cee5a8SShaohua Li disk->seq_start = MaxSector; 29751da177e4SLinus Torvalds } 29761da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks; 29771da177e4SLinus Torvalds conf->mddev = mddev; 29781da177e4SLinus Torvalds INIT_LIST_HEAD(&conf->retry_list); 297955ce74d4SNeilBrown INIT_LIST_HEAD(&conf->bio_end_io_list); 29801da177e4SLinus Torvalds 29811da177e4SLinus Torvalds spin_lock_init(&conf->resync_lock); 298217999be4SNeilBrown init_waitqueue_head(&conf->wait_barrier); 29831da177e4SLinus Torvalds 2984191ea9b2SNeilBrown bio_list_init(&conf->pending_bio_list); 298534db0cd6SNeilBrown conf->pending_count = 0; 2986d890fa2bSNeilBrown conf->recovery_disabled = mddev->recovery_disabled - 1; 2987191ea9b2SNeilBrown 2988c19d5798SNeilBrown err = -EIO; 29898f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 29901da177e4SLinus Torvalds 29911da177e4SLinus Torvalds disk = conf->mirrors + i; 29921da177e4SLinus Torvalds 2993c19d5798SNeilBrown if (i < conf->raid_disks && 2994c19d5798SNeilBrown disk[conf->raid_disks].rdev) { 2995c19d5798SNeilBrown /* This slot has a replacement. 
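 * If there is no original, the replacement is promoted to a recovering spare; an original that exists but is not In_sync is an error.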
*/ 2996c19d5798SNeilBrown if (!disk->rdev) { 2997c19d5798SNeilBrown /* No original, just make the replacement 2998c19d5798SNeilBrown * a recovering spare 2999c19d5798SNeilBrown */ 3000c19d5798SNeilBrown disk->rdev = 3001c19d5798SNeilBrown disk[conf->raid_disks].rdev; 3002c19d5798SNeilBrown disk[conf->raid_disks].rdev = NULL; 3003c19d5798SNeilBrown } else if (!test_bit(In_sync, &disk->rdev->flags)) 3004c19d5798SNeilBrown /* Original is not in_sync - bad */ 3005c19d5798SNeilBrown goto abort; 3006c19d5798SNeilBrown } 3007c19d5798SNeilBrown 30085fd6c1dcSNeilBrown if (!disk->rdev || 30095fd6c1dcSNeilBrown !test_bit(In_sync, &disk->rdev->flags)) { 30101da177e4SLinus Torvalds disk->head_position = 0; 30114f0a5e01SJonathan Brassow if (disk->rdev && 30124f0a5e01SJonathan Brassow (disk->rdev->saved_raid_disk < 0)) 301317571284SNeilBrown conf->fullsync = 1; 3014be4d3280SShaohua Li } 30151da177e4SLinus Torvalds } 3016709ae487SNeilBrown 3017709ae487SNeilBrown err = -ENOMEM; 30180232605dSNeilBrown conf->thread = md_register_thread(raid1d, mddev, "raid1"); 30191d41c216SNeilBrown if (!conf->thread) 3020709ae487SNeilBrown goto abort; 3021191ea9b2SNeilBrown 3022709ae487SNeilBrown return conf; 3023709ae487SNeilBrown 3024709ae487SNeilBrown abort: 3025709ae487SNeilBrown if (conf) { 3026709ae487SNeilBrown mempool_destroy(conf->r1bio_pool); 3027709ae487SNeilBrown kfree(conf->mirrors); 3028709ae487SNeilBrown safe_put_page(conf->tmppage); 3029709ae487SNeilBrown kfree(conf->poolinfo); 3030fd76863eScolyli@suse.de kfree(conf->nr_pending); 3031fd76863eScolyli@suse.de kfree(conf->nr_waiting); 3032fd76863eScolyli@suse.de kfree(conf->nr_queued); 3033fd76863eScolyli@suse.de kfree(conf->barrier); 3034c230e7e5SNeilBrown if (conf->bio_split) 3035c230e7e5SNeilBrown bioset_free(conf->bio_split); 3036709ae487SNeilBrown kfree(conf); 3037709ae487SNeilBrown } 3038709ae487SNeilBrown return ERR_PTR(err); 3039709ae487SNeilBrown } 3040709ae487SNeilBrown 3041afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv); 3042849674e4SShaohua Li static int raid1_run(struct mddev *mddev) 3043709ae487SNeilBrown { 3044e8096360SNeilBrown struct r1conf *conf; 3045709ae487SNeilBrown int i; 30463cb03002SNeilBrown struct md_rdev *rdev; 30475220ea1eSmajianpeng int ret; 30482ff8cc2cSShaohua Li bool discard_supported = false; 3049709ae487SNeilBrown 3050709ae487SNeilBrown if (mddev->level != 1) { 30511d41c216SNeilBrown pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", 3052709ae487SNeilBrown mdname(mddev), mddev->level); 3053709ae487SNeilBrown return -EIO; 3054709ae487SNeilBrown } 3055709ae487SNeilBrown if (mddev->reshape_position != MaxSector) { 30561d41c216SNeilBrown pr_warn("md/raid1:%s: reshape_position set but not supported\n", 3057709ae487SNeilBrown mdname(mddev)); 3058709ae487SNeilBrown return -EIO; 3059709ae487SNeilBrown } 3060a415c0f1SNeilBrown if (mddev_init_writes_pending(mddev) < 0) 3061a415c0f1SNeilBrown return -ENOMEM; 3062709ae487SNeilBrown /* 3063709ae487SNeilBrown * copy the already verified devices into our private RAID1 3064709ae487SNeilBrown * bookkeeping area. 
[whatever we allocate in run(), 3065afa0f557SNeilBrown * should be freed in raid1_free()] 3066709ae487SNeilBrown */ 3067709ae487SNeilBrown if (mddev->private == NULL) 3068709ae487SNeilBrown conf = setup_conf(mddev); 3069709ae487SNeilBrown else 3070709ae487SNeilBrown conf = mddev->private; 3071709ae487SNeilBrown 3072709ae487SNeilBrown if (IS_ERR(conf)) 3073709ae487SNeilBrown return PTR_ERR(conf); 3074709ae487SNeilBrown 30753deff1a7SChristoph Hellwig if (mddev->queue) { 30765026d7a9SH. Peter Anvin blk_queue_max_write_same_sectors(mddev->queue, 0); 30773deff1a7SChristoph Hellwig blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 30783deff1a7SChristoph Hellwig } 30795026d7a9SH. Peter Anvin 3080dafb20faSNeilBrown rdev_for_each(rdev, mddev) { 30811ed7242eSJonathan Brassow if (!mddev->gendisk) 30821ed7242eSJonathan Brassow continue; 3083709ae487SNeilBrown disk_stack_limits(mddev->gendisk, rdev->bdev, 3084709ae487SNeilBrown rdev->data_offset << 9); 30852ff8cc2cSShaohua Li if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 30862ff8cc2cSShaohua Li discard_supported = true; 3087709ae487SNeilBrown } 3088709ae487SNeilBrown 3089709ae487SNeilBrown mddev->degraded = 0; 3090709ae487SNeilBrown for (i=0; i < conf->raid_disks; i++) 3091709ae487SNeilBrown if (conf->mirrors[i].rdev == NULL || 3092709ae487SNeilBrown !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 3093709ae487SNeilBrown test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 3094709ae487SNeilBrown mddev->degraded++; 3095709ae487SNeilBrown 3096709ae487SNeilBrown if (conf->raid_disks - mddev->degraded == 1) 3097709ae487SNeilBrown mddev->recovery_cp = MaxSector; 3098709ae487SNeilBrown 30998c6ac868SAndre Noll if (mddev->recovery_cp != MaxSector) 31001d41c216SNeilBrown pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", 31018c6ac868SAndre Noll mdname(mddev)); 31021d41c216SNeilBrown pr_info("md/raid1:%s: active with %d out of %d mirrors\n", 31031da177e4SLinus Torvalds mdname(mddev), mddev->raid_disks - mddev->degraded, 31041da177e4SLinus Torvalds mddev->raid_disks); 3105709ae487SNeilBrown 31061da177e4SLinus Torvalds /* 31071da177e4SLinus Torvalds * Ok, everything is just fine now 31081da177e4SLinus Torvalds */ 3109709ae487SNeilBrown mddev->thread = conf->thread; 3110709ae487SNeilBrown conf->thread = NULL; 3111709ae487SNeilBrown mddev->private = conf; 311246533ff7SNeilBrown set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 3113709ae487SNeilBrown 31141f403624SDan Williams md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 31151da177e4SLinus Torvalds 31161ed7242eSJonathan Brassow if (mddev->queue) { 31172ff8cc2cSShaohua Li if (discard_supported) 31182ff8cc2cSShaohua Li queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 31192ff8cc2cSShaohua Li mddev->queue); 31202ff8cc2cSShaohua Li else 31212ff8cc2cSShaohua Li queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 31222ff8cc2cSShaohua Li mddev->queue); 31231ed7242eSJonathan Brassow } 31245220ea1eSmajianpeng 31255220ea1eSmajianpeng ret = md_integrity_register(mddev); 31265aa61f42SNeilBrown if (ret) { 31275aa61f42SNeilBrown md_unregister_thread(&mddev->thread); 3128afa0f557SNeilBrown raid1_free(mddev, conf); 31295aa61f42SNeilBrown } 31305220ea1eSmajianpeng return ret; 31311da177e4SLinus Torvalds } 31321da177e4SLinus Torvalds 3133afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv) 31341da177e4SLinus Torvalds { 3135afa0f557SNeilBrown struct r1conf *conf = priv; 31364b6d287fSNeilBrown 31371da177e4SLinus Torvalds mempool_destroy(conf->r1bio_pool); 31381da177e4SLinus Torvalds 
kfree(conf->mirrors); 31390fea7ed8SHirokazu Takahashi safe_put_page(conf->tmppage); 31401da177e4SLinus Torvalds kfree(conf->poolinfo); 3141fd76863eScolyli@suse.de kfree(conf->nr_pending); 3142fd76863eScolyli@suse.de kfree(conf->nr_waiting); 3143fd76863eScolyli@suse.de kfree(conf->nr_queued); 3144fd76863eScolyli@suse.de kfree(conf->barrier); 3145c230e7e5SNeilBrown if (conf->bio_split) 3146c230e7e5SNeilBrown bioset_free(conf->bio_split); 31471da177e4SLinus Torvalds kfree(conf); 31481da177e4SLinus Torvalds } 31491da177e4SLinus Torvalds 3150fd01b88cSNeilBrown static int raid1_resize(struct mddev *mddev, sector_t sectors) 31511da177e4SLinus Torvalds { 31521da177e4SLinus Torvalds /* no resync is happening, and there is enough space 31531da177e4SLinus Torvalds * on all devices, so we can resize. 31541da177e4SLinus Torvalds * We need to make sure resync covers any new space. 31551da177e4SLinus Torvalds * If the array is shrinking we should possibly wait until 31561da177e4SLinus Torvalds * any io in the removed space completes, but it hardly seems 31571da177e4SLinus Torvalds * worth it. 31581da177e4SLinus Torvalds */ 3159a4a6125aSNeilBrown sector_t newsize = raid1_size(mddev, sectors, 0); 3160a4a6125aSNeilBrown if (mddev->external_size && 3161a4a6125aSNeilBrown mddev->array_sectors > newsize) 3162b522adcdSDan Williams return -EINVAL; 3163a4a6125aSNeilBrown if (mddev->bitmap) { 3164a4a6125aSNeilBrown int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); 3165a4a6125aSNeilBrown if (ret) 3166a4a6125aSNeilBrown return ret; 3167a4a6125aSNeilBrown } 3168a4a6125aSNeilBrown md_set_array_sectors(mddev, newsize); 3169b522adcdSDan Williams if (sectors > mddev->dev_sectors && 3170b098636cSNeilBrown mddev->recovery_cp > mddev->dev_sectors) { 317158c0fed4SAndre Noll mddev->recovery_cp = mddev->dev_sectors; 31721da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 31731da177e4SLinus Torvalds } 3174b522adcdSDan Williams mddev->dev_sectors = sectors; 31754b5c7ae8SNeilBrown mddev->resync_max_sectors = sectors; 31761da177e4SLinus Torvalds return 0; 31771da177e4SLinus Torvalds } 31781da177e4SLinus Torvalds 3179fd01b88cSNeilBrown static int raid1_reshape(struct mddev *mddev) 31801da177e4SLinus Torvalds { 31811da177e4SLinus Torvalds /* We need to: 31821da177e4SLinus Torvalds * 1/ resize the r1bio_pool 31831da177e4SLinus Torvalds * 2/ resize conf->mirrors 31841da177e4SLinus Torvalds * 31851da177e4SLinus Torvalds * We allocate a new r1bio_pool if we can. 31861da177e4SLinus Torvalds * Then raise a device barrier and wait until all IO stops. 31871da177e4SLinus Torvalds * Then resize conf->mirrors and swap in the new r1bio pool. 31886ea9c07cSNeilBrown * 31896ea9c07cSNeilBrown * At the same time, we "pack" the devices so that all the missing 31906ea9c07cSNeilBrown * devices have the higher raid_disk numbers. 
31911da177e4SLinus Torvalds */ 31921da177e4SLinus Torvalds mempool_t *newpool, *oldpool; 31931da177e4SLinus Torvalds struct pool_info *newpoolinfo; 31940eaf822cSJonathan Brassow struct raid1_info *newmirrors; 3195e8096360SNeilBrown struct r1conf *conf = mddev->private; 319663c70c4fSNeilBrown int cnt, raid_disks; 3197c04be0aaSNeilBrown unsigned long flags; 31982214c260SArtur Paszkiewicz int d, d2; 31991da177e4SLinus Torvalds 320063c70c4fSNeilBrown /* Cannot change chunk_size, layout, or level */ 3201664e7c41SAndre Noll if (mddev->chunk_sectors != mddev->new_chunk_sectors || 320263c70c4fSNeilBrown mddev->layout != mddev->new_layout || 320363c70c4fSNeilBrown mddev->level != mddev->new_level) { 3204664e7c41SAndre Noll mddev->new_chunk_sectors = mddev->chunk_sectors; 320563c70c4fSNeilBrown mddev->new_layout = mddev->layout; 320663c70c4fSNeilBrown mddev->new_level = mddev->level; 320763c70c4fSNeilBrown return -EINVAL; 320863c70c4fSNeilBrown } 320963c70c4fSNeilBrown 32102214c260SArtur Paszkiewicz if (!mddev_is_clustered(mddev)) 32112214c260SArtur Paszkiewicz md_allow_write(mddev); 32122a2275d6SNeilBrown 321363c70c4fSNeilBrown raid_disks = mddev->raid_disks + mddev->delta_disks; 321463c70c4fSNeilBrown 32156ea9c07cSNeilBrown if (raid_disks < conf->raid_disks) { 32166ea9c07cSNeilBrown cnt=0; 32176ea9c07cSNeilBrown for (d= 0; d < conf->raid_disks; d++) 32181da177e4SLinus Torvalds if (conf->mirrors[d].rdev) 32196ea9c07cSNeilBrown cnt++; 32206ea9c07cSNeilBrown if (cnt > raid_disks) 32211da177e4SLinus Torvalds return -EBUSY; 32226ea9c07cSNeilBrown } 32231da177e4SLinus Torvalds 32241da177e4SLinus Torvalds newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 32251da177e4SLinus Torvalds if (!newpoolinfo) 32261da177e4SLinus Torvalds return -ENOMEM; 32271da177e4SLinus Torvalds newpoolinfo->mddev = mddev; 32288f19ccb2SNeilBrown newpoolinfo->raid_disks = raid_disks * 2; 32291da177e4SLinus Torvalds 32301da177e4SLinus Torvalds newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, 32311da177e4SLinus Torvalds r1bio_pool_free, newpoolinfo); 32321da177e4SLinus Torvalds if (!newpool) { 32331da177e4SLinus Torvalds kfree(newpoolinfo); 32341da177e4SLinus Torvalds return -ENOMEM; 32351da177e4SLinus Torvalds } 32360eaf822cSJonathan Brassow newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, 32378f19ccb2SNeilBrown GFP_KERNEL); 32381da177e4SLinus Torvalds if (!newmirrors) { 32391da177e4SLinus Torvalds kfree(newpoolinfo); 32401da177e4SLinus Torvalds mempool_destroy(newpool); 32411da177e4SLinus Torvalds return -ENOMEM; 32421da177e4SLinus Torvalds } 32431da177e4SLinus Torvalds 3244e2d59925SNeilBrown freeze_array(conf, 0); 32451da177e4SLinus Torvalds 32461da177e4SLinus Torvalds /* ok, everything is stopped */ 32471da177e4SLinus Torvalds oldpool = conf->r1bio_pool; 32481da177e4SLinus Torvalds conf->r1bio_pool = newpool; 32496ea9c07cSNeilBrown 3250a88aa786SNeilBrown for (d = d2 = 0; d < conf->raid_disks; d++) { 32513cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[d].rdev; 3252a88aa786SNeilBrown if (rdev && rdev->raid_disk != d2) { 325336fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev); 3254a88aa786SNeilBrown rdev->raid_disk = d2; 325536fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev); 325636fad858SNamhyung Kim if (sysfs_link_rdev(mddev, rdev)) 32571d41c216SNeilBrown pr_warn("md/raid1:%s: cannot register rd%d\n", 325836fad858SNamhyung Kim mdname(mddev), rdev->raid_disk); 3259a88aa786SNeilBrown } 3260a88aa786SNeilBrown if (rdev) 3261a88aa786SNeilBrown newmirrors[d2++].rdev = rdev; 
32626ea9c07cSNeilBrown } 32631da177e4SLinus Torvalds kfree(conf->mirrors); 32641da177e4SLinus Torvalds conf->mirrors = newmirrors; 32651da177e4SLinus Torvalds kfree(conf->poolinfo); 32661da177e4SLinus Torvalds conf->poolinfo = newpoolinfo; 32671da177e4SLinus Torvalds 3268c04be0aaSNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 32691da177e4SLinus Torvalds mddev->degraded += (raid_disks - conf->raid_disks); 3270c04be0aaSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 32711da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks = raid_disks; 327263c70c4fSNeilBrown mddev->delta_disks = 0; 32731da177e4SLinus Torvalds 3274e2d59925SNeilBrown unfreeze_array(conf); 32751da177e4SLinus Torvalds 3276985ca973SNeilBrown set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 32771da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 32781da177e4SLinus Torvalds md_wakeup_thread(mddev->thread); 32791da177e4SLinus Torvalds 32801da177e4SLinus Torvalds mempool_destroy(oldpool); 32811da177e4SLinus Torvalds return 0; 32821da177e4SLinus Torvalds } 32831da177e4SLinus Torvalds 3284fd01b88cSNeilBrown static void raid1_quiesce(struct mddev *mddev, int state) 328536fa3063SNeilBrown { 3286e8096360SNeilBrown struct r1conf *conf = mddev->private; 328736fa3063SNeilBrown 328836fa3063SNeilBrown switch(state) { 32896eef4b21SNeilBrown case 2: /* wake for suspend */ 32906eef4b21SNeilBrown wake_up(&conf->wait_barrier); 32916eef4b21SNeilBrown break; 32929e6603daSNeilBrown case 1: 329307169fd4Smajianpeng freeze_array(conf, 0); 329436fa3063SNeilBrown break; 32959e6603daSNeilBrown case 0: 329607169fd4Smajianpeng unfreeze_array(conf); 329736fa3063SNeilBrown break; 329836fa3063SNeilBrown } 329936fa3063SNeilBrown } 330036fa3063SNeilBrown 3301fd01b88cSNeilBrown static void *raid1_takeover(struct mddev *mddev) 3302709ae487SNeilBrown { 3303709ae487SNeilBrown /* raid1 can take over: 3304709ae487SNeilBrown * raid5 with 2 devices, any layout or chunk size 3305709ae487SNeilBrown */ 3306709ae487SNeilBrown if (mddev->level == 5 && mddev->raid_disks == 2) { 3307e8096360SNeilBrown struct r1conf *conf; 3308709ae487SNeilBrown mddev->new_level = 1; 3309709ae487SNeilBrown mddev->new_layout = 0; 3310709ae487SNeilBrown mddev->new_chunk_sectors = 0; 3311709ae487SNeilBrown conf = setup_conf(mddev); 33126995f0b2SShaohua Li if (!IS_ERR(conf)) { 331307169fd4Smajianpeng /* Array must appear to be quiesced */ 331407169fd4Smajianpeng conf->array_frozen = 1; 3315394ed8e4SShaohua Li mddev_clear_unsupported_flags(mddev, 3316394ed8e4SShaohua Li UNSUPPORTED_MDDEV_FLAGS); 33176995f0b2SShaohua Li } 3318709ae487SNeilBrown return conf; 3319709ae487SNeilBrown } 3320709ae487SNeilBrown return ERR_PTR(-EINVAL); 3321709ae487SNeilBrown } 33221da177e4SLinus Torvalds 332384fc4b56SNeilBrown static struct md_personality raid1_personality = 33241da177e4SLinus Torvalds { 33251da177e4SLinus Torvalds .name = "raid1", 33262604b703SNeilBrown .level = 1, 33271da177e4SLinus Torvalds .owner = THIS_MODULE, 3328849674e4SShaohua Li .make_request = raid1_make_request, 3329849674e4SShaohua Li .run = raid1_run, 3330afa0f557SNeilBrown .free = raid1_free, 3331849674e4SShaohua Li .status = raid1_status, 3332849674e4SShaohua Li .error_handler = raid1_error, 33331da177e4SLinus Torvalds .hot_add_disk = raid1_add_disk, 33341da177e4SLinus Torvalds .hot_remove_disk= raid1_remove_disk, 33351da177e4SLinus Torvalds .spare_active = raid1_spare_active, 3336849674e4SShaohua Li .sync_request = raid1_sync_request, 33371da177e4SLinus Torvalds .resize = raid1_resize, 
333880c3a6ceSDan Williams .size = raid1_size, 333963c70c4fSNeilBrown .check_reshape = raid1_reshape, 334036fa3063SNeilBrown .quiesce = raid1_quiesce, 3341709ae487SNeilBrown .takeover = raid1_takeover, 33425c675f83SNeilBrown .congested = raid1_congested, 33431da177e4SLinus Torvalds }; 33441da177e4SLinus Torvalds 33451da177e4SLinus Torvalds static int __init raid_init(void) 33461da177e4SLinus Torvalds { 33472604b703SNeilBrown return register_md_personality(&raid1_personality); 33481da177e4SLinus Torvalds } 33491da177e4SLinus Torvalds 33501da177e4SLinus Torvalds static void raid_exit(void) 33511da177e4SLinus Torvalds { 33522604b703SNeilBrown unregister_md_personality(&raid1_personality); 33531da177e4SLinus Torvalds } 33541da177e4SLinus Torvalds 33551da177e4SLinus Torvalds module_init(raid_init); 33561da177e4SLinus Torvalds module_exit(raid_exit); 33571da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 33580efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD"); 33591da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */ 3360d9d166c2SNeilBrown MODULE_ALIAS("md-raid1"); 33612604b703SNeilBrown MODULE_ALIAS("md-level-1"); 336234db0cd6SNeilBrown 336334db0cd6SNeilBrown module_param(max_queued_requests, int, S_IRUGO|S_IWUSR); 3364