// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)	\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct wb_info *wi, *temp_wi;
	unsigned long flags;
	int ret = 0;
	struct mddev *mddev = rdev->mddev;

	wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);

	spin_lock_irqsave(&rdev->wb_list_lock, flags);
	list_for_each_entry(temp_wi, &rdev->wb_list, list) {
		/* collision happened */
		if (hi > temp_wi->lo && lo < temp_wi->hi) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret) {
		wi->lo = lo;
		wi->hi = hi;
		list_add(&wi->list, &rdev->wb_list);
	} else
		mempool_free(wi, mddev->wb_info_pool);
	spin_unlock_irqrestore(&rdev->wb_list_lock, flags);

	return ret;
}

static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
	struct wb_info *wi;
	unsigned long flags;
	int found = 0;
	struct mddev *mddev = rdev->mddev;

	spin_lock_irqsave(&rdev->wb_list_lock, flags);
	list_for_each_entry(wi, &rdev->wb_list, list)
		if (hi == wi->hi && lo == wi->lo) {
			list_del(&wi->list);
			mempool_free(wi, mddev->wb_info_pool);
			found = 1;
			break;
		}

	if (!found)
		WARN(1, "The write behind IO is not recorded\n");
	spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
	wake_up(&rdev->wb_io_wait);
}
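
/*
 * The collision test in check_and_add_wb() treats [lo, hi) as a
 * half-open interval: two ranges overlap exactly when each one starts
 * before the other ends.  A minimal sketch of that predicate in
 * isolation (the function name is invented for illustration and is
 * not part of the driver):
 */
#if 0	/* illustrative sketch, not built */
static bool demo_ranges_overlap(sector_t a_lo, sector_t a_hi,
				sector_t b_lo, sector_t b_hi)
{
	/* overlap iff each range starts before the other one ends */
	return a_lo < b_hi && b_lo < a_hi;
}
#endif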

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
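
/*
 * r1bio_pool_alloc() above sizes one allocation with offsetof() so the
 * trailing bios[] flexible array gets exactly raid_disks slots.  A
 * hedged, generic illustration of the same pattern follows; the names
 * are invented for the example (newer code would typically use
 * struct_size() from <linux/overflow.h> instead):
 */
#if 0	/* illustrative sketch, not built */
struct flex_demo {
	int n;
	struct bio *slots[];	/* flexible array member */
};

static struct flex_demo *flex_demo_alloc(int n, gfp_t gfp)
{
	/* one allocation covers the header plus n trailing pointers */
	struct flex_demo *d = kzalloc(offsetof(struct flex_demo, slots[n]), gfp);

	if (d)
		d->n = n;
	return d;
}
#endif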

static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int j;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
			    gfp_flags);
	if (!rps)
		goto out_free_r1bio;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0)
		resync_free_pages(&rps[j]);

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	kfree(rps);

out_free_r1bio:
	rbio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */
	kfree(rp);

	rbio_pool_free(r1bio, data);
}
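
/*
 * r1buf_pool_alloc()/r1buf_pool_free() are mempool callbacks: the pool
 * pre-reserves a fixed number of resync buffers so resync can keep
 * making progress under memory pressure.  A sketch of how such a pool
 * is wired up (the driver does this when a resync starts; the exact
 * reservation count shown here is illustrative):
 */
#if 0	/* illustrative sketch, not built */
static int demo_init_resync_pool(struct r1conf *conf)
{
	/* pre-reserve buffers; mempool_alloc(GFP_NOIO) then cannot starve */
	return mempool_init(&conf->r1buf_pool, RESYNC_DEPTH,
			    r1buf_pool_alloc, r1buf_pool_free,
			    conf->poolinfo);
}
#endif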

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, &conf->r1buf_pool);

	lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int idx;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */
		;
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     test_bit(In_sync, &rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			   r1_bio->sectors,
			   !test_bit(R1BIO_Degraded, &r1_bio->state),
			   test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
		}

		/*
		 * When the device is faulty, it is not necessary to
		 * handle write error.
		 * For failfast, this is the only remaining device,
		 * so we need to retry the write without FailFast.
		 */
		if (!test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_WriteError, &r1_bio->state);
		else {
			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
			to_put = bio;
		}
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write fell before
		 * rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WBCollisionCheck, &rdev->flags)) {
			sector_t lo = r1_bio->sector;
			sector_t hi = r1_bio->sector + r1_bio->sectors;

			remove_wb(rdev, lo, hi);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
	      start_sector;

	if (len > sectors)
		len = sectors;

	return len;
}
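
/*
 * Worked example for align_to_barrier_unit_end(): raid1.h defines the
 * barrier unit as (1 << BARRIER_UNIT_SECTOR_BITS) sectors (64MB units),
 * so with BARRIER_UNIT_SECTOR_SIZE = 131072, a request starting at
 * sector 131000 for 200 sectors gets
 * len = round_up(131001, 131072) - 131000 = 72: only the 72 sectors up
 * to the unit boundary are issued, and the remainder is handled as a
 * separate r1bio.  The same clamp in isolation (name invented for the
 * example):
 */
#if 0	/* illustrative sketch, not built */
static sector_t demo_clamp_to_unit(sector_t start, sector_t sectors)
{
	sector_t len = round_up(start + 1, BARRIER_UNIT_SECTOR_SIZE) - start;

	return min(len, sectors);
}
#endif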
There is a per-array 'next expected sequential IO' sector 5661da177e4SLinus Torvalds * number - if this matches on the next IO then we use the last disk. 5671da177e4SLinus Torvalds * There is also a per-disk 'last know head position' sector that is 5681da177e4SLinus Torvalds * maintained from IRQ contexts, both the normal and the resync IO 5691da177e4SLinus Torvalds * completion handlers update this position correctly. If there is no 5701da177e4SLinus Torvalds * perfect sequential match then we pick the disk whose head is closest. 5711da177e4SLinus Torvalds * 5721da177e4SLinus Torvalds * If there are 2 mirrors in the same 2 devices, performance degrades 5731da177e4SLinus Torvalds * because position is mirror, not device based. 5741da177e4SLinus Torvalds * 5751da177e4SLinus Torvalds * The rdev for the device selected will have nr_pending incremented. 5761da177e4SLinus Torvalds */ 577e8096360SNeilBrown static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) 5781da177e4SLinus Torvalds { 579af3a2cd6SNeilBrown const sector_t this_sector = r1_bio->sector; 580d2eb35acSNeilBrown int sectors; 581d2eb35acSNeilBrown int best_good_sectors; 5829dedf603SShaohua Li int best_disk, best_dist_disk, best_pending_disk; 5839dedf603SShaohua Li int has_nonrot_disk; 584be4d3280SShaohua Li int disk; 58576073054SNeilBrown sector_t best_dist; 5869dedf603SShaohua Li unsigned int min_pending; 5873cb03002SNeilBrown struct md_rdev *rdev; 588f3ac8bf7SNeilBrown int choose_first; 58912cee5a8SShaohua Li int choose_next_idle; 5901da177e4SLinus Torvalds 5911da177e4SLinus Torvalds rcu_read_lock(); 5921da177e4SLinus Torvalds /* 5938ddf9efeSNeilBrown * Check if we can balance. We can balance on the whole 5941da177e4SLinus Torvalds * device if no resync is going on, or below the resync window. 5951da177e4SLinus Torvalds * We take the first readable disk when above the resync window. 
5961da177e4SLinus Torvalds */ 5971da177e4SLinus Torvalds retry: 598d2eb35acSNeilBrown sectors = r1_bio->sectors; 59976073054SNeilBrown best_disk = -1; 6009dedf603SShaohua Li best_dist_disk = -1; 60176073054SNeilBrown best_dist = MaxSector; 6029dedf603SShaohua Li best_pending_disk = -1; 6039dedf603SShaohua Li min_pending = UINT_MAX; 604d2eb35acSNeilBrown best_good_sectors = 0; 6059dedf603SShaohua Li has_nonrot_disk = 0; 60612cee5a8SShaohua Li choose_next_idle = 0; 6072e52d449SNeilBrown clear_bit(R1BIO_FailFast, &r1_bio->state); 608d2eb35acSNeilBrown 6097d49ffcfSGoldwyn Rodrigues if ((conf->mddev->recovery_cp < this_sector + sectors) || 6107d49ffcfSGoldwyn Rodrigues (mddev_is_clustered(conf->mddev) && 61190382ed9SGoldwyn Rodrigues md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, 6127d49ffcfSGoldwyn Rodrigues this_sector + sectors))) 6137d49ffcfSGoldwyn Rodrigues choose_first = 1; 6147d49ffcfSGoldwyn Rodrigues else 6157d49ffcfSGoldwyn Rodrigues choose_first = 0; 6161da177e4SLinus Torvalds 617be4d3280SShaohua Li for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { 61876073054SNeilBrown sector_t dist; 619d2eb35acSNeilBrown sector_t first_bad; 620d2eb35acSNeilBrown int bad_sectors; 6219dedf603SShaohua Li unsigned int pending; 62212cee5a8SShaohua Li bool nonrot; 623d2eb35acSNeilBrown 624f3ac8bf7SNeilBrown rdev = rcu_dereference(conf->mirrors[disk].rdev); 625f3ac8bf7SNeilBrown if (r1_bio->bios[disk] == IO_BLOCKED 626f3ac8bf7SNeilBrown || rdev == NULL 62776073054SNeilBrown || test_bit(Faulty, &rdev->flags)) 628f3ac8bf7SNeilBrown continue; 62976073054SNeilBrown if (!test_bit(In_sync, &rdev->flags) && 63076073054SNeilBrown rdev->recovery_offset < this_sector + sectors) 63176073054SNeilBrown continue; 63276073054SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) { 63376073054SNeilBrown /* Don't balance among write-mostly, just 63476073054SNeilBrown * use the first as a last resort */ 635d1901ef0STomáš Hodek if (best_dist_disk < 0) { 636307729c8SNeilBrown if (is_badblock(rdev, this_sector, sectors, 637307729c8SNeilBrown &first_bad, &bad_sectors)) { 638816b0acfSWei Fang if (first_bad <= this_sector) 639307729c8SNeilBrown /* Cannot use this */ 640307729c8SNeilBrown continue; 641307729c8SNeilBrown best_good_sectors = first_bad - this_sector; 642307729c8SNeilBrown } else 643307729c8SNeilBrown best_good_sectors = sectors; 644d1901ef0STomáš Hodek best_dist_disk = disk; 645d1901ef0STomáš Hodek best_pending_disk = disk; 646307729c8SNeilBrown } 64776073054SNeilBrown continue; 6488ddf9efeSNeilBrown } 64976073054SNeilBrown /* This is a reasonable device to use. It might 65076073054SNeilBrown * even be best. 6511da177e4SLinus Torvalds */ 652d2eb35acSNeilBrown if (is_badblock(rdev, this_sector, sectors, 653d2eb35acSNeilBrown &first_bad, &bad_sectors)) { 654d2eb35acSNeilBrown if (best_dist < MaxSector) 655d2eb35acSNeilBrown /* already have a better device */ 656d2eb35acSNeilBrown continue; 657d2eb35acSNeilBrown if (first_bad <= this_sector) { 658d2eb35acSNeilBrown /* cannot read here. If this is the 'primary' 659d2eb35acSNeilBrown * device, then we must not read beyond 660d2eb35acSNeilBrown * bad_sectors from another device.. 
661d2eb35acSNeilBrown */ 662d2eb35acSNeilBrown bad_sectors -= (this_sector - first_bad); 663d2eb35acSNeilBrown if (choose_first && sectors > bad_sectors) 664d2eb35acSNeilBrown sectors = bad_sectors; 665d2eb35acSNeilBrown if (best_good_sectors > sectors) 666d2eb35acSNeilBrown best_good_sectors = sectors; 667d2eb35acSNeilBrown 668d2eb35acSNeilBrown } else { 669d2eb35acSNeilBrown sector_t good_sectors = first_bad - this_sector; 670d2eb35acSNeilBrown if (good_sectors > best_good_sectors) { 671d2eb35acSNeilBrown best_good_sectors = good_sectors; 672d2eb35acSNeilBrown best_disk = disk; 673d2eb35acSNeilBrown } 674d2eb35acSNeilBrown if (choose_first) 675d2eb35acSNeilBrown break; 676d2eb35acSNeilBrown } 677d2eb35acSNeilBrown continue; 678d82dd0e3STomasz Majchrzak } else { 679d82dd0e3STomasz Majchrzak if ((sectors > best_good_sectors) && (best_disk >= 0)) 680d82dd0e3STomasz Majchrzak best_disk = -1; 681d2eb35acSNeilBrown best_good_sectors = sectors; 682d82dd0e3STomasz Majchrzak } 683d2eb35acSNeilBrown 6842e52d449SNeilBrown if (best_disk >= 0) 6852e52d449SNeilBrown /* At least two disks to choose from so failfast is OK */ 6862e52d449SNeilBrown set_bit(R1BIO_FailFast, &r1_bio->state); 6872e52d449SNeilBrown 68812cee5a8SShaohua Li nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); 68912cee5a8SShaohua Li has_nonrot_disk |= nonrot; 6909dedf603SShaohua Li pending = atomic_read(&rdev->nr_pending); 69176073054SNeilBrown dist = abs(this_sector - conf->mirrors[disk].head_position); 69212cee5a8SShaohua Li if (choose_first) { 69376073054SNeilBrown best_disk = disk; 6941da177e4SLinus Torvalds break; 6951da177e4SLinus Torvalds } 69612cee5a8SShaohua Li /* Don't change to another disk for sequential reads */ 69712cee5a8SShaohua Li if (conf->mirrors[disk].next_seq_sect == this_sector 69812cee5a8SShaohua Li || dist == 0) { 69912cee5a8SShaohua Li int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; 70012cee5a8SShaohua Li struct raid1_info *mirror = &conf->mirrors[disk]; 70112cee5a8SShaohua Li 70212cee5a8SShaohua Li best_disk = disk; 70312cee5a8SShaohua Li /* 70412cee5a8SShaohua Li * If buffered sequential IO size exceeds optimal 70512cee5a8SShaohua Li * iosize, check if there is idle disk. If yes, choose 70612cee5a8SShaohua Li * the idle disk. read_balance could already choose an 70712cee5a8SShaohua Li * idle disk before noticing it's a sequential IO in 70812cee5a8SShaohua Li * this disk. This doesn't matter because this disk 70912cee5a8SShaohua Li * will idle, next time it will be utilized after the 71012cee5a8SShaohua Li * first disk has IO size exceeds optimal iosize. In 71112cee5a8SShaohua Li * this way, iosize of the first disk will be optimal 71212cee5a8SShaohua Li * iosize at least. iosize of the second disk might be 71312cee5a8SShaohua Li * small, but not a big deal since when the second disk 71412cee5a8SShaohua Li * starts IO, the first disk is likely still busy. 
71512cee5a8SShaohua Li */ 71612cee5a8SShaohua Li if (nonrot && opt_iosize > 0 && 71712cee5a8SShaohua Li mirror->seq_start != MaxSector && 71812cee5a8SShaohua Li mirror->next_seq_sect > opt_iosize && 71912cee5a8SShaohua Li mirror->next_seq_sect - opt_iosize >= 72012cee5a8SShaohua Li mirror->seq_start) { 72112cee5a8SShaohua Li choose_next_idle = 1; 72212cee5a8SShaohua Li continue; 72312cee5a8SShaohua Li } 72412cee5a8SShaohua Li break; 72512cee5a8SShaohua Li } 72612cee5a8SShaohua Li 72712cee5a8SShaohua Li if (choose_next_idle) 72812cee5a8SShaohua Li continue; 7299dedf603SShaohua Li 7309dedf603SShaohua Li if (min_pending > pending) { 7319dedf603SShaohua Li min_pending = pending; 7329dedf603SShaohua Li best_pending_disk = disk; 7339dedf603SShaohua Li } 7349dedf603SShaohua Li 73576073054SNeilBrown if (dist < best_dist) { 73676073054SNeilBrown best_dist = dist; 7379dedf603SShaohua Li best_dist_disk = disk; 7381da177e4SLinus Torvalds } 739f3ac8bf7SNeilBrown } 7401da177e4SLinus Torvalds 7419dedf603SShaohua Li /* 7429dedf603SShaohua Li * If all disks are rotational, choose the closest disk. If any disk is 7439dedf603SShaohua Li * non-rotational, choose the disk with less pending request even the 7449dedf603SShaohua Li * disk is rotational, which might/might not be optimal for raids with 7459dedf603SShaohua Li * mixed ratation/non-rotational disks depending on workload. 7469dedf603SShaohua Li */ 7479dedf603SShaohua Li if (best_disk == -1) { 7482e52d449SNeilBrown if (has_nonrot_disk || min_pending == 0) 7499dedf603SShaohua Li best_disk = best_pending_disk; 7509dedf603SShaohua Li else 7519dedf603SShaohua Li best_disk = best_dist_disk; 7529dedf603SShaohua Li } 7539dedf603SShaohua Li 75476073054SNeilBrown if (best_disk >= 0) { 75576073054SNeilBrown rdev = rcu_dereference(conf->mirrors[best_disk].rdev); 7568ddf9efeSNeilBrown if (!rdev) 7578ddf9efeSNeilBrown goto retry; 7588ddf9efeSNeilBrown atomic_inc(&rdev->nr_pending); 759d2eb35acSNeilBrown sectors = best_good_sectors; 76012cee5a8SShaohua Li 76112cee5a8SShaohua Li if (conf->mirrors[best_disk].next_seq_sect != this_sector) 76212cee5a8SShaohua Li conf->mirrors[best_disk].seq_start = this_sector; 76312cee5a8SShaohua Li 764be4d3280SShaohua Li conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; 7651da177e4SLinus Torvalds } 7661da177e4SLinus Torvalds rcu_read_unlock(); 767d2eb35acSNeilBrown *max_sectors = sectors; 7681da177e4SLinus Torvalds 76976073054SNeilBrown return best_disk; 7701da177e4SLinus Torvalds } 7711da177e4SLinus Torvalds 7725c675f83SNeilBrown static int raid1_congested(struct mddev *mddev, int bits) 7730d129228SNeilBrown { 774e8096360SNeilBrown struct r1conf *conf = mddev->private; 7750d129228SNeilBrown int i, ret = 0; 7760d129228SNeilBrown 7774452226eSTejun Heo if ((bits & (1 << WB_async_congested)) && 77834db0cd6SNeilBrown conf->pending_count >= max_queued_requests) 77934db0cd6SNeilBrown return 1; 78034db0cd6SNeilBrown 7810d129228SNeilBrown rcu_read_lock(); 782f53e29fcSNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 7833cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 7840d129228SNeilBrown if (rdev && !test_bit(Faulty, &rdev->flags)) { 785165125e1SJens Axboe struct request_queue *q = bdev_get_queue(rdev->bdev); 7860d129228SNeilBrown 7871ed7242eSJonathan Brassow BUG_ON(!q); 7881ed7242eSJonathan Brassow 7890d129228SNeilBrown /* Note the '|| 1' - when read_balance prefers 7900d129228SNeilBrown * non-congested targets, it can be removed 7910d129228SNeilBrown */ 7924452226eSTejun Heo if ((bits & (1 
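
/*
 * The final choice above distills to a small policy: prefer the disk
 * picked by pending-request count when any non-rotational disk is
 * present (or some disk is fully idle), otherwise fall back to the
 * shortest head distance.  A stand-alone sketch of just that policy
 * (function name invented for illustration):
 */
#if 0	/* illustrative sketch, not built */
static int demo_final_choice(bool has_nonrot_disk, unsigned int min_pending,
			     int best_pending_disk, int best_dist_disk)
{
	/* any SSD present, or a fully idle disk: pick by queue depth */
	if (has_nonrot_disk || min_pending == 0)
		return best_pending_disk;
	/* all rotational and all busy: pick by head distance */
	return best_dist_disk;
}
#endif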

static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);
			else
				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	md_bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void *)bio->bi_disk;
		bio->bi_next = NULL;
		bio_set_dev(bio, rdev->bdev);
		if (test_bit(Faulty, &rdev->flags)) {
			bio_io_error(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bio->bi_disk->queue)))
			/* Just ignore it */
			bio_endio(bio);
		else
			generic_make_request(bio);
		bio = next;
		cond_resched();
	}
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct blk_plug plug;
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive. Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),
			    conf->resync_lock);

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    exists in the corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the max
	 *    resync count allowed on the current I/O barrier bucket has
	 *    been reached.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    (!conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
			     test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
			    conf->resync_lock);

	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		atomic_dec(&conf->barrier[idx]);
		spin_unlock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		return -EINTR;
	}

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);

	return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}
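
/*
 * raise_barrier() and _wait_barrier() below rely on a classic
 * store-buffering pattern: each side increments its own counter,
 * issues a full memory barrier, then reads the other side's counter,
 * so at least one of the two is guaranteed to observe the other.
 * Reduced to its skeleton (function names invented, same ordering as
 * the real code):
 */
#if 0	/* illustrative sketch, not built */
static bool demo_resync_side(struct r1conf *conf, int idx)
{
	atomic_inc(&conf->barrier[idx]);		/* publish intent */
	smp_mb__after_atomic();				/* full barrier */
	return atomic_read(&conf->nr_pending[idx]) != 0; /* then look */
}

static bool demo_io_side(struct r1conf *conf, int idx)
{
	atomic_inc(&conf->nr_pending[idx]);		/* publish intent */
	smp_mb__after_atomic();				/* full barrier */
	return atomic_read(&conf->barrier[idx]) != 0;	/* then look */
}
#endif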

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If, while we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 * freeze_array().
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))
		return;

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen,
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	int idx, ret;

	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);

	return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * go quiet.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at
	 * _wait_barrier() or wait_read_barrier(). The flying I/Os will
	 * either complete or be queued. When everything goes quiet,
	 * there are only queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1, in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		conf->wait_barrier,
		get_unqueued_pending(conf) == extra,
		conf->resync_lock,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
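
/*
 * Typical freeze/unfreeze usage: quiesce all I/O around an operation
 * that must see a stable array.  handle_read_error(), for instance,
 * runs with extra == 1 because the failed read it is about to repair
 * is itself still pending.  A hedged sketch of that pattern, calling
 * fix_read_error() which is defined later in this file:
 */
#if 0	/* illustrative sketch, not built */
static void demo_quiesce_and_fix(struct r1conf *conf, struct r1bio *r1_bio)
{
	freeze_array(conf, 1);	/* the erroring bio itself counts as the 1 */
	fix_read_error(conf, r1_bio->read_disk, r1_bio->sector,
		       r1_bio->sectors);
	unfreeze_array(conf);
}
#endif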
114741743c1fSShaohua Li skip_copy: 114856a64c17SLuis de Bethencourt r1_bio->behind_master_bio = behind_bio; 1149841c1316SMing Lei set_bit(R1BIO_BehindIO, &r1_bio->state); 1150841c1316SMing Lei 115116d56e2fSShaohua Li return; 1152841c1316SMing Lei 1153841c1316SMing Lei free_pages: 11544f024f37SKent Overstreet pr_debug("%dB behind alloc failed, doing sync I/O\n", 11554f024f37SKent Overstreet bio->bi_iter.bi_size); 1156841c1316SMing Lei bio_free_pages(behind_bio); 115716d56e2fSShaohua Li bio_put(behind_bio); 11584b6d287fSNeilBrown } 11594b6d287fSNeilBrown 1160f54a9d0eSNeilBrown struct raid1_plug_cb { 1161f54a9d0eSNeilBrown struct blk_plug_cb cb; 1162f54a9d0eSNeilBrown struct bio_list pending; 1163f54a9d0eSNeilBrown int pending_cnt; 1164f54a9d0eSNeilBrown }; 1165f54a9d0eSNeilBrown 1166f54a9d0eSNeilBrown static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) 1167f54a9d0eSNeilBrown { 1168f54a9d0eSNeilBrown struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, 1169f54a9d0eSNeilBrown cb); 1170f54a9d0eSNeilBrown struct mddev *mddev = plug->cb.data; 1171f54a9d0eSNeilBrown struct r1conf *conf = mddev->private; 1172f54a9d0eSNeilBrown struct bio *bio; 1173f54a9d0eSNeilBrown 1174874807a8SNeilBrown if (from_schedule || current->bio_list) { 1175f54a9d0eSNeilBrown spin_lock_irq(&conf->device_lock); 1176f54a9d0eSNeilBrown bio_list_merge(&conf->pending_bio_list, &plug->pending); 1177f54a9d0eSNeilBrown conf->pending_count += plug->pending_cnt; 1178f54a9d0eSNeilBrown spin_unlock_irq(&conf->device_lock); 1179ee0b0244SNeilBrown wake_up(&conf->wait_barrier); 1180f54a9d0eSNeilBrown md_wakeup_thread(mddev->thread); 1181f54a9d0eSNeilBrown kfree(plug); 1182f54a9d0eSNeilBrown return; 1183f54a9d0eSNeilBrown } 1184f54a9d0eSNeilBrown 1185f54a9d0eSNeilBrown /* we aren't scheduling, so we can do the write-out directly. 
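 * (current->bio_list is NULL and this is not a schedule-time unplug,
 * so submitting from here cannot recurse into a bio_list we hold;
 * deferring to the raid1d thread would only add latency)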
*/ 1186f54a9d0eSNeilBrown bio = bio_list_get(&plug->pending); 1187673ca68dSNeilBrown flush_bio_list(conf, bio); 1188f54a9d0eSNeilBrown kfree(plug); 1189f54a9d0eSNeilBrown } 1190f54a9d0eSNeilBrown 1191689389a0SNeilBrown static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) 1192689389a0SNeilBrown { 1193689389a0SNeilBrown r1_bio->master_bio = bio; 1194689389a0SNeilBrown r1_bio->sectors = bio_sectors(bio); 1195689389a0SNeilBrown r1_bio->state = 0; 1196689389a0SNeilBrown r1_bio->mddev = mddev; 1197689389a0SNeilBrown r1_bio->sector = bio->bi_iter.bi_sector; 1198689389a0SNeilBrown } 1199689389a0SNeilBrown 1200fd76863eScolyli@suse.de static inline struct r1bio * 1201689389a0SNeilBrown alloc_r1bio(struct mddev *mddev, struct bio *bio) 1202fd76863eScolyli@suse.de { 1203fd76863eScolyli@suse.de struct r1conf *conf = mddev->private; 1204fd76863eScolyli@suse.de struct r1bio *r1_bio; 1205fd76863eScolyli@suse.de 1206afeee514SKent Overstreet r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); 1207689389a0SNeilBrown /* Ensure no bio records IO_BLOCKED */ 1208689389a0SNeilBrown memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); 1209689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1210fd76863eScolyli@suse.de return r1_bio; 1211fd76863eScolyli@suse.de } 1212fd76863eScolyli@suse.de 1213c230e7e5SNeilBrown static void raid1_read_request(struct mddev *mddev, struct bio *bio, 1214689389a0SNeilBrown int max_read_sectors, struct r1bio *r1_bio) 12151da177e4SLinus Torvalds { 1216e8096360SNeilBrown struct r1conf *conf = mddev->private; 12170eaf822cSJonathan Brassow struct raid1_info *mirror; 12181da177e4SLinus Torvalds struct bio *read_bio; 12193b046a97SRobert LeBlanc struct bitmap *bitmap = mddev->bitmap; 1220796a5cf0SMike Christie const int op = bio_op(bio); 12211eff9d32SJens Axboe const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 12221f68f0c4SNeilBrown int max_sectors; 1223d2eb35acSNeilBrown int rdisk; 1224689389a0SNeilBrown bool print_msg = !!r1_bio; 1225689389a0SNeilBrown char b[BDEVNAME_SIZE]; 1226689389a0SNeilBrown 1227689389a0SNeilBrown /* 1228689389a0SNeilBrown * If r1_bio is set, we are blocking the raid1d thread 1229689389a0SNeilBrown * so there is a tiny risk of deadlock. So ask for 1230689389a0SNeilBrown * emergency memory if needed. 1231689389a0SNeilBrown */ 1232689389a0SNeilBrown gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; 1233689389a0SNeilBrown 1234689389a0SNeilBrown if (print_msg) { 1235689389a0SNeilBrown /* Need to get the block device name carefully */ 1236689389a0SNeilBrown struct md_rdev *rdev; 1237689389a0SNeilBrown rcu_read_lock(); 1238689389a0SNeilBrown rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); 1239689389a0SNeilBrown if (rdev) 1240689389a0SNeilBrown bdevname(rdev->bdev, b); 1241689389a0SNeilBrown else 1242689389a0SNeilBrown strcpy(b, "???"); 1243689389a0SNeilBrown rcu_read_unlock(); 1244689389a0SNeilBrown } 1245d2eb35acSNeilBrown 1246fd76863eScolyli@suse.de /* 1247fd76863eScolyli@suse.de * Still need barrier for READ in case that whole 1248fd76863eScolyli@suse.de * array is frozen. 
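 * (freeze_array() waits for get_unqueued_pending() to settle, and a
 * read counts in conf->nr_pending[] like any other I/O, so even a
 * READ must register in wait_read_barrier() below or it could slip
 * past a frozen array)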
1249fd76863eScolyli@suse.de */ 1250fd76863eScolyli@suse.de wait_read_barrier(conf, bio->bi_iter.bi_sector); 12513b046a97SRobert LeBlanc 1252689389a0SNeilBrown if (!r1_bio) 1253689389a0SNeilBrown r1_bio = alloc_r1bio(mddev, bio); 1254689389a0SNeilBrown else 1255689389a0SNeilBrown init_r1bio(r1_bio, mddev, bio); 1256c230e7e5SNeilBrown r1_bio->sectors = max_read_sectors; 1257fd76863eScolyli@suse.de 1258fd76863eScolyli@suse.de /* 1259fd76863eScolyli@suse.de * make_request() can abort the operation when read-ahead is being 1260fd76863eScolyli@suse.de * used and no empty request is available. 1261fd76863eScolyli@suse.de */ 1262d2eb35acSNeilBrown rdisk = read_balance(conf, r1_bio, &max_sectors); 12631da177e4SLinus Torvalds 12641da177e4SLinus Torvalds if (rdisk < 0) { 12651da177e4SLinus Torvalds /* couldn't find anywhere to read from */ 1266689389a0SNeilBrown if (print_msg) { 1267689389a0SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 1268689389a0SNeilBrown mdname(mddev), 1269689389a0SNeilBrown b, 1270689389a0SNeilBrown (unsigned long long)r1_bio->sector); 1271689389a0SNeilBrown } 12721da177e4SLinus Torvalds raid_end_bio_io(r1_bio); 12735a7bbad2SChristoph Hellwig return; 12741da177e4SLinus Torvalds } 12751da177e4SLinus Torvalds mirror = conf->mirrors + rdisk; 12761da177e4SLinus Torvalds 1277689389a0SNeilBrown if (print_msg) 1278689389a0SNeilBrown pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", 1279689389a0SNeilBrown mdname(mddev), 1280689389a0SNeilBrown (unsigned long long)r1_bio->sector, 1281689389a0SNeilBrown bdevname(mirror->rdev->bdev, b)); 1282689389a0SNeilBrown 1283e555190dSNeilBrown if (test_bit(WriteMostly, &mirror->rdev->flags) && 1284e555190dSNeilBrown bitmap) { 12853b046a97SRobert LeBlanc /* 12863b046a97SRobert LeBlanc * Reading from a write-mostly device must take care not to 12873b046a97SRobert LeBlanc * over-take any writes that are 'behind' 1288e555190dSNeilBrown */ 1289578b54adSNeilBrown raid1_log(mddev, "wait behind writes"); 1290e555190dSNeilBrown wait_event(bitmap->behind_wait, 1291e555190dSNeilBrown atomic_read(&bitmap->behind_writes) == 0); 1292e555190dSNeilBrown } 1293c230e7e5SNeilBrown 1294c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1295c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1296afeee514SKent Overstreet gfp, &conf->bio_split); 1297c230e7e5SNeilBrown bio_chain(split, bio); 1298c230e7e5SNeilBrown generic_make_request(bio); 1299c230e7e5SNeilBrown bio = split; 1300c230e7e5SNeilBrown r1_bio->master_bio = bio; 1301c230e7e5SNeilBrown r1_bio->sectors = max_sectors; 1302c230e7e5SNeilBrown } 1303c230e7e5SNeilBrown 13041da177e4SLinus Torvalds r1_bio->read_disk = rdisk; 13051da177e4SLinus Torvalds 1306afeee514SKent Overstreet read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); 13071da177e4SLinus Torvalds 13081da177e4SLinus Torvalds r1_bio->bios[rdisk] = read_bio; 13091da177e4SLinus Torvalds 13104f024f37SKent Overstreet read_bio->bi_iter.bi_sector = r1_bio->sector + 13114f024f37SKent Overstreet mirror->rdev->data_offset; 131274d46992SChristoph Hellwig bio_set_dev(read_bio, mirror->rdev->bdev); 13131da177e4SLinus Torvalds read_bio->bi_end_io = raid1_end_read_request; 1314796a5cf0SMike Christie bio_set_op_attrs(read_bio, op, do_sync); 13152e52d449SNeilBrown if (test_bit(FailFast, &mirror->rdev->flags) && 13162e52d449SNeilBrown test_bit(R1BIO_FailFast, &r1_bio->state)) 13172e52d449SNeilBrown read_bio->bi_opf |= MD_FAILFAST; 13181da177e4SLinus Torvalds 
read_bio->bi_private = r1_bio;
13191da177e4SLinus Torvalds
1320109e3765SNeilBrown if (mddev->gendisk)
132174d46992SChristoph Hellwig trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
132274d46992SChristoph Hellwig disk_devt(mddev->gendisk), r1_bio->sector);
1323109e3765SNeilBrown
13241da177e4SLinus Torvalds generic_make_request(read_bio);
13251da177e4SLinus Torvalds }
13261da177e4SLinus Torvalds
1327c230e7e5SNeilBrown static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1328c230e7e5SNeilBrown int max_write_sectors)
13293b046a97SRobert LeBlanc {
13303b046a97SRobert LeBlanc struct r1conf *conf = mddev->private;
1331fd76863eScolyli@suse.de struct r1bio *r1_bio;
13323b046a97SRobert LeBlanc int i, disks;
13333b046a97SRobert LeBlanc struct bitmap *bitmap = mddev->bitmap;
13343b046a97SRobert LeBlanc unsigned long flags;
13353b046a97SRobert LeBlanc struct md_rdev *blocked_rdev;
13363b046a97SRobert LeBlanc struct blk_plug_cb *cb;
13373b046a97SRobert LeBlanc struct raid1_plug_cb *plug = NULL;
13383b046a97SRobert LeBlanc int first_clone;
13393b046a97SRobert LeBlanc int max_sectors;
13403b046a97SRobert LeBlanc
1341b3143b9aSNeilBrown if (mddev_is_clustered(mddev) &&
13423b046a97SRobert LeBlanc md_cluster_ops->area_resyncing(mddev, WRITE,
1343b3143b9aSNeilBrown bio->bi_iter.bi_sector, bio_end_sector(bio))) {
13443b046a97SRobert LeBlanc
13453b046a97SRobert LeBlanc DEFINE_WAIT(w);
13463b046a97SRobert LeBlanc for (;;) {
13473b046a97SRobert LeBlanc prepare_to_wait(&conf->wait_barrier,
1348ae89fd3dSMikulas Patocka &w, TASK_IDLE);
1349f81f7302SGuoqing Jiang if (!md_cluster_ops->area_resyncing(mddev, WRITE,
13503b046a97SRobert LeBlanc bio->bi_iter.bi_sector,
1351b3143b9aSNeilBrown bio_end_sector(bio)))
13523b046a97SRobert LeBlanc break;
13533b046a97SRobert LeBlanc schedule();
13543b046a97SRobert LeBlanc }
13553b046a97SRobert LeBlanc finish_wait(&conf->wait_barrier, &w);
13563b046a97SRobert LeBlanc }
1357f81f7302SGuoqing Jiang
1358f81f7302SGuoqing Jiang /*
1359f81f7302SGuoqing Jiang * Register the new request and wait if the reconstruction
1360f81f7302SGuoqing Jiang * thread has put up a bar for new requests.
1361f81f7302SGuoqing Jiang * Continue immediately if no resync is active currently.
1362f81f7302SGuoqing Jiang */
1363fd76863eScolyli@suse.de wait_barrier(conf, bio->bi_iter.bi_sector);
1364fd76863eScolyli@suse.de
1365689389a0SNeilBrown r1_bio = alloc_r1bio(mddev, bio);
1366c230e7e5SNeilBrown r1_bio->sectors = max_write_sectors;
13673b046a97SRobert LeBlanc
136834db0cd6SNeilBrown if (conf->pending_count >= max_queued_requests) {
136934db0cd6SNeilBrown md_wakeup_thread(mddev->thread);
1370578b54adSNeilBrown raid1_log(mddev, "wait queued");
137134db0cd6SNeilBrown wait_event(conf->wait_barrier,
137234db0cd6SNeilBrown conf->pending_count < max_queued_requests);
137334db0cd6SNeilBrown }
13741f68f0c4SNeilBrown /* first select target devices under rcu_lock and
13751da177e4SLinus Torvalds * inc refcount on their rdev. Record them by setting
13761da177e4SLinus Torvalds * bios[x] to bio
13771f68f0c4SNeilBrown * If there are known/acknowledged bad blocks on any device on
13781f68f0c4SNeilBrown * which we have seen a write error, we want to avoid writing those
13791f68f0c4SNeilBrown * blocks.
13801f68f0c4SNeilBrown * This potentially requires several writes to write around
13811f68f0c4SNeilBrown * the bad blocks. Each set of writes gets its own r1bio
13821f68f0c4SNeilBrown * with a set of bios attached.
13831da177e4SLinus Torvalds */ 1384c3b328acSNeilBrown 13858f19ccb2SNeilBrown disks = conf->raid_disks * 2; 13866bfe0b49SDan Williams retry_write: 13876bfe0b49SDan Williams blocked_rdev = NULL; 13881da177e4SLinus Torvalds rcu_read_lock(); 13891f68f0c4SNeilBrown max_sectors = r1_bio->sectors; 13901da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 13913cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 13926bfe0b49SDan Williams if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 13936bfe0b49SDan Williams atomic_inc(&rdev->nr_pending); 13946bfe0b49SDan Williams blocked_rdev = rdev; 13956bfe0b49SDan Williams break; 13966bfe0b49SDan Williams } 13971da177e4SLinus Torvalds r1_bio->bios[i] = NULL; 13988ae12666SKent Overstreet if (!rdev || test_bit(Faulty, &rdev->flags)) { 13998f19ccb2SNeilBrown if (i < conf->raid_disks) 14001f68f0c4SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 14011f68f0c4SNeilBrown continue; 1402964147d5SNeilBrown } 14031f68f0c4SNeilBrown 14041f68f0c4SNeilBrown atomic_inc(&rdev->nr_pending); 14051f68f0c4SNeilBrown if (test_bit(WriteErrorSeen, &rdev->flags)) { 14061f68f0c4SNeilBrown sector_t first_bad; 14071f68f0c4SNeilBrown int bad_sectors; 14081f68f0c4SNeilBrown int is_bad; 14091f68f0c4SNeilBrown 14103b046a97SRobert LeBlanc is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, 14111f68f0c4SNeilBrown &first_bad, &bad_sectors); 14121f68f0c4SNeilBrown if (is_bad < 0) { 14131f68f0c4SNeilBrown /* mustn't write here until the bad block is 14141f68f0c4SNeilBrown * acknowledged*/ 14151f68f0c4SNeilBrown set_bit(BlockedBadBlocks, &rdev->flags); 14161f68f0c4SNeilBrown blocked_rdev = rdev; 14171f68f0c4SNeilBrown break; 14181f68f0c4SNeilBrown } 14191f68f0c4SNeilBrown if (is_bad && first_bad <= r1_bio->sector) { 14201f68f0c4SNeilBrown /* Cannot write here at all */ 14211f68f0c4SNeilBrown bad_sectors -= (r1_bio->sector - first_bad); 14221f68f0c4SNeilBrown if (bad_sectors < max_sectors) 14231f68f0c4SNeilBrown /* mustn't write more than bad_sectors 14241f68f0c4SNeilBrown * to other devices yet 14251f68f0c4SNeilBrown */ 14261f68f0c4SNeilBrown max_sectors = bad_sectors; 14271f68f0c4SNeilBrown rdev_dec_pending(rdev, mddev); 14281f68f0c4SNeilBrown /* We don't set R1BIO_Degraded as that 14291f68f0c4SNeilBrown * only applies if the disk is 14301f68f0c4SNeilBrown * missing, so it might be re-added, 14311f68f0c4SNeilBrown * and we want to know to recover this 14321f68f0c4SNeilBrown * chunk. 
14331f68f0c4SNeilBrown * In this case the device is here, 14341f68f0c4SNeilBrown * and the fact that this chunk is not 14351f68f0c4SNeilBrown * in-sync is recorded in the bad 14361f68f0c4SNeilBrown * block log 14371f68f0c4SNeilBrown */ 14381f68f0c4SNeilBrown continue; 14391f68f0c4SNeilBrown } 14401f68f0c4SNeilBrown if (is_bad) { 14411f68f0c4SNeilBrown int good_sectors = first_bad - r1_bio->sector; 14421f68f0c4SNeilBrown if (good_sectors < max_sectors) 14431f68f0c4SNeilBrown max_sectors = good_sectors; 14441f68f0c4SNeilBrown } 14451f68f0c4SNeilBrown } 14461f68f0c4SNeilBrown r1_bio->bios[i] = bio; 14471da177e4SLinus Torvalds } 14481da177e4SLinus Torvalds rcu_read_unlock(); 14491da177e4SLinus Torvalds 14506bfe0b49SDan Williams if (unlikely(blocked_rdev)) { 14516bfe0b49SDan Williams /* Wait for this device to become unblocked */ 14526bfe0b49SDan Williams int j; 14536bfe0b49SDan Williams 14546bfe0b49SDan Williams for (j = 0; j < i; j++) 14556bfe0b49SDan Williams if (r1_bio->bios[j]) 14566bfe0b49SDan Williams rdev_dec_pending(conf->mirrors[j].rdev, mddev); 14571f68f0c4SNeilBrown r1_bio->state = 0; 1458fd76863eScolyli@suse.de allow_barrier(conf, bio->bi_iter.bi_sector); 1459578b54adSNeilBrown raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 14606bfe0b49SDan Williams md_wait_for_blocked_rdev(blocked_rdev, mddev); 1461fd76863eScolyli@suse.de wait_barrier(conf, bio->bi_iter.bi_sector); 14626bfe0b49SDan Williams goto retry_write; 14636bfe0b49SDan Williams } 14646bfe0b49SDan Williams 1465c230e7e5SNeilBrown if (max_sectors < bio_sectors(bio)) { 1466c230e7e5SNeilBrown struct bio *split = bio_split(bio, max_sectors, 1467afeee514SKent Overstreet GFP_NOIO, &conf->bio_split); 1468c230e7e5SNeilBrown bio_chain(split, bio); 1469c230e7e5SNeilBrown generic_make_request(bio); 1470c230e7e5SNeilBrown bio = split; 1471c230e7e5SNeilBrown r1_bio->master_bio = bio; 14721f68f0c4SNeilBrown r1_bio->sectors = max_sectors; 1473191ea9b2SNeilBrown } 14744b6d287fSNeilBrown 14754e78064fSNeilBrown atomic_set(&r1_bio->remaining, 1); 14764b6d287fSNeilBrown atomic_set(&r1_bio->behind_remaining, 0); 1477191ea9b2SNeilBrown 14781f68f0c4SNeilBrown first_clone = 1; 1479d8c84c4fSMing Lei 14801da177e4SLinus Torvalds for (i = 0; i < disks; i++) { 14818e58e327SMing Lei struct bio *mbio = NULL; 14821da177e4SLinus Torvalds if (!r1_bio->bios[i]) 14831da177e4SLinus Torvalds continue; 14841da177e4SLinus Torvalds 14851f68f0c4SNeilBrown if (first_clone) { 14861f68f0c4SNeilBrown /* do behind I/O ? 
14871f68f0c4SNeilBrown * Not if there are too many, or cannot 14881f68f0c4SNeilBrown * allocate memory, or a reader on WriteMostly 14891f68f0c4SNeilBrown * is waiting for behind writes to flush */ 14901f68f0c4SNeilBrown if (bitmap && 14911f68f0c4SNeilBrown (atomic_read(&bitmap->behind_writes) 14921f68f0c4SNeilBrown < mddev->bitmap_info.max_write_behind) && 14938e58e327SMing Lei !waitqueue_active(&bitmap->behind_wait)) { 149416d56e2fSShaohua Li alloc_behind_master_bio(r1_bio, bio); 14958e58e327SMing Lei } 14961da177e4SLinus Torvalds 1497e64e4018SAndy Shevchenko md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, 1498e64e4018SAndy Shevchenko test_bit(R1BIO_BehindIO, &r1_bio->state)); 14991f68f0c4SNeilBrown first_clone = 0; 15001f68f0c4SNeilBrown } 15018e58e327SMing Lei 1502841c1316SMing Lei if (r1_bio->behind_master_bio) 1503841c1316SMing Lei mbio = bio_clone_fast(r1_bio->behind_master_bio, 1504afeee514SKent Overstreet GFP_NOIO, &mddev->bio_set); 1505c230e7e5SNeilBrown else 1506afeee514SKent Overstreet mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); 15078e58e327SMing Lei 1508841c1316SMing Lei if (r1_bio->behind_master_bio) { 15093e148a32SGuoqing Jiang struct md_rdev *rdev = conf->mirrors[i].rdev; 15103e148a32SGuoqing Jiang 15113e148a32SGuoqing Jiang if (test_bit(WBCollisionCheck, &rdev->flags)) { 15123e148a32SGuoqing Jiang sector_t lo = r1_bio->sector; 15133e148a32SGuoqing Jiang sector_t hi = r1_bio->sector + r1_bio->sectors; 15143e148a32SGuoqing Jiang 15153e148a32SGuoqing Jiang wait_event(rdev->wb_io_wait, 15163e148a32SGuoqing Jiang check_and_add_wb(rdev, lo, hi) == 0); 15173e148a32SGuoqing Jiang } 15183e148a32SGuoqing Jiang if (test_bit(WriteMostly, &rdev->flags)) 15194b6d287fSNeilBrown atomic_inc(&r1_bio->behind_remaining); 15204b6d287fSNeilBrown } 15214b6d287fSNeilBrown 15221f68f0c4SNeilBrown r1_bio->bios[i] = mbio; 15231f68f0c4SNeilBrown 15244f024f37SKent Overstreet mbio->bi_iter.bi_sector = (r1_bio->sector + 15251f68f0c4SNeilBrown conf->mirrors[i].rdev->data_offset); 152674d46992SChristoph Hellwig bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); 15271f68f0c4SNeilBrown mbio->bi_end_io = raid1_end_write_request; 1528a682e003SLinus Torvalds mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); 1529212e7eb7SNeilBrown if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && 1530212e7eb7SNeilBrown !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && 1531212e7eb7SNeilBrown conf->raid_disks - mddev->degraded > 1) 1532212e7eb7SNeilBrown mbio->bi_opf |= MD_FAILFAST; 15331f68f0c4SNeilBrown mbio->bi_private = r1_bio; 15341f68f0c4SNeilBrown 15351da177e4SLinus Torvalds atomic_inc(&r1_bio->remaining); 1536f54a9d0eSNeilBrown 1537109e3765SNeilBrown if (mddev->gendisk) 153874d46992SChristoph Hellwig trace_block_bio_remap(mbio->bi_disk->queue, 1539109e3765SNeilBrown mbio, disk_devt(mddev->gendisk), 1540109e3765SNeilBrown r1_bio->sector); 1541109e3765SNeilBrown /* flush_pending_writes() needs access to the rdev so...*/ 154274d46992SChristoph Hellwig mbio->bi_disk = (void *)conf->mirrors[i].rdev; 1543109e3765SNeilBrown 1544f54a9d0eSNeilBrown cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); 1545f54a9d0eSNeilBrown if (cb) 1546f54a9d0eSNeilBrown plug = container_of(cb, struct raid1_plug_cb, cb); 1547f54a9d0eSNeilBrown else 1548f54a9d0eSNeilBrown plug = NULL; 1549f54a9d0eSNeilBrown if (plug) { 1550f54a9d0eSNeilBrown bio_list_add(&plug->pending, mbio); 1551f54a9d0eSNeilBrown plug->pending_cnt++; 1552f54a9d0eSNeilBrown } else { 155323b245c0SShaohua Li 
spin_lock_irqsave(&conf->device_lock, flags);
15544e78064fSNeilBrown bio_list_add(&conf->pending_bio_list, mbio);
155534db0cd6SNeilBrown conf->pending_count++;
1556191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags);
1557b357f04aSNeilBrown md_wakeup_thread(mddev->thread);
15584e78064fSNeilBrown }
155923b245c0SShaohua Li }
15601f68f0c4SNeilBrown
1561079fa166SNeilBrown r1_bio_write_done(r1_bio);
1562079fa166SNeilBrown
1563079fa166SNeilBrown /* In case raid1d snuck in to freeze_array */
1564079fa166SNeilBrown wake_up(&conf->wait_barrier);
15651da177e4SLinus Torvalds }
15661da177e4SLinus Torvalds
1567cc27b0c7SNeilBrown static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
15683b046a97SRobert LeBlanc {
1569fd76863eScolyli@suse.de sector_t sectors;
15703b046a97SRobert LeBlanc
1571775d7831SDavid Jeffery if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1572775d7831SDavid Jeffery && md_flush_request(mddev, bio))
1573cc27b0c7SNeilBrown return true;
15743b046a97SRobert LeBlanc
1575c230e7e5SNeilBrown /*
1576c230e7e5SNeilBrown * There is a limit to the maximum size, but
1577c230e7e5SNeilBrown * the read/write handler might find a lower limit
1578c230e7e5SNeilBrown * due to bad blocks. To avoid multiple splits,
1579c230e7e5SNeilBrown * we pass the maximum number of sectors down
1580c230e7e5SNeilBrown * and let the lower level perform the split.
1581c230e7e5SNeilBrown */
1582fd76863eScolyli@suse.de sectors = align_to_barrier_unit_end(
1583fd76863eScolyli@suse.de bio->bi_iter.bi_sector, bio_sectors(bio));
15843b046a97SRobert LeBlanc
1585c230e7e5SNeilBrown if (bio_data_dir(bio) == READ)
1586689389a0SNeilBrown raid1_read_request(mddev, bio, sectors, NULL);
1587cc27b0c7SNeilBrown else {
1588cc27b0c7SNeilBrown if (!md_write_start(mddev, bio))
1589cc27b0c7SNeilBrown return false;
1590c230e7e5SNeilBrown raid1_write_request(mddev, bio, sectors);
15913b046a97SRobert LeBlanc }
1592cc27b0c7SNeilBrown return true;
1593cc27b0c7SNeilBrown }
15943b046a97SRobert LeBlanc
1595849674e4SShaohua Li static void raid1_status(struct seq_file *seq, struct mddev *mddev)
15961da177e4SLinus Torvalds {
1597e8096360SNeilBrown struct r1conf *conf = mddev->private;
15981da177e4SLinus Torvalds int i;
15991da177e4SLinus Torvalds
16001da177e4SLinus Torvalds seq_printf(seq, " [%d/%d] [", conf->raid_disks,
160111ce99e6SNeilBrown conf->raid_disks - mddev->degraded);
1602ddac7c7eSNeilBrown rcu_read_lock();
1603ddac7c7eSNeilBrown for (i = 0; i < conf->raid_disks; i++) {
16043cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
16051da177e4SLinus Torvalds seq_printf(seq, "%s",
1606ddac7c7eSNeilBrown rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1607ddac7c7eSNeilBrown }
1608ddac7c7eSNeilBrown rcu_read_unlock();
16091da177e4SLinus Torvalds seq_printf(seq, "]");
16101da177e4SLinus Torvalds }
16111da177e4SLinus Torvalds
1612849674e4SShaohua Li static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
16131da177e4SLinus Torvalds {
16141da177e4SLinus Torvalds char b[BDEVNAME_SIZE];
1615e8096360SNeilBrown struct r1conf *conf = mddev->private;
1616423f04d6SNeilBrown unsigned long flags;
16171da177e4SLinus Torvalds
16181da177e4SLinus Torvalds /*
16191da177e4SLinus Torvalds * If it is not operational, then we have already marked it as dead
16209a567843SGuoqing Jiang * else if it is the last working disk with "fail_last_dev == false",
16219a567843SGuoqing Jiang * ignore the error, let the next level up know.
16221da177e4SLinus Torvalds * else mark the drive as failed 16231da177e4SLinus Torvalds */ 16242e52d449SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 16259a567843SGuoqing Jiang if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev 16264044ba58SNeilBrown && (conf->raid_disks - mddev->degraded) == 1) { 16271da177e4SLinus Torvalds /* 16281da177e4SLinus Torvalds * Don't fail the drive, act as though we were just a 16294044ba58SNeilBrown * normal single drive. 16304044ba58SNeilBrown * However don't try a recovery from this drive as 16314044ba58SNeilBrown * it is very likely to fail. 16321da177e4SLinus Torvalds */ 16335389042fSNeilBrown conf->recovery_disabled = mddev->recovery_disabled; 16342e52d449SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16351da177e4SLinus Torvalds return; 16364044ba58SNeilBrown } 1637de393cdeSNeilBrown set_bit(Blocked, &rdev->flags); 1638ebda52faSYufen Yu if (test_and_clear_bit(In_sync, &rdev->flags)) 16391da177e4SLinus Torvalds mddev->degraded++; 1640dd00a99eSNeilBrown set_bit(Faulty, &rdev->flags); 1641423f04d6SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 16421da177e4SLinus Torvalds /* 16431da177e4SLinus Torvalds * if recovery is running, make sure it aborts. 16441da177e4SLinus Torvalds */ 1645dfc70645SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 16462953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0, 16472953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 16481d41c216SNeilBrown pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" 1649067032bcSJoe Perches "md/raid1:%s: Operation continuing on %d devices.\n", 16509dd1e2faSNeilBrown mdname(mddev), bdevname(rdev->bdev, b), 16519dd1e2faSNeilBrown mdname(mddev), conf->raid_disks - mddev->degraded); 16521da177e4SLinus Torvalds } 16531da177e4SLinus Torvalds 1654e8096360SNeilBrown static void print_conf(struct r1conf *conf) 16551da177e4SLinus Torvalds { 16561da177e4SLinus Torvalds int i; 16571da177e4SLinus Torvalds 16581d41c216SNeilBrown pr_debug("RAID1 conf printout:\n"); 16591da177e4SLinus Torvalds if (!conf) { 16601d41c216SNeilBrown pr_debug("(!conf)\n"); 16611da177e4SLinus Torvalds return; 16621da177e4SLinus Torvalds } 16631d41c216SNeilBrown pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 16641da177e4SLinus Torvalds conf->raid_disks); 16651da177e4SLinus Torvalds 1666ddac7c7eSNeilBrown rcu_read_lock(); 16671da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 16681da177e4SLinus Torvalds char b[BDEVNAME_SIZE]; 16693cb03002SNeilBrown struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1670ddac7c7eSNeilBrown if (rdev) 16711d41c216SNeilBrown pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1672ddac7c7eSNeilBrown i, !test_bit(In_sync, &rdev->flags), 1673ddac7c7eSNeilBrown !test_bit(Faulty, &rdev->flags), 1674ddac7c7eSNeilBrown bdevname(rdev->bdev,b)); 16751da177e4SLinus Torvalds } 1676ddac7c7eSNeilBrown rcu_read_unlock(); 16771da177e4SLinus Torvalds } 16781da177e4SLinus Torvalds 1679e8096360SNeilBrown static void close_sync(struct r1conf *conf) 16801da177e4SLinus Torvalds { 1681f6eca2d4SNate Dailey int idx; 1682f6eca2d4SNate Dailey 1683f6eca2d4SNate Dailey for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { 1684f6eca2d4SNate Dailey _wait_barrier(conf, idx); 1685f6eca2d4SNate Dailey _allow_barrier(conf, idx); 1686f6eca2d4SNate Dailey } 16871da177e4SLinus Torvalds 1688afeee514SKent Overstreet mempool_exit(&conf->r1buf_pool); 16891da177e4SLinus Torvalds } 16901da177e4SLinus Torvalds 
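/*
 * Aside (sketch, not driver code): close_sync() above drains every
 * barrier bucket in turn. Assuming the sector_to_idx()/BARRIER_BUCKETS_NR
 * scheme used throughout this file, a sector maps to its bucket roughly as
 *
 *	idx = hash_long(sector_nr >> BARRIER_UNIT_SECTOR_BITS,
 *			BARRIER_BUCKETS_NR_BITS);
 *
 * so each barrier unit hashes to one of BARRIER_BUCKETS_NR independent
 * nr_pending/nr_waiting/nr_queued counters, and draining resync is a
 * fixed-size walk over the buckets rather than a scan of in-flight I/O.
 */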
1691fd01b88cSNeilBrown static int raid1_spare_active(struct mddev *mddev) 16921da177e4SLinus Torvalds { 16931da177e4SLinus Torvalds int i; 1694e8096360SNeilBrown struct r1conf *conf = mddev->private; 16956b965620SNeilBrown int count = 0; 16966b965620SNeilBrown unsigned long flags; 16971da177e4SLinus Torvalds 16981da177e4SLinus Torvalds /* 16991da177e4SLinus Torvalds * Find all failed disks within the RAID1 configuration 1700ddac7c7eSNeilBrown * and mark them readable. 1701ddac7c7eSNeilBrown * Called under mddev lock, so rcu protection not needed. 1702423f04d6SNeilBrown * device_lock used to avoid races with raid1_end_read_request 1703423f04d6SNeilBrown * which expects 'In_sync' flags and ->degraded to be consistent. 17041da177e4SLinus Torvalds */ 1705423f04d6SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 17061da177e4SLinus Torvalds for (i = 0; i < conf->raid_disks; i++) { 17073cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev; 17088c7a2c2bSNeilBrown struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 17098c7a2c2bSNeilBrown if (repl 17101aee41f6SGoldwyn Rodrigues && !test_bit(Candidate, &repl->flags) 17118c7a2c2bSNeilBrown && repl->recovery_offset == MaxSector 17128c7a2c2bSNeilBrown && !test_bit(Faulty, &repl->flags) 17138c7a2c2bSNeilBrown && !test_and_set_bit(In_sync, &repl->flags)) { 17148c7a2c2bSNeilBrown /* replacement has just become active */ 17158c7a2c2bSNeilBrown if (!rdev || 17168c7a2c2bSNeilBrown !test_and_clear_bit(In_sync, &rdev->flags)) 17178c7a2c2bSNeilBrown count++; 17188c7a2c2bSNeilBrown if (rdev) { 17198c7a2c2bSNeilBrown /* Replaced device not technically 17208c7a2c2bSNeilBrown * faulty, but we need to be sure 17218c7a2c2bSNeilBrown * it gets removed and never re-added 17228c7a2c2bSNeilBrown */ 17238c7a2c2bSNeilBrown set_bit(Faulty, &rdev->flags); 17248c7a2c2bSNeilBrown sysfs_notify_dirent_safe( 17258c7a2c2bSNeilBrown rdev->sysfs_state); 17268c7a2c2bSNeilBrown } 17278c7a2c2bSNeilBrown } 1728ddac7c7eSNeilBrown if (rdev 172961e4947cSLukasz Dorau && rdev->recovery_offset == MaxSector 1730ddac7c7eSNeilBrown && !test_bit(Faulty, &rdev->flags) 1731c04be0aaSNeilBrown && !test_and_set_bit(In_sync, &rdev->flags)) { 17326b965620SNeilBrown count++; 1733654e8b5aSJonathan Brassow sysfs_notify_dirent_safe(rdev->sysfs_state); 17341da177e4SLinus Torvalds } 17351da177e4SLinus Torvalds } 17366b965620SNeilBrown mddev->degraded -= count; 17376b965620SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 17381da177e4SLinus Torvalds 17391da177e4SLinus Torvalds print_conf(conf); 17406b965620SNeilBrown return count; 17411da177e4SLinus Torvalds } 17421da177e4SLinus Torvalds 1743fd01b88cSNeilBrown static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) 17441da177e4SLinus Torvalds { 1745e8096360SNeilBrown struct r1conf *conf = mddev->private; 1746199050eaSNeil Brown int err = -EEXIST; 174741158c7eSNeilBrown int mirror = 0; 17480eaf822cSJonathan Brassow struct raid1_info *p; 17496c2fce2eSNeil Brown int first = 0; 175030194636SNeilBrown int last = conf->raid_disks - 1; 17511da177e4SLinus Torvalds 17525389042fSNeilBrown if (mddev->recovery_disabled == conf->recovery_disabled) 17535389042fSNeilBrown return -EBUSY; 17545389042fSNeilBrown 17551501efadSDan Williams if (md_integrity_add_rdev(rdev, mddev)) 17561501efadSDan Williams return -ENXIO; 17571501efadSDan Williams 17586c2fce2eSNeil Brown if (rdev->raid_disk >= 0) 17596c2fce2eSNeil Brown first = last = rdev->raid_disk; 17606c2fce2eSNeil Brown 176170bcecdbSGoldwyn Rodrigues /* 176270bcecdbSGoldwyn 
Rodrigues * find the disk ... but prefer rdev->saved_raid_disk 176370bcecdbSGoldwyn Rodrigues * if possible. 176470bcecdbSGoldwyn Rodrigues */ 176570bcecdbSGoldwyn Rodrigues if (rdev->saved_raid_disk >= 0 && 176670bcecdbSGoldwyn Rodrigues rdev->saved_raid_disk >= first && 17679e753ba9SShaohua Li rdev->saved_raid_disk < conf->raid_disks && 176870bcecdbSGoldwyn Rodrigues conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 176970bcecdbSGoldwyn Rodrigues first = last = rdev->saved_raid_disk; 177070bcecdbSGoldwyn Rodrigues 17717ef449d1SNeilBrown for (mirror = first; mirror <= last; mirror++) { 17727ef449d1SNeilBrown p = conf->mirrors + mirror; 17737ef449d1SNeilBrown if (!p->rdev) { 17749092c02dSJonathan Brassow if (mddev->gendisk) 17758f6c2e4bSMartin K. Petersen disk_stack_limits(mddev->gendisk, rdev->bdev, 17768f6c2e4bSMartin K. Petersen rdev->data_offset << 9); 17771da177e4SLinus Torvalds 17781da177e4SLinus Torvalds p->head_position = 0; 17791da177e4SLinus Torvalds rdev->raid_disk = mirror; 1780199050eaSNeil Brown err = 0; 17816aea114aSNeilBrown /* As all devices are equivalent, we don't need a full recovery 17826aea114aSNeilBrown * if this was recently any drive of the array 17836aea114aSNeilBrown */ 17846aea114aSNeilBrown if (rdev->saved_raid_disk < 0) 178541158c7eSNeilBrown conf->fullsync = 1; 1786d6065f7bSSuzanne Wood rcu_assign_pointer(p->rdev, rdev); 17871da177e4SLinus Torvalds break; 17881da177e4SLinus Torvalds } 17897ef449d1SNeilBrown if (test_bit(WantReplacement, &p->rdev->flags) && 17907ef449d1SNeilBrown p[conf->raid_disks].rdev == NULL) { 17917ef449d1SNeilBrown /* Add this device as a replacement */ 17927ef449d1SNeilBrown clear_bit(In_sync, &rdev->flags); 17937ef449d1SNeilBrown set_bit(Replacement, &rdev->flags); 17947ef449d1SNeilBrown rdev->raid_disk = mirror; 17957ef449d1SNeilBrown err = 0; 17967ef449d1SNeilBrown conf->fullsync = 1; 17977ef449d1SNeilBrown rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); 17987ef449d1SNeilBrown break; 17997ef449d1SNeilBrown } 18007ef449d1SNeilBrown } 18019092c02dSJonathan Brassow if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 18028b904b5bSBart Van Assche blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); 18031da177e4SLinus Torvalds print_conf(conf); 1804199050eaSNeil Brown return err; 18051da177e4SLinus Torvalds } 18061da177e4SLinus Torvalds 1807b8321b68SNeilBrown static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 18081da177e4SLinus Torvalds { 1809e8096360SNeilBrown struct r1conf *conf = mddev->private; 18101da177e4SLinus Torvalds int err = 0; 1811b8321b68SNeilBrown int number = rdev->raid_disk; 18120eaf822cSJonathan Brassow struct raid1_info *p = conf->mirrors + number; 18131da177e4SLinus Torvalds 1814b014f14cSNeilBrown if (rdev != p->rdev) 1815b014f14cSNeilBrown p = conf->mirrors + conf->raid_disks + number; 1816b014f14cSNeilBrown 18171da177e4SLinus Torvalds print_conf(conf); 1818b8321b68SNeilBrown if (rdev == p->rdev) { 1819b2d444d7SNeilBrown if (test_bit(In_sync, &rdev->flags) || 18201da177e4SLinus Torvalds atomic_read(&rdev->nr_pending)) { 18211da177e4SLinus Torvalds err = -EBUSY; 18221da177e4SLinus Torvalds goto abort; 18231da177e4SLinus Torvalds } 1824046abeedSNeilBrown /* Only remove non-faulty devices if recovery 1825dfc70645SNeilBrown * is not possible. 
1826dfc70645SNeilBrown */ 1827dfc70645SNeilBrown if (!test_bit(Faulty, &rdev->flags) && 18285389042fSNeilBrown mddev->recovery_disabled != conf->recovery_disabled && 1829dfc70645SNeilBrown mddev->degraded < conf->raid_disks) { 1830dfc70645SNeilBrown err = -EBUSY; 1831dfc70645SNeilBrown goto abort; 1832dfc70645SNeilBrown } 18331da177e4SLinus Torvalds p->rdev = NULL; 1834d787be40SNeilBrown if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1835fbd568a3SPaul E. McKenney synchronize_rcu(); 18361da177e4SLinus Torvalds if (atomic_read(&rdev->nr_pending)) { 18371da177e4SLinus Torvalds /* lost the race, try later */ 18381da177e4SLinus Torvalds err = -EBUSY; 18391da177e4SLinus Torvalds p->rdev = rdev; 1840ac5e7113SAndre Noll goto abort; 1841d787be40SNeilBrown } 1842d787be40SNeilBrown } 1843d787be40SNeilBrown if (conf->mirrors[conf->raid_disks + number].rdev) { 18448c7a2c2bSNeilBrown /* We just removed a device that is being replaced. 18458c7a2c2bSNeilBrown * Move down the replacement. We drain all IO before 18468c7a2c2bSNeilBrown * doing this to avoid confusion. 18478c7a2c2bSNeilBrown */ 18488c7a2c2bSNeilBrown struct md_rdev *repl = 18498c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev; 1850e2d59925SNeilBrown freeze_array(conf, 0); 18513de59bb9SYufen Yu if (atomic_read(&repl->nr_pending)) { 18523de59bb9SYufen Yu /* It means that some queued IO of retry_list 18533de59bb9SYufen Yu * hold repl. Thus, we cannot set replacement 18543de59bb9SYufen Yu * as NULL, avoiding rdev NULL pointer 18553de59bb9SYufen Yu * dereference in sync_request_write and 18563de59bb9SYufen Yu * handle_write_finished. 18573de59bb9SYufen Yu */ 18583de59bb9SYufen Yu err = -EBUSY; 18593de59bb9SYufen Yu unfreeze_array(conf); 18603de59bb9SYufen Yu goto abort; 18613de59bb9SYufen Yu } 18628c7a2c2bSNeilBrown clear_bit(Replacement, &repl->flags); 18638c7a2c2bSNeilBrown p->rdev = repl; 18648c7a2c2bSNeilBrown conf->mirrors[conf->raid_disks + number].rdev = NULL; 1865e2d59925SNeilBrown unfreeze_array(conf); 1866e5bc9c3cSGuoqing Jiang } 1867e5bc9c3cSGuoqing Jiang 18688c7a2c2bSNeilBrown clear_bit(WantReplacement, &rdev->flags); 1869a91a2785SMartin K. Petersen err = md_integrity_register(mddev); 18701da177e4SLinus Torvalds } 18711da177e4SLinus Torvalds abort: 18721da177e4SLinus Torvalds 18731da177e4SLinus Torvalds print_conf(conf); 18741da177e4SLinus Torvalds return err; 18751da177e4SLinus Torvalds } 18761da177e4SLinus Torvalds 18774246a0b6SChristoph Hellwig static void end_sync_read(struct bio *bio) 18781da177e4SLinus Torvalds { 187998d30c58SMing Lei struct r1bio *r1_bio = get_resync_r1bio(bio); 18801da177e4SLinus Torvalds 18810fc280f6SNeilBrown update_head_pos(r1_bio->read_disk, r1_bio); 1882ba3ae3beSNamhyung Kim 18831da177e4SLinus Torvalds /* 18841da177e4SLinus Torvalds * we have read a block, now it needs to be re-written, 18851da177e4SLinus Torvalds * or re-read if the read failed. 
18861da177e4SLinus Torvalds * We don't do much here, just schedule handling by raid1d 18871da177e4SLinus Torvalds */ 18884e4cbee9SChristoph Hellwig if (!bio->bi_status) 18891da177e4SLinus Torvalds set_bit(R1BIO_Uptodate, &r1_bio->state); 1890d11c171eSNeilBrown 1891d11c171eSNeilBrown if (atomic_dec_and_test(&r1_bio->remaining)) 18921da177e4SLinus Torvalds reschedule_retry(r1_bio); 18931da177e4SLinus Torvalds } 18941da177e4SLinus Torvalds 1895dfcc34c9SNate Dailey static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) 1896dfcc34c9SNate Dailey { 1897dfcc34c9SNate Dailey sector_t sync_blocks = 0; 1898dfcc34c9SNate Dailey sector_t s = r1_bio->sector; 1899dfcc34c9SNate Dailey long sectors_to_go = r1_bio->sectors; 1900dfcc34c9SNate Dailey 1901dfcc34c9SNate Dailey /* make sure these bits don't get cleared. */ 1902dfcc34c9SNate Dailey do { 1903dfcc34c9SNate Dailey md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); 1904dfcc34c9SNate Dailey s += sync_blocks; 1905dfcc34c9SNate Dailey sectors_to_go -= sync_blocks; 1906dfcc34c9SNate Dailey } while (sectors_to_go > 0); 1907dfcc34c9SNate Dailey } 1908dfcc34c9SNate Dailey 1909449808a2SHou Tao static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) 1910449808a2SHou Tao { 1911449808a2SHou Tao if (atomic_dec_and_test(&r1_bio->remaining)) { 1912449808a2SHou Tao struct mddev *mddev = r1_bio->mddev; 1913449808a2SHou Tao int s = r1_bio->sectors; 1914449808a2SHou Tao 1915449808a2SHou Tao if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 1916449808a2SHou Tao test_bit(R1BIO_WriteError, &r1_bio->state)) 1917449808a2SHou Tao reschedule_retry(r1_bio); 1918449808a2SHou Tao else { 1919449808a2SHou Tao put_buf(r1_bio); 1920449808a2SHou Tao md_done_sync(mddev, s, uptodate); 1921449808a2SHou Tao } 1922449808a2SHou Tao } 1923449808a2SHou Tao } 1924449808a2SHou Tao 19254246a0b6SChristoph Hellwig static void end_sync_write(struct bio *bio) 19261da177e4SLinus Torvalds { 19274e4cbee9SChristoph Hellwig int uptodate = !bio->bi_status; 192898d30c58SMing Lei struct r1bio *r1_bio = get_resync_r1bio(bio); 1929fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 1930e8096360SNeilBrown struct r1conf *conf = mddev->private; 19314367af55SNeilBrown sector_t first_bad; 19324367af55SNeilBrown int bad_sectors; 1933854abd75SNeilBrown struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1934ba3ae3beSNamhyung Kim 19356b1117d5SNeilBrown if (!uptodate) { 1936dfcc34c9SNate Dailey abort_sync_write(mddev, r1_bio); 1937854abd75SNeilBrown set_bit(WriteErrorSeen, &rdev->flags); 1938854abd75SNeilBrown if (!test_and_set_bit(WantReplacement, &rdev->flags)) 193919d67169SNeilBrown set_bit(MD_RECOVERY_NEEDED, & 194019d67169SNeilBrown mddev->recovery); 1941d8f05d29SNeilBrown set_bit(R1BIO_WriteError, &r1_bio->state); 1942854abd75SNeilBrown } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, 19433a9f28a5SNeilBrown &first_bad, &bad_sectors) && 19443a9f28a5SNeilBrown !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, 19453a9f28a5SNeilBrown r1_bio->sector, 19463a9f28a5SNeilBrown r1_bio->sectors, 19473a9f28a5SNeilBrown &first_bad, &bad_sectors) 19483a9f28a5SNeilBrown ) 19494367af55SNeilBrown set_bit(R1BIO_MadeGood, &r1_bio->state); 1950e3b9703eSNeilBrown 1951449808a2SHou Tao put_sync_write_buf(r1_bio, uptodate); 19524367af55SNeilBrown } 19531da177e4SLinus Torvalds 19543cb03002SNeilBrown static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, 1955d8f05d29SNeilBrown int sectors, struct page *page, int rw) 1956d8f05d29SNeilBrown { 1957796a5cf0SMike 
Christie if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) 1958d8f05d29SNeilBrown /* success */ 1959d8f05d29SNeilBrown return 1; 196019d67169SNeilBrown if (rw == WRITE) { 1961d8f05d29SNeilBrown set_bit(WriteErrorSeen, &rdev->flags); 196219d67169SNeilBrown if (!test_and_set_bit(WantReplacement, 196319d67169SNeilBrown &rdev->flags)) 196419d67169SNeilBrown set_bit(MD_RECOVERY_NEEDED, & 196519d67169SNeilBrown rdev->mddev->recovery); 196619d67169SNeilBrown } 1967d8f05d29SNeilBrown /* need to record an error - either for the block or the device */ 1968d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1969d8f05d29SNeilBrown md_error(rdev->mddev, rdev); 1970d8f05d29SNeilBrown return 0; 1971d8f05d29SNeilBrown } 1972d8f05d29SNeilBrown 19739f2c9d12SNeilBrown static int fix_sync_read_error(struct r1bio *r1_bio) 19741da177e4SLinus Torvalds { 1975a68e5870SNeilBrown /* Try some synchronous reads of other devices to get 197669382e85SNeilBrown * good data, much like with normal read errors. Only 1977ddac7c7eSNeilBrown * read into the pages we already have so we don't 197869382e85SNeilBrown * need to re-issue the read request. 197969382e85SNeilBrown * We don't need to freeze the array, because being in an 198069382e85SNeilBrown * active sync request, there is no normal IO, and 198169382e85SNeilBrown * no overlapping syncs. 198206f60385SNeilBrown * We don't need to check is_badblock() again as we 198306f60385SNeilBrown * made sure that anything with a bad block in range 198406f60385SNeilBrown * will have bi_end_io clear. 19851da177e4SLinus Torvalds */ 1986fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 1987e8096360SNeilBrown struct r1conf *conf = mddev->private; 1988a68e5870SNeilBrown struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 198944cf0f4dSMing Lei struct page **pages = get_resync_pages(bio)->pages; 199069382e85SNeilBrown sector_t sect = r1_bio->sector; 199169382e85SNeilBrown int sectors = r1_bio->sectors; 199269382e85SNeilBrown int idx = 0; 19932e52d449SNeilBrown struct md_rdev *rdev; 19942e52d449SNeilBrown 19952e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev; 19962e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags)) { 19972e52d449SNeilBrown /* Don't try recovering from here - just fail it 19982e52d449SNeilBrown * ... 
unless it is the last working device of course */
19992e52d449SNeilBrown md_error(mddev, rdev);
20002e52d449SNeilBrown if (test_bit(Faulty, &rdev->flags))
20012e52d449SNeilBrown /* Don't try to read from here, but make sure
20022e52d449SNeilBrown * put_buf does its thing
20032e52d449SNeilBrown */
20042e52d449SNeilBrown bio->bi_end_io = end_sync_write;
20052e52d449SNeilBrown }
200669382e85SNeilBrown
200769382e85SNeilBrown while (sectors) {
200869382e85SNeilBrown int s = sectors;
200969382e85SNeilBrown int d = r1_bio->read_disk;
201069382e85SNeilBrown int success = 0;
201178d7f5f7SNeilBrown int start;
201269382e85SNeilBrown
201369382e85SNeilBrown if (s > (PAGE_SIZE>>9))
201469382e85SNeilBrown s = PAGE_SIZE >> 9;
201569382e85SNeilBrown do {
201669382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2017ddac7c7eSNeilBrown /* No rcu protection needed here; devices
2018ddac7c7eSNeilBrown * can only be removed when no resync is
2019ddac7c7eSNeilBrown * active, and resync is currently active
2020ddac7c7eSNeilBrown */
202169382e85SNeilBrown rdev = conf->mirrors[d].rdev;
20229d3d8011SNamhyung Kim if (sync_page_io(rdev, sect, s<<9,
202344cf0f4dSMing Lei pages[idx],
2024796a5cf0SMike Christie REQ_OP_READ, 0, false)) {
202569382e85SNeilBrown success = 1;
202669382e85SNeilBrown break;
202769382e85SNeilBrown }
202869382e85SNeilBrown }
202969382e85SNeilBrown d++;
20308f19ccb2SNeilBrown if (d == conf->raid_disks * 2)
203169382e85SNeilBrown d = 0;
203269382e85SNeilBrown } while (!success && d != r1_bio->read_disk);
203369382e85SNeilBrown
203478d7f5f7SNeilBrown if (!success) {
203578d7f5f7SNeilBrown char b[BDEVNAME_SIZE];
20363a9f28a5SNeilBrown int abort = 0;
20373a9f28a5SNeilBrown /* Cannot read from anywhere, this block is lost.
20383a9f28a5SNeilBrown * Record a bad block on each device. If that doesn't
20393a9f28a5SNeilBrown * work just disable and interrupt the recovery.
20403a9f28a5SNeilBrown * Don't fail devices as that won't really help.
20413a9f28a5SNeilBrown */ 20421d41c216SNeilBrown pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 204374d46992SChristoph Hellwig mdname(mddev), bio_devname(bio, b), 204478d7f5f7SNeilBrown (unsigned long long)r1_bio->sector); 20458f19ccb2SNeilBrown for (d = 0; d < conf->raid_disks * 2; d++) { 20463a9f28a5SNeilBrown rdev = conf->mirrors[d].rdev; 20473a9f28a5SNeilBrown if (!rdev || test_bit(Faulty, &rdev->flags)) 20483a9f28a5SNeilBrown continue; 20493a9f28a5SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 20503a9f28a5SNeilBrown abort = 1; 20513a9f28a5SNeilBrown } 20523a9f28a5SNeilBrown if (abort) { 2053d890fa2bSNeilBrown conf->recovery_disabled = 2054d890fa2bSNeilBrown mddev->recovery_disabled; 20553a9f28a5SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 205678d7f5f7SNeilBrown md_done_sync(mddev, r1_bio->sectors, 0); 205778d7f5f7SNeilBrown put_buf(r1_bio); 205878d7f5f7SNeilBrown return 0; 205978d7f5f7SNeilBrown } 20603a9f28a5SNeilBrown /* Try next page */ 20613a9f28a5SNeilBrown sectors -= s; 20623a9f28a5SNeilBrown sect += s; 20633a9f28a5SNeilBrown idx++; 20643a9f28a5SNeilBrown continue; 20653a9f28a5SNeilBrown } 206678d7f5f7SNeilBrown 206778d7f5f7SNeilBrown start = d; 206869382e85SNeilBrown /* write it back and re-read */ 206969382e85SNeilBrown while (d != r1_bio->read_disk) { 207069382e85SNeilBrown if (d == 0) 20718f19ccb2SNeilBrown d = conf->raid_disks * 2; 207269382e85SNeilBrown d--; 207369382e85SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 207469382e85SNeilBrown continue; 207569382e85SNeilBrown rdev = conf->mirrors[d].rdev; 2076d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 207744cf0f4dSMing Lei pages[idx], 2078d8f05d29SNeilBrown WRITE) == 0) { 207978d7f5f7SNeilBrown r1_bio->bios[d]->bi_end_io = NULL; 208078d7f5f7SNeilBrown rdev_dec_pending(rdev, mddev); 20819d3d8011SNamhyung Kim } 2082097426f6SNeilBrown } 2083097426f6SNeilBrown d = start; 2084097426f6SNeilBrown while (d != r1_bio->read_disk) { 2085097426f6SNeilBrown if (d == 0) 20868f19ccb2SNeilBrown d = conf->raid_disks * 2; 2087097426f6SNeilBrown d--; 2088097426f6SNeilBrown if (r1_bio->bios[d]->bi_end_io != end_sync_read) 2089097426f6SNeilBrown continue; 2090097426f6SNeilBrown rdev = conf->mirrors[d].rdev; 2091d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 209244cf0f4dSMing Lei pages[idx], 2093d8f05d29SNeilBrown READ) != 0) 20949d3d8011SNamhyung Kim atomic_add(s, &rdev->corrected_errors); 209569382e85SNeilBrown } 209669382e85SNeilBrown sectors -= s; 209769382e85SNeilBrown sect += s; 209869382e85SNeilBrown idx ++; 209969382e85SNeilBrown } 210078d7f5f7SNeilBrown set_bit(R1BIO_Uptodate, &r1_bio->state); 21014e4cbee9SChristoph Hellwig bio->bi_status = 0; 2102a68e5870SNeilBrown return 1; 210369382e85SNeilBrown } 2104d11c171eSNeilBrown 2105c95e6385SNeilBrown static void process_checks(struct r1bio *r1_bio) 2106a68e5870SNeilBrown { 2107a68e5870SNeilBrown /* We have read all readable devices. If we haven't 2108a68e5870SNeilBrown * got the block, then there is no hope left. 2109a68e5870SNeilBrown * If we have, then we want to do a comparison 2110a68e5870SNeilBrown * and skip the write if everything is the same. 
2111a68e5870SNeilBrown * If any blocks failed to read, then we need to 2112a68e5870SNeilBrown * attempt an over-write 2113a68e5870SNeilBrown */ 2114fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 2115e8096360SNeilBrown struct r1conf *conf = mddev->private; 2116a68e5870SNeilBrown int primary; 2117a68e5870SNeilBrown int i; 2118f4380a91Smajianpeng int vcnt; 2119a68e5870SNeilBrown 212030bc9b53SNeilBrown /* Fix variable parts of all bios */ 212130bc9b53SNeilBrown vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 212230bc9b53SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 21234e4cbee9SChristoph Hellwig blk_status_t status; 212430bc9b53SNeilBrown struct bio *b = r1_bio->bios[i]; 212598d30c58SMing Lei struct resync_pages *rp = get_resync_pages(b); 212630bc9b53SNeilBrown if (b->bi_end_io != end_sync_read) 212730bc9b53SNeilBrown continue; 21284246a0b6SChristoph Hellwig /* fixup the bio for reuse, but preserve errno */ 21294e4cbee9SChristoph Hellwig status = b->bi_status; 213030bc9b53SNeilBrown bio_reset(b); 21314e4cbee9SChristoph Hellwig b->bi_status = status; 21324f024f37SKent Overstreet b->bi_iter.bi_sector = r1_bio->sector + 213330bc9b53SNeilBrown conf->mirrors[i].rdev->data_offset; 213474d46992SChristoph Hellwig bio_set_dev(b, conf->mirrors[i].rdev->bdev); 213530bc9b53SNeilBrown b->bi_end_io = end_sync_read; 213698d30c58SMing Lei rp->raid_bio = r1_bio; 213798d30c58SMing Lei b->bi_private = rp; 213830bc9b53SNeilBrown 2139fb0eb5dfSMing Lei /* initialize bvec table again */ 2140fb0eb5dfSMing Lei md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); 214130bc9b53SNeilBrown } 21428f19ccb2SNeilBrown for (primary = 0; primary < conf->raid_disks * 2; primary++) 2143a68e5870SNeilBrown if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 21444e4cbee9SChristoph Hellwig !r1_bio->bios[primary]->bi_status) { 2145a68e5870SNeilBrown r1_bio->bios[primary]->bi_end_io = NULL; 2146a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2147a68e5870SNeilBrown break; 2148a68e5870SNeilBrown } 2149a68e5870SNeilBrown r1_bio->read_disk = primary; 21508f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 21512b070cfeSChristoph Hellwig int j = 0; 2152a68e5870SNeilBrown struct bio *pbio = r1_bio->bios[primary]; 2153a68e5870SNeilBrown struct bio *sbio = r1_bio->bios[i]; 21544e4cbee9SChristoph Hellwig blk_status_t status = sbio->bi_status; 215544cf0f4dSMing Lei struct page **ppages = get_resync_pages(pbio)->pages; 215644cf0f4dSMing Lei struct page **spages = get_resync_pages(sbio)->pages; 215760928a91SMing Lei struct bio_vec *bi; 21588fc04e6eSMing Lei int page_len[RESYNC_PAGES] = { 0 }; 21596dc4f100SMing Lei struct bvec_iter_all iter_all; 216078d7f5f7SNeilBrown 21612aabaa65SKent Overstreet if (sbio->bi_end_io != end_sync_read) 216278d7f5f7SNeilBrown continue; 21634246a0b6SChristoph Hellwig /* Now we can 'fixup' the error value */ 21644e4cbee9SChristoph Hellwig sbio->bi_status = 0; 2165a68e5870SNeilBrown 21662b070cfeSChristoph Hellwig bio_for_each_segment_all(bi, sbio, iter_all) 21672b070cfeSChristoph Hellwig page_len[j++] = bi->bv_len; 216860928a91SMing Lei 21694e4cbee9SChristoph Hellwig if (!status) { 2170a68e5870SNeilBrown for (j = vcnt; j-- ; ) { 217144cf0f4dSMing Lei if (memcmp(page_address(ppages[j]), 217244cf0f4dSMing Lei page_address(spages[j]), 217360928a91SMing Lei page_len[j])) 2174a68e5870SNeilBrown break; 2175a68e5870SNeilBrown } 2176a68e5870SNeilBrown } else 2177a68e5870SNeilBrown j = 0; 2178a68e5870SNeilBrown if (j >= 0) 21797f7583d4SJianpeng Ma 
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2180a68e5870SNeilBrown if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
21814e4cbee9SChristoph Hellwig && !status)) {
218278d7f5f7SNeilBrown /* No need to write to this device. */
2183a68e5870SNeilBrown sbio->bi_end_io = NULL;
2184a68e5870SNeilBrown rdev_dec_pending(conf->mirrors[i].rdev, mddev);
218578d7f5f7SNeilBrown continue;
218678d7f5f7SNeilBrown }
2187d3b45c2aSKent Overstreet
2188d3b45c2aSKent Overstreet bio_copy_data(sbio, pbio);
2189a68e5870SNeilBrown }
2190a68e5870SNeilBrown }
2191a68e5870SNeilBrown
21929f2c9d12SNeilBrown static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2193a68e5870SNeilBrown {
2194e8096360SNeilBrown struct r1conf *conf = mddev->private;
2195a68e5870SNeilBrown int i;
21968f19ccb2SNeilBrown int disks = conf->raid_disks * 2;
2197037d2ff6SGuoqing Jiang struct bio *wbio;
2198a68e5870SNeilBrown
2199a68e5870SNeilBrown if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2200a68e5870SNeilBrown /* ouch - failed to read all of that. */
2201a68e5870SNeilBrown if (!fix_sync_read_error(r1_bio))
2202a68e5870SNeilBrown return;
22037ca78d57SNeilBrown
22047ca78d57SNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2205c95e6385SNeilBrown process_checks(r1_bio);
2206c95e6385SNeilBrown
2207d11c171eSNeilBrown /*
2208d11c171eSNeilBrown * schedule writes
2209d11c171eSNeilBrown */
22101da177e4SLinus Torvalds atomic_set(&r1_bio->remaining, 1);
22111da177e4SLinus Torvalds for (i = 0; i < disks; i++) {
22121da177e4SLinus Torvalds wbio = r1_bio->bios[i];
22133e198f78SNeilBrown if (wbio->bi_end_io == NULL ||
22143e198f78SNeilBrown (wbio->bi_end_io == end_sync_read &&
22153e198f78SNeilBrown (i == r1_bio->read_disk ||
22163e198f78SNeilBrown !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
22171da177e4SLinus Torvalds continue;
2218dfcc34c9SNate Dailey if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2219dfcc34c9SNate Dailey abort_sync_write(mddev, r1_bio);
22200c9d5b12SNeilBrown continue;
2221dfcc34c9SNate Dailey }
22221da177e4SLinus Torvalds
2223796a5cf0SMike Christie bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2224212e7eb7SNeilBrown if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2225212e7eb7SNeilBrown wbio->bi_opf |= MD_FAILFAST;
2226212e7eb7SNeilBrown
22273e198f78SNeilBrown wbio->bi_end_io = end_sync_write;
22281da177e4SLinus Torvalds atomic_inc(&r1_bio->remaining);
2229aa8b57aaSKent Overstreet md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2230191ea9b2SNeilBrown
22311da177e4SLinus Torvalds generic_make_request(wbio);
22321da177e4SLinus Torvalds }
22331da177e4SLinus Torvalds
2234449808a2SHou Tao put_sync_write_buf(r1_bio, 1);
22351da177e4SLinus Torvalds }
22361da177e4SLinus Torvalds
22371da177e4SLinus Torvalds /*
22381da177e4SLinus Torvalds * This is a kernel thread which:
22391da177e4SLinus Torvalds *
22401da177e4SLinus Torvalds * 1. Retries failed read operations on working mirrors.
22411da177e4SLinus Torvalds * 2. Updates the raid superblock when problems are encountered.
2242d2eb35acSNeilBrown * 3. Performs writes following reads for array synchronising.
22431da177e4SLinus Torvalds */ 22441da177e4SLinus Torvalds 2245e8096360SNeilBrown static void fix_read_error(struct r1conf *conf, int read_disk, 2246867868fbSNeilBrown sector_t sect, int sectors) 2247867868fbSNeilBrown { 2248fd01b88cSNeilBrown struct mddev *mddev = conf->mddev; 2249867868fbSNeilBrown while(sectors) { 2250867868fbSNeilBrown int s = sectors; 2251867868fbSNeilBrown int d = read_disk; 2252867868fbSNeilBrown int success = 0; 2253867868fbSNeilBrown int start; 22543cb03002SNeilBrown struct md_rdev *rdev; 2255867868fbSNeilBrown 2256867868fbSNeilBrown if (s > (PAGE_SIZE>>9)) 2257867868fbSNeilBrown s = PAGE_SIZE >> 9; 2258867868fbSNeilBrown 2259867868fbSNeilBrown do { 2260d2eb35acSNeilBrown sector_t first_bad; 2261d2eb35acSNeilBrown int bad_sectors; 2262d2eb35acSNeilBrown 2263707a6a42SNeilBrown rcu_read_lock(); 2264707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2265867868fbSNeilBrown if (rdev && 2266da8840a7Smajianpeng (test_bit(In_sync, &rdev->flags) || 2267da8840a7Smajianpeng (!test_bit(Faulty, &rdev->flags) && 2268da8840a7Smajianpeng rdev->recovery_offset >= sect + s)) && 2269d2eb35acSNeilBrown is_badblock(rdev, sect, s, 2270707a6a42SNeilBrown &first_bad, &bad_sectors) == 0) { 2271707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2272707a6a42SNeilBrown rcu_read_unlock(); 2273707a6a42SNeilBrown if (sync_page_io(rdev, sect, s<<9, 2274796a5cf0SMike Christie conf->tmppage, REQ_OP_READ, 0, false)) 2275867868fbSNeilBrown success = 1; 2276707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2277707a6a42SNeilBrown if (success) 2278707a6a42SNeilBrown break; 2279707a6a42SNeilBrown } else 2280707a6a42SNeilBrown rcu_read_unlock(); 2281867868fbSNeilBrown d++; 22828f19ccb2SNeilBrown if (d == conf->raid_disks * 2) 2283867868fbSNeilBrown d = 0; 2284867868fbSNeilBrown } while (!success && d != read_disk); 2285867868fbSNeilBrown 2286867868fbSNeilBrown if (!success) { 2287d8f05d29SNeilBrown /* Cannot read from anywhere - mark it bad */ 22883cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[read_disk].rdev; 2289d8f05d29SNeilBrown if (!rdev_set_badblocks(rdev, sect, s, 0)) 2290d8f05d29SNeilBrown md_error(mddev, rdev); 2291867868fbSNeilBrown break; 2292867868fbSNeilBrown } 2293867868fbSNeilBrown /* write it back and re-read */ 2294867868fbSNeilBrown start = d; 2295867868fbSNeilBrown while (d != read_disk) { 2296867868fbSNeilBrown if (d==0) 22978f19ccb2SNeilBrown d = conf->raid_disks * 2; 2298867868fbSNeilBrown d--; 2299707a6a42SNeilBrown rcu_read_lock(); 2300707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2301867868fbSNeilBrown if (rdev && 2302707a6a42SNeilBrown !test_bit(Faulty, &rdev->flags)) { 2303707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2304707a6a42SNeilBrown rcu_read_unlock(); 2305d8f05d29SNeilBrown r1_sync_page_io(rdev, sect, s, 2306d8f05d29SNeilBrown conf->tmppage, WRITE); 2307707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2308707a6a42SNeilBrown } else 2309707a6a42SNeilBrown rcu_read_unlock(); 2310867868fbSNeilBrown } 2311867868fbSNeilBrown d = start; 2312867868fbSNeilBrown while (d != read_disk) { 2313867868fbSNeilBrown char b[BDEVNAME_SIZE]; 2314867868fbSNeilBrown if (d==0) 23158f19ccb2SNeilBrown d = conf->raid_disks * 2; 2316867868fbSNeilBrown d--; 2317707a6a42SNeilBrown rcu_read_lock(); 2318707a6a42SNeilBrown rdev = rcu_dereference(conf->mirrors[d].rdev); 2319867868fbSNeilBrown if (rdev && 2320b8cb6b4cSNeilBrown !test_bit(Faulty, &rdev->flags)) { 2321707a6a42SNeilBrown atomic_inc(&rdev->nr_pending); 2322707a6a42SNeilBrown rcu_read_unlock(); 
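/*
 * Verification pass: the loop above rewrote the good data held in
 * conf->tmppage to each mirror; the read-back below only credits
 * rdev->corrected_errors once the rewritten sectors prove readable
 * again, while a failed r1_sync_page_io() records a badblock or
 * fails the device.
 */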
2323d8f05d29SNeilBrown if (r1_sync_page_io(rdev, sect, s, 2324d8f05d29SNeilBrown conf->tmppage, READ)) { 2325867868fbSNeilBrown atomic_add(s, &rdev->corrected_errors); 23261d41c216SNeilBrown pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n", 2327867868fbSNeilBrown mdname(mddev), s, 2328969b755aSRandy Dunlap (unsigned long long)(sect + 2329969b755aSRandy Dunlap rdev->data_offset), 2330867868fbSNeilBrown bdevname(rdev->bdev, b)); 2331867868fbSNeilBrown } 2332707a6a42SNeilBrown rdev_dec_pending(rdev, mddev); 2333707a6a42SNeilBrown } else 2334707a6a42SNeilBrown rcu_read_unlock(); 2335867868fbSNeilBrown } 2336867868fbSNeilBrown sectors -= s; 2337867868fbSNeilBrown sect += s; 2338867868fbSNeilBrown } 2339867868fbSNeilBrown } 2340867868fbSNeilBrown 23419f2c9d12SNeilBrown static int narrow_write_error(struct r1bio *r1_bio, int i) 2342cd5ff9a1SNeilBrown { 2343fd01b88cSNeilBrown struct mddev *mddev = r1_bio->mddev; 2344e8096360SNeilBrown struct r1conf *conf = mddev->private; 23453cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[i].rdev; 2346cd5ff9a1SNeilBrown 2347cd5ff9a1SNeilBrown /* bio has the data to be written to device 'i' where 2348cd5ff9a1SNeilBrown * we just recently had a write error. 2349cd5ff9a1SNeilBrown * We repeatedly clone the bio and trim down to one block, 2350cd5ff9a1SNeilBrown * then try the write. Where the write fails we record 2351cd5ff9a1SNeilBrown * a bad block. 2352cd5ff9a1SNeilBrown * It is conceivable that the bio doesn't exactly align with 2353cd5ff9a1SNeilBrown * blocks. We must handle this somehow. 2354cd5ff9a1SNeilBrown * 2355cd5ff9a1SNeilBrown * We currently own a reference on the rdev. 2356cd5ff9a1SNeilBrown */ 2357cd5ff9a1SNeilBrown 2358cd5ff9a1SNeilBrown int block_sectors; 2359cd5ff9a1SNeilBrown sector_t sector; 2360cd5ff9a1SNeilBrown int sectors; 2361cd5ff9a1SNeilBrown int sect_to_write = r1_bio->sectors; 2362cd5ff9a1SNeilBrown int ok = 1; 2363cd5ff9a1SNeilBrown 2364cd5ff9a1SNeilBrown if (rdev->badblocks.shift < 0) 2365cd5ff9a1SNeilBrown return 0; 2366cd5ff9a1SNeilBrown 2367ab713cdcSNate Dailey block_sectors = roundup(1 << rdev->badblocks.shift, 2368ab713cdcSNate Dailey bdev_logical_block_size(rdev->bdev) >> 9); 2369cd5ff9a1SNeilBrown sector = r1_bio->sector; 2370cd5ff9a1SNeilBrown sectors = ((sector + block_sectors) 2371cd5ff9a1SNeilBrown & ~(sector_t)(block_sectors - 1)) 2372cd5ff9a1SNeilBrown - sector; 2373cd5ff9a1SNeilBrown 2374cd5ff9a1SNeilBrown while (sect_to_write) { 2375cd5ff9a1SNeilBrown struct bio *wbio; 2376cd5ff9a1SNeilBrown if (sectors > sect_to_write) 2377cd5ff9a1SNeilBrown sectors = sect_to_write; 2378cd5ff9a1SNeilBrown /* Write at 'sector' for 'sectors'*/ 2379cd5ff9a1SNeilBrown 2380b783863fSKent Overstreet if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 2381841c1316SMing Lei wbio = bio_clone_fast(r1_bio->behind_master_bio, 2382841c1316SMing Lei GFP_NOIO, 2383afeee514SKent Overstreet &mddev->bio_set); 2384b783863fSKent Overstreet } else { 2385d7a10308SMing Lei wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2386afeee514SKent Overstreet &mddev->bio_set); 2387b783863fSKent Overstreet } 2388b783863fSKent Overstreet 2389796a5cf0SMike Christie bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 23904f024f37SKent Overstreet wbio->bi_iter.bi_sector = r1_bio->sector; 23914f024f37SKent Overstreet wbio->bi_iter.bi_size = r1_bio->sectors << 9; 2392cd5ff9a1SNeilBrown 23936678d83fSKent Overstreet bio_trim(wbio, sector - r1_bio->sector, sectors); 23944f024f37SKent Overstreet wbio->bi_iter.bi_sector += rdev->data_offset; 
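		/* Aim the trimmed clone at this rdev and issue it
		 * synchronously; if it still fails, the chunk is
		 * recorded as a bad block below. */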
239574d46992SChristoph Hellwig bio_set_dev(wbio, rdev->bdev); 23964e49ea4aSMike Christie 23974e49ea4aSMike Christie if (submit_bio_wait(wbio) < 0) 2398cd5ff9a1SNeilBrown /* failure! */ 2399cd5ff9a1SNeilBrown ok = rdev_set_badblocks(rdev, sector, 2400cd5ff9a1SNeilBrown sectors, 0) 2401cd5ff9a1SNeilBrown && ok; 2402cd5ff9a1SNeilBrown 2403cd5ff9a1SNeilBrown bio_put(wbio); 2404cd5ff9a1SNeilBrown sect_to_write -= sectors; 2405cd5ff9a1SNeilBrown sector += sectors; 2406cd5ff9a1SNeilBrown sectors = block_sectors; 2407cd5ff9a1SNeilBrown } 2408cd5ff9a1SNeilBrown return ok; 2409cd5ff9a1SNeilBrown } 2410cd5ff9a1SNeilBrown 2411e8096360SNeilBrown static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 241262096bceSNeilBrown { 241362096bceSNeilBrown int m; 241462096bceSNeilBrown int s = r1_bio->sectors; 24158f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++) { 24163cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev; 241762096bceSNeilBrown struct bio *bio = r1_bio->bios[m]; 241862096bceSNeilBrown if (bio->bi_end_io == NULL) 241962096bceSNeilBrown continue; 24204e4cbee9SChristoph Hellwig if (!bio->bi_status && 242162096bceSNeilBrown test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2422c6563a8cSNeilBrown rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 242362096bceSNeilBrown } 24244e4cbee9SChristoph Hellwig if (bio->bi_status && 242562096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) { 242662096bceSNeilBrown if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 242762096bceSNeilBrown md_error(conf->mddev, rdev); 242862096bceSNeilBrown } 242962096bceSNeilBrown } 243062096bceSNeilBrown put_buf(r1_bio); 243162096bceSNeilBrown md_done_sync(conf->mddev, s, 1); 243262096bceSNeilBrown } 243362096bceSNeilBrown 2434e8096360SNeilBrown static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) 243562096bceSNeilBrown { 2436fd76863eScolyli@suse.de int m, idx; 243755ce74d4SNeilBrown bool fail = false; 2438fd76863eScolyli@suse.de 24398f19ccb2SNeilBrown for (m = 0; m < conf->raid_disks * 2 ; m++) 244062096bceSNeilBrown if (r1_bio->bios[m] == IO_MADE_GOOD) { 24413cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[m].rdev; 244262096bceSNeilBrown rdev_clear_badblocks(rdev, 244362096bceSNeilBrown r1_bio->sector, 2444c6563a8cSNeilBrown r1_bio->sectors, 0); 244562096bceSNeilBrown rdev_dec_pending(rdev, conf->mddev); 244662096bceSNeilBrown } else if (r1_bio->bios[m] != NULL) { 244762096bceSNeilBrown /* This drive got a write error. We need to 244862096bceSNeilBrown * narrow down and record precise write 244962096bceSNeilBrown * errors. 
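			 * (narrow_write_error() retries the failed range one
			 * badblock-aligned chunk at a time and records a bad
			 * block for each chunk that still fails.)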
245062096bceSNeilBrown 			 */
245155ce74d4SNeilBrown 			fail = true;
245262096bceSNeilBrown 			if (!narrow_write_error(r1_bio, m)) {
245362096bceSNeilBrown 				md_error(conf->mddev,
245462096bceSNeilBrown 					 conf->mirrors[m].rdev);
245562096bceSNeilBrown 				/* an I/O failed, we can't clear the bitmap */
245662096bceSNeilBrown 				set_bit(R1BIO_Degraded, &r1_bio->state);
245762096bceSNeilBrown 			}
245862096bceSNeilBrown 			rdev_dec_pending(conf->mirrors[m].rdev,
245962096bceSNeilBrown 					 conf->mddev);
246062096bceSNeilBrown 		}
246155ce74d4SNeilBrown 	if (fail) {
246255ce74d4SNeilBrown 		spin_lock_irq(&conf->device_lock);
246355ce74d4SNeilBrown 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2464fd76863eScolyli@suse.de 		idx = sector_to_idx(r1_bio->sector);
2465824e47daScolyli@suse.de 		atomic_inc(&conf->nr_queued[idx]);
246655ce74d4SNeilBrown 		spin_unlock_irq(&conf->device_lock);
2467824e47daScolyli@suse.de 		/*
2468824e47daScolyli@suse.de 		 * In case freeze_array() is waiting for the condition
2469824e47daScolyli@suse.de 		 * get_unqueued_pending() == extra to become true.
2470824e47daScolyli@suse.de 		 */
2471824e47daScolyli@suse.de 		wake_up(&conf->wait_barrier);
247255ce74d4SNeilBrown 		md_wakeup_thread(conf->mddev->thread);
2473bd8688a1SNeilBrown 	} else {
2474bd8688a1SNeilBrown 		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2475bd8688a1SNeilBrown 			close_write(r1_bio);
247662096bceSNeilBrown 		raid_end_bio_io(r1_bio);
247762096bceSNeilBrown 	}
2478bd8688a1SNeilBrown }
247962096bceSNeilBrown 
2480e8096360SNeilBrown static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
248162096bceSNeilBrown {
2482fd01b88cSNeilBrown 	struct mddev *mddev = conf->mddev;
248362096bceSNeilBrown 	struct bio *bio;
24843cb03002SNeilBrown 	struct md_rdev *rdev;
248562096bceSNeilBrown 
248662096bceSNeilBrown 	clear_bit(R1BIO_ReadError, &r1_bio->state);
248762096bceSNeilBrown 	/* we got a read error.  Maybe the drive is bad, or maybe just
248862096bceSNeilBrown 	 * this block is bad and we can fix it.
248962096bceSNeilBrown 	 * We freeze all other IO, and try reading the block from
249062096bceSNeilBrown 	 * other devices.  When we find one, we re-write
249162096bceSNeilBrown 	 * and check whether that fixes the read error.
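	 * If no device can supply good data, fix_read_error() instead
	 * marks the range bad on the device we were reading from.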
249262096bceSNeilBrown * This is all done synchronously while the array is 249362096bceSNeilBrown * frozen 249462096bceSNeilBrown */ 24957449f699STomasz Majchrzak 24967449f699STomasz Majchrzak bio = r1_bio->bios[r1_bio->read_disk]; 24977449f699STomasz Majchrzak bio_put(bio); 24987449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = NULL; 24997449f699STomasz Majchrzak 25002e52d449SNeilBrown rdev = conf->mirrors[r1_bio->read_disk].rdev; 25012e52d449SNeilBrown if (mddev->ro == 0 25022e52d449SNeilBrown && !test_bit(FailFast, &rdev->flags)) { 2503e2d59925SNeilBrown freeze_array(conf, 1); 250462096bceSNeilBrown fix_read_error(conf, r1_bio->read_disk, 250562096bceSNeilBrown r1_bio->sector, r1_bio->sectors); 250662096bceSNeilBrown unfreeze_array(conf); 2507b33d1062SGioh Kim } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { 2508b33d1062SGioh Kim md_error(mddev, rdev); 25097449f699STomasz Majchrzak } else { 25107449f699STomasz Majchrzak r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; 25117449f699STomasz Majchrzak } 25127449f699STomasz Majchrzak 25132e52d449SNeilBrown rdev_dec_pending(rdev, conf->mddev); 2514689389a0SNeilBrown allow_barrier(conf, r1_bio->sector); 2515689389a0SNeilBrown bio = r1_bio->master_bio; 251662096bceSNeilBrown 2517689389a0SNeilBrown /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */ 2518689389a0SNeilBrown r1_bio->state = 0; 2519689389a0SNeilBrown raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); 2520109e3765SNeilBrown } 252162096bceSNeilBrown 25224ed8731dSShaohua Li static void raid1d(struct md_thread *thread) 25231da177e4SLinus Torvalds { 25244ed8731dSShaohua Li struct mddev *mddev = thread->mddev; 25259f2c9d12SNeilBrown struct r1bio *r1_bio; 25261da177e4SLinus Torvalds unsigned long flags; 2527e8096360SNeilBrown struct r1conf *conf = mddev->private; 25281da177e4SLinus Torvalds struct list_head *head = &conf->retry_list; 2529e1dfa0a2SNeilBrown struct blk_plug plug; 2530fd76863eScolyli@suse.de int idx; 25311da177e4SLinus Torvalds 25321da177e4SLinus Torvalds md_check_recovery(mddev); 25331da177e4SLinus Torvalds 253455ce74d4SNeilBrown if (!list_empty_careful(&conf->bio_end_io_list) && 25352953079cSShaohua Li !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 253655ce74d4SNeilBrown LIST_HEAD(tmp); 253755ce74d4SNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 2538fd76863eScolyli@suse.de if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 2539fd76863eScolyli@suse.de list_splice_init(&conf->bio_end_io_list, &tmp); 254055ce74d4SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 254155ce74d4SNeilBrown while (!list_empty(&tmp)) { 2542a452744bSMikulas Patocka r1_bio = list_first_entry(&tmp, struct r1bio, 2543a452744bSMikulas Patocka retry_list); 254455ce74d4SNeilBrown list_del(&r1_bio->retry_list); 2545fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2546824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]); 2547bd8688a1SNeilBrown if (mddev->degraded) 2548bd8688a1SNeilBrown set_bit(R1BIO_Degraded, &r1_bio->state); 2549bd8688a1SNeilBrown if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2550bd8688a1SNeilBrown close_write(r1_bio); 255155ce74d4SNeilBrown raid_end_bio_io(r1_bio); 255255ce74d4SNeilBrown } 255355ce74d4SNeilBrown } 255455ce74d4SNeilBrown 2555e1dfa0a2SNeilBrown blk_start_plug(&plug); 25561da177e4SLinus Torvalds for (;;) { 2557a35e63efSNeilBrown 25587eaceaccSJens Axboe flush_pending_writes(conf); 2559a35e63efSNeilBrown 25601da177e4SLinus Torvalds spin_lock_irqsave(&conf->device_lock, flags); 
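		/* Pop the oldest r1_bio from the retry list, if any. */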
2561a35e63efSNeilBrown if (list_empty(head)) { 2562191ea9b2SNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 25631da177e4SLinus Torvalds break; 2564a35e63efSNeilBrown } 25659f2c9d12SNeilBrown r1_bio = list_entry(head->prev, struct r1bio, retry_list); 25661da177e4SLinus Torvalds list_del(head->prev); 2567fd76863eScolyli@suse.de idx = sector_to_idx(r1_bio->sector); 2568824e47daScolyli@suse.de atomic_dec(&conf->nr_queued[idx]); 25691da177e4SLinus Torvalds spin_unlock_irqrestore(&conf->device_lock, flags); 25701da177e4SLinus Torvalds 25711da177e4SLinus Torvalds mddev = r1_bio->mddev; 2572070ec55dSNeilBrown conf = mddev->private; 25734367af55SNeilBrown if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 2574d8f05d29SNeilBrown if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 257562096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) 257662096bceSNeilBrown handle_sync_write_finished(conf, r1_bio); 257762096bceSNeilBrown else 25781da177e4SLinus Torvalds sync_request_write(mddev, r1_bio); 2579cd5ff9a1SNeilBrown } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 258062096bceSNeilBrown test_bit(R1BIO_WriteError, &r1_bio->state)) 258162096bceSNeilBrown handle_write_finished(conf, r1_bio); 258262096bceSNeilBrown else if (test_bit(R1BIO_ReadError, &r1_bio->state)) 258362096bceSNeilBrown handle_read_error(conf, r1_bio); 2584d2eb35acSNeilBrown else 2585c230e7e5SNeilBrown WARN_ON_ONCE(1); 258662096bceSNeilBrown 25871d9d5241SNeilBrown cond_resched(); 25882953079cSShaohua Li if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) 2589de393cdeSNeilBrown md_check_recovery(mddev); 25901da177e4SLinus Torvalds } 2591e1dfa0a2SNeilBrown blk_finish_plug(&plug); 25921da177e4SLinus Torvalds } 25931da177e4SLinus Torvalds 2594e8096360SNeilBrown static int init_resync(struct r1conf *conf) 25951da177e4SLinus Torvalds { 25961da177e4SLinus Torvalds int buffs; 25971da177e4SLinus Torvalds 25981da177e4SLinus Torvalds buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 2599afeee514SKent Overstreet BUG_ON(mempool_initialized(&conf->r1buf_pool)); 2600afeee514SKent Overstreet 2601afeee514SKent Overstreet return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, 2602afeee514SKent Overstreet r1buf_pool_free, conf->poolinfo); 26031da177e4SLinus Torvalds } 26041da177e4SLinus Torvalds 2605208410b5SShaohua Li static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) 2606208410b5SShaohua Li { 2607afeee514SKent Overstreet struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); 2608208410b5SShaohua Li struct resync_pages *rps; 2609208410b5SShaohua Li struct bio *bio; 2610208410b5SShaohua Li int i; 2611208410b5SShaohua Li 2612208410b5SShaohua Li for (i = conf->poolinfo->raid_disks; i--; ) { 2613208410b5SShaohua Li bio = r1bio->bios[i]; 2614208410b5SShaohua Li rps = bio->bi_private; 2615208410b5SShaohua Li bio_reset(bio); 2616208410b5SShaohua Li bio->bi_private = rps; 2617208410b5SShaohua Li } 2618208410b5SShaohua Li r1bio->master_bio = NULL; 2619208410b5SShaohua Li return r1bio; 2620208410b5SShaohua Li } 2621208410b5SShaohua Li 26221da177e4SLinus Torvalds /* 26231da177e4SLinus Torvalds * perform a "sync" on one "block" 26241da177e4SLinus Torvalds * 26251da177e4SLinus Torvalds * We need to make sure that no normal I/O request - particularly write 26261da177e4SLinus Torvalds * requests - conflict with active sync requests. 
26271da177e4SLinus Torvalds  *
26281da177e4SLinus Torvalds  * This is achieved by tracking pending requests and a 'barrier' concept
26291da177e4SLinus Torvalds  * that can be installed to exclude normal IO requests.
 * raise_barrier() below installs such a barrier for one resync window
 * at a time.
26301da177e4SLinus Torvalds  */
26311da177e4SLinus Torvalds 
2632849674e4SShaohua Li static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2633849674e4SShaohua Li 				   int *skipped)
26341da177e4SLinus Torvalds {
2635e8096360SNeilBrown 	struct r1conf *conf = mddev->private;
26369f2c9d12SNeilBrown 	struct r1bio *r1_bio;
26371da177e4SLinus Torvalds 	struct bio *bio;
26381da177e4SLinus Torvalds 	sector_t max_sector, nr_sectors;
26393e198f78SNeilBrown 	int disk = -1;
26401da177e4SLinus Torvalds 	int i;
26413e198f78SNeilBrown 	int wonly = -1;
26423e198f78SNeilBrown 	int write_targets = 0, read_targets = 0;
264357dab0bdSNeilBrown 	sector_t sync_blocks;
2644e3b9703eSNeilBrown 	int still_degraded = 0;
264506f60385SNeilBrown 	int good_sectors = RESYNC_SECTORS;
264606f60385SNeilBrown 	int min_bad = 0; /* number of sectors that are bad in all devices */
2647fd76863eScolyli@suse.de 	int idx = sector_to_idx(sector_nr);
2648022e510fSMing Lei 	int page_idx = 0;
26491da177e4SLinus Torvalds 
2650afeee514SKent Overstreet 	if (!mempool_initialized(&conf->r1buf_pool))
26511da177e4SLinus Torvalds 		if (init_resync(conf))
265257afd89fSNeilBrown 			return 0;
26531da177e4SLinus Torvalds 
265458c0fed4SAndre Noll 	max_sector = mddev->dev_sectors;
26551da177e4SLinus Torvalds 	if (sector_nr >= max_sector) {
2656191ea9b2SNeilBrown 		/* If we aborted, we need to abort the
2657191ea9b2SNeilBrown 		 * sync on the 'current' bitmap chunk (there will
2658191ea9b2SNeilBrown 		 * only be one in raid1 resync).
2659191ea9b2SNeilBrown 		 * We can find the current address in mddev->curr_resync
2660191ea9b2SNeilBrown 		 */
26616a806c51SNeilBrown 		if (mddev->curr_resync < max_sector) /* aborted */
2662e64e4018SAndy Shevchenko 			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2663191ea9b2SNeilBrown 					   &sync_blocks, 1);
26646a806c51SNeilBrown 		else /* completed sync */
2665191ea9b2SNeilBrown 			conf->fullsync = 0;
26666a806c51SNeilBrown 
2667e64e4018SAndy Shevchenko 		md_bitmap_close_sync(mddev->bitmap);
26681da177e4SLinus Torvalds 		close_sync(conf);
2669c40f341fSGoldwyn Rodrigues 
2670c40f341fSGoldwyn Rodrigues 		if (mddev_is_clustered(mddev)) {
2671c40f341fSGoldwyn Rodrigues 			conf->cluster_sync_low = 0;
2672c40f341fSGoldwyn Rodrigues 			conf->cluster_sync_high = 0;
2673c40f341fSGoldwyn Rodrigues 		}
26741da177e4SLinus Torvalds 		return 0;
26751da177e4SLinus Torvalds 	}
26761da177e4SLinus Torvalds 
267707d84d10SNeilBrown 	if (mddev->bitmap == NULL &&
267807d84d10SNeilBrown 	    mddev->recovery_cp == MaxSector &&
26796394cca5SNeilBrown 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
268007d84d10SNeilBrown 	    conf->fullsync == 0) {
268107d84d10SNeilBrown 		*skipped = 1;
268207d84d10SNeilBrown 		return max_sector - sector_nr;
268307d84d10SNeilBrown 	}
26846394cca5SNeilBrown 	/* before building a request, check if we can skip these blocks..
26856394cca5SNeilBrown 	 * This call to md_bitmap_start_sync doesn't actually record anything
26866394cca5SNeilBrown 	 */
2687e64e4018SAndy Shevchenko 	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2688e5de485fSNeilBrown 	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2689191ea9b2SNeilBrown 		/* We can skip this block, and probably several more */
2690191ea9b2SNeilBrown 		*skipped = 1;
2691191ea9b2SNeilBrown 		return sync_blocks;
2692191ea9b2SNeilBrown 	}
269317999be4SNeilBrown 
26947ac50447STomasz Majchrzak 	/*
26957ac50447STomasz Majchrzak 	 * If there is non-resync activity waiting for a turn, then let it
26967ac50447STomasz Majchrzak 	 * through before starting on this new sync request.
26977ac50447STomasz Majchrzak 	 */
2698824e47daScolyli@suse.de 	if (atomic_read(&conf->nr_waiting[idx]))
26997ac50447STomasz Majchrzak 		schedule_timeout_uninterruptible(1);
27007ac50447STomasz Majchrzak 
2701c40f341fSGoldwyn Rodrigues 	/* we are incrementing sector_nr below.  To be safe, we check against
2702c40f341fSGoldwyn Rodrigues 	 * sector_nr + two times RESYNC_SECTORS
2703c40f341fSGoldwyn Rodrigues 	 */
2704c40f341fSGoldwyn Rodrigues 
2705e64e4018SAndy Shevchenko 	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2706c40f341fSGoldwyn Rodrigues 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
270717999be4SNeilBrown 
27088c242593SYufen Yu 
27098c242593SYufen Yu 	if (raise_barrier(conf, sector_nr))
27108c242593SYufen Yu 		return 0;
27118c242593SYufen Yu 
27128c242593SYufen Yu 	r1_bio = raid1_alloc_init_r1buf(conf);
27131da177e4SLinus Torvalds 
27143e198f78SNeilBrown 	rcu_read_lock();
27153e198f78SNeilBrown 	/*
27163e198f78SNeilBrown 	 * If we get a correctable read error during resync or recovery,
27173e198f78SNeilBrown 	 * we might want to read from a different device.  So we
27183e198f78SNeilBrown 	 * flag all drives that could conceivably be read from for READ,
27193e198f78SNeilBrown 	 * and any others (which will be non-In_sync devices) for WRITE.
27203e198f78SNeilBrown 	 * If a read fails, we try reading from something else for which READ
27213e198f78SNeilBrown 	 * is OK.
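	 * (read_targets and write_targets below count how many bios of
	 * each kind were set up.)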
27223e198f78SNeilBrown */ 27231da177e4SLinus Torvalds 27241da177e4SLinus Torvalds r1_bio->mddev = mddev; 27251da177e4SLinus Torvalds r1_bio->sector = sector_nr; 2726191ea9b2SNeilBrown r1_bio->state = 0; 27271da177e4SLinus Torvalds set_bit(R1BIO_IsSync, &r1_bio->state); 2728fd76863eScolyli@suse.de /* make sure good_sectors won't go across barrier unit boundary */ 2729fd76863eScolyli@suse.de good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors); 27301da177e4SLinus Torvalds 27318f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 27323cb03002SNeilBrown struct md_rdev *rdev; 27331da177e4SLinus Torvalds bio = r1_bio->bios[i]; 27341da177e4SLinus Torvalds 27353e198f78SNeilBrown rdev = rcu_dereference(conf->mirrors[i].rdev); 27363e198f78SNeilBrown if (rdev == NULL || 27373e198f78SNeilBrown test_bit(Faulty, &rdev->flags)) { 27388f19ccb2SNeilBrown if (i < conf->raid_disks) 2739e3b9703eSNeilBrown still_degraded = 1; 27403e198f78SNeilBrown } else if (!test_bit(In_sync, &rdev->flags)) { 2741796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 27421da177e4SLinus Torvalds bio->bi_end_io = end_sync_write; 27431da177e4SLinus Torvalds write_targets ++; 27443e198f78SNeilBrown } else { 27453e198f78SNeilBrown /* may need to read from here */ 274606f60385SNeilBrown sector_t first_bad = MaxSector; 274706f60385SNeilBrown int bad_sectors; 274806f60385SNeilBrown 274906f60385SNeilBrown if (is_badblock(rdev, sector_nr, good_sectors, 275006f60385SNeilBrown &first_bad, &bad_sectors)) { 275106f60385SNeilBrown if (first_bad > sector_nr) 275206f60385SNeilBrown good_sectors = first_bad - sector_nr; 275306f60385SNeilBrown else { 275406f60385SNeilBrown bad_sectors -= (sector_nr - first_bad); 275506f60385SNeilBrown if (min_bad == 0 || 275606f60385SNeilBrown min_bad > bad_sectors) 275706f60385SNeilBrown min_bad = bad_sectors; 275806f60385SNeilBrown } 275906f60385SNeilBrown } 276006f60385SNeilBrown if (sector_nr < first_bad) { 27613e198f78SNeilBrown if (test_bit(WriteMostly, &rdev->flags)) { 27623e198f78SNeilBrown if (wonly < 0) 27633e198f78SNeilBrown wonly = i; 27643e198f78SNeilBrown } else { 27653e198f78SNeilBrown if (disk < 0) 27663e198f78SNeilBrown disk = i; 27673e198f78SNeilBrown } 2768796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_READ, 0); 276906f60385SNeilBrown bio->bi_end_io = end_sync_read; 27703e198f78SNeilBrown read_targets++; 2771d57368afSAlexander Lyakas } else if (!test_bit(WriteErrorSeen, &rdev->flags) && 2772d57368afSAlexander Lyakas test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2773d57368afSAlexander Lyakas !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 2774d57368afSAlexander Lyakas /* 2775d57368afSAlexander Lyakas * The device is suitable for reading (InSync), 2776d57368afSAlexander Lyakas * but has bad block(s) here. Let's try to correct them, 2777d57368afSAlexander Lyakas * if we are doing resync or repair. Otherwise, leave 2778d57368afSAlexander Lyakas * this device alone for this sync request. 
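				 * (Making it a write target means the resync
				 * write will rewrite the bad range using data
				 * read from a good mirror.)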
2779d57368afSAlexander Lyakas */ 2780796a5cf0SMike Christie bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 2781d57368afSAlexander Lyakas bio->bi_end_io = end_sync_write; 2782d57368afSAlexander Lyakas write_targets++; 27833e198f78SNeilBrown } 278406f60385SNeilBrown } 2785028288dfSZhiqiang Liu if (rdev && bio->bi_end_io) { 27863e198f78SNeilBrown atomic_inc(&rdev->nr_pending); 27874f024f37SKent Overstreet bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 278874d46992SChristoph Hellwig bio_set_dev(bio, rdev->bdev); 27892e52d449SNeilBrown if (test_bit(FailFast, &rdev->flags)) 27902e52d449SNeilBrown bio->bi_opf |= MD_FAILFAST; 27911da177e4SLinus Torvalds } 279206f60385SNeilBrown } 27933e198f78SNeilBrown rcu_read_unlock(); 27943e198f78SNeilBrown if (disk < 0) 27953e198f78SNeilBrown disk = wonly; 27963e198f78SNeilBrown r1_bio->read_disk = disk; 2797191ea9b2SNeilBrown 279806f60385SNeilBrown if (read_targets == 0 && min_bad > 0) { 279906f60385SNeilBrown /* These sectors are bad on all InSync devices, so we 280006f60385SNeilBrown * need to mark them bad on all write targets 280106f60385SNeilBrown */ 280206f60385SNeilBrown int ok = 1; 28038f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2 ; i++) 280406f60385SNeilBrown if (r1_bio->bios[i]->bi_end_io == end_sync_write) { 2805a42f9d83Smajianpeng struct md_rdev *rdev = conf->mirrors[i].rdev; 280606f60385SNeilBrown ok = rdev_set_badblocks(rdev, sector_nr, 280706f60385SNeilBrown min_bad, 0 280806f60385SNeilBrown ) && ok; 280906f60385SNeilBrown } 28102953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 281106f60385SNeilBrown *skipped = 1; 281206f60385SNeilBrown put_buf(r1_bio); 281306f60385SNeilBrown 281406f60385SNeilBrown if (!ok) { 281506f60385SNeilBrown /* Cannot record the badblocks, so need to 281606f60385SNeilBrown * abort the resync. 281706f60385SNeilBrown * If there are multiple read targets, could just 281806f60385SNeilBrown * fail the really bad ones ??? 
281906f60385SNeilBrown */ 282006f60385SNeilBrown conf->recovery_disabled = mddev->recovery_disabled; 282106f60385SNeilBrown set_bit(MD_RECOVERY_INTR, &mddev->recovery); 282206f60385SNeilBrown return 0; 282306f60385SNeilBrown } else 282406f60385SNeilBrown return min_bad; 282506f60385SNeilBrown 282606f60385SNeilBrown } 282706f60385SNeilBrown if (min_bad > 0 && min_bad < good_sectors) { 282806f60385SNeilBrown /* only resync enough to reach the next bad->good 282906f60385SNeilBrown * transition */ 283006f60385SNeilBrown good_sectors = min_bad; 283106f60385SNeilBrown } 283206f60385SNeilBrown 28333e198f78SNeilBrown if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) 28343e198f78SNeilBrown /* extra read targets are also write targets */ 28353e198f78SNeilBrown write_targets += read_targets-1; 28363e198f78SNeilBrown 28373e198f78SNeilBrown if (write_targets == 0 || read_targets == 0) { 28381da177e4SLinus Torvalds /* There is nowhere to write, so all non-sync 28391da177e4SLinus Torvalds * drives must be failed - so we are finished 28401da177e4SLinus Torvalds */ 2841b7219ccbSNeilBrown sector_t rv; 2842b7219ccbSNeilBrown if (min_bad > 0) 2843b7219ccbSNeilBrown max_sector = sector_nr + min_bad; 2844b7219ccbSNeilBrown rv = max_sector - sector_nr; 284557afd89fSNeilBrown *skipped = 1; 28461da177e4SLinus Torvalds put_buf(r1_bio); 28471da177e4SLinus Torvalds return rv; 28481da177e4SLinus Torvalds } 28491da177e4SLinus Torvalds 2850c6207277SNeilBrown if (max_sector > mddev->resync_max) 2851c6207277SNeilBrown max_sector = mddev->resync_max; /* Don't do IO beyond here */ 285206f60385SNeilBrown if (max_sector > sector_nr + good_sectors) 285306f60385SNeilBrown max_sector = sector_nr + good_sectors; 28541da177e4SLinus Torvalds nr_sectors = 0; 2855289e99e8SNeilBrown sync_blocks = 0; 28561da177e4SLinus Torvalds do { 28571da177e4SLinus Torvalds struct page *page; 28581da177e4SLinus Torvalds int len = PAGE_SIZE; 28591da177e4SLinus Torvalds if (sector_nr + (len>>9) > max_sector) 28601da177e4SLinus Torvalds len = (max_sector - sector_nr) << 9; 28611da177e4SLinus Torvalds if (len == 0) 28621da177e4SLinus Torvalds break; 2863ab7a30c7SNeilBrown if (sync_blocks == 0) { 2864e64e4018SAndy Shevchenko if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, 2865e3b9703eSNeilBrown &sync_blocks, still_degraded) && 2866e5de485fSNeilBrown !conf->fullsync && 2867e5de485fSNeilBrown !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2868191ea9b2SNeilBrown break; 28697571ae88SNeilBrown if ((len >> 9) > sync_blocks) 28706a806c51SNeilBrown len = sync_blocks<<9; 2871ab7a30c7SNeilBrown } 2872191ea9b2SNeilBrown 28738f19ccb2SNeilBrown for (i = 0 ; i < conf->raid_disks * 2; i++) { 287498d30c58SMing Lei struct resync_pages *rp; 287598d30c58SMing Lei 28761da177e4SLinus Torvalds bio = r1_bio->bios[i]; 287798d30c58SMing Lei rp = get_resync_pages(bio); 28781da177e4SLinus Torvalds if (bio->bi_end_io) { 2879022e510fSMing Lei page = resync_fetch_page(rp, page_idx); 2880c85ba149SMing Lei 2881c85ba149SMing Lei /* 2882c85ba149SMing Lei * won't fail because the vec table is big 2883c85ba149SMing Lei * enough to hold all these pages 2884c85ba149SMing Lei */ 2885c85ba149SMing Lei bio_add_page(bio, page, len, 0); 28861da177e4SLinus Torvalds } 28871da177e4SLinus Torvalds } 28881da177e4SLinus Torvalds nr_sectors += len>>9; 28891da177e4SLinus Torvalds sector_nr += len>>9; 2890191ea9b2SNeilBrown sync_blocks -= (len>>9); 2891022e510fSMing Lei } while (++page_idx < RESYNC_PAGES); 289298d30c58SMing Lei 28931da177e4SLinus Torvalds r1_bio->sectors = 
nr_sectors; 28941da177e4SLinus Torvalds 2895c40f341fSGoldwyn Rodrigues if (mddev_is_clustered(mddev) && 2896c40f341fSGoldwyn Rodrigues conf->cluster_sync_high < sector_nr + nr_sectors) { 2897c40f341fSGoldwyn Rodrigues conf->cluster_sync_low = mddev->curr_resync_completed; 2898c40f341fSGoldwyn Rodrigues conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; 2899c40f341fSGoldwyn Rodrigues /* Send resync message */ 2900c40f341fSGoldwyn Rodrigues md_cluster_ops->resync_info_update(mddev, 2901c40f341fSGoldwyn Rodrigues conf->cluster_sync_low, 2902c40f341fSGoldwyn Rodrigues conf->cluster_sync_high); 2903c40f341fSGoldwyn Rodrigues } 2904c40f341fSGoldwyn Rodrigues 2905d11c171eSNeilBrown /* For a user-requested sync, we read all readable devices and do a 2906d11c171eSNeilBrown * compare 2907d11c171eSNeilBrown */ 2908d11c171eSNeilBrown if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2909d11c171eSNeilBrown atomic_set(&r1_bio->remaining, read_targets); 29102d4f4f33SNeilBrown for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { 2911d11c171eSNeilBrown bio = r1_bio->bios[i]; 2912d11c171eSNeilBrown if (bio->bi_end_io == end_sync_read) { 29132d4f4f33SNeilBrown read_targets--; 291474d46992SChristoph Hellwig md_sync_acct_bio(bio, nr_sectors); 29152e52d449SNeilBrown if (read_targets == 1) 29162e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST; 29171da177e4SLinus Torvalds generic_make_request(bio); 2918d11c171eSNeilBrown } 2919d11c171eSNeilBrown } 2920d11c171eSNeilBrown } else { 2921d11c171eSNeilBrown atomic_set(&r1_bio->remaining, 1); 2922d11c171eSNeilBrown bio = r1_bio->bios[r1_bio->read_disk]; 292374d46992SChristoph Hellwig md_sync_acct_bio(bio, nr_sectors); 29242e52d449SNeilBrown if (read_targets == 1) 29252e52d449SNeilBrown bio->bi_opf &= ~MD_FAILFAST; 2926d11c171eSNeilBrown generic_make_request(bio); 2927d11c171eSNeilBrown } 29281da177e4SLinus Torvalds return nr_sectors; 29291da177e4SLinus Torvalds } 29301da177e4SLinus Torvalds 2931fd01b88cSNeilBrown static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) 293280c3a6ceSDan Williams { 293380c3a6ceSDan Williams if (sectors) 293480c3a6ceSDan Williams return sectors; 293580c3a6ceSDan Williams 293680c3a6ceSDan Williams return mddev->dev_sectors; 293780c3a6ceSDan Williams } 293880c3a6ceSDan Williams 2939e8096360SNeilBrown static struct r1conf *setup_conf(struct mddev *mddev) 29401da177e4SLinus Torvalds { 2941e8096360SNeilBrown struct r1conf *conf; 2942709ae487SNeilBrown int i; 29430eaf822cSJonathan Brassow struct raid1_info *disk; 29443cb03002SNeilBrown struct md_rdev *rdev; 2945709ae487SNeilBrown int err = -ENOMEM; 29461da177e4SLinus Torvalds 2947e8096360SNeilBrown conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); 29481da177e4SLinus Torvalds if (!conf) 2949709ae487SNeilBrown goto abort; 29501da177e4SLinus Torvalds 2951fd76863eScolyli@suse.de conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, 2952824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2953fd76863eScolyli@suse.de if (!conf->nr_pending) 2954fd76863eScolyli@suse.de goto abort; 2955fd76863eScolyli@suse.de 2956fd76863eScolyli@suse.de conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, 2957824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2958fd76863eScolyli@suse.de if (!conf->nr_waiting) 2959fd76863eScolyli@suse.de goto abort; 2960fd76863eScolyli@suse.de 2961fd76863eScolyli@suse.de conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, 2962824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2963fd76863eScolyli@suse.de if (!conf->nr_queued) 
2964fd76863eScolyli@suse.de goto abort; 2965fd76863eScolyli@suse.de 2966fd76863eScolyli@suse.de conf->barrier = kcalloc(BARRIER_BUCKETS_NR, 2967824e47daScolyli@suse.de sizeof(atomic_t), GFP_KERNEL); 2968fd76863eScolyli@suse.de if (!conf->barrier) 2969fd76863eScolyli@suse.de goto abort; 2970fd76863eScolyli@suse.de 29716396bb22SKees Cook conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), 29726396bb22SKees Cook mddev->raid_disks, 2), 29731da177e4SLinus Torvalds GFP_KERNEL); 29741da177e4SLinus Torvalds if (!conf->mirrors) 2975709ae487SNeilBrown goto abort; 29761da177e4SLinus Torvalds 2977ddaf22abSNeilBrown conf->tmppage = alloc_page(GFP_KERNEL); 2978ddaf22abSNeilBrown if (!conf->tmppage) 2979709ae487SNeilBrown goto abort; 2980ddaf22abSNeilBrown 2981709ae487SNeilBrown conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 29821da177e4SLinus Torvalds if (!conf->poolinfo) 2983709ae487SNeilBrown goto abort; 29848f19ccb2SNeilBrown conf->poolinfo->raid_disks = mddev->raid_disks * 2; 29853f677f9cSMarcos Paulo de Souza err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, 2986c7afa803SMarcos Paulo de Souza rbio_pool_free, conf->poolinfo); 2987afeee514SKent Overstreet if (err) 2988709ae487SNeilBrown goto abort; 2989709ae487SNeilBrown 2990afeee514SKent Overstreet err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 2991afeee514SKent Overstreet if (err) 2992c230e7e5SNeilBrown goto abort; 2993c230e7e5SNeilBrown 2994ed9bfdf1SNeilBrown conf->poolinfo->mddev = mddev; 29951da177e4SLinus Torvalds 2996c19d5798SNeilBrown err = -EINVAL; 2997e7e72bf6SNeil Brown spin_lock_init(&conf->device_lock); 2998dafb20faSNeilBrown rdev_for_each(rdev, mddev) { 2999709ae487SNeilBrown int disk_idx = rdev->raid_disk; 30001da177e4SLinus Torvalds if (disk_idx >= mddev->raid_disks 30011da177e4SLinus Torvalds || disk_idx < 0) 30021da177e4SLinus Torvalds continue; 3003c19d5798SNeilBrown if (test_bit(Replacement, &rdev->flags)) 300402b898f2SNeilBrown disk = conf->mirrors + mddev->raid_disks + disk_idx; 3005c19d5798SNeilBrown else 30061da177e4SLinus Torvalds disk = conf->mirrors + disk_idx; 30071da177e4SLinus Torvalds 3008c19d5798SNeilBrown if (disk->rdev) 3009c19d5798SNeilBrown goto abort; 30101da177e4SLinus Torvalds disk->rdev = rdev; 30111da177e4SLinus Torvalds disk->head_position = 0; 301212cee5a8SShaohua Li disk->seq_start = MaxSector; 30131da177e4SLinus Torvalds } 30141da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks; 30151da177e4SLinus Torvalds conf->mddev = mddev; 30161da177e4SLinus Torvalds INIT_LIST_HEAD(&conf->retry_list); 301755ce74d4SNeilBrown INIT_LIST_HEAD(&conf->bio_end_io_list); 30181da177e4SLinus Torvalds 30191da177e4SLinus Torvalds spin_lock_init(&conf->resync_lock); 302017999be4SNeilBrown init_waitqueue_head(&conf->wait_barrier); 30211da177e4SLinus Torvalds 3022191ea9b2SNeilBrown bio_list_init(&conf->pending_bio_list); 302334db0cd6SNeilBrown conf->pending_count = 0; 3024d890fa2bSNeilBrown conf->recovery_disabled = mddev->recovery_disabled - 1; 3025191ea9b2SNeilBrown 3026c19d5798SNeilBrown err = -EIO; 30278f19ccb2SNeilBrown for (i = 0; i < conf->raid_disks * 2; i++) { 30281da177e4SLinus Torvalds 30291da177e4SLinus Torvalds disk = conf->mirrors + i; 30301da177e4SLinus Torvalds 3031c19d5798SNeilBrown if (i < conf->raid_disks && 3032c19d5798SNeilBrown disk[conf->raid_disks].rdev) { 3033c19d5798SNeilBrown /* This slot has a replacement. 
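			 * (setup_conf() stored any replacement rdev at
			 * slot conf->raid_disks + i, just above.)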
*/ 3034c19d5798SNeilBrown if (!disk->rdev) { 3035c19d5798SNeilBrown /* No original, just make the replacement 3036c19d5798SNeilBrown * a recovering spare 3037c19d5798SNeilBrown */ 3038c19d5798SNeilBrown disk->rdev = 3039c19d5798SNeilBrown disk[conf->raid_disks].rdev; 3040c19d5798SNeilBrown disk[conf->raid_disks].rdev = NULL; 3041c19d5798SNeilBrown } else if (!test_bit(In_sync, &disk->rdev->flags)) 3042c19d5798SNeilBrown /* Original is not in_sync - bad */ 3043c19d5798SNeilBrown goto abort; 3044c19d5798SNeilBrown } 3045c19d5798SNeilBrown 30465fd6c1dcSNeilBrown if (!disk->rdev || 30475fd6c1dcSNeilBrown !test_bit(In_sync, &disk->rdev->flags)) { 30481da177e4SLinus Torvalds disk->head_position = 0; 30494f0a5e01SJonathan Brassow if (disk->rdev && 30504f0a5e01SJonathan Brassow (disk->rdev->saved_raid_disk < 0)) 305117571284SNeilBrown conf->fullsync = 1; 3052be4d3280SShaohua Li } 30531da177e4SLinus Torvalds } 3054709ae487SNeilBrown 3055709ae487SNeilBrown err = -ENOMEM; 30560232605dSNeilBrown conf->thread = md_register_thread(raid1d, mddev, "raid1"); 30571d41c216SNeilBrown if (!conf->thread) 3058709ae487SNeilBrown goto abort; 3059191ea9b2SNeilBrown 3060709ae487SNeilBrown return conf; 3061709ae487SNeilBrown 3062709ae487SNeilBrown abort: 3063709ae487SNeilBrown if (conf) { 3064afeee514SKent Overstreet mempool_exit(&conf->r1bio_pool); 3065709ae487SNeilBrown kfree(conf->mirrors); 3066709ae487SNeilBrown safe_put_page(conf->tmppage); 3067709ae487SNeilBrown kfree(conf->poolinfo); 3068fd76863eScolyli@suse.de kfree(conf->nr_pending); 3069fd76863eScolyli@suse.de kfree(conf->nr_waiting); 3070fd76863eScolyli@suse.de kfree(conf->nr_queued); 3071fd76863eScolyli@suse.de kfree(conf->barrier); 3072afeee514SKent Overstreet bioset_exit(&conf->bio_split); 3073709ae487SNeilBrown kfree(conf); 3074709ae487SNeilBrown } 3075709ae487SNeilBrown return ERR_PTR(err); 3076709ae487SNeilBrown } 3077709ae487SNeilBrown 3078afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv); 3079849674e4SShaohua Li static int raid1_run(struct mddev *mddev) 3080709ae487SNeilBrown { 3081e8096360SNeilBrown struct r1conf *conf; 3082709ae487SNeilBrown int i; 30833cb03002SNeilBrown struct md_rdev *rdev; 30845220ea1eSmajianpeng int ret; 30852ff8cc2cSShaohua Li bool discard_supported = false; 3086709ae487SNeilBrown 3087709ae487SNeilBrown if (mddev->level != 1) { 30881d41c216SNeilBrown pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", 3089709ae487SNeilBrown mdname(mddev), mddev->level); 3090709ae487SNeilBrown return -EIO; 3091709ae487SNeilBrown } 3092709ae487SNeilBrown if (mddev->reshape_position != MaxSector) { 30931d41c216SNeilBrown pr_warn("md/raid1:%s: reshape_position set but not supported\n", 3094709ae487SNeilBrown mdname(mddev)); 3095709ae487SNeilBrown return -EIO; 3096709ae487SNeilBrown } 3097a415c0f1SNeilBrown if (mddev_init_writes_pending(mddev) < 0) 3098a415c0f1SNeilBrown return -ENOMEM; 3099709ae487SNeilBrown /* 3100709ae487SNeilBrown * copy the already verified devices into our private RAID1 3101709ae487SNeilBrown * bookkeeping area. 
[whatever we allocate in run(),
3102afa0f557SNeilBrown 	 * should be freed in raid1_free()]
3103709ae487SNeilBrown 	 */
3104709ae487SNeilBrown 	if (mddev->private == NULL)
3105709ae487SNeilBrown 		conf = setup_conf(mddev);
3106709ae487SNeilBrown 	else
3107709ae487SNeilBrown 		conf = mddev->private;
3108709ae487SNeilBrown 
3109709ae487SNeilBrown 	if (IS_ERR(conf))
3110709ae487SNeilBrown 		return PTR_ERR(conf);
3111709ae487SNeilBrown 
31123deff1a7SChristoph Hellwig 	if (mddev->queue) {
31135026d7a9SH. Peter Anvin 		blk_queue_max_write_same_sectors(mddev->queue, 0);
31143deff1a7SChristoph Hellwig 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
31153deff1a7SChristoph Hellwig 	}
31165026d7a9SH. Peter Anvin 
3117dafb20faSNeilBrown 	rdev_for_each(rdev, mddev) {
31181ed7242eSJonathan Brassow 		if (!mddev->gendisk)
31191ed7242eSJonathan Brassow 			continue;
3120709ae487SNeilBrown 		disk_stack_limits(mddev->gendisk, rdev->bdev,
3121709ae487SNeilBrown 				  rdev->data_offset << 9);
31222ff8cc2cSShaohua Li 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
31232ff8cc2cSShaohua Li 			discard_supported = true;
3124709ae487SNeilBrown 	}
3125709ae487SNeilBrown 
3126709ae487SNeilBrown 	mddev->degraded = 0;
3127709ae487SNeilBrown 	for (i = 0; i < conf->raid_disks; i++)
3128709ae487SNeilBrown 		if (conf->mirrors[i].rdev == NULL ||
3129709ae487SNeilBrown 		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3130709ae487SNeilBrown 		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3131709ae487SNeilBrown 			mddev->degraded++;
313207f1a685SYufen Yu 	/*
313307f1a685SYufen Yu 	 * RAID1 needs at least one active disk
313407f1a685SYufen Yu 	 */
313507f1a685SYufen Yu 	if (conf->raid_disks - mddev->degraded < 1) {
313607f1a685SYufen Yu 		ret = -EINVAL;
313707f1a685SYufen Yu 		goto abort;
313807f1a685SYufen Yu 	}
3139709ae487SNeilBrown 
3140709ae487SNeilBrown 	if (conf->raid_disks - mddev->degraded == 1)
3141709ae487SNeilBrown 		mddev->recovery_cp = MaxSector;
3142709ae487SNeilBrown 
31438c6ac868SAndre Noll 	if (mddev->recovery_cp != MaxSector)
31441d41c216SNeilBrown 		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
31458c6ac868SAndre Noll 			mdname(mddev));
31461d41c216SNeilBrown 	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
31471da177e4SLinus Torvalds 		mdname(mddev), mddev->raid_disks - mddev->degraded,
31481da177e4SLinus Torvalds 		mddev->raid_disks);
3149709ae487SNeilBrown 
31501da177e4SLinus Torvalds 	/*
31511da177e4SLinus Torvalds 	 * Ok, everything is just fine now
31521da177e4SLinus Torvalds 	 */
3153709ae487SNeilBrown 	mddev->thread = conf->thread;
3154709ae487SNeilBrown 	conf->thread = NULL;
3155709ae487SNeilBrown 	mddev->private = conf;
315646533ff7SNeilBrown 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3157709ae487SNeilBrown 
31581f403624SDan Williams 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
31591da177e4SLinus Torvalds 
31601ed7242eSJonathan Brassow 	if (mddev->queue) {
31612ff8cc2cSShaohua Li 		if (discard_supported)
31628b904b5bSBart Van Assche 			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
31632ff8cc2cSShaohua Li 					   mddev->queue);
31642ff8cc2cSShaohua Li 		else
31658b904b5bSBart Van Assche 			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
31662ff8cc2cSShaohua Li 					     mddev->queue);
31671ed7242eSJonathan Brassow 	}
31685220ea1eSmajianpeng 
31695220ea1eSmajianpeng 	ret = md_integrity_register(mddev);
31705aa61f42SNeilBrown 	if (ret) {
31715aa61f42SNeilBrown 		md_unregister_thread(&mddev->thread);
317207f1a685SYufen Yu 		goto abort;
31735aa61f42SNeilBrown 	}
317407f1a685SYufen Yu 	return 0;
317507f1a685SYufen Yu 
317607f1a685SYufen Yu abort:
317707f1a685SYufen Yu 
raid1_free(mddev, conf); 31785220ea1eSmajianpeng return ret; 31791da177e4SLinus Torvalds } 31801da177e4SLinus Torvalds 3181afa0f557SNeilBrown static void raid1_free(struct mddev *mddev, void *priv) 31821da177e4SLinus Torvalds { 3183afa0f557SNeilBrown struct r1conf *conf = priv; 31844b6d287fSNeilBrown 3185afeee514SKent Overstreet mempool_exit(&conf->r1bio_pool); 31861da177e4SLinus Torvalds kfree(conf->mirrors); 31870fea7ed8SHirokazu Takahashi safe_put_page(conf->tmppage); 31881da177e4SLinus Torvalds kfree(conf->poolinfo); 3189fd76863eScolyli@suse.de kfree(conf->nr_pending); 3190fd76863eScolyli@suse.de kfree(conf->nr_waiting); 3191fd76863eScolyli@suse.de kfree(conf->nr_queued); 3192fd76863eScolyli@suse.de kfree(conf->barrier); 3193afeee514SKent Overstreet bioset_exit(&conf->bio_split); 31941da177e4SLinus Torvalds kfree(conf); 31951da177e4SLinus Torvalds } 31961da177e4SLinus Torvalds 3197fd01b88cSNeilBrown static int raid1_resize(struct mddev *mddev, sector_t sectors) 31981da177e4SLinus Torvalds { 31991da177e4SLinus Torvalds /* no resync is happening, and there is enough space 32001da177e4SLinus Torvalds * on all devices, so we can resize. 32011da177e4SLinus Torvalds * We need to make sure resync covers any new space. 32021da177e4SLinus Torvalds * If the array is shrinking we should possibly wait until 32031da177e4SLinus Torvalds * any io in the removed space completes, but it hardly seems 32041da177e4SLinus Torvalds * worth it. 32051da177e4SLinus Torvalds */ 3206a4a6125aSNeilBrown sector_t newsize = raid1_size(mddev, sectors, 0); 3207a4a6125aSNeilBrown if (mddev->external_size && 3208a4a6125aSNeilBrown mddev->array_sectors > newsize) 3209b522adcdSDan Williams return -EINVAL; 3210a4a6125aSNeilBrown if (mddev->bitmap) { 3211e64e4018SAndy Shevchenko int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); 3212a4a6125aSNeilBrown if (ret) 3213a4a6125aSNeilBrown return ret; 3214a4a6125aSNeilBrown } 3215a4a6125aSNeilBrown md_set_array_sectors(mddev, newsize); 3216b522adcdSDan Williams if (sectors > mddev->dev_sectors && 3217b098636cSNeilBrown mddev->recovery_cp > mddev->dev_sectors) { 321858c0fed4SAndre Noll mddev->recovery_cp = mddev->dev_sectors; 32191da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 32201da177e4SLinus Torvalds } 3221b522adcdSDan Williams mddev->dev_sectors = sectors; 32224b5c7ae8SNeilBrown mddev->resync_max_sectors = sectors; 32231da177e4SLinus Torvalds return 0; 32241da177e4SLinus Torvalds } 32251da177e4SLinus Torvalds 3226fd01b88cSNeilBrown static int raid1_reshape(struct mddev *mddev) 32271da177e4SLinus Torvalds { 32281da177e4SLinus Torvalds /* We need to: 32291da177e4SLinus Torvalds * 1/ resize the r1bio_pool 32301da177e4SLinus Torvalds * 2/ resize conf->mirrors 32311da177e4SLinus Torvalds * 32321da177e4SLinus Torvalds * We allocate a new r1bio_pool if we can. 32331da177e4SLinus Torvalds * Then raise a device barrier and wait until all IO stops. 32341da177e4SLinus Torvalds * Then resize conf->mirrors and swap in the new r1bio pool. 32356ea9c07cSNeilBrown * 32366ea9c07cSNeilBrown * At the same time, we "pack" the devices so that all the missing 32376ea9c07cSNeilBrown * devices have the higher raid_disk numbers. 
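	 * freeze_array() quiesces all regular IO while the pools and the
	 * mirrors array are swapped; unfreeze_array() restarts it afterwards.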
32381da177e4SLinus Torvalds */ 3239afeee514SKent Overstreet mempool_t newpool, oldpool; 32401da177e4SLinus Torvalds struct pool_info *newpoolinfo; 32410eaf822cSJonathan Brassow struct raid1_info *newmirrors; 3242e8096360SNeilBrown struct r1conf *conf = mddev->private; 324363c70c4fSNeilBrown int cnt, raid_disks; 3244c04be0aaSNeilBrown unsigned long flags; 32452214c260SArtur Paszkiewicz int d, d2; 3246afeee514SKent Overstreet int ret; 3247afeee514SKent Overstreet 3248afeee514SKent Overstreet memset(&newpool, 0, sizeof(newpool)); 3249afeee514SKent Overstreet memset(&oldpool, 0, sizeof(oldpool)); 32501da177e4SLinus Torvalds 325163c70c4fSNeilBrown /* Cannot change chunk_size, layout, or level */ 3252664e7c41SAndre Noll if (mddev->chunk_sectors != mddev->new_chunk_sectors || 325363c70c4fSNeilBrown mddev->layout != mddev->new_layout || 325463c70c4fSNeilBrown mddev->level != mddev->new_level) { 3255664e7c41SAndre Noll mddev->new_chunk_sectors = mddev->chunk_sectors; 325663c70c4fSNeilBrown mddev->new_layout = mddev->layout; 325763c70c4fSNeilBrown mddev->new_level = mddev->level; 325863c70c4fSNeilBrown return -EINVAL; 325963c70c4fSNeilBrown } 326063c70c4fSNeilBrown 32612214c260SArtur Paszkiewicz if (!mddev_is_clustered(mddev)) 32622214c260SArtur Paszkiewicz md_allow_write(mddev); 32632a2275d6SNeilBrown 326463c70c4fSNeilBrown raid_disks = mddev->raid_disks + mddev->delta_disks; 326563c70c4fSNeilBrown 32666ea9c07cSNeilBrown if (raid_disks < conf->raid_disks) { 32676ea9c07cSNeilBrown cnt=0; 32686ea9c07cSNeilBrown for (d= 0; d < conf->raid_disks; d++) 32691da177e4SLinus Torvalds if (conf->mirrors[d].rdev) 32706ea9c07cSNeilBrown cnt++; 32716ea9c07cSNeilBrown if (cnt > raid_disks) 32721da177e4SLinus Torvalds return -EBUSY; 32736ea9c07cSNeilBrown } 32741da177e4SLinus Torvalds 32751da177e4SLinus Torvalds newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 32761da177e4SLinus Torvalds if (!newpoolinfo) 32771da177e4SLinus Torvalds return -ENOMEM; 32781da177e4SLinus Torvalds newpoolinfo->mddev = mddev; 32798f19ccb2SNeilBrown newpoolinfo->raid_disks = raid_disks * 2; 32801da177e4SLinus Torvalds 32813f677f9cSMarcos Paulo de Souza ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc, 3282c7afa803SMarcos Paulo de Souza rbio_pool_free, newpoolinfo); 3283afeee514SKent Overstreet if (ret) { 32841da177e4SLinus Torvalds kfree(newpoolinfo); 3285afeee514SKent Overstreet return ret; 32861da177e4SLinus Torvalds } 32876396bb22SKees Cook newmirrors = kzalloc(array3_size(sizeof(struct raid1_info), 32886396bb22SKees Cook raid_disks, 2), 32898f19ccb2SNeilBrown GFP_KERNEL); 32901da177e4SLinus Torvalds if (!newmirrors) { 32911da177e4SLinus Torvalds kfree(newpoolinfo); 3292afeee514SKent Overstreet mempool_exit(&newpool); 32931da177e4SLinus Torvalds return -ENOMEM; 32941da177e4SLinus Torvalds } 32951da177e4SLinus Torvalds 3296e2d59925SNeilBrown freeze_array(conf, 0); 32971da177e4SLinus Torvalds 32981da177e4SLinus Torvalds /* ok, everything is stopped */ 32991da177e4SLinus Torvalds oldpool = conf->r1bio_pool; 33001da177e4SLinus Torvalds conf->r1bio_pool = newpool; 33016ea9c07cSNeilBrown 3302a88aa786SNeilBrown for (d = d2 = 0; d < conf->raid_disks; d++) { 33033cb03002SNeilBrown struct md_rdev *rdev = conf->mirrors[d].rdev; 3304a88aa786SNeilBrown if (rdev && rdev->raid_disk != d2) { 330536fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev); 3306a88aa786SNeilBrown rdev->raid_disk = d2; 330736fad858SNamhyung Kim sysfs_unlink_rdev(mddev, rdev); 330836fad858SNamhyung Kim if (sysfs_link_rdev(mddev, rdev)) 33091d41c216SNeilBrown 
pr_warn("md/raid1:%s: cannot register rd%d\n", 331036fad858SNamhyung Kim mdname(mddev), rdev->raid_disk); 3311a88aa786SNeilBrown } 3312a88aa786SNeilBrown if (rdev) 3313a88aa786SNeilBrown newmirrors[d2++].rdev = rdev; 33146ea9c07cSNeilBrown } 33151da177e4SLinus Torvalds kfree(conf->mirrors); 33161da177e4SLinus Torvalds conf->mirrors = newmirrors; 33171da177e4SLinus Torvalds kfree(conf->poolinfo); 33181da177e4SLinus Torvalds conf->poolinfo = newpoolinfo; 33191da177e4SLinus Torvalds 3320c04be0aaSNeilBrown spin_lock_irqsave(&conf->device_lock, flags); 33211da177e4SLinus Torvalds mddev->degraded += (raid_disks - conf->raid_disks); 3322c04be0aaSNeilBrown spin_unlock_irqrestore(&conf->device_lock, flags); 33231da177e4SLinus Torvalds conf->raid_disks = mddev->raid_disks = raid_disks; 332463c70c4fSNeilBrown mddev->delta_disks = 0; 33251da177e4SLinus Torvalds 3326e2d59925SNeilBrown unfreeze_array(conf); 33271da177e4SLinus Torvalds 3328985ca973SNeilBrown set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 33291da177e4SLinus Torvalds set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 33301da177e4SLinus Torvalds md_wakeup_thread(mddev->thread); 33311da177e4SLinus Torvalds 3332afeee514SKent Overstreet mempool_exit(&oldpool); 33331da177e4SLinus Torvalds return 0; 33341da177e4SLinus Torvalds } 33351da177e4SLinus Torvalds 3336b03e0ccbSNeilBrown static void raid1_quiesce(struct mddev *mddev, int quiesce) 333736fa3063SNeilBrown { 3338e8096360SNeilBrown struct r1conf *conf = mddev->private; 333936fa3063SNeilBrown 3340b03e0ccbSNeilBrown if (quiesce) 334107169fd4Smajianpeng freeze_array(conf, 0); 3342b03e0ccbSNeilBrown else 334307169fd4Smajianpeng unfreeze_array(conf); 334436fa3063SNeilBrown } 334536fa3063SNeilBrown 3346fd01b88cSNeilBrown static void *raid1_takeover(struct mddev *mddev) 3347709ae487SNeilBrown { 3348709ae487SNeilBrown /* raid1 can take over: 3349709ae487SNeilBrown * raid5 with 2 devices, any layout or chunk size 3350709ae487SNeilBrown */ 3351709ae487SNeilBrown if (mddev->level == 5 && mddev->raid_disks == 2) { 3352e8096360SNeilBrown struct r1conf *conf; 3353709ae487SNeilBrown mddev->new_level = 1; 3354709ae487SNeilBrown mddev->new_layout = 0; 3355709ae487SNeilBrown mddev->new_chunk_sectors = 0; 3356709ae487SNeilBrown conf = setup_conf(mddev); 33576995f0b2SShaohua Li if (!IS_ERR(conf)) { 335807169fd4Smajianpeng /* Array must appear to be quiesced */ 335907169fd4Smajianpeng conf->array_frozen = 1; 3360394ed8e4SShaohua Li mddev_clear_unsupported_flags(mddev, 3361394ed8e4SShaohua Li UNSUPPORTED_MDDEV_FLAGS); 33626995f0b2SShaohua Li } 3363709ae487SNeilBrown return conf; 3364709ae487SNeilBrown } 3365709ae487SNeilBrown return ERR_PTR(-EINVAL); 3366709ae487SNeilBrown } 33671da177e4SLinus Torvalds 336884fc4b56SNeilBrown static struct md_personality raid1_personality = 33691da177e4SLinus Torvalds { 33701da177e4SLinus Torvalds .name = "raid1", 33712604b703SNeilBrown .level = 1, 33721da177e4SLinus Torvalds .owner = THIS_MODULE, 3373849674e4SShaohua Li .make_request = raid1_make_request, 3374849674e4SShaohua Li .run = raid1_run, 3375afa0f557SNeilBrown .free = raid1_free, 3376849674e4SShaohua Li .status = raid1_status, 3377849674e4SShaohua Li .error_handler = raid1_error, 33781da177e4SLinus Torvalds .hot_add_disk = raid1_add_disk, 33791da177e4SLinus Torvalds .hot_remove_disk= raid1_remove_disk, 33801da177e4SLinus Torvalds .spare_active = raid1_spare_active, 3381849674e4SShaohua Li .sync_request = raid1_sync_request, 33821da177e4SLinus Torvalds .resize = raid1_resize, 338380c3a6ceSDan Williams .size = raid1_size, 
338463c70c4fSNeilBrown 	.check_reshape	= raid1_reshape,
338536fa3063SNeilBrown 	.quiesce	= raid1_quiesce,
3386709ae487SNeilBrown 	.takeover	= raid1_takeover,
33875c675f83SNeilBrown 	.congested	= raid1_congested,
33881da177e4SLinus Torvalds };
33891da177e4SLinus Torvalds 
33901da177e4SLinus Torvalds static int __init raid_init(void)
33911da177e4SLinus Torvalds {
33922604b703SNeilBrown 	return register_md_personality(&raid1_personality);
33931da177e4SLinus Torvalds }
33941da177e4SLinus Torvalds 
33951da177e4SLinus Torvalds static void raid_exit(void)
33961da177e4SLinus Torvalds {
33972604b703SNeilBrown 	unregister_md_personality(&raid1_personality);
33981da177e4SLinus Torvalds }
33991da177e4SLinus Torvalds 
34001da177e4SLinus Torvalds module_init(raid_init);
34011da177e4SLinus Torvalds module_exit(raid_exit);
34021da177e4SLinus Torvalds MODULE_LICENSE("GPL");
34030efb9e61SNeilBrown MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
34041da177e4SLinus Torvalds MODULE_ALIAS("md-personality-3"); /* RAID1 */
3405d9d166c2SNeilBrown MODULE_ALIAS("md-raid1");
34062604b703SNeilBrown MODULE_ALIAS("md-level-1");
340734db0cd6SNeilBrown 
340834db0cd6SNeilBrown module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
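
/*
 * Editor's illustration (not part of the driver): the badblock-aligned
 * chunking that narrow_write_error() performs above, extracted into a
 * minimal standalone userspace sketch.  All values here are made-up
 * examples; only the alignment arithmetic mirrors the kernel code.
 * Guarded with #if 0 so it can never be built into the module.
 */
#if 0
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t sector = 1003;		/* hypothetical r1_bio->sector */
	int sect_to_write = 20;		/* hypothetical r1_bio->sectors */
	int block_sectors = 8;		/* badblock unit, e.g. 4KiB / 512B */
	int sectors;

	/* The first chunk only runs up to the next block_sectors
	 * boundary, so every subsequent chunk starts aligned. */
	sectors = ((sector + block_sectors) &
		   ~(sector_t)(block_sectors - 1)) - sector;

	while (sect_to_write) {
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* prints: 5 sectors at 1003, 8 at 1008, 7 at 1016 */
		printf("write %d sectors at %llu\n", sectors, sector);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return 0;
}
#endif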